diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml
index aa43036fa..cd6690c6d 100644
--- a/.github/workflows/e2e.yml
+++ b/.github/workflows/e2e.yml
@@ -2,13 +2,12 @@ name: e2e
 on:
   push:
-    branches: [main]
+    branches: [main, smart-tests] # smart-tests branch temporarily added for Smart Tests CLI development
   workflow_dispatch:
-
 env:
-  LAUNCHABLE_DEBUG: 1
-  LAUNCHABLE_REPORT_ERROR: 1
+  SMART_TESTS_DEBUG: 1
+  SMART_TESTS_REPORT_ERROR: 1
   # The WORKSPACE file is disabled by default in Bazel 8.
   # As a workaround, we configure the old version.
   USE_BAZEL_VERSION: "7.x"
@@ -16,56 +15,58 @@ env:
 jobs:
   tests:
     runs-on: ubuntu-22.04
-    strategy:
-      matrix:
-        python-version: [3.7, 3.8, 3.9, "3.10"]
     steps:
-      - uses: actions/checkout@v4
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
        with:
          repository: launchableinc/examples
          path: examples
       - name: Set up JDK 1.8
-        uses: actions/setup-java@v4
+        uses: actions/setup-java@c5195efecf7bdfc987ee8bae7a71cb8b11521c00 # v4
         with:
           java-version: 8
-          distribution: 'temurin'
-      - uses: actions/setup-go@v5
+          distribution: "temurin"
+      - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5
         with:
           go-version: 1.24
-      - uses: actions/setup-python@v5
+      - name: Install uv
+        uses: astral-sh/setup-uv@caf0cab7a618c569241d31dcd442f54681755d39 # v3
         with:
-          python-version: ${{ matrix.python-version }}
+          enable-cache: true
+          cache-dependency-glob: "uv.lock"
       - name: Install dependencies
-        run: |
-          python -m pip install --upgrade pip
-          pip install .
+        run: uv sync --dev && uv tool install .
       # bazel
       - name: Install Bazelisk
         run: |
           go install github.com/bazelbuild/bazelisk@latest
       - name: "bazel: verify"
-        run: "launchable verify"
+        run: "smart-tests verify"
         env:
-          LAUNCHABLE_TOKEN: ${{ secrets.LAUNCHABLE_TOKEN_BAZEL }}
+          SMART_TESTS_TOKEN: ${{ secrets.LAUNCHABLE_TOKEN_BAZEL }}
       - name: "bazel: Record build"
-        run: 'launchable record build --name "$GITHUB_RUN_ID" --source main=../.. --source cli=../../..'
+        run: 'smart-tests record build --build "$GITHUB_RUN_ID" --source main=../.. --source cli=../../..'
+        env:
+          SMART_TESTS_TOKEN: ${{ secrets.LAUNCHABLE_TOKEN_BAZEL }}
+        working-directory: examples/bazel/java
+      - name: "bazel: Record session"
+        run: 'smart-tests record session --build "$GITHUB_RUN_ID" --test-suite "bazel" > bazel-test-session.txt'
         env:
-          LAUNCHABLE_TOKEN: ${{ secrets.LAUNCHABLE_TOKEN_BAZEL }}
+          SMART_TESTS_TOKEN: ${{ secrets.LAUNCHABLE_TOKEN_BAZEL }}
         working-directory: examples/bazel/java
       - name: "bazel: Subset"
-        run: bazelisk query 'tests(//...)' | launchable subset --target 30% bazel > subset.txt
+        run: bazelisk query 'tests(//...)' | smart-tests subset bazel --session $(cat bazel-test-session.txt) --target 30% > subset.txt
         env:
-          LAUNCHABLE_TOKEN: ${{ secrets.LAUNCHABLE_TOKEN_BAZEL }}
+          SMART_TESTS_TOKEN: ${{ secrets.LAUNCHABLE_TOKEN_BAZEL }}
         working-directory: examples/bazel/java
       - name: "bazel: Run test"
         run: bazelisk test $(cat subset.txt)
         working-directory: examples/bazel/java
       - name: "bazel: Record test results"
-        run: launchable record tests bazel .
+        run: smart-tests record test bazel --session $(cat bazel-test-session.txt) .
         if: always()
         env:
-          LAUNCHABLE_TOKEN: ${{ secrets.LAUNCHABLE_TOKEN_BAZEL }}
+          SMART_TESTS_TOKEN: ${{ secrets.LAUNCHABLE_TOKEN_BAZEL }}
         working-directory: examples/bazel/java
       # go
       - name: Install go-junit-report
@@ -73,49 +74,59 @@ jobs:
           go install github.com/jstemmer/go-junit-report@latest
         working-directory: examples/go
       - name: "go: verify"
-        run: "launchable verify"
+        run: "smart-tests verify"
         env:
-          LAUNCHABLE_TOKEN: ${{ secrets.LAUNCHABLE_TOKEN_GO }}
+          SMART_TESTS_TOKEN: ${{ secrets.LAUNCHABLE_TOKEN_GO }}
       - name: "go: Record build"
-        run: 'launchable record build --name "$GITHUB_RUN_ID" --source main=.. --source cli=../..'
+        run: 'smart-tests record build --build "$GITHUB_RUN_ID" --source main=.. --source cli=../..'
         env:
-          LAUNCHABLE_TOKEN: ${{ secrets.LAUNCHABLE_TOKEN_GO }}
+          SMART_TESTS_TOKEN: ${{ secrets.LAUNCHABLE_TOKEN_GO }}
+        working-directory: examples/go
+      - name: "go: Record session"
+        run: 'smart-tests record session --build "$GITHUB_RUN_ID" --test-suite "go-test" > go-test-session.txt'
+        env:
+          SMART_TESTS_TOKEN: ${{ secrets.LAUNCHABLE_TOKEN_GO }}
         working-directory: examples/go
       - name: "go: Subset"
-        run: go test -list="Test|Example" ./... | launchable subset --confidence 80% go-test > launchable-subset.txt
+        run: go test -list="Test|Example" ./... | smart-tests subset go-test --session $(cat go-test-session.txt) --confidence 80% > smart-tests-subset.txt
         env:
-          LAUNCHABLE_TOKEN: ${{ secrets.LAUNCHABLE_TOKEN_GO }}
+          SMART_TESTS_TOKEN: ${{ secrets.LAUNCHABLE_TOKEN_GO }}
         working-directory: examples/go
       - name: "go: Run test"
-        run: go test -run $(cat launchable-subset.txt) ./... -v 2>&1 | go-junit-report -set-exit-code > report.xml
+        run: go test -run $(cat smart-tests-subset.txt) ./... -v 2>&1 | go-junit-report -set-exit-code > report.xml
         working-directory: examples/go
       - name: "go: Record test results"
-        run: launchable record tests go-test report.xml
+        run: smart-tests record test go-test --session $(cat go-test-session.txt) report.xml
         if: always()
         env:
-          LAUNCHABLE_TOKEN: ${{ secrets.LAUNCHABLE_TOKEN_GO }}
+          SMART_TESTS_TOKEN: ${{ secrets.LAUNCHABLE_TOKEN_GO }}
         working-directory: examples/go
       # gradle
       - name: "gradle: verify"
-        run: "launchable verify"
+        run: "smart-tests verify"
         env:
-          LAUNCHABLE_TOKEN: ${{ secrets.LAUNCHABLE_TOKEN_GRADLE }}
+          SMART_TESTS_TOKEN: ${{ secrets.LAUNCHABLE_TOKEN_GRADLE }}
       - name: "gradle: Record build"
-        run: 'launchable record build --name "$GITHUB_RUN_ID" --source main=.. --source cli=../..'
+        run: 'smart-tests record build --build "$GITHUB_RUN_ID" --source main=.. --source cli=../..'
+        env:
+          SMART_TESTS_TOKEN: ${{ secrets.LAUNCHABLE_TOKEN_GRADLE }}
+        working-directory: examples/gradle
+      - name: "gradle: Record session"
+        run: 'smart-tests record session --build "$GITHUB_RUN_ID" --test-suite "gradle" > gradle-test-session.txt'
         env:
-          LAUNCHABLE_TOKEN: ${{ secrets.LAUNCHABLE_TOKEN_GRADLE }}
+          SMART_TESTS_TOKEN: ${{ secrets.LAUNCHABLE_TOKEN_GRADLE }}
         working-directory: examples/gradle
       - name: "gradle: Subset"
-        run: launchable subset --target 80% gradle src/test/java > launchable-subset.txt
+        run: smart-tests subset gradle --session $(cat gradle-test-session.txt) --target 80% src/test/java > smart-tests-subset.txt
         env:
-          LAUNCHABLE_TOKEN: ${{ secrets.LAUNCHABLE_TOKEN_GRADLE }}
+          SMART_TESTS_TOKEN: ${{ secrets.LAUNCHABLE_TOKEN_GRADLE }}
         working-directory: examples/gradle
       - name: "gradle: Run test"
-        run: ./gradlew test $(cat launchable-subset.txt)
+        run: ./gradlew test $(cat smart-tests-subset.txt)
         working-directory: examples/gradle
       - name: "gradle: Record test results"
-        run: launchable record tests gradle build/test-results/test
+        run: smart-tests record test gradle --session $(cat gradle-test-session.txt) build/test-results/test
         if: always()
         env:
-          LAUNCHABLE_TOKEN: ${{ secrets.LAUNCHABLE_TOKEN_GRADLE }}
+          SMART_TESTS_TOKEN: ${{ secrets.LAUNCHABLE_TOKEN_GRADLE }}
         working-directory: examples/gradle
diff --git a/.github/workflows/python-publish.yml b/.github/workflows/publish.yml
similarity index 67%
rename from .github/workflows/python-publish.yml
rename to .github/workflows/publish.yml
index 17b2b2484..5c2d712ff 100644
--- a/.github/workflows/python-publish.yml
+++ b/.github/workflows/publish.yml
@@ -1,7 +1,7 @@
-# This workflows will upload a Python Package
-# For more information see: https://github.com/marketplace/actions/pypi-publish
+# Publish workflow for the Smart Tests CLI
+# Builds and publishes packages to PyPI and Docker Hub
 
-name: Upload Python Package
+name: Publish
 
 on:
   workflow_dispatch:
@@ -10,7 +10,7 @@ on:
       - main
 
 env:
-  IMAGE_NAME: cloudbees/launchable
+  IMAGE_NAME: cloudbees/smart-tests-cli
 
 jobs:
   tagpr:
@@ -38,38 +38,19 @@ jobs:
       - uses: actions/checkout@v4
         with:
           fetch-depth: 0
-      - name: Set up Python
-        uses: actions/setup-python@v5
-        with:
-          python-version: '3.x'
-      # build and publish package using GitHub Actions workflow
-      # https://packaging.python.org/en/latest/guides/publishing-package-distribution-releases-using-github-actions-ci-cd-workflows/
-      - name: Install pypa/build
-        run: >-
-          python -m
-          pip install
-          build
-          --user
-      - name: Build a binary wheel and a source tarball
-        run: >-
-          python -m
-          build
-          --sdist
-          --wheel
-          --outdir dist/
-          .
-      # actual publish
+      - name: Install uv
+        uses: astral-sh/setup-uv@v3
+
+      - name: Build package
+        run: uv build
       - name: Publish to PyPI
-        uses: pypa/gh-action-pypi-publish@76f52bc884231f62b9a034ebfe128415bbaabdfc # v1.12.4
-        with:
-          user: __token__
-          password: ${{ secrets.PYPI_API_TOKEN }}
+        run: uv publish --token ${{ secrets.SMART_TESTS_PYPI_API_TOKEN }}
       - name: Actions for Discord
         uses: Ilshidur/action-discord@0.3.2
         env:
           DISCORD_WEBHOOK: ${{ secrets.DISCORD_WEBHOOK }}
         with:
-          args: 'Launchable CLI ${{ needs.tagpr.outputs.tag }} is released! https://github.com/cloudbees-oss/smart-tests-cli/releases/tag/${{ needs.tagpr.outputs.tag }}'
+          args: 'Smart Tests CLI ${{ needs.tagpr.outputs.tag }} is released! https://github.com/cloudbees-oss/smart-tests-cli/releases/tag/${{ needs.tagpr.outputs.tag }}'
 
   docker:
     name: Push Docker image to Docker Hub
diff --git a/.github/workflows/python-package.yml b/.github/workflows/python-package.yml
deleted file mode 100644
index aee24985c..000000000
--- a/.github/workflows/python-package.yml
+++ /dev/null
@@ -1,119 +0,0 @@
-# This workflow will install Python dependencies, run tests and lint with a variety of Python versions
-# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions
-
-name: Python package
-
-on:
-  workflow_dispatch:
-  push:
-    branches: [ main ]
-    paths-ignore:
-      - 'WORKSPACE'
-      - 'src/**'
-  pull_request:
-    branches: [ main ]
-    paths-ignore:
-      - 'WORKSPACE'
-      - 'src/**'
-  schedule:
-    # This job runs at 00:00 JST every day.
-    - cron: '0 9 * * *'
-
-env:
-  LAUNCHABLE_ORGANIZATION: "launchableinc"
-  LAUNCHABLE_WORKSPACE: "cli"
-  GITHUB_PULL_REQUEST_URL: ${{ github.event.pull_request.html_url }}
-  GITHUB_PR_HEAD_SHA: ${{ github.event.pull_request.head.sha }}
-
-permissions:
-  id-token: write
-  contents: read
-
-jobs:
-  build:
-    runs-on: ${{ matrix.os }}
-    strategy:
-      matrix:
-        # Python 3.6 is not supported on Ubuntu 22.04.
-        os: [ubuntu-22.04, windows-latest]
-        python-version: [3.7, 3.8, 3.9, "3.10", "3.11", "3.12"]
-        include:
-          - os: windows-latest
-            python-version: 3.6
-
-    steps:
-      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
-        with:
-          fetch-depth: 0
-      - name: Set up Python ${{ matrix.python-version }}
-        uses: actions/setup-python@v5
-        with:
-          python-version: ${{ matrix.python-version }}
-      - name: Set up JDK 1.8
-        uses: actions/setup-java@c5195efecf7bdfc987ee8bae7a71cb8b11521c00 # v4.7.1
-        with:
-          java-version: 8
-          distribution: 'temurin'
-      - name: Install specific dependencies in 3.6
-        if: matrix.python-version == '3.6'
-        uses: nick-fields/retry@ce71cc2ab81d554ebbe88c79ab5975992d79ba08 # v3.0.2
-        with:
-          max_attempts: 3
-          timeout_minutes: 5
-          retry_on: error
-          command: |
-            python -m pip install --upgrade pip
-            pip install pipenv==2021.11.5
-            pipenv install --dev --python ${{ matrix.python-version }}
-      - name: Install dependencies
-        if: matrix.python-version != '3.6'
-        uses: nick-fields/retry@ce71cc2ab81d554ebbe88c79ab5975992d79ba08 # v3.0.2
-        with:
-          max_attempts: 3
-          timeout_minutes: 5
-          retry_on: error
-          command: |
-            python -m pip install --upgrade pip
-            pip install pipenv
-            pipenv install --dev --python ${{ matrix.python-version }}
-      - name: Build
-        run: |
-          pipenv run pip list
-          pipenv run build
-          pipenv run install
-      - name: Type check
-        run: pipenv run type
-      - name: Lint with flake8
-        run: |
-          # stop the build if there are Python syntax errors or undefined names
-          pipenv run lint
-      - name: Pull request validation
-        run: |
-          # Install Launchable CLI from this repos's code
-          pip3 install . > /dev/null
-
-          set -x
-
-          launchable verify
-
-          # Tell Launchable about the build you are producing and testing
-          launchable record build --name ${GITHUB_RUN_ID}
-
-          launchable record session --build ${GITHUB_RUN_ID} --flavor os=${{ matrix.os }} --flavor python=${{ matrix.python-version }} > session.txt
-
-          # Find 25% of the relevant tests to run for this change
-          find tests -name test_*.py | grep -v tests/data | launchable subset --target 25% --session $(cat session.txt) --rest launchable-remainder.txt file > subset.txt
-
-          function record() {
-            # Record test results
-            LAUNCHABLE_SLACK_NOTIFICATION=true launchable record tests --session $(cat session.txt) file test-results/*.xml
-          }
-
-          trap record EXIT
-
-          # Test subset of tests
-          pipenv run test-xml $(tr '\r\n' '\n' < subset.txt)
-
-          # Test rest of tests
-          pipenv run test-xml $(tr '\r\n' '\n' < launchable-remainder.txt)
-        shell: bash
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
new file mode 100644
index 000000000..272c74308
--- /dev/null
+++ b/.github/workflows/test.yml
@@ -0,0 +1,95 @@
+# Test workflow for the Smart Tests CLI
+# Runs tests, linting, type checking, and build verification
+
+name: Test
+
+on:
+  workflow_dispatch:
+  push:
+    branches: [main, smart-tests]
+    paths-ignore:
+      - "WORKSPACE"
+      - "src/**"
+  pull_request:
+    paths-ignore:
+      - "WORKSPACE"
+      - "src/**"
+  schedule:
+    # This job runs at 00:00 JST every day.
+    - cron: "0 9 * * *"
+
+env:
+  SMART_TESTS_ORGANIZATION: "launchableinc"
+  SMART_TESTS_WORKSPACE: "cli"
+  GITHUB_PULL_REQUEST_URL: ${{ github.event.pull_request.html_url }}
+  GITHUB_PR_HEAD_SHA: ${{ github.event.pull_request.head.sha }}
+
+permissions:
+  id-token: write
+  contents: read
+
+jobs:
+  build:
+    runs-on: ${{ matrix.os }}
+    strategy:
+      matrix:
+        os: [ubuntu-22.04, windows-latest]
+
+    steps:
+      - uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+
+      - name: Install uv
+        uses: astral-sh/setup-uv@caf0cab7a618c569241d31dcd442f54681755d39 # v3
+        with:
+          enable-cache: true
+          cache-dependency-glob: "uv.lock"
+
+      - name: Set up JDK 1.8
+        uses: actions/setup-java@c5195efecf7bdfc987ee8bae7a71cb8b11521c00 # v4.7.1
+        with:
+          java-version: 8
+          distribution: "temurin"
+
+      - name: Install dependencies
+        run: uv sync --dev
+
+      - name: Test package build
+        run: uv build
+
+      - name: Type check
+        run: uv run poe type
+
+      - name: Lint with flake8
+        run: uv run poe lint
+      - name: Pull request validation
+        run: |
+          # Install Smart Tests CLI from this repo's code as a global tool
+          uv tool install .
+
+          set -x
+
+          smart-tests verify
+
+          # Tell Smart Tests about the build you are producing and testing
+          smart-tests record build --build ${GITHUB_RUN_ID}
+
+          smart-tests record session --build ${GITHUB_RUN_ID} --test-suite 'python-unittest' --flavor os=${{ matrix.os }} --flavor python=$(cat .python-version) > session.txt
+
+          # Find 25% of the relevant tests to run for this change
+          find tests -name test_*.py | grep -v tests/data | smart-tests subset file --session $(cat session.txt) --target 25% --rest smart-tests-remainder.txt > subset.txt
+
+          function record() {
+            # Record test results
+            SMART_TESTS_SLACK_NOTIFICATION=true smart-tests record test file --session $(cat session.txt) test-results/*.xml
+          }
+
+          trap record EXIT
+
+          # Test subset of tests
+          uv run poe test-xml $(tr '\r\n' '\n' < subset.txt)
+
+          # Test rest of tests
+          uv run poe test-xml $(tr '\r\n' '\n' < smart-tests-remainder.txt)
+        shell: bash
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index f3372a8ca..6c8b21e07 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -9,9 +9,9 @@ repos:
     hooks:
       - id: flake8
         args:
-          - --ignore=C901,E741
+          - --ignore=C901,E741,E126
           - --max-line-length=130
-          - launchable/
+          - smart_tests/
           - tests/
 
   - repo: https://github.com/pycqa/isort
@@ -21,11 +21,11 @@ hooks:
       - id: isort
        args:
          - "-l 130"
          - --balanced
-          - launchable/*.py
+          - smart_tests/*.py
           - tests/*.py
 
-  - repo: https://github.com/pre-commit/mirrors-autopep8
-    rev: v1.7.0
+  - repo: https://github.com/hhatto/autopep8
+    rev: v2.3.1
     hooks:
       - id: autopep8
         args:
@@ -34,5 +34,6 @@
           - --aggressive
           - --experimental
           - --max-line-length=130
-          - launchable/
+          - --ignore=E126
+          - smart_tests/
           - tests/
diff --git a/.python-version b/.python-version
index cd337510b..24ee5b1be 100644
--- a/.python-version
+++ b/.python-version
@@ -1 +1 @@
-3.6.15
+3.13
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index fc7d9a19e..c233d77f4 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -5,11 +5,11 @@ Code should follow [pep8](https://www.python.org/dev/peps/pep-0008/). To check c
 # Development
 You can use Python's `-m` option to launch module directly.
 ```shell
-python3 -m launchable record commit
+python3 -m smart_tests record commit
 ```
 
 # Design Philosophy
-- **Dependencies**: Launchable needs to run with varying environments of users. So when we need to
+- **Dependencies**: Smart Tests needs to run with varying environments of users. So we need to
   reduce dependencies to other packages or tools installed on the system. For example, Python packages
   we depend on and their version constraints might conflict with what other Python packages specifies. Some
   libraries have native components, which need to be built during `pip install` and that adds to
diff --git a/Dockerfile b/Dockerfile
index e35958a73..b0e5f2853 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,26 +1,27 @@
-FROM python:3.11-slim AS builder
+FROM python:3.13-slim
 
 RUN apt-get update && \
-    apt-get install -y git && \
+    apt-get install -y --no-install-recommends \
+    git \
+    openjdk-21-jre-headless \
+    curl && \
     rm -rf /var/lib/apt/lists/*
 
+# Install uv
+COPY --from=ghcr.io/astral-sh/uv:latest /uv /bin/uv
+
 WORKDIR /src
 COPY . .
-RUN pip wheel --no-cache-dir -w /wheels .
-
-FROM python:3.11-slim
+# Install dependencies and build the package using uv
+# This works with normal Git repositories (non-worktree)
+RUN uv sync --frozen --no-dev
 
 RUN apt-get update && \
     apt-get install -y --no-install-recommends openjdk-21-jre-headless git && \
     rm -rf /var/lib/apt/lists/*
 
-RUN --mount=type=bind,from=builder,source=/wheels,target=/wheels pip install --no-cache-dir /wheels/*.whl
-
-# get rid of a warning that talks about pkg_resources deprecation
-# see https://setuptools.pypa.io/en/latest/history.html#v67-3-0
-RUN pip install setuptools==66.1.1
-
-RUN useradd -m launchable
-USER launchable
+RUN useradd -m smart-tests && chown -R smart-tests:smart-tests /src
+USER smart-tests
 
-ENTRYPOINT ["launchable"]
+ENTRYPOINT ["smart-tests"]
diff --git a/Pipfile b/Pipfile
deleted file mode 100644
index 1490547d6..000000000
--- a/Pipfile
+++ /dev/null
@@ -1,45 +0,0 @@
-[[source]]
-name = "pypi"
-url = "https://pypi.org/simple"
-verify_ssl = true
-
-[requires]
-python_version = "3.6"
-
-[dev-packages]
-flake8 = "*"
-setuptools = ">=30.3.0"
-setuptools-scm = "*"
-wheel = "*"
-# The last flake8 version that supports Python 3.6 specifies "pycodestyle >=
-# 2.9.0, < 2.10.0". ref: https://github.com/PyCQA/flake8/pull/1633
-# The latest autopep8 specifies "pycodestyle >= 2.10.0". This conflict cannot be resolved. Pin the version to resolve this.
-autopep8 = "<=1.7.0"
-importlib-metadata = "<7.2"
-isort = "*"
-more_itertools = "<10.4"
-mypy = "<1.16.0"
-pre-commit = "*"
-responses = "*"
-types-click = "*"
-types-pkg_resources = "0.1.3"
-types-python-dateutil = "*"
-types-requests = "*"
-types-tabulate = "*"
-lxml = "<=5.2.2"
-unittest-xml-reporting = "*"
-# newer virtualenv creates a conflict with importlib-metadata. This is the latest version that seems to avoid that
-virtualenv = "==20.16.2"
-
-[packages]
-launchable = {editable = true, path = "."}
-
-[scripts]
-build = "python setup.py sdist bdist_wheel"
-format = "/bin/bash -c 'isort -l 130 --balanced launchable/*.py tests/*.py && autopep8 --in-place --recursive --aggressive --experimental --max-line-length=130 --verbose launchable/ tests/'"
-install = "pip install -U ."
-lint = "flake8 --count --ignore=C901,E741,F401 --show-source --max-line-length=130 --statistics launchable/ tests/"
-lint-warn = "flake8 --count --exit-zero --max-complexity=15 --max-line-length=130 --statistics launchable/ tests/"
-test = "python -m unittest"
-test-xml = "python -m test-runner"
-type = "mypy launchable tests"
diff --git a/README.md b/README.md
index 2079ae423..6ab9408d8 100644
--- a/README.md
+++ b/README.md
@@ -7,27 +7,37 @@ https://www.launchableinc.com/docs/getting-started/.
 
 ## Preparation
 
-We recommend Pipenv
+We recommend uv for dependency management:
 
 ```shell
-pip install pipenv==2021.5.29
-pipenv install --dev
+# Install uv
+curl -LsSf https://astral.sh/uv/install.sh | sh
+
+# Install dependencies
+uv sync --dev
 ```
 
 In order to automatically format files with autopep8, this repository contains a
 configuration for [pre-commit](https://pre-commit.com). Install the hook with
-`pipenv run pre-commit install`.
+`uv run pre-commit install`.
 
 ## Load development environment
 
 ```shell
-pipenv shell
+# Activate virtual environment
+source .venv/bin/activate
+# or use uv run for individual commands
+uv run <command>
 ```
 
 ## Run tests cli
 
 ```shell
-pipenv run test
+# Using poethepoet (recommended)
+uv run poe test
+
+# Direct command
+uv run python -m unittest
 ```
 
 ## Run tests exe_deploy.jar
@@ -36,18 +46,64 @@ pipenv run test
 bazel test ...
 ```
 
+## Available Development Tasks
+
+This project uses [poethepoet](https://poethepoet.natn.io/) for task management. Available tasks:
+
+```shell
+# Show all available tasks
+uv run poe --help
+
+# Run tests
+uv run poe test
+
+# Run tests with XML output
+uv run poe test-xml
+
+# Run linting
+uv run poe lint
+
+# Run type checking
+uv run poe type
+
+# Format code
+uv run poe format
+
+# Build package
+uv run poe build
+
+# Install package locally
+uv run poe install
+```
+
 ## Add dependency
 
 ```shell
-pipenv install --dev some-what-module
+# Add runtime dependency
+uv add some-package
+
+# Add development dependency
+uv add --dev some-dev-package
 ```
 
+## Updating Python Version
+
+When updating the Python version requirement, update the following files:
+
+1. **`.python-version`** - Used by pyenv, uv, and local development
+2. **`pyproject.toml`** - Update `requires-python = ">=X.Y"`
+3. **`setup.cfg`** - Update `python_requires = >=X.Y`
+4. **`.github/workflows/test.yml`** - Update any pinned Python version
+5. **`.github/workflows/publish.yml`** - Update any pinned Python version
+6. **`README.md`** - Update prerequisite section
+7. **`CLAUDE.md`** - Update development notes
+
 # How to release
 
 Create new release on Github, then Github Actions automatically uploads the
 module to PyPI.
 
-## How to update launchable/jar/exe_deploy.jar
+## How to update smart_tests/jar/exe_deploy.jar
 
 ```
 ./build-java.sh
@@ -55,11 +111,11 @@ module to PyPI.
 
 # Installing CLI
 
-You can install the `launchable` command from either source or [pypi](https://pypi.org/project/launchable/).
+You can install the `smart-tests` command from either source or [pypi](https://pypi.org/project/smart-tests/).
 
 ## Prerequisite
 
-- \>= Python 3.6
+- \>= Python 3.13
 - \>= Java 8
 
 ## Install from source
@@ -74,7 +130,7 @@ $ python setup.py install
 
 ## Install from pypi
 
 ```sh
-$ pip3 install --user --upgrade launchable~=1.0
+$ pip3 install --user --upgrade smart-tests~=1.0
 ```
 
 ## Versioning
diff --git a/build-java.sh b/build-java.sh
index 01b82321f..d79dd6b97 100755
--- a/build-java.sh
+++ b/build-java.sh
@@ -1,5 +1,4 @@
 #!/bin/bash -ex
-bazel build //src/main/java/com/launchableinc/ingest/commits:exe_deploy.jar
-bazel test //...
-cp bazel-bin/src/main/java/com/launchableinc/ingest/commits/exe_deploy.jar launchable/jar/exe_deploy.jar
-
+bazelisk build //src/main/java/com/launchableinc/ingest/commits:exe_deploy.jar
+bazelisk test //...
+cp bazel-bin/src/main/java/com/launchableinc/ingest/commits/exe_deploy.jar smart_tests/jar/exe_deploy.jar
diff --git a/launchable/__main__.py b/launchable/__main__.py
deleted file mode 100644
index ef81dfd9a..000000000
--- a/launchable/__main__.py
+++ /dev/null
@@ -1,96 +0,0 @@
-import importlib
-import importlib.util
-import logging
-import os
-from glob import glob
-from os.path import basename, dirname, join
-
-import click
-
-from launchable.app import Application
-
-from .commands.compare import compare
-from .commands.inspect import inspect
-from .commands.record import record
-from .commands.split_subset import split_subset
-from .commands.stats import stats
-from .commands.subset import subset
-from .commands.verify import verify
-from .utils import logger
-from .version import __version__
-
-
-@click.group()
-@click.version_option(version=__version__, prog_name='launchable-cli')
-@click.option(
-    '--log-level',
-    'log_level',
-    help='Set logger\'s log level (CRITICAL, ERROR, WARNING, AUDIT, INFO, DEBUG).',
-    type=str,
-    default=logger.LOG_LEVEL_DEFAULT_STR,
-)
-@click.option(
-    '--plugins',
-    'plugin_dir',
-    help='Directory to load plugins from',
-    type=click.Path(exists=True, file_okay=False)
-)
-@click.option(
-    '--dry-run',
-    'dry_run',
-    help='Dry-run mode. No data is sent to the server. However, sometimes '
-         'GET requests without payload data or side effects could be sent.'
-         'note: Since the dry run log is output together with the AUDIT log, '
-         'even if the log-level is set to warning or higher, the log level will '
-         'be forced to be set to AUDIT.',
-    is_flag=True,
-)
-@click.option(
-    '--skip-cert-verification',
-    'skip_cert_verification',
-    help='Skip the SSL certificate check. This lets you bypass system setup issues '
-         'like CERTIFICATE_VERIFY_FAILED, at the expense of vulnerability against '
-         'a possible man-in-the-middle attack. Use it as an escape hatch, but with caution.',
-    is_flag=True,
-)
-@click.pass_context
-def main(ctx, log_level, plugin_dir, dry_run, skip_cert_verification):
-    level = logger.get_log_level(log_level)
-    # In the case of dry-run, it is forced to set the level below the AUDIT.
-    # This is because the dry-run log will be output along with the audit log.
-    if dry_run and level > logger.LOG_LEVEL_AUDIT:
-        level = logger.LOG_LEVEL_AUDIT
-
-    if not skip_cert_verification:
-        skip_cert_verification = (os.environ.get('LAUNCHABLE_SKIP_CERT_VERIFICATION') is not None)
-
-    logging.basicConfig(level=level)
-
-    # load all test runners
-    for f in glob(join(dirname(__file__), 'test_runners', "*.py")):
-        f = basename(f)[:-3]
-        if f == '__init__':
-            continue
-        importlib.import_module('launchable.test_runners.%s' % f)
-
-    # load all plugins
-    if plugin_dir:
-        for f in glob(join(plugin_dir, '*.py')):
-            spec = importlib.util.spec_from_file_location(
-                "launchable.plugins.{}".format(basename(f)[:-3]), f)
-            plugin = importlib.util.module_from_spec(spec)
-            spec.loader.exec_module(plugin)
-
-    ctx.obj = Application(dry_run=dry_run, skip_cert_verification=skip_cert_verification)
-
-
-main.add_command(record)
-main.add_command(subset)
-main.add_command(split_subset)
-main.add_command(verify)
-main.add_command(inspect)
-main.add_command(stats)
-main.add_command(compare)
-
-if __name__ == '__main__':
-    main()
diff --git a/launchable/commands/compare/__init__.py b/launchable/commands/compare/__init__.py
deleted file mode 100644
index 3dc6280ba..000000000
--- a/launchable/commands/compare/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-import click
-
-from launchable.utils.click import GroupWithAlias
-
-from .subsets import subsets
-
-
-@click.group(cls=GroupWithAlias)
-def compare():
-    pass
-
-
-compare.add_command(subsets)
diff --git a/launchable/commands/helper.py b/launchable/commands/helper.py
deleted file mode 100644
index ebd83e887..000000000
--- a/launchable/commands/helper.py
+++ /dev/null
@@ -1,169 +0,0 @@
-import datetime
-from time import time
-from typing import Optional, Sequence, Tuple
-
-import click
-
-from launchable.utils.no_build import NO_BUILD_BUILD_NAME
-from launchable.utils.tracking import TrackingClient
-
-from ..app import Application
-from ..utils.launchable_client import LaunchableClient
-from ..utils.session import read_build, read_session, validate_session_format
-
-
-def require_session(
-    session: Optional[str],
-) -> Optional[str]:
-    """Ascertain the contextual test session to operate a CLI command for. If one doesn't exit, fail.
-
-    1. If the user explicitly provides the session id via the `--session` option
-    2. If the user gives no options, the current session ID is read from the session file tied to $PWD.
-       See https://github.com/cloudbees-oss/smart-tests-cli/pull/342
-    """
-    if session:
-        validate_session_format(session)
-        return session
-
-    session = read_session(require_build())
-    if session:
-        return session
-
-    raise click.UsageError(
-        click.style(
-            "No saved test session found.\n"
-            "If you already created a test session on a different machine, use the --session option. "
-            "See https://docs.launchableinc.com/sending-data-to-launchable/managing-complex-test-session-layouts",
-            fg="yellow"))
-
-
-def require_build() -> str:
-    """
-    Like read_build() but fail if a build doesn't exist
-    """
-    b = read_build()
-    if not b:
-        raise click.UsageError(
-            click.style(
-                "No saved build name found.\n"
-                "To fix this, run `launchable record build`.\n"
-                "If you already ran this command on a different machine, use the --session option. "
-                "See https://www.launchableinc.com/docs/sending-data-to-launchable/using-the-launchable-cli/"
-                "recording-test-results-with-the-launchable-cli/managing-complex-test-session-layouts/",
-                fg="yellow"))
-    return b
-
-
-def find_or_create_session(
-    context: click.core.Context,
-    session: Optional[str],
-    build_name: Optional[str],
-    tracking_client: TrackingClient,
-    flavor: Sequence[Tuple[str, str]] = (),
-    is_observation: bool = False,
-    links: Sequence[Tuple[str, str]] = (),
-    is_no_build: bool = False,
-    lineage: Optional[str] = None,
-    test_suite: Optional[str] = None,
-    timestamp: Optional[datetime.datetime] = None,
-) -> Optional[str]:
-    """Determine the test session ID to be used.
-
-    1. If the user explicitly provides the session id via the `--session` option
-    2. If the user gives no options, the current session ID is read from the session file tied to $PWD,
-       or one is created from the current build name. See https://github.com/cloudbees-oss/smart-tests-cli/pull/342
-    3. The `--build` option is legacy compatible behaviour, in which case a session gets created and tied
-       to the build. This usage still requires a locally recorded build name that must match the specified name.
-       Kohsuke is not sure what the historical motivation for this behaviour is.
-
-    Args:
-        session: The --session option value
-        build_name: The --build option value
-        flavor: The --flavor option values
-        is_observation: The --observation value
-        links: The --link option values
-        is_no_build: The --no-build option value
-        lineage: lineage option value
-        test_suite: --test-suite option value
-    """
-    from .record.session import session as session_command
-
-    if session:
-        validate_session_format(session)
-        _check_observation_mode_status(session, is_observation, tracking_client=tracking_client, app=context.obj)
-        return session
-
-    if is_no_build:
-        context.invoke(
-            session_command,
-            build_name=NO_BUILD_BUILD_NAME,
-            save_session_file=True,
-            print_session=False,
-            flavor=flavor,
-            is_observation=is_observation,
-            links=links,
-            is_no_build=is_no_build,
-            lineage=lineage,
-            test_suite=test_suite,
-        )
-        saved_build_name = read_build()
-        return read_session(str(saved_build_name))
-
-    saved_build_name = require_build()
-
-    if build_name and saved_build_name != build_name:
-        raise click.UsageError(
-            click.style(
-                "The build name you provided ({}) is different from the last build name recorded on this machine ({}).\n"
-                "Make sure to run `launchable record build --name {}` before you run this command.\n"
-                "If you already recorded this build on a different machine, use the --session option instead of --build. "
-                "See https://www.launchableinc.com/docs/sending-data-to-launchable/using-the-launchable-cli/"
-                "recording-test-results-with-the-launchable-cli/managing-complex-test-session-layouts/".format(
-                    build_name, saved_build_name, build_name), fg="yellow", ))
-
-    session_id = read_session(saved_build_name)
-    if session_id:
-        _check_observation_mode_status(session_id, is_observation, tracking_client=tracking_client, app=context.obj)
-        return session_id
-
-    context.invoke(
-        session_command,
-        build_name=saved_build_name,
-        save_session_file=True,
-        print_session=False,
-        flavor=flavor,
-        is_observation=is_observation,
-        links=links,
-        is_no_build=is_no_build,
-        lineage=lineage,
-        test_suite=test_suite,
-        timestamp=timestamp,
-    )
-    return read_session(saved_build_name)
-
-
-def time_ns():
-    # time.time_ns() method is new in Python version 3.7
-    # As a workaround, we convert time.time() to nanoseconds.
-    return int(time() * 1e9)
-
-
-def _check_observation_mode_status(session: str, is_observation: bool,
-                                   tracking_client: TrackingClient, app: Optional[Application] = None):
-    if not is_observation:
-        return
-
-    client = LaunchableClient(tracking_client=tracking_client, app=app)
-    res = client.request("get", session)
-
-    # only check when the status code is 200 not to stop the command
-    if res.status_code == 200:
-        is_observation_in_recorded_session = res.json().get("isObservation", False)
-        if is_observation and not is_observation_in_recorded_session:
-            click.echo(
-                click.style(
-                    "WARNING: --observation flag was ignored. Observation mode can only be enabled for a test session "
-                    "during its initial creation. "
-                    "Add `--observation` option to the `launchable record session` command instead.",
-                    fg='yellow'),
-                err=True)
diff --git a/launchable/commands/inspect/__init__.py b/launchable/commands/inspect/__init__.py
deleted file mode 100644
index 7cfada420..000000000
--- a/launchable/commands/inspect/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-import click
-
-from launchable.utils.click import GroupWithAlias
-
-from .subset import subset
-from .tests import tests
-
-
-@click.group(cls=GroupWithAlias)
-def inspect():
-    pass
-
-
-inspect.add_command(subset)
-inspect.add_command(tests)
diff --git a/launchable/commands/inspect/tests.py b/launchable/commands/inspect/tests.py
deleted file mode 100644
index 4ff145d23..000000000
--- a/launchable/commands/inspect/tests.py
+++ /dev/null
@@ -1,186 +0,0 @@
-import json
-import sys
-from abc import ABCMeta, abstractmethod
-from http import HTTPStatus
-from typing import List
-
-import click
-from tabulate import tabulate
-
-from ...utils.authentication import ensure_org_workspace
-from ...utils.launchable_client import LaunchableClient
-from ...utils.session import parse_session
-from ..helper import require_session
-
-
-class TestResult(object):
-    def __init__(self, result: dict):
-        self._status = result.get("status", "")
-        self._duration_sec = result.get("duration", 0.0)
-        self._created_at = result.get("createdAt", None)
-        self._test_path = "#".join([path["type"] + "=" + path["name"]
-                                    for path in result["testPath"] if path.keys() >= {"type", "name"}])
-
-
-class TestResults(object):
-    def __init__(self, test_session_id: int, results: List[TestResult]):
-        self._test_session_id = test_session_id
-        self._results = results
-
-    def add(self, result: TestResult):
-        self._results.append(result)
-
-    def list(self) -> List[TestResult]:
-        return self._results
-
-    def total_duration_sec(self) -> float:
-        return sum([result._duration_sec for result in self._results])
-
-    def total_duration_min(self) -> float:
-        return (sum([result._duration_sec for result in self._results]) / 60)
-
-    def total_count(self) -> int:
-        return len(self._results)
-
-    def filter_by_status(self, status: str) -> 'TestResults':
-        return TestResults(self._test_session_id, [result for result in self._results if result._status == status])
-
-
-class TestResultAbstractDisplay(metaclass=ABCMeta):
-    def __init__(self, results: TestResults):
-        self._results = results
-
-    @abstractmethod
-    def display(self):
-        raise NotImplementedError("display method is not implemented")
-
-
-class TestResultJSONDisplay(TestResultAbstractDisplay):
-    def __init__(self, results: TestResults):
-        super().__init__(results)
-
-    def display(self):
-        result_json = {}
-        result_json["summary"] = {
-            "total": {
-                "report_count": self._results.total_count(),
-                "duration_min": round(self._results.total_duration_min(), 2),
-            },
-            "success": {
-                "report_count": self._results.filter_by_status("SUCCESS").total_count(),
-                "duration_min": round(self._results.filter_by_status("SUCCESS").total_duration_min(), 2)
-            },
-            "failure": {
-                "report_count": self._results.filter_by_status("FAILURE").total_count(),
-                "duration_min": round(self._results.filter_by_status("FAILURE").total_duration_min(), 2)
-            },
-            "skip": {
-                "report_count": self._results.filter_by_status("SKIPPED").total_count(),
-                "duration_min": round(self._results.filter_by_status("SKIPPED").total_duration_min(), 2)
-            }
-        }
-        result_json["results"] = []
-        for result in self._results.list():
-            result_json["results"].append({
-                "test_path": result._test_path,
-                "duration_sec": result._duration_sec,
-                "status": result._status,
-                "created_at": result._created_at
-            })
-
-        org, workspace = ensure_org_workspace()
-        result_json["test_session_app_url"] = "https://app.launchableinc.com/organizations/{}/workspaces/{}/test-sessions/{}".format(  # noqa: E501
-            org, workspace, self._results._test_session_id)
-
-        click.echo(json.dumps(result_json, indent=2))
-
-
-class TestResultTableDisplay(TestResultAbstractDisplay):
-    def __init__(self, results: TestResults):
-        super().__init__(results)
-
-    def display(self):
-        header = ["Test Path",
-                  "Duration (sec)", "Status", "Uploaded At"]
-        rows = []
-        for result in self._results.list():
-            rows.append(
-                [
-                    result._test_path,
-                    result._duration_sec,
-                    result._status,
-                    result._created_at,
-                ]
-            )
-        click.echo(tabulate(rows, header, tablefmt="github", floatfmt=".2f"))
-
-        summary_header = ["Summary", "Report Count", "Total Duration (min)"]
-        summary_rows = [
-            ["Total", self._results.total_count(),
-             self._results.total_duration_min()],
-            ["Success", self._results.filter_by_status("SUCCESS").total_count(),
-             self._results.filter_by_status("SUCCESS").total_duration_min()],
-            ["Failure", self._results.filter_by_status("FAILURE").total_count(),
-             self._results.filter_by_status("FAILURE").total_duration_min()],
-            ["Skip", self._results.filter_by_status("SKIPPED").total_count(),
-             self._results.filter_by_status("SKIPPED").total_duration_min()]]
-
-        click.echo(tabulate(summary_rows, summary_header, tablefmt="grid", floatfmt=["", ".0f", ".2f"]))
-
-
-@click.command()
-@click.option(
-    '--test-session-id',
-    'test_session_id',
-    help='test session id',
-)
-@click.option(
-    '--json',
-    'is_json_format',
-    help='display JSON format',
-    is_flag=True
-)
-@click.pass_context
-def tests(context: click.core.Context, test_session_id: int, is_json_format: bool):
-    if (test_session_id is None):
-        try:
-            session = require_session(None)
-            _, test_session_id = parse_session(session)
-        except Exception:
-            click.echo(
-                click.style(
-                    "test session id requires.\n"
-                    "Use the --test-session-id option or execute after `launchable record tests` command.",
-                    fg="yellow"))
-            return
-
-    client = LaunchableClient(app=context.obj)
-    try:
-        res = client.request(
-            "get", "/test_sessions/{}/events".format(test_session_id))
-
-        if res.status_code == HTTPStatus.NOT_FOUND:
-            click.echo(click.style(
-                "Test session {} not found. Check test session ID and try again.".format(test_session_id), 'yellow'),
-                err=True,
-            )
-            sys.exit(1)
-
-        res.raise_for_status()
-        results = res.json()
-    except Exception as e:
-        client.print_exception_and_recover(e, "Warning: failed to inspect tests")
-        return
-
-    test_results = TestResults(test_session_id=test_session_id, results=[])
-    for result in results:
-        if result.keys() >= {"testPath"}:
-            test_results.add(TestResult(result))
-
-    displayer: TestResultAbstractDisplay
-    if is_json_format:
-        displayer = TestResultJSONDisplay(test_results)
-    else:
-        displayer = TestResultTableDisplay(test_results)
-
-    displayer.display()
diff --git a/launchable/commands/record/__init__.py b/launchable/commands/record/__init__.py
deleted file mode 100644
index 99bc4deba..000000000
--- a/launchable/commands/record/__init__.py
+++ /dev/null
@@ -1,23 +0,0 @@
-import click
-
-from launchable.utils.click import GroupWithAlias
-
-from .attachment import attachment
-from .build import build
-from .commit import commit
-from .session import session
-from .tests import tests
-
-
-@click.group(cls=GroupWithAlias)
-def record():
-    pass
-
-
-record.add_command(build)
-record.add_command(commit)
-record.add_command(tests)
-# for backward compatibility
-record.add_alias('test', tests)  # type: ignore
-record.add_command(session)
-record.add_command(attachment)
diff --git a/launchable/commands/record/attachment.py b/launchable/commands/record/attachment.py
deleted file mode 100644
index 5ffb6de56..000000000
--- a/launchable/commands/record/attachment.py
+++ /dev/null
@@ -1,35 +0,0 @@
-from typing import Optional
-
-import click
-
-from ...utils.launchable_client import LaunchableClient
-from ..helper import require_session
-
-
-@click.command()
-@click.option(
-    '--session',
-    'session',
-    help='In the format builds/<build name>/test_sessions/<test session id>',
-    type=str,
-)
-@click.argument('attachments', nargs=-1)  # type=click.Path(exists=True)
-@click.pass_context
-def attachment(
-    context: click.core.Context,
-    attachments,
-    session: Optional[str] = None
-):
-    client = LaunchableClient(app=context.obj)
-    try:
-        session = require_session(session)
-
-        for a in attachments:
-            click.echo("Sending {}".format(a))
-            with open(a, mode='rb') as f:
-                res = client.request(
-                    "post", "{}/attachment".format(session), compress=True, payload=f,
-                    additional_headers={"Content-Disposition": "attachment;filename=\"{}\"".format(a)})
-                res.raise_for_status()
-    except Exception as e:
-        client.print_exception_and_recover(e)
diff --git a/launchable/commands/record/session.py b/launchable/commands/record/session.py
deleted file mode 100644
index b5376e1a0..000000000
--- a/launchable/commands/record/session.py
+++ /dev/null
@@ -1,288 +0,0 @@
-import datetime
-import os
-import re
-import sys
-from http import HTTPStatus
-from typing import Optional, Sequence, Tuple
-
-import click
-
-from launchable.utils.click import DATETIME_WITH_TZ, validate_past_datetime
-from launchable.utils.link import LinkKind, capture_link
-from launchable.utils.tracking import Tracking, TrackingClient
-
-from ...utils.click import KEY_VALUE
-from ...utils.commands import Command
-from ...utils.fail_fast_mode import FailFastModeValidateParams, fail_fast_mode_validate, set_fail_fast_mode
-from ...utils.launchable_client import LaunchableClient
-from ...utils.no_build import NO_BUILD_BUILD_NAME
-from ...utils.session import _session_file_path, read_build, write_session
-
-LAUNCHABLE_SESSION_DIR_KEY = 'LAUNCHABLE_SESSION_DIR'
-
-TEST_SESSION_NAME_RULE = re.compile("^[a-zA-Z0-9][a-zA-Z0-9_-]*$")
-
-
-def _validate_session_name(ctx, param, value):
-    if value is None:
-        return ""
-
-    if TEST_SESSION_NAME_RULE.match(value):
-        return value
-    else:
-        raise click.BadParameter("--session-name option supports only alphabet(a-z, A-Z), number(0-9), '-', and '_'")
-
-
-@click.command()
-@click.option(
-    '--build',
-    'build_name',
-    help='build name',
-    type=str,
-    metavar='BUILD_NAME'
-)
-@click.option(
-    '--save-file/--no-save-file',
-    'save_session_file',
-    help='save session to file',
-    default=True,
-    metavar='SESSION_FILE'
-)
-@click.option(
-    "--flavor",
-    "flavor",
-    help='flavors',
-    metavar='KEY=VALUE',
-    type=KEY_VALUE,
-    default=(),
-    multiple=True,
-)
-@click.option(
-    "--observation",
-    "is_observation",
-    help="enable observation mode",
-    is_flag=True,
-)
-@click.option(
-    '--link',
-    'links',
-    help="Set external link of title and url",
-    multiple=True,
-    default=(),
-    type=KEY_VALUE,
-)
-@click.option(
-    "--no-build",
-    "is_no_build",
-    help="If you want to only send test reports, please use this option",
-    is_flag=True,
-)
-@click.option(
-    '--session-name',
-    'session_name',
-    help='test session name',
-    required=False,
-    type=str,
-    metavar='SESSION_NAME',
-    callback=_validate_session_name,
-)
-@click.option(
-    '--lineage',
-    'lineage',
-    help='Set lineage name. A lineage is a set of test sessions grouped and this option value will be used for a lineage name.',
-    required=False,
-    type=str,
-    metavar='LINEAGE',
-)
-@click.option(
-    '--test-suite',
-    'test_suite',
-    help='Set test suite name. A test suite is a collection of test sessions. Setting a test suite allows you to manage data over test sessions and lineages.',  # noqa: E501
-    required=False,
-    type=str,
-    metavar='TEST_SUITE',
-)
-@click.option(
-    '--timestamp',
-    'timestamp',
-    help='Used to overwrite the session time when importing historical data. Note: Format must be `YYYY-MM-DDThh:mm:ssTZD` or `YYYY-MM-DDThh:mm:ss` (local timezone applied)',  # noqa: E501
-    type=DATETIME_WITH_TZ,
-    default=None,
-    callback=validate_past_datetime,
-)
-@click.pass_context
-def session(
-    ctx: click.core.Context,
-    build_name: str,
-    save_session_file: bool,
-    print_session: bool = True,
-    flavor: Sequence[Tuple[str, str]] = [],
-    is_observation: bool = False,
-    links: Sequence[Tuple[str, str]] = [],
-    is_no_build: bool = False,
-    session_name: Optional[str] = None,
-    lineage: Optional[str] = None,
-    test_suite: Optional[str] = None,
-    timestamp: Optional[datetime.datetime] = None,
-):
-    """
-    print_session is for backward compatibility.
-    If you run this `record session` standalone,
-    the command should print the session ID because v1.1 users expect the beheivior.
-    That is why the flag is default True.
-    If you run this command from the other command such as `subset` and `record tests`,
-    you should set print_session = False because users don't expect to print session ID to the subset output.
-    """
-
-    tracking_client = TrackingClient(Command.RECORD_SESSION, app=ctx.obj)
-    client = LaunchableClient(app=ctx.obj, tracking_client=tracking_client)
-    set_fail_fast_mode(client.is_fail_fast_mode())
-
-    fail_fast_mode_validate(FailFastModeValidateParams(
-        command=Command.RECORD_SESSION,
-        build=build_name,
-        is_no_build=is_no_build,
-        test_suite=test_suite,
-    ))
-
-    if not is_no_build and not build_name:
-        raise click.UsageError("Error: Missing option '--build'")
-
-    if is_no_build:
-        build = read_build()
-        if build and build != "":
-            raise click.UsageError(
-                "The cli already created '{}'. If you want to use the '--no-build' option, please remove this file first.".format(_session_file_path()))  # noqa: E501
-
-        build_name = NO_BUILD_BUILD_NAME
-
-    if session_name:
-        sub_path = "builds/{}/test_session_names/{}".format(build_name, session_name)
-        try:
-            res = client.request("get", sub_path)
-
-            if res.status_code != 404:
-                msg = "This session name ({}) is already used. Please set another name.".format(session_name)
-                click.echo(click.style(
-                    msg,
-                    fg='red'),
-                    err=True)
-                tracking_client.send_error_event(
-                    event_name=Tracking.ErrorEvent.USER_ERROR,
-                    stack_trace=msg,
-                )
-                sys.exit(2)
-        except Exception as e:
-            tracking_client.send_error_event(
-                event_name=Tracking.ErrorEvent.INTERNAL_CLI_ERROR,
-                stack_trace=str(e),
-            )
-            client.print_exception_and_recover(e)
-
-    flavor_dict = dict(flavor)
-
-    payload = {
-        "flavors": flavor_dict,
-        "isObservation": is_observation,
-        "noBuild": is_no_build,
-        "lineage": lineage,
-        "testSuite": test_suite,
-        "timestamp": timestamp.isoformat() if timestamp else None,
-    }
-
-    _links = capture_link(os.environ)
-    for link in links:
-        _links.append({
-            "title": link[0],
-            "url": link[1],
-            "kind": LinkKind.CUSTOM_LINK.name,
-        })
-    payload["links"] = _links
-
-    try:
-        sub_path = "builds/{}/test_sessions".format(build_name)
-        res = client.request("post", sub_path, payload=payload)
-
-        if res.status_code == HTTPStatus.NOT_FOUND:
-            msg = "Build {} was not found." \
-                  "Make sure to run `launchable record build --name {}` before you run this command.".format(
-                      build_name, build_name)
-            tracking_client.send_error_event(
-                event_name=Tracking.ErrorEvent.INTERNAL_CLI_ERROR,
-                stack_trace=msg,
-            )
-            click.echo(
-                click.style(
-                    msg,
-                    'yellow'),
-                err=True,
-            )
-            sys.exit(1)
-
-        res.raise_for_status()
-
-        session_id = res.json().get('id', None)
-        if is_no_build:
-            build_name = res.json().get("buildNumber", "")
-            sub_path = "builds/{}/test_sessions".format(build_name)
-
-        if save_session_file:
-            write_session(build_name, "{}/{}".format(sub_path, session_id))
-        if print_session:
-            # what we print here gets captured and passed to `--session` in
-            # later commands
-            click.echo("{}/{}".format(sub_path, session_id), nl=False)
-
-    except Exception as e:
-        tracking_client.send_error_event(
-            event_name=Tracking.ErrorEvent.INTERNAL_CLI_ERROR,
-            stack_trace=str(e),
-        )
-        client.print_exception_and_recover(e)
-
-    if session_name:
-        try:
-            add_session_name(
-                client=client,
-                build_name=build_name,
-                session_id=session_id,
-                session_name=session_name,
-            )
-        except Exception as e:
-            tracking_client.send_error_event(
-                event_name=Tracking.ErrorEvent.INTERNAL_CLI_ERROR,
-                stack_trace=str(e),
-            )
-            client.print_exception_and_recover(e)
-
-
-def add_session_name(
-    client: LaunchableClient,
-    build_name: str,
-    session_id: str,
-    session_name: str,
-):
-    sub_path = "builds/{}/test_sessions/{}".format(build_name, session_id)
-    payload = {
-        "name": session_name
-    }
-    res = client.request("patch", sub_path, payload=payload)
-
-    if res.status_code == HTTPStatus.NOT_FOUND:
-        click.echo(
-            click.style(
-                "Test session {} was not found. Record session may have failed.".format(session_id),
-                'yellow'),
-            err=True,
-        )
-        sys.exit(1)
-    if res.status_code == HTTPStatus.BAD_REQUEST:
-        click.echo(
-            click.style(
-                "You cannot use test session name {} since it is already used by other test session in your workspace. The record session is completed successfully without session name."  # noqa: E501
-                .format(session_name),
-                'yellow'),
-            err=True,)
-        sys.exit(1)
-
-    res.raise_for_status()
diff --git a/launchable/commands/split_subset.py b/launchable/commands/split_subset.py
deleted file mode 100644
index c5681e89d..000000000
--- a/launchable/commands/split_subset.py
+++ /dev/null
@@ -1,330 +0,0 @@
-import os
-from typing import List, Optional
-
-import click
-
-from launchable.testpath import TestPath
-
-from ..app import Application
-from ..utils.click import FRACTION, FractionType
-from ..utils.launchable_client import LaunchableClient
-from .test_path_writer import TestPathWriter
-
-SPLIT_BY_GROUPS_NO_GROUP_NAME = "nogroup"
-SPLIT_BY_GROUP_SUBSET_GROUPS_FILE_NAME = "subset-groups.txt"
-SPLIT_BY_GROUP_REST_GROUPS_FILE_NAME = "rest-groups.txt"
-
-
-@click.group(help="Split subsetting tests")
-@click.option(
-    '--subset-id',
-    'subset_id',
-    help='subset id',
-    type=str,
-    required=True,
-)
-@click.option(
-    '--bin',
-    'bin_target',
-    help='bin',
-    type=FRACTION,
-)
-@click.option(
-    '--rest',
-    'rest',
-    help='output the rest of subset',
-    type=str,
-)
-@click.option(
-    '--base',
-    'base_path',
-    help='(Advanced) base directory to make test names portable',
-    type=click.Path(exists=True, file_okay=False),
-    metavar="DIR",
-)
-@click.option(
-    "--same-bin",
-    'same_bin_files',
-    help="(Advanced) gather specified tests into same bin",
-    type=click.Path(),
-    multiple=True,
-)
-@click.option(
-    "--split-by-groups",
-    'is_split_by_groups',
-    help="split by groups that were set by `launchable record tests --group`",
-    is_flag=True
-)
-@click.option(
-    "--split-by-groups-with-rest",
-    'is_split_by_groups_with_rest',
-    help="split by groups that were set by `launchable record tests --group` and produces with rest files",
-    is_flag=True
-)
-@click.option(
-    "--split-by-groups-output-dir",
-    'split_by_groups_output_dir',
-    type=click.Path(file_okay=False),
-    default=os.getcwd(),
-    help="split results output dir",
-)
-@click.option(
-    "--output-exclusion-rules",
-    "is_output_exclusion_rules",
-    help="outputs the exclude test list. Switch the subset and rest.",
-    is_flag=True,
-)
-@click.pass_context
-def split_subset(
-    context: click.core.Context,
-    subset_id: str,
-    bin_target: FractionType,
-    rest: str,
-    base_path: str,
-    same_bin_files: Optional[List[str]],
-    is_split_by_groups: bool,
-    is_split_by_groups_with_rest: bool,
-    split_by_groups_output_dir: click.Path,
-    is_output_exclusion_rules: bool,
-):
-    if len(subset_id.split("/")) != 2:
-        click.echo(
-            click.style('Error: subset ID cannot be empty. It should be passed with `subset/<subset id>` format.',
-                        'yellow'),
-            err=True,
-        )
-        return
-
-    TestPathWriter.base_path = base_path
-
-    client = LaunchableClient(test_runner=context.invoked_subcommand, app=context.obj)
-
-    class SplitSubset(TestPathWriter):
-        def __init__(self, app: Application):
-            super(SplitSubset, self).__init__(app=app)
-            self.rest = rest
-            self.output_handler = self._default_output_handler
-            self.exclusion_output_handler = self._default_exclusion_output_handler
-            self.split_by_groups_output_handler = self._default_split_by_groups_output_handler
-            self.split_by_groups_exclusion_output_handler = self._default_split_by_groups_exclusion_output_handler
-            self.is_split_by_groups_with_rest = is_split_by_groups_with_rest
-            self.split_by_groups_output_dir = split_by_groups_output_dir
-            self.is_output_exclusion_rules = is_output_exclusion_rules
-
-        def _default_output_handler(self, output: List[TestPath], rests: List[TestPath]):
-            if rest:
-                self.write_file(rest, rests)
-
-            if output:
-                self.print(output)
-
-        def _default_exclusion_output_handler(self, subset: List[TestPath], rest: List[TestPath]):
-            self.output_handler(rest, subset)
-
-        def _default_split_by_groups_output_handler(self, group_name: str, subset: List[TestPath], rests: List[TestPath]):
-            if is_split_by_groups_with_rest:
-                self.write_file("{}/rest-{}.txt".format(split_by_groups_output_dir, group_name), rests)
-
-            if len(subset) > 0:
-                self.write_file("{}/subset-{}.txt".format(split_by_groups_output_dir, group_name), subset)
-
-        def _default_split_by_groups_exclusion_output_handler(
-                self, group_name: str, subset: List[TestPath],
-                rests: List[TestPath]):
-            self.split_by_groups_output_handler(group_name, rests, subset)
-
-        def _is_split_by_groups(self) -> bool:
-            return is_split_by_groups or is_split_by_groups_with_rest
-
-        def split_by_bin(self):
-            index, count = 0, 0
-            if not is_split_by_groups:
-                index = bin_target[0]
-                count = bin_target[1]
-
-                if (index == 0 or count == 0):
-                    click.echo(
-                        click.style(
-                            'Error: invalid bin value. Make sure to set over 0 like `--bin 1/2` but set `--bin {}`'.format(
-                                bin_target),
-                            'yellow'),
-                        err=True,
-                    )
-                    return
-
-                if count < index:
-                    click.echo(
-                        click.style(
-                            'Error: invalid bin value. Make sure to set below 1 like `--bin 1/2`, `--bin 2/2` '
-                            'but set `--bin {}`'.format(bin_target),
-                            'yellow'),
-                        err=True,
-                    )
-                    return
-
-            output_subset = []
-            output_rests = []
-            is_observation = False
-
-            try:
-                payload = {
-                    "sliceCount": count,
-                    "sliceIndex": index,
-                    "sameBins": [],
-                    "splitByGroups": is_split_by_groups
-                }
-
-                tests_in_files = []
-
-                if same_bin_files is not None and len(same_bin_files) > 0:
-                    if self.same_bin_formatter is None:
-                        raise ValueError("--same-bin option is supported only for gradle test and go-test. "
-                                         "Please remove --same-bin option for the other test runner.")
-                    same_bins = []
-                    for same_bin_file in same_bin_files:
-                        with open(same_bin_file, "r") as f:
-                            """
-                            A same_bin_file expects to have a list of tests with one test per line.
-                            Each line of test gets formatted and packed to sameBins list in payload.
-                            E.g.
-                            For gradle:
-                            ```
-                            $ cat same_bin_file.txt
-                            example.AddTest
-                            example.DivTest
-                            example.SubTest
-                            ```
-                            Formatted:
-                            ```
-                            "sameBins" [
-                                [
-                                    [{"type": "class", "name": "example.AddTest"}],
-                                    [{"type": "class", "name": "example.DivTest"}],
-                                    [{"type": "class", "name": "example.SubTest"}]
-                                ]
-                            ]
-                            ```
-                            E.g.
-                            For gotest:
-                            ```
-                            $ cat same_bin_file.txt
-                            example.BenchmarkGreeting
-                            example.ExampleGreeting
-                            ```
-                            Formatted:
-                            ```
-                            "sameBins" [
-                                [
-                                    [
-                                        {"type": "class", "name": "example"},
-                                        {"type": "testcase", "name": "BenchmarkGreeting"}
-                                    ],
-                                    [
-                                        {"type": "class", "name": "example"},
-                                        {"type": "testcase", "name": "ExampleGreeting"}
-                                    ]
-                                ]
-                            ]
-                            ```
-                            """
-                            tests = f.readlines()
-                            # make a list to set to remove duplicate.
-                            tests = list(set([s.strip() for s in tests]))
-                            for tests_in_file in tests_in_files:
-                                for test in tests:
-                                    if test in tests_in_file:
-                                        raise ValueError(
-                                            "Error: you cannot have one test, {}, in multiple same-bins.".format(test))
-                            tests_in_files.append(tests)
-                            test_data = [self.same_bin_formatter(s) for s in tests]
-                            same_bins.append(test_data)
-
-                    payload["sameBins"] = same_bins
-
-                res = client.request("POST", "{}/slice".format(subset_id), payload=payload)
-                res.raise_for_status()
-
-                output_subset = res.json().get("testPaths", [])
-                output_rests = res.json().get("rest", [])
-                is_observation = res.json().get("isObservation", False)
-
-                if len(output_subset) == 0:
-                    click.echo(click.style(
-                        "Error: no tests found for this subset id.", 'yellow'), err=True)
-                    return
-
-                if is_observation:
-                    output_subset = output_subset + output_rests
-                    output_rests = []
-
-                if is_output_exclusion_rules:
-                    self.exclusion_output_handler(output_subset, output_rests)
-                else:
-                    self.output_handler(output_subset, output_rests)
-
-            except Exception as e:
-                client.print_exception_and_recover(
-                    e, "Warning: the service failed to split subset. Falling back to running all tests")
-                return
-
-        def _write_split_by_groups_group_names(self, subset_group_names: List[str], rest_group_names: List[str]):
-            if is_output_exclusion_rules:
-                subset_group_names, rest_group_names = rest_group_names, subset_group_names
-
-            if len(subset_group_names) > 0:
-                with open("{}/{}".format(split_by_groups_output_dir, SPLIT_BY_GROUP_SUBSET_GROUPS_FILE_NAME),
-                          "w+", encoding="utf-8") as f:
-                    f.write("\n".join(subset_group_names))
-
-            if is_split_by_groups_with_rest:
-                with open("{}/{}".format(split_by_groups_output_dir, SPLIT_BY_GROUP_REST_GROUPS_FILE_NAME),
-                          "w+", encoding="utf-8") as f:
-                    f.write("\n".join(rest_group_names))
-
-        def split_by_group_names(self):
-            try:
-                res = client.request("POST", "{}/split-by-groups".format(subset_id))
-                res.raise_for_status()
-
-                is_observation = res.json().get("isObservation", False)
-                split_groups = res.json().get("splitGroups", [])
-
-                subset_group_names = []
-                rest_group_names = []
-
-                for group in split_groups:
-                    group_name = group.get("groupName", "")
-                    subset = group.get("subset", [])
-                    rests = group.get("rest", [])
-
-                    if is_observation:
-                        subset, rests = subset + rests, []
-
-                    if len(subset) > 0 and group_name != SPLIT_BY_GROUPS_NO_GROUP_NAME:
-                        subset_group_names.append(group_name)
-                    elif group_name != SPLIT_BY_GROUPS_NO_GROUP_NAME:
-                        rest_group_names.append(group_name)
-
-                    if is_output_exclusion_rules:
-                        self.split_by_groups_exclusion_output_handler(group_name, subset, rests)
-                    else:
-                        self.split_by_groups_output_handler(group_name, subset, rests)
-
-                self._write_split_by_groups_group_names(subset_group_names, rest_group_names)
-
-            except Exception as e:
-                client.print_exception_and_recover(e, "Error: the service failed to split subset.", 'red')
-                exit(1)
-
-        def run(self):
-            if (not self._is_split_by_groups() and bin_target is None) or (self._is_split_by_groups() and bin_target):
-                raise click.BadOptionUsage(
-                    "--bin or --split-by-groups/--split-by-groups-with-rest",
-                    "Missing option '--bin' or '--split-by-groups/--split-by-groups-with-rest'")
-
-            if self._is_split_by_groups():
-                self.split_by_group_names()
-            else:
-                self.split_by_bin()
-
-    context.obj = SplitSubset(app=context.obj)
diff --git a/launchable/commands/stats/__init__.py b/launchable/commands/stats/__init__.py
deleted file mode 100644
index 7d8205a0d..000000000
--- a/launchable/commands/stats/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-import click
-
-from launchable.utils.click import GroupWithAlias
-
-from .test_sessions import test_sessions
-
-
-@click.group(cls=GroupWithAlias)
-def stats():
-    pass
-
-
-stats.add_command(test_sessions)
diff --git a/launchable/commands/stats/test_sessions.py b/launchable/commands/stats/test_sessions.py
deleted file mode 100644
index 602187e46..000000000
--- a/launchable/commands/stats/test_sessions.py
+++ /dev/null
@@ -1,49 +0,0 @@
-from typing import Any, Dict, Sequence, Tuple
-
-import click
-
-from ...utils.click import KEY_VALUE
-from ...utils.launchable_client import LaunchableClient
-
-
-@click.command()
-@click.option(
-    '--days',
-    'days',
-    help='How many days of test sessions in the past to be stat',
-    type=int,
-    default=7
-)
-@click.option(
-    "--flavor",
-    "flavor",
-    help='flavors',
-    metavar='KEY=VALUE',
-    type=KEY_VALUE,
-    default=(),
-    multiple=True,
-)
-@click.pass_context
-def test_sessions(
-    context: click.core.Context,
-    days: int,
-    flavor: Sequence[Tuple[str, str]] = (),
-):
-    params: Dict[str, Any] = {'days': days, 'flavor': []}
-    flavors = []
-    for f in flavor:
-        flavors.append('%s=%s' % (f[0], f[1]))
-
-    if flavors:
-        params['flavor'] = flavors
-    else:
-        params.pop('flavor', None)
-
-    client = LaunchableClient(app=context.obj)
-    try:
-        res = client.request('get', '/stats/test-sessions', params=params)
-        res.raise_for_status()
-        click.echo(res.text)
-
-    except Exception as e:
-        client.print_exception_and_recover(e, "Warning: the service failed to get stat.")
diff --git a/launchable/test_runners/gradle.py b/launchable/test_runners/gradle.py
deleted file mode 100644
index 9a8ed7b9b..000000000
--- a/launchable/test_runners/gradle.py
+++ /dev/null
@@ -1,113 +0,0 @@
-import os
-from typing import Dict, List
-
-import click
-
-from launchable.utils.java import junit5_nested_class_path_builder
-
-from ..utils.file_name_pattern import jvm_test_pattern
-from . import launchable
-
-
-@click.option('--bare',
-              help='outputs class names alone',
-              default=False,
-              is_flag=True
-              )
-@click.argument('source_roots', required=False, nargs=-1)
-@launchable.subset
-def subset(client, bare, source_roots):
-    def file2test(f: str):
-        if jvm_test_pattern.match(f):
-            f = f[:f.rindex('.')]  # remove extension
-            # directory -> package name conversion
-            cls_name = f.replace(os.path.sep, '.')
-            return [{"type": "class", "name": cls_name}]
-        else:
-            return None
-
-    if client.is_get_tests_from_previous_sessions:
-        if len(source_roots) != 0:
-            click.echo(click.style(
-                "Warning: SOURCE_ROOTS are ignored when --get-tests-from-previous-sessions is used", fg="yellow"),
-                err=True)
-        source_roots = []
-    else:
-        if len(source_roots) == 0:
-            raise click.UsageError(click.style("Error: Missing argument 'SOURCE_ROOTS...'.", fg="red"))
-
-    for root in source_roots:
-        client.scan(root, '**/*', file2test)
-
-    def exclusion_output_handler(subset_tests, rest_tests):
-        if client.rest:
-            with open(client.rest, "w+", encoding="utf-8") as fp:
-                if not bare and len(rest_tests) == 0:
-                    # This prevents the CLI output to be evaled as an empty
-                    # string argument.
-                    fp.write('-PdummyPlaceHolder')
-                else:
-                    fp.write(client.separator.join(client.formatter(t) for t in rest_tests))
-
-        classes = [to_class_file(tp[0]['name']) for tp in rest_tests]
-        if bare:
-            click.echo(','.join(classes))
-        else:
-            click.echo('-PexcludeTests=' + (','.join(classes)))
-    client.exclusion_output_handler = exclusion_output_handler
-
-    if bare:
-        client.formatter = lambda x: x[0]['name']
-    else:
-        client.formatter = lambda x: "--tests {}".format(x[0]['name'])
-        client.separator = ' '
-
-    client.run()
-
-
-@click.option('--bare',
-              help='outputs class names alone',
-              default=False,
-              is_flag=True
-              )
-@launchable.split_subset
-def split_subset(client, bare):
-    if bare:
-        client.formatter = lambda x: x[0]['name']
-    else:
-        client.formatter = lambda x: "--tests {}".format(x[0]['name'])
-        client.separator = ' '
-
-    def format_same_bin(s: str) -> List[Dict[str, str]]:
-        return [{"type": "class", "name": s}]
-
-    def exclusion_output_handler(group_name, subset, rests):
-        if client.is_split_by_groups_with_rest:
-            with open("{}/rest-{}.txt".format(client.split_by_groups_output_dir, group_name), "w+", encoding="utf-8") as fp:
-                if not bare and len(subset) == 0:
-                    fp.write('-PdummyPlaceHolder')
-                else:
-                    fp.write(client.separator.join(client.formatter(t) for t in subset))
-
-        classes = [to_class_file(tp[0]['name']) for tp in rests]
-        with open("{}/subset-{}.txt".format(client.split_by_groups_output_dir, group_name), "w+", encoding="utf-8") as fp:
-            if bare:
-                fp.write(','.join(classes))
-            else:
-                fp.write('-PexcludeTests=' + (','.join(classes)))
-
-    client.same_bin_formatter = format_same_bin
-    client.split_by_groups_exclusion_output_handler = exclusion_output_handler
-
-    client.run()
-
-
-def to_class_file(class_name: str):
-    return class_name.replace('.', '/') + '.class'
-
-
-@click.argument('reports', required=True, nargs=-1)
-@launchable.record.tests
-def record_tests(client, reports):
-    client.path_builder = junit5_nested_class_path_builder(client.path_builder)
-    launchable.CommonRecordTestImpls.load_report_files(client=client, source_roots=reports)
diff --git a/launchable/test_runners/rspec.py b/launchable/test_runners/rspec.py
deleted file mode 100644
index 15ffcc155..000000000
--- a/launchable/test_runners/rspec.py
+++ /dev/null
@@ -1,5 +0,0 @@
-from .
import launchable - -subset = launchable.CommonSubsetImpls(__name__).scan_files('*_spec.rb') -split_subset = launchable.CommonSplitSubsetImpls(__name__).split_subset() -record_tests = launchable.CommonRecordTestImpls(__name__).report_files() diff --git a/launchable/utils/click.py b/launchable/utils/click.py deleted file mode 100644 index 255a94123..000000000 --- a/launchable/utils/click.py +++ /dev/null @@ -1,186 +0,0 @@ -import datetime -import re -import sys -from typing import Dict, Optional, Tuple - -import click -import dateutil.parser -from click import ParamType -from dateutil.tz import tzlocal - -# click.Group has the notion of hidden commands but it doesn't allow us to easily add -# the same command under multiple names and hide all but one. - - -class GroupWithAlias(click.Group): - def __init__(self, name: Optional[str] = None, commands: Optional[Dict[str, click.Command]] = None, **attrs): - super().__init__(name, commands, **attrs) - self.aliases: Dict[str, str] = {} - - def get_command(self, ctx: click.core.Context, cmd_name: str): - return super().get_command(ctx, cmd_name) or self.aliases.get(cmd_name) - - def add_alias(self, name: str, cmd: str): - self.aliases[name] = cmd - - -class PercentageType(ParamType): - name = "percentage" - - def convert(self, value: str, param: Optional[click.core.Parameter], ctx: Optional[click.core.Context]): - try: - missing_percent = False - if value.endswith('%'): - x = float(value[:-1]) / 100 - if 0 <= x <= 100: - return x - else: - missing_percent = True - except ValueError: - pass - - msg = "Expected percentage like 50% but got '{}'".format(value) - if missing_percent and sys.platform.startswith("win"): - msg += " ('%' is a special character in batch files, so please write '50%%' to pass in '50%')" - self.fail(msg, param, ctx) - - -class DurationType(ParamType): - name = "duration" - - def convert(self, value: str, param: Optional[click.core.Parameter], ctx: Optional[click.core.Context]): - try: - return convert_to_seconds(value) - - except ValueError: - pass - - self.fail("Expected duration like 3600, 30m, 1h15m but got '{}'".format(value), param, ctx) - - -class KeyValueType(ParamType): - name = "key=value" - - ''' - Handles options that take key/value pairs. - - The preferred syntax is "--option key=value" and that's what we should be advertising in docs and help, - but for compatibility (?) we accept "--option key:value" - - Typically, this is used with multiple=True to produce `Sequence[Tuple[str, str]]`. 
- ''' - error_message = "Expected a key-value pair formatted as --option key=value, but got '{}'" - - def convert( - self, value: str, param: Optional[click.core.Parameter], ctx: Optional[click.core.Context] - ) -> Tuple[str, str]: - - for delimiter in ['=', ':']: - if delimiter in value: - kv = value.split(delimiter, 1) - if len(kv) != 2: - self.fail(self.error_message.format(value)) - return kv[0].strip(), kv[1].strip() - - self.fail(self.error_message.format(value)) - - -class FractionType(ParamType): - name = "fraction" - - def convert(self, value: str, param: Optional[click.core.Parameter], ctx: Optional[click.core.Context]): - try: - v = value.strip().split('/') - if len(v) == 2: - n = int(v[0]) - d = int(v[1]) - - return (n, d) - - except ValueError: - pass - - self.fail("Expected fraction like 1/2 but got '{}'".format(value), param, ctx) - - -class DateTimeWithTimezoneType(ParamType): - name = "datetime" - - def convert(self, value: str, param: Optional[click.core.Parameter], ctx: Optional[click.core.Context]): - try: - dt = dateutil.parser.parse(value) - if dt.tzinfo is None: - return dt.replace(tzinfo=tzlocal()) - return dt - except ValueError: - self.fail("Expected datetime like 2023-10-01T12:00:00 but got '{}'".format(value), param, ctx) - - -PERCENTAGE = PercentageType() -DURATION = DurationType() -FRACTION = FractionType() -KEY_VALUE = KeyValueType() -DATETIME_WITH_TZ = DateTimeWithTimezoneType() - -# Can the output deal with Unicode emojis? -try: - '\U0001f389'.encode(sys.stdout.encoding or "ascii") - # If stdout encoding is unavailable, such as in case of pipe, err on the safe side (EMOJI=False) - # This is a judgement call, but given that emojis do not serve functional purposes and purely decorative - # erring on the safe side seems like a reasonable call. - EMOJI = True -except UnicodeEncodeError: - EMOJI = False - - -def emoji(s: str, fallback: str = ''): - """ - Used to safely use Emoji where we can. - - Returns 's' in an environment where stdout can deal with emojis, but 'fallback' otherwise. - """ - return s if EMOJI else fallback - - -def convert_to_seconds(s: str): - units = {'s': 1, 'm': 60, 'h': 60 * 60, 'd': 60 * 60 * 24, 'w': 60 * 60 * 24 * 7} - - if s.isdigit(): - return float(s) - - duration = 0 - for m in re.finditer(r'(?P\d+)(?P[smhdw]?)', s, flags=re.I): - val = m.group('val') - unit = m.group('unit') - - if val is None or unit is None: - raise ValueError("unable to parse: {}".format(s)) - - u = units.get(unit) - if u is None: - raise ValueError("unable to parse: {}".format(s)) - - duration += int(val) * u - - return float(duration) - - -def ignorable_error(e: Exception): - return "An error occurred on Launchable CLI. You can ignore this message since the process will continue. Error: {}".format(e) - - -def validate_past_datetime(ctx, param, value): - """ - Validates that the provided datetime is in the past. - """ - if value is None: - return value - - if not isinstance(value, datetime.datetime): - raise click.BadParameter("Expected a datetime object.") - - now = datetime.datetime.now(tz=tzlocal()) - if value >= now: - raise click.BadParameter("The provided datetime must be in the past. 
But the value is {}".format(value)) - - return value diff --git a/launchable/utils/env_keys.py b/launchable/utils/env_keys.py deleted file mode 100644 index 3d26a581d..000000000 --- a/launchable/utils/env_keys.py +++ /dev/null @@ -1,7 +0,0 @@ -REPORT_ERROR_KEY = "LAUNCHABLE_REPORT_ERROR" -TOKEN_KEY = "LAUNCHABLE_TOKEN" -ORGANIZATION_KEY = "LAUNCHABLE_ORGANIZATION" -WORKSPACE_KEY = "LAUNCHABLE_WORKSPACE" -BASE_URL_KEY = "LAUNCHABLE_BASE_URL" -SKIP_TIMEOUT_RETRY = "LAUNCHABLE_SKIP_TIMEOUT_RETRY" -COMMIT_TIMEOUT = "LAUNCHABLE_COMMIT_TIMEOUT" diff --git a/launchable/utils/session.py b/launchable/utils/session.py deleted file mode 100644 index d94bff54a..000000000 --- a/launchable/utils/session.py +++ /dev/null @@ -1,112 +0,0 @@ -import json -import os -from pathlib import Path -from typing import Optional - -from .exceptions import ParseSessionException - -SESSION_DIR_KEY = 'LAUNCHABLE_SESSION_DIR' - - -def _session_file_dir() -> Path: - return Path(os.environ.get(SESSION_DIR_KEY) or os.getcwd()).expanduser() - - -def _session_file_path() -> Path: - return _session_file_dir() / ".launchable" - - -def read_build() -> Optional[str]: - f = _session_file_path() - try: - if not f.exists(): - return None - - with open(str(_session_file_path())) as session_file: - session = json.load(session_file) - return session.get("build") - - except Exception as e: - raise Exception("Can't read {}".format(f)) from e - - -def read_session(build_name: str) -> Optional[str]: - f = _session_file_path() - try: - if not f.exists(): - return None - - with open(str(_session_file_path())) as session_file: - session = json.load(session_file) - if build_name != session.get('build', None): - raise Exception("Build name is different from saved. input:{} saved:{}".format( - build_name, session.get('build', None))) - - return session.get("session") - - except Exception as e: - raise Exception("Can't read {}".format(f)) from e - - -def write_build(build_name: str) -> None: - try: - if not _session_file_dir().exists(): - _session_file_dir().mkdir(parents=True, exist_ok=True) - - session = {} - session["build"] = build_name - - with open(str(_session_file_path()), 'w') as session_file: - json.dump(session, session_file) - - except Exception as e: - raise Exception( - "Can't write to {}. Is the path writable? " - "If not, set the {} environment variable to specify an alternative directory for this file.".format( - _session_file_path(), - SESSION_DIR_KEY)) from e - - -def write_session(build_name: str, session_id: str) -> None: - try: - session = {} - session["build"] = build_name - session["session"] = session_id - - with open(str(_session_file_path()), 'w') as session_file: - json.dump(session, session_file) - - except Exception as e: - raise Exception( - "Can't write to {}. 
" - "Perhaps set the {} environment variable to specify an alternative writable path?".format( - _session_file_path(), SESSION_DIR_KEY)) from e - - -def remove_session() -> None: - """ - Call it after closing a session - """ - if _session_file_path().exists(): - _session_file_path().unlink() - - -def clean_session_files(days_ago: int = 0) -> None: - """ - Call it each build start - """ - remove_session() - - -def validate_session_format(session: str): - # session format: - # builds//test_sessions/ - if session.count("/") != 3: - raise ParseSessionException(session=session) - - -def parse_session(session: str): - validate_session_format(session) - - _, build_name, _, session_id = session.split("/") - return build_name, session_id diff --git a/pyproject.toml b/pyproject.toml index 3d2e73457..a20b14eef 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,5 +1,68 @@ +[project] +name = "smart-tests-cli" +authors = [ + {name = "CloudBees", email = "support@cloudbees.com"} +] +description = "Smart Tests CLI" +readme = "README.md" +license = {text = "Apache Software License v2"} +requires-python = ">=3.13" +classifiers = [ + "Programming Language :: Python :: 3", + "License :: OSI Approved :: Apache Software License", + "Operating System :: OS Independent", +] +dependencies = [ + "typer>=0.9.0", + "requests>=2.25", + "urllib3>=1.26", + "junitparser>=4.0.0", + "more-itertools>=7.1.0", + "python-dateutil", + "tabulate", +] +dynamic = ["version"] + +[project.urls] +Homepage = "https://www.cloudbees.com/capabilities/cloudbees-smart-tests" +Repository = "https://github.com/launchableinc/cli" + +[project.scripts] +smart-tests = "smart_tests.__main__:main" + +[tool.uv] +dev-dependencies = [ + "flake8", + "isort", + "mypy", + "pre-commit", + "responses", + "types-pkg_resources", + "types-python-dateutil", + "types-requests", + "types-tabulate", + "lxml", + "unittest-xml-reporting", + "poethepoet", + "autopep8>=2.0.0", +] + +[tool.poe.tasks] +format = "/bin/bash -c 'isort -l 130 --balanced smart_tests/*.py tests/*.py && autopep8 --in-place --recursive --aggressive --experimental --max-line-length=130 --verbose smart_tests/ tests/'" +lint = "flake8 --count --ignore=C901,E741,F401,E126 --show-source --max-line-length=130 --statistics smart_tests/ tests/" +lint-warn = "flake8 --count --exit-zero --max-complexity=15 --max-line-length=130 --ignore=E126 --statistics smart_tests/ tests/" +test = "python -m unittest" +test-xml = "python -m test-runner" +type = "mypy smart_tests tests" + [build-system] requires = ["setuptools>=45", "wheel", "setuptools_scm"] build-backend = "setuptools.build_meta" +[tool.setuptools] +packages = ["smart_tests"] + +[tool.setuptools.package-data] +smart_tests = ["jar/exe_deploy.jar"] + [tool.setuptools_scm] diff --git a/setup.cfg b/setup.cfg deleted file mode 100644 index d86cc5b35..000000000 --- a/setup.cfg +++ /dev/null @@ -1,36 +0,0 @@ -[metadata] -name = launchable -author = Launchable, Inc. 
-author_email = info@launchableinc.com -license = Apache Software License v2 -description = Launchable CLI -url = https://launchableinc.com/ -long_description = file: README.md -long_description_content_type = text/markdown -classifiers = - Programming Language :: Python :: 3 - License :: OSI Approved :: Apache Software License - Operating System :: OS Independent - -[options] -packages = find: -install_requires = - click>=8.0,<8.1;python_version=='3.6' - click>=8.1,<8.2;python_version>'3.6' - requests>=2.25;python_version>='3.6' - urllib3>=1.26 - junitparser>=4.0.0 - setuptools - more_itertools>=7.1.0;python_version>='3.6' - python-dateutil - tabulate - importlib-metadata -python_requires = >=3.6 -setup_requires = - setuptools-scm - -[options.entry_points] -console_scripts = launchable = launchable.__main__:main - -[options.package_data] -launchable = jar/exe_deploy.jar diff --git a/launchable/__init__.py b/smart_tests/__init__.py similarity index 100% rename from launchable/__init__.py rename to smart_tests/__init__.py diff --git a/smart_tests/__main__.py b/smart_tests/__main__.py new file mode 100644 index 000000000..9f1904a1a --- /dev/null +++ b/smart_tests/__main__.py @@ -0,0 +1,173 @@ +import importlib +import importlib.util +import logging +import os +from glob import glob +from os.path import basename, dirname, join +from typing import Annotated + +import typer + +from smart_tests.app import Application +from smart_tests.commands.record.tests import create_nested_commands as create_record_target_commands +from smart_tests.commands.subset import create_nested_commands as create_subset_target_commands +from smart_tests.utils.test_runner_registry import get_registry + +from .commands import compare, inspect, record, stats, subset, verify +from .utils import logger +from .utils.env_keys import SKIP_CERT_VERIFICATION +from .version import __version__ + +# Load all test runners at module level so they register their commands +for f in glob(join(dirname(__file__), 'test_runners', "*.py")): + f = basename(f)[:-3] + if f == '__init__': + continue + importlib.import_module('smart_tests.test_runners.%s' % f) + +# Create initial NestedCommand commands with built-in test runners +try: + create_subset_target_commands() + create_record_target_commands() +except Exception as e: + # If NestedCommand creation fails, continue with legacy commands + # This ensures backward compatibility + logging.warning(f"Failed to create NestedCommand commands at import time: {e}") + pass + +# Global flag to track if plugins have been loaded and commands need rebuilding +_plugins_loaded = False + + +def _rebuild_nested_commands_with_plugins(): + """Rebuild NestedCommand apps after plugins are loaded.""" + global _plugins_loaded + if _plugins_loaded: + return # Already rebuilt + + try: + # Clear existing commands from nested apps and rebuild + for module_name in ['smart_tests.commands.subset', 'smart_tests.commands.record.tests']: + module = importlib.import_module(module_name) + if hasattr(module, 'nested_command_app'): + nested_app = module.nested_command_app + nested_app.registered_commands.clear() + nested_app.registered_groups.clear() + if hasattr(module, 'create_nested_commands'): + module.create_nested_commands() + + _plugins_loaded = True + logging.info("Successfully rebuilt NestedCommand apps with plugins") + + except Exception as e: + logging.warning(f"Failed to rebuild NestedCommand apps with plugins: {e}") + import traceback + logging.warning(f"Traceback: {traceback.format_exc()}") + + +# Set up automatic 
rebuilding when new test runners are registered + + +def _on_test_runner_registered(): + """Callback triggered when new test runners are registered.""" + _rebuild_nested_commands_with_plugins() + + +get_registry().set_on_register_callback(_on_test_runner_registered) + +app = typer.Typer() + + +def version_callback(value: bool): + if value: + typer.echo(f"smart-tests-cli {__version__}") + raise typer.Exit() + + +def main( + ctx: typer.Context, + log_level: Annotated[str, typer.Option( + help="Set logger's log level (CRITICAL, ERROR, WARNING, AUDIT, INFO, DEBUG)." + )] = logger.LOG_LEVEL_DEFAULT_STR, + plugin_dir: Annotated[str | None, typer.Option( + "--plugin-dir", "--plugins", + help="Directory to load plugins from" + )] = None, + dry_run: Annotated[bool, typer.Option( + help="Dry-run mode. No data is sent to the server. However, sometimes " + "GET requests without payload data or side effects could be sent. " + "Note: since the dry run log is output together with the AUDIT log, " + "even if the log-level is set to WARNING or higher, the log level will " + "be forced to AUDIT." + )] = False, + skip_cert_verification: Annotated[bool, typer.Option( + help="Skip the SSL certificate check. This lets you bypass system setup issues " + "like CERTIFICATE_VERIFY_FAILED, at the expense of vulnerability against " + "a possible man-in-the-middle attack. Use it as an escape hatch, but with caution." + )] = False, + version: Annotated[bool | None, typer.Option( + "--version", help="Show version and exit", callback=version_callback, is_eager=True + )] = None, +): + level = logger.get_log_level(log_level) + # In the case of dry-run, the level is forced to AUDIT or below. + # This is because the dry-run log will be output along with the audit log. 
+ if dry_run and level > logger.LOG_LEVEL_AUDIT: + level = logger.LOG_LEVEL_AUDIT + + if not skip_cert_verification: + skip_cert_verification = (os.environ.get(SKIP_CERT_VERIFICATION) is not None) + + logging.basicConfig(level=level) + + # load all plugins + if plugin_dir: + for f in glob(join(plugin_dir, '*.py')): + spec = importlib.util.spec_from_file_location( + f"smart_tests.plugins.{basename(f)[:-3]}", f) + if spec is None: + raise ImportError(f"Failed to create module spec for plugin: {f}") + if spec.loader is None: + raise ImportError(f"Plugin spec has no loader: {f}") + plugin = importlib.util.module_from_spec(spec) + spec.loader.exec_module(plugin) + + # After loading plugins, rebuild NestedCommand apps to include plugin commands + if plugin_dir: + _rebuild_nested_commands_with_plugins() + + ctx.obj = Application(dry_run=dry_run, skip_cert_verification=skip_cert_verification) + + +# Use NestedCommand apps if available, otherwise fall back to legacy +try: + from smart_tests.commands.record.tests import nested_command_app as record_target_app + from smart_tests.commands.subset import nested_command_app as subset_target_app + + app.add_typer(record.app, name="record") + app.add_typer(subset_target_app, name="subset") # Use NestedCommand version + app.add_typer(verify.app, name="verify") + app.add_typer(inspect.app, name="inspect") + app.add_typer(stats.app, name="stats") + app.add_typer(compare.app, name="compare") + + # Add record-target as a sub-app to record command + record.app.add_typer(record_target_app, name="test") # Use NestedCommand version + record.app.add_typer(record_target_app, name="tests") # Alias for backward compatibility +except Exception as e: + logging.warning(f"Failed to use NestedCommand apps at init: {e}") + # Fallback to original structure + app.add_typer(record.app, name="record") + app.add_typer(subset.app, name="subset") + app.add_typer(verify.app, name="verify") + app.add_typer(inspect.app, name="inspect") + app.add_typer(stats.app, name="stats") + +app.callback()(main) + +# For backward compatibility with tests that expect a Click CLI +# We'll need to use Typer's testing utilities instead +main = app + +if __name__ == '__main__': + app() diff --git a/launchable/app.py b/smart_tests/app.py similarity index 83% rename from launchable/app.py rename to smart_tests/app.py index 8d84455ae..2bc6924e2 100644 --- a/launchable/app.py +++ b/smart_tests/app.py @@ -1,8 +1,8 @@ # Object representing the most global state possible, which represents a single invocation of CLI # Currently it's used to keep global configurations. # -# From command implementations, this is available from Click 'context.obj' -class Application(object): +# From command implementations, this is available via dependency injection +class Application: def __init__(self, dry_run: bool = False, skip_cert_verification: bool = False): # Dry run mode. This command is used by customers to inspect data we'd send to our server, # but without actually doing so. 
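For reference, the plugin mechanism added in `smart_tests/__main__.py` above is standard `importlib` machinery: every `*.py` file in `--plugin-dir` is executed as a module, and any test runners it registers fire the registry callback that rebuilds the Typer apps. The following is a minimal, self-contained sketch of that loading loop, not the CLI's exact code; `load_plugins` is an illustrative helper and the directory layout is hypothetical:

```python
# Sketch of the importlib-based plugin loading used in main() above.
# load_plugins is a hypothetical helper for illustration only.
import importlib.util
from glob import glob
from os.path import basename, join
from types import ModuleType
from typing import List


def load_plugins(plugin_dir: str) -> List[ModuleType]:
    """Execute every *.py file in plugin_dir as a module and return them."""
    modules = []
    for path in glob(join(plugin_dir, "*.py")):
        name = f"smart_tests.plugins.{basename(path)[:-3]}"
        spec = importlib.util.spec_from_file_location(name, path)
        if spec is None or spec.loader is None:
            raise ImportError(f"Failed to load plugin: {path}")
        module = importlib.util.module_from_spec(spec)
        # Executing the module runs its top-level registration side effects,
        # which is what triggers the on-register callback in the real CLI.
        spec.loader.exec_module(module)
        modules.append(module)
    return modules
```

A consequence of this pattern, visible in `_rebuild_nested_commands_with_plugins`, is that the command tree has to be rebuilt after plugins load, because the Typer apps are assembled eagerly at import time.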
diff --git a/launchable/commands/__init__.py b/smart_tests/commands/__init__.py similarity index 100% rename from launchable/commands/__init__.py rename to smart_tests/commands/__init__.py diff --git a/smart_tests/commands/compare/__init__.py b/smart_tests/commands/compare/__init__.py new file mode 100644 index 000000000..fb4405b86 --- /dev/null +++ b/smart_tests/commands/compare/__init__.py @@ -0,0 +1,7 @@ +import typer + +from .subsets import app as subsets_app + +app = typer.Typer() + +app.add_typer(subsets_app, name="subsets") diff --git a/launchable/commands/compare/subsets.py b/smart_tests/commands/compare/subsets.py similarity index 81% rename from launchable/commands/compare/subsets.py rename to smart_tests/commands/compare/subsets.py index d55047256..351e393a9 100644 --- a/launchable/commands/compare/subsets.py +++ b/smart_tests/commands/compare/subsets.py @@ -1,13 +1,18 @@ +from pathlib import Path from typing import List, Tuple, Union -import click +import typer from tabulate import tabulate +app = typer.Typer() -@click.command() -@click.argument('file_before', type=click.Path(exists=True)) -@click.argument('file_after', type=click.Path(exists=True)) -def subsets(file_before, file_after): + +@app.callback(invoke_without_command=True) +def subsets( + ctx: typer.Context, + file_before: Path = typer.Argument(None, help="First subset file to compare"), + file_after: Path = typer.Argument(None, help="Second subset file to compare") +): """ Compare two subset files and display changes in test order positions """ @@ -47,4 +52,4 @@ def subsets(file_before, file_after): (before, after, f"{diff:+}" if isinstance(diff, int) else diff, test) for before, after, diff, test in rows ] - click.echo(tabulate(tabular_data, headers=headers, tablefmt="github")) + typer.echo(tabulate(tabular_data, headers=headers, tablefmt="github")) diff --git a/smart_tests/commands/inspect/__init__.py b/smart_tests/commands/inspect/__init__.py new file mode 100644 index 000000000..71b12b94d --- /dev/null +++ b/smart_tests/commands/inspect/__init__.py @@ -0,0 +1,7 @@ +import typer + +from . 
import subset + +app = typer.Typer(name="inspect", help="Inspect test and subset data") + +app.add_typer(subset.app, name="subset") diff --git a/launchable/commands/inspect/subset.py b/smart_tests/commands/inspect/subset.py similarity index 78% rename from launchable/commands/inspect/subset.py rename to smart_tests/commands/inspect/subset.py index a13e98505..097fd8191 100644 --- a/launchable/commands/inspect/subset.py +++ b/smart_tests/commands/inspect/subset.py @@ -2,12 +2,12 @@ import sys from abc import ABCMeta, abstractmethod from http import HTTPStatus -from typing import List +from typing import Annotated, List -import click +import typer from tabulate import tabulate -from ...utils.launchable_client import LaunchableClient +from ...utils.smart_tests_client import SmartTestsClient class SubsetResult (object): @@ -65,7 +65,7 @@ def display(self): result._estimated_duration_sec, ] ) - click.echo(tabulate(rows, header, tablefmt="github", floatfmt=".2f")) + typer.echo(tabulate(rows, header, tablefmt="github", floatfmt=".2f")) class SubsetResultJSONDisplay(SubsetResultAbstractDisplay): @@ -88,33 +88,37 @@ def display(self): "estimated_duration_sec": round(result._estimated_duration_sec, 2), }) - click.echo(json.dumps(result_json, indent=2)) - - -@click.command() -@click.option( - '--subset-id', - 'subset_id', - help='subest id', - required=True, -) -@click.option( - '--json', - 'is_json_format', - help='display JSON format', - is_flag=True -) -@click.pass_context -def subset(context: click.core.Context, subset_id: int, is_json_format: bool): + typer.echo(json.dumps(result_json, indent=2)) + + +app = typer.Typer(name="subset", help="Inspect subset data") + + +@app.callback(invoke_without_command=True) +def subset( + ctx: typer.Context, + subset_id: Annotated[int, typer.Option( + "--subset-id", + help="subset id" + )], + json: Annotated[bool, typer.Option( + "--json", + help="display JSON format" + )] = False, +): + # Run the subset inspection (no subcommands in this app) + app = ctx.obj + is_json_format = json # Map parameter name + subset = [] rest = [] - client = LaunchableClient(app=context.obj) + client = SmartTestsClient(app=app) try: - res = client.request("get", "subset/{}".format(subset_id)) + res = client.request("get", f"subset/{subset_id}") if res.status_code == HTTPStatus.NOT_FOUND: - click.echo(click.style( - "Subset {} not found. Check subset ID and try again.".format(subset_id), 'yellow'), err=True) + typer.echo(typer.style( + f"Subset {subset_id} not found. Check subset ID and try again.", fg=typer.colors.YELLOW), err=True) sys.exit(1) res.raise_for_status() diff --git a/smart_tests/commands/record/__init__.py b/smart_tests/commands/record/__init__.py new file mode 100644 index 000000000..43c41986c --- /dev/null +++ b/smart_tests/commands/record/__init__.py @@ -0,0 +1,12 @@ +import typer + +from . 
import attachment, build, commit, session + +app = typer.Typer(name="record", help="Record test results, builds, commits, and sessions") + +app.add_typer(build.app, name="build") +app.add_typer(commit.app, name="commit") +# NestedCommand version will be added in __main__.py +# Remove old tests command registration - it will be replaced by NestedCommand in __main__.py +app.add_typer(session.app, name="session") +app.add_typer(attachment.app, name="attachment") diff --git a/smart_tests/commands/record/attachment.py b/smart_tests/commands/record/attachment.py new file mode 100644 index 000000000..b4ce85c9d --- /dev/null +++ b/smart_tests/commands/record/attachment.py @@ -0,0 +1,36 @@ +from typing import Annotated, List + +import typer + +from smart_tests.utils.session import get_session + +from ...utils.smart_tests_client import SmartTestsClient + +app = typer.Typer(name="attachment", help="Record attachment information") + + +@app.callback(invoke_without_command=True) +def attachment( + ctx: typer.Context, + session: Annotated[str, typer.Option( + "--session", + help="test session name" + )], + attachments: Annotated[List[str], typer.Argument( + help="Attachment files to upload" + )], +): + app = ctx.obj + client = SmartTestsClient(app=app) + try: + # Note: Call get_session method to check test session exists + _ = get_session(session, client) + for a in attachments: + typer.echo(f"Sending {a}") + with open(a, mode='rb') as f: + res = client.request( + "post", f"{session}/attachment", compress=True, payload=f, + additional_headers={"Content-Disposition": f"attachment;filename=\"{a}\""}) + res.raise_for_status() + except Exception as e: + client.print_exception_and_recover(e) diff --git a/launchable/commands/record/build.py b/smart_tests/commands/record/build.py similarity index 61% rename from launchable/commands/record/build.py rename to smart_tests/commands/record/build.py index 321aceeff..350c1c8aa 100644 --- a/launchable/commands/record/build.py +++ b/smart_tests/commands/record/build.py @@ -1,22 +1,20 @@ -import datetime import os import re import sys -from typing import List, Optional, Sequence, Tuple +from typing import Annotated, List -import click +import typer from tabulate import tabulate -from launchable.utils.link import CIRCLECI_KEY, GITHUB_ACTIONS_KEY, JENKINS_URL_KEY, LinkKind, capture_link -from launchable.utils.tracking import Tracking, TrackingClient +from smart_tests.utils.link import CIRCLECI_KEY, GITHUB_ACTIONS_KEY, JENKINS_URL_KEY, capture_link +from smart_tests.utils.tracking import Tracking, TrackingClient from ...utils import subprocess from ...utils.authentication import get_org_workspace -from ...utils.click import DATETIME_WITH_TZ, KEY_VALUE, validate_past_datetime from ...utils.commands import Command from ...utils.fail_fast_mode import set_fail_fast_mode, warn_and_exit_if_fail_fast_mode -from ...utils.launchable_client import LaunchableClient -from ...utils.session import clean_session_files, write_build +from ...utils.smart_tests_client import SmartTestsClient +from ...utils.typer_types import validate_datetime_with_tz, validate_key_value, validate_past_datetime from .commit import commit JENKINS_GIT_BRANCH_KEY = "GIT_BRANCH" @@ -28,115 +26,89 @@ CODE_BUILD_WEBHOOK_HEAD_REF_KEY = "CODEBUILD_WEBHOOK_HEAD_REF" -@click.command() -@click.option( - '--name', - 'build_name', - help='build name', - required=True, - type=str, - metavar='BUILD_NAME' -) -@click.option( - '--source', - help='path to local Git workspace, optionally prefixed by a label. 
' - ' like --source path/to/ws or --source main=path/to/ws', - default=["."], - metavar="REPO_NAME", - multiple=True -) -@click.option( - '--max-days', - help="the maximum number of days to collect commits retroactively", - default=30 -) -@click.option( - '--no-submodules', - is_flag=True, - help="stop collecting information from Git Submodules", - default=False -) -@click.option( - '--no-commit-collection', - is_flag=True, - help="""do not collect commit data. - - This is useful if the repository is a shallow clone and the RevWalk is not - possible. The commit data must be collected with a separate fully-cloned - repository. - """, - default=False -) -@click.option('--scrub-pii', is_flag=True, help='Scrub emails and names', hidden=True) -@click.option( - '--commit', - 'commits', - help="set repository name and commit hash when you use --no-commit-collection option", - multiple=True, - default=(), - type=KEY_VALUE, -) -@click.option( - '--link', - 'links', - help="Set external link of title and url", - multiple=True, - default=(), - type=KEY_VALUE, -) -@click.option( - '--branch', - 'branches', - help="Set repository name and branch name when you use --no-commit-collection option. Please use the same repository name with a commit option", # noqa: E501 - multiple=True, - default=(), - # this is a pseudo key/value that we need to process on our own - # type=KEY_VALUE, -) -@click.option( - # hidden option to directly specify the lineage name without relying on branches - '--lineage', - 'lineage', - hidden=True, -) -@click.option( - '--timestamp', - 'timestamp', - help='Used to overwrite the build time when importing historical data. Note: Format must be `YYYY-MM-DDThh:mm:ssTZD` or `YYYY-MM-DDThh:mm:ss` (local timezone applied)', # noqa: E501 - type=DATETIME_WITH_TZ, - default=None, - callback=validate_past_datetime, -) -@click.pass_context +app = typer.Typer(name="build", help="Record build information") + + +@app.callback(invoke_without_command=True) def build( - ctx: click.core.Context, build_name: str, source: List[str], - max_days: int, no_submodules: bool, no_commit_collection: bool, scrub_pii: bool, - commits: Sequence[Tuple[str, str]], - links: Sequence[Tuple[str, str]], - branches: Sequence[str], lineage: str, timestamp: Optional[datetime.datetime]): - - tracking_client = TrackingClient(Command.RECORD_BUILD, app=ctx.obj) - client = LaunchableClient(app=ctx.obj, tracking_client=tracking_client) + ctx: typer.Context, + build_name: Annotated[str, typer.Option( + "--build", + help="build name", + metavar="BUILD_NAME" + )], + branch: Annotated[str | None, typer.Option( + "--branch", + help="Branch name. A branch is a set of test sessions grouped and this option value will be used for a lineage name." + )] = None, + repositories: Annotated[List[str], typer.Option( + "--repo-branch-map", + help="Set repository name and branch name when you use --no-commit-collection option. " + "Please use the same repository name with a commit option" + )] = [], + source: Annotated[List[str], typer.Option( + help="path to local Git workspace, optionally prefixed by a label. " + "like --source path/to/ws or --source main=path/to/ws", + metavar="REPO_NAME" + )] = ["."], + max_days: Annotated[int, typer.Option( + help="the maximum number of days to collect commits retroactively" + )] = 30, + no_submodules: Annotated[bool, typer.Option( + help="stop collecting information from Git Submodules" + )] = False, + no_commit_collection: Annotated[bool, typer.Option( + help="do not collect commit data. 
" + "This is useful if the repository is a shallow clone and the RevWalk is not " + "possible. The commit data must be collected with a separate fully-cloned " + "repository." + )] = False, + commits: Annotated[List[str], typer.Option( + "--commit", + help="set repository name and commit hash when you use --no-commit-collection option" + )] = [], + timestamp: Annotated[str | None, typer.Option( + help="Used to overwrite the build time when importing historical data. " + "Note: Format must be `YYYY-MM-DDThh:mm:ssTZD` or `YYYY-MM-DDThh:mm:ss` (local timezone applied)" + )] = None, +): + app = ctx.obj + + # Parse key-value pairs for commits + parsed_commits = [validate_key_value(c) for c in commits] + + # Parse timestamp if provided + parsed_timestamp = None + if timestamp: + parsed_timestamp = validate_past_datetime(validate_datetime_with_tz(timestamp)) + + tracking_client = TrackingClient(Command.RECORD_BUILD, app=app) + client = SmartTestsClient(app=app, tracking_client=tracking_client) set_fail_fast_mode(client.is_fail_fast_mode()) if "/" in build_name or "%2f" in build_name.lower(): - sys.exit("--name must not contain a slash and an encoded slash") + typer.echo("--build must not contain a slash and an encoded slash", err=True) + raise typer.Exit(1) if "%25" in build_name: - sys.exit("--name must not contain encoded % (%25)") - if not no_commit_collection and len(commits) != 0: - sys.exit("--no-commit-collection must be specified when --commit is used") - - clean_session_files(days_ago=14) + typer.echo("--build must not contain encoded % (%25)", err=True) + raise typer.Exit(1) + if not no_commit_collection and len(parsed_commits) != 0: + typer.echo("--no-commit-collection must be specified when --commit is used", err=True) + raise typer.Exit(1) + if not no_commit_collection and len(repositories) != 0: + typer.echo("--no-commit-collection must be specified when --repo-branch-map is used", err=True) + raise typer.Exit(1) # Information we want to collect for each Git repository # The key data structure throughout the implementation of this command + class Workspace: # identifier given to a Git repository to track the same repository from one 'record build' to next name: str # path to the Git workspace. Can be None if there's no local workspace present dir: str # current branch of this workspace - branch: Optional[str] = None + branch: str | None = None # SHA1 commit hash that's currently checked out commit_hash: str @@ -221,11 +193,11 @@ def list_sources() -> List[Workspace]: def collect_commits(): if not no_commit_collection: for w in ws: - ctx.invoke(commit, name=w.name, source=w.dir, max_days=max_days) + commit(ctx, name=w.name, source=w.dir, max_days=max_days) else: - click.echo(click.style( + typer.secho( "Warning: Commit collection is turned off. 
The commit data must be collected separately.", - fg='yellow'), err=True) + fg=typer.colors.YELLOW, err=True) # tally up all the submodules, unless we are told not to def list_submodules(workspaces: List[Workspace]) -> List[Workspace]: @@ -257,19 +229,19 @@ def list_submodules(workspaces: List[Workspace]) -> List[Workspace]: def compute_hash_and_branch(ws: List[Workspace]): ws_by_name = {w.name: w for w in ws} + # Process repository options to create branch name mappings branch_name_map = dict() - if len(branches) == 1 and len(ws) == 1 and not ('=' in branches[0]): - # if there's only one repo and the short form "--branch NAME" is used, then we assign that to the first repo - branch_name_map[ws[0].name] = branches[0] + if len(repositories) == 1 and len(ws) == 1 and not ('=' in repositories[0]): + # if there's only one repo and the short form "--repo-branch-map BRANCHNAME" is used, then we assign that to the first repo + branch_name_map[ws[0].name] = repositories[0] else: - for b in branches: - kv = b.split('=') + for r in repositories: + kv = r.split('=') if len(kv) != 2: - click.echo(click.style( - "Expected --branch REPO=BRANCHNAME but got {}".format(kv), - fg="yellow"), - err=True) - sys.exit(1) + typer.secho( + f"Expected --repo-branch-map REPO=BRANCHNAME but got {kv}", + fg=typer.colors.YELLOW, err=True) + raise typer.Exit(1) if not ws_by_name.get(kv[0]): warn_and_exit_if_fail_fast_mode("Invalid repository name {repo} in a --branch option.\nThe repository “{repo}” is not specified via `--source` or `--commit` option.".format(repo=kv[0])) # noqa: E501 @@ -281,14 +253,12 @@ def compute_hash_and_branch(ws: List[Workspace]): if not w.commit_hash: w.commit_hash = subprocess.check_output("git rev-parse HEAD".split(), cwd=w.dir).decode().replace("\n", "") except Exception as e: - click.echo( - click.style( - "Can't get commit hash for {}. Do you run command under git-controlled directory? " - "If not, please set a directory use by --source option.".format(w.dir), - fg='yellow'), - err=True) + typer.secho( + f"Can't get commit hash for {w.dir}. Are you running this command in a Git-controlled directory? " + "If not, please specify a directory with the --source option.", + fg=typer.colors.YELLOW, err=True) print(e, file=sys.stderr) - sys.exit(1) + raise typer.Exit(1) if w.name in branch_name_map: w.branch = branch_name_map[w.name] else: @@ -300,50 +270,45 @@ def compute_hash_and_branch(ws: List[Workspace]): commit_pattern = re.compile("[0-9A-Fa-f]{5,40}$") - for name, hash in commits: + for name, hash in parsed_commits: if not commit_pattern.match(hash): - click.echo(click.style( - "{}'s commit hash `{}` is invalid.".format(name, hash), - fg="yellow"), - err=True) - sys.exit(1) + typer.secho( + f"{name}'s commit hash `{hash}` is invalid.", + fg=typer.colors.YELLOW, err=True) + raise typer.Exit(1) ws.append(Workspace(name=name, commit_hash=hash)) return ws # send all the data to server and obtain build_id, or none if the service is down, to recover - def send(ws: List[Workspace]) -> Optional[str]: + def send(ws: List[Workspace]) -> str | None: # figure out all the CI links to capture def compute_links(): _links = capture_link(os.environ) - for k, v in links: - _links.append({ - "title": k, - "url": v, - "kind": LinkKind.CUSTOM_LINK.name, - }) return _links try: + lineage = branch or ws[0].branch + if lineage is None: + typer.echo("Unable to determine branch name. 
Please specify --branch option.", err=True) + raise typer.Exit(1) + payload = { "buildNumber": build_name, - "lineage": lineage or ws[0].branch, + "lineage": lineage, "commitHashes": [{ 'repositoryName': w.name, 'commitHash': w.commit_hash, 'branchName': w.branch or "" } for w in ws], "links": compute_links(), - "timestamp": timestamp.isoformat() if timestamp else None, + "timestamp": parsed_timestamp.isoformat() if parsed_timestamp else None, } res = client.request("post", "builds", payload=payload) res.raise_for_status() - # at this point we've successfully send the data, so it's OK to record this build - write_build(build_name) - return res.json().get("id", None) except Exception as e: tracking_client.send_error_event( @@ -356,27 +321,17 @@ def compute_links(): # report what we did to the user to assist diagnostics def report(ws: List[Workspace], build_id: str): org, workspace = get_org_workspace() - click.echo( - "Launchable recorded build {} to workspace {}/{} with commits from {} {}:\n".format( - build_name, - org, - workspace, - len(ws), - ("repositories" if len(ws) > 1 else "repository"), - ), - ) + typer.echo( + f"Launchable recorded build {build_name} to workspace {org}/{workspace} with commits from { + len(ws)} { + 'repositories' if len(ws) > 1 else 'repository'}:\n") header = ["Name", "Path", "HEAD Commit"] rows = [[w.name, w.dir, w.commit_hash] for w in ws] - click.echo(tabulate(rows, header, tablefmt="github")) - click.echo( - "\nVisit https://app.launchableinc.com/organizations/{organization}/workspaces/" - "{workspace}/data/builds/{build_id} to view this build and its test sessions" - .format( - organization=org, - workspace=workspace, - build_id=build_id, - )) + typer.echo(tabulate(rows, header, tablefmt="github")) + typer.echo( + f"\nVisit https://app.launchableinc.com/organizations/{org}/workspaces/" + f"{workspace}/data/builds/{build_id} to view this build and its test sessions") # all the logics at the high level if len(commits) == 0: diff --git a/launchable/commands/record/case_event.py b/smart_tests/commands/record/case_event.py similarity index 93% rename from launchable/commands/record/case_event.py rename to smart_tests/commands/record/case_event.py index 4664e8e4a..6f16e3519 100644 --- a/launchable/commands/record/case_event.py +++ b/smart_tests/commands/record/case_event.py @@ -1,12 +1,12 @@ import datetime import sys -from typing import Any, Callable, Dict, Optional +from typing import Any, Callable, Dict import dateutil.parser from dateutil.tz import tzlocal from junitparser import Error, Failure, IntAttr, Skipped, TestCase, TestSuite -from launchable.utils.common_tz import COMMON_TIMEZONES # type: ignore +from smart_tests.utils.common_tz import COMMON_TIMEZONES # type: ignore from ...testpath import FilePathNormalizer, TestPath @@ -31,7 +31,7 @@ class CaseEvent: # The 3rd argument is the report file path TestPathBuilder = Callable[[TestCase, TestSuite, str], TestPath] - DataBuilder = Callable[[TestCase], Optional[Dict[str, Any]]] + DataBuilder = Callable[[TestCase], Dict[str, Any] | None] @staticmethod def default_path_builder( @@ -153,9 +153,9 @@ def stderr(case: TestCase) -> str: @classmethod def create(cls, test_path: TestPath, duration_secs: float, status, - stdout: Optional[str] = None, stderr: Optional[str] = None, - timestamp: Optional[str] = None, data: Optional[Dict] = None) -> Dict: - def _timestamp(ts: Optional[str] = None): + stdout: str | None = None, stderr: str | None = None, + timestamp: str | None = None, data: Dict | None = None) -> Dict: + 
def _timestamp(ts: str | None = None): if ts is None: return datetime.datetime.now(datetime.timezone.utc).isoformat() try: diff --git a/launchable/commands/record/commit.py b/smart_tests/commands/record/commit.py similarity index 70% rename from launchable/commands/record/commit.py rename to smart_tests/commands/record/commit.py index 0c84ea7ef..84e16d0fc 100644 --- a/launchable/commands/record/commit.py +++ b/smart_tests/commands/record/commit.py @@ -1,13 +1,13 @@ import os import subprocess import sys -from typing import List, Optional +from typing import Annotated, List from urllib.parse import urlparse -import click +import typer -from launchable.utils.launchable_client import LaunchableClient -from launchable.utils.tracking import Tracking, TrackingClient +from smart_tests.utils.smart_tests_client import SmartTestsClient +from smart_tests.utils.tracking import Tracking, TrackingClient from ...app import Application from ...utils.commands import Command @@ -22,49 +22,41 @@ jar_file_path = os.path.normpath(os.path.join(os.path.dirname(__file__), "../../jar/exe_deploy.jar")) -@click.command() -@click.option( - '--name', - help="repository name", -) -@click.option( - '--source', - help="repository path", - default=os.getcwd(), - type=click.Path(exists=True, file_okay=False), -) -@click.option( - '--executable', - help="[Obsolete] it was to specify how to perform commit collection but has been removed", - type=click.Choice(['jar', 'docker']), - default='jar', - hidden=True) -@click.option( - '--max-days', - help="the maximum number of days to collect commits retroactively", - default=30) -@click.option( - '--scrub-pii', - is_flag=True, - help='[Deprecated] Scrub emails and names', - hidden=True) -@click.option( - '--import-git-log-output', - help="import from the git-log output", - type=click.Path(exists=True, dir_okay=False, - resolve_path=True, allow_dash=True), -) -@click.pass_context -def commit(ctx, name: str, source: str, executable: bool, max_days: int, scrub_pii: bool, import_git_log_output: str): +app = typer.Typer(name="commit", help="Record commit information") + + +@app.callback(invoke_without_command=True) +def commit( + ctx: typer.Context, + name: Annotated[str | None, typer.Option( + help="repository name" + )] = None, + source: Annotated[str, typer.Option( + help="repository path" + )] = os.getcwd(), + executable: Annotated[str, typer.Option( + help="[Obsolete] it was to specify how to perform commit collection but has been removed", + hidden=True + )] = "jar", + max_days: Annotated[int, typer.Option( + help="the maximum number of days to collect commits retroactively" + )] = 30, + import_git_log_output: Annotated[str | None, typer.Option( + help="import from the git-log output" + )] = None, +): + app = ctx.obj + if executable == 'docker': - sys.exit("--executable docker is no longer supported") + typer.echo("--executable docker is no longer supported", err=True) + raise typer.Exit(1) tracking_client = TrackingClient(Command.COMMIT, app=ctx.obj) - client = LaunchableClient(tracking_client=tracking_client, app=ctx.obj) + client = SmartTestsClient(tracking_client=tracking_client, app=ctx.obj) set_fail_fast_mode(client.is_fail_fast_mode()) if import_git_log_output: - _import_git_log(import_git_log_output, ctx.obj) + _import_git_log(import_git_log_output, app) return # Commit messages are not collected in the default. 
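A pattern worth calling out in these conversions: each leaf command (`commit`, `session`, `attachment`, ...) becomes its own `typer.Typer()` whose `@app.callback(invoke_without_command=True)` function carries the command body, so mounting it with `add_typer` exposes it as a single subcommand. Below is a reduced sketch of that shape under hypothetical option names; only the structure mirrors the real commands:

```python
# Reduced sketch of the sub-app-as-command pattern used throughout this patch.
# The option names are illustrative, not the real CLI surface.
from typing import Annotated

import typer

app = typer.Typer(name="commit", help="Record commit information")


@app.callback(invoke_without_command=True)
def commit(
    ctx: typer.Context,
    name: Annotated[str | None, typer.Option(help="repository name")] = None,
    max_days: Annotated[int, typer.Option(help="days of history to collect")] = 30,
):
    # invoke_without_command=True makes this callback run even though the
    # sub-app registers no subcommands, so it behaves as a single command.
    typer.echo(f"record commit: name={name} max_days={max_days}")


if __name__ == "__main__":
    app()
```

A parent app then mounts it with `parent.add_typer(app, name="commit")`, which is how `record commit` is wired up in `record/__init__.py` above.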
@@ -113,7 +105,7 @@ def exec_jar(name: str, source: str, max_days: int, app: Application, is_collect "-jar", cygpath(jar_file_path), "-endpoint", - "{}/intake/".format(base_url), + f"{base_url}/intake/", "-max-days", str(max_days) ]) @@ -142,7 +134,7 @@ def exec_jar(name: str, source: str, max_days: int, app: Application, is_collect def _import_git_log(output_file: str, app: Application): try: - with click.open_file(output_file) as fp: + with open(output_file) as fp: commits = parse_git_log(fp) upload_commits(commits, app) except Exception as e: @@ -152,7 +144,7 @@ def _import_git_log(output_file: str, app: Application): warn_and_exit_if_fail_fast_mode("Failed to import the git-log output\n error: {}".format(e)) -def _build_proxy_option(https_proxy: Optional[str]) -> List[str]: +def _build_proxy_option(https_proxy: str | None) -> List[str]: if not https_proxy: return [] @@ -162,7 +154,7 @@ def _build_proxy_option(https_proxy: str | None) -> List[str]: options = [] if proxy_url.hostname: - options.append("-Dhttps.proxyHost={}".format(proxy_url.hostname)) + options.append(f"-Dhttps.proxyHost={proxy_url.hostname}") if proxy_url.port: - options.append("-Dhttps.proxyPort={}".format(proxy_url.port)) + options.append(f"-Dhttps.proxyPort={proxy_url.port}") return options diff --git a/smart_tests/commands/record/session.py b/smart_tests/commands/record/session.py new file mode 100644 index 000000000..e6d621c1b --- /dev/null +++ b/smart_tests/commands/record/session.py @@ -0,0 +1,126 @@ +import os +import re +import sys +from http import HTTPStatus +from typing import Annotated, List + +import typer + +from smart_tests.utils.commands import Command +from smart_tests.utils.exceptions import print_error_and_die +from smart_tests.utils.fail_fast_mode import set_fail_fast_mode +from smart_tests.utils.link import LinkKind, capture_link +from smart_tests.utils.no_build import NO_BUILD_BUILD_NAME +from smart_tests.utils.smart_tests_client import SmartTestsClient +from smart_tests.utils.tracking import Tracking, TrackingClient +from smart_tests.utils.typer_types import KeyValue, parse_key_value, validate_datetime_with_tz + +app = typer.Typer(name="session", help="Record session information") + +TEST_SESSION_NAME_RULE = re.compile("^[a-zA-Z0-9][a-zA-Z0-9_.-]*$") + + +@app.callback(invoke_without_command=True) +def session( + ctx: typer.Context, + build_name: Annotated[str, typer.Option( + "--build", + help="build name" + )], + test_suite: Annotated[str, typer.Option( + "--test-suite", + help="Set test suite name. A test suite is a collection of test sessions. Setting a test suite allows you to " + "manage data over test sessions and lineages." + )], + print_session: bool = True, + flavors: Annotated[List[KeyValue], typer.Option( + "--flavor", + help="flavors", + metavar="KEY=VALUE", + parser=parse_key_value + )] = [], + is_observation: Annotated[bool, typer.Option( + "--observation", + help="enable observation mode" + )] = False, + links: Annotated[List[KeyValue], typer.Option( + "--link", + help="Set external link of title and url", + parser=parse_key_value, + )] = [], + is_no_build: Annotated[bool, typer.Option( + "--no-build", + help="If you want to only send test reports, please use this option" + )] = False, + timestamp: Annotated[str | None, typer.Option( + help="Used to overwrite the session time when importing historical data. 
Note: Format must be " + "`YYYY-MM-DDThh:mm:ssTZD` or `YYYY-MM-DDThh:mm:ss` (local timezone applied)" + )] = None, +): + + # Validate and convert timestamp if provided + parsed_timestamp = None + if timestamp: + parsed_timestamp = validate_datetime_with_tz(timestamp) + + # Get application context + app = ctx.obj + tracking_client = TrackingClient(Command.RECORD_SESSION, app=app) + client = SmartTestsClient(app=app, tracking_client=tracking_client) + set_fail_fast_mode(client.is_fail_fast_mode()) + + if not is_no_build and not build_name: + print_error_and_die("Missing option '--build'", tracking_client, Tracking.ErrorEvent.USER_ERROR) + + if is_no_build and build_name: + print_error_and_die("Cannot use --build option with --no-build option", tracking_client, Tracking.ErrorEvent.USER_ERROR) + + if is_no_build: + build_name = NO_BUILD_BUILD_NAME + + payload = { + "flavors": dict([(f.key, f.value) for f in flavors]), + "isObservation": is_observation, + "noBuild": is_no_build, + "testSuite": test_suite, + "timestamp": parsed_timestamp.isoformat() if parsed_timestamp else None, + } + + _links = capture_link(os.environ) + for link in links: + _links.append({ + "title": link.key, + "url": link.value, + "kind": LinkKind.CUSTOM_LINK.name, + }) + payload["links"] = _links + + try: + sub_path = f"builds/{build_name}/test_sessions" + res = client.request("post", sub_path, payload=payload) + + if res.status_code == HTTPStatus.NOT_FOUND: + msg = f"Build {build_name} was not found." \ + f"Make sure to run `launchable record build --build {build_name}` before you run this command." + tracking_client.send_error_event( + event_name=Tracking.ErrorEvent.INTERNAL_CLI_ERROR, + stack_trace=msg, + ) + typer.secho(msg, fg=typer.colors.YELLOW, err=True) + sys.exit(1) + + res.raise_for_status() + + session_id = res.json().get('id', None) + if is_no_build: + build_name = res.json().get("buildNumber", "") + assert build_name is not None + + typer.echo(f"{sub_path}/{session_id}", nl=False) + + except Exception as e: + tracking_client.send_error_event( + event_name=Tracking.ErrorEvent.INTERNAL_CLI_ERROR, + stack_trace=str(e), + ) + client.print_exception_and_recover(e) diff --git a/launchable/commands/record/tests.py b/smart_tests/commands/record/tests.py similarity index 54% rename from launchable/commands/record/tests.py rename to smart_tests/commands/record/tests.py index 64415692a..5c6b4e50e 100644 --- a/launchable/commands/record/tests.py +++ b/smart_tests/commands/record/tests.py @@ -3,268 +3,138 @@ import os import re import xml.etree.ElementTree as ET -from http import HTTPStatus -from typing import Callable, Dict, Generator, List, Optional, Sequence, Tuple, Union +from pathlib import Path +from time import time_ns +from typing import Annotated, Callable, Dict, Generator, List, Tuple, Union -import click +import typer from dateutil.parser import parse -from junitparser import JUnitXml, JUnitXmlError, TestCase, TestSuite # type: ignore # noqa: F401 +from junitparser import JUnitXml, TestCase, TestSuite # type: ignore # noqa: F401 from more_itertools import ichunked from tabulate import tabulate -from launchable.utils.authentication import ensure_org_workspace -from launchable.utils.tracking import Tracking, TrackingClient +from smart_tests.utils.authentication import ensure_org_workspace +from smart_tests.utils.dynamic_commands import DynamicCommandBuilder, extract_callback_options +from smart_tests.utils.env_keys import REPORT_ERROR_KEY +from smart_tests.utils.session import get_session, parse_session +from 
-def _validate_group(ctx, param, value):
+def _validate_group(value):
     if value is None:
         return ""
 
     if str(value).lower() in RESERVED_GROUP_NAMES:
-        raise click.BadParameter("{} is reserved name.".format(value))
+        raise typer.BadParameter(f"{value} is a reserved name.")
 
     if GROUP_NAME_RULE.match(value):
         return value
     else:
-        raise click.BadParameter("group option supports only alphabet(a-z, A-Z), number(0-9), '-', and '_'")
-
-
-@click.group()
-@click.option(
-    '--base',
-    'base_path',
-    help='(Advanced) base directory to make test names portable',
-    type=click.Path(exists=True, file_okay=False),
-    metavar="DIR",
-)
-@click.option(
-    '--session',
-    'session',
-    help='In the format builds/<BUILD NAME>/test_sessions/<ID>',
-    type=str,
-)
-@click.option(
-    '--build',
-    'build_name',
-    help='build name',
-    type=str,
-    metavar='BUILD_NAME',
-    hidden=True,
-)
-@click.option(
-    '--subset-id',
-    'subsetting_id',
-    help='subset_id',
-    type=str,
-)
-@click.option(
-    '--post-chunk',
-    help='Post chunk',
-    default=1000,
-    type=int
-)
-@click.option(
-    "--flavor",
-    "flavor",
-    help='flavors',
-    metavar='KEY=VALUE',
-    type=KEY_VALUE,
-    default=(),
-    multiple=True,
-)
-@click.option(
-    "--no_base_path_inference",
-    "no_base_path_inference",
-    help="""Do not guess the base path to relativize the test file paths.
-
-    By default, if the test file paths are absolute file paths, it automatically
-    guesses the repository root directory and relativize the paths. With this
-    option, the command doesn't do this guess work.
-
-    If --base_path is specified, the absolute file paths are relativized to the
-    specified path irrelevant to this option. Use it if the guessed base path is
-    incorrect.
-    """,
-    is_flag=True
-)
-@click.option(
-    '--report-paths',
-    help='Instead of POSTing test results, just report test paths in the report file then quit. '
-         'For diagnostics. Use with --dry-run',
-    is_flag=True,
-    hidden=True
-)
-@click.option(
-    '--group',
-    "group",
-    help='Grouping name for test results',
-    type=str,
-    callback=_validate_group,
-)
-@click.option(
-    "--allow-test-before-build",
-    "is_allow_test_before_build",
-    help="",
-    is_flag=True,
-    hidden=True,
-)
-@click.option(
-    '--link',
-    'links',
-    help="Set external link of title and url",
-    multiple=True,
-    default=(),
-    type=KEY_VALUE,
-)
-@click.option(
-    '--no-build',
-    'is_no_build',
-    help="If you want to only send test reports, please use this option",
-    is_flag=True,
-)
-@click.option(
-    '--session-name',
-    'session_name',
-    help='test session name',
-    required=False,
-    type=str,
-    metavar='SESSION_NAME',
-)
-@click.option(
-    '--lineage',
-    'lineage',
-    help='Set lineage name. This option value will be passed to the record session command if a session isn\'t created yet.',
-    required=False,
-    type=str,
-    metavar='LINEAGE',
-)
-@click.option(
-    '--test-suite',
-    'test_suite',
-    help='Set test suite name. This option value will be passed to the record session command if a session isn\'t created yet.',  # noqa: E501
-    required=False,
-    type=str,
-    metavar='TEST_SUITE',
-)
-@click.option(
-    '--timestamp',
-    'timestamp',
-    help='Used to overwrite the test executed times when importing historical data. Note: Format must be `YYYY-MM-DDThh:mm:ssTZD` or `YYYY-MM-DDThh:mm:ss` (local timezone applied)',  # noqa: E501
-    type=DATETIME_WITH_TZ,
-    default=None,
-    callback=validate_past_datetime,
-)
-@click.pass_context
-def tests(
-    context: click.core.Context,
-    base_path: str,
-    session: Optional[str],
-    build_name: Optional[str],
-    post_chunk: int,
-    subsetting_id: str,
-    flavor: Sequence[Tuple[str, str]],
-    no_base_path_inference: bool,
-    report_paths: bool,
-    group: str,
-    is_allow_test_before_build: bool,
-    links: Sequence[Tuple[str, str]] = (),
-    is_no_build: bool = False,
-    session_name: Optional[str] = None,
-    lineage: Optional[str] = None,
-    test_suite: Optional[str] = None,
-    timestamp: Optional[datetime.datetime] = None,
+    raise typer.BadParameter("group option supports only alphabet(a-z, A-Z), number(0-9), '-', and '_'")
+
+
+app = typer.Typer(help="Record test results")
+
+# Test runners are loaded in __main__.py to avoid circular imports
+
+
+@app.callback()
+def tests_main(
+    ctx: typer.Context,
+    session: Annotated[str, typer.Option(
+        "--session",
+        help="In the format builds/<BUILD NAME>/test_sessions/<ID>"
+    )],
+    base_path: Annotated[Path | None, typer.Option(
+        "--base",
+        help="(Advanced) base directory to make test names portable",
+        exists=True,
+        file_okay=False,
+        dir_okay=True,
+        resolve_path=True
+    )] = None,
+    post_chunk: Annotated[int, typer.Option(
+        "--post-chunk",
+        help="Post chunk"
+    )] = 1000,
+    no_base_path_inference: Annotated[bool, typer.Option(
+        "--no-base-path-inference",
+        help="Do not guess the base path to relativize the test file paths. By default, if the test file paths are "
+             "absolute file paths, it automatically guesses the repository root directory and relativize the paths. "
+             "With this option, the command doesn't do this guess work. If --base is specified, the absolute "
+             "file paths are relativized to the specified path irrelevant to this option. Use it if the guessed base "
+             "path is incorrect."
+    )] = False,
+    report_paths: Annotated[bool, typer.Option(
+        "--report-paths",
+        help="Instead of POSTing test results, just report test paths in the report file then quit. For diagnostics. "
+             "Use with --dry-run",
+        hidden=True
+    )] = False,
+    group: Annotated[str | None, typer.Option(
+        help="Grouping name for test results"
+    )] = "",
+    is_allow_test_before_build: Annotated[bool, typer.Option(
+        "--allow-test-before-build",
+        help="",
+        hidden=True
+    )] = False,
+    # TODO(Konboi): restore timestamp option
+):
" + "Use with --dry-run", + hidden=True + )] = False, + group: Annotated[str | None, typer.Option( + help="Grouping name for test results" + )] = "", + is_allow_test_before_build: Annotated[bool, typer.Option( + "--allow-test-before-build", + help="", + hidden=True + )] = False, + # TODO(Konboi): restore timestamp option ): logger = Logger() org, workspace = ensure_org_workspace() - test_runner = context.invoked_subcommand + # Get test runner name from context (set by DynamicCommandBuilder) + test_runner = getattr(ctx, 'test_runner', None) - tracking_client = TrackingClient(Command.RECORD_TESTS, app=context.obj) - client = LaunchableClient(test_runner=test_runner, app=context.obj, tracking_client=tracking_client) + tracking_client = TrackingClient(Command.RECORD_TESTS, app=ctx.obj) + client = SmartTestsClient(test_runner=test_runner, app=ctx.obj, tracking_client=tracking_client) set_fail_fast_mode(client.is_fail_fast_mode()) fail_fast_mode_validate(FailFastModeValidateParams( command=Command.RECORD_TESTS, session=session, - build=build_name, - flavor=flavor, - links=links, - is_no_build=is_no_build, - test_suite=test_suite, )) - file_path_normalizer = FilePathNormalizer(base_path, no_base_path_inference=no_base_path_inference) + # Validate group if provided and ensure it's never None + if group is None: + group = "" + elif group: + group = _validate_group(group) - if is_no_build and (read_build() and read_build() != ""): - msg = 'The cli already created `.launchable` file.' \ - 'If you want to use `--no-build` option, please remove `.launchable` file before executing.' - tracking_client.send_error_event( - event_name=Tracking.ErrorEvent.INTERNAL_CLI_ERROR, - stack_trace=msg, - - ) - raise click.UsageError(message=msg) # noqa: E501 + app_instance = ctx.obj + tracking_client = TrackingClient(Command.RECORD_TESTS, app=app_instance) + client = SmartTestsClient(test_runner=test_runner, app=app_instance, tracking_client=tracking_client) - if is_no_build and session: - warn_and_exit_if_fail_fast_mode( - "WARNING: `--session` and `--no-build` are set.\nUsing --session option value ({}) and ignoring `--no-build` option".format(session), # noqa: E501 - ) - - is_no_build = False + file_path_normalizer = FilePathNormalizer( + str(base_path) if base_path else None, + no_base_path_inference=no_base_path_inference) try: - if is_no_build: - session_id = "builds/{}/test_sessions/{}".format(NO_BUILD_BUILD_NAME, NO_BUILD_TEST_SESSION_ID) - record_start_at = INVALID_TIMESTAMP - elif subsetting_id: - result = get_session_and_record_start_at_from_subsetting_id(subsetting_id, client) - session_id = result["session"] - record_start_at = result["start_at"] - elif session_name: - if not build_name: - raise click.UsageError( - '--build option is required when you uses a --session-name option ') - - sub_path = "builds/{}/test_session_names/{}".format(build_name, session_name) - res = client.request("get", sub_path) - res.raise_for_status() - - session_id = "builds/{}/test_sessions/{}".format(build_name, res.json().get("id")) - record_start_at = get_record_start_at(session_id, client) - else: - # The session_id must be back, so cast to str - session_id = str(find_or_create_session( - context=context, - session=session, - build_name=build_name, - flavor=flavor, - links=links, - lineage=lineage, - test_suite=test_suite, - timestamp=timestamp, - tracking_client=tracking_client)) - build_name = read_build() - record_start_at = get_record_start_at(session_id, client) - - build_name, test_session_id = 
-        build_name, test_session_id = parse_session(session_id)
+        test_session = get_session(session, client)
+        record_start_at = get_record_start_at(session, client)
+
+        test_session_id = test_session.id
+        build_name = test_session.build_name
+    except ValueError as e:
+        print_error_and_die(msg=str(e), event=Tracking.ErrorEvent.USER_ERROR, tracking_client=tracking_client)
     except Exception as e:
+        if os.getenv(REPORT_ERROR_KEY):
+            raise e
+
         tracking_client.send_error_event(
             event_name=Tracking.ErrorEvent.INTERNAL_CLI_ERROR,
             stack_trace=str(e),
@@ -272,6 +142,7 @@ def tests(
         client.print_exception_and_recover(e)
         # To prevent users from stopping the CI pipeline, the cli exits with a
         # status code of 0, indicating that the program terminated successfully.
+        build_name, test_session_id = parse_session(session)
         exit(0)
 
     # TODO: placed here to minimize invasion in this PR to reduce the likelihood of
@@ -322,7 +193,7 @@ def test_session_id(self) -> int:
     def test_session_id(self, test_session_id: int):
         self._test_session_id = test_session_id
 
-    # session is generated by `launchable record session` command
+    # session is generated by `smart-tests record session` command
     # the session format is `builds/<BUILD NAME>/test_sessions/<ID>`
     @property
     def session(self) -> str:
@@ -332,14 +203,6 @@ def session(self) -> str:
     def session(self, session: str):
         self._session = session
 
-    @property
-    def is_no_build(self) -> bool:
-        return self._is_no_build
-
-    @is_no_build.setter
-    def is_no_build(self, is_no_build: bool):
-        self._is_no_build = is_no_build
-
     @property
     def metadata_builder(self) -> CaseEvent.DataBuilder:
         """
@@ -409,20 +272,19 @@ def __init__(self, dry_run=False):
         self.path_builder = CaseEvent.default_path_builder(file_path_normalizer)
         self.junitxml_parse_func = None
         self.check_timestamp = True
-        self.base_path = base_path
+        self.base_path = str(base_path) if base_path else None
         self.dry_run = dry_run
         self.no_base_path_inference = no_base_path_inference
         self.is_allow_test_before_build = is_allow_test_before_build
         self.build_name = build_name
         self.test_session_id = test_session_id
-        self.session = session_id
-        self.is_no_build = is_no_build
+        self.session = session
         self.metadata_builder = CaseEvent.default_data_builder()
 
     def make_file_path_component(self, filepath) -> TestPathComponent:
         """Create a single TestPathComponent from the given file path"""
-        if base_path:
-            filepath = os.path.relpath(filepath, start=base_path)
+        if self.base_path:
+            filepath = os.path.relpath(filepath, start=self.base_path)
         return {"type": "file", "name": filepath}
 
     def report(self, junit_report_file: str):
@@ -431,14 +293,14 @@ def report(self, junit_report_file: str):
 
         if (
             not self.is_allow_test_before_build  # nlqa: W503
-            and not self.is_no_build  # noqa: W503
-            and timestamp is None  # noqa: W503
             and self.check_timestamp  # noqa: W503
             and ctime.timestamp() < record_start_at.timestamp()  # noqa: W503
         ):
             format = "%Y-%m-%d %H:%M:%S"
-            logger.warning("skip: {} is too old to report. start_record_at: {} file_created_at: {}".format(
-                junit_report_file, record_start_at.strftime(format), ctime.strftime(format)))
+            logger.warning(
+                f"skip: {junit_report_file} is too old to report. "
+                f"start_record_at: {record_start_at.strftime(format)} "
+                f"file_created_at: {ctime.strftime(format)}")
             self.skipped_reports.append(junit_report_file)
 
             return
@@ -467,14 +329,12 @@ def testcases(reports: List[str]) -> Generator[CaseEventType, None, None]:
                         if len(tc.get('testPath', [])) == 0:
                             continue
 
-                        # Set specific time for importing historical data
-                        if timestamp is not None:
-                            tc["createdAt"] = timestamp.isoformat()
+                        # Timestamp option has been removed
 
                         yield tc
 
             except Exception as e:
-                exceptions.append(Exception("Failed to process a report file: {}".format(report), e))
+                exceptions.append(Exception(f"Failed to process a report file: {report}", e))
 
         if len(exceptions) > 0:
             # defer XML parsing exceptions so that we can send what we
@@ -505,7 +365,7 @@ def payload(
             "testRunner": test_runner,
             "group": group,
             "metadata": get_env_values(client),
-            "noBuild": self.is_no_build,
+            "noBuild": False,  # deprecated: the record tests command no longer sets no-build
             # NOTE:
             # testSuite and flavors are applied only when the no-build option is enabled
             "testSuite": test_suite_name,
@@ -514,30 +374,12 @@ def payload(
 
         def send(payload: Dict[str, Union[str, List]]) -> None:
             res = client.request(
-                "post", "{}/events".format(self.session), payload=payload, compress=True)
-
-            if res.status_code == HTTPStatus.NOT_FOUND:
-                if session:
-                    build, _ = parse_session(session)
-                    warn_and_exit_if_fail_fast_mode(
-                        "Session {} was not found. Make sure to run `launchable record session --build {}` before `launchable record tests`".format(session, build))  # noqa: E501
-
-                elif build_name:
-                    warn_and_exit_if_fail_fast_mode(
-                        "Build {} was not found. Make sure to run `launchable record build --name {}` before `launchable record tests`".format(build_name, build_name))  # noqa: E501
-
+                "post", f"{self.session}/events", payload=payload, compress=True)
             res.raise_for_status()
 
             nonlocal is_observation
             is_observation = res.json().get("testSession", {}).get("isObservation", False)
 
-            # If don’t override build, test session and session_id, build and test session will be made per chunk request.
-            if is_no_build:
-                self.build_name = res.json().get("build", {}).get("build", NO_BUILD_BUILD_NAME)
-                self.test_session_id = res.json().get("testSession", {}).get("id", NO_BUILD_TEST_SESSION_ID)
-                self.session = "builds/{}/test_sessions/{}".format(self.build_name, self.test_session_id)
-                self.is_no_build = False
-
@@ -580,8 +422,8 @@ def recorded_result() -> Tuple[int, int, int, float]:
 
                 cases=chunk,
                 test_runner=test_runner,
                 group=group,
-                test_suite_name=test_suite if test_suite else "",
-                flavors=dict(flavor),
+                test_suite_name="",  # test_suite option was removed
+                flavors={},  # flavor option was removed
             )
             send(p)
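The `send` helper above is invoked once per chunk; `--post-chunk` (default 1000) bounds how many test case events go into a single POST to `{session}/events`. A runnable sketch of that batching, using placeholder event dicts rather than the real `CaseEvent` schema:

```python
# Illustrates only the batching; the real command builds CaseEvent dicts and
# posts each batch via payload()/send() as shown above.
from more_itertools import ichunked

events = ({"testPath": [{"type": "testcase", "name": f"t{i}"}]} for i in range(2500))
batches = [list(chunk) for chunk in ichunked(events, 1000)]  # --post-chunk default
print([len(b) for b in batches])  # [1000, 1000, 500] -> three POSTs
```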
@@ -610,52 +452,42 @@ def recorded_result() -> Tuple[int, int, int, float]:
 
             if len(self.skipped_reports) != 0:
                 warn_and_exit_if_fail_fast_mode(
                     "{} test report(s) were skipped because they were created before this build was recorded.\n"
-                    "Make sure to run your tests after you run `launchable record build`.\n"
+                    "Make sure to run your tests after you run `smart-tests record build`.\n"
                     "Otherwise, if these are really correct test reports, use the `--allow-test-before-build` option.".
                     format(len(self.skipped_reports)))
                 return
             else:
                 warn_and_exit_if_fail_fast_mode(
-                    "Looks like tests didn't run? If not, make sure the right files/directories were passed into `launchable record tests`")  # noqa: E501
+                    "Looks like tests didn't run? If not, make sure the right files/directories were passed into `smart-tests record tests`")  # noqa: E501
                 return
 
         file_count = len(self.reports)
         test_count, success_count, fail_count, duration = recorded_result()
 
-        click.echo(
-            "Launchable recorded tests for build {} (test session {}) to workspace {}/{} from {} files:".format(
-                self.build_name,
-                self.test_session_id,
-                org,
-                workspace,
-                file_count,
-            ))
+        typer.echo(
+            f"Smart Tests recorded tests for build {self.build_name} "
+            f"(test session {self.test_session_id}) "
+            f"to workspace {org}/{workspace} from {file_count} files:")
 
         if is_observation:
-            click.echo("(This test session is under observation mode)")
+            typer.echo("(This test session is under observation mode)")
 
-        click.echo("")
+        typer.echo("")
 
         header = ["Files found", "Tests found", "Tests passed", "Tests failed", "Total duration (min)"]
         rows = [[file_count, test_count, success_count, fail_count, duration]]
-        click.echo(tabulate(rows, header, tablefmt="github", floatfmt=".2f"))
+        typer.echo(tabulate(rows, header, tablefmt="github", floatfmt=".2f"))
 
         if duration == 0:
-            click.echo(click.style("\nTotal test duration is 0."
-                                   "\nPlease check whether the test duration times in report files are correct.", "yellow"))
+            typer.echo(typer.style("\nTotal test duration is 0."
+                                   "\nPlease check whether the test duration times in report files are correct.", "yellow"))
+        typer.echo(
+            f"\nVisit https://app.launchableinc.com/organizations/{org}/workspaces/"
+            f"{workspace}/test-sessions/{self.test_session_id} to view uploaded test results "
+            f"(or run `smart-tests inspect tests --test-session-id {self.test_session_id}`)")
 
-        click.echo(
-            "\nVisit https://app.launchableinc.com/organizations/{organization}/workspaces/"
-            "{workspace}/test-sessions/{test_session_id} to view uploaded test results "
-            "(or run `launchable inspect tests --test-session-id {test_session_id}`)"
-            .format(
-                organization=org,
-                workspace=workspace,
-                test_session_id=self.test_session_id,
-            ))
-
-    context.obj = RecordTests(dry_run=context.obj.dry_run)
+    ctx.obj = RecordTests(dry_run=app_instance.dry_run)
 
 
 # if we fail to determine the timestamp of the build, we err on the side of collecting more test reports
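The summary table above is rendered with `tabulate` in GitHub-flavored format. A runnable sketch with illustrative numbers:

```python
# Same header, tablefmt and floatfmt as the command's summary output;
# the row values here are made up for illustration.
from tabulate import tabulate

header = ["Files found", "Tests found", "Tests passed", "Tests failed", "Total duration (min)"]
rows = [[3, 120, 118, 2, 4.52]]
print(tabulate(rows, header, tablefmt="github", floatfmt=".2f"))
```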
@@ -663,7 +495,7 @@
 INVALID_TIMESTAMP = datetime.datetime.fromtimestamp(0)
 
 
-def get_record_start_at(session: Optional[str], client: LaunchableClient):
+def get_record_start_at(session: str, client: SmartTestsClient):
     """
     Determine the baseline timestamp to be used for up-to-date checks of report files.
     Only files newer than this timestamp will be collected.
@@ -671,31 +503,24 @@
     Based on the thinking that if a build doesn't exist tests couldn't have possibly run,
     we attempt to use the timestamp of a build, with appropriate fallback.
     """
-    if session is None:
-        raise click.UsageError('Either --build or --session has to be specified')
-
-    if session:
-        build_name, _ = parse_session(session)
+    build_name, _ = parse_session(session)
 
-    sub_path = "builds/{}".format(build_name)
+    sub_path = f"builds/{build_name}"
     res = client.request("get", sub_path)
     if res.status_code != 200:
         if res.status_code == 404:
-            msg = "Build {} was not found. " \
-                "Make sure to run `launchable record build --name {}` before `launchable record tests`".format(
-                    build_name, build_name)
+            msg = f"Build {build_name} was not found. " \
+                f"Make sure to run `smart-tests record build --build {build_name}` before `smart-tests record tests`"
         else:
-            msg = "Unable to determine the timestamp of the build {}. HTTP response code was {}".format(
-                build_name,
-                res.status_code)
-        click.echo(click.style(msg, 'yellow'), err=True)
+            msg = f"Unable to determine the timestamp of the build {build_name}. HTTP response code was {res.status_code}"
+        typer.secho(msg, fg=typer.colors.YELLOW, err=True)
 
         # to avoid stop report command
         return INVALID_TIMESTAMP
 
     created_at = res.json()["createdAt"]
-    Logger().debug("Build {} timestamp = {}".format(build_name, created_at))
+    Logger().debug(f"Build {build_name} timestamp = {created_at}")
     t = parse_launchable_timeformat(created_at)
     return t
@@ -705,33 +530,11 @@ def parse_launchable_timeformat(t: str) -> datetime.datetime:
     try:
         return parse(t)
     except Exception as e:
-        Logger().error("parse time error {}. time: {}".format(str(e), t))
+        Logger().error(f"parse time error {str(e)}. time: {t}")
         return INVALID_TIMESTAMP
 
 
-def get_session_and_record_start_at_from_subsetting_id(subsetting_id: str, client: LaunchableClient):
-    s = subsetting_id.split('/')
-
-    # subset/{id}
-    if len(s) != 2:
-        raise click.UsageError('Invalid subset id. like `subset/123/slice` but got {}'.format(subsetting_id))
-
-    res = client.request("get", subsetting_id)
-    if res.status_code != 200:
-        raise click.UsageError(click.style("Unable to get subset information from subset id {}".format(
-            subsetting_id), 'yellow'))
-
-    build_number = res.json()["build"]["buildNumber"]
-    created_at = res.json()["build"]["createdAt"]
-    test_session_id = res.json()["testSession"]["id"]
-
-    return {
-        "session": "builds/{}/test_sessions/{}".format(build_number, test_session_id),
-        "start_at": parse_launchable_timeformat(created_at)
-    }
-
-
-def get_env_values(client: LaunchableClient) -> Dict[str, str]:
+def get_env_values(client: SmartTestsClient) -> Dict[str, str]:
     sub_path = "slack/notification/key/list"
 
     res = client.request("get", sub_path=sub_path)
@@ -745,3 +548,21 @@ def get_env_values(client: LaunchableClient) -> Dict[str, str]:
         metadata[key] = val
 
     return metadata
+
+
+# NestedCommand implementation: create test runner-specific commands
+# This section adds the new command structure where test runners come before options
+nested_command_app = typer.Typer(name="record", help="Record test results (NestedCommand)")
+
+
+def create_nested_commands():
+    """Create NestedCommand commands after all test runners are loaded."""
+    builder = DynamicCommandBuilder()
+
+    # Extract options from the original tests callback
+    callback_options = extract_callback_options(tests_main)
+
+    # Create test runner-specific record test commands
+    builder.create_record_test_commands(nested_command_app, tests_main, callback_options)
+
+# The commands will be created when test runners are loaded
diff --git a/smart_tests/commands/stats/__init__.py b/smart_tests/commands/stats/__init__.py
new file mode 100644
index 000000000..7b4eacbaf
--- /dev/null
+++ b/smart_tests/commands/stats/__init__.py
@@ -0,0 +1,7 @@
+import typer
+
+from .
import test_sessions + +app = typer.Typer(name="stats", help="View test session statistics") + +app.add_typer(test_sessions.app, name="test-sessions") diff --git a/smart_tests/commands/stats/test_sessions.py b/smart_tests/commands/stats/test_sessions.py new file mode 100644 index 000000000..d9b167f36 --- /dev/null +++ b/smart_tests/commands/stats/test_sessions.py @@ -0,0 +1,44 @@ +from typing import Annotated, Any, Dict, List + +import typer + +from ...utils.smart_tests_client import SmartTestsClient +from ...utils.typer_types import validate_key_value + +app = typer.Typer(name="test-sessions", help="View test session statistics") + + +@app.command() +def test_sessions( + ctx: typer.Context, + days: Annotated[int, typer.Option( + help="How many days of test sessions in the past to be stat" + )] = 7, + flavor: Annotated[List[str], typer.Option( + help="flavors", + metavar="KEY=VALUE" + )] = [], +): + app = ctx.obj + + # Parse flavors + parsed_flavors = [validate_key_value(f) for f in flavor] + + params: Dict[str, Any] = {'days': days, 'flavor': []} + flavors = [] + for f in parsed_flavors: + flavors.append('%s=%s' % (f[0], f[1])) + + if flavors: + params['flavor'] = flavors + else: + params.pop('flavor', None) + + client = SmartTestsClient(app=app) + try: + res = client.request('get', '/stats/test-sessions', params=params) + res.raise_for_status() + typer.echo(res.text) + + except Exception as e: + client.print_exception_and_recover(e, "Warning: the service failed to get stat.") diff --git a/launchable/commands/subset.py b/smart_tests/commands/subset.py similarity index 59% rename from launchable/commands/subset.py rename to smart_tests/commands/subset.py index e0ec48e4a..bd975ceb6 100644 --- a/launchable/commands/subset.py +++ b/smart_tests/commands/subset.py @@ -7,266 +7,153 @@ import sys from multiprocessing import Process from os.path import join -from typing import Any, Callable, Dict, List, Optional, Sequence, TextIO, Tuple, Union +from typing import Annotated, Any, Callable, Dict, List, TextIO -import click +import typer from tabulate import tabulate -from launchable.utils.authentication import get_org_workspace -from launchable.utils.session import parse_session -from launchable.utils.tracking import Tracking, TrackingClient +from smart_tests.utils.authentication import get_org_workspace +from smart_tests.utils.commands import Command +from smart_tests.utils.session import get_session, parse_session +from smart_tests.utils.tracking import Tracking, TrackingClient from ..app import Application from ..testpath import FilePathNormalizer, TestPath -from ..utils.click import DURATION, KEY_VALUE, PERCENTAGE, DurationType, PercentageType, ignorable_error -from ..utils.commands import Command +from ..utils.dynamic_commands import DynamicCommandBuilder, extract_callback_options from ..utils.env_keys import REPORT_ERROR_KEY from ..utils.fail_fast_mode import (FailFastModeValidateParams, fail_fast_mode_validate, set_fail_fast_mode, warn_and_exit_if_fail_fast_mode) -from ..utils.launchable_client import LaunchableClient -from .helper import find_or_create_session +from ..utils.smart_tests_client import SmartTestsClient +from ..utils.typer_types import Duration, Percentage, parse_duration, parse_percentage from .test_path_writer import TestPathWriter # TODO: rename files and function accordingly once the PR landscape -@click.group(help="Subsetting tests") -@click.option( - '--target', - 'target', - help='subsetting target from 0% to 100%', - type=PERCENTAGE, -) -@click.option( - '--time', - 
'duration', - help='subsetting by absolute time, in seconds e.g) 300, 5m', - type=DURATION, -) -@click.option( - '--confidence', - 'confidence', - help='subsetting by confidence from 0% to 100%', - type=PERCENTAGE, -) -@click.option( - '--goal-spec', - 'goal_spec', - help='subsetting by programmatic goal definition', - type=str, -) -@click.option( - '--session', - 'session', - help='In the format builds//test_sessions/', - type=str, -) -@click.option( - '--base', - 'base_path', - help='(Advanced) base directory to make test names portable', - type=click.Path(exists=True, file_okay=False), - metavar="DIR", -) -@click.option( - '--build', - 'build_name', - help='build name', - type=str, - metavar='BUILD_NAME', - hidden=True, -) -@click.option( - '--rest', - 'rest', - help='Output the subset remainder to a file, e.g. `--rest=remainder.txt`', - type=str, -) -@click.option( - "--flavor", - "flavor", - help='flavors', - metavar='KEY=VALUE', - type=KEY_VALUE, - default=(), - multiple=True, -) -@click.option( - "--split", - "split", - help='split', - is_flag=True -) -@click.option( - "--no-base-path-inference", - "--no_base_path_inference", # historical, inconsistently named - "no_base_path_inference", - help="""Do not guess the base path to relativize the test file paths. - - By default, if the test file paths are absolute file paths, it automatically - guesses the repository root directory and relativize the paths. With this - option, the command doesn't do this guess work. - - If --base_path is specified, the absolute file paths are relativized to the - specified path irrelevant to this option. Use it if the guessed base path is - incorrect. - """, - is_flag=True -) -@click.option( - "--ignore-new-tests", - "ignore_new_tests", - help='Ignore tests that were added recently.\n\nNOTICE: this option will ignore tests that you added just now as well', - is_flag=True -) -@click.option( - "--observation", - "is_observation", - help="enable observation mode", - is_flag=True, -) -@click.option( - "--get-tests-from-previous-sessions", - "is_get_tests_from_previous_sessions", - help="get subset list from previous full tests", - is_flag=True, -) -@click.option( - "--output-exclusion-rules", - "is_output_exclusion_rules", - help="outputs the exclude test list. Switch the subset and rest.", - is_flag=True, -) -@click.option( - "--non-blocking", - "is_non_blocking", - help="Do not wait for subset requests in observation mode.", - is_flag=True, - hidden=True, -) -@click.option( - "--ignore-flaky-tests-above", - "ignore_flaky_tests_above", - help='Ignore flaky tests above the value set by this option. You can confirm flaky scores in WebApp', - type=click.FloatRange(min=0, max=1.0), -) -@click.option( - '--link', - 'links', - help="Set external link of title and url", - multiple=True, - default=(), - type=KEY_VALUE, -) -@click.option( - "--no-build", - "is_no_build", - help="If you want to only send test reports, please use this option", - is_flag=True, -) -@click.option( - '--session-name', - 'session_name', - help='test session name', - required=False, - type=str, - metavar='SESSION_NAME', -) -@click.option( - '--lineage', - 'lineage', - help='Set lineage name. 
This option value will be passed to the record session command if a session isn\'t created yet.', - required=False, - type=str, - metavar='LINEAGE', -) -@click.option( - "--prioritize-tests-failed-within-hours", - "prioritize_tests_failed_within_hours", - help="Prioritize tests that failed within the specified hours; maximum 720 hours (= 24 hours * 30 days)", - type=click.IntRange(min=0, max=24 * 30), -) -@click.option( - "--prioritized-tests-mapping", - "prioritized_tests_mapping_file", - help='Prioritize tests based on test mapping file', - required=False, - type=click.File('r'), -) -@click.option( - '--test-suite', - 'test_suite', - help='Set test suite name. This option value will be passed to the record session command if a session isn\'t created yet.', # noqa: E501 - required=False, - type=str, - metavar='TEST_SUITE', -) -@click.option( - "--get-tests-from-guess", - "is_get_tests_from_guess", - help="get subset list from git managed files", - is_flag=True, -) -@click.pass_context +app = typer.Typer(name="subset", help="Subsetting tests") + + +@app.callback() def subset( - context: click.core.Context, - target: Optional[PercentageType], - session: Optional[str], - base_path: Optional[str], - build_name: Optional[str], - rest: str, - duration: Optional[DurationType], - flavor: Sequence[Tuple[str, str]], - confidence: Optional[PercentageType], - goal_spec: Optional[str], - split: bool, - no_base_path_inference: bool, - ignore_new_tests: bool, - is_observation: bool, - is_get_tests_from_previous_sessions: bool, - is_output_exclusion_rules: bool, - is_non_blocking: bool, - ignore_flaky_tests_above: Optional[float], - links: Sequence[Tuple[str, str]] = (), - is_no_build: bool = False, - session_name: Optional[str] = None, - lineage: Optional[str] = None, - prioritize_tests_failed_within_hours: Optional[int] = None, - prioritized_tests_mapping_file: Optional[TextIO] = None, - test_suite: Optional[str] = None, - is_get_tests_from_guess: bool = False, + ctx: typer.Context, + session: Annotated[str, typer.Option( + "--session", + help="In the format builds//test_sessions/", + metavar="SESSION" + )], + target: Annotated[Percentage | None, typer.Option( + parser=parse_percentage, + help="subsetting target from 0% to 100%" + )] = None, + time: Annotated[Duration | None, typer.Option( + parser=parse_duration, + help="subsetting by absolute time, in seconds e.g) 300, 5m" + )] = None, + confidence: Annotated[Percentage | None, typer.Option( + parser=parse_percentage, + help="subsetting by confidence from 0% to 100%" + )] = None, + goal_spec: Annotated[str | None, typer.Option( + help="subsetting by programmatic goal definition" + )] = None, + base_path: Annotated[str | None, typer.Option( + '--base', + help="(Advanced) base directory to make test names portable", + metavar="DIR" + )] = None, + rest: Annotated[str | None, typer.Option( + help="Output the subset remainder to a file, e.g. `--rest=remainder.txt`" + )] = None, + # TODO(Konboi): omit from the smart-tests command initial release + # split: Annotated[bool, typer.Option( + # help="split" + # )] = False, + no_base_path_inference: Annotated[bool, typer.Option( + "--no-base-path-inference", + help="Do not guess the base path to relativize the test file paths. " + "By default, if the test file paths are absolute file paths, it automatically " + "guesses the repository root directory and relativize the paths. With this " + "option, the command doesn't do this guess work. 
" + "If --base is specified, the absolute file paths are relativized to the " + "specified path irrelevant to this option. Use it if the guessed base path is incorrect." + )] = False, + ignore_new_tests: Annotated[bool, typer.Option( + "--ignore-new-tests", + help="Ignore tests that were added recently. NOTICE: this option will ignore tests that you added just now as well" + )] = False, + is_observation: Annotated[bool, typer.Option( + '--observation', + help="enable observation mode" + )] = False, + is_get_tests_from_previous_sessions: Annotated[bool, typer.Option( + "--get-tests-from-previous-sessions", + help="get subset list from previous full tests" + )] = False, + is_output_exclusion_rules: Annotated[bool, typer.Option( + "--output-exclusion-rules", + help="outputs the exclude test list. Switch the subset and rest." + )] = False, + is_non_blocking: Annotated[bool, typer.Option( + "--non-blocking", + help="Do not wait for subset requests in observation mode.", + hidden=True + )] = False, + ignore_flaky_tests_above: Annotated[float | None, typer.Option( + help="Ignore flaky tests above the value set by this option. You can confirm flaky scores in WebApp", + min=0.0, max=1.0 + )] = None, + prioritize_tests_failed_within_hours: Annotated[int | None, typer.Option( + help="Prioritize tests that failed within the specified hours; maximum 720 hours (= 24 hours * 30 days)", + min=0, max=24 * 30 + )] = None, + prioritized_tests_mapping_file: Annotated[typer.FileText | None, typer.Option( + "--prioritized-tests-mapping", + help="Prioritize tests based on test mapping file", + mode="r" + )] = None, + is_get_tests_from_guess: Annotated[bool, typer.Option( + "--get-tests-from-guess", + help="Get subset list from guessed tests" + )] = False ): - app = context.obj + app = ctx.obj tracking_client = TrackingClient(Command.SUBSET, app=app) - client = LaunchableClient( - test_runner=context.invoked_subcommand, - app=app, - tracking_client=tracking_client) + client = SmartTestsClient(app=app, tracking_client=tracking_client) set_fail_fast_mode(client.is_fail_fast_mode()) fail_fast_mode_validate(FailFastModeValidateParams( command=Command.SUBSET, session=session, - build=build_name, - flavor=flavor, is_observation=is_observation, - links=links, - is_no_build=is_no_build, - test_suite=test_suite, )) def print_error_and_die(msg: str, event: Tracking.ErrorEvent): - click.echo(click.style(msg, fg="red"), err=True) + typer.echo(typer.style(msg, fg="red"), err=True) tracking_client.send_error_event(event_name=event, stack_trace=msg) sys.exit(1) def warn(msg: str): - click.echo(click.style("Warning: " + msg, fg="yellow"), err=True) + typer.echo(typer.style("Warning: " + msg, fg="yellow"), err=True) tracking_client.send_error_event( event_name=Tracking.ErrorEvent.WARNING_ERROR, stack_trace=msg ) + try: + test_session = get_session(session, client) + build_name = test_session.build_name + session_id = test_session.id + is_observation = test_session.observation_mode + except ValueError as e: + print_error_and_die(msg=str(e), event=Tracking.ErrorEvent.USER_ERROR) + except Exception as e: + if os.getenv(REPORT_ERROR_KEY): + raise e + else: + # not to block pipeline, parse session and use it + client.print_exception_and_recover(e, "Warning: failed to check test session") + build_name, session_id = parse_session(session) + if is_get_tests_from_guess and is_get_tests_from_previous_sessions: print_error_and_die( "--get-tests-from-guess (list up tests from git ls-files and subset from there) and 
--get-tests-from-previous-sessions (list up tests from the recent runs and subset from there) are mutually exclusive. Which one do you want to use?", # noqa E501 @@ -283,67 +170,10 @@ def warn(msg: str): Tracking.ErrorEvent.INTERNAL_CLI_ERROR ) - if is_no_build and session: - warn_and_exit_if_fail_fast_mode( - "WARNING: `--session` and `--no-build` are set.\nUsing --session option value ({}) and ignoring `--no-build` option".format(session)) # noqa: E501 - is_no_build = False - - session_id = None - - try: - if session_name: - if not build_name: - raise click.UsageError( - '--build option is required when you use a --session-name option ') - sub_path = "builds/{}/test_session_names/{}".format(build_name, session_name) - client = LaunchableClient(test_runner=context.invoked_subcommand, app=context.obj, tracking_client=tracking_client) - res = client.request("get", sub_path) - res.raise_for_status() - session_id = "builds/{}/test_sessions/{}".format(build_name, res.json().get("id")) - else: - session_id = find_or_create_session( - context=context, - session=session, - build_name=build_name, - flavor=flavor, - is_observation=is_observation, - links=links, - is_no_build=is_no_build, - lineage=lineage, - tracking_client=tracking_client, - test_suite=test_suite, - ) - except click.UsageError as e: - print_error_and_die(str(e), Tracking.ErrorEvent.USER_ERROR) - except Exception as e: - tracking_client.send_error_event( - event_name=Tracking.ErrorEvent.INTERNAL_CLI_ERROR, - stack_trace=str(e), - - ) - if os.getenv(REPORT_ERROR_KEY): - raise e - else: - click.echo(ignorable_error(e), err=True) - - if is_non_blocking: - if (not is_observation) and session_id: - try: - client = LaunchableClient( - app=app, - tracking_client=tracking_client) - res = client.request("get", session_id) - is_observation_in_recorded_session = res.json().get("isObservation", False) - if not is_observation_in_recorded_session: - print_error_and_die( - "You have to specify --observation option to use non-blocking mode", - Tracking.ErrorEvent.INTERNAL_CLI_ERROR) - except Exception as e: - tracking_client.send_error_event( - event_name=Tracking.ErrorEvent.INTERNAL_CLI_ERROR, - stack_trace=str(e), - ) - click.echo(ignorable_error(e), err=True) + if is_non_blocking and not is_observation: + print_error_and_die( + "You have to specify --observation option to use non-blocking mode", + Tracking.ErrorEvent.INTERNAL_CLI_ERROR) file_path_normalizer = FilePathNormalizer(base_path, no_base_path_inference=no_base_path_inference) @@ -351,13 +181,14 @@ def warn(msg: str): # PR merge hell. This should be moved to a top-level class TestPathWriter.base_path = base_path + TestPathWriter.base_path_explicitly_set = (base_path is not None) class Optimize(TestPathWriter): # test_paths: List[TestPath] # doesn't work with Python 3.5 # is_get_tests_from_previous_sessions: bool # Where we take TestPath, we also accept a path name as a string. 
- TestPathLike = Union[str, TestPath] + TestPathLike = str | TestPath # output_handler: Callable[[ # List[TestPathLike], List[TestPathLike]], None] @@ -367,22 +198,32 @@ class Optimize(TestPathWriter): def __init__(self, app: Application): self.rest = rest self.input_given = False # set to True when an attempt was made to add to self.test_paths - self.test_paths: List[List[Dict[str, str]]] = [] + self.test_paths: list[list[dict[str, str]]] = [] self.output_handler = self._default_output_handler self.exclusion_output_handler = self._default_exclusion_output_handler self.is_get_tests_from_previous_sessions = is_get_tests_from_previous_sessions self.is_output_exclusion_rules = is_output_exclusion_rules + self.test_runner: str | None = None # Will be set by set_test_runner self.is_get_tests_from_guess = is_get_tests_from_guess super(Optimize, self).__init__(app=app) - def _default_output_handler(self, output: List[TestPath], rests: List[TestPath]): + def set_test_runner(self, test_runner: str): + """Set the test runner name for this subset operation""" + self.test_runner = test_runner + + @property + def base_path(self): + """Provide access to base_path for test runners compatibility""" + return TestPathWriter.base_path + + def _default_output_handler(self, output: list[TestPath], rests: list[TestPath]): if rest: self.write_file(rest, rests) if output: self.print(output) - def _default_exclusion_output_handler(self, subset: List[TestPath], rest: List[TestPath]): + def _default_exclusion_output_handler(self, subset: list[TestPath], rest: list[TestPath]): self.output_handler(rest, subset) def test_path(self, path: TestPathLike): @@ -402,7 +243,7 @@ def rel_base_path(path): else: self.test_paths.append(self.to_test_path(rel_base_path(path))) - def stdin(self) -> Union[TextIO, List]: + def stdin(self) -> TextIO | List: """ Returns sys.stdin, but after ensuring that it's connected to something reasonable. 
@@ -431,7 +272,7 @@ def to_test_path(x: TestPathLike) -> TestPath: return x def scan(self, base: str, pattern: str, - path_builder: Optional[Callable[[str], Union[TestPath, str, None]]] = None): + path_builder: Callable[[str], TestPath | str | None] | None = None): """ Starting at the 'base' path, recursively add everything that matches the given GLOB pattern @@ -466,11 +307,12 @@ def default_path_builder(file_name): def get_payload( self, session_id: str, - target: Optional[PercentageType], - duration: Optional[DurationType], + target: Percentage | None, + duration: Duration | None, + confidence: Percentage | None, test_runner: str, ): - payload: Dict[str, Any] = { + payload: dict[str, Any] = { "testPaths": self.test_paths, "testRunner": test_runner, "session": { @@ -484,17 +326,17 @@ def get_payload( if target is not None: payload["goal"] = { "type": "subset-by-percentage", - "percentage": target, + "percentage": float(target), } elif duration is not None: payload["goal"] = { "type": "subset-by-absolute-time", - "duration": duration, + "duration": float(duration), } elif confidence is not None: payload["goal"] = { "type": "subset-by-confidence", - "percentage": confidence + "percentage": float(confidence) } elif goal_spec is not None: payload["goal"] = { @@ -539,18 +381,19 @@ def _collect_potential_test_files(self): warn_and_exit_if_fail_fast_mode("Nothing that looks like a test file in the current git repository.") def request_subset(self) -> SubsetResult: - test_runner = context.invoked_subcommand + # Get test runner name from the object (set by DynamicCommandBuilder) + test_runner = self.test_runner # temporarily extend the timeout because subset API response has become slow # TODO: remove this line when API response return response # within 300 sec timeout = (5, 300) - payload = self.get_payload(str(session_id), target, duration, str(test_runner)) + payload = self.get_payload(str(session_id), target, time, confidence, str(test_runner)) if is_non_blocking: # Create a new process for requesting a subset. process = Process(target=subset_request, args=(client, timeout, payload)) process.start() - click.echo("The subset was requested in non-blocking mode.", err=True) + typer.echo("The subset was requested in non-blocking mode.", err=True) self.output_handler(self.test_paths, []) # With non-blocking mode, we don't need to wait for the response sys.exit(0) @@ -560,6 +403,7 @@ def request_subset(self) -> SubsetResult: # The status code 422 is returned when validation error of the test mapping file occurs. 
if res.status_code == 422: print_error_and_die("Error: {}".format(res.reason), Tracking.ErrorEvent.USER_ERROR) + res.raise_for_status() return SubsetResult.from_response(res.json()) except Exception as e: @@ -597,19 +441,19 @@ def run(self): warn_and_exit_if_fail_fast_mode("Error: no tests found matching the path.") return - if split: - click.echo("subset/{}".format(subset_result.subset_id)) - else: - output_subset, output_rests = subset_result.subset, subset_result.rest + # TODO(Konboi): split subset isn't provided for smart-tests initial release + # if split: + # typer.echo("subset/{}".format(subset_result.subset_id)) + output_subset, output_rests = subset_result.subset, subset_result.rest - if subset_result.is_observation: - output_subset = output_subset + output_rests - output_rests = [] + if subset_result.is_observation: + output_subset = output_subset + output_rests + output_rests = [] - if is_output_exclusion_rules: - self.exclusion_output_handler(output_subset, output_rests) - else: - self.output_handler(output_subset, output_rests) + if is_output_exclusion_rules: + self.exclusion_output_handler(output_subset, output_rests) + else: + self.output_handler(output_subset, output_rests) # When Launchable returns an error, the cli skips showing summary # report @@ -619,7 +463,6 @@ def run(self): if "subset" not in summary.keys() or "rest" not in summary.keys(): return - build_name, test_session_id = parse_session(session_id) org, workspace = get_org_workspace() header = ["", "Candidates", @@ -647,36 +490,54 @@ def run(self): ] if subset_result.is_brainless: - click.echo( + typer.echo( "Your model is currently in training", err=True) - click.echo( - "Launchable created subset {} for build {} (test session {}) in workspace {}/{}".format( + typer.echo( + "Smart Tests created subset {} for build {} (test session {}) in workspace {}/{}".format( subset_result.subset_id, build_name, - test_session_id, + session_id, org, workspace, ), err=True, ) if subset_result.is_observation: - click.echo( + typer.echo( "(This test session is under observation mode)", err=True) - click.echo("", err=True) - click.echo(tabulate(rows, header, tablefmt="github", floatfmt=".2f"), err=True) + typer.echo("", err=True) + typer.echo(tabulate(rows, header, tablefmt="github", floatfmt=".2f"), err=True) - click.echo( - "\nRun `launchable inspect subset --subset-id {}` to view full subset details".format(subset_result.subset_id), + typer.echo( + "\nRun `smart-tests inspect subset --subset-id {}` to view full subset details".format(subset_result.subset_id), err=True) - context.obj = Optimize(app=context.obj) + ctx.obj = Optimize(app=app) -def subset_request(client: LaunchableClient, timeout: Tuple[int, int], payload: Dict[str, Any]): +def subset_request(client: SmartTestsClient, timeout: tuple[int, int], payload: dict[str, Any]): return client.request("post", "subset", timeout=timeout, payload=payload, compress=True) +# NestedCommand implementation: create test runner-specific commands +# This section adds the new command structure where test runners come before options +nested_command_app = typer.Typer(name="subset", help="Subsetting tests (NestedCommand)") + + +def create_nested_commands(): + """Create NestedCommand commands after all test runners are loaded.""" + builder = DynamicCommandBuilder() + + # Extract options from the original subset callback + callback_options = extract_callback_options(subset) + + # Create test runner-specific subset commands + builder.create_subset_commands(nested_command_app, subset, 
callback_options) + +# The commands will be created when test runners are loaded + + class SubsetResult: def __init__( self, diff --git a/launchable/commands/test_path_writer.py b/smart_tests/commands/test_path_writer.py similarity index 73% rename from launchable/commands/test_path_writer.py rename to smart_tests/commands/test_path_writer.py index 748fe604f..6ba9539ae 100644 --- a/launchable/commands/test_path_writer.py +++ b/smart_tests/commands/test_path_writer.py @@ -1,26 +1,29 @@ from os.path import join -from typing import Callable, Dict, List, Optional +from typing import Callable, Dict, List -import click +import typer from ..app import Application from ..testpath import TestPath class TestPathWriter(object): - base_path: Optional[str] = None + base_path: str | None = None + base_path_explicitly_set: bool = False # Track if base_path was explicitly provided def __init__(self, app: Application): self._formatter: Callable[[TestPath], str] = TestPathWriter.default_formatter self._separator = "\n" - self._same_bin_formatter: Optional[Callable[[str], Dict[str, str]]] = None + self._same_bin_formatter: Callable[[str], Dict[str, str]] | None = None self.app = app @classmethod def default_formatter(cls, x: TestPath): """default formatter that's in line with to_test_path(str)""" file_name = x[0]['name'] - if cls.base_path: + # Only prepend base_path if it was explicitly set via --base option + # Auto-inferred base paths should not affect output formatting + if cls.base_path and cls.base_path_explicitly_set: # default behavior consistent with default_path_builder's relative # path handling file_name = join(str(cls.base_path), file_name) @@ -51,11 +54,11 @@ def write_file(self, file: str, test_paths: List[TestPath]): self.separator.join(self.formatter(t) for t in test_paths)) def print(self, test_paths: List[TestPath]): - click.echo(self.separator.join(self.formatter(t) + typer.echo(self.separator.join(self.formatter(t) for t in test_paths)) @property - def same_bin_formatter(self) -> Optional[Callable[[str], Dict[str, str]]]: + def same_bin_formatter(self) -> Callable[[str], Dict[str, str]] | None: return self._same_bin_formatter @same_bin_formatter.setter diff --git a/launchable/commands/verify.py b/smart_tests/commands/verify.py similarity index 62% rename from launchable/commands/verify.py rename to smart_tests/commands/verify.py index 68e4596b9..c53932a44 100644 --- a/launchable/commands/verify.py +++ b/smart_tests/commands/verify.py @@ -2,21 +2,18 @@ import platform import re import subprocess -import sys -from subprocess import CalledProcessError from typing import List -import click +import typer -from launchable.utils.env_keys import TOKEN_KEY -from launchable.utils.tracking import Tracking, TrackingClient +from smart_tests.utils.commands import Command +from smart_tests.utils.env_keys import TOKEN_KEY +from smart_tests.utils.tracking import Tracking, TrackingClient from ..utils.authentication import get_org_workspace -from ..utils.click import emoji -from ..utils.commands import Command -from ..utils.http_client import DEFAULT_BASE_URL from ..utils.java import get_java_command -from ..utils.launchable_client import LaunchableClient +from ..utils.smart_tests_client import SmartTestsClient +from ..utils.typer_types import emoji from ..version import __version__ as version @@ -36,10 +33,10 @@ def pick(a, i): def compare_java_version(output: str) -> int: """Check if the Java version meets what we need. 
returns >=0 if we meet the requirement""" pattern = re.compile('"([^"]+)"') - for l in output.splitlines(): - if l.find("java version") != -1: - # l is like: java version "1.8.0_144" - m = pattern.search(l) + for line in output.splitlines(): + if line.find("java version") != -1: + # line is like: java version "1.8.0_144" + m = pattern.search(line) if m: tokens = m.group(1).split(".") if len(tokens) >= 2: @@ -51,72 +48,71 @@ def compare_java_version(output: str) -> int: def check_java_version(javacmd: str) -> int: - """ - Check if the Java version meets what we need. - Returns >=0 if we meet the requirement - Returns -1 if 'java -version' command returns non-zero exit status - """ + """Check if the Java version meets what we need. returns >=0 if we meet the requirement""" try: v = subprocess.run([javacmd, "-version"], check=True, stderr=subprocess.PIPE, universal_newlines=True) return compare_java_version(v.stderr) - except CalledProcessError: + except subprocess.CalledProcessError: return -1 -@click.command(name="verify") -@click.pass_context -def verify(context: click.core.Context): +app = typer.Typer(name="verify", help="Verify CLI setup and connectivity") + + +@app.callback(invoke_without_command=True) +def verify(ctx: typer.Context): + # Run the verification (no subcommands in this app) # In this command, regardless of REPORT_ERROR_KEY, always report an unexpected error with full stack trace # to assist troubleshooting. `click.UsageError` is handled by the invoking # Click gracefully. + app_instance = ctx.obj + org, workspace = get_org_workspace() - tracking_client = TrackingClient(Command.VERIFY, app=context.obj) - client = LaunchableClient(tracking_client=tracking_client, app=context.obj) + tracking_client = TrackingClient(Command.VERIFY, app=app_instance) + client = SmartTestsClient(tracking_client=tracking_client, app=app_instance) java = get_java_command() # Print the system information first so that we can get them even if there's # an issue. - click.echo("Organization: " + repr(org)) - click.echo("Workspace: " + repr(workspace)) - click.echo("Proxy: " + repr(os.getenv("HTTPS_PROXY"))) - if client.base_url() != DEFAULT_BASE_URL: - click.echo("Server: " + repr(client.base_url())) - click.echo("Platform: " + repr(platform.platform())) - click.echo("Python version: " + repr(platform.python_version())) - click.echo("Java command: " + repr(java)) - click.echo("launchable version: " + repr(version)) + typer.echo("Organization: " + repr(org)) + typer.echo("Workspace: " + repr(workspace)) + typer.echo("Proxy: " + repr(os.getenv("HTTPS_PROXY"))) + typer.echo("Platform: " + repr(platform.platform())) + typer.echo("Python version: " + repr(platform.python_version())) + typer.echo("Java command: " + repr(java)) + typer.echo("smart-tests version: " + repr(version)) if org is None or workspace is None: msg = ( - "Could not identify Launchable organization/workspace. " - "Please confirm if you set LAUNCHABLE_TOKEN or LAUNCHABLE_ORGANIZATION and LAUNCHABLE_WORKSPACE " + "Could not identify Smart Tests organization/workspace. 
" + "Please confirm if you set SMART_TESTS_TOKEN or SMART_TESTS_ORGANIZATION and SMART_TESTS_WORKSPACE " "environment variables" ) tracking_client.send_error_event( event_name=Tracking.ErrorEvent.INTERNAL_CLI_ERROR, stack_trace=msg ) - raise click.UsageError( - click.style(msg, fg="red")) + typer.secho(msg, fg=typer.colors.RED, err=True) + raise typer.Exit(1) try: res = client.request("get", "verification") if res.status_code == 401: if os.getenv(TOKEN_KEY): - msg = ("Authentication failed. Most likely the value for the LAUNCHABLE_TOKEN " + msg = ("Authentication failed. Most likely the value for the SMART_TESTS_TOKEN " "environment variable is invalid.") else: - msg = ("Authentication failed. Please set the LAUNCHABLE_TOKEN. " + msg = ("Authentication failed. Please set the SMART_TESTS_TOKEN. " "If you intend to use tokenless authentication, " "kindly reach out to our support team for further assistance.") - click.echo(click.style(msg, fg="red"), err=True) + typer.secho(msg, fg=typer.colors.RED, err=True) tracking_client.send_error_event( event_name=Tracking.ErrorEvent.USER_ERROR, stack_trace=msg, ) - sys.exit(2) + raise typer.Exit(2) res.raise_for_status() except Exception as e: tracking_client.send_error_event( @@ -127,12 +123,13 @@ def verify(context: click.core.Context): client.print_exception_and_recover(e) if java is None: - msg = "Java is not installed. Install Java version 8 or newer to use the Launchable CLI." + msg = "Java is not installed. Install Java version 8 or newer to use the Smart Tests CLI." tracking_client.send_error_event( event_name=Tracking.ErrorEvent.INTERNAL_CLI_ERROR, stack_trace=msg ) - raise click.UsageError(click.style(msg, fg="red")) + typer.secho(msg, fg=typer.colors.RED, err=True) + raise typer.Exit(1) # Level 2 check: versions. 
This is more fragile than just reporting the number, so we move # this out here @@ -143,7 +140,8 @@ def verify(context: click.core.Context): event_name=Tracking.ErrorEvent.INTERNAL_CLI_ERROR, stack_trace=msg ) - raise click.UsageError(click.style(msg, fg="red")) + typer.secho(msg, fg=typer.colors.RED, err=True) + raise typer.Exit(1) if check_java_version(java) < 0: msg = "Java 8 or later is required" @@ -151,6 +149,7 @@ def verify(context: click.core.Context): event_name=Tracking.ErrorEvent.INTERNAL_CLI_ERROR, stack_trace=msg ) - raise click.UsageError(click.style(msg, fg="red")) + typer.secho(msg, fg=typer.colors.RED, err=True) + raise typer.Exit(1) - click.echo(click.style("Your CLI configuration is successfully verified" + emoji(" \U0001f389"), fg="green")) + typer.secho("Your CLI configuration is successfully verified" + emoji(" \U0001f389"), fg=typer.colors.GREEN) diff --git a/launchable/jar/exe_deploy.jar b/smart_tests/jar/exe_deploy.jar similarity index 95% rename from launchable/jar/exe_deploy.jar rename to smart_tests/jar/exe_deploy.jar index e7af26047..5374a5e46 100755 Binary files a/launchable/jar/exe_deploy.jar and b/smart_tests/jar/exe_deploy.jar differ diff --git a/launchable/plugins/__init__.py b/smart_tests/plugins/__init__.py similarity index 100% rename from launchable/plugins/__init__.py rename to smart_tests/plugins/__init__.py diff --git a/launchable/test_runners/__init__.py b/smart_tests/test_runners/__init__.py similarity index 100% rename from launchable/test_runners/__init__.py rename to smart_tests/test_runners/__init__.py diff --git a/launchable/test_runners/adb.py b/smart_tests/test_runners/adb.py similarity index 68% rename from launchable/test_runners/adb.py rename to smart_tests/test_runners/adb.py index 101df6cbe..9809f9f50 100644 --- a/launchable/test_runners/adb.py +++ b/smart_tests/test_runners/adb.py @@ -1,9 +1,9 @@ import re -from . import launchable +from . import smart_tests -@launchable.subset +@smart_tests.subset def subset(client): prev_cls_name = None pattern = re.compile(r'^INSTRUMENTATION_STATUS: class=(.+)$') @@ -20,5 +20,4 @@ def subset(client): client.run() -split_subset = launchable.CommonSplitSubsetImpls(__name__, seperator=',').split_subset() -record_tests = launchable.CommonRecordTestImpls(__name__).report_files() +record_tests = smart_tests.CommonRecordTestImpls(__name__).report_files() diff --git a/launchable/test_runners/ant.py b/smart_tests/test_runners/ant.py similarity index 59% rename from launchable/test_runners/ant.py rename to smart_tests/test_runners/ant.py index c55df2afe..860dd53ca 100644 --- a/launchable/test_runners/ant.py +++ b/smart_tests/test_runners/ant.py @@ -1,15 +1,19 @@ import os -from typing import List +from typing import Annotated, List -import click # type: ignore +import typer from ..utils.file_name_pattern import jvm_test_pattern -from . import launchable +from . 
import smart_tests -@click.argument('source_roots', required=True, nargs=-1) -@launchable.subset -def subset(client, source_roots: List[str]): +@smart_tests.subset +def subset( + client, + source_roots: Annotated[List[str], typer.Argument( + help="Source directories to scan for test files" + )] +): def file2test(f: str): if jvm_test_pattern.match(f): f = f[:f.rindex('.')] # remove extension @@ -25,5 +29,4 @@ def file2test(f: str): client.run() -split_subset = launchable.CommonSplitSubsetImpls(__name__).split_subset() -record_tests = launchable.CommonRecordTestImpls(__name__).report_files() +record_tests = smart_tests.CommonRecordTestImpls(__name__).report_files() diff --git a/launchable/test_runners/bazel.py b/smart_tests/test_runners/bazel.py similarity index 74% rename from launchable/test_runners/bazel.py rename to smart_tests/test_runners/bazel.py index 1904a926b..defa3f9d4 100644 --- a/launchable/test_runners/bazel.py +++ b/smart_tests/test_runners/bazel.py @@ -2,21 +2,21 @@ import os import sys from pathlib import Path -from typing import Generator, List +from typing import Annotated, Generator, List -import click # type: ignore +import typer from junitparser import TestCase, TestSuite # type: ignore from ..testpath import TestPath from ..utils.logger import Logger -from . import launchable +from . import smart_tests def make_test_path(pkg, target) -> TestPath: return [{'type': 'package', 'name': pkg}, {'type': 'target', 'name': target}] -@launchable.subset +@smart_tests.subset def subset(client): # Read targets from stdin, which generally looks like //foo/bar:zot for label in client.stdin(): @@ -30,18 +30,15 @@ def subset(client): client.run() -split_subset = launchable.CommonSplitSubsetImpls(__name__, - formatter=lambda x: x[0]['name'] + ":" + x[1]['name']).split_subset() - - -@click.argument('workspace', required=True) -@click.option('--build-event-json', 'build_event_json_files', help="set file path generated by --build_event_json_file", - type=click.Path(exists=True), - required=False, - multiple=True, - ) -@launchable.record.tests -def record_tests(client, workspace, build_event_json_files): +@smart_tests.record.tests +def record_tests( + client, + workspace: Annotated[str, typer.Argument(help="Bazel workspace directory")], + build_event_json: Annotated[List[typer.FileText] | None, typer.Option( + "--build-event-json", + help="set file path generated by --build_event_json_file" + )] = None, +): """ Takes Bazel workspace, then report all its test results """ @@ -68,12 +65,15 @@ def f(case: TestCase, suite: TestSuite, report_file: str) -> TestPath: client.path_builder = f client.check_timestamp = False + # Convert parameter name for backward compatibility + build_event_json_files = [f.name for f in build_event_json] if build_event_json else [] + if build_event_json_files: - for l in parse_build_event_json(build_event_json_files): - if l is None: + for test_label in parse_build_event_json(build_event_json_files): + if test_label is None: continue - client.report(str(Path(base).joinpath(l, 'test.xml'))) + client.report(str(Path(base).joinpath(test_label, 'test.xml'))) else: client.scan(str(base), '**/test.xml') @@ -87,7 +87,7 @@ def parse_build_event_json(files: List[str]) -> Generator: try: d = json.loads(line) except Exception: - Logger().error("Can not parse build event json {}".format(line)) + Logger().error(f"Can not parse build event json {line}") yield if "id" in d: if "testResult" in d["id"]: diff --git a/launchable/test_runners/behave.py 
b/smart_tests/test_runners/behave.py similarity index 80% rename from launchable/test_runners/behave.py rename to smart_tests/test_runners/behave.py index b70924f82..970a44968 100644 --- a/launchable/test_runners/behave.py +++ b/smart_tests/test_runners/behave.py @@ -1,14 +1,19 @@ import os +from typing import Annotated, List from xml.etree import ElementTree as ET -import click +import typer -from . import launchable +from . import smart_tests -@click.argument('reports', required=True, nargs=-1) -@launchable.record.tests -def record_tests(client, reports): +@smart_tests.record.tests +def record_tests( + client, + reports: Annotated[List[str], typer.Argument( + help="Test report files to process" + )], +): for r in reports: client.report(r) @@ -39,7 +44,7 @@ def parse_func(p: str) -> ET.ElementTree: client.run() -@launchable.subset +@smart_tests.subset def subset(client): for t in client.stdin(): if 0 < t.find(".feature"): @@ -52,6 +57,3 @@ def subset(client): client.separator = "|" client.run() - - -split_subset = launchable.CommonSplitSubsetImpls(__name__, seperator="|").split_subset() diff --git a/launchable/test_runners/ctest.py b/smart_tests/test_runners/ctest.py similarity index 80% rename from launchable/test_runners/ctest.py rename to smart_tests/test_runners/ctest.py index 8eb3f9cfc..c33e04efa 100644 --- a/launchable/test_runners/ctest.py +++ b/smart_tests/test_runners/ctest.py @@ -3,22 +3,33 @@ import os import re from pathlib import Path +from typing import Annotated, List from xml.etree import ElementTree as ET -import click - -from . import launchable - - -@click.argument('file', type=click.Path(exists=True)) -@click.option('--output-regex-files', is_flag=True, - help='Output test regex to files') -@click.option('--output-regex-files-dir', type=str, default='.', - help='Output directory for test regex') -@click.option('--output-regex-files-size', type=int, - default=60 * 1024, help='Max size of each regex file') -@launchable.subset -def subset(client, file, output_regex_files, output_regex_files_dir, output_regex_files_size): +import typer + +from . 
import smart_tests + + +@smart_tests.subset +def subset( + client, + file: Annotated[str, typer.Argument( + help="JSON file to process" + )], + output_regex_files: Annotated[bool, typer.Option( + "--output-regex-files", + help="Output test regex to files" + )] = False, + output_regex_files_dir: Annotated[str, typer.Option( + "--output-regex-files-dir", + help="Output directory for test regex" + )] = ".", + output_regex_files_size: Annotated[int, typer.Option( + "--output-regex-files-size", + help="Max size of each regex file" + )] = 60 * 1024, +): if file: with Path(file).open() as json_file: data = json.load(json_file) @@ -36,7 +47,7 @@ def handler(output, rests): client.output_handler = handler client.run() else: - client.formatter = lambda x: "^{}$".format(x[0]['name']) + client.formatter = lambda x: f"^{x[0]['name']}$" client.seperator = '|' client.run() @@ -48,7 +59,7 @@ def _write_regex_files(output_dir, prefix, max_size, paths): if not os.path.exists(output_dir): os.makedirs(output_dir) for i, elems in enumerate(escaped): - with open(os.path.join(output_dir, "{}_{}".format(prefix, i)), 'w') as f: + with open(os.path.join(output_dir, f"{prefix}_{i}"), 'w') as f: f.write('|'.join(elems) + '\n') @@ -70,14 +81,13 @@ def _group_by_size(elems, max_size): return ret -split_subset = launchable.CommonSplitSubsetImpls( - __name__, formatter=lambda x: "^{}$".format( - x[0]['name']), seperator='|').split_subset() - - -@click.argument('source_roots', required=True, nargs=-1) -@launchable.record.tests -def record_tests(client, source_roots): +@smart_tests.record.tests +def record_tests( + client, + source_roots: Annotated[List[str], typer.Argument( + help="Source root directories or files to process" + )], +): for root in source_roots: match = False for t in glob.iglob(root, recursive=True): @@ -87,7 +97,7 @@ def record_tests(client, source_roots): else: client.report(t) if not match: - click.echo("No matches found: {}".format(root), err=True) + typer.echo(f"No matches found: {root}", err=True) def parse_func(p: str) -> ET.ElementTree: """ diff --git a/launchable/test_runners/cts.py b/smart_tests/test_runners/cts.py similarity index 91% rename from launchable/test_runners/cts.py rename to smart_tests/test_runners/cts.py index a715f1c92..96f94c5d7 100644 --- a/launchable/test_runners/cts.py +++ b/smart_tests/test_runners/cts.py @@ -1,10 +1,11 @@ +from typing import Annotated, List from xml.etree import ElementTree as ET -import click +import typer -from launchable.commands.record.case_event import CaseEvent +from smart_tests.commands.record.case_event import CaseEvent -from . import launchable +from . import smart_tests # https://source.android.com/docs/compatibility/cts/command-console-v2 include_option = "--include-filter" @@ -115,9 +116,13 @@ def build_record_test_path(module: str, test_case: str, test: str): return (x for x in events) -@click.argument('reports', required=True, nargs=-1) -@launchable.record.tests -def record_tests(client, reports): +@smart_tests.record.tests +def record_tests( + client, + reports: Annotated[List[str], typer.Argument( + help="Test report files to process" + )], +): """ Beta: Report test result that Compatibility Test Suite (CTS) produced. Supports only CTS v2 """ @@ -128,7 +133,7 @@ def record_tests(client, reports): client.run() -@launchable.subset +@smart_tests.subset def subset(client): """ Beta: Produces test list from previous test sessions for Compatibility Test Suite (CTS). 
Supports only CTS v2
@@ -152,7 +157,9 @@ def subset(client):
     """
     for t in client.stdin():
-        if "starting command" in t:
+        line = t.rstrip("\n") if isinstance(t, str) else str(t).rstrip("\n")
+
+        if "starting command" in line:
             start_module = True
             continue

@@ -160,13 +167,11 @@ def subset(client):
             continue

         # e.g) armeabi-v7a CtsAbiOverrideHostTestCases
-        device_and_module = t.rstrip("\n").split()
+        device_and_module = line.split()
         if len(device_and_module) != 2:
-            click.echo(
-                click.style(
-                    "Warning: {line} is not expected Module format and skipped".format(
-                        line=t),
-                    fg="yellow"),
+            typer.secho(
+                f"Warning: {line} is not in the expected Module format and was skipped",
+                fg=typer.colors.YELLOW,
                 err=True)
             continue
diff --git a/launchable/test_runners/cucumber.py b/smart_tests/test_runners/cucumber.py
similarity index 89%
rename from launchable/test_runners/cucumber.py
rename to smart_tests/test_runners/cucumber.py
index eabad183e..57563a2e9 100644
--- a/launchable/test_runners/cucumber.py
+++ b/smart_tests/test_runners/cucumber.py
@@ -5,33 +5,39 @@
 from copy import deepcopy
 from enum import Enum
 from pathlib import Path
-from typing import Dict, Generator, List, Optional
+from typing import Annotated, Dict, Generator, List
 from xml.etree import ElementTree as ET

-import click
+import typer

-from launchable.testpath import FilePathNormalizer, TestPath
+from smart_tests.testpath import FilePathNormalizer, TestPath

 from ..commands.record.case_event import CaseEvent, CaseEventType
-from . import launchable
+from . import smart_tests

-subset = launchable.CommonSubsetImpls(__name__).scan_files('*_feature')
-split_subset = launchable.CommonSplitSubsetImpls(__name__).split_subset()
+subset = smart_tests.CommonSubsetImpls(__name__).scan_files('*_feature')

 REPORT_FILE_PREFIX = "TEST-"

-@click.option('--json', 'json_format', help="use JSON report format", is_flag=True)
-@click.argument('reports', required=True, nargs=-1)
-@launchable.record.tests
-def record_tests(client, reports, json_format):
+@smart_tests.record.tests
+def record_tests(
+    client,
+    reports: Annotated[List[str], typer.Argument(
+        help="Test report files to process"
+    )],
+    json_format: Annotated[bool, typer.Option(
+        "--json",
+        help="use JSON report format"
+    )] = False,
+):
     if json_format:
         for r in reports:
             client.report(r)
         client.parse_func = JSONReportParser(client).parse_func
     else:
-        report_file_and_test_file_map = {}
+        report_file_and_test_file_map: dict[str, str] = {}
         _record_tests_from_xml(client, reports, report_file_and_test_file_map)

         def parse_func(report: str) -> ET.ElementTree:
@@ -50,7 +56,7 @@ def _record_tests_from_xml(client, reports, report_file_and_test_file_map: Dict[
     base_path = client.base_path if client.base_path else os.getcwd()
     for report in reports:
         if REPORT_FILE_PREFIX not in report:
-            click.echo("{} was load skipped because it doesn't look like a report file.".format(report), err=True)
+            typer.echo(f"Skipped loading {report} because it doesn't look like a report file.", err=True)
             continue

         test_file = _find_test_file_from_report_file(base_path, report)
@@ -58,12 +64,12 @@ def _record_tests_from_xml(client, reports, report_file_and_test_file_map: Dict[
             report_file_and_test_file_map[report] = str(test_file)
             client.report(report)
         else:
-            click.echo("Cannot find test file of {}".format(report), err=True)
+            typer.echo(f"Cannot find test file of {report}", err=True)


 class JSONReportParser:
     """
-    client: launchable.RecordTests
+    client: smart_tests.RecordTests
     """

     def __init__(self, client):
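
Before the next hunk, one behavior in this renamed file is worth spelling out: `_find_test_file_from_report_file` maps a report file name back to a feature file by treating every `-` as either a literal `-` or a path separator, then probing each candidate. A worked sketch of the expansion, traced from `_create_file_candidate_list` further below:

    # Every '-' can stand for itself or for '/', so n dashes yield 2**n candidates:
    _create_file_candidate_list("features-foo-hoge")
    # -> ['features-foo-hoge', 'features/foo-hoge',
    #     'features-foo/hoge', 'features/foo/hoge']
    # _find_test_file_from_report_file then returns the first candidate for
    # which <base_path>/<candidate>.feature exists.

@@ -210,12 +216,10 @@ def 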
parse_func(self, report_file: str) -> Generator[CaseEventType, None, None]: try: data = json.load(json_file) except Exception as e: - raise Exception("Can't read JSON format report file {}. Make sure to confirm report file.".format( - report_file)) from e + raise Exception(f"Can't read JSON format report file {report_file}. Make sure to confirm report file.") from e if len(data) == 0: - click.echo("Can't find test reports from {}. Make sure to confirm report file.".format( - report_file), err=True) + typer.echo(f"Can't find test reports from {report_file}. Make sure to confirm report file.", err=True) for d in data: file_name = clean_uri(d.get("uri", "")) @@ -223,7 +227,7 @@ def parse_func(self, report_file: str) -> Generator[CaseEventType, None, None]: # Cucumber can define repeating the same `Given` steps as a `Background` # https://cucumber.io/docs/gherkin/reference/#background - background: Optional[TestCaseInfo] = None + background: TestCaseInfo | None = None for element in d.get("elements", []): test_case = element.get("name", "") @@ -266,7 +270,7 @@ def parse_func(self, report_file: str) -> Generator[CaseEventType, None, None]: stderr="\n".join(test_case_info.stderr())) -def _find_test_file_from_report_file(base_path: str, report: str) -> Optional[Path]: +def _find_test_file_from_report_file(base_path: str, report: str) -> Path | None: """ Find test file from cucumber report file path format e.g) Test-features-foo-hoge.xml -> features/foo/hoge.feature or features/foo-hoge.feature @@ -276,9 +280,9 @@ def _find_test_file_from_report_file(base_path: str, report: str) -> Optional[Pa report_file = report_file.lstrip(REPORT_FILE_PREFIX) report_file = os.path.splitext(report_file)[0] - list = _create_file_candidate_list(report_file) - for l in list: - f = Path(base_path, l + ".feature") + file_candidates = _create_file_candidate_list(report_file) + for candidate in file_candidates: + f = Path(base_path, candidate + ".feature") if f.exists(): return f @@ -286,20 +290,20 @@ def _find_test_file_from_report_file(base_path: str, report: str) -> Optional[Pa def _create_file_candidate_list(file: str) -> List[str]: - list = [""] + candidates = [""] for c in file: if c == "-": - l = len(list) - list += deepcopy(list) - for i in range(l): - list[i] += '-' - for i in range(l, len(list)): - list[i] += '/' + list_length = len(candidates) + candidates += deepcopy(candidates) + for i in range(list_length): + candidates[i] += '-' + for i in range(list_length, len(candidates)): + candidates[i] += '/' else: - for i in range(len(list)): - list[i] += c + for i in range(len(candidates)): + candidates[i] += c - return list + return candidates class Result: diff --git a/launchable/test_runners/cypress.py b/smart_tests/test_runners/cypress.py similarity index 76% rename from launchable/test_runners/cypress.py rename to smart_tests/test_runners/cypress.py index a090da2fe..f9a666fe8 100644 --- a/launchable/test_runners/cypress.py +++ b/smart_tests/test_runners/cypress.py @@ -1,13 +1,18 @@ +from typing import Annotated, List from xml.etree import ElementTree as ET -import click +import typer -from . import launchable +from . 
import smart_tests -@click.argument('reports', required=True, nargs=-1) -@launchable.record.tests -def record_tests(client, reports): +@smart_tests.record.tests +def record_tests( + client, + reports: Annotated[List[str], typer.Argument( + help="Test report files to process" + )], +): for r in reports: client.report(r) @@ -28,7 +33,7 @@ def parse_func(p: str) -> ET.ElementTree: client.run() -@launchable.subset +@smart_tests.subset def subset(client): # read lines as test file names for t in client.stdin(): @@ -36,6 +41,3 @@ def subset(client): client.separator = ',' client.run() - - -split_subset = launchable.CommonSplitSubsetImpls(__name__, seperator=',').split_subset() diff --git a/launchable/test_runners/dotnet.py b/smart_tests/test_runners/dotnet.py similarity index 69% rename from launchable/test_runners/dotnet.py rename to smart_tests/test_runners/dotnet.py index d0875c940..ab39522ce 100644 --- a/launchable/test_runners/dotnet.py +++ b/smart_tests/test_runners/dotnet.py @@ -1,12 +1,12 @@ import glob import os -from typing import List +from typing import Annotated, List -import click +import typer -from launchable.test_runners import launchable -from launchable.test_runners.nunit import nunit_parse_func -from launchable.testpath import TestPath +from smart_tests.test_runners import smart_tests +from smart_tests.test_runners.nunit import nunit_parse_func +from smart_tests.testpath import TestPath # main subset logic @@ -45,7 +45,7 @@ def exclusion_output_handler(subset_tests: List[TestPath], rest_tests: List[Test with open(client.rest, "w+", encoding="utf-8") as fp: fp.write(client.separator.join(formatter(t) for t in subset_tests)) - click.echo(client.separator.join(formatter(t) for t in rest_tests)) + typer.echo(client.separator.join(formatter(t) for t in rest_tests)) client.separator = separator client.formatter = formatter @@ -53,31 +53,35 @@ def exclusion_output_handler(subset_tests: List[TestPath], rest_tests: List[Test client.run() -@click.option('--bare', help='outputs class names alone', default=False, is_flag=True) -@launchable.subset -def subset(client, bare): +@smart_tests.subset +def subset( + client, + bare: Annotated[bool, typer.Option( + "--bare", + help="outputs class names alone" + )] = False, +): """ Alpha: Supports only Zero Input Subsetting """ if not client.is_get_tests_from_previous_sessions: - click.echo( - click.style( - "The dotnet profile only supports Zero Input Subsetting.\nMake sure to use `--get-tests-from-previous-sessions` opton", # noqa: E501 - fg="red"), + typer.secho( + "The dotnet profile only supports Zero Input Subsetting.\nMake sure to use " + "`--get-tests-from-previous-sessions` option", + fg=typer.colors.RED, err=True) + raise typer.Exit(1) do_subset(client, bare) -@click.option('--bare', help='outputs class names alone', default=False, is_flag=True) -@launchable.split_subset -def split_subset(client, bare): - do_subset(client, bare) - - -@click.argument('files', required=True, nargs=-1) -@launchable.record.tests -def record_tests(client, files): +@smart_tests.record.tests +def record_tests( + client, + files: Annotated[List[str], typer.Argument( + help="Test report files to process" + )], +): """ Alpha: Supports only NUnit report formats. """ @@ -90,7 +94,7 @@ def record_tests(client, files): else: client.report(t) if not match: - click.echo("No matches found: {}".format(file), err=True) + typer.echo(f"No matches found: {file}", err=True) # Note: we support only Nunit test report format now. 
# If we need to support another format e.g) JUnit, trc, then we'll add a option diff --git a/launchable/test_runners/file.py b/smart_tests/test_runners/file.py similarity index 83% rename from launchable/test_runners/file.py rename to smart_tests/test_runners/file.py index 8c36746fc..d9856f746 100644 --- a/launchable/test_runners/file.py +++ b/smart_tests/test_runners/file.py @@ -1,14 +1,16 @@ # # The most bare-bone versions of the test runner support # -import click +from typing import Annotated, List + +import typer from junitparser import TestCase, TestSuite # type: ignore from ..testpath import TestPath -from . import launchable +from . import smart_tests -@launchable.subset +@smart_tests.subset def subset(client): # read lines as test file names for t in client.stdin(): @@ -17,9 +19,13 @@ def subset(client): client.run() -@click.argument('reports', required=True, nargs=-1) -@launchable.record.tests -def record_tests(client, reports): +@smart_tests.record.tests +def record_tests( + client, + reports: Annotated[List[str], typer.Argument( + help="Test report files to process" + )], +): def path_builder(case: TestCase, suite: TestSuite, report_file: str) -> TestPath: """path builder that puts the file name first, which is consistent with the subset command""" @@ -34,7 +40,7 @@ def find_filename(): filepath = find_filename() if not filepath: - raise click.ClickException( + raise typer.BadParameter( "No file name found in %s" % report_file) # default test path in `subset` expects to have this file name @@ -49,6 +55,3 @@ def find_filename(): for r in reports: client.report(r) client.run() - - -split_subset = launchable.CommonSplitSubsetImpls(__name__).split_subset() diff --git a/launchable/test_runners/flutter.py b/smart_tests/test_runners/flutter.py similarity index 79% rename from launchable/test_runners/flutter.py rename to smart_tests/test_runners/flutter.py index 5c69cc028..a979e3bbe 100644 --- a/launchable/test_runners/flutter.py +++ b/smart_tests/test_runners/flutter.py @@ -1,13 +1,13 @@ import json import pathlib -from typing import Dict, Generator, List, Optional +from typing import Dict, Generator, List -import click +import typer -from launchable.commands.record.case_event import CaseEvent -from launchable.testpath import FilePathNormalizer +from smart_tests.commands.record.case_event import CaseEvent +from smart_tests.testpath import FilePathNormalizer -from . import launchable +from . 
import smart_tests

 FLUTTER_FILE_EXT = "_test.dart"

@@ -81,7 +81,7 @@ def __init__(self, id: int, platform: str, path: str):
         self._path = path
         self._test_cases: Dict[int, TestCase] = {}

-    def _get_test_case(self, id: int) -> Optional[TestCase]:
+    def _get_test_case(self, id: int) -> TestCase | None:
         return self._test_cases.get(id)

@@ -90,10 +90,10 @@ def __init__(self, file_path_normalizer: FilePathNormalizer):
         self.file_path_normalizer = file_path_normalizer
         self._suites: Dict[int, TestSuite] = {}

-    def _get_suite(self, suite_id: int) -> Optional[TestSuite]:
+    def _get_suite(self, suite_id: int) -> TestSuite | None:
         return self._suites.get(suite_id)

-    def _get_test(self, test_id: int) -> Optional[TestCase]:
+    def _get_test(self, test_id: int) -> TestCase | None:
         if test_id is None:
             return None

@@ -124,7 +124,7 @@ def parse_func(self, report_file: str) -> Generator[CaseEvent, None, None]:
         # TODO: Support cases that include information about `flutter pub get`
         # see detail: https://github.com/launchableinc/examples/actions/runs/11884312142/job/33112309450
         if not pathlib.Path(report_file).exists():
-            click.echo(click.style("Error: Report file not found: {}".format(report_file), fg='red'), err=True)
+            typer.secho(f"Error: Report file not found: {report_file}", fg=typer.colors.RED, err=True)
             return

         with open(report_file, "r") as ndjson:
@@ -136,12 +136,14 @@ def parse_func(self, report_file: str) -> Generator[CaseEvent, None, None]:
                     data = json.loads(j)
                     self._parse_json(data)
                 except json.JSONDecodeError:
-                    click.echo(
-                        click.style("Error: Invalid JSON format: {}. Skip load this line".format(j), fg='yellow'), err=True)
+                    typer.secho(
+                        f"Error: Invalid JSON format: {j}. Skipping this line",
+                        fg=typer.colors.YELLOW,
+                        err=True)
                     continue
                 except Exception as e:
-                    click.echo(
-                        click.style("Error: Failed to parse the report file: {} : {}".format(report_file, e), fg='red'), err=True)
+                    typer.secho(
+                        f"Error: Failed to parse the report file: {report_file} : {e}", fg=typer.colors.RED, err=True)
                     return

         for event in self._events():
@@ -181,9 +183,9 @@ def _parse_json(self, data: Dict):
             suite = self._get_suite(suite_id)

             if suite_id is None or suite is None:
-                click.echo(click.style(
-                    "Warning: Cannot find a parent test suite (id: {}). So won't send test result of {}".format(
-                        suite_id, test_data.get("name")), fg='yellow'), err=True)
+                typer.secho(
+                    f"Warning: Cannot find a parent test suite (id: {suite_id}). "
+                    f"So we won't send the test result of {test_data.get('name')}",
+                    fg=typer.colors.YELLOW, err=True)
                 return

             test_id = test_data.get("id")
@@ -212,8 +214,8 @@ def _parse_json(self, data: Dict):
             test_id = data.get("testID")
             test = self._get_test(test_id)
             if test is None:
-                click.echo(click.style(
-                    "Warning: Cannot find a test (id: {}). So we skip update stderr".format(test_id), fg='yellow'),
+                typer.secho(
+                    f"Warning: Cannot find a test (id: {test_id}). So we skip updating stderr", fg=typer.colors.YELLOW,
                     err=True)
                 return
             test.stderr += ("\n" if test.stderr else "") + data.get("error", "")
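
The parser above consumes an NDJSON stream: one JSON event per line, with numeric `suiteID`/`testID` cross-references that `_get_suite`/`_get_test` resolve. A minimal sketch of the reading loop it implements (field handling reduced to its essentials; the exact `flutter test` reporter invocation is not shown in this diff):

    import json

    def iter_events(report_file: str):
        # One JSON object per line; malformed lines are skipped, mirroring
        # ReportParser.parse_func above.
        with open(report_file) as ndjson:
            for line in ndjson:
                line = line.strip()
                if not line:
                    continue
                try:
                    yield json.loads(line)
                except json.JSONDecodeError:
                    continue

@@ -224,8 +226,8 @@ def _parse_json(self, data: Dict):
             test_id = data.get("testID")
             test = self._get_test(test_id)
             if test is None:
-                click.echo(click.style(
-                    "Warning: Cannot find a test (id: {}). So we skip update stdout".format(test_id), fg='yellow'),
+                typer.secho(
+                    f"Warning: Cannot find a test (id: {test_id}). 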
So we skip updating stdout", fg=typer.colors.YELLOW,
                     err=True)
                 return

@@ -235,14 +237,13 @@ def _parse_json(self, data: Dict):
         return


-@click.argument('reports', required=True, nargs=-1)
-@launchable.record.tests
-def record_tests(client, reports):
+@smart_tests.record.tests
+def record_tests(
+    client,
+    reports: List[str] = typer.Argument(
+        ..., help="Test report files to process"
+    ),
+):
     file_path_normalizer = FilePathNormalizer(base_path=client.base_path,
                                               no_base_path_inference=client.no_base_path_inference)

     client.parse_func = ReportParser(file_path_normalizer).parse_func
-    launchable.CommonRecordTestImpls.load_report_files(client=client, source_roots=reports)
+    smart_tests.CommonRecordTestImpls.load_report_files(client=client, source_roots=reports)


-subset = launchable.CommonSubsetImpls(__name__).scan_files('*.dart')
-split_subset = launchable.CommonSplitSubsetImpls(__name__).split_subset()
+subset = smart_tests.CommonSubsetImpls(__name__).scan_files('*.dart')
diff --git a/launchable/test_runners/go_test.py b/smart_tests/test_runners/go_test.py
similarity index 85%
rename from launchable/test_runners/go_test.py
rename to smart_tests/test_runners/go_test.py
index 8f148be4e..29a39041f 100644
--- a/launchable/test_runners/go_test.py
+++ b/smart_tests/test_runners/go_test.py
@@ -1,17 +1,17 @@
 import glob
 import os
 import re
-from typing import Dict, List
+from typing import Annotated, Dict, List

-import click
+import typer
 from junitparser import TestCase, TestSuite  # type: ignore

 from ..testpath import TestPath
 from ..utils.logger import Logger
-from . import launchable
+from . import smart_tests

-@launchable.subset
+@smart_tests.subset
 def subset(client):
     logger = Logger()

@@ -35,14 +35,18 @@ def subset(client):
         else:
             logger.warning("Cannot extract the package from the input. This may result in missing some tests.")
             test_cases = []
-    client.formatter = lambda x: "^{}$".format(x[1]['name'])
+    client.formatter = lambda x: f"^{x[1]['name']}$"
     client.separator = '|'
     client.run()

-@click.argument('source_roots', required=True, nargs=-1)
-@launchable.record.tests
-def record_tests(client, source_roots):
+@smart_tests.record.tests
+def record_tests(
+    client,
+    source_roots: Annotated[List[str], typer.Argument(
+        help="Source root directories or files to process"
+    )],
+):
     for root in source_roots:
         match = False
         for t in glob.iglob(root, recursive=True):
@@ -53,7 +57,7 @@ def record_tests(client, source_roots):
                 client.report(t)

         if not match:
-            click.echo("No matches found: {}".format(root), err=True)
+            typer.echo(f"No matches found: {root}", err=True)
             return

     default_path_builder = client.path_builder
@@ -88,11 +92,3 @@ def format_same_bin(s: str) -> List[Dict[str, str]]:
     t = s.split(".")

     return [{"type": "class", "name": t[0]}, {"type": "testcase", "name": t[1]}]
-
-
-split_subset = launchable.CommonSplitSubsetImpls(
-    __name__,
-    formatter=lambda x: "^{}$".format(x[1]['name']),
-    seperator='|',
-    same_bin_formatter=format_same_bin,
-).split_subset()
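
End to end, the go-test flow stays pipe-based: the formatter and separator set in subset() above emit an anchored alternation that `go test -run` accepts directly. Roughly, following the commands in the e2e workflow (file names illustrative):

    $ go test -list="Test|Example" ./... | \
        smart-tests subset go-test --session "$(cat go-test-session.txt)" --confidence 80% > subset.txt
    $ cat subset.txt    # e.g. ^TestFoo$|^TestBar$
    $ go test -run "$(cat subset.txt)" ./...

diff --git a/launchable/test_runners/googletest.py b/smart_tests/test_runners/googletest.py
similarity index 73%
rename from launchable/test_runners/googletest.py
rename to smart_tests/test_runners/googletest.py
index 0f6b968a6..b63f1dc1c 100644
--- a/launchable/test_runners/googletest.py
+++ b/smart_tests/test_runners/googletest.py
@@ -1,14 +1,14 @@
 import re

 from ..testpath import TestPath
-from . import launchable
+from . 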
import smart_tests


 def make_test_path(cls, case) -> TestPath:
     return [{'type': 'class', 'name': cls}, {'type': 'testcase', 'name': case}]


-@launchable.subset
+@smart_tests.subset
 def subset(client):
     cls = ''
     class_pattern = re.compile(r'^([^\.]+)\.')
@@ -30,7 +30,4 @@ def subset(client):
     client.run()


-split_subset = launchable.CommonSplitSubsetImpls(__name__,
-                                                 formatter=lambda x: x[0]['name'] + "." + x[1]['name']).split_subset()
-
-record_tests = launchable.CommonRecordTestImpls(__name__).report_files()
+record_tests = smart_tests.CommonRecordTestImpls(__name__).report_files()
diff --git a/smart_tests/test_runners/gradle.py b/smart_tests/test_runners/gradle.py
new file mode 100644
index 000000000..f5f3eb9d8
--- /dev/null
+++ b/smart_tests/test_runners/gradle.py
@@ -0,0 +1,89 @@
+import os
+from typing import Annotated, List
+
+import typer
+
+from smart_tests.utils.java import junit5_nested_class_path_builder
+
+from ..utils.file_name_pattern import jvm_test_pattern
+from . import smart_tests
+
+
+@smart_tests.subset
+def subset(
+    client,
+    source_roots: Annotated[List[str] | None, typer.Argument(
+        help="Source root directories to scan for tests"
+    )] = None,
+    bare: Annotated[bool, typer.Option(
+        "--bare",
+        help="outputs class names alone"
+    )] = False,
+):
+    def file2test(f: str):
+        if jvm_test_pattern.match(f):
+            f = f[:f.rindex('.')]  # remove extension
+            # directory -> package name conversion
+            cls_name = f.replace(os.path.sep, '.')
+            return [{"type": "class", "name": cls_name}]
+        else:
+            return None
+
+    # Handle None source_roots - convert to empty list
+    if source_roots is None:
+        source_roots = []
+
+    if client.is_get_tests_from_previous_sessions:
+        if len(source_roots) != 0:
+            typer.secho(
+                "Warning: SOURCE_ROOTS are ignored when --get-tests-from-previous-sessions is used",
+                fg=typer.colors.YELLOW, err=True)
+        # Always set to empty list when getting tests from previous sessions
+        source_roots = []
+    else:
+        if len(source_roots) == 0:
+            raise typer.BadParameter("Missing argument 'SOURCE_ROOTS...'")
+
+    # Only scan if we have source roots
+    for root in source_roots:
+        client.scan(root, '**/*', file2test)
+
+    def exclusion_output_handler(subset_tests, rest_tests):
+        if client.rest:
+            with open(client.rest, "w+", encoding="utf-8") as fp:
+                if not bare and len(rest_tests) == 0:
+                    # This prevents the CLI output from being evaluated as an
+                    # empty string argument.
+ fp.write('-PdummyPlaceHolder') + else: + fp.write(client.separator.join(client.formatter(t) for t in rest_tests)) + + classes = [to_class_file(tp[0]['name']) for tp in rest_tests] + if bare: + typer.echo(','.join(classes)) + else: + typer.echo('-PexcludeTests=' + (','.join(classes))) + client.exclusion_output_handler = exclusion_output_handler + + if bare: + client.formatter = lambda x: x[0]['name'] + else: + client.formatter = lambda x: f"--tests {x[0]['name']}" + client.separator = ' ' + + client.run() + + +def to_class_file(class_name: str): + return class_name.replace('.', '/') + '.class' + + +@smart_tests.record.tests +def record_tests( + client, + reports: Annotated[List[str], typer.Argument( + help="Test report files to process" + )], +): + client.path_builder = junit5_nested_class_path_builder(client.path_builder) + smart_tests.CommonRecordTestImpls.load_report_files(client=client, source_roots=reports) diff --git a/launchable/test_runners/jest.py b/smart_tests/test_runners/jest.py similarity index 68% rename from launchable/test_runners/jest.py rename to smart_tests/test_runners/jest.py index eb21c9490..03f6bdd04 100644 --- a/launchable/test_runners/jest.py +++ b/smart_tests/test_runners/jest.py @@ -1,9 +1,11 @@ -import click +from typing import Annotated, List + +import typer from junitparser import TestCase, TestSuite # type: ignore -from launchable.testpath import TestPath +from smart_tests.testpath import TestPath -from . import launchable +from . import smart_tests def path_builder(case: TestCase, suite: TestSuite, report_file: str) -> TestPath: @@ -20,9 +22,13 @@ def path_builder(case: TestCase, suite: TestSuite, report_file: str) -> TestPath return test_path -@click.argument('reports', required=True, nargs=-1) -@launchable.record.tests -def record_tests(client, reports): +@smart_tests.record.tests +def record_tests( + client, + reports: Annotated[List[str], typer.Argument( + help="Test report files to process" + )], +): for r in reports: client.report(r) @@ -30,16 +36,13 @@ def record_tests(client, reports): client.run() -@launchable.subset +@smart_tests.subset def subset(client): if client.base_path is None: - raise click.BadParameter("Please specify base path") + raise typer.BadParameter("Please specify base path") for line in client.stdin(): if len(line.strip()) and not line.startswith(">"): client.test_path(line.rstrip("\n")) client.run() - - -split_subset = launchable.CommonSplitSubsetImpls(__name__).split_subset() diff --git a/launchable/test_runners/maven.py b/smart_tests/test_runners/maven.py similarity index 63% rename from launchable/test_runners/maven.py rename to smart_tests/test_runners/maven.py index deb87ccf5..d73aa819a 100644 --- a/launchable/test_runners/maven.py +++ b/smart_tests/test_runners/maven.py @@ -1,13 +1,13 @@ import glob import os -from typing import Dict, List, Optional +from typing import Annotated, Dict, List -import click +import typer -from launchable.utils import glob as uglob -from launchable.utils.java import junit5_nested_class_path_builder +from smart_tests.utils import glob as uglob +from smart_tests.utils.java import junit5_nested_class_path_builder -from . import launchable +from . 
import smart_tests # Surefire has the default inclusion pattern # https://maven.apache.org/surefire/maven-surefire-plugin/test-mojo.html#includes @@ -45,24 +45,22 @@ def is_file(f: str) -> bool: return False -@click.option( - '--test-compile-created-file', - 'test_compile_created_file', - required=False, - multiple=True, - type=click.Path(exists=True), - help="Please run `mvn test-compile` command to create input file for this option", -) -@click.option( - '--scan-test-compile-lst', - 'is_scan_test_compile_lst', - required=False, - is_flag=True, - help="Scan testCompile/default-testCompile/createdFiles.lst for *.lst files generated by `mvn compile` and use them as test inputs.", # noqa: E501 -) -@click.argument('source_roots', required=False, nargs=-1) -@launchable.subset -def subset(client, source_roots, test_compile_created_file, is_scan_test_compile_lst): +@smart_tests.subset +def subset( + client, + source_roots: Annotated[List[str] | None, typer.Argument( + help="Source root directories to scan for tests" + )] = None, + test_compile_created_file: Annotated[List[str] | None, typer.Option( + "--test-compile-created-file", + help="Please run `mvn test-compile` command to create input file for this option" + )] = None, + is_scan_test_compile_lst: Annotated[bool, typer.Option( + "--scan-test-compile-lst", + help="Scan testCompile/default-testCompile/createdFiles.lst for *.lst files generated by `mvn compile` and " + "use them as test inputs." + )] = False, +): def file2class_test_path(f: str) -> List[Dict[str, str]]: # remove extension @@ -72,25 +70,32 @@ def file2class_test_path(f: str) -> List[Dict[str, str]]: cls_name = f.replace(os.path.sep, '.') return [{"type": "class", "name": cls_name}] - def file2test(f: str) -> Optional[List]: + def file2test(f: str) -> List | None: if is_file(f): return file2class_test_path(f) else: return None + # Handle None values + if test_compile_created_file is None: + test_compile_created_file = [] + if source_roots is None: + source_roots = [] + files_to_read = list(test_compile_created_file) if is_scan_test_compile_lst: if len(test_compile_created_file) > 0: - click.echo(click.style( - "Warning: --test-compile-created-file is overridden by --scan-test-compile-lst", fg="yellow"), - err=True) + typer.secho( + "Warning: --test-compile-created-file is overridden by --scan-test-compile-lst", + fg=typer.colors.YELLOW, err=True) pattern = os.path.join('**', 'createdFiles.lst') files_to_read = glob.glob(pattern, recursive=True) if not files_to_read: - click.echo(click.style( - "Warning: No .lst files. Please run after executing `mvn test-compile`", fg="yellow"), + typer.secho( + "Warning: No .lst files. 
Please run after executing `mvn test-compile`", + fg=typer.colors.YELLOW, err=True) return @@ -99,15 +104,16 @@ def file2test(f: str) -> Optional[List]: with open(file, 'r') as f: lines = f.readlines() if len(lines) == 0: - click.echo(click.style( - "Warning: --test-compile-created-file {} is empty".format(file), fg="yellow"), + typer.secho( + f"Warning: --test-compile-created-file {file} is empty", + fg=typer.colors.YELLOW, err=True) - for l in lines: + for line in lines: # trim trailing newline - l = l.strip() + line = line.strip() - path = file2test(l) + path = file2test(line) if path: client.test_paths.append(path) else: @@ -117,15 +123,6 @@ def file2test(f: str) -> Optional[List]: client.run() -@launchable.split_subset -def split_subset(client): - def format_same_bin(s: str) -> List[Dict[str, str]]: - return [{"type": "class", "name": s}] - - client.same_bin_formatter = format_same_bin - client.run() - - # TestNG produces surefire-reports/testng-results.xml in TestNG's native format. # Surefire produces TEST-*.xml in JUnit format (see Surefire's StatelessXmlReporter.getReportFile) # In addition, TestNG also produces surefire-reports/junitreports/TEST-*.xml @@ -134,8 +131,12 @@ def format_same_bin(s: str) -> List[Dict[str, str]]: # # So to collectly find tests without duplications, we need to find surefire-reports/TEST-*.xml # not surefire-reports/**/TEST-*.xml nor surefire-reports/*.xml -@click.argument('reports', required=True, nargs=-1) -@launchable.record.tests -def record_tests(client, reports): +@smart_tests.record.tests +def record_tests( + client, + reports: Annotated[List[str], typer.Argument( + help="Test report files to process" + )], +): client.path_builder = junit5_nested_class_path_builder(client.path_builder) - launchable.CommonRecordTestImpls.load_report_files(client=client, source_roots=reports) + smart_tests.CommonRecordTestImpls.load_report_files(client=client, source_roots=reports) diff --git a/launchable/test_runners/minitest.py b/smart_tests/test_runners/minitest.py similarity index 71% rename from launchable/test_runners/minitest.py rename to smart_tests/test_runners/minitest.py index 8af49102f..5f414d028 100644 --- a/launchable/test_runners/minitest.py +++ b/smart_tests/test_runners/minitest.py @@ -1,18 +1,23 @@ -import click +from typing import Annotated, List + +import typer from junitparser import TestCase, TestSuite # type: ignore from ..testpath import TestPath -from . import launchable +from . 
import smart_tests -subset = launchable.CommonSubsetImpls(__name__).scan_files('*_test.rb') -split_subset = launchable.CommonSplitSubsetImpls(__name__).split_subset() +subset = smart_tests.CommonSubsetImpls(__name__).scan_files('*_test.rb') TEST_PATH_ORDER = {"file": 1, "class": 2, "testcase": 3} -@click.argument('reports', required=True, nargs=-1) -@launchable.record.tests -def record_tests(client, reports): +@smart_tests.record.tests +def record_tests( + client, + reports: Annotated[List[str], typer.Argument( + help="Test report files to process" + )], +): default_path_builder = client.path_builder def path_builder(case: TestCase, suite: TestSuite, report_file: str) -> TestPath: @@ -29,4 +34,4 @@ def path_builder(case: TestCase, suite: TestSuite, report_file: str) -> TestPath client.path_builder = path_builder - launchable.CommonRecordTestImpls.load_report_files(client=client, source_roots=reports) + smart_tests.CommonRecordTestImpls.load_report_files(client=client, source_roots=reports) diff --git a/launchable/test_runners/nunit.py b/smart_tests/test_runners/nunit.py similarity index 89% rename from launchable/test_runners/nunit.py rename to smart_tests/test_runners/nunit.py index 2b2836323..8f7cd5842 100644 --- a/launchable/test_runners/nunit.py +++ b/smart_tests/test_runners/nunit.py @@ -1,12 +1,12 @@ -from typing import Callable, Dict, List +from typing import Annotated, Callable, Dict, List from xml.etree import ElementTree as ET -import click +import typer -from launchable.commands.record.case_event import CaseEvent -from launchable.testpath import TestPath, parse_test_path, unparse_test_path +from smart_tests.commands.record.case_event import CaseEvent +from smart_tests.testpath import TestPath, parse_test_path, unparse_test_path -from . import launchable +from . 
import smart_tests # common code between 'subset' & 'record tests' to build up test path from # nested s @@ -124,9 +124,13 @@ def on_element(e: ET.Element, parent: ET.Element): return (x for x in events) -@click.argument('report_xmls', type=click.Path(exists=True), required=True, nargs=-1) -@launchable.subset -def subset(client, report_xmls): +@smart_tests.subset +def subset( + client, + report_xmls: Annotated[List[str], typer.Argument( + help="Test report XML files to process" + )], +): """ Parse an XML file produced from NUnit --explore option to list up all the viable test cases """ @@ -145,15 +149,15 @@ def on_element(e: ET.Element, parent: ET.Element): client.run() -split_subset = launchable.CommonSplitSubsetImpls(__name__, formatter=lambda x: '.'.join( - [c['name'] for c in x if c['type'] not in ['ParameterizedMethod', 'Assembly']])).split_subset() - - -@click.argument('report_xml', required=True, nargs=-1) -@launchable.record.tests -def record_tests(client, report_xml): +@smart_tests.record.tests +def record_tests( + client, + report_xml: Annotated[List[str], typer.Argument( + help="Test report XML files to process" + )], +): client.parse_func = nunit_parse_func - launchable.CommonRecordTestImpls.load_report_files(client=client, source_roots=report_xml) + smart_tests.CommonRecordTestImpls.load_report_files(client=client, source_roots=report_xml) """ diff --git a/launchable/test_runners/playwright.py b/smart_tests/test_runners/playwright.py similarity index 92% rename from launchable/test_runners/playwright.py rename to smart_tests/test_runners/playwright.py index cd286e3a8..c2ad9f289 100644 --- a/launchable/test_runners/playwright.py +++ b/smart_tests/test_runners/playwright.py @@ -3,22 +3,29 @@ # https://playwright.dev/ # import json -from typing import Dict, Generator, List +from typing import Annotated, Dict, Generator, List -import click +import typer from junitparser import TestCase, TestSuite # type: ignore from ..commands.record.case_event import CaseEvent from ..testpath import TestPath -from . import launchable +from . 
import smart_tests TEST_CASE_DELIMITER = " › " -@click.option('--json', 'json_format', help="use JSON report format", is_flag=True) -@click.argument('reports', required=True, nargs=-1) -@launchable.record.tests -def record_tests(client, reports, json_format): +@smart_tests.record.tests +def record_tests( + client, + reports: Annotated[List[str], typer.Argument( + help="Test report files to process" + )], + json_format: Annotated[bool, typer.Option( + "--json", + help="use JSON report format" + )] = False, +): def path_builder(case: TestCase, suite: TestSuite, report_file: str) -> TestPath: """ @@ -32,8 +39,7 @@ def path_builder(case: TestCase, suite: TestSuite, """ filepath = suite.name if not filepath: - raise click.ClickException( - "No file name found in %s" % report_file) + raise typer.BadParameter("No file name found in %s" % report_file) test_path = [client.make_file_path_component(filepath)] if case.name: @@ -51,7 +57,7 @@ def path_builder(case: TestCase, suite: TestSuite, client.run() -@launchable.subset +@smart_tests.subset def subset(client): # read lines as test file names for t in client.stdin(): @@ -60,9 +66,6 @@ def subset(client): client.run() -split_subset = launchable.CommonSplitSubsetImpls(__name__).split_subset() - - class JSONReportParser: """ example of JSON reporter format: @@ -172,13 +175,11 @@ def parse_func(self, report_file: str) -> Generator[CaseEvent, None, None]: try: data = json.load(json_file) except Exception as e: - raise Exception("Can't read JSON format report file {}. Make sure to confirm report file.".format( - report_file)) from e + raise Exception(f"Can't read JSON format report file {report_file}. Make sure to confirm report file.") from e suites: List[Dict[str, Dict]] = list(data.get("suites", [])) if len(suites) == 0: - click.echo("Can't find test results from {}. Make sure to confirm report file.".format( - report_file), err=True) + typer.echo(f"Can't find test results from {report_file}. Make sure to confirm report file.", err=True) for s in suites: # The title of the root suite object contains the file name. diff --git a/launchable/test_runners/prove.py b/smart_tests/test_runners/prove.py similarity index 86% rename from launchable/test_runners/prove.py rename to smart_tests/test_runners/prove.py index 67c83910a..012b1a6f3 100644 --- a/launchable/test_runners/prove.py +++ b/smart_tests/test_runners/prove.py @@ -1,10 +1,11 @@ import re +from typing import Annotated, List -import click +import typer from junitparser import TestCase, TestSuite # type: ignore from ..testpath import TestPath -from . import launchable +from . import smart_tests TEARDOWN = "(teardown)" @@ -14,7 +15,7 @@ def remove_leading_number_and_dash(input_string: str) -> str: return result -@launchable.subset +@smart_tests.subset def subset(client): # read lines as test file names for t in client.stdin(): @@ -23,9 +24,13 @@ def subset(client): client.run() -@click.argument('reports', required=True, nargs=-1) -@launchable.record.tests -def record_tests(client, reports): +@smart_tests.record.tests +def record_tests( + client, + reports: Annotated[List[str], typer.Argument( + help="Test report files to process" + )], +): def path_builder(case: TestCase, suite: TestSuite, report_file: str) -> TestPath: def find_filename(): @@ -41,7 +46,7 @@ def find_filename(): filepath = find_filename() if not filepath: - raise click.ClickException( + raise typer.BadParameter( "No file name found in %s." 
"Perl prove profile is made to take Junit report produced by " "TAP::Formatter::JUnit (https://github.com/bleargh45/TAP-Formatter-JUnit), " @@ -62,6 +67,3 @@ def find_filename(): for r in reports: client.report(r) client.run() - - -split_subset = launchable.CommonSplitSubsetImpls(__name__).split_subset() diff --git a/launchable/test_runners/pytest.py b/smart_tests/test_runners/pytest.py similarity index 90% rename from launchable/test_runners/pytest.py rename to smart_tests/test_runners/pytest.py index 3341628ff..993c7909b 100644 --- a/launchable/test_runners/pytest.py +++ b/smart_tests/test_runners/pytest.py @@ -3,15 +3,15 @@ import os import pathlib import subprocess -from typing import Generator, List +from typing import Annotated, Generator, List -import click +import typer from junitparser import Properties, TestCase # type: ignore -from launchable.commands.record.case_event import CaseEvent, CaseEventType, MetadataTestCase -from launchable.testpath import TestPath +from smart_tests.commands.record.case_event import CaseEvent, CaseEventType, MetadataTestCase +from smart_tests.testpath import TestPath -from . import launchable +from . import smart_tests # Please specify junit_family=legacy for pytest report format. if using pytest version 6 or higher. @@ -36,9 +36,13 @@ # # -@click.argument('source_roots', required=False, nargs=-1) -@launchable.subset -def subset(client, source_roots: List[str]): +@smart_tests.subset +def subset( + client, + source_roots: Annotated[List[str] | None, typer.Argument( + help="Source root directories for pytest test collection" + )] = None, +): def _add_testpaths(lines: List[str]): for line in lines: line = line.rstrip() @@ -48,6 +52,7 @@ def _add_testpaths(lines: List[str]): test_path = _parse_pytest_nodeid(line) client.test_path(test_path) + if not source_roots: _add_testpaths(client.stdin()) else: @@ -57,7 +62,7 @@ def _add_testpaths(lines: List[str]): result = subprocess.run(command, stdout=subprocess.PIPE, universal_newlines=True) _add_testpaths(result.stdout.split(os.linesep)) except FileNotFoundError: - raise click.ClickException("pytest command not found. Please check the path.") + raise typer.BadParameter("pytest command not found. 
Please check the path.")

     client.formatter = _pytest_formatter
     client.run()

@@ -110,8 +115,9 @@ def _pytest_formatter(test_path):

     # junitformat ->
+
     if cls_name == _path_to_class_name(file):
-        return "{}::{}".format(file, case)
+        return f"{file}::{case}"

     else:
         # junitformat's class name includes package, but pytest does not
@@ -119,17 +125,23 @@ def _pytest_formatter(test_path):

     # junitformat ->
-        return "{}::{}::{}".format(file, cls_name.split(".")[-1], case)
-
-
-split_subset = launchable.CommonSplitSubsetImpls(__name__, formatter=_pytest_formatter).split_subset()
-
-
-@click.option('--json', 'json_report', help="use JSON report files produced by pytest-dev/pytest-reportlog",
-              is_flag=True)
-@click.argument('source_roots', required=True, nargs=-1)
-@launchable.record.tests
-def record_tests(client, json_report, source_roots):
+    if cls_name:
+        return f"{file}::{cls_name.split('.')[-1]}::{case}"
+    else:
+        return f"{file}::{case}"
+
+
+@smart_tests.record.tests
+def record_tests(
+    client,
+    source_roots: Annotated[List[str], typer.Argument(
+        help="Source directories containing test report files"
+    )],
+    json_report: Annotated[bool, typer.Option(
+        "--json",
+        help="use JSON report files produced by pytest-dev/pytest-reportlog"
+    )] = False,
+):
     def data_builder(case: TestCase):
         props = case.child(Properties)

@@ -164,12 +176,12 @@ def data_builder(case: TestCase):
     for t in glob.iglob(root, recursive=True):
         match = True
         if os.path.isdir(t):
-            client.scan(t, "*.{}".format(ext))
+            client.scan(t, f"*.{ext}")
         else:
             client.report(t)

     if not match:
-        click.echo("No matches found: {}".format(root), err=True)
+        typer.echo(f"No matches found: {root}", err=True)
         return

     if json_report:
@@ -189,7 +201,7 @@ def data_builder(case: TestCase):
 ```
 $ pip install -U pytest-reportlog
 $ pytest --report-log report.json
-$ launchable record tests --json report.json
+$ smart-tests record test pytest --json report.json
 ```
 """

@@ -205,8 +217,7 @@ def parse_func(
         try:
             data = json.loads(line)
         except Exception as e:
-            raise Exception("Can't read JSON format report file {}. Make sure to confirm report file.".format(
-                report_file)) from e
+            raise Exception(f"Can't read JSON format report file {report_file}. Make sure to confirm report file.") from e

         nodeid = data.get("nodeid", "")
         if nodeid == "":
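
The pytest flow mirrors the others: collect, pipe into subset, run the subset, then record the JUnit XML. A sketch following the e2e workflow's flag conventions (session file name illustrative; `junit_family=legacy` per the comment at the top of this module):

    $ pytest --collect-only -q | \
        smart-tests subset pytest --session "$(cat session.txt)" --target 30% > subset.txt
    $ pytest $(cat subset.txt) -o junit_family=legacy --junit-xml=report.xml
    $ smart-tests record test pytest --session "$(cat session.txt)" report.xml

diff --git a/launchable/test_runners/raw.py b/smart_tests/test_runners/raw.py
similarity index 84%
rename from launchable/test_runners/raw.py
rename to smart_tests/test_runners/raw.py
index cf52d0245..f79c4ffcc 100644
--- a/launchable/test_runners/raw.py
+++ b/smart_tests/test_runners/raw.py
@@ -1,19 +1,23 @@
 import datetime
 import json
 import sys
-from typing import Generator
+from typing import Annotated, Generator, List

-import click
 import dateutil.parser
+import typer

 from ..commands.record.case_event import CaseEvent, CaseEventType
 from ..testpath import TestPath, parse_test_path, unparse_test_path
-from . import launchable
+from . import smart_tests

-@click.argument('test_path_file', required=False, type=click.File('r'))
-@launchable.subset
-def subset(client, test_path_file):
+@smart_tests.subset
+def subset(
+    client,
+    test_path_file: Annotated[str | None, typer.Argument(
+        help="File containing test paths, one per line"
+    )] = None,
+):
     """Subset tests

     TEST_PATH_FILE is a file that contains test paths (e.g.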
@@ -22,15 +26,17 @@ def subset(client, test_path_file):
     """
     if not client.is_get_tests_from_previous_sessions and test_path_file is None:
-        raise click.BadArgumentUsage("Missing argument 'TEST_PATH_FILE'.")
+        raise typer.BadParameter("Missing argument 'TEST_PATH_FILE'.")

     if client.is_output_exclusion_rules:
-        raise click.BadArgumentUsage(
+        raise typer.BadParameter(
             "Don't need to use `--output-exclusion-rules` option. Please use `--rest` option and use it for exclusion"
         )

     if not client.is_get_tests_from_previous_sessions:
-        tps = [s.strip() for s in test_path_file.readlines()]
+        assert test_path_file is not None  # Guaranteed by earlier check
+        with open(test_path_file, 'r') as f:
+            tps = [s.strip() for s in f.readlines()]
         for tp_str in tps:
             if not tp_str or tp_str.startswith('#'):
                 continue
@@ -45,12 +51,13 @@ def subset(client, test_path_file):
     client.run()


-split_subset = launchable.CommonSplitSubsetImpls(__name__, formatter=unparse_test_path, seperator='\n').split_subset()
-
-
-@click.argument('test_result_files', required=True, type=click.Path(exists=True), nargs=-1)
-@launchable.record.tests
-def record_tests(client, test_result_files):
+@smart_tests.record.tests
+def record_tests(
+    client,
+    test_result_files: Annotated[List[str], typer.Argument(
+        help="Test result files (JSON or JUnit XML)"
+    )],
+):
     """Record test results

     TEST_RESULT_FILE is a file that contains a JSON document or JUnit XML file
@@ -184,17 +191,16 @@ def parse_json(test_result_file: str) -> Generator[CaseEventType, None, None]:
         try:
             duration_secs = float(duration_secs)
         except ValueError:
-            raise ValueError("The duration of {} in {} isn't a valid format (was {}). Make sure set a valid duration".format(test_path_components, test_result_file, duration_secs))  # noqa
+            raise ValueError(f"The duration of {test_path_components} in {test_result_file} isn't in a valid format (was {duration_secs}). Make sure to set a valid duration")  # noqa

         created_at = case.get('createdAt', default_created_at)

         if status not in CaseEvent.STATUS_MAP:
             raise ValueError(
-                "The status of {} should be one of {} (was {})".format(test_path_components,
-                                                                       list(CaseEvent.STATUS_MAP.keys()), status))
+                f"The status of {test_path_components} should be one of {list(CaseEvent.STATUS_MAP.keys())} (was {status})")

         if duration_secs < 0:
-            raise ValueError("The duration of {} should be positive (was {})".format(test_path_components, duration_secs))
+            raise ValueError(f"The duration of {test_path_components} should be positive (was {duration_secs})")

         dateutil.parser.parse(created_at)

         metadata = case.get('data', None)
diff --git a/launchable/test_runners/robot.py b/smart_tests/test_runners/robot.py
similarity index 88%
rename from launchable/test_runners/robot.py
rename to smart_tests/test_runners/robot.py
index 81f7f6efc..ae6cc7250 100644
--- a/launchable/test_runners/robot.py
+++ b/smart_tests/test_runners/robot.py
@@ -1,11 +1,12 @@
 from datetime import datetime
+from typing import Annotated, List
 from xml.etree import ElementTree as ET

-import click
+import typer
 from junitparser import JUnitXml  # type: ignore

 from ..testpath import TestPath
-from . import launchable
+from . 
import smart_tests def parse_func(p: str) -> ET.ElementTree: @@ -66,9 +67,13 @@ def parse_suite(suite: ET.Element): return ET.ElementTree(testsuite) -@click.argument('reports', required=True, nargs=-1) -@launchable.record.tests -def record_tests(client, reports): +@smart_tests.record.tests +def record_tests( + client, + reports: Annotated[List[str], typer.Argument( + help="Test report files to process" + )], +): for r in reports: client.report(r) @@ -76,9 +81,13 @@ def record_tests(client, reports): client.run() -@click.argument('reports', required=True, nargs=-1) -@launchable.subset -def subset(client, reports): +@smart_tests.subset +def subset( + client, + reports: Annotated[List[str], typer.Argument( + help="Test report files to process" + )], +): for r in reports: xml = JUnitXml.fromfile(r, parse_func) @@ -94,13 +103,6 @@ def subset(client, reports): client.run() -@launchable.split_subset -def split_subset(client): - client.formatter = robot_formatter - client.separator = " " - client.run() - - def robot_formatter(x: TestPath): cls_name = '' case = '' @@ -112,6 +114,6 @@ def robot_formatter(x: TestPath): case = path['name'] if cls_name != '' and case != '': - return "-s '{}' -t '{}'".format(cls_name, case) + return f"-s '{cls_name}' -t '{case}'" return '' diff --git a/smart_tests/test_runners/rspec.py b/smart_tests/test_runners/rspec.py new file mode 100644 index 000000000..ee2c1850e --- /dev/null +++ b/smart_tests/test_runners/rspec.py @@ -0,0 +1,4 @@ +from . import smart_tests + +subset = smart_tests.CommonSubsetImpls(__name__).scan_files('*_spec.rb') +record_tests = smart_tests.CommonRecordTestImpls(__name__).report_files() diff --git a/launchable/test_runners/launchable.py b/smart_tests/test_runners/smart_tests.py similarity index 57% rename from launchable/test_runners/launchable.py rename to smart_tests/test_runners/smart_tests.py index 92119326c..70bf959a5 100644 --- a/launchable/test_runners/launchable.py +++ b/smart_tests/test_runners/smart_tests.py @@ -2,49 +2,50 @@ import os import sys import types +from typing import Annotated -import click +import typer -from launchable.commands.record.tests import tests as record_tests_cmd -from launchable.commands.split_subset import split_subset as split_subset_cmd -from launchable.commands.subset import subset as subset_cmd - - -def cmdname(m): - """figure out the sub-command name from a test runner function""" - - # a.b.cde -> cde - # xyz -> xyz - # - # In python module name the conventional separator is '_' but in command name, - # it is '-', so we do replace that - return m[m.rfind('.') + 1:].replace('_', '-') +from smart_tests.commands.record.tests import app as record_tests_cmd +from smart_tests.commands.subset import app as subset_cmd +from smart_tests.utils.test_runner_registry import cmdname, create_test_runner_wrapper, get_registry +# Legacy wrap function for CommonImpls classes def wrap(f, group, name=None): - """ - Wraps a 'plugin' function into a click command and registers it to the given group. - - a plugin function receives the scanner object in its first argument - """ + """Legacy wrapper function for CommonImpls classes.""" if not name: name = cmdname(f.__module__) - d = click.command(name=name) - cmd = d(click.pass_obj(f)) - group.add_command(cmd) + wrapper = create_test_runner_wrapper(f, name) + cmd = group.command(name=name)(wrapper) return cmd +# NestedCommand-only decorators (no backward compatibility) def subset(f): - return wrap(f, subset_cmd) + """ + Register a subset function with the test runner registry. 
+ + This stores the function for later dynamic command generation in NestedCommand. + """ + test_runner_name = cmdname(f.__module__) + registry = get_registry() + registry.register_subset(test_runner_name, f) + return f record = types.SimpleNamespace() -record.tests = lambda f: wrap(f, record_tests_cmd) -def split_subset(f): - return wrap(f, split_subset_cmd) +def _record_tests_decorator(f): + """Register a record tests function with the test runner registry.""" + test_runner_name = cmdname(f.__module__) + registry = get_registry() + registry.register_record_tests(test_runner_name, f) + return f + + +record.tests = _record_tests_decorator class CommonSubsetImpls: @@ -61,21 +62,25 @@ def scan_files(self, pattern): :param pattern: file masks that identify test files, such as '*_spec.rb' """ - @click.argument('files', required=True, nargs=-1) - def subset(client, files): + def subset( + client, + files: Annotated[list[str], typer.Argument( + help="Test files or directories to include in the subset" + )] + ): # client type: Optimize in def lauchable.commands.subset.subset def parse(fname: str): if os.path.isdir(fname): client.scan(fname, '**/' + pattern) elif fname == '@-': # read stdin - for l in sys.stdin: - parse(l.rstrip()) + for line in sys.stdin: + parse(line.rstrip()) elif fname.startswith('@'): # read response file with open(fname[1:]) as f: - for l in f: - parse(l.rstrip()) + for line in f: + parse(line.rstrip()) else: # assume it's a file client.test_path(fname) @@ -84,6 +89,11 @@ def parse(fname: str): parse(f) client.run() + + # Register with new registry system for NestedCommand + registry = get_registry() + registry.register_subset(self.cmdname, subset) + return wrap(subset, subset_cmd, self.cmdname) @@ -102,10 +112,18 @@ def report_files(self, file_mask="*.xml"): 'record tests' expect JUnit report/XML file names. """ - @click.argument('source_roots', required=True, nargs=-1) - def record_tests(client, source_roots): + def record_tests( + client, + source_roots: Annotated[list[str], typer.Argument( + help="Source directories containing test report files" + )] + ): CommonRecordTestImpls.load_report_files(client=client, source_roots=source_roots, file_mask=file_mask) + # Register with new registry system for NestedCommand + registry = get_registry() + registry.register_record_tests(self.cmdname, record_tests) + return wrap(record_tests, record_tests_cmd, self.cmdname) @classmethod @@ -129,39 +147,8 @@ def load_report_files(cls, client, source_roots, file_mask="*.xml"): # raise it as an error. Note this can happen for reasons other than a configuration error. # For example, if a build catastrophically failed and no # tests got run. 
- click.echo("No matches found: {}".format(root), err=True) + typer.echo(f"No matches found: {root}", err=True) # intentionally exiting with zero return client.run() - - -class CommonSplitSubsetImpls: - def __init__( - self, - module_name, - formatter=None, - seperator=None, - same_bin_formatter=None, - ): - self.cmdname = cmdname(module_name) - self._formatter = formatter - self._separator = seperator - self._same_bin_formatter = same_bin_formatter - - def split_subset(self): - def split_subset(client): - # client type: SplitSubset in def - # lauchable.commands.split_subset.split_subset - if self._formatter: - client.formatter = self._formatter - - if self._separator: - client.separator = self._separator - - if self._same_bin_formatter: - client.same_bin_formatter = self._same_bin_formatter - - client.run() - - return wrap(split_subset, split_subset_cmd, self.cmdname) diff --git a/launchable/test_runners/vitest.py b/smart_tests/test_runners/vitest.py similarity index 73% rename from launchable/test_runners/vitest.py rename to smart_tests/test_runners/vitest.py index d8731d5b5..90ac6c27f 100644 --- a/launchable/test_runners/vitest.py +++ b/smart_tests/test_runners/vitest.py @@ -1,13 +1,18 @@ import xml.etree.ElementTree as ET +from typing import Annotated, List -import click +import typer -from . import launchable +from . import smart_tests -@click.argument('reports', required=True, nargs=-1) -@launchable.record.tests -def record_tests(client, reports): +@smart_tests.record.tests +def record_tests( + client, + reports: Annotated[List[str], typer.Argument( + help="Test report files to process" + )], +): def parse_func(report: str) -> ET.ElementTree: """ Vitest junit report doesn't set file/filepath attributes on test cases, and it's set as a classname attribute instead. @@ -25,10 +30,10 @@ def parse_func(report: str) -> ET.ElementTree: return tree client.junitxml_parse_func = parse_func - launchable.CommonRecordTestImpls.load_report_files(client=client, source_roots=reports) + smart_tests.CommonRecordTestImpls.load_report_files(client=client, source_roots=reports) -@launchable.subset +@smart_tests.subset def subset(client): # read lines as test file names for t in client.stdin(): diff --git a/launchable/test_runners/xctest.py b/smart_tests/test_runners/xctest.py similarity index 70% rename from launchable/test_runners/xctest.py rename to smart_tests/test_runners/xctest.py index bcb21e60b..0783ddb0f 100644 --- a/launchable/test_runners/xctest.py +++ b/smart_tests/test_runners/xctest.py @@ -1,16 +1,21 @@ import html import xml.etree.ElementTree as ET # type: ignore +from typing import Annotated, List -import click +import typer from junitparser import TestCase, TestSuite from ..testpath import TestPath -from . import launchable +from . 
import smart_tests -@click.argument('reports', nargs=-1, required=True) -@launchable.record.tests -def record_tests(client, reports): +@smart_tests.record.tests +def record_tests( + client, + reports: Annotated[List[str], typer.Argument( + help="Test report files to process" + )], +): def parse_func(p: str) -> ET.ElementTree: tree = ET.parse(p) @@ -39,16 +44,15 @@ def path_builder(case: TestCase, suite: TestSuite, report_path: str) -> TestPath client.junitxml_parse_func = parse_func client.path_builder = path_builder - launchable.CommonRecordTestImpls.load_report_files(client=client, source_roots=reports) + smart_tests.CommonRecordTestImpls.load_report_files(client=client, source_roots=reports) -@launchable.subset +@smart_tests.subset def subset(client): if not client.is_get_tests_from_previous_sessions or not client.is_output_exclusion_rules: - click.echo( - click.style( - "XCTest profile only supports the subset with `--get-tests-from-previous-sessions` and `--output-exclusion-rules` options", # noqa: E501 - fg="red"), + typer.secho( + "XCTest profile only supports the subset with `--get-tests-from-previous-sessions` and `--output-exclusion-rules` options", # noqa: E501 + fg=typer.colors.RED, err=True, ) @@ -58,10 +62,10 @@ def formatter(test_path: TestPath) -> str: # only target case if len(test_path) == 1: - return "-skip-testing:{}".format(test_path[0]['name']) + return f"-skip-testing:{test_path[0]['name']}" # default target/class format - return "-skip-testing:{}/{}".format(test_path[0]['name'], test_path[1]['name']) + return f"-skip-testing:{test_path[0]['name']}/{test_path[1]['name']}" client.formatter = formatter client.separator = "\n" diff --git a/launchable/testpath.py b/smart_tests/testpath.py similarity index 84% rename from launchable/testpath.py rename to smart_tests/testpath.py index 5a9456fc6..b84434a6e 100644 --- a/launchable/testpath.py +++ b/smart_tests/testpath.py @@ -1,18 +1,18 @@ import os import pathlib import subprocess -import sys import urllib -from typing import Dict, List, Optional, Tuple + +# No additional typing imports needed # Path component is a node in a tree. # It's the equivalent of a short file/directory name in a file system. # In our abstraction, it's represented as arbitrary bag of attributes -TestPathComponent = Dict[str, str] +TestPathComponent = dict[str, str] # TestPath is a full path to a node in a tree from the root # It's the equivalent of an absolute file name in a file system -TestPath = List[TestPathComponent] +TestPath = list[TestPathComponent] def parse_test_path(tp_str: str) -> TestPath: @@ -23,7 +23,7 @@ def parse_test_path(tp_str: str) -> TestPath: for component_str in tp_str.split('#'): if component_str == '&': # Technically, this should be mapped to {None:None}. But because the - # TestPath definition is now Dict[str, str], not Dict[Optional[str], + # TestPath definition is now dict[str, str], not dict[str | None, # str | None], we cannot add it. Fixing this definition needs to # fix callers not to assume they are always str. In practice, this # is a rare case. Do not append {None: None} now... 
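To make the test path grammar above concrete, here is a minimal sketch of how the parser behaves; it assumes a hypothetical path whose '#'-separated components each carry a single key=value attribute (the '&' placeholder and escaping rules are handled by the helpers in this file and are not exercised here):

from smart_tests.testpath import parse_test_path

# Hypothetical path string; each '#'-separated component is one key=value pair.
tp = parse_test_path('file=tests/test_foo.py#class=FooTest')
print(tp)  # expected: [{'file': 'tests/test_foo.py'}, {'class': 'FooTest'}]
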
@@ -43,7 +43,7 @@ def parse_test_path(tp_str: str) -> TestPath: return ret -def _parse_kv(kv: str) -> Tuple[str, str]: +def _parse_kv(kv: str) -> tuple[str, str]: kvs = kv.split('=') if len(kvs) != 2: raise ValueError('Malformed TestPath component: ' + kv) @@ -85,14 +85,7 @@ def _encode_str(s: str) -> str: def _relative_to(p: pathlib.Path, base: str) -> pathlib.Path: - if sys.version_info[0:2] >= (3, 6): - return p.resolve(strict=False).relative_to(base) - else: - try: - resolved = p.resolve() - except BaseException: - resolved = p - return resolved.relative_to(base) + return p.resolve(strict=False).relative_to(base) class FilePathNormalizer: @@ -104,10 +97,10 @@ class FilePathNormalizer: repository root. """ - def __init__(self, base_path: Optional[str] = None, no_base_path_inference: bool = False): + def __init__(self, base_path: str | None = None, no_base_path_inference: bool = False): self._base_path = base_path self._no_base_path_inference = no_base_path_inference - self._inferred_base_path = None # type: Optional[str] + self._inferred_base_path = None # type: str | None def relativize(self, p: str) -> str: return str(self._relativize(pathlib.Path(os.path.normpath(p)))) @@ -130,8 +123,17 @@ def _relativize(self, p: pathlib.Path) -> pathlib.Path: return p - def _auto_infer_base_path(self, p: pathlib.Path) -> Optional[str]: - p = p.parent + def get_effective_base_path(self) -> str | None: + """Get the effective base path, either explicitly set or inferred.""" + if self._base_path: + return self._base_path + return self._inferred_base_path + + def _auto_infer_base_path(self, p: pathlib.Path) -> str | None: + # If p is a file, start from its parent directory + if p.is_file(): + p = p.parent + while p != p.root and not p.exists(): p = p.parent try: diff --git a/launchable/utils/__init__.py b/smart_tests/utils/__init__.py similarity index 100% rename from launchable/utils/__init__.py rename to smart_tests/utils/__init__.py diff --git a/launchable/utils/authentication.py b/smart_tests/utils/authentication.py similarity index 61% rename from launchable/utils/authentication.py rename to smart_tests/utils/authentication.py index a6f01abb0..9330ed78c 100644 --- a/launchable/utils/authentication.py +++ b/smart_tests/utils/authentication.py @@ -1,14 +1,14 @@ import os from typing import Tuple -import click import requests +import typer -from .env_keys import ORGANIZATION_KEY, TOKEN_KEY, WORKSPACE_KEY +from .env_keys import ORGANIZATION_KEY, WORKSPACE_KEY, get_token def get_org_workspace(): - token = os.getenv(TOKEN_KEY) + token = get_token() if token: try: _, user, _ = token.split(":", 2) @@ -23,38 +23,38 @@ def get_org_workspace(): def ensure_org_workspace() -> Tuple[str, str]: org, workspace = get_org_workspace() if org is None or workspace is None: - raise click.UsageError( - click.style( - "Could not identify Launchable organization/workspace. " - "Please confirm if you set LAUNCHABLE_TOKEN or LAUNCHABLE_ORGANIZATION and " - "LAUNCHABLE_WORKSPACE environment variables", - fg="red")) + typer.secho( + "Could not identify Smart Tests organization/workspace. 
" + "Please confirm if you set SMART_TESTS_TOKEN " + "(or LAUNCHABLE_TOKEN for backward compatibility) or SMART_TESTS_ORGANIZATION and " + "SMART_TESTS_WORKSPACE environment variables", fg=typer.colors.RED, err=True) + raise typer.Exit(1) return org, workspace def authentication_headers(): - token = os.getenv(TOKEN_KEY) + token = get_token() if token: - return {'Authorization': 'Bearer {}'.format(token)} + return {'Authorization': f'Bearer {token}'} if os.getenv('EXPERIMENTAL_GITHUB_OIDC_TOKEN_AUTH'): req_url = os.getenv('ACTIONS_ID_TOKEN_REQUEST_URL') rt_token = os.getenv('ACTIONS_ID_TOKEN_REQUEST_TOKEN') if not req_url or not rt_token: - raise click.UsageError( - click.style( - "GitHub Actions OIDC tokens cannot be retrieved." - "Confirm that you have added necessary permissions following " - "https://docs.github.com/en/actions/deployment/security-hardening-your-deployments/configuring-openid-connect-in-cloud-providers#adding-permissions-settings", # noqa: E501 - fg="red")) + typer.secho( + "GitHub Actions OIDC tokens cannot be retrieved." + "Confirm that you have added necessary permissions following " + "https://docs.github.com/en/actions/deployment/security-hardening-your-deployments/configuring-openid-connect-in-cloud-providers#adding-permissions-settings", # noqa: E501 + fg=typer.colors.RED, err=True) + raise typer.Exit(1) r = requests.get(req_url, headers={ - 'Authorization': 'Bearer {}'.format(rt_token), + 'Authorization': f'Bearer {rt_token}', 'Accept': 'application/json; api-version=2.0', 'Content-Type': 'application/json', }) r.raise_for_status() - return {'Authorization': 'Bearer {}'.format(r.json()['value'])} + return {"Authorization": f"Bearer {r.json()["value"]}"} if os.getenv('GITHUB_ACTIONS'): headers = { diff --git a/launchable/utils/ci_provider.py b/smart_tests/utils/ci_provider.py similarity index 100% rename from launchable/utils/ci_provider.py rename to smart_tests/utils/ci_provider.py diff --git a/launchable/utils/commands.py b/smart_tests/utils/commands.py similarity index 100% rename from launchable/utils/commands.py rename to smart_tests/utils/commands.py diff --git a/launchable/utils/commit_ingester.py b/smart_tests/utils/commit_ingester.py similarity index 90% rename from launchable/utils/commit_ingester.py rename to smart_tests/utils/commit_ingester.py index ac5fe7963..87b1885ec 100644 --- a/launchable/utils/commit_ingester.py +++ b/smart_tests/utils/commit_ingester.py @@ -1,17 +1,17 @@ import hashlib from datetime import tzinfo -from typing import Dict, List, Optional +from typing import Dict, List from ..app import Application from .git_log_parser import GitCommit -from .launchable_client import LaunchableClient +from .smart_tests_client import SmartTestsClient def _sha256(s: str) -> str: return hashlib.sha256(s.encode('utf8')).hexdigest() -def _format_tzinfo(tz: Optional[tzinfo]) -> int: +def _format_tzinfo(tz: tzinfo | None) -> int: if not tz: return 0 delta = tz.utcoffset(None) @@ -54,6 +54,6 @@ def upload_commits(commits: List[GitCommit], app: Application): 'commits': [_convert_git_commit(commit) for commit in commits] } - client = LaunchableClient(app=app) + client = SmartTestsClient(app=app) res = client.request("post", "commits/collect", payload=payload) res.raise_for_status() diff --git a/launchable/utils/common_tz.py b/smart_tests/utils/common_tz.py similarity index 100% rename from launchable/utils/common_tz.py rename to smart_tests/utils/common_tz.py diff --git a/smart_tests/utils/dynamic_commands.py b/smart_tests/utils/dynamic_commands.py new 
file mode 100644 index 000000000..758eee9a8 --- /dev/null +++ b/smart_tests/utils/dynamic_commands.py @@ -0,0 +1,294 @@ +""" +Dynamic Command Builder for NestedCommand Pattern + +This module provides functionality to dynamically generate Typer commands +that combine command-level options with test runner-specific logic, +enabling the NestedCommand pattern where test runners come before options. +""" + +import inspect +from typing import Any, Callable, Dict + +import typer + +from smart_tests.utils.test_runner_registry import get_registry + + +class DynamicCommandBuilder: + """Builder for creating dynamic Typer commands using NestedCommand pattern where test runners come before options.""" + + def __init__(self): + self.registry = get_registry() + + def create_subset_commands(self, base_app: typer.Typer, + base_callback_func: Callable, + base_callback_options: Dict[str, Any]) -> None: + """ + Create subset commands for each test runner with combined options. + + Args: + base_app: The Typer app to add commands to + base_callback_func: The original subset callback function + base_callback_options: Options from the original subset callback + """ + subset_functions = self.registry.get_subset_functions() + + for test_runner_name, test_runner_func in subset_functions.items(): + # Create a combined command that merges base options with test runner logic + combined_command = self._create_combined_subset_command( + test_runner_name, + test_runner_func, + base_callback_func, + base_callback_options + ) + + # Register the command with the app + base_app.command(name=test_runner_name, help=f"Subset tests using {test_runner_name}")(combined_command) + + def create_record_test_commands(self, base_app: typer.Typer, + base_callback_func: Callable, + base_callback_options: Dict[str, Any]) -> None: + """ + Create record test commands for each test runner with combined options. 
+ + Args: + base_app: The Typer app to add commands to + base_callback_func: The original record tests callback function + base_callback_options: Options from the original record tests callback + """ + record_functions = self.registry.get_record_test_functions() + + for test_runner_name, test_runner_func in record_functions.items(): + # Create a combined command that merges base options with test runner logic + combined_command = self._create_combined_record_command( + test_runner_name, + test_runner_func, + base_callback_func, + base_callback_options + ) + + # Register the command with the app + base_app.command(name=test_runner_name, help=f"Record test results using {test_runner_name}")(combined_command) + + def _create_combined_subset_command(self, test_runner_name: str, + test_runner_func: Callable, + base_callback_func: Callable, + base_callback_options: Dict[str, Any]) -> Callable: + """Create a combined subset command for a specific test runner.""" + + # Get signatures from both functions + base_sig = inspect.signature(base_callback_func) + test_runner_sig = inspect.signature(test_runner_func) + + # Combine parameters from both functions + combined_params = [] + + # Add ctx parameter first + combined_params.append( + inspect.Parameter('ctx', inspect.Parameter.POSITIONAL_OR_KEYWORD, annotation=typer.Context) + ) + + # Add parameters from base callback (excluding ctx) + for param_name, param in base_sig.parameters.items(): + if param_name != 'ctx': + combined_params.append(param) + + # Add parameters from test runner function (excluding client) + test_runner_params = list(test_runner_sig.parameters.values())[1:] # Skip 'client' + for param in test_runner_params: + # Avoid duplicate parameter names + if param.name not in [p.name for p in combined_params]: + # Ensure parameter has a default value to avoid "non-default follows default" error + if param.default == inspect.Parameter.empty: + # Add a default value for parameters without one + param = param.replace(default=None) + combined_params.append(param) + + # Create the combined function + def combined_function(*args, **kwargs): + # Extract ctx from args/kwargs + ctx = kwargs.get('ctx') or (args[0] if args else None) + + if not ctx: + raise ValueError("Context not found in function arguments") + + # Store test runner name as context attribute for direct access + ctx.test_runner = test_runner_name + + # Prepare arguments for base callback + base_args = {} + + for i, (param_name, param) in enumerate(base_sig.parameters.items()): + if param_name == 'ctx': + base_args[param_name] = ctx + elif param_name in kwargs: + base_args[param_name] = kwargs[param_name] + elif i < len(args): + base_args[param_name] = args[i] + elif param.default != inspect.Parameter.empty: + base_args[param_name] = param.default + + # Call base callback to set up context + base_callback_func(**base_args) + + # Get client from context + client = ctx.obj + + # Store test runner name in client if possible + if hasattr(client, 'set_test_runner'): + client.set_test_runner(test_runner_name) + + # Auto-infer the base path for all test runners when it is not explicitly provided + # This ensures all test runners have access to base_path when needed + has_base_path_attr = hasattr(client, 'base_path') + base_path_is_none = client.base_path is None if has_base_path_attr else False + inference_enabled = not kwargs.get('no_base_path_inference', False) + + if has_base_path_attr and base_path_is_none and inference_enabled: + + # Attempt to infer base path 
from current working directory + try: + import pathlib + + from smart_tests.commands.test_path_writer import TestPathWriter + from smart_tests.testpath import FilePathNormalizer + + file_path_normalizer = FilePathNormalizer() + inferred_base_path = file_path_normalizer._auto_infer_base_path(pathlib.Path.cwd().resolve()) + if inferred_base_path: + TestPathWriter.base_path = inferred_base_path + except (ImportError, OSError) as e: + import logging + logging.error(f"Failed to infer base path: {e}") + # If inference fails, continue with None + + # Prepare arguments for test runner function + test_runner_args = [client] # First argument is always client + test_runner_kwargs = {} + + test_runner_param_names = list(test_runner_sig.parameters.keys())[1:] # Skip 'client' + + for param_name in test_runner_param_names: + if param_name in kwargs: + test_runner_kwargs[param_name] = kwargs[param_name] + + # Call test runner function + return test_runner_func(*test_runner_args, **test_runner_kwargs) + + # Set the signature for the combined function + setattr(combined_function, '__signature__', inspect.Signature(combined_params)) + combined_function.__name__ = f"subset_{test_runner_name.replace('-', '_')}" + + return combined_function + + def _create_combined_record_command(self, test_runner_name: str, + test_runner_func: Callable, + base_callback_func: Callable, + base_callback_options: Dict[str, Any]) -> Callable: + """Create a combined record test command for a specific test runner.""" + + # Get signatures from both functions + base_sig = inspect.signature(base_callback_func) + test_runner_sig = inspect.signature(test_runner_func) + + # Combine parameters from both functions + combined_params = [] + + # Add ctx parameter first + combined_params.append( + inspect.Parameter('ctx', inspect.Parameter.POSITIONAL_OR_KEYWORD, annotation=typer.Context) + ) + + # Add parameters from base callback (excluding ctx) + for param_name, param in base_sig.parameters.items(): + if param_name != 'ctx': + combined_params.append(param) + + # Add parameters from test runner function (excluding client) + test_runner_params = list(test_runner_sig.parameters.values())[1:] # Skip 'client' + for param in test_runner_params: + # Avoid duplicate parameter names + if param.name not in [p.name for p in combined_params]: + # Ensure parameter has a default value to avoid "non-default follows default" error + if param.default == inspect.Parameter.empty: + # Add a default value for parameters without one + param = param.replace(default=None) + combined_params.append(param) + + # Create the combined function + def combined_function(*args, **kwargs): + # Extract ctx from args/kwargs + ctx = kwargs.get('ctx') or (args[0] if args else None) + + if not ctx: + raise ValueError("Context not found in function arguments") + + # Store test runner name as context attribute for direct access + ctx.test_runner = test_runner_name + + # Prepare arguments for base callback + base_args = {} + + for i, (param_name, param) in enumerate(base_sig.parameters.items()): + if param_name == 'ctx': + base_args[param_name] = ctx + elif param_name in kwargs: + base_args[param_name] = kwargs[param_name] + elif i < len(args): + base_args[param_name] = args[i] + elif param.default != inspect.Parameter.empty: + base_args[param_name] = param.default + + # Call base callback to set up context + base_callback_func(**base_args) + + # Get client from context + client = ctx.obj + + # Store test runner name in client if possible + if hasattr(client, 'set_test_runner'): + 
client.set_test_runner(test_runner_name) + + # Prepare arguments for test runner function + test_runner_args = [client] # First argument is always client + test_runner_kwargs = {} + + test_runner_param_names = list(test_runner_sig.parameters.keys())[1:] # Skip 'client' + + for param_name in test_runner_param_names: + if param_name in kwargs: + test_runner_kwargs[param_name] = kwargs[param_name] + + # Call test runner function + return test_runner_func(*test_runner_args, **test_runner_kwargs) + + # Set the signature for the combined function + setattr(combined_function, '__signature__', inspect.Signature(combined_params)) + combined_function.__name__ = f"record_{test_runner_name.replace('-', '_')}" + + return combined_function + + +def extract_callback_options(callback_func: Callable) -> Dict[str, Any]: + """ + Extract option definitions from a Typer callback function. + + This function analyzes the signature and annotations of a callback function + to extract the option definitions that can be reused in dynamic commands. + """ + sig = inspect.signature(callback_func) + options = {} + + for param_name, param in sig.parameters.items(): + if param_name == 'ctx': + continue + + # Store parameter information for later use + options[param_name] = { + 'annotation': param.annotation, + 'default': param.default, + 'kind': param.kind + } + + return options diff --git a/smart_tests/utils/env_keys.py b/smart_tests/utils/env_keys.py new file mode 100644 index 000000000..4076d4da8 --- /dev/null +++ b/smart_tests/utils/env_keys.py @@ -0,0 +1,19 @@ +import os + +REPORT_ERROR_KEY = "SMART_TESTS_REPORT_ERROR" +TOKEN_KEY = "SMART_TESTS_TOKEN" +ORGANIZATION_KEY = "SMART_TESTS_ORGANIZATION" +WORKSPACE_KEY = "SMART_TESTS_WORKSPACE" +BASE_URL_KEY = "SMART_TESTS_BASE_URL" +SKIP_TIMEOUT_RETRY = "SMART_TESTS_SKIP_TIMEOUT_RETRY" +COMMIT_TIMEOUT = "SMART_TESTS_COMMIT_TIMEOUT" +SKIP_CERT_VERIFICATION = "SMART_TESTS_SKIP_CERT_VERIFICATION" +SESSION_DIR_KEY = "SMART_TESTS_SESSION_DIR" + +# Legacy token key for backward compatibility +LEGACY_TOKEN_KEY = "LAUNCHABLE_TOKEN" + + +def get_token(): + """Get token with backward compatibility for LAUNCHABLE_TOKEN.""" + return os.getenv(TOKEN_KEY) or os.getenv(LEGACY_TOKEN_KEY) diff --git a/launchable/utils/exceptions.py b/smart_tests/utils/exceptions.py similarity index 57% rename from launchable/utils/exceptions.py rename to smart_tests/utils/exceptions.py index 7504d0333..d859e0b99 100644 --- a/launchable/utils/exceptions.py +++ b/smart_tests/utils/exceptions.py @@ -1,4 +1,10 @@ # TODO: add cli-specific custom exceptions +import sys + +import typer + +from smart_tests.utils.tracking import Tracking, TrackingClient + class ParseSessionException(Exception): def __init__( @@ -7,7 +13,7 @@ def __init__( message: str = "Wrong session format; session format is like 'builds/<build name>/test_sessions/<test session id>'.", ): self.session = session - self.message = "{message}: {session}".format(message=message, session=self.session) + self.message = f"{message}: {self.session}" super().__init__(self.message) @@ -18,5 +24,11 @@ def __init__( message: str = "Invalid JUnit XML file format", ): self.filename = filename - self.message = "{message}: {filename}".format(message=message, filename=self.filename) + self.message = f"{message}: {filename}" super().__init__(self.message) + + +def print_error_and_die(msg: str, tracking_client: TrackingClient, event: Tracking.ErrorEvent): + typer.secho(msg, fg=typer.colors.RED, err=True) + tracking_client.send_error_event(event_name=event, stack_trace=msg) + sys.exit(1) diff 
--git a/launchable/utils/fail_fast_mode.py b/smart_tests/utils/fail_fast_mode.py similarity index 100% rename from launchable/utils/fail_fast_mode.py rename to smart_tests/utils/fail_fast_mode.py diff --git a/launchable/utils/file_name_pattern.py b/smart_tests/utils/file_name_pattern.py similarity index 100% rename from launchable/utils/file_name_pattern.py rename to smart_tests/utils/file_name_pattern.py diff --git a/launchable/utils/git_log_parser.py b/smart_tests/utils/git_log_parser.py similarity index 95% rename from launchable/utils/git_log_parser.py rename to smart_tests/utils/git_log_parser.py index 93037741e..d2db8f42d 100644 --- a/launchable/utils/git_log_parser.py +++ b/smart_tests/utils/git_log_parser.py @@ -47,7 +47,7 @@ def parse_git_log(fp: TextIO) -> List[GitCommit]: added, deleted, path = line.split('\t', 3) files.append(ChangedFile(path=path, added=int(added), deleted=int(deleted))) except Exception as e: - raise ValueError("Failed to parse the file at line {}: {}".format(idx + 1, e)) + raise ValueError(f"Failed to parse the file at line {idx + 1}: {e}") if len(meta) != 0: ret.append(GitCommit(changed_files=files, **meta)) return ret diff --git a/launchable/utils/glob.py b/smart_tests/utils/glob.py similarity index 100% rename from launchable/utils/glob.py rename to smart_tests/utils/glob.py diff --git a/launchable/utils/gzipgen.py b/smart_tests/utils/gzipgen.py similarity index 100% rename from launchable/utils/gzipgen.py rename to smart_tests/utils/gzipgen.py diff --git a/launchable/utils/http_client.py b/smart_tests/utils/http_client.py similarity index 79% rename from launchable/utils/http_client.py rename to smart_tests/utils/http_client.py index 0044ecb99..6024a5440 100644 --- a/launchable/utils/http_client.py +++ b/smart_tests/utils/http_client.py @@ -2,21 +2,22 @@ import json import os import platform -from typing import IO, BinaryIO, Dict, Optional, Tuple, Union +from typing import IO, BinaryIO, Dict, Tuple, Union import click -from click import Context +import typer from requests import Session from requests.adapters import HTTPAdapter from requests.packages.urllib3.util.retry import Retry # type: ignore +from typer import Context -from launchable.version import __version__ +from smart_tests.version import __version__ from ..app import Application from .authentication import authentication_headers from .env_keys import BASE_URL_KEY, SKIP_TIMEOUT_RETRY from .gzipgen import compress as gzipgen_compress -from .logger import AUDIT_LOG_FORMAT, Logger +from .logger import Logger DEFAULT_BASE_URL = "https://api.mercury.launchableinc.com" @@ -44,8 +45,8 @@ def json(self): class _HttpClient: - def __init__(self, base_url: str = "", session: Optional[Session] = None, - test_runner: Optional[str] = "", app: Optional[Application] = None): + def __init__(self, base_url: str = "", session: Session | None = None, + test_runner: str | None = "", app: Application | None = None): self.base_url = base_url or get_base_url() self.dry_run = bool(app and app.dry_run) self.skip_cert_verification = bool(app and app.skip_cert_verification) @@ -68,7 +69,7 @@ def __init__(self, base_url: str = "", session: Optional[Session] = None, s.mount("https://", adapter) self.session = s else: - self.session = session # type: ignore + self.session = session self.test_runner = test_runner @@ -76,11 +77,11 @@ def request( self, method: str, path: str, - payload: Optional[Union[Dict, BinaryIO]] = None, - params: Optional[Dict] = None, + payload: Union[Dict, BinaryIO] | None = None, + params: Dict | None 
= None, timeout: Tuple[int, int] = DEFAULT_TIMEOUT, compress: bool = False, - additional_headers: Optional[Dict] = None, + additional_headers: Dict | None = None, ): url = _join_paths(self.base_url, path) @@ -91,7 +92,8 @@ def request( if additional_headers: headers = {**headers, **additional_headers} - Logger().audit(AUDIT_LOG_FORMAT.format("(DRY RUN) " if self.dry_run else "", method, url, headers, payload)) + dry_run_prefix = "(DRY RUN) " if self.dry_run else "" + Logger().audit(f"{dry_run_prefix}send request method:{method} path:{url} headers:{headers} args:{payload}") if self.dry_run and method.upper() not in ["HEAD", "GET"]: return DryRunResponse(status_code=200, payload={ @@ -107,8 +109,7 @@ def request( response = self.session.request(method, url, headers=headers, timeout=timeout, data=data, params=params, verify=(not self.skip_cert_verification)) Logger().debug( - "received response status:{} message:{} headers:{}".format(response.status_code, response.reason, - response.headers) + f"received response status:{response.status_code} message:{response.reason} headers:{response.headers}" ) # because (I believe, though I could be wrong) HTTP/2 got rid of status message, our server side HTTP stack @@ -126,11 +127,7 @@ def request( def _headers(self, compress): h = { - "User-Agent": "Launchable/{} (Python {}, {})".format( - __version__, - platform.python_version(), - platform.platform(), - ), + "User-Agent": f"Launchable/{__version__} (Python {platform.python_version()}, {platform.platform()})", "Content-Type": "application/json" } @@ -138,16 +135,16 @@ def _headers(self, compress): h["Content-Encoding"] = "gzip" if self.test_runner != "": - h["User-Agent"] = h["User-Agent"] + " TestRunner/{}".format(self.test_runner) + h["User-Agent"] = h["User-Agent"] + f" TestRunner/{self.test_runner}" ctx = click.get_current_context(silent=True) if ctx: - h["User-Agent"] = h["User-Agent"] + " Command/{}".format(format_context(ctx)) + h["User-Agent"] = h["User-Agent"] + f" Command/{format_context(ctx)}" return {**h, **authentication_headers()} -def format_context(ctx: click.Context) -> str: +def format_context(ctx: typer.Context) -> str: """ So that our CSMs can better understand how the users are invoking us, capture the implicit command invocations and PID. This way we can correlate @@ -163,10 +160,11 @@ def format_context(ctx: click.Context) -> str: Cannot overwrite ctx with ctx.parent directly (it will fail the type check). Therefore defined a _ctx and use it. 
""" - _ctx: Optional[Context] = ctx + _ctx: Context | None = ctx while _ctx: - cmds.append(ctx.command.name) - _ctx = _ctx.parent + if _ctx.command.name: + cmds.append(_ctx.command.name) + _ctx = _ctx.parent # type: ignore return '%s(%s)' % ('>'.join(cmds), os.getpid()) @@ -181,7 +179,7 @@ def _file_to_generator(f: IO, chunk_size=4096): yield data -def _build_data(payload: Optional[Union[BinaryIO, Dict]], compress: bool): +def _build_data(payload: Union[BinaryIO, Dict] | None, compress: bool): if payload is None: return None if isinstance(payload, dict): diff --git a/launchable/utils/java.py b/smart_tests/utils/java.py similarity index 93% rename from launchable/utils/java.py rename to smart_tests/utils/java.py index df091a2e5..43035630c 100644 --- a/launchable/utils/java.py +++ b/smart_tests/utils/java.py @@ -5,7 +5,7 @@ from typing import Callable from unittest import TestCase, TestSuite -from launchable.testpath import TestPath +from smart_tests.testpath import TestPath def get_java_command(): @@ -40,7 +40,7 @@ def junit5_nested_class_path_builder( With @Nested tests in JUnit 5, test class names have inner class names like com.launchableinc.rocket_car.NestedTest$InnerClass. - It causes a problem in subsetting because Launchable CLI can't detect inner classes in subsetting. + It causes a problem in subsetting because Smart Tests CLI can't detect inner classes in subsetting. So, we need to ignore the inner class names. The inner class name is separated by $. Note: Launchable allows $ in test paths. But we decided to remove it in this case because $ in the class name is not a common case. diff --git a/launchable/utils/link.py b/smart_tests/utils/link.py similarity index 78% rename from launchable/utils/link.py rename to smart_tests/utils/link.py index 093221551..a61be7afc 100644 --- a/launchable/utils/link.py +++ b/smart_tests/utils/link.py @@ -37,24 +37,19 @@ def capture_link(env: Mapping[str, str]) -> List[Dict[str, str]]: if env.get(JENKINS_URL_KEY): links.append({ "kind": LinkKind.JENKINS.name, "url": env.get(JENKINS_BUILD_URL_KEY, ""), - "title": "{} {}".format(env.get(JENKINS_JOB_NAME_KEY), env.get(JENKINS_BUILD_DISPLAY_NAME_KEY)) + "title": f"{env.get(JENKINS_JOB_NAME_KEY)} {env.get(JENKINS_BUILD_DISPLAY_NAME_KEY)}" }) if env.get(GITHUB_ACTIONS_KEY): links.append({ "kind": LinkKind.GITHUB_ACTIONS.name, - "url": "{}/{}/actions/runs/{}".format( - env.get(GITHUB_ACTIONS_SERVER_URL_KEY), - env.get(GITHUB_ACTIONS_REPOSITORY_KEY), - env.get(GITHUB_ACTIONS_RUN_ID_KEY), - ), + "url": f"{env.get(GITHUB_ACTIONS_SERVER_URL_KEY)}/{env.get(GITHUB_ACTIONS_REPOSITORY_KEY)}" + f"/actions/runs/{env.get(GITHUB_ACTIONS_RUN_ID_KEY)}", # the nomenclature in GitHub PR comment from GHA has the optional additional part "(a,b,c)" that refers # to the matrix, but that doesn't appear to be available as env var. Interestingly, run numbers are not # included. Maybe it was seen as too much details and unnecessary for deciding which link to click? - "title": "{} / {} #{}".format( - env.get(GITHUB_ACTIONS_WORKFLOW_KEY), - env.get(GITHUB_ACTIONS_JOB_KEY), - env.get(GITHUB_ACTIONS_RUN_NUMBER_KEY)) - }) + "title": f"{env.get(GITHUB_ACTIONS_WORKFLOW_KEY)} / {env.get(GITHUB_ACTIONS_JOB_KEY)} " + f"#{env.get(GITHUB_ACTIONS_RUN_NUMBER_KEY)}" + }) if env.get(GITHUB_PULL_REQUEST_URL_KEY): # TODO: where is this environment variable coming from? links.append({ @@ -67,7 +62,7 @@ def capture_link(env: Mapping[str, str]) -> List[Dict[str, str]]: # how much of that information should be present in title. 
links.append({ "kind": LinkKind.CIRCLECI.name, "url": env.get(CIRCLECI_BUILD_URL_KEY, ""), - "title": "{} ({})".format(env.get(CIRCLECI_JOB_KEY), env.get(CIRCLECI_BUILD_NUM_KEY)) + "title": f"{env.get(CIRCLECI_JOB_KEY)} ({env.get(CIRCLECI_BUILD_NUM_KEY)})" }) return links diff --git a/launchable/utils/logger.py b/smart_tests/utils/logger.py similarity index 94% rename from launchable/utils/logger.py rename to smart_tests/utils/logger.py index f34fb8bf9..508aaac7b 100644 --- a/launchable/utils/logger.py +++ b/smart_tests/utils/logger.py @@ -5,7 +5,7 @@ LOG_LEVEL_AUDIT = 25 LOG_LEVEL_AUDIT_STR = "AUDIT" -AUDIT_LOG_FORMAT = "{}send request method:{} path:{} headers:{} args:{}" +AUDIT_LOG_FORMAT = "{0}send request method:{1} path:{2} headers:{3} args:{4}" logging.addLevelName(LOG_LEVEL_AUDIT, "AUDIT") diff --git a/launchable/utils/no_build.py b/smart_tests/utils/no_build.py similarity index 100% rename from launchable/utils/no_build.py rename to smart_tests/utils/no_build.py diff --git a/launchable/utils/sax.py b/smart_tests/utils/sax.py similarity index 93% rename from launchable/utils/sax.py rename to smart_tests/utils/sax.py index 6cee59f4e..7bcb22419 100644 --- a/launchable/utils/sax.py +++ b/smart_tests/utils/sax.py @@ -1,11 +1,11 @@ import re import sys -from typing import Callable, Dict, List, Optional +from typing import Callable, Dict, List from xml.sax import make_parser from xml.sax.handler import ContentHandler from xml.sax.xmlreader import AttributesImpl -import click +import typer class Element: @@ -53,7 +53,7 @@ def __init__(self, element: str, attr: str, var: str): self.attr = attr self.var = var - def matches(self, e: Element) -> Optional[str]: + def matches(self, e: Element) -> str | None: return e.attrs.get( self.attr) if self.element == e.name or self.element == "*" else None @@ -64,7 +64,7 @@ def parse(spec: str) -> 'TagMatcher': if m: return TagMatcher(m.group(1), m.group(2), m.group(3)) else: - raise click.BadParameter("Invalid tag spec: %s" % spec) + raise typer.BadParameter("Invalid tag spec: %s" % spec) class SaxParser(ContentHandler): @@ -73,7 +73,7 @@ class SaxParser(ContentHandler): """ # represents the current element - context: Optional[Element] = None + context: Element | None = None # matchers: List[TagMatcher] diff --git a/smart_tests/utils/session.py b/smart_tests/utils/session.py new file mode 100644 index 000000000..0a26e0ab0 --- /dev/null +++ b/smart_tests/utils/session.py @@ -0,0 +1,72 @@ +# Utilities for TestSession. +# Named `session.py` to avoid confusion with test files. + +import re +import sys +from dataclasses import dataclass +from typing import Tuple + +import typer +from requests import HTTPError + +from smart_tests.utils.smart_tests_client import SmartTestsClient +from smart_tests.utils.tracking import Tracking + + +@dataclass +class TestSession: + id: int + build_id: int + build_name: str + observation_mode: bool + name: str | None = None + + +def get_session(session: str, client: SmartTestsClient) -> TestSession: + build_name, test_session_id = parse_session(session) + + subpath = f"builds/{build_name}/test_sessions/{test_session_id}" + res = client.request("get", subpath) + + try: + res.raise_for_status() + except HTTPError as e: + if e.response.status_code == 404: + # TODO(Konboi): move subset.print_error_and_die to util and use it + msg = f"Session {session} was not found. 
Make sure to run `smart-tests record session --build {build_name}` before you run this command" # noqa E501 + typer.secho(msg, fg=typer.colors.RED, err=True) + if client.tracking_client: + client.tracking_client.send_error_event(event_name=Tracking.ErrorEvent.USER_ERROR, stack_trace=msg) + sys.exit(1) + raise + + test_session = res.json() + + return TestSession( + id=test_session.get("id"), + build_id=test_session.get("buildId"), + build_name=test_session.get("buildNumber"), + observation_mode=test_session.get("isObservation"), + name=test_session.get("name"), + ) + + +def parse_session(session: str) -> Tuple[str, int]: + """Parse session to extract build name and test session id. + + Args: + session: Session in format "builds/{build_name}/test_sessions/{test_session_id}" + + Returns: + Tuple of (build_name, test_session_id) + + Raises: + ValueError: If session_id format is invalid + """ + match = re.match(r"builds/([^/]+)/test_sessions/(.+)", session) + + if match: + return match.group(1), int(match.group(2)) + else: + raise ValueError( + f"Invalid session ID format: {session}. Expected format: builds/{{build_name}}/test_sessions/{{test_session_id}}") diff --git a/launchable/utils/launchable_client.py b/smart_tests/utils/smart_tests_client.py similarity index 72% rename from launchable/utils/launchable_client.py rename to smart_tests/utils/smart_tests_client.py index e02d3733b..9a1ffa8f9 100644 --- a/launchable/utils/launchable_client.py +++ b/smart_tests/utils/smart_tests_client.py @@ -1,21 +1,21 @@ import os -from typing import BinaryIO, Dict, Optional, Tuple, Union +from typing import BinaryIO, Dict -import click import requests +import typer from requests import HTTPError, Session, Timeout -from launchable.utils.http_client import _HttpClient, _join_paths -from launchable.utils.tracking import Tracking, TrackingClient # type: ignore +from smart_tests.utils.http_client import _HttpClient, _join_paths +from smart_tests.utils.tracking import Tracking, TrackingClient # type: ignore from ..app import Application from .authentication import get_org_workspace from .env_keys import REPORT_ERROR_KEY -class LaunchableClient: - def __init__(self, tracking_client: Optional[TrackingClient] = None, base_url: str = "", session: Optional[Session] = None, - test_runner: Optional[str] = "", app: Optional[Application] = None): +class SmartTestsClient: + def __init__(self, tracking_client: TrackingClient | None = None, base_url: str = "", session: Session | None = None, + test_runner: str | None = "", app: Application | None = None): self.http_client = _HttpClient( base_url=base_url, session=session, @@ -26,24 +26,24 @@ def __init__(self, tracking_client: Optional[TrackingClient] = None, base_url: s self.organization, self.workspace = get_org_workspace() if self.organization is None or self.workspace is None: raise ValueError( - "Could not identify a Launchable organization/workspace. " - "Confirm that you set LAUNCHABLE_TOKEN " - "(or LAUNCHABLE_ORGANIZATION and LAUNCHABLE_WORKSPACE) environment variable(s)\n" + "Could not identify a Smart Tests organization/workspace. 
" + "Confirm that you set SMART_TESTS_TOKEN " + "(or SMART_TESTS_ORGANIZATION and SMART_TESTS_WORKSPACE) environment variable(s)\n" "See https://docs.launchableinc.com/getting-started#setting-your-api-key") - self._workspace_state_cache: Optional[Dict[str, Union[str, bool]]] = None + self._workspace_state_cache: Dict[str, str | bool] | None = None def request( self, method: str, sub_path: str, - payload: Optional[Union[Dict, BinaryIO]] = None, - params: Optional[Dict] = None, - timeout: Tuple[int, int] = (5, 60), + payload: dict | BinaryIO | None = None, + params: dict | None = None, + timeout: tuple[int, int] = (5, 60), compress: bool = False, - additional_headers: Optional[Dict] = None, + additional_headers: dict | None = None, ) -> requests.Response: path = _join_paths( - "/intake/organizations/{}/workspaces/{}".format(self.organization, self.workspace), + f"/intake/organizations/{self.organization}/workspaces/{self.workspace}", sub_path ) @@ -80,7 +80,7 @@ def track(event_name: Tracking.ErrorEvent, e: Exception): # should never come here, but needed to make type checker happy assert False - def print_exception_and_recover(self, e: Exception, warning: Optional[str] = None, warning_color='yellow'): + def print_exception_and_recover(self, e: Exception, warning: str | None = None, warning_color='yellow'): """ Print the exception raised from the request method, then recover from it @@ -91,15 +91,15 @@ def print_exception_and_recover(self, e: Exception, warning: Optional[str] = Non if os.getenv(REPORT_ERROR_KEY): raise e - click.echo(e, err=True) + typer.echo(e, err=True) if isinstance(e, HTTPError): # if the payload is present, report that as well to assist troubleshooting res = e.response if res and res.text: - click.echo(res.text, err=True) + typer.echo(res.text, err=True) if warning: - click.echo(click.style(warning, fg=warning_color), err=True) + typer.secho(warning, fg=getattr(typer.colors, warning_color.upper(), typer.colors.YELLOW), err=True) def base_url(self) -> str: return self.http_client.base_url @@ -132,3 +132,7 @@ def _get_workspace_state(self) -> dict: self.print_exception_and_recover(e, "Failed to get workspace state") return {} + + def set_test_runner(self, test_runner: str): + """Update the test runner name for this client.""" + self.http_client.test_runner = test_runner diff --git a/launchable/utils/subprocess.py b/smart_tests/utils/subprocess.py similarity index 91% rename from launchable/utils/subprocess.py rename to smart_tests/utils/subprocess.py index 98c983972..66a3a4056 100644 --- a/launchable/utils/subprocess.py +++ b/smart_tests/utils/subprocess.py @@ -7,6 +7,6 @@ def check_output(*args, **kwargs): In Windows, subprocess.check_output is used internally in one of those dependencies. If we mock out subprocess.check_output, it also traps those internall calls, making tests fail. This wrapper is a point to mock only - launchable CLI initiated calls. + Smart Tests CLI initiated calls. """ return subprocess.check_output(*args, **kwargs) diff --git a/smart_tests/utils/test_runner_registry.py b/smart_tests/utils/test_runner_registry.py new file mode 100644 index 000000000..52928d8c8 --- /dev/null +++ b/smart_tests/utils/test_runner_registry.py @@ -0,0 +1,117 @@ +""" +Test Runner Registry System + +This module provides a registry system for collecting test runner functions +before registering them as Typer commands, enabling the NestedCommand where +test runners come before options in command structure. 
+""" + +import inspect +from functools import wraps +from typing import Callable, Dict, List + +import typer + + +class TestRunnerRegistry: + """Registry for collecting test runner functions by command type.""" + + def __init__(self): + # Dictionary to store test runner functions by command type + # Format: {command_type: {test_runner_name: function}} + self._subset_functions: Dict[str, Callable] = {} + self._record_test_functions: Dict[str, Callable] = {} + self._split_subset_functions: Dict[str, Callable] = {} + # Callback to trigger when new test runners are registered + self._on_register_callback: Callable[[], None] | None = None + + def set_on_register_callback(self, callback: Callable[[], None]) -> None: + """Set a callback to be called when new test runners are registered.""" + self._on_register_callback = callback + + def register_subset(self, test_runner_name: str, func: Callable) -> None: + """Register a subset function for a test runner.""" + self._subset_functions[test_runner_name] = func + if self._on_register_callback: + self._on_register_callback() + + def register_record_tests(self, test_runner_name: str, func: Callable) -> None: + """Register a record tests function for a test runner.""" + self._record_test_functions[test_runner_name] = func + if self._on_register_callback: + self._on_register_callback() + + def register_split_subset(self, test_runner_name: str, func: Callable) -> None: + """Register a split subset function for a test runner.""" + self._split_subset_functions[test_runner_name] = func + if self._on_register_callback: + self._on_register_callback() + + def get_subset_functions(self) -> Dict[str, Callable]: + """Get all registered subset functions.""" + return self._subset_functions.copy() + + def get_record_test_functions(self) -> Dict[str, Callable]: + """Get all registered record test functions.""" + return self._record_test_functions.copy() + + def get_split_subset_functions(self) -> Dict[str, Callable]: + """Get all registered split subset functions.""" + return self._split_subset_functions.copy() + + def get_all_test_runner_names(self) -> List[str]: + """Get all unique test runner names across all command types.""" + all_names: set[str] = set() + all_names.update(self._subset_functions.keys()) + all_names.update(self._record_test_functions.keys()) + all_names.update(self._split_subset_functions.keys()) + return sorted(list(all_names)) + + +# Global registry instance +_registry = TestRunnerRegistry() + + +def get_registry() -> TestRunnerRegistry: + """Get the global test runner registry instance.""" + return _registry + + +def cmdname(module_name: str) -> str: + """Figure out the sub-command name from a test runner module name.""" + # a.b.cde -> cde + # xyz -> xyz + # In python module name the conventional separator is '_' but in command name, + # it is '-', so we do replace that + return module_name[module_name.rfind('.') + 1:].replace('_', '-') + + +def create_test_runner_wrapper(func: Callable, test_runner_name: str) -> Callable: + """ + Create a wrapper for test runner functions that handles client injection. + + This preserves the original function signature while adding ctx parameter + and handling client object injection. 
+ """ + # Get the original function signature (excluding 'client' parameter) + sig = inspect.signature(func) + params = list(sig.parameters.values())[1:] # Skip 'client' parameter + + # Create a wrapper that matches the original signature + @wraps(func) + def typer_wrapper(ctx: typer.Context, *args, **kwargs): + client = ctx.obj + + # Store the test runner name in the client object for later use + if hasattr(client, 'set_test_runner'): + client.set_test_runner(test_runner_name) + + # Call the function with client as first argument, then remaining args + return func(client, *args, **kwargs) + + # Copy parameter annotations from original function (excluding client) + new_params = [inspect.Parameter('ctx', inspect.Parameter.POSITIONAL_OR_KEYWORD, annotation=typer.Context)] + new_params.extend(params) + setattr(typer_wrapper, '__signature__', sig.replace(parameters=new_params)) + + return typer_wrapper diff --git a/launchable/utils/tracking.py b/smart_tests/utils/tracking.py similarity index 83% rename from launchable/utils/tracking.py rename to smart_tests/utils/tracking.py index 8358b23eb..9a3998f32 100644 --- a/launchable/utils/tracking.py +++ b/smart_tests/utils/tracking.py @@ -1,12 +1,12 @@ from enum import Enum -from typing import Any, Dict, Optional, Union +from typing import Any, Dict, Union from requests import Session -from launchable.app import Application -from launchable.utils.authentication import get_org_workspace -from launchable.utils.http_client import _HttpClient, _join_paths -from launchable.version import __version__ +from smart_tests.app import Application +from smart_tests.utils.authentication import get_org_workspace +from smart_tests.utils.http_client import _HttpClient, _join_paths +from smart_tests.version import __version__ from .commands import Command @@ -31,8 +31,8 @@ class ErrorEvent(Enum): class TrackingClient: - def __init__(self, command: Command, base_url: str = "", session: Optional[Session] = None, - test_runner: Optional[str] = "", app: Optional[Application] = None): + def __init__(self, command: Command, base_url: str = "", session: Session | None = None, + test_runner: str | None = "", app: Application | None = None): self.http_client = _HttpClient( base_url=base_url, session=session, @@ -44,7 +44,7 @@ def __init__(self, command: Command, base_url: str = "", session: Optional[Sessi def send_event( self, event_name: Tracking.Event, - metadata: Optional[Dict[str, Any]] = None + metadata: Dict[str, Any] | None = None ): org, workspace = get_org_workspace() if metadata is None: @@ -61,7 +61,7 @@ def send_error_event( event_name: Tracking.ErrorEvent, stack_trace: str, api: str = "", - metadata: Optional[Dict[str, Any]] = None + metadata: Dict[str, Any] | None = None ): org, workspace = get_org_workspace() if metadata is None: diff --git a/smart_tests/utils/typer_types.py b/smart_tests/utils/typer_types.py new file mode 100644 index 000000000..44f8c7c72 --- /dev/null +++ b/smart_tests/utils/typer_types.py @@ -0,0 +1,240 @@ +import datetime +import re +import sys + +import dateutil.parser +import typer +from dateutil.tz import tzlocal + + +class Percentage: + def __init__(self, value: float): + self.value = value + + def __str__(self): + return f"{self.value * 100}%" + + def __float__(self): + return self.value + + +def parse_percentage(value: str) -> Percentage: + try: + missing_percent = False + if value.endswith('%'): + x = float(value[:-1]) / 100 + if 0 <= x <= 1: + return Percentage(x) + else: + missing_percent = True + except ValueError: + pass + + msg = 
"Expected percentage like 50% but got '{}'".format(value) + if missing_percent and sys.platform.startswith("win"): + msg += " ('%' is a special character in batch files, so please write '50%%' to pass in '50%')" + raise typer.BadParameter(msg) + + +class Duration: + def __init__(self, seconds: float): + self.seconds = seconds + + def __str__(self): + return f"{self.seconds}s" + + def __float__(self): + return self.seconds + + +def parse_duration(value: str) -> Duration: + try: + return Duration(convert_to_seconds(value)) + except ValueError: + raise typer.BadParameter("Expected duration like 3600, 30m, 1h15m but got '{}'".format(value)) + + +class KeyValue: + def __init__(self, key: str, value: str): + self.key = key + self.value = value + + def __str__(self): + return f"{self.key}={self.value}" + + def __iter__(self): + return iter((self.key, self.value)) + + def __getitem__(self, index): + return (self.key, self.value)[index] + + +def parse_key_value(value: str) -> KeyValue: + """ + Handles options that take key/value pairs. + + The preferred syntax is "--option key=value" and that's what we should be advertising in docs and help, + but for compatibility (?) we accept "--option key:value" + + Typically, this is used with multiple=True to produce `Sequence[Tuple[str, str]]`. + """ + error_message = "Expected a key-value pair formatted as --option key=value, but got '{}'" + + for delimiter in ['=', ':']: + if delimiter in value: + kv = value.split(delimiter, 1) + if len(kv) != 2: + raise typer.BadParameter(error_message.format(value)) + return KeyValue(kv[0].strip(), kv[1].strip()) + + raise typer.BadParameter(error_message.format(value)) + + +class Fraction: + def __init__(self, numerator: int, denominator: int): + self.numerator = numerator + self.denominator = denominator + + def __str__(self): + return f"{self.numerator}/{self.denominator}" + + def __iter__(self): + return iter((self.numerator, self.denominator)) + + def __getitem__(self, index): + return (self.numerator, self.denominator)[index] + + def __float__(self): + return self.numerator / self.denominator + + +def parse_fraction(value: str) -> Fraction: + try: + v = value.strip().split('/') + if len(v) == 2: + n = int(v[0]) + d = int(v[1]) + return Fraction(n, d) + except ValueError: + pass + + raise typer.BadParameter("Expected fraction like 1/2 but got '{}'".format(value)) + + +class DateTimeWithTimezone: + def __init__(self, dt: datetime.datetime): + self.dt = dt + + def __str__(self): + return self.dt.isoformat() + + def datetime(self): + return self.dt + + +def parse_datetime_with_timezone(value: str) -> DateTimeWithTimezone: + try: + dt = dateutil.parser.parse(value) + if dt.tzinfo is None: + dt = dt.replace(tzinfo=tzlocal()) + return DateTimeWithTimezone(dt) + except ValueError: + raise typer.BadParameter("Expected datetime like 2023-10-01T12:00:00 but got '{}'".format(value)) + + +def convert_to_seconds(s: str) -> float: + """Convert duration string to seconds""" + units = {'s': 1, 'm': 60, 'h': 60 * 60, 'd': 60 * 60 * 24, 'w': 60 * 60 * 24 * 7} + + if s.isdigit(): + return float(s) + + duration = 0 + for m in re.finditer(r'(?P\d+)(?P[smhdw]?)', s, flags=re.I): + val = m.group('val') + unit = m.group('unit') + + if val is None or unit is None: + raise ValueError(f"unable to parse: {s}") + + u = units.get(unit) + if u is None: + raise ValueError(f"unable to parse: {s}") + + duration += int(val) * u + + return float(duration) + + +# Can the output deal with Unicode emojis? 
+ + +# Can the output deal with Unicode emojis? +try: + '\U0001f389'.encode(sys.stdout.encoding or "ascii") + # If stdout encoding is unavailable, such as when output is piped, err on the safe side (EMOJI=False). + # This is a judgement call, but since emojis serve no functional purpose and are purely decorative, + # erring on the safe side seems like a reasonable call. + EMOJI = True +except UnicodeEncodeError: + EMOJI = False + + +def emoji(s: str, fallback: str = '') -> str: + """ + Safely use emoji where we can. + + Returns 's' in an environment where stdout can deal with emojis, but 'fallback' otherwise. + """ + return s if EMOJI else fallback + + +def ignorable_error(e: Exception) -> str: + return "An error occurred on Smart Tests CLI. You can ignore this message since the process will continue. " \ + f"Error: {e}" + + +def parse_key_value_list(values: list) -> list: + """Parse a list of key-value strings into KeyValue objects""" + return [parse_key_value(v) for v in values] + + +# Backward compatibility functions for existing usage +def validate_key_value(value: str): + """Validate and parse a key-value string, returning a tuple for backward compatibility""" + kv = parse_key_value(value) + return (kv.key, kv.value) + + +def validate_datetime_with_tz(value: str): + """Validate and parse a datetime string, returning a datetime object for backward compatibility""" + dt_obj = parse_datetime_with_timezone(value) + return dt_obj.dt + + +def validate_past_datetime(dt_value: datetime.datetime): + """Validate that the provided datetime is in the past""" + if dt_value is None: + return dt_value + + if not isinstance(dt_value, datetime.datetime): + raise typer.BadParameter("Expected a datetime object.") + + now = datetime.datetime.now(tz=tzlocal()) + if dt_value > now: + raise typer.BadParameter("The provided timestamp must be in the past.") + + return dt_value + + +def _key_value_compat(value: str): + """Compatibility wrapper that returns tuple instead of KeyValue object""" + kv = parse_key_value(value) + return (kv.key, kv.value) + + +def _datetime_with_tz_compat(value: str): + """Compatibility wrapper that returns datetime instead of DateTimeWithTimezone object""" + dt_obj = parse_datetime_with_timezone(value) + return dt_obj.dt + + +KEY_VALUE = _key_value_compat +DATETIME_WITH_TZ = _datetime_with_tz_compat diff --git a/launchable/version.py b/smart_tests/version.py similarity index 54% rename from launchable/version.py rename to smart_tests/version.py index 282e81042..7920e033e 100644 --- a/launchable/version.py +++ b/smart_tests/version.py @@ -1,7 +1,7 @@ -from importlib_metadata import PackageNotFoundError, version +from importlib.metadata import PackageNotFoundError, version try: __version__ = version("launchable") except PackageNotFoundError: # package is not installed - pass + __version__ = "unknown" diff --git a/src/main/java/com/launchableinc/ingest/commits/CommitIngester.java b/src/main/java/com/launchableinc/ingest/commits/CommitIngester.java index 2efff19c0..302b30759 100644 --- a/src/main/java/com/launchableinc/ingest/commits/CommitIngester.java +++ b/src/main/java/com/launchableinc/ingest/commits/CommitIngester.java @@ -88,18 +88,18 @@ public void setNoCommitMessage(boolean b) { private void parseConfiguration() throws CmdLineException { String apiToken = launchableToken; if (launchableToken == null) { - apiToken = System.getenv("LAUNCHABLE_TOKEN"); + apiToken = System.getenv("SMART_TESTS_TOKEN"); } if (apiToken == null || apiToken.isEmpty()) { if (System.getenv("GITHUB_ACTIONS") != null) { - String o = 
System.getenv("LAUNCHABLE_ORGANIZATION"); + String o = System.getenv("SMART_TESTS_ORGANIZATION"); if (org == null && o == null) { - throw new CmdLineException("LAUNCHABLE_ORGANIZATION env variable is not set"); + throw new CmdLineException("SMART_TESTS_ORGANIZATION env variable is not set"); } - String w = System.getenv("LAUNCHABLE_WORKSPACE"); + String w = System.getenv("SMART_TESTS_WORKSPACE"); if (ws == null && w == null) { - throw new CmdLineException("LAUNCHABLE_WORKSPACE env variable is not set"); + throw new CmdLineException("SMART_TESTS_WORKSPACE env variable is not set"); } if (org == null) { @@ -118,7 +118,7 @@ private void parseConfiguration() throws CmdLineException { return; } - throw new CmdLineException("LAUNCHABLE_TOKEN env variable is not set"); + throw new CmdLineException("SMART_TESTS_TOKEN env variable is not set"); } this.parseLaunchableToken(apiToken); @@ -163,11 +163,11 @@ private void parseLaunchableToken(String token) throws CmdLineException { if (token.startsWith("v1:")) { String[] v = token.split(":"); if (v.length != 3) { - throw new IllegalStateException("Malformed LAUNCHABLE_TOKEN"); + throw new IllegalStateException("Malformed SMART_TESTS_TOKEN"); } v = v[1].split("/"); if (v.length != 2) { - throw new IllegalStateException("Malformed LAUNCHABLE_TOKEN"); + throw new IllegalStateException("Malformed SMART_TESTS_TOKEN"); } // for backward compatibility, allow command line options to take precedence diff --git a/tests/cli_test_case.py b/tests/cli_test_case.py index e79611cc5..d3b4f4405 100644 --- a/tests/cli_test_case.py +++ b/tests/cli_test_case.py @@ -9,13 +9,12 @@ from pathlib import Path from typing import Any, Dict, List -import click # type: ignore import responses # type: ignore -from click.testing import CliRunner # type: ignore +from typer.testing import CliRunner -from launchable.__main__ import main -from launchable.utils.http_client import get_base_url -from launchable.utils.session import SESSION_DIR_KEY, clean_session_files +from smart_tests.__main__ import main +from smart_tests.utils.env_keys import SESSION_DIR_KEY +from smart_tests.utils.http_client import get_base_url class CliTestCase(unittest.TestCase): @@ -24,12 +23,12 @@ class CliTestCase(unittest.TestCase): """ organization = 'launchableinc' workspace = 'mothership' - launchable_token = "v1:{}/{}:auth-token-sample".format(organization, workspace) + smart_tests_token = f"v1:{organization}/{workspace}:auth-token-sample" session_id = 16 build_name = "123" session_name = "test_session_name" subsetting_id = 456 - session = "builds/{}/test_sessions/{}".format(build_name, session_id) + session = f"builds/{build_name}/test_sessions/{session_id}" # directory where test data files are placed. 
see get_test_files_dir() test_files_dir: Path @@ -45,75 +44,48 @@ def setUp(self): responses.add( responses.POST, - "{}/intake/organizations/{}/workspaces/{}/builds/{}/test_sessions".format( - get_base_url(), - self.organization, - self.workspace, - self.build_name), + f"{get_base_url()}/intake/organizations/{self.organization}/workspaces/{self.workspace}" + f"/builds/{self.build_name}/test_sessions", json={'id': self.session_id}, status=200) responses.add( responses.POST, - "{}/intake/organizations/{}/workspaces/{}/subset".format( - get_base_url(), - self.organization, - self.workspace), + f"{get_base_url()}/intake/organizations/{self.organization}/workspaces/{self.workspace}/subset", json={'testPaths': [], 'rest': [], 'subsettingId': 456}, status=200) responses.add( responses.POST, - "{}/intake/organizations/{}/workspaces/{}/subset/{}/slice".format( - get_base_url(), - self.organization, - self.workspace, - self.subsetting_id), + f"{get_base_url()}/intake/organizations/{self.organization}/workspaces/{self.workspace}" + f"/subset/{self.subsetting_id}/slice", json={'testPaths': [], 'rest': [], 'subsettingId': 456}, status=200) responses.add( responses.POST, - "{}/intake/organizations/{}/workspaces/{}/subset/{}/split-by-groups".format( - get_base_url(), - self.organization, - self.workspace, - self.subsetting_id), + f"{get_base_url()}/intake/organizations/{self.organization}/workspaces/{self.workspace}" + f"/subset/{self.subsetting_id}/split-by-groups", json={'subsettingId': self.subsetting_id, 'isObservation': False, 'splitGroups': []}, status=200) responses.add( responses.GET, - "{}/intake/organizations/{}/workspaces/{}/subset/{}".format( - get_base_url(), - self.organization, - self.workspace, - self.subsetting_id), + f"{get_base_url()}/intake/organizations/{self.organization}/workspaces/{self.workspace}/subset/{self.subsetting_id}", json={'testPaths': [], 'rest': [], 'subsettingId': 456}, status=200) responses.add( responses.POST, - "{}/intake/organizations/{}/workspaces/{}/builds/{}/test_sessions/{}/events".format( - get_base_url(), - self.organization, - self.workspace, - self.build_name, - self.session_id), + f"{get_base_url()}/intake/organizations/{self.organization}/workspaces/{self.workspace}" + f"/builds/{self.build_name}/test_sessions/{self.session_id}/events", json={}, status=200) responses.add( responses.GET, - "{}/intake/organizations/{}/workspaces/{}/test_sessions/{}/events".format( - get_base_url(), - self.organization, - self.workspace, - self.session_id), + f"{get_base_url()}/intake/organizations/{self.organization}/workspaces/{self.workspace}" + f"/test_sessions/{self.session_id}/events", json=[], status=200) responses.add( responses.GET, - "{}/intake/organizations/{}/workspaces/{}/builds/{}/test_sessions/{}".format( - get_base_url(), - self.organization, - self.workspace, - self.build_name, - self.session_id), + f"{get_base_url()}/intake/organizations/{self.organization}/workspaces/{self.workspace}" + f"/builds/{self.build_name}/test_sessions/{self.session_id}", json={ 'id': self.session_id, 'isObservation': False, @@ -121,12 +93,17 @@ def setUp(self): status=200) responses.add( responses.GET, - "{}/intake/organizations/{}/workspaces/{}/builds/{}/test_session_names/{}".format( - get_base_url(), - self.organization, - self.workspace, - self.build_name, - self.session_name), + f"{get_base_url()}/intake/organizations/{self.organization}/workspaces/{self.workspace}" + f"/builds/{self.build_name}/test_session_names/{self.session_name}", + json={ + 'id': self.session_id, + 
'isObservation': False, + }, + status=404) + responses.add( + responses.GET, + f"{get_base_url()}/intake/organizations/{self.organization}/workspaces/{self.workspace}" + f"/builds/{self.build_name}/test_sessions/{self.session_id}", json={ 'id': self.session_id, 'isObservation': False, @@ -134,56 +111,34 @@ def setUp(self): status=200) responses.add( responses.PATCH, - "{}/intake/organizations/{}/workspaces/{}/builds/{}/test_sessions/{}".format( - get_base_url(), - self.organization, - self.workspace, - self.build_name, - self.session_id), + f"{get_base_url()}/intake/organizations/{self.organization}/workspaces/{self.workspace}" + f"/builds/{self.build_name}/test_sessions/{self.session_id}", json={'name': self.session_name}, status=200) responses.add( responses.PATCH, - "{}/intake/organizations/{}/workspaces/{}/builds/{}/test_sessions/{}/close".format( - get_base_url(), - self.organization, - self.workspace, - self.build_name, - self.session_id), + f"{get_base_url()}/intake/organizations/{self.organization}/workspaces/{self.workspace}" + f"/builds/{self.build_name}/test_sessions/{self.session_id}/close", json={}, status=200) responses.add( responses.POST, - "{}/intake/organizations/{}/workspaces/{}/builds".format( - get_base_url(), - self.organization, - self.workspace - ), + f"{get_base_url()}/intake/organizations/{self.organization}/workspaces/{self.workspace}/builds", json={'createdAt': "2020-01-02T03:45:56.123+00:00", 'id': 123, "build": self.build_name}, status=200) responses.add( responses.GET, - "{}/intake/organizations/{}/workspaces/{}/builds/{}".format( - get_base_url(), - self.organization, - self.workspace, - self.build_name), + f"{get_base_url()}/intake/organizations/{self.organization}/workspaces/{self.workspace}/builds/{self.build_name}", json={'createdAt': "2020-01-02T03:45:56.123+00:00", 'id': 123, "build": self.build_name}, status=200) responses.add( responses.GET, - "{}/intake/organizations/{}/workspaces/{}/slack/notification/key/list".format( - get_base_url(), - self.organization, - self.workspace), + f"{get_base_url()}/intake/organizations/{self.organization}/workspaces/{self.workspace}/slack/notification/key/list", json={'keys': ["GITHUB_ACTOR", "BRANCH_NAME"]}, status=200) responses.add( responses.GET, - "{}/intake/organizations/{}/workspaces/{}/commits/collect/options".format( - get_base_url(), - self.organization, - self.workspace), + f"{get_base_url()}/intake/organizations/{self.organization}/workspaces/{self.workspace}/commits/collect/options", json={'commitMessage': True}, status=200) responses.add( @@ -201,11 +156,10 @@ def get_test_files_dir(self): return file_name.parent.joinpath('../data/%s/' % stem).resolve() def tearDown(self): - clean_session_files() del os.environ[SESSION_DIR_KEY] shutil.rmtree(self.dir) - def cli(self, *args, **kwargs) -> click.testing.Result: + def cli(self, *args, **kwargs): """ Invoke CLI command and returns its result """ @@ -216,12 +170,22 @@ def cli(self, *args, **kwargs) -> click.testing.Result: mix_stderr = kwargs['mix_stderr'] del kwargs['mix_stderr'] - return CliRunner(mix_stderr=mix_stderr).invoke(main, args, catch_exceptions=False, **kwargs) + # Disable rich colors for testing by setting the environment variable + import os + old_no_color = os.environ.get('NO_COLOR') + os.environ['NO_COLOR'] = '1' + try: + return CliRunner(mix_stderr=mix_stderr).invoke(main, args, catch_exceptions=False, **kwargs) + finally: + if old_no_color is None: + os.environ.pop('NO_COLOR', None) + else: + os.environ['NO_COLOR'] = old_no_color - def 
assert_success(self, result: click.testing.Result): + def assert_success(self, result): self.assert_exit_code(result, 0) - def assert_exit_code(self, result: click.testing.Result, expected: int): + def assert_exit_code(self, result, expected: int): self.assertEqual(result.exit_code, expected, result.stdout) def assert_contents(self, file_path: str, content: str): @@ -251,7 +215,7 @@ def assert_record_tests_payload(self, golden_image_filename: str, payload=None): ''' if not payload: - payload = json.loads(gzip.decompress(self.find_request('/events').request.body).decode()) + payload = self.decode_request_body(self.find_request('/events').request.body) # Remove timestamp because it depends on the machine clock for c in payload['events']: @@ -272,7 +236,7 @@ def assert_subset_payload(self, golden_image_filename: str, payload=None): ''' if not payload: - payload = json.loads(gzip.decompress(self.find_request('/subset').request.body).decode()) + payload = self.decode_request_body(self.find_request('/subset').request.body) expected = self.load_json_from_file(self.test_files_dir.joinpath(golden_image_filename)) self.assert_json_orderless_equal(expected, payload) @@ -281,7 +245,7 @@ def load_json_from_file(self, file): with file.open() as json_file: return json.load(json_file) except Exception as e: - raise IOError("Failed to parse JSON {}".format(file)) from e + raise IOError(f"Failed to parse JSON {file}") from e def payload(self, mock_post): """ @@ -302,6 +266,27 @@ def gzipped_json_payload(self, mock_post): def json_payload(self, mock_post): return json.loads(self.payload(mock_post)) + def decode_request_body(self, request_body): + """ + Decode request body, handling both compressed and uncompressed data. + + This function became necessary after adding session name resolution API calls. + Originally, all request bodies were gzip-compressed, so we used + `json.loads(gzip.decompress(request_body).decode())` directly. + However, with the addition of session name resolution mocking (GET requests), + we now have a mix of compressed and uncompressed request bodies. + This function handles both cases by attempting decompression first, + then falling back to direct JSON parsing if decompression fails. + """ + if isinstance(request_body, bytes): + # Try to decompress first, fall back to direct decoding if not compressed + try: + return json.loads(gzip.decompress(request_body).decode()) + except gzip.BadGzipFile: + return json.loads(request_body.decode()) + else: + return json.loads(request_body) + def assert_json_orderless_equal(self, a, b): """ Compare two JSON trees ignoring orders of items in list & dict @@ -346,4 +331,4 @@ def extract_all_test_paths(obj: Dict[str, Any]) -> List[str]: # It's because the value associated with the 'events' key is in random order. 
self.assertCountEqual(a_test_paths, b_test_paths) for test_path in a_test_paths: - self.assertIn(test_path, b_test_paths, "Expected to include {}".format(test_path)) + self.assertIn(test_path, b_test_paths, f"Expected to include {test_path}") diff --git a/tests/commands/compare/test_subsets.py b/tests/commands/compare/test_subsets.py index df0676bbc..bbb8d29e7 100644 --- a/tests/commands/compare/test_subsets.py +++ b/tests/commands/compare/test_subsets.py @@ -6,7 +6,7 @@ class SubsetsTest(CliTestCase): - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_subsets(self): # Create subset-before.txt with open("subset-before.txt", "w") as f: @@ -52,7 +52,7 @@ def test_subsets(self): self.assertEqual(result.stdout, expect) - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_subsets_when_new_tests(self): # Create subset-before.txt with open("subset-before.txt", "w") as f: @@ -100,7 +100,7 @@ def test_subsets_when_new_tests(self): self.assertEqual(result.stdout, expect) - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_subsets_when_deleted_tests(self): # Create subset-before.txt with open("subset-before.txt", "w") as f: diff --git a/tests/commands/inspect/test_subset.py b/tests/commands/inspect/test_subset.py index 1738279b5..1c144f431 100644 --- a/tests/commands/inspect/test_subset.py +++ b/tests/commands/inspect/test_subset.py @@ -3,7 +3,7 @@ import responses # type: ignore -from launchable.utils.http_client import get_base_url +from smart_tests.utils.http_client import get_base_url from tests.cli_test_case import CliTestCase @@ -25,10 +25,14 @@ class SubsetTest(CliTestCase): } @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_subset(self): - responses.replace(responses.GET, "{}/intake/organizations/{}/workspaces/{}/subset/{}".format( - get_base_url(), self.organization, self.workspace, self.subsetting_id), json=self.mock_json, status=200) + responses.replace( + responses.GET, + f"{get_base_url()}/intake/organizations/{self.organization}/workspaces/" + f"{self.workspace}/subset/{self.subsetting_id}", + json=self.mock_json, + status=200) result = self.cli('inspect', 'subset', '--subset-id', self.subsetting_id, mix_stderr=False) expect = """| Order | Test Path | In Subset | Estimated duration (sec) | @@ -42,10 +46,14 @@ def test_subset(self): self.assertEqual(result.stdout, expect) @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_subset_json_format(self): - responses.replace(responses.GET, "{}/intake/organizations/{}/workspaces/{}/subset/{}".format( - get_base_url(), self.organization, self.workspace, self.subsetting_id), json=self.mock_json, status=200) + responses.replace( + responses.GET, + f"{get_base_url()}/intake/organizations/{self.organization}/workspaces/" + f"{self.workspace}/subset/{self.subsetting_id}", + json=self.mock_json, + status=200) result = self.cli('inspect', 'subset', '--subset-id', self.subsetting_id, "--json", 
mix_stderr=False) expect = """{ diff --git a/tests/commands/inspect/test_tests.py b/tests/commands/inspect/test_tests.py deleted file mode 100644 index fb15c8b2b..000000000 --- a/tests/commands/inspect/test_tests.py +++ /dev/null @@ -1,170 +0,0 @@ -import os -from unittest import mock - -import responses # type: ignore - -from launchable.utils.http_client import get_base_url -from launchable.utils.session import write_session -from tests.cli_test_case import CliTestCase - - -class TestsTest(CliTestCase): - response_json = [ - {"testPath": [ - { - "type": "file", - "name": "test_file1.py", - }, - ], - "duration": 1.2, - "stderr": "", - "stdout": "", - "createdAt": "2021-01-02T03:04:05.000+00:00", - "status": "SUCCESS", - }, - {"testPath": [ - { - "type": "file", - "name": "test_file3.py", - }, - ], - "duration": 0.6, - "stderr": "", - "stdout": "", - "createdAt": "2021-01-02T03:04:05.000+00:00", - "status": "SUCCESS", - }, - - - {"testPath": [ - { - "type": "file", - "name": "test_file4.py", - }, - ], - "duration": 1.8, - "stderr": "", - "stdout": "", - "createdAt": "2021-01-02T03:04:05.000+00:00", - "status": "FAILURE", - }, - {"testPath": [ - { - "type": "file", - "name": "test_file2.py", - }, - ], - "duration": 0.1, - "stderr": "", - "stdout": "", - "createdAt": "2021-01-02T03:04:05.000+00:00", - "status": "FAILURE", - }, - ] - - expect = """| Test Path | Duration (sec) | Status | Uploaded At | -|--------------------|------------------|----------|-------------------------------| -| file=test_file1.py | 1.20 | SUCCESS | 2021-01-02T03:04:05.000+00:00 | -| file=test_file3.py | 0.60 | SUCCESS | 2021-01-02T03:04:05.000+00:00 | -| file=test_file4.py | 1.80 | FAILURE | 2021-01-02T03:04:05.000+00:00 | -| file=test_file2.py | 0.10 | FAILURE | 2021-01-02T03:04:05.000+00:00 | -+-----------+----------------+------------------------+ -| Summary | Report Count | Total Duration (min) | -+===========+================+========================+ -| Total | 4 | 0.06 | -+-----------+----------------+------------------------+ -| Success | 2 | 0.03 | -+-----------+----------------+------------------------+ -| Failure | 2 | 0.03 | -+-----------+----------------+------------------------+ -| Skip | 0 | 0.00 | -+-----------+----------------+------------------------+ -""" - - expect_json = """{ - "summary": { - "total": { - "report_count": 4, - "duration_min": 0.06 - }, - "success": { - "report_count": 2, - "duration_min": 0.03 - }, - "failure": { - "report_count": 2, - "duration_min": 0.03 - }, - "skip": { - "report_count": 0, - "duration_min": 0.0 - } - }, - "results": [ - { - "test_path": "file=test_file1.py", - "duration_sec": 1.2, - "status": "SUCCESS", - "created_at": "2021-01-02T03:04:05.000+00:00" - }, - { - "test_path": "file=test_file3.py", - "duration_sec": 0.6, - "status": "SUCCESS", - "created_at": "2021-01-02T03:04:05.000+00:00" - }, - { - "test_path": "file=test_file4.py", - "duration_sec": 1.8, - "status": "FAILURE", - "created_at": "2021-01-02T03:04:05.000+00:00" - }, - { - "test_path": "file=test_file2.py", - "duration_sec": 0.1, - "status": "FAILURE", - "created_at": "2021-01-02T03:04:05.000+00:00" - } - ], - "test_session_app_url": "https://app.launchableinc.com/organizations/launchableinc/workspaces/mothership/test-sessions/16" -} -""" - - @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) - def test_tests(self): - responses.replace(responses.GET, "{}/intake/organizations/{}/workspaces/{}/test_sessions/{}/events".format( - get_base_url(), - 
self.organization, - self.workspace, - self.session_id), json=self.response_json, status=200) - - result = self.cli('inspect', 'tests', '--test-session-id', self.session_id, mix_stderr=False) - self.assert_success(result) - self.assertEqual(result.stdout, self.expect) - - @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) - def test_tests_without_test_session_id(self): - responses.replace(responses.GET, "{}/intake/organizations/{}/workspaces/{}/test_sessions/{}/events".format( - get_base_url(), - self.organization, - self.workspace, - self.session_id), json=self.response_json, status=200) - - write_session(self.build_name, self.session) - result = self.cli('inspect', 'tests') - self.assertEqual(result.stdout, self.expect) - - @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) - def test_tests_json_format(self): - responses.replace(responses.GET, "{}/intake/organizations/{}/workspaces/{}/test_sessions/{}/events".format( - get_base_url(), - self.organization, - self.workspace, - self.session_id), json=self.response_json, status=200) - - write_session(self.build_name, self.session) - result = self.cli('inspect', 'tests', "--json") - self.assertEqual(result.stdout, self.expect_json) diff --git a/tests/commands/record/test_attachment.py b/tests/commands/record/test_attachment.py index 2bf002e2c..e37861164 100644 --- a/tests/commands/record/test_attachment.py +++ b/tests/commands/record/test_attachment.py @@ -5,19 +5,17 @@ import responses # type: ignore -from launchable.utils.http_client import get_base_url -from launchable.utils.session import write_session +from smart_tests.utils.http_client import get_base_url from tests.cli_test_case import CliTestCase class AttachmentTest(CliTestCase): @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_attachment(self): TEST_CONTENT = b"Hello world" - # emulate launchable record build & session - write_session(self.build_name, self.session_id) + # Test requires explicit session parameter attachment = tempfile.NamedTemporaryFile(delete=False) attachment.write(TEST_CONTENT) @@ -34,12 +32,11 @@ def verify_body(request): responses.add_callback( responses.POST, - "{}/intake/organizations/{}/workspaces/{}/builds/{}/test_sessions/{}/attachment".format( - get_base_url(), self.organization, self.workspace, self.build_name, self.session_id), + f"{get_base_url()}/intake/organizations/{self.organization}/workspaces/" + f"{self.workspace}/builds/{self.build_name}/test_sessions/{self.session_id}/attachment", callback=verify_body) result = self.cli("record", "attachment", "--session", self.session, attachment.name) - self.assert_success(result) self.assertEqual(TEST_CONTENT, body) diff --git a/tests/commands/record/test_build.py b/tests/commands/record/test_build.py index 5d6940867..c7eda27e0 100644 --- a/tests/commands/record/test_build.py +++ b/tests/commands/record/test_build.py @@ -4,16 +4,15 @@ import responses # type: ignore -from launchable.utils.session import read_build from tests.cli_test_case import CliTestCase class BuildTest(CliTestCase): # make sure the output of git-submodule is properly parsed @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) @mock.patch.dict(os.environ, 
{"GITHUB_ACTIONS": ""}) - @mock.patch('launchable.utils.subprocess.check_output') + @mock.patch('smart_tests.utils.subprocess.check_output') # to tests on GitHub Actions @mock.patch.dict(os.environ, {"GITHUB_ACTIONS": ""}) @mock.patch.dict(os.environ, {"GITHUB_PULL_REQUEST_URL": ""}) @@ -30,8 +29,20 @@ def test_submodule(self, mock_check_output): ('c50f5de0f06fe16afa4fd1dd615e4903e40b42a2 refs/head/main\nc50f5de0f06fe16afa4fd1dd615e4903e40b42a2 refs/remotes/origin/main\n').encode(), # noqa: E501 ] - self.assertEqual(read_build(), None) - result = self.cli("record", "build", "--no-commit-collection", "--name", self.build_name) + result = self.cli( + "record", + "build", + "--no-commit-collection", + "--build", + self.build_name, + "--branch", + "main", + "--repo-branch-map", + ".=main", + "--repo-branch-map", + "./foo=main", + "--repo-branch-map", + "./bar-zot=main") self.assert_success(result) # Name & Path should both reflect the submodule path @@ -51,57 +62,61 @@ def test_submodule(self, mock_check_output): { "repositoryName": "./foo", "commitHash": "491e03096e2234dab9a9533da714fb6eff5dcaa7", - "branchName": "" + "branchName": "main" }, { "repositoryName": "./bar-zot", "commitHash": "8bccab48338219e73c3118ad71c8c98fbd32a4be", - "branchName": "" + "branchName": "main" }, ], "links": [], "timestamp": None }, payload) - self.assertEqual(read_build(), self.build_name) - @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) # to tests on GitHub Actions @mock.patch.dict(os.environ, {"GITHUB_ACTIONS": ""}) @mock.patch.dict(os.environ, {"GITHUB_PULL_REQUEST_URL": ""}) - @mock.patch('launchable.utils.subprocess.check_output') + @mock.patch('smart_tests.utils.subprocess.check_output') def test_no_submodule(self, mock_check_output): mock_check_output.side_effect = [ # the call is git rev-parse HEAD ('c50f5de0f06fe16afa4fd1dd615e4903e40b42a2').encode(), ] - self.assertEqual(read_build(), None) - - result = self.cli("record", "build", "--no-commit-collection", "--no-submodules", "--name", self.build_name) + result = self.cli( + "record", + "build", + "--no-commit-collection", + "--no-submodules", + "--build", + self.build_name, + "--branch", + "main", + "--repo-branch-map", + ".=main") self.assert_success(result) payload = json.loads(responses.calls[1].request.body.decode()) self.assert_json_orderless_equal( { "buildNumber": "123", - "lineage": None, + "lineage": "main", "commitHashes": [ { "repositoryName": ".", "commitHash": "c50f5de0f06fe16afa4fd1dd615e4903e40b42a2", - "branchName": "" + "branchName": "main" }, ], "links": [], "timestamp": None }, payload) - self.assertEqual(read_build(), self.build_name) - @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) # to tests on GitHub Actions @mock.patch.dict(os.environ, {"GITHUB_ACTIONS": ""}) @mock.patch.dict(os.environ, {"GITHUB_PULL_REQUEST_URL": ""}) @@ -109,51 +124,70 @@ def test_no_git_directory(self): orig_dir = os.getcwd() try: os.chdir(self.dir) - self.assertEqual(read_build(), None) - self.cli("record", "build", "--no-commit-collection", "--commit", - ".=c50f5de0f06fe16afa4fd1dd615e4903e40b42a2", "--name", self.build_name) + self.cli( + "record", + "build", + "--no-commit-collection", + "--commit", + ".=c50f5de0f06fe16afa4fd1dd615e4903e40b42a2", + "--build", + self.build_name, 
+ "--branch", + "main", + "--repo-branch-map", + ".=main") payload = json.loads(responses.calls[1].request.body.decode()) self.assert_json_orderless_equal( { "buildNumber": "123", - "lineage": None, + "lineage": "main", "commitHashes": [ { "repositoryName": ".", "commitHash": "c50f5de0f06fe16afa4fd1dd615e4903e40b42a2", - "branchName": "", + "branchName": "main", }, ], "links": [], "timestamp": None }, payload) - self.assertEqual(read_build(), self.build_name) finally: os.chdir(orig_dir) @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) # to tests on GitHub Actions @mock.patch.dict(os.environ, {"GITHUB_ACTIONS": ""}) @mock.patch.dict(os.environ, {"GITHUB_PULL_REQUEST_URL": ""}) def test_commit_option_and_build_option(self): # case only --commit option - result = self.cli("record", "build", "--no-commit-collection", "--commit", "A=abc12", "--name", self.build_name) + result = self.cli( + "record", + "build", + "--no-commit-collection", + "--commit", + "A=abc12", + "--build", + self.build_name, + "--branch", + "main", + "--repo-branch-map", + "A=main") self.assert_success(result) payload = json.loads(responses.calls[1].request.body.decode()) self.assert_json_orderless_equal( { "buildNumber": "123", - "lineage": None, + "lineage": "main", "commitHashes": [ { "repositoryName": "A", "commitHash": "abc12", - "branchName": "" + "branchName": "main" }, ], "links": [], @@ -169,8 +203,10 @@ def test_commit_option_and_build_option(self): "--commit", "A=abc12", "--branch", + "feature-xxx", + "--repo-branch-map", "A=feature-xxx", - "--name", + "--build", self.build_name) self.assert_success(result) @@ -191,24 +227,26 @@ def test_commit_option_and_build_option(self): }, payload) responses.calls.reset() - # case --commit option and --branch option but another one + # case --commit option and --repo-branch-map option with invalid repo result = self.cli( "record", "build", + "--build", + self.build_name, "--no-commit-collection", "--commit", "A=abc12", "--branch", - "B=feature-yyy", - "--name", - self.build_name) + "main", + "--repo-branch-map", + "B=feature-yyy") self.assert_success(result) payload = json.loads(responses.calls[1].request.body.decode()) self.assert_json_orderless_equal( { "buildNumber": "123", - "lineage": None, + "lineage": "main", "commitHashes": [ { "repositoryName": "A", @@ -226,17 +264,17 @@ def test_commit_option_and_build_option(self): result = self.cli( "record", "build", + "--build", + self.build_name, "--no-commit-collection", "--commit", "A=abc12", - "--branch", + "--repo-branch-map", "B=feature-yyy", "--commit", "B=56cde", - "--branch", - "A=feature-xxx", - "--name", - self.build_name) + "--repo-branch-map", + "A=feature-xxx") self.assert_success(result) payload = json.loads(responses.calls[1].request.body.decode()) @@ -262,31 +300,56 @@ def test_commit_option_and_build_option(self): responses.calls.reset() @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_build_name_validation(self): - result = self.cli("record", "build", "--no-commit-collection", "--name", "foo/hoge") + result = self.cli( + "record", + "build", + "--no-commit-collection", + "--build", + "foo/hoge", + "--branch", + "main", + "--commit", + "foo=abc123", + "--repo-branch-map", + "foo=main") self.assert_exit_code(result, 1) - result = 
self.cli("record", "build", "--no-commit-collection", "--name", "foo%2Fhoge") + result = self.cli( + "record", + "build", + "--no-commit-collection", + "--build", + "foo%2Fhoge", + "--branch", + "main", + "--commit", + "foo=abc123", + "--repo-branch-map", + "foo=main") self.assert_exit_code(result, 1) # make sure the output of git-submodule is properly parsed @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) # to tests on GitHub Actions @mock.patch.dict(os.environ, {"GITHUB_ACTIONS": ""}) @mock.patch.dict(os.environ, {"GITHUB_PULL_REQUEST_URL": ""}) - @mock.patch('launchable.utils.subprocess.check_output') + @mock.patch('smart_tests.utils.subprocess.check_output') def test_with_timestamp(self, mock_check_output): - self.assertEqual(read_build(), None) result = self.cli( "record", "build", "--no-commit-collection", "--commit", "repo=abc12", - "--name", + "--build", self.build_name, + "--branch", + "main", + "--repo-branch-map", + "repo=main", '--timestamp', "2025-01-23 12:34:56Z") self.assert_success(result) @@ -295,16 +358,29 @@ def test_with_timestamp(self, mock_check_output): self.assert_json_orderless_equal( { "buildNumber": "123", - "lineage": None, + "lineage": "main", "commitHashes": [ { "repositoryName": "repo", "commitHash": "abc12", - "branchName": "" + "branchName": "main" }, ], "links": [], "timestamp": "2025-01-23T12:34:56+00:00" }, payload) - self.assertEqual(read_build(), self.build_name) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) + def test_repo_branch_map_requires_no_commit_collection(self): + # Test that --repo-branch-map requires --no-commit-collection + result = self.cli( + "record", + "build", + "--build", + self.build_name, + "--branch", + "main", + "--repo-branch-map", + ".=main") + self.assert_exit_code(result, 1) + self.assertIn("--no-commit-collection must be specified when --repo-branch-map is used", result.stdout) diff --git a/tests/commands/record/test_case_event.py b/tests/commands/record/test_case_event.py index 3af311ddd..1933c54be 100644 --- a/tests/commands/record/test_case_event.py +++ b/tests/commands/record/test_case_event.py @@ -2,7 +2,7 @@ from io import StringIO from unittest import mock -from launchable.commands.record.case_event import CaseEvent +from smart_tests.commands.record.case_event import CaseEvent UNKNOWN_TIMEZONE_WARNING = "UnknownTimezoneWarning" diff --git a/tests/commands/record/test_commit.py b/tests/commands/record/test_commit.py index b7a1217a5..304274395 100644 --- a/tests/commands/record/test_commit.py +++ b/tests/commands/record/test_commit.py @@ -4,8 +4,8 @@ from http.server import HTTPServer, SimpleHTTPRequestHandler from unittest import mock -from launchable.commands.record.commit import _build_proxy_option -from launchable.utils.env_keys import BASE_URL_KEY +from smart_tests.commands.record.commit import _build_proxy_option +from smart_tests.utils.env_keys import BASE_URL_KEY from tests.cli_test_case import CliTestCase @@ -27,17 +27,17 @@ def do_POST(self): class CommitTest(CliTestCase): - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_run_commit(self): server = HTTPServer(("", 0), CommitHandler) thread = threading.Thread(None, server.serve_forever) thread.start() host, port = server.server_address - endpoint = 
"http://{}:{}".format(host, port) + endpoint = f"http://{host}:{port}" with mock.patch.dict(os.environ, {BASE_URL_KEY: endpoint}): - result = self.cli("record", "commit") + result = self.cli("record", "commit", "--name", "test-commit") self.assert_success(result) server.shutdown() diff --git a/tests/commands/record/test_session.py b/tests/commands/record/test_session.py index ddac44e15..339c1b80e 100644 --- a/tests/commands/record/test_session.py +++ b/tests/commands/record/test_session.py @@ -4,7 +4,6 @@ import responses # type: ignore -from launchable.utils.http_client import get_base_url from tests.cli_test_case import CliTestCase @@ -16,14 +15,16 @@ class SessionTest(CliTestCase): @responses.activate @mock.patch.dict(os.environ, { - "LAUNCHABLE_TOKEN": CliTestCase.launchable_token, + "SMART_TESTS_TOKEN": CliTestCase.smart_tests_token, # LANG=C.UTF-8 is needed to run CliRunner().invoke(command). # Generally it's provided by shell. But in this case, `clear=True` # removes the variable. 'LANG': 'C.UTF-8', }, clear=True) - def test_run_session_without_flavor(self): - result = self.cli("record", "session", "--build", self.build_name) + def test_run_session(self): + result = self.cli( + "record", "session", "--build", self.build_name, + "--test-suite", "test-suite") self.assert_success(result) payload = json.loads(responses.calls[1].request.body.decode()) @@ -32,18 +33,15 @@ def test_run_session_without_flavor(self): "isObservation": False, "links": [], "noBuild": False, - "lineage": None, - "testSuite": None, + "testSuite": "test-suite", "timestamp": None, }, payload) @responses.activate - @mock.patch.dict(os.environ, { - "LAUNCHABLE_TOKEN": CliTestCase.launchable_token, - 'LANG': 'C.UTF-8', - }, clear=True) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token, 'LANG': 'C.UTF-8'}, clear=True) def test_run_session_with_flavor(self): result = self.cli("record", "session", "--build", self.build_name, + "--test-suite", "test-suite", "--flavor", "key=value", "--flavor", "k:v", "--flavor", "k e y = v a l u e") self.assert_success(result) @@ -57,83 +55,44 @@ def test_run_session_with_flavor(self): "isObservation": False, "links": [], "noBuild": False, - "lineage": None, - "testSuite": None, + "testSuite": "test-suite", "timestamp": None, }, payload) - result = self.cli("record", "session", "--build", self.build_name, "--flavor", "only-key") + # invalid flavor case + result = self.cli( + "record", "session", "--build", self.build_name, + "--test-suite", "test-suite", "--flavor", "only-key") self.assert_exit_code(result, 2) - self.assertIn("Expected a key-value pair formatted as --option key=value", result.output) + self.assertIn("but got 'only-key'", result.output) @responses.activate @mock.patch.dict(os.environ, { - "LAUNCHABLE_TOKEN": CliTestCase.launchable_token, + "SMART_TESTS_TOKEN": CliTestCase.smart_tests_token, 'LANG': 'C.UTF-8', }, clear=True) def test_run_session_with_observation(self): - result = self.cli("record", "session", "--build", self.build_name, "--observation") + result = self.cli( + "record", "session", "--build", self.build_name, + "--test-suite", "test-suite", "--observation") self.assert_success(result) payload = json.loads(responses.calls[1].request.body.decode()) - self.assert_json_orderless_equal({ "flavors": {}, "isObservation": True, "links": [], "noBuild": False, - "lineage": None, - "testSuite": None, - "timestamp": None, - }, payload) - - @responses.activate - @mock.patch.dict(os.environ, { - "LAUNCHABLE_TOKEN": 
CliTestCase.launchable_token, - 'LANG': 'C.UTF-8', - }, clear=True) - def test_run_session_with_session_name(self): - # session name is already exist - result = self.cli("record", "session", "--build", self.build_name, "--session-name", self.session_name) - self.assert_exit_code(result, 2) - - responses.replace( - responses.GET, - "{}/intake/organizations/{}/workspaces/{}/builds/{}/test_session_names/{}".format( - get_base_url(), - self.organization, - self.workspace, - self.build_name, - self.session_name, - ), - status=404, - ) - # invalid session name - result = self.cli("record", "session", "--build", self.build_name, "--session-name", "invalid/name") - self.assert_exit_code(result, 2) - - result = self.cli("record", "session", "--build", self.build_name, "--session-name", self.session_name) - self.assert_success(result) - - payload = json.loads(responses.calls[5].request.body.decode()) - self.assert_json_orderless_equal({ - "flavors": {}, - "isObservation": False, - "links": [], - "noBuild": False, - "lineage": None, - "testSuite": None, + "testSuite": "test-suite", "timestamp": None, }, payload) @responses.activate - @mock.patch.dict(os.environ, { - "LAUNCHABLE_TOKEN": CliTestCase.launchable_token, - 'LANG': 'C.UTF-8', - }, clear=True) - def test_run_session_with_lineage(self): + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token, 'LANG': 'C.UTF-8'}, clear=True) + def test_run_session_with_timestamp(self): result = self.cli("record", "session", "--build", self.build_name, - "--lineage", "example-lineage") + "--test-suite", "test-suite", + "--timestamp", "2023-10-01T12:00:00Z") self.assert_success(result) payload = json.loads(responses.calls[1].request.body.decode()) @@ -142,49 +101,26 @@ def test_run_session_with_lineage(self): "isObservation": False, "links": [], "noBuild": False, - "lineage": "example-lineage", - "testSuite": None, - "timestamp": None, + "testSuite": "test-suite", + "timestamp": "2023-10-01T12:00:00+00:00", }, payload) @responses.activate - @mock.patch.dict(os.environ, { - "LAUNCHABLE_TOKEN": CliTestCase.launchable_token, - 'LANG': 'C.UTF-8', - }, clear=True) - def test_run_session_with_test_suite(self): + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token, 'LANG': 'C.UTF-8'}, clear=True) + def test_run_session_with_link(self): result = self.cli("record", "session", "--build", self.build_name, - "--test-suite", "example-test-suite") + "--test-suite", "test-suite", + "--link", "url=https://smart-tests.test") self.assert_success(result) payload = json.loads(responses.calls[1].request.body.decode()) self.assert_json_orderless_equal({ "flavors": {}, "isObservation": False, - "links": [], + "links": [ + {"title": "url", "url": "https://smart-tests.test", "kind": "CUSTOM_LINK"}, + ], "noBuild": False, - "lineage": None, - "testSuite": "example-test-suite", + "testSuite": "test-suite", "timestamp": None, }, payload) - - @responses.activate - @mock.patch.dict(os.environ, { - "LAUNCHABLE_TOKEN": CliTestCase.launchable_token, - 'LANG': 'C.UTF-8', - }, clear=True) - def test_run_session_with_timestamp(self): - result = self.cli("record", "session", "--build", self.build_name, - "--timestamp", "2023-10-01T12:00:00Z") - self.assert_success(result) - - payload = json.loads(responses.calls[1].request.body.decode()) - self.assert_json_orderless_equal({ - "flavors": {}, - "isObservation": False, - "links": [], - "noBuild": False, - "lineage": None, - "testSuite": None, - "timestamp": "2023-10-01T12:00:00+00:00", - }, 
payload) diff --git a/tests/commands/record/test_tests.py b/tests/commands/record/test_tests.py index 1e8c122e0..8aa3bcc6e 100644 --- a/tests/commands/record/test_tests.py +++ b/tests/commands/record/test_tests.py @@ -7,10 +7,7 @@ import responses # type: ignore -from launchable.commands.record.tests import INVALID_TIMESTAMP, parse_launchable_timeformat -from launchable.utils.http_client import get_base_url -from launchable.utils.no_build import NO_BUILD_BUILD_NAME, NO_BUILD_TEST_SESSION_ID -from launchable.utils.session import write_build, write_session +from smart_tests.commands.record.tests import INVALID_TIMESTAMP, parse_launchable_timeformat from tests.cli_test_case import CliTestCase @@ -19,28 +16,30 @@ class TestsTest(CliTestCase): '../../data/maven/').resolve() @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_with_group_name(self): - # emulate launchable record build & session - write_session(self.build_name, self.session_id) - - result = self.cli('record', 'tests', '--session', - self.session, '--group', 'hoge', 'maven', str( - self.report_files_dir) + "**/reports/") + result = self.cli('record', 'test', 'maven', '--session', self.session, '--group', 'hoge', + str(self.report_files_dir) + "**/reports/") self.assert_success(result) request = json.loads(gzip.decompress(self.find_request('/events').request.body).decode()) self.assertCountEqual(request.get("group", []), "hoge") @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_filename_in_error_message(self): - # emulate launchable record build - write_build(self.build_name) + # emulate smart-tests record build normal_xml = str(Path(__file__).parent.joinpath('../../data/broken_xml/normal.xml').resolve()) broken_xml = str(Path(__file__).parent.joinpath('../../data/broken_xml/broken.xml').resolve()) - result = self.cli('record', 'tests', '--build', self.build_name, 'file', normal_xml, broken_xml) + result = self.cli( + 'record', + 'test', + 'file', + '--session', + self.session, + normal_xml, + broken_xml) def remove_backslash(input: str) -> str: # Hack for Windowns. 
They containts double escaped backslash such @@ -56,33 +55,6 @@ def remove_backslash(input: str) -> str: # normal.xml self.assertIn('open_class_user_test.rb', gzip.decompress(self.find_request('/events').request.body).decode()) - @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) - def test_with_no_build(self): - responses.add( - responses.POST, - "{}/intake/organizations/{}/workspaces/{}/builds/{}/test_sessions/{}/events".format( - get_base_url(), - self.organization, - self.workspace, - NO_BUILD_BUILD_NAME, - NO_BUILD_TEST_SESSION_ID, - ), - json={ - "build": { - "id": 12345, - "buildNumber": 1675750000, - }, - "testSessions": { - "id": 678, - "buildId": 12345, - }, - }, - status=200) - - result = self.cli('record', 'tests', '--no-build', 'maven', str(self.report_files_dir) + "**/reports/") - self.assert_success(result) - def test_parse_launchable_timeformat(self): t1 = "2021-04-01T09:35:47.934+00:00" # 1617269747.934 t2 = "2021-05-24T18:29:04.285+00:00" # 1621880944.285 @@ -97,13 +69,15 @@ def test_parse_launchable_timeformat(self): self.assertEqual(INVALID_TIMESTAMP, parse_launchable_timeformat(t3)) @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_when_total_test_duration_zero(self): - write_build(self.build_name) - zero_duration_xml1 = str(Path(__file__).parent.joinpath('../../data/googletest/output_a.xml').resolve()) zero_duration_xml2 = str(Path(__file__).parent.joinpath('../../data/googletest/output_b.xml').resolve()) - result = self.cli('record', 'tests', '--build', self.build_name, 'googletest', zero_duration_xml1, zero_duration_xml2) + result = self.cli( + 'record', 'test', 'googletest', + '--session', self.session, + zero_duration_xml1, + zero_duration_xml2) self.assert_success(result) self.assertIn("Total test duration is 0.", result.output) diff --git a/tests/commands/test_api_error.py b/tests/commands/test_api_error.py index 7c8cd16fd..21ddb0387 100644 --- a/tests/commands/test_api_error.py +++ b/tests/commands/test_api_error.py @@ -10,9 +10,9 @@ import responses # type: ignore from requests.exceptions import ReadTimeout -from launchable.commands.verify import compare_version -from launchable.utils.env_keys import BASE_URL_KEY -from launchable.utils.http_client import get_base_url +from smart_tests.commands.verify import compare_version +from smart_tests.utils.env_keys import BASE_URL_KEY +from smart_tests.utils.http_client import get_base_url from tests.cli_test_case import CliTestCase @@ -55,12 +55,9 @@ class APIErrorTest(CliTestCase): test_files_dir = Path(__file__).parent.joinpath('../data/minitest/').resolve() @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_verify(self): - verification_url = "{base}/intake/organizations/{org}/workspaces/{ws}/verification".format( - base=get_base_url(), - org=self.organization, - ws=self.workspace) + verification_url = f"{get_base_url()}/intake/organizations/{self.organization}/workspaces/{self.workspace}/verification" responses.add( responses.GET, verification_url, @@ -74,8 +71,7 @@ def test_verify(self): body=ConnectionError("error")) tracking = responses.add( responses.POST, - CLI_TRACKING_URL.format( - base=get_base_url()), + f"{get_base_url()}/intake/cli_tracking", 
body=ReadTimeout("error")) result = self.cli("verify") self.assert_success(result) @@ -83,17 +79,17 @@ def test_verify(self): self.assert_tracking_count(tracking=tracking, count=2) @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_record_commit(self): server = HTTPServer(("", 0), ErrorCommitHandlerMock) thread = threading.Thread(None, server.serve_forever) thread.start() host, port = server.server_address - endpoint = "http://{}:{}".format(host, port) + endpoint = f"http://{host}:{port}" with mock.patch.dict(os.environ, {BASE_URL_KEY: endpoint}): - result = self.cli("record", "commit", "--source", ".") + result = self.cli("record", "commit", "--name", "test-commit", "--source", ".") self.assert_success(result) self.assertEqual(result.exception, None) @@ -101,7 +97,7 @@ def test_record_commit(self): thread.join(timeout=3) @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_record_build(self): # case: cli catches error success_server = HTTPServer(("", 0), SuccessCommitHandlerMock) @@ -109,29 +105,35 @@ def test_record_build(self): thread.start() host, port = success_server.server_address - endpoint = "http://{}:{}".format(host, port) + endpoint = f"http://{host}:{port}" with mock.patch.dict(os.environ, {BASE_URL_KEY: endpoint}): responses.add( responses.GET, - "{}/intake/organizations/{}/workspaces/{}/commits/collect/options".format( - get_base_url(), - self.organization, - self.workspace), + f"{get_base_url()}/intake/organizations/{self.organization}/workspaces/{self.workspace}/commits/collect/options", json={'commitMessage': True}, status=200) - responses.add(responses.POST, "{base}/intake/organizations/{org}/workspaces/{ws}/builds".format( - base=get_base_url(), org=self.organization, ws=self.workspace), status=500) + responses.add(responses.POST, + f"{get_base_url()}/intake/organizations/{self.organization}/workspaces/{self.workspace}/builds", + status=500) tracking = responses.add( responses.POST, - CLI_TRACKING_URL.format( - base=get_base_url()), + f"{get_base_url()}/intake/cli_tracking", body=ReadTimeout("error")) - result = self.cli("record", "build", "--name", "example") + result = self.cli( + "record", + "build", + "--build", + "example", + "--branch", + "main", + "--repo-branch-map", + ".=main", + "--no-commit-collection") self.assert_success(result) self.assertEqual(result.exception, None) # Since HTTPError is occurred outside of LaunchableClient, the count is 1. 
- self.assert_tracking_count(tracking=tracking, count=3) + self.assert_tracking_count(tracking=tracking, count=2) success_server.shutdown() thread.join(timeout=3) @@ -142,112 +144,93 @@ def test_record_build(self): thread.start() host, port = error_server.server_address - endpoint = "http://{}:{}".format(host, port) + endpoint = f"http://{host}:{port}" with mock.patch.dict(os.environ, {BASE_URL_KEY: endpoint}): responses.add( responses.GET, - "{}/intake/organizations/{}/workspaces/{}/commits/collect/options".format( - get_base_url(), - self.organization, - self.workspace), + f"{get_base_url()}/intake/organizations/{self.organization}/workspaces/{self.workspace}/commits/collect/options", json={'commitMessage': True}, status=200) - responses.add(responses.POST, "{base}/intake/organizations/{org}/workspaces/{ws}/builds".format( - base=get_base_url(), org=self.organization, ws=self.workspace), status=500) + responses.add(responses.POST, + f"{get_base_url()}/intake/organizations/{self.organization}/workspaces/{self.workspace}/builds", + status=500) tracking = responses.add( responses.POST, - CLI_TRACKING_URL.format( - base=get_base_url()), + f"{get_base_url()}/intake/cli_tracking", body=ReadTimeout("error")) - result = self.cli("record", "build", "--name", "example") + result = self.cli( + "record", + "build", + "--build", + "example", + "--branch", + "main", + "--repo-branch-map", + ".=main", + "--no-commit-collection") self.assert_success(result) self.assertEqual(result.exception, None) # Since HTTPError is occurred outside of LaunchableClient, the count is 1. - self.assert_tracking_count(tracking=tracking, count=3) + self.assert_tracking_count(tracking=tracking, count=2) error_server.shutdown() thread.join(timeout=3) @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_record_session(self): build = "internal_server_error" responses.add( responses.POST, - "{base}/intake/organizations/{org}/workspaces/{ws}/builds/{build}/test_sessions".format( - base=get_base_url(), - org=self.organization, - ws=self.workspace, - build=build), + f"{get_base_url()}/intake/organizations/{self.organization}/workspaces/{self.workspace}/builds/{build}/test_sessions", status=500) tracking = responses.add( responses.POST, - CLI_TRACKING_URL.format( - base=get_base_url()), + f"{get_base_url()}/intake/cli_tracking", body=ReadTimeout("error")) - result = self.cli("record", "session", "--build", build) + result = self.cli("record", "session", "--build", build, "--test-suite", "test-suite") self.assert_success(result) - # Since HTTPError is occurred outside of LaunchableClient, the count is 1. 
self.assert_tracking_count(tracking=tracking, count=1) build = "not_found" responses.add( responses.POST, - "{base}/intake/organizations/{org}/workspaces/{ws}/builds/{build}/test_sessions".format( - base=get_base_url(), - org=self.organization, - ws=self.workspace, - build=build), + f"{get_base_url()}/intake/organizations/{self.organization}/workspaces/{self.workspace}/builds/{build}/test_sessions", status=404) tracking = responses.add( responses.POST, - CLI_TRACKING_URL.format( - base=get_base_url()), + f"{get_base_url()}/intake/cli_tracking", body=ReadTimeout("error")) - result = self.cli("record", "session", "--build", build) + result = self.cli("record", "session", "--build", build, "--test-suite", "test-suite") self.assert_exit_code(result, 1) self.assert_tracking_count(tracking=tracking, count=1) - responses.replace( - responses.GET, - "{}/intake/organizations/{}/workspaces/{}/builds/{}/test_session_names/{}".format( - get_base_url(), - self.organization, - self.workspace, - self.build_name, - self.session_name, - ), - body=ReadTimeout("error") - ) tracking = responses.add( responses.POST, - CLI_TRACKING_URL.format( - base=get_base_url()), + f"{get_base_url()}/intake/cli_tracking", body=ReadTimeout("error")) - result = self.cli("record", "session", "--build", self.build_name, "--session-name", self.session_name) + result = self.cli( + "record", "session", "--build", self.build_name, + "--test-suite", "test-suite") self.assert_success(result) # Since Timeout error is caught inside of LaunchableClient, the tracking event is sent twice. - self.assert_tracking_count(tracking=tracking, count=2) + self.assert_tracking_count(tracking=tracking, count=0) @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_subset(self): responses.replace( responses.POST, - "{base}/intake/organizations/{org}/workspaces/{ws}/subset".format( - base=get_base_url(), - org=self.organization, - ws=self.workspace), + f"{get_base_url()}/intake/organizations/{self.organization}/workspaces/{self.workspace}/subset", status=500) tracking = responses.add( responses.POST, - CLI_TRACKING_URL.format( - base=get_base_url()), + f"{get_base_url()}/intake/cli_tracking", body=ReadTimeout("error")) subset_file = "example_test.rb" @@ -255,13 +238,13 @@ def test_subset(self): with tempfile.NamedTemporaryFile(delete=False) as rest_file: result = self.cli( "subset", + "minitest", "--target", "30%", "--session", self.session, "--rest", rest_file.name, - "minitest", str(self.test_files_dir) + "/test/**/*.rb", mix_stderr=False) @@ -272,12 +255,21 @@ def test_subset(self): # Since HTTPError is occurred outside of LaunchableClient, the count is 1. 
self.assert_tracking_count(tracking=tracking, count=1)

- responses.replace(responses.POST, "{base}/intake/organizations/{org}/workspaces/{ws}/subset".format(
- base=get_base_url(), org=self.organization, ws=self.workspace), status=404)
+ responses.replace(responses.POST,
+ f"{get_base_url()}/intake/organizations/{self.organization}/workspaces/{self.workspace}/subset",
+ status=404)

with tempfile.NamedTemporaryFile(delete=False) as rest_file:
- result = self.cli("subset", "--target", "30%", "--session", self.session, "--rest", rest_file.name,
- "minitest", str(self.test_files_dir) + "/test/**/*.rb", mix_stderr=False)
+ result = self.cli("subset",
+ "minitest",
+ "--target",
+ "30%",
+ "--session",
+ self.session,
+ "--rest",
+ rest_file.name,
+ str(self.test_files_dir) + "/test/**/*.rb",
+ mix_stderr=False)

self.assert_success(result)
self.assertEqual(len(result.stdout.rstrip().split("\n")), 1)

@@ -286,109 +278,80 @@ def test_subset(self):

responses.replace(
responses.GET,
- "{base}/intake/organizations/{org}/workspaces/{ws}/builds/{build}/test_sessions/{session_id}".format(
- base=get_base_url(),
- org=self.organization,
- ws=self.workspace,
- build=self.build_name,
- session_id=self.session_id),
+ f"{get_base_url()}/intake/organizations/{self.organization}/workspaces/"
+ f"{self.workspace}/builds/{self.build_name}/test_sessions/{self.session_id}",
body=ReadTimeout("error"))
tracking = responses.add(
responses.POST,
- CLI_TRACKING_URL.format(
- base=get_base_url()),
+ f"{get_base_url()}/intake/cli_tracking",
body=ReadTimeout("error"))
with tempfile.NamedTemporaryFile(delete=False) as rest_file:
result = self.cli("subset",
+ "minitest",
"--target",
"30%",
"--session",
self.session,
"--rest",
rest_file.name,
- "--observation",
- "minitest",
str(self.test_files_dir) + "/test/**/*.rb",
mix_stderr=False)
self.assert_success(result)
# Since the Timeout error is caught inside of LaunchableClient, the tracking event is sent twice.
self.assert_tracking_count(tracking=tracking, count=2)

@responses.activate
- @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token})
+ @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token})
def test_record_tests(self):
- responses.replace(
- responses.GET,
- "{base}/intake/organizations/{org}/workspaces/{ws}/builds/{build}".format(
- base=get_base_url(),
- org=self.organization,
- ws=self.workspace,
- build=self.build_name),
- body=ReadTimeout("error"))
tracking = responses.add(
responses.POST,
- CLI_TRACKING_URL.format(
- base=get_base_url()),
+ f"{get_base_url()}/intake/cli_tracking",
body=ReadTimeout("error"))
- result = self.cli("record", "tests", "--session", self.session, "minitest", str(self.test_files_dir) + "/")
+ result = self.cli("record", "test", "minitest", "--session", self.session, str(self.test_files_dir) + "/")
self.assert_success(result)
- # Since Timeout error is caught inside of LaunchableClient, the tracking event is sent twice.
+ # Session name resolution now succeeds, but the tracking event is still sent twice.
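# A minimal sketch of the tracking pattern used throughout this file, assuming
# the `responses` behavior exercised above: `responses.add()` returns the
# registration object, and its `call_count` records how many times the CLI
# POSTed to /intake/cli_tracking:
#
#   tracking = responses.add(
#       responses.POST,
#       f"{get_base_url()}/intake/cli_tracking",
#       body=ReadTimeout("error"))
#   # ...run a CLI command via self.cli(...)...
#   self.assert_tracking_count(tracking=tracking, count=2)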
self.assert_tracking_count(tracking=tracking, count=2)

responses.replace(
responses.POST,
- "{base}/intake/organizations/{org}/workspaces/{ws}/builds/{build}/test_sessions/{session_id}/events".format(
- base=get_base_url(),
- org=self.organization,
- ws=self.workspace,
- build=self.build_name,
- session_id=self.session_id),
+ f"{get_base_url()}/intake/organizations/{self.organization}/workspaces/"
+ f"{self.workspace}/builds/{self.build_name}/test_sessions/{self.session_id}/events",
json=[],
status=500)
tracking = responses.add(
responses.POST,
- CLI_TRACKING_URL.format(
- base=get_base_url()),
+ f"{get_base_url()}/intake/cli_tracking",
body=ReadTimeout("error"))
- result = self.cli("record", "tests", "--session", self.session, "minitest", str(self.test_files_dir) + "/")
+ result = self.cli("record", "test", "minitest", "--session", self.session, str(self.test_files_dir) + "/")
self.assert_success(result)
- # Since HTTPError is occurred outside of LaunchableClient, the count is 1.
- self.assert_tracking_count(tracking=tracking, count=2)
+ # Since the HTTPError occurs outside of LaunchableClient, the count is 3.
+ self.assert_tracking_count(tracking=tracking, count=3)

responses.replace(
responses.POST,
- "{base}/intake/organizations/{org}/workspaces/{ws}/builds/{build}/test_sessions/{session_id}/events".format(
- base=get_base_url(),
- org=self.organization,
- ws=self.workspace,
- build=self.build_name,
- session_id=self.session_id),
+ f"{get_base_url()}/intake/organizations/{self.organization}/workspaces/"
+ f"{self.workspace}/builds/{self.build_name}/test_sessions/{self.session_id}/events",
json=[],
status=404)
tracking = responses.add(
responses.POST,
- CLI_TRACKING_URL.format(
- base=get_base_url()),
+ f"{get_base_url()}/intake/cli_tracking",
body=ReadTimeout("error"))
- result = self.cli("record", "tests", "--session", self.session, "minitest", str(self.test_files_dir) + "/")
+ result = self.cli("record", "test", "minitest", "--session", self.session, str(self.test_files_dir) + "/")
self.assert_success(result)
- self.assert_tracking_count(tracking=tracking, count=2)
+ self.assert_tracking_count(tracking=tracking, count=3)

@responses.activate
- @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token})
+ @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token})
def test_all_workflow_when_server_down(self):
# setup verify
responses.add(
responses.GET,
- "{base}/intake/organizations/{org}/workspaces/{ws}/verification".format(
- base=get_base_url(),
- org=self.organization,
- ws=self.workspace),
+ f"{get_base_url()}/intake/organizations/{self.organization}/workspaces/{self.workspace}/verification",
body=ReadTimeout("error"))
tracking = responses.add(
responses.POST,
- CLI_TRACKING_URL.format(
- base=get_base_url()),
+ f"{get_base_url()}/intake/cli_tracking",
body=ReadTimeout("error"))
# setup state
responses.replace(
@@ -401,30 +364,19 @@ def test_all_workflow_when_server_down(self):
# setup build
responses.replace(
responses.POST,
- "{base}/intake/organizations/{org}/workspaces/{ws}/builds".format(
- base=get_base_url(),
- org=self.organization,
- ws=self.workspace),
+ f"{get_base_url()}/intake/organizations/{self.organization}/workspaces/{self.workspace}/builds",
body=ReadTimeout("error"))
# setup subset
responses.replace(
responses.GET,
- "{base}/intake/organizations/{org}/workspaces/{ws}/builds/{build}/test_sessions/{session_id}".format(
- base=get_base_url(),
- org=self.organization,
- ws=self.workspace,
- build=self.build_name,
- session_id=self.session_id),
+
f"{get_base_url()}/intake/organizations/{self.organization}/workspaces/" + f"{self.workspace}/builds/{self.build_name}/test_sessions/{self.session_id}", body=ReadTimeout("error")) # setup recording tests responses.replace( responses.POST, - "{base}/intake/organizations/{org}/workspaces/{ws}/builds/{build}/test_sessions/{session_id}/events".format( - base=get_base_url(), - org=self.organization, - ws=self.workspace, - build=self.build_name, - session_id=self.session_id), + f"{get_base_url()}/intake/organizations/{self.organization}/workspaces/" + f"{self.workspace}/builds/{self.build_name}/test_sessions/{self.session_id}/events", body=ReadTimeout("error")) # test commands @@ -433,35 +385,43 @@ def test_all_workflow_when_server_down(self): # Since Timeout error is caught inside of LaunchableClient, the tracking event is sent twice. self.assert_tracking_count(tracking=tracking, count=2) - result = self.cli("record", "build", "--name", "example") + result = self.cli( + "record", + "build", + "--build", + "example", + "--branch", + "main", + "--repo-branch-map", + ".=main", + "--no-commit-collection") self.assert_success(result) # Since Timeout error is caught inside of LaunchableClient, the tracking event is sent twice. - self.assert_tracking_count(tracking=tracking, count=6) + self.assert_tracking_count(tracking=tracking, count=5) # set delete=False to solve the error `PermissionError: [Errno 13] Permission denied:` on Windows. with tempfile.NamedTemporaryFile(delete=False) as rest_file: result = self.cli("subset", + "minitest", "--target", "30%", "--session", self.session, "--rest", rest_file.name, - "--observation", - "minitest", str(self.test_files_dir) + "/test/**/*.rb", mix_stderr=False) self.assert_success(result) # Since Timeout error is caught inside of LaunchableClient, the tracking event is sent twice. - self.assert_tracking_count(tracking=tracking, count=9) + self.assert_tracking_count(tracking=tracking, count=7) - result = self.cli("record", "tests", "--session", self.session, "minitest", str(self.test_files_dir) + "/") + result = self.cli("record", "test", "minitest", "--session", self.session, str(self.test_files_dir) + "/") self.assert_success(result) # Since Timeout error is caught inside of LaunchableClient, the tracking event is sent twice. - self.assert_tracking_count(tracking=tracking, count=13) + self.assert_tracking_count(tracking=tracking, count=11) def assert_tracking_count(self, tracking, count: int): # Prior to 3.6, `Response` object can't be obtained. 
if compare_version([int(x) for x in platform.python_version().split('.')], [3, 7]) >= 0:
- assert tracking.call_count == count
+ self.assertEqual(tracking.call_count, count)
diff --git a/tests/commands/test_flake_detection.py b/tests/commands/test_flake_detection.py
new file mode 100644
index 000000000..4589b8c1f
--- /dev/null
+++ b/tests/commands/test_flake_detection.py
@@ -0,0 +1,83 @@
+import os
+from unittest import mock
+
+import responses # type: ignore
+
+from smart_tests.utils.http_client import get_base_url
+from tests.cli_test_case import CliTestCase
+
+
+class FlakeDetectionTest(CliTestCase):
+ @responses.activate
+ @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token})
+ def test_flake_detection_success(self):
+ mock_json_response = {
+ "testPaths": [
+ [{"type": "file", "name": "test_flaky_1.py"}],
+ [{"type": "file", "name": "test_flaky_2.py"}],
+ ]
+ }
+ responses.add(
+ responses.GET,
+ f"{get_base_url()}/intake/organizations/{self.organization}/workspaces/{self.workspace}/retry/flake-detection",
+ json=mock_json_response,
+ status=200,
+ )
+ result = self.cli(
+ "retry",
+ "flake-detection",
+ "--session",
+ self.session,
+ "--confidence",
+ "high",
+ "file",
+ mix_stderr=False,
+ )
+ self.assert_success(result)
+ self.assertIn("test_flaky_1.py", result.stdout)
+ self.assertIn("test_flaky_2.py", result.stdout)
+
+ @responses.activate
+ @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token})
+ def test_flake_detection_no_flakes(self):
+ mock_json_response = {"testPaths": []}
+ responses.add(
+ responses.GET,
+ f"{get_base_url()}/intake/organizations/{self.organization}/workspaces/{self.workspace}/retry/flake-detection",
+ json=mock_json_response,
+ status=200,
+ )
+ result = self.cli(
+ "retry",
+ "flake-detection",
+ "--session",
+ self.session,
+ "--confidence",
+ "low",
+ "file",
+ mix_stderr=False,
+ )
+ self.assert_success(result)
+ self.assertEqual(result.stdout, "")
+
+ @responses.activate
+ @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token})
+ def test_flake_detection_api_error(self):
+ responses.add(
+ responses.GET,
+ f"{get_base_url()}/intake/organizations/{self.organization}/workspaces/{self.workspace}/retry/flake-detection",
+ status=500,
+ )
+ result = self.cli(
+ "retry",
+ "flake-detection",
+ "--session",
+ self.session,
+ "--confidence",
+ "medium",
+ "file",
+ mix_stderr=False,
+ )
+ self.assert_exit_code(result, 0)
+ self.assertIn("Error", result.stderr)
+ self.assertEqual(result.stdout, "")
diff --git a/tests/commands/test_helper.py b/tests/commands/test_helper.py
deleted file mode 100644
index 82a05c22f..000000000
--- a/tests/commands/test_helper.py
+++ /dev/null
@@ -1,72 +0,0 @@
-
-import os
-from io import StringIO
-from unittest import mock
-
-import responses # type: ignore
-
-from launchable.commands.helper import _check_observation_mode_status
-from launchable.utils.commands import Command
-from launchable.utils.http_client import get_base_url
-from launchable.utils.tracking import TrackingClient
-from tests.cli_test_case import CliTestCase
-
-
-class HelperTest(CliTestCase):
-
- @responses.activate
- @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token})
- def test_check_observation_mode_status(self):
- test_session = "builds/{}/test_sessions/{}".format(
- self.build_name,
- self.session_id,
- )
-
- tracking_client = TrackingClient(Command.RECORD_TESTS)
-
- with mock.patch('sys.stderr', new=StringIO()) as stderr:
-
_check_observation_mode_status(test_session, False, tracking_client=tracking_client) - print(stderr.getvalue()) - self.assertNotIn("WARNING:", stderr.getvalue()) - - request_path = "{}/intake/organizations/{}/workspaces/{}/{}".format( - get_base_url(), - self.organization, - self.workspace, - test_session, - ) - - with mock.patch('sys.stderr', new=StringIO()) as stderr: - responses.replace( - responses.GET, - request_path, - json={ - "isObservation": False - }, status=200) - - _check_observation_mode_status(test_session, True, tracking_client=tracking_client) - self.assertIn("WARNING:", stderr.getvalue()) - - with mock.patch('sys.stderr', new=StringIO()) as stderr: - responses.replace( - responses.GET, - request_path, - json={ - "isObservation": True - }, status=200) - - _check_observation_mode_status(test_session, True, tracking_client=tracking_client) - self.assertNotIn("WARNING:", stderr.getvalue()) - - with mock.patch('sys.stderr', new=StringIO()) as stderr: - responses.replace( - responses.GET, - request_path, - json={ - "isObservation": True - }, status=404) - - _check_observation_mode_status(test_session, False, tracking_client=tracking_client) - - # not check when status isn't 200 - self.assertNotIn("WARNING:", stderr.getvalue()) diff --git a/tests/commands/test_split_subset.py b/tests/commands/test_split_subset.py deleted file mode 100644 index 601ddba6c..000000000 --- a/tests/commands/test_split_subset.py +++ /dev/null @@ -1,277 +0,0 @@ -import os -import tempfile -from unittest import mock - -import responses # type: ignore - -from launchable.commands.split_subset import (SPLIT_BY_GROUP_REST_GROUPS_FILE_NAME, - SPLIT_BY_GROUP_SUBSET_GROUPS_FILE_NAME, SPLIT_BY_GROUPS_NO_GROUP_NAME) -from launchable.utils.http_client import get_base_url -from tests.cli_test_case import CliTestCase - - -class SplitSubsetTest(CliTestCase): - @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) - def test_split_subset_with_observation_mode(self): - pipe = "test_1.py\ntest_2.py\ntest_3.py\ntest_4.py\ntest_5.py\ntest_6.py" - mock_json_response = { - "testPaths": [ - [{"type": "file", "name": "test_1.py"}], - [{"type": "file", "name": "test_3.py"}], - - ], - "rest": [ - [{"type": "file", "name": "test_5.py"}], - - ], - "subsettingId": 456, - "isObservation": False, - } - - responses.replace( - responses.POST, - "{}/intake/organizations/{}/workspaces/{}/subset/{}/slice".format( - get_base_url(), - self.organization, - self.workspace, - self.subsetting_id), - json=mock_json_response, - status=200) - - rest = tempfile.NamedTemporaryFile(delete=False) - result = self.cli("split-subset", "--subset-id", "subset/456", "--bin", "1/2", "--rest", - rest.name, "file", mix_stderr=False, input=pipe) - self.assert_success(result) - self.assertEqual(result.stdout, "test_1.py\ntest_3.py\n") - self.assertEqual(rest.read().decode(), os.linesep.join(["test_5.py"])) - rest.close() - os.unlink(rest.name) - - mock_json_response["isObservation"] = True - responses.replace( - responses.POST, - "{}/intake/organizations/{}/workspaces/{}/subset/{}/slice".format( - get_base_url(), - self.organization, - self.workspace, - self.subsetting_id), - json=mock_json_response, - status=200) - - observation_mode_rest = tempfile.NamedTemporaryFile(delete=False) - result = self.cli("split-subset", "--subset-id", "subset/456", "--bin", "1/2", "--rest", - observation_mode_rest.name, "file", mix_stderr=False, input=pipe) - - self.assert_success(result) - self.assertEqual(result.stdout, 
"test_1.py\ntest_3.py\ntest_5.py\n") - self.assertEqual(observation_mode_rest.read().decode(), "") - observation_mode_rest.close() - os.unlink(observation_mode_rest.name) - - @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) - def test_split_subset(self): - pipe = "test_1.py\ntest_2.py\ntest_3.py\ntest_4.py\ntest_5.py\ntest_6.py" - mock_json_response = { - "testPaths": [ - [{"type": "file", "name": "test_1.py"}], - [{"type": "file", "name": "test_6.py"}], - ], - "rest": [], - "subsettingId": 456, - "isObservation": False, - } - - responses.replace( - responses.POST, - "{}/intake/organizations/{}/workspaces/{}/subset/{}/slice".format( - get_base_url(), - self.organization, - self.workspace, - self.subsetting_id), - json=mock_json_response, - status=200) - - result = self.cli( - "split-subset", - "--subset-id", - "subset/456", - "--bin", - "1/2", - "file", - mix_stderr=False, - input=pipe, - ) - self.assert_success(result) - self.assertEqual(result.stdout, "test_1.py\ntest_6.py\n") - - @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) - def test_split_by_group_names(self): - mock_json_response = { - "subsettingId": self.subsetting_id, - "isObservation": False, - "splitGroups": [ - { - "groupName": "e2e", - "subset": [ - [{"type": "file", "name": "e2e-aaa.py"}], - [{"type": "file", "name": "e2e-bbb.py"}], - ], - "rest": [ - [{"type": "file", "name": "e2e-ccc.py"}], - [{"type": "file", "name": "e2e-ddd.py"}], - ] - }, - { - "groupName": "unit-test", - "subset": [], - "rest": [ - [{"type": "file", "name": "unit-test-111.py"}], - [{"type": "file", "name": "unit-test-222.py"}], - ] - }, - { - "groupName": "nogroup", - "subset": [ - [{"type": "file", "name": "aaa.py"}], - [{"type": "file", "name": "bbb.py"}], - ], - "rest": [ - [{"type": "file", "name": "111.py"}], - [{"type": "file", "name": "222.py"}], - ], - } - ] - } - - """ - Note(Konboi): - Don't know the cause, but in the Python 3.10 environment, - the settings configured with responses.replace disappear on the second call. - see: https://github.com/cloudbees-oss/smart-tests-cli/actions/runs/11697720998/job/32576899978#step:10:88 - So, to call it each time, `replace_response` was defined. 
- """ - def replace_response(): - responses.replace( - responses.POST, - "{base_url}/intake/organizations/{organization}/workspaces/{workspace}/subset/{subset_id}/split-by-groups".format( - base_url=get_base_url(), - organization=self.organization, - workspace=self.workspace, - subset_id=self.subsetting_id, - ), - json=mock_json_response, - status=200 - ) - - with tempfile.TemporaryDirectory() as tmpdir: - replace_response() - result = self.cli("split-subset", "--subset-id", "subset/{}".format(self.subsetting_id), - "--split-by-groups", "--split-by-groups-output-dir", tmpdir, "file") - - self.assert_contents("{}/subset-e2e.txt".format(tmpdir), "e2e-aaa.py\ne2e-bbb.py") - self.assert_contents("{}/subset-{}.txt".format(tmpdir, SPLIT_BY_GROUPS_NO_GROUP_NAME), "aaa.py\nbbb.py") - # check the group file - self.assert_contents("{}/{}".format(tmpdir, SPLIT_BY_GROUP_SUBSET_GROUPS_FILE_NAME), "e2e") - - # server doesn't return subset of unit-test - self.assert_file_exists("{}/subset-unit-test.txt".format(tmpdir), False) - - # doesn't set the --rest option - self.assert_file_exists("{}/rest-e2e.txt".format(tmpdir), False) - self.assert_file_exists("{}/rest-unit-test.txt".format(tmpdir), False) - self.assert_file_exists("{}/rest-{}.txt".format(tmpdir, SPLIT_BY_GROUPS_NO_GROUP_NAME), False) - self.assert_file_exists("{}/{}".format(tmpdir, SPLIT_BY_GROUP_REST_GROUPS_FILE_NAME), False) - - # with rest option - with tempfile.TemporaryDirectory() as tmpdir: - replace_response() - result = self.cli("split-subset", "--subset-id", "subset/{}".format(self.subsetting_id), - "--split-by-groups-with-rest", "--split-by-groups-output-dir", tmpdir, "file", mix_stderr=False) - - self.assert_success(result) - self.assert_contents("{}/subset-e2e.txt".format(tmpdir), "e2e-aaa.py\ne2e-bbb.py") - self.assert_contents("{}/rest-e2e.txt".format(tmpdir), "e2e-ccc.py\ne2e-ddd.py") - - # server doesn't return subset of unit-test - self.assert_file_exists("{}/subset-unit-test.txt".format(tmpdir), False) - self.assert_contents("{}/rest-unit-test.txt".format(tmpdir), "unit-test-111.py\nunit-test-222.py") - - self.assert_contents("{}/subset-{}.txt".format(tmpdir, SPLIT_BY_GROUPS_NO_GROUP_NAME), "aaa.py\nbbb.py") - self.assert_contents("{}/rest-{}.txt".format(tmpdir, SPLIT_BY_GROUPS_NO_GROUP_NAME), "111.py\n222.py") - - # check the group file - self.assert_contents("{}/{}".format(tmpdir, SPLIT_BY_GROUP_SUBSET_GROUPS_FILE_NAME), "e2e") - self.assert_contents("{}/{}".format(tmpdir, SPLIT_BY_GROUP_REST_GROUPS_FILE_NAME), "unit-test") - - @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) - def test_split_by_group_names_output_exclusion_rules(self): - mock_json_response = { - "subsettingId": self.subsetting_id, - "isObservation": False, - "splitGroups": [ - { - "groupName": "e2e", - "subset": [ - [{"type": "file", "name": "e2e-aaa.py"}], - [{"type": "file", "name": "e2e-bbb.py"}], - ], - "rest": [ - [{"type": "file", "name": "e2e-ccc.py"}], - [{"type": "file", "name": "e2e-ddd.py"}], - ] - }, - { - "groupName": "unit-test", - "subset": [], - "rest": [ - [{"type": "file", "name": "unit-test-111.py"}], - [{"type": "file", "name": "unit-test-222.py"}], - ] - }, - { - "groupName": "nogroup", - "subset": [ - [{"type": "file", "name": "aaa.py"}], - [{"type": "file", "name": "bbb.py"}], - ], - "rest": [ - [{"type": "file", "name": "111.py"}], - [{"type": "file", "name": "222.py"}], - ], - } - ] - } - - responses.replace( - responses.POST, - 
"{base_url}/intake/organizations/{organization}/workspaces/{workspace}/subset/{subset_id}/split-by-groups".format( - base_url=get_base_url(), - organization=self.organization, - workspace=self.workspace, - subset_id=self.subsetting_id, - ), - json=mock_json_response, - status=200 - ) - - with tempfile.TemporaryDirectory() as tmpdir: - result = self.cli("split-subset", "--subset-id", "subset/{}".format(self.subsetting_id), - "--split-by-groups", "--split-by-groups-output-dir", tmpdir, '--output-exclusion-rules', "file") - - self.assert_success(result) - - # --output-exclusion-rules is enabled, thus switched subset and rest - self.assert_contents("{}/subset-e2e.txt".format(tmpdir), "e2e-ccc.py\ne2e-ddd.py") - self.assert_contents("{}/subset-unit-test.txt".format(tmpdir), "unit-test-111.py\nunit-test-222.py") - self.assert_contents("{}/subset-{}.txt".format(tmpdir, SPLIT_BY_GROUPS_NO_GROUP_NAME), "111.py\n222.py") - self.assert_contents("{}/{}".format(tmpdir, SPLIT_BY_GROUP_SUBSET_GROUPS_FILE_NAME), "unit-test") - - # doesn't set the --rest option - self.assert_file_exists("{}/rest-e2e.txt".format(tmpdir), False) - self.assert_file_exists("{}/rest-unit-test.txt".format(tmpdir), False) - self.assert_file_exists("{}/rest-{}.txt".format(tmpdir, SPLIT_BY_GROUPS_NO_GROUP_NAME), False) - self.assert_file_exists("{}/{}".format(tmpdir, SPLIT_BY_GROUP_REST_GROUPS_FILE_NAME), False) diff --git a/tests/commands/test_subset.py b/tests/commands/test_subset.py index 7781ba71c..afc5d75ee 100644 --- a/tests/commands/test_subset.py +++ b/tests/commands/test_subset.py @@ -1,18 +1,16 @@ -import gzip -import json import os import tempfile from unittest import mock import responses # type: ignore -from launchable.utils.http_client import get_base_url +from smart_tests.utils.http_client import get_base_url from tests.cli_test_case import CliTestCase class SubsetTest(CliTestCase): @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_subset(self): pipe = "test_1.py\ntest_2.py\ntest_3.py\ntest_4.py" mock_json_response = { @@ -34,16 +32,23 @@ def test_subset(self): }, "isObservation": False, } - responses.replace(responses.POST, "{}/intake/organizations/{}/workspaces/{}/subset".format( - get_base_url(), - self.organization, - self.workspace), - json=mock_json_response, - status=200) + responses.replace(responses.POST, + f"{get_base_url()}/intake/organizations/{self.organization}/workspaces/{self.workspace}/subset", + json=mock_json_response, + status=200) rest = tempfile.NamedTemporaryFile(delete=False) - result = self.cli("subset", "--target", "30%", "--session", - self.session, "--rest", rest.name, "file", mix_stderr=False, input=pipe) + result = self.cli( + "subset", + "file", + "--target", + "30%", + "--session", + self.session, + "--rest", + rest.name, + mix_stderr=False, + input=pipe) self.assert_success(result) self.assertEqual(result.stdout, "test_1.py\ntest_2.py\n") self.assertEqual(rest.read().decode(), os.linesep.join(["test_3.py", "test_4.py"])) @@ -67,16 +72,23 @@ def test_subset(self): }, "isObservation": False, } - responses.replace(responses.POST, "{}/intake/organizations/{}/workspaces/{}/subset".format( - get_base_url(), - self.organization, - self.workspace), - json=mock_json_response, - status=200) + responses.replace(responses.POST, + f"{get_base_url()}/intake/organizations/{self.organization}/workspaces/{self.workspace}/subset", + json=mock_json_response, 
+ status=200) rest = tempfile.NamedTemporaryFile(delete=False) - result = self.cli("subset", "--target", "30%", "--session", - self.session, "--rest", rest.name, "file", mix_stderr=False, input=pipe) + result = self.cli( + "subset", + "file", + "--target", + "30%", + "--session", + self.session, + "--rest", + rest.name, + mix_stderr=False, + input=pipe) self.assert_success(result) self.assertEqual(result.stdout, "test_1.py\ntest_2.py\ntest_3.py\ntest_4.py\n") self.assertEqual(rest.read().decode(), "") @@ -84,8 +96,8 @@ def test_subset(self): os.unlink(rest.name) @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) - def test_subset_with_observation_mode(self): + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) + def test_subset_with_observation_session(self): pipe = "test_1.py\ntest_2.py\ntest_3.py\ntest_4.py" mock_json_response = { "testPaths": [ @@ -106,24 +118,21 @@ def test_subset_with_observation_mode(self): "isObservation": True, } - responses.replace(responses.POST, "{}/intake/organizations/{}/workspaces/{}/subset".format( - get_base_url(), - self.organization, - self.workspace), - json=mock_json_response, - status=200) + responses.replace(responses.POST, + f"{get_base_url()}/intake/organizations/{self.organization}/workspaces/{self.workspace}/subset", + json=mock_json_response, + status=200) observation_mode_rest = tempfile.NamedTemporaryFile(delete=False) result = self.cli( "subset", + "file", "--target", "30%", "--session", self.session, "--rest", observation_mode_rest.name, - "--observation", - "file", input=pipe, mix_stderr=False) self.assert_success(result) @@ -150,16 +159,23 @@ def test_subset_with_observation_mode(self): }, "isObservation": True, } - responses.replace(responses.POST, "{}/intake/organizations/{}/workspaces/{}/subset".format( - get_base_url(), - self.organization, - self.workspace), - json=mock_json_response, - status=200) + responses.replace(responses.POST, + f"{get_base_url()}/intake/organizations/{self.organization}/workspaces/{self.workspace}/subset", + json=mock_json_response, + status=200) rest = tempfile.NamedTemporaryFile(delete=False) - result = self.cli("subset", "--target", "30%", "--session", - self.session, "--rest", rest.name, "--observation", "file", mix_stderr=False, input=pipe) + result = self.cli( + "subset", + "file", + "--target", + "30%", + "--session", + self.session, + "--rest", + rest.name, + mix_stderr=False, + input=pipe) self.assert_success(result) self.assertEqual(result.stdout, "test_1.py\ntest_2.py\ntest_3.py\ntest_4.py\n") self.assertEqual(rest.read().decode(), "") @@ -167,15 +183,12 @@ def test_subset_with_observation_mode(self): os.unlink(rest.name) @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_subset_targetless(self): pipe = "test_aaa.py\ntest_bbb.py\ntest_ccc.py\ntest_eee.py\ntest_fff.py\ntest_ggg.py" responses.replace( responses.POST, - "{}/intake/organizations/{}/workspaces/{}/subset".format( - get_base_url(), - self.organization, - self.workspace), + f"{get_base_url()}/intake/organizations/{self.organization}/workspaces/{self.workspace}/subset", json={ "testPaths": [ [{"type": "file", "name": "test_aaa.py"}], @@ -198,26 +211,23 @@ def test_subset_targetless(self): result = self.cli( "subset", + "file", "--session", self.session, - "file", input=pipe, mix_stderr=False) 
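# The test-runner name ("file", "minitest", ...) is the first argument after the
# subcommand in the Typer-based CLI. A rough shell equivalent of the invocation
# above, assuming the installed entry point is `smart-tests` and that the test
# list is piped in on stdin:
#
#   cat tests.txt | smart-tests subset file --target 30% --session "$SESSION" --rest rest.txt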
self.assert_success(result) - payload = json.loads(gzip.decompress(responses.calls[1].request.body).decode()) + payload = self.decode_request_body(self.find_request('/subset').request.body) self.assertTrue(payload.get('useServerSideOptimizationTarget')) @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_subset_goalspec(self): # make sure --goal-spec gets translated properly to a JSON request payload responses.replace( responses.POST, - "{}/intake/organizations/{}/workspaces/{}/subset".format( - get_base_url(), - self.organization, - self.workspace), + f"{get_base_url()}/intake/organizations/{self.organization}/workspaces/{self.workspace}/subset", json={ "testPaths": [ [{"type": "file", "name": "test_aaa.py"}], @@ -230,27 +240,22 @@ def test_subset_goalspec(self): result = self.cli( "subset", - "--session", - self.session, - "--goal-spec", - "foo(),bar(zot=3%)", "file", + "--session", self.session, + "--goal-spec", "foo(),bar(zot=3%)", input="test_aaa.py") self.assert_success(result) - payload = json.loads(gzip.decompress(responses.calls[1].request.body).decode()) + payload = self.decode_request_body(self.find_request('/subset').request.body) self.assertEqual(payload.get('goal').get('goal'), "foo(),bar(zot=3%)") @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_subset_ignore_flaky_tests_above(self): pipe = "test_aaa.py\ntest_bbb.py\ntest_ccc.py\ntest_flaky.py" responses.replace( responses.POST, - "{}/intake/organizations/{}/workspaces/{}/subset".format( - get_base_url(), - self.organization, - self.workspace), + f"{get_base_url()}/intake/organizations/{self.organization}/workspaces/{self.workspace}/subset", json={ "testPaths": [ [{"type": "file", "name": "test_aaa.py"}], @@ -271,32 +276,27 @@ def test_subset_ignore_flaky_tests_above(self): result = self.cli( "subset", - "--session", - self.session, - "--ignore-flaky-tests-above", - 0.05, "file", + "--session", self.session, + "--ignore-flaky-tests-above", 0.05, input=pipe, mix_stderr=False) self.assert_success(result) - payload = json.loads(gzip.decompress(responses.calls[1].request.body).decode()) + payload = self.decode_request_body(self.find_request('/subset').request.body) self.assertEqual(payload.get('dropFlakinessThreshold'), 0.05) @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_subset_with_get_tests_from_previous_full_runs(self): # check error when input candidates are empty without --get-tests-from-previous-sessions option - result = self.cli("subset", "--target", "30%", "--session", self.session, "file") + result = self.cli("subset", "file", "--target", "30%", "--session", self.session) self.assert_exit_code(result, 1) self.assertIn("use the `--get-tests-from-previous-sessions` option", result.stdout) responses.replace( responses.POST, - "{}/intake/organizations/{}/workspaces/{}/subset".format( - get_base_url(), - self.organization, - self.workspace), + f"{get_base_url()}/intake/organizations/{self.organization}/workspaces/{self.workspace}/subset", json={ "testPaths": [ [{"type": "file", "name": "test_aaa.py"}], @@ -320,6 +320,7 @@ def test_subset_with_get_tests_from_previous_full_runs(self): rest 
= tempfile.NamedTemporaryFile(delete=False) result = self.cli( "subset", + "file", "--target", "30%", "--session", @@ -327,7 +328,6 @@ def test_subset_with_get_tests_from_previous_full_runs(self): "--rest", rest.name, "--get-tests-from-previous-sessions", - "file", mix_stderr=False) self.assert_success(result) @@ -337,15 +337,12 @@ def test_subset_with_get_tests_from_previous_full_runs(self): os.unlink(rest.name) @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_subset_with_output_exclusion_rules(self): pipe = "test_aaa.py\ntest_111.py\ntest_bbb.py\ntest_222.py\ntest_ccc.py\ntest_333.py\n" responses.replace( responses.POST, - "{}/intake/organizations/{}/workspaces/{}/subset".format( - get_base_url(), - self.organization, - self.workspace), + f"{get_base_url()}/intake/organizations/{self.organization}/workspaces/{self.workspace}/subset", json={ "testPaths": [ [{"type": "file", "name": "test_aaa.py"}], @@ -368,13 +365,13 @@ def test_subset_with_output_exclusion_rules(self): rest = tempfile.NamedTemporaryFile(delete=False) result = self.cli( "subset", + "file", "--target", "70%", "--session", self.session, "--rest", rest.name, - "file", input=pipe, mix_stderr=False) @@ -387,6 +384,7 @@ def test_subset_with_output_exclusion_rules(self): rest = tempfile.NamedTemporaryFile(delete=False) result = self.cli( "subset", + "file", "--target", "70%", "--session", @@ -394,7 +392,6 @@ def test_subset_with_output_exclusion_rules(self): "--rest", rest.name, "--output-exclusion-rules", - "file", input=pipe, mix_stderr=False) @@ -408,10 +405,7 @@ def test_subset_with_output_exclusion_rules(self): # case: reset is empty responses.replace( responses.POST, - "{}/intake/organizations/{}/workspaces/{}/subset".format( - get_base_url(), - self.organization, - self.workspace), + f"{get_base_url()}/intake/organizations/{self.organization}/workspaces/{self.workspace}/subset", json={ "testPaths": [ [{"type": "file", "name": "test_aaa.py"}], @@ -433,58 +427,7 @@ def test_subset_with_output_exclusion_rules(self): rest = tempfile.NamedTemporaryFile(delete=False) result = self.cli( "subset", - "--target", - "70%", - "--session", - self.session, - "--rest", - rest.name, - "--output-exclusion-rules", "file", - input=pipe, - mix_stderr=False) - - self.assert_success(result) - self.assertEqual(result.stdout, "") - - self.assertEqual(rest.read().decode(), os.linesep.join( - ["test_aaa.py", "test_bbb.py", "test_ccc.py", "test_111.py", "test_222.py", "test_333.py"])) - rest.close() - os.unlink(rest.name) - - @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) - def test_subset_with_observation_and_output_exclusion_rules(self): - pipe = "test_aaa.py\ntest_111.py\ntest_bbb.py\ntest_222.py\ntest_ccc.py\ntest_333.py\n" - responses.replace( - responses.POST, - "{}/intake/organizations/{}/workspaces/{}/subset".format( - get_base_url(), - self.organization, - self.workspace), - json={ - "testPaths": [ - [{"type": "file", "name": "test_aaa.py"}], - [{"type": "file", "name": "test_bbb.py"}], - [{"type": "file", "name": "test_ccc.py"}], - ], - "testRunner": "file", - "rest": [ - [{"type": "file", "name": "test_111.py"}], - [{"type": "file", "name": "test_222.py"}], - [{"type": "file", "name": "test_333.py"}], - ], - "subsettingId": 123, - "summary": { - "subset": {"duration": 15, "candidates": 3, "rate": 70}, - "rest": {"duration": 6, 
"candidates": 3, "rate": 30} - }, - "isObservation": True, - }, status=200) - - rest = tempfile.NamedTemporaryFile(delete=False) - result = self.cli( - "subset", "--target", "70%", "--session", @@ -492,14 +435,11 @@ def test_subset_with_observation_and_output_exclusion_rules(self): "--rest", rest.name, "--output-exclusion-rules", - "--observation", - "file", input=pipe, mix_stderr=False) self.assert_success(result) self.assertEqual(result.stdout, "") - self.assertIn("Warning: --observation and --output-exclusion-rules are set.", result.stderr) self.assertEqual(rest.read().decode(), os.linesep.join( ["test_aaa.py", "test_bbb.py", "test_ccc.py", "test_111.py", "test_222.py", "test_333.py"])) @@ -507,15 +447,12 @@ def test_subset_with_observation_and_output_exclusion_rules(self): os.unlink(rest.name) @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_subset_prioritize_tests_failed_within_hours(self): pipe = "test_aaa.py\ntest_bbb.py\ntest_ccc.py\ntest_flaky.py" responses.replace( responses.POST, - "{}/intake/organizations/{}/workspaces/{}/subset".format( - get_base_url(), - self.organization, - self.workspace), + f"{get_base_url()}/intake/organizations/{self.organization}/workspaces/{self.workspace}/subset", json={ "testPaths": [ [{"type": "file", "name": "test_aaa.py"}], @@ -536,20 +473,21 @@ def test_subset_prioritize_tests_failed_within_hours(self): result = self.cli( "subset", + "file", "--session", self.session, "--prioritize-tests-failed-within-hours", 24, - "file", input=pipe, mix_stderr=False) + self.assert_success(result) - payload = json.loads(gzip.decompress(responses.calls[1].request.body).decode()) + payload = self.decode_request_body(self.find_request('/subset').request.body) self.assertEqual(payload.get('hoursToPrioritizeFailedTest'), 24) @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_subset_with_get_tests_from_guess(self): responses.replace( responses.GET, @@ -576,19 +514,11 @@ def test_subset_with_get_tests_from_guess(self): status=[200] ) - result = self.cli( - "subset", - "--session", - self.session, - "--get-tests-from-guess", - "file", - ) - + result = self.cli("subset", "file", "--session", self.session, "--get-tests-from-guess") self.assert_success(result) - """ 1. request to /state 2. 
request to /subset with test paths that are collected from auto collection """ - payload = json.loads(gzip.decompress(responses.calls[1].request.body).decode()) + payload = self.decode_request_body(self.find_request('/subset').request.body) self.assertIn([{"type": "file", "name": "tests/commands/test_subset.py"}], payload.get("testPaths", [])) diff --git a/tests/commands/test_verify.py b/tests/commands/test_verify.py index 879cee51c..9f4339111 100644 --- a/tests/commands/test_verify.py +++ b/tests/commands/test_verify.py @@ -2,7 +2,7 @@ from unittest import TestCase from unittest.mock import patch -from launchable.commands.verify import check_java_version, compare_java_version, compare_version +from smart_tests.commands.verify import check_java_version, compare_java_version, compare_version class VersionTest(TestCase): @@ -40,7 +40,7 @@ def test_java_version(self): """ ) < 0) - @patch('launchable.commands.verify.subprocess.run') + @patch('smart_tests.commands.verify.subprocess.run') def test_check_java_version(self, mock_run): mock_run.side_effect = CalledProcessError(1, 'java -version') result = check_java_version('java') diff --git a/tests/data/adb/subset_result.json b/tests/data/adb/subset_result.json index de9dc0ad2..e6cff33d1 100644 --- a/tests/data/adb/subset_result.json +++ b/tests/data/adb/subset_result.json @@ -1,10 +1,10 @@ { "testPaths": [ [ - { "type": "class", "name": "com.launchableinc.rocketcar.ExampleInstrumentedTest2" } + { "type": "class", "name": "com.example.sampleapp.ExampleInstrumentedTest2" } ], [ - { "type": "class", "name": "com.launchableinc.rocketcar.ExampleInstrumentedTest" } + { "type": "class", "name": "com.example.sampleapp.ExampleInstrumentedTest" } ] ], "testRunner": "adb", diff --git a/tests/data/ant/junitreport/TEST-com.launchable.library.CacheTest.xml b/tests/data/ant/junitreport/TEST-com.example.HelloWorldTest.xml similarity index 82% rename from tests/data/ant/junitreport/TEST-com.launchable.library.CacheTest.xml rename to tests/data/ant/junitreport/TEST-com.example.HelloWorldTest.xml index e17b22c72..29f266cf8 100644 --- a/tests/data/ant/junitreport/TEST-com.launchable.library.CacheTest.xml +++ b/tests/data/ant/junitreport/TEST-com.example.HelloWorldTest.xml @@ -1,5 +1,5 @@ - + @@ -50,7 +50,7 @@ - + @@ -66,14 +66,19 @@ - + - + + junit.framework.AssertionFailedError: An error message + at com.example.HelloWorldTest.testWillAlwaysFail(Unknown Source) + + + diff --git a/tests/data/ant/junitreport/TEST-com.launchable.HelloWorldTest.xml b/tests/data/ant/junitreport/TEST-com.example.library.CacheTest.xml similarity index 86% rename from tests/data/ant/junitreport/TEST-com.launchable.HelloWorldTest.xml rename to tests/data/ant/junitreport/TEST-com.example.library.CacheTest.xml index e34964824..b09681607 100644 --- a/tests/data/ant/junitreport/TEST-com.launchable.HelloWorldTest.xml +++ b/tests/data/ant/junitreport/TEST-com.example.library.CacheTest.xml @@ -1,5 +1,5 @@ - + @@ -50,7 +50,7 @@ - + @@ -66,19 +66,14 @@ - + - - junit.framework.AssertionFailedError: An error message - at com.launchable.HelloWorldTest.testWillAlwaysFail(Unknown Source) - - - + diff --git a/tests/data/ant/junitreport/TESTS-TestSuites.xml b/tests/data/ant/junitreport/TESTS-TestSuites.xml index f6771eae0..a31566c18 100644 --- a/tests/data/ant/junitreport/TESTS-TestSuites.xml +++ b/tests/data/ant/junitreport/TESTS-TestSuites.xml @@ -1,6 +1,6 @@ - + @@ -100,7 +100,7 @@ - + @@ -132,7 +132,7 @@ - + @@ -146,21 +146,21 @@ - + junit.framework.AssertionFailedError: An error message - at 
com.launchable.HelloWorldTest.testWillAlwaysFail(Unknown Source) + at com.example.HelloWorldTest.testWillAlwaysFail(Unknown Source) - + - + @@ -260,7 +260,7 @@ - + @@ -292,7 +292,7 @@ - + @@ -306,7 +306,7 @@ - + diff --git a/tests/data/ant/record_test_result.json b/tests/data/ant/record_test_result.json index fc12d05ae..f897a8bc6 100644 --- a/tests/data/ant/record_test_result.json +++ b/tests/data/ant/record_test_result.json @@ -5,7 +5,7 @@ "testPath": [ { "type": "class", - "name": "com.launchable.HelloWorldTest" + "name": "com.example.HelloWorldTest" }, { "type": "testcase", @@ -15,7 +15,7 @@ "duration": 0.006, "status": 0, "stdout": "", - "stderr": "junit.framework.AssertionFailedError: An error message\n\tat com.launchable.HelloWorldTest.testWillAlwaysFail(Unknown Source)\n", + "stderr": "junit.framework.AssertionFailedError: An error message\n\tat com.example.HelloWorldTest.testWillAlwaysFail(Unknown Source)\n", "data": null }, { @@ -23,7 +23,7 @@ "testPath": [ { "type": "class", - "name": "com.launchable.HelloWorldTest" + "name": "com.example.HelloWorldTest" }, { "type": "testcase", @@ -41,7 +41,7 @@ "testPath": [ { "type": "class", - "name": "com.launchable.library.CacheTest" + "name": "com.example.library.CacheTest" }, { "type": "testcase", diff --git a/tests/data/ant/src/com/launchable/HelloWorld.java b/tests/data/ant/src/com/example/HelloWorld.java similarity index 92% rename from tests/data/ant/src/com/launchable/HelloWorld.java rename to tests/data/ant/src/com/example/HelloWorld.java index c31cfee7e..16ff2a9c6 100644 --- a/tests/data/ant/src/com/launchable/HelloWorld.java +++ b/tests/data/ant/src/com/example/HelloWorld.java @@ -1,4 +1,4 @@ -package com.launchable; +package com.example; import org.apache.log4j.Logger; import org.apache.log4j.BasicConfigurator; diff --git a/tests/data/ant/src/com/launchable/HelloWorldTest.java b/tests/data/ant/src/com/example/HelloWorldTest.java similarity index 90% rename from tests/data/ant/src/com/launchable/HelloWorldTest.java rename to tests/data/ant/src/com/example/HelloWorldTest.java index 37a416fa9..9da79395c 100644 --- a/tests/data/ant/src/com/launchable/HelloWorldTest.java +++ b/tests/data/ant/src/com/example/HelloWorldTest.java @@ -1,4 +1,4 @@ -package com.launchable; +package com.example; import org.junit.Test; diff --git a/tests/data/ant/src/com/launchable/library/Cache.java b/tests/data/ant/src/com/example/library/Cache.java similarity index 90% rename from tests/data/ant/src/com/launchable/library/Cache.java rename to tests/data/ant/src/com/example/library/Cache.java index cb619a899..96f68854f 100644 --- a/tests/data/ant/src/com/launchable/library/Cache.java +++ b/tests/data/ant/src/com/example/library/Cache.java @@ -1,4 +1,4 @@ -package com.launchable.library; +package com.example.library; import java.util.HashMap; diff --git a/tests/data/ant/src/com/launchable/library/CacheTest.java b/tests/data/ant/src/com/example/library/CacheTest.java similarity index 90% rename from tests/data/ant/src/com/launchable/library/CacheTest.java rename to tests/data/ant/src/com/example/library/CacheTest.java index 99498d549..3e1fa2005 100644 --- a/tests/data/ant/src/com/launchable/library/CacheTest.java +++ b/tests/data/ant/src/com/example/library/CacheTest.java @@ -1,4 +1,4 @@ -package com.launchable.library; +package com.example.library; import org.junit.Test; diff --git a/tests/data/ant/subset_result.json b/tests/data/ant/subset_result.json index 47bd8bfbd..3877c9755 100644 --- a/tests/data/ant/subset_result.json +++ 
b/tests/data/ant/subset_result.json @@ -1,7 +1,7 @@ { "testPaths": [ - [{ "type": "class", "name": "com.launchable.HelloWorldTest" }], - [{ "type": "class", "name": "com.launchable.library.CacheTest" }] + [{ "type": "class", "name": "com.example.HelloWorldTest" }], + [{ "type": "class", "name": "com.example.library.CacheTest" }] ], "testRunner": "ant", "session": { "id": "16" }, diff --git a/tests/data/dotnet/record_test_result.json b/tests/data/dotnet/record_test_result.json index 90aa4df51..5d507b1db 100644 --- a/tests/data/dotnet/record_test_result.json +++ b/tests/data/dotnet/record_test_result.json @@ -5,11 +5,11 @@ "testPath": [ { "type": "Assembly", - "name": "rocket-car-dotnet.dll" + "name": "sample-app-dotnet.dll" }, { "type": "TestSuite", - "name": "rocket_car_dotnet" + "name": "sample_app_dotnet" }, { "type": "TestSuite", @@ -31,11 +31,11 @@ "testPath": [ { "type": "Assembly", - "name": "rocket-car-dotnet.dll" + "name": "sample-app-dotnet.dll" }, { "type": "TestSuite", - "name": "rocket_car_dotnet" + "name": "sample_app_dotnet" }, { "type": "TestSuite", @@ -49,7 +49,7 @@ "duration": 0.016795, "status": 0, "stdout": "", - "stderr": " Expected: 0.5d\n But was: 0\n\n at rocket_car_dotnet.ExampleTest.TestDiv() in /Users/yabuki-ryosuke/src/github.com/launchableinc/examples/dotnet/ExampleTest.cs:line 35\n", + "stderr": " Expected: 0.5d\n But was: 0\n\n at sample_app_dotnet.ExampleTest.TestDiv() in /Users/yabuki-ryosuke/src/github.com/launchableinc/examples/dotnet/ExampleTest.cs:line 35\n", "data": null }, { @@ -57,11 +57,11 @@ "testPath": [ { "type": "Assembly", - "name": "rocket-car-dotnet.dll" + "name": "sample-app-dotnet.dll" }, { "type": "TestSuite", - "name": "rocket_car_dotnet" + "name": "sample_app_dotnet" }, { "type": "TestSuite", @@ -75,7 +75,7 @@ "duration": 0.0003959, "status": 0, "stdout": "", - "stderr": " Expected: 10\n But was: 25\n\n at rocket_car_dotnet.ExampleTest.TestMul() in /Users/yabuki-ryosuke/src/github.com/launchableinc/examples/dotnet/ExampleTest.cs:line 28\n", + "stderr": " Expected: 10\n But was: 25\n\n at sample_app_dotnet.ExampleTest.TestMul() in /Users/yabuki-ryosuke/src/github.com/launchableinc/examples/dotnet/ExampleTest.cs:line 28\n", "data": null }, { @@ -83,11 +83,11 @@ "testPath": [ { "type": "Assembly", - "name": "rocket-car-dotnet.dll" + "name": "sample-app-dotnet.dll" }, { "type": "TestSuite", - "name": "rocket_car_dotnet" + "name": "sample_app_dotnet" }, { "type": "TestSuite", @@ -101,7 +101,7 @@ "duration": 0.000308, "status": 0, "stdout": "", - "stderr": " Expected: 2\n But was: 6\n\n at rocket_car_dotnet.ExampleTest.TestSub() in /Users/yabuki-ryosuke/src/github.com/launchableinc/examples/dotnet/ExampleTest.cs:line 21\n", + "stderr": " Expected: 2\n But was: 6\n\n at sample_app_dotnet.ExampleTest.TestSub() in /Users/yabuki-ryosuke/src/github.com/launchableinc/examples/dotnet/ExampleTest.cs:line 21\n", "data": null } ], diff --git a/tests/data/dotnet/test-result.xml b/tests/data/dotnet/test-result.xml index c6a04e2b8..acf60a03e 100644 --- a/tests/data/dotnet/test-result.xml +++ b/tests/data/dotnet/test-result.xml @@ -1,32 +1,32 @@ - - - - - + + + + + Expected: 0.5d But was: 0 - at rocket_car_dotnet.ExampleTest.TestDiv() in /Users/yabuki-ryosuke/src/github.com/launchableinc/examples/dotnet/ExampleTest.cs:line 35 + at sample_app_dotnet.ExampleTest.TestDiv() in /Users/yabuki-ryosuke/src/github.com/launchableinc/examples/dotnet/ExampleTest.cs:line 35 - + Expected: 10 But was: 25 - at rocket_car_dotnet.ExampleTest.TestMul() in 
/Users/yabuki-ryosuke/src/github.com/launchableinc/examples/dotnet/ExampleTest.cs:line 28 + at sample_app_dotnet.ExampleTest.TestMul() in /Users/yabuki-ryosuke/src/github.com/launchableinc/examples/dotnet/ExampleTest.cs:line 28 - + Expected: 2 But was: 6 - at rocket_car_dotnet.ExampleTest.TestSub() in /Users/yabuki-ryosuke/src/github.com/launchableinc/examples/dotnet/ExampleTest.cs:line 21 + at sample_app_dotnet.ExampleTest.TestSub() in /Users/yabuki-ryosuke/src/github.com/launchableinc/examples/dotnet/ExampleTest.cs:line 21 diff --git a/tests/data/gradle/java/app/src/test/java/com/launchableinc/rocket_car_gradle/App2Test.java b/tests/data/gradle/java/app/src/test/java/com/example/sample_app_gradle/App2Test.java similarity index 92% rename from tests/data/gradle/java/app/src/test/java/com/launchableinc/rocket_car_gradle/App2Test.java rename to tests/data/gradle/java/app/src/test/java/com/example/sample_app_gradle/App2Test.java index 1c37927d2..282cd65de 100644 --- a/tests/data/gradle/java/app/src/test/java/com/launchableinc/rocket_car_gradle/App2Test.java +++ b/tests/data/gradle/java/app/src/test/java/com/example/sample_app_gradle/App2Test.java @@ -1,7 +1,7 @@ /* * This Java source file was generated by the Gradle 'init' task. */ -package com.launchableinc.rocket_car_gradle; +package com.example.sample_app_gradle; import org.junit.Test; import org.junit.jupiter.api.Test; diff --git a/tests/data/gradle/java/app/src/test/java/com/launchableinc/rocket_car_gradle/AppTest.java b/tests/data/gradle/java/app/src/test/java/com/example/sample_app_gradle/AppTest.java similarity index 95% rename from tests/data/gradle/java/app/src/test/java/com/launchableinc/rocket_car_gradle/AppTest.java rename to tests/data/gradle/java/app/src/test/java/com/example/sample_app_gradle/AppTest.java index 714150a42..8403d2282 100644 --- a/tests/data/gradle/java/app/src/test/java/com/launchableinc/rocket_car_gradle/AppTest.java +++ b/tests/data/gradle/java/app/src/test/java/com/example/sample_app_gradle/AppTest.java @@ -1,7 +1,7 @@ /* * This Java source file was generated by the Gradle 'init' task. */ -package com.launchableinc.rocket_car_gradle; +package com.example.sample_app_gradle; import org.junit.jupiter.api.Nested; import org.junit.jupiter.api.Test; diff --git a/tests/data/gradle/java/app/src/test/java/com/launchableinc/rocket_car_gradle/sub/App3Test.java b/tests/data/gradle/java/app/src/test/java/com/example/sample_app_gradle/sub/App3Test.java similarity index 84% rename from tests/data/gradle/java/app/src/test/java/com/launchableinc/rocket_car_gradle/sub/App3Test.java rename to tests/data/gradle/java/app/src/test/java/com/example/sample_app_gradle/sub/App3Test.java index d8f114b04..3a2574762 100644 --- a/tests/data/gradle/java/app/src/test/java/com/launchableinc/rocket_car_gradle/sub/App3Test.java +++ b/tests/data/gradle/java/app/src/test/java/com/example/sample_app_gradle/sub/App3Test.java @@ -1,11 +1,11 @@ /* * This Java source file was generated by the Gradle 'init' task. 
*/ -package com.launchableinc.rocket_car_gradle.sub; +package com.example.sample_app_gradle.sub; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Assertions.*; -import com.launchableinc.rocket_car_gradle.App; +import com.example.sample_app_gradle.App; public class App3Test { @Test void testAppHasAGreeting() { diff --git a/tests/data/gradle/recursion/expected.json b/tests/data/gradle/recursion/expected.json index 6798dba57..d44a3eaac 100644 --- a/tests/data/gradle/recursion/expected.json +++ b/tests/data/gradle/recursion/expected.json @@ -5,7 +5,7 @@ "testPath": [ { "type": "class", - "name": "com.launchableinc.rocket_car_gradle.AppTest" + "name": "com.example.sample_app_gradle.AppTest" }, { "type": "testcase", @@ -23,7 +23,7 @@ "testPath": [ { "type": "class", - "name": "com.launchableinc.rocket_car_gradle.AppTest" + "name": "com.example.sample_app_gradle.AppTest" }, { "type": "testcase", @@ -41,7 +41,7 @@ "testPath": [ { "type": "class", - "name": "com.launchableinc.rocket_car_gradle.AppTest" + "name": "com.example.sample_app_gradle.AppTest" }, { "type": "testcase", @@ -59,7 +59,7 @@ "testPath": [ { "type": "class", - "name": "com.launchableinc.rocket_car_gradle.AppTest" + "name": "com.example.sample_app_gradle.AppTest" }, { "type": "testcase", diff --git a/tests/data/gradle/recursion/foo/bar/reports/1.xml b/tests/data/gradle/recursion/foo/bar/reports/1.xml index 301d6118e..84a7148fe 100644 --- a/tests/data/gradle/recursion/foo/bar/reports/1.xml +++ b/tests/data/gradle/recursion/foo/bar/reports/1.xml @@ -1,10 +1,10 @@ - + - - - - + + + + diff --git a/tests/data/maven/createdFile_1.lst b/tests/data/maven/createdFile_1.lst index 9145a29f4..c77123884 100644 --- a/tests/data/maven/createdFile_1.lst +++ b/tests/data/maven/createdFile_1.lst @@ -1,4 +1,4 @@ -com/example/launchable/model/a/ModelATest.class -com/example/launchable/model/b/ModelBTest.class -com/example/launchable/model/b/ModelBTest$SomeInner.class -com/example/launchable/model/c/ModelCTest.class +com/example/sampleapp/model/a/ModelATest.class +com/example/sampleapp/model/b/ModelBTest.class +com/example/sampleapp/model/b/ModelBTest$SomeInner.class +com/example/sampleapp/model/c/ModelCTest.class diff --git a/tests/data/maven/createdFile_2.lst b/tests/data/maven/createdFile_2.lst index 5663e49d5..b8d697835 100644 --- a/tests/data/maven/createdFile_2.lst +++ b/tests/data/maven/createdFile_2.lst @@ -1,4 +1,4 @@ -com/example/launchable/service/ServiceATest.class -com/example/launchable/service/ServiceATest$Inner1$Inner2.class -com/example/launchable/service/ServiceBTest.class -com/example/launchable/service/ServiceCTest.class +com/example/sampleapp/service/ServiceATest.class +com/example/sampleapp/service/ServiceATest$Inner1$Inner2.class +com/example/sampleapp/service/ServiceBTest.class +com/example/sampleapp/service/ServiceCTest.class diff --git a/tests/data/maven/java/test/src/java/com/launchableinc/rocket_car_maven/App2Test.java b/tests/data/maven/java/test/src/java/com/example/sample_app_maven/App2Test.java similarity index 65% rename from tests/data/maven/java/test/src/java/com/launchableinc/rocket_car_maven/App2Test.java rename to tests/data/maven/java/test/src/java/com/example/sample_app_maven/App2Test.java index 0e9508e36..48651c1fc 100644 --- a/tests/data/maven/java/test/src/java/com/launchableinc/rocket_car_maven/App2Test.java +++ b/tests/data/maven/java/test/src/java/com/example/sample_app_maven/App2Test.java @@ -1,4 +1,4 @@ -package com.launchableinc.rocket_car_maven; +package com.example.sample_app_maven; 
import static org.junit.Assert.*; @@ -8,13 +8,13 @@ public class App2Test { @Test public void testAppHasGreeting() { App testApp = new App(); - String message = "Hello rocket-car-maven"; + String message = "Hello sample-app-maven"; assertEquals(message, testApp.getGreeting()); } @Test public void testAppHasGreeting2() { App testApp = new App(); - assertNotNull("Hello rocket-car-maven", testApp.getGreeting()); + assertNotNull("Hello sample-app-maven", testApp.getGreeting()); } } diff --git a/tests/data/maven/java/test/src/java/com/launchableinc/rocket_car_maven/AppTest.java b/tests/data/maven/java/test/src/java/com/example/sample_app_maven/AppTest.java similarity index 83% rename from tests/data/maven/java/test/src/java/com/launchableinc/rocket_car_maven/AppTest.java rename to tests/data/maven/java/test/src/java/com/example/sample_app_maven/AppTest.java index b27ad6421..06a9ce275 100644 --- a/tests/data/maven/java/test/src/java/com/launchableinc/rocket_car_maven/AppTest.java +++ b/tests/data/maven/java/test/src/java/com/example/sample_app_maven/AppTest.java @@ -1,4 +1,4 @@ -package com.launchableinc.rocket_car_maven; +package com.example.sample_app_maven; import static org.junit.Assert.assertTrue; diff --git a/tests/data/maven/record_test_result.json b/tests/data/maven/record_test_result.json index b003cff7a..1b11c8399 100644 --- a/tests/data/maven/record_test_result.json +++ b/tests/data/maven/record_test_result.json @@ -5,7 +5,7 @@ "testPath": [ { "type": "class", - "name": "com.launchableinc.rocket_car_maven.AppTest" + "name": "com.example.sample_app_maven.AppTest" }, { "type": "testcase", @@ -23,7 +23,7 @@ "testPath": [ { "type": "class", - "name": "com.launchableinc.rocket_car_maven.NestedTest" + "name": "com.example.sample_app_maven.NestedTest" }, { "type": "testcase", @@ -41,7 +41,7 @@ "testPath": [ { "type": "class", - "name": "com.launchableinc.rocket_car_maven.App2Test" + "name": "com.example.sample_app_maven.App2Test" }, { "type": "testcase", @@ -59,7 +59,7 @@ "testPath": [ { "type": "class", - "name": "com.launchableinc.rocket_car_maven.App2Test" + "name": "com.example.sample_app_maven.App2Test" }, { "type": "testcase", diff --git a/tests/data/maven/reports/TEST-1.xml b/tests/data/maven/reports/TEST-1.xml index a46e02e13..8335ebe78 100644 --- a/tests/data/maven/reports/TEST-1.xml +++ b/tests/data/maven/reports/TEST-1.xml @@ -1,4 +1,4 @@ - - + + diff --git a/tests/data/maven/reports/TEST-2.xml b/tests/data/maven/reports/TEST-2.xml index ec804d9b4..824367266 100644 --- a/tests/data/maven/reports/TEST-2.xml +++ b/tests/data/maven/reports/TEST-2.xml @@ -1,5 +1,5 @@ - - - + + + diff --git a/tests/data/maven/reports/TEST-nested.xml b/tests/data/maven/reports/TEST-nested.xml index 7baa5c650..3cf4ff6ab 100644 --- a/tests/data/maven/reports/TEST-nested.xml +++ b/tests/data/maven/reports/TEST-nested.xml @@ -1,4 +1,4 @@ - - + + diff --git a/tests/data/maven/subset_by_absolute_time_result.json b/tests/data/maven/subset_by_absolute_time_result.json index 05ae7d4e9..8adf86225 100644 --- a/tests/data/maven/subset_by_absolute_time_result.json +++ b/tests/data/maven/subset_by_absolute_time_result.json @@ -1,10 +1,10 @@ { "testPaths": [ [ - {"type": "class", "name": "com.launchableinc.rocket_car_maven.App2Test"} + {"type": "class", "name": "com.example.sample_app_maven.App2Test"} ], [ - { "type": "class", "name": "com.launchableinc.rocket_car_maven.AppTest"} + { "type": "class", "name": "com.example.sample_app_maven.AppTest"} ]], "testRunner": "maven", "session": { diff --git 
a/tests/data/maven/subset_by_confidence_result.json b/tests/data/maven/subset_by_confidence_result.json index cbecf2557..d6fc78808 100644 --- a/tests/data/maven/subset_by_confidence_result.json +++ b/tests/data/maven/subset_by_confidence_result.json @@ -1,10 +1,10 @@ { "testPaths": [ [ - {"type": "class", "name": "com.launchableinc.rocket_car_maven.App2Test"} + {"type": "class", "name": "com.example.sample_app_maven.App2Test"} ], [ - { "type": "class", "name": "com.launchableinc.rocket_car_maven.AppTest"} + { "type": "class", "name": "com.example.sample_app_maven.AppTest"} ]], "testRunner": "maven", "session": { diff --git a/tests/data/maven/subset_from_file_result.json b/tests/data/maven/subset_from_file_result.json index 64d85fd2f..6f09fd69e 100644 --- a/tests/data/maven/subset_from_file_result.json +++ b/tests/data/maven/subset_from_file_result.json @@ -1,23 +1,23 @@ { "testPaths": [ [ - {"type": "class", "name": "com.example.launchable.service.ServiceATest"} + {"type": "class", "name": "com.example.sampleapp.service.ServiceATest"} ], [ - { "type": "class", "name": "com.example.launchable.service.ServiceBTest"} + { "type": "class", "name": "com.example.sampleapp.service.ServiceBTest"} ], [ - { "type": "class", "name": "com.example.launchable.service.ServiceCTest"} + { "type": "class", "name": "com.example.sampleapp.service.ServiceCTest"} ], [ - {"type": "class", "name": "com.example.launchable.model.a.ModelATest"} + {"type": "class", "name": "com.example.sampleapp.model.a.ModelATest"} ], [ - {"type": "class", "name": "com.example.launchable.model.b.ModelBTest"} + {"type": "class", "name": "com.example.sampleapp.model.b.ModelBTest"} ], [ - {"type": "class", "name": "com.example.launchable.model.c.ModelCTest"} + {"type": "class", "name": "com.example.sampleapp.model.c.ModelCTest"} ] ], "testRunner": "maven", diff --git a/tests/data/maven/subset_result.json b/tests/data/maven/subset_result.json index eeaadc66a..06fc3c88c 100644 --- a/tests/data/maven/subset_result.json +++ b/tests/data/maven/subset_result.json @@ -1,10 +1,10 @@ { "testPaths": [ [ - {"type": "class", "name": "com.launchableinc.rocket_car_maven.App2Test"} + {"type": "class", "name": "com.example.sample_app_maven.App2Test"} ], [ - { "type": "class", "name": "com.launchableinc.rocket_car_maven.AppTest"} + { "type": "class", "name": "com.example.sample_app_maven.AppTest"} ]], "testRunner": "maven", "goal": {"type": "subset-by-percentage", "percentage": 0.1}, diff --git a/tests/data/maven/subset_scan_test_compile_lst_result.json b/tests/data/maven/subset_scan_test_compile_lst_result.json index f0e9ecdf9..72dcbe116 100644 --- a/tests/data/maven/subset_scan_test_compile_lst_result.json +++ b/tests/data/maven/subset_scan_test_compile_lst_result.json @@ -1,13 +1,13 @@ { "testPaths": [ [ - {"type": "class", "name": "com.example.launchable.service.ServiceATest"} + {"type": "class", "name": "com.example.sampleapp.service.ServiceATest"} ], [ - { "type": "class", "name": "com.example.launchable.service.ServiceBTest"} + { "type": "class", "name": "com.example.sampleapp.service.ServiceBTest"} ], [ - { "type": "class", "name": "com.example.launchable.service.ServiceCTest"} + { "type": "class", "name": "com.example.sampleapp.service.ServiceCTest"} ] ], "testRunner": "maven", diff --git a/tests/plugins/foo.py b/tests/plugins/foo.py index cc799b95a..9656c5f60 100644 --- a/tests/plugins/foo.py +++ b/tests/plugins/foo.py @@ -1,15 +1,21 @@ -import click +from typing import Annotated, List -from launchable.test_runners import launchable +import 
typer +from smart_tests.test_runners import smart_tests -@click.argument('reports', required=True, nargs=-1) -@launchable.record.tests -def record_tests(client, reports): + +@smart_tests.record.tests +def record_tests( + client, + reports: Annotated[List[str], typer.Argument( + help="Test report files to process" + )], +): for r in reports: - click.echo('foo:{}'.format(r)) + typer.echo(f'foo:{r}') -@launchable.subset +@smart_tests.subset def subset(client): - click.echo("Subset!") + typer.echo("Subset!") diff --git a/tests/test_cli_test_case.py b/tests/test_cli_test_case.py index dc2c5d19a..9fda78ebd 100644 --- a/tests/test_cli_test_case.py +++ b/tests/test_cli_test_case.py @@ -43,7 +43,7 @@ def test_extract_all_test_paths_do_not_throw_exception_when_correct_input(self): { "type": "case", "testPath": [ - {"type": "class", "name": "com.launchable.HelloWorldTest"}, + {"type": "class", "name": "com.example.HelloWorldTest"}, {"type": "testcase", "name": "test1"}, ], } @@ -54,7 +54,7 @@ def test_extract_all_test_paths_do_not_throw_exception_when_correct_input(self): { "type": "case", "testPath": [ - {"type": "class", "name": "com.launchable.HelloWorldTest"}, + {"type": "class", "name": "com.example.HelloWorldTest"}, {"type": "testcase", "name": "test1"}, ], } @@ -68,14 +68,14 @@ def test_extract_all_test_paths_do_not_throw_exception_when_correct_input(self): { "type": "case", "testPath": [ - {"type": "class", "name": "com.launchable.HelloWorldTest"}, + {"type": "class", "name": "com.example.HelloWorldTest"}, {"type": "testcase", "name": "test1"}, ], }, { "type": "case", "testPath": [ - {"type": "class", "name": "com.launchable.HelloWorldTest"}, + {"type": "class", "name": "com.example.HelloWorldTest"}, {"type": "testcase", "name": "test2"}, ], }, @@ -86,14 +86,14 @@ def test_extract_all_test_paths_do_not_throw_exception_when_correct_input(self): { "type": "case", "testPath": [ - {"type": "class", "name": "com.launchable.HelloWorldTest"}, + {"type": "class", "name": "com.example.HelloWorldTest"}, {"type": "testcase", "name": "test1"}, ], }, { "type": "case", "testPath": [ - {"type": "class", "name": "com.launchable.HelloWorldTest"}, + {"type": "class", "name": "com.example.HelloWorldTest"}, {"type": "testcase", "name": "test2"}, ], }, @@ -113,7 +113,7 @@ def test_extract_all_test_paths_throw_exception_when_element_size_does_not_match "testPath": [ { "type": "class", - "name": "com.launchable.HelloWorldTest", + "name": "com.example.HelloWorldTest", }, {"type": "testcase", "name": "test1"}, ], @@ -123,7 +123,7 @@ def test_extract_all_test_paths_throw_exception_when_element_size_does_not_match "testPath": [ { "type": "class", - "name": "com.launchable.HelloWorldTest", + "name": "com.example.HelloWorldTest", }, {"type": "testcase", "name": "test2"}, ], @@ -137,7 +137,7 @@ def test_extract_all_test_paths_throw_exception_when_element_size_does_not_match "testPath": [ { "type": "class", - "name": "com.launchable.HelloWorldTest", + "name": "com.example.HelloWorldTest", }, {"type": "testcase", "name": "test1"}, ], @@ -155,7 +155,7 @@ def test_extract_all_test_paths_throw_exception_when_element_size_does_not_match "testPath": [ { "type": "class", - "name": "com.launchable.HelloWorldTest", + "name": "com.example.HelloWorldTest", }, {"type": "testcase", "name": "test1"}, ], @@ -169,7 +169,7 @@ def test_extract_all_test_paths_throw_exception_when_element_size_does_not_match "testPath": [ { "type": "class", - "name": "com.launchable.HelloWorldTest", + "name": "com.example.HelloWorldTest", }, {"type": 
"testcase", "name": "test1"}, ], @@ -179,7 +179,7 @@ def test_extract_all_test_paths_throw_exception_when_element_size_does_not_match "testPath": [ { "type": "class", - "name": "com.launchable.HelloWorldTest", + "name": "com.example.HelloWorldTest", }, {"type": "testcase", "name": "test2"}, ], @@ -199,7 +199,7 @@ def test_extract_all_test_paths_throw_exception_when_the_events_order_does_not_m "testPath": [ { "type": "class", - "name": "com.launchable.HelloWorldTest", + "name": "com.example.HelloWorldTest", }, {"type": "testcase", "name": "test2"}, ], @@ -209,7 +209,7 @@ def test_extract_all_test_paths_throw_exception_when_the_events_order_does_not_m "testPath": [ { "type": "class", - "name": "com.launchable.HelloWorldTest", + "name": "com.example.HelloWorldTest", }, {"type": "testcase", "name": "test1"}, ], @@ -223,7 +223,7 @@ def test_extract_all_test_paths_throw_exception_when_the_events_order_does_not_m "testPath": [ { "type": "class", - "name": "com.launchable.HelloWorldTest", + "name": "com.example.HelloWorldTest", }, {"type": "testcase", "name": "test1"}, ], @@ -233,7 +233,7 @@ def test_extract_all_test_paths_throw_exception_when_the_events_order_does_not_m "testPath": [ { "type": "class", - "name": "com.launchable.HelloWorldTest", + "name": "com.example.HelloWorldTest", }, {"type": "testcase", "name": "test2"}, ], diff --git a/tests/test_plugin.py b/tests/test_plugin.py index c10b2f2fb..d54f7adc1 100644 --- a/tests/test_plugin.py +++ b/tests/test_plugin.py @@ -4,32 +4,42 @@ import responses # type: ignore -from launchable.utils.http_client import get_base_url +from smart_tests.utils.http_client import get_base_url from .cli_test_case import CliTestCase class PluginTest(CliTestCase): @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_plugin_loading(self): + # Manually load the plugin to ensure it's available for the test + import importlib.util + + plugin_dir = Path(__file__).parent.joinpath('plugins').resolve() + for f in plugin_dir.glob('*.py'): + spec = importlib.util.spec_from_file_location( + f"smart_tests.plugins.{f.stem}", f) + plugin = importlib.util.module_from_spec(spec) + spec.loader.exec_module(plugin) responses.add( responses.GET, - "{}/intake/organizations/{}/workspaces/{}/builds/{}".format( - get_base_url(), - self.organization, - self.workspace, - "dummy"), + f"{get_base_url()}/intake/organizations/{self.organization}/workspaces/{self.workspace}/builds/dummy", json={'createdAt': "2020-01-02T03:45:56.123+00:00", 'id': 123, "build": "dummy"}, status=200) + # Session existence check + responses.add( + responses.GET, + f"{get_base_url()}/intake/organizations/{self.organization}/workspaces/{self.workspace}" + f"/builds/dummy/test_session_names/123", + json={'id': 123}, + status=200) + responses.add( responses.POST, - "{}/intake/organizations/{}/workspaces/{}/builds/{}/test_sessions{}/events".format( - get_base_url(), - self.organization, - self.workspace, - "dummy", 123), + f"{get_base_url()}/intake/organizations/{self.organization}/workspaces/{self.workspace}" + f"/builds/dummy/test_sessions/{123}/events", json={}, status=200) @@ -37,8 +47,8 @@ def test_plugin_loading(self): Load plugins/foo.py as a plugin and execute its code """ plugin_dir = Path(__file__).parent.joinpath('plugins').resolve() - result = self.cli('--plugins', str(plugin_dir), 'record', 'tests', - '--session', 'builds/dummy/test_sessions/123', 'foo', 'alpha', 
'bravo', 'charlie') + result = self.cli('--plugins', str(plugin_dir), 'record', 'test', 'foo', + '--session', self.session, 'alpha', 'bravo', 'charlie') self.assertTrue("foo:alpha" in result.stdout, result.stdout) self.assertTrue("foo:bravo" in result.stdout, result.stdout) self.assertTrue("foo:charlie" in result.stdout, result.stdout) diff --git a/tests/test_runners/test_adb.py b/tests/test_runners/test_adb.py index 0f7003146..6f5527ba7 100644 --- a/tests/test_runners/test_adb.py +++ b/tests/test_runners/test_adb.py @@ -3,35 +3,34 @@ import responses # type: ignore -from launchable.utils.session import read_session, write_build from tests.cli_test_case import CliTestCase class AdbTest(CliTestCase): - subset_input = """INSTRUMENTATION_STATUS: class=com.launchableinc.rocketcar.ExampleInstrumentedTest2 + subset_input = """INSTRUMENTATION_STATUS: class=com.example.sampleapp.ExampleInstrumentedTest2 INSTRUMENTATION_STATUS: current=1 INSTRUMENTATION_STATUS: id=AndroidJUnitRunner INSTRUMENTATION_STATUS: numtests=2 INSTRUMENTATION_STATUS: stream= -com.launchableinc.rocketcar.ExampleInstrumentedTest2: +com.example.sampleapp.ExampleInstrumentedTest2: INSTRUMENTATION_STATUS: test=useAppContext INSTRUMENTATION_STATUS_CODE: 1 -INSTRUMENTATION_STATUS: class=com.launchableinc.rocketcar.ExampleInstrumentedTest2 +INSTRUMENTATION_STATUS: class=com.example.sampleapp.ExampleInstrumentedTest2 INSTRUMENTATION_STATUS: current=1 INSTRUMENTATION_STATUS: id=AndroidJUnitRunner INSTRUMENTATION_STATUS: numtests=2 INSTRUMENTATION_STATUS: stream=. INSTRUMENTATION_STATUS: test=useAppContext INSTRUMENTATION_STATUS_CODE: 0 -INSTRUMENTATION_STATUS: class=com.launchableinc.rocketcar.ExampleInstrumentedTest +INSTRUMENTATION_STATUS: class=com.example.sampleapp.ExampleInstrumentedTest INSTRUMENTATION_STATUS: current=2 INSTRUMENTATION_STATUS: id=AndroidJUnitRunner INSTRUMENTATION_STATUS: numtests=2 INSTRUMENTATION_STATUS: stream= -com.launchableinc.rocketcar.ExampleInstrumentedTest: +com.example.sampleapp.ExampleInstrumentedTest: INSTRUMENTATION_STATUS: test=useAppContext INSTRUMENTATION_STATUS_CODE: 1 -INSTRUMENTATION_STATUS: class=com.launchableinc.rocketcar.ExampleInstrumentedTest +INSTRUMENTATION_STATUS: class=com.example.sampleapp.ExampleInstrumentedTest INSTRUMENTATION_STATUS: current=2 INSTRUMENTATION_STATUS: id=AndroidJUnitRunner INSTRUMENTATION_STATUS: numtests=2 @@ -49,13 +48,15 @@ class AdbTest(CliTestCase): """ @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_subset(self): - # emulate launchable record build - write_build(self.build_name) - - result = self.cli('subset', '--target', '10%', 'adb', input=self.subset_input) + result = self.cli( + 'subset', + 'adb', + '--session', + self.session, + '--target', + '10%', + input=self.subset_input) self.assert_success(result) - - self.assertEqual(read_session(self.build_name), self.session) self.assert_subset_payload('subset_result.json') diff --git a/tests/test_runners/test_ant.py b/tests/test_runners/test_ant.py index 1ebf4520e..e0fc51381 100644 --- a/tests/test_runners/test_ant.py +++ b/tests/test_runners/test_ant.py @@ -8,17 +8,17 @@ class AntTest(CliTestCase): @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_subset(self): - result = self.cli('subset', '--target', '10%', 
'--session', - self.session, 'ant', str(self.test_files_dir.joinpath('src').resolve())) + result = self.cli('subset', 'ant', '--session', self.session, '--target', + '10%', str(self.test_files_dir.joinpath('src').resolve())) self.assert_success(result) self.assert_subset_payload('subset_result.json') @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_record_test_ant(self): - result = self.cli('record', 'tests', '--session', self.session, - 'ant', str(self.test_files_dir) + "/junitreport/TESTS-TestSuites.xml") + result = self.cli('record', 'test', 'ant', '--session', self.session, + str(self.test_files_dir) + "/junitreport/TESTS-TestSuites.xml") self.assert_success(result) self.assert_record_tests_payload("record_test_result.json") diff --git a/tests/test_runners/test_bazel.py b/tests/test_runners/test_bazel.py index 4ba4ff25f..21691ef58 100644 --- a/tests/test_runners/test_bazel.py +++ b/tests/test_runners/test_bazel.py @@ -6,7 +6,6 @@ import responses # type: ignore -from launchable.utils.session import read_session, write_build from tests.cli_test_case import CliTestCase @@ -25,69 +24,65 @@ class BazelTest(CliTestCase): """ @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_subset(self): - # emulate launchable record build - write_build(self.build_name) - - result = self.cli('subset', '--target', '10%', 'bazel', input=self.subset_input) + result = self.cli( + 'subset', + 'bazel', + '--session', + self.session, + '--target', + '10%', + input=self.subset_input) self.assert_success(result) - self.assertEqual(read_session(self.build_name), self.session) self.assert_subset_payload('subset_result.json') @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_record_test(self): - # emulate launchable record build - write_build(self.build_name) - - result = self.cli('record', 'tests', 'bazel', str(self.test_files_dir) + "/") + result = self.cli('record', 'test', 'bazel', '--session', self.session, str(self.test_files_dir) + "/") self.assert_success(result) - self.assertEqual(read_session(self.build_name), self.session) self.assert_record_tests_payload('record_test_result.json') @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_record_test_with_build_event_json_file(self): - # emulate launchable record build - write_build(self.build_name) - - result = self.cli('record', 'tests', 'bazel', '--build-event-json', str( - self.test_files_dir.joinpath("build_event.json")), str(self.test_files_dir) + "/") + result = self.cli('record', 'test', 'bazel', '--session', self.session, + '--build-event-json', str(self.test_files_dir.joinpath("build_event.json")), + str(self.test_files_dir) + "/") self.assert_success(result) - self.assertEqual(read_session(self.build_name), self.session) self.assert_record_tests_payload('record_test_with_build_event_json_result.json') @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": 
CliTestCase.smart_tests_token}) def test_record_test_with_multiple_build_event_json_files(self): - # emulate launchable record build - write_build(self.build_name) - - result = self.cli('record', 'tests', 'bazel', '--build-event-json', - str(self.test_files_dir.joinpath("build_event.json")), + result = self.cli('record', 'test', 'bazel', '--session', self.session, + '--build-event-json', str(self.test_files_dir.joinpath("build_event.json")), '--build-event-json', str(self.test_files_dir.joinpath("build_event_rest.json")), str(self.test_files_dir) + "/") self.assert_success(result) - self.assertEqual(read_session(self.build_name), self.session) self.assert_record_tests_payload('record_test_with_multiple_build_event_json_result.json') @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_subset_record_key_match(self): - # emulate launchable record build - write_build(self.build_name) - """ Test recorded test results contain subset's test path """ - result = self.cli('subset', '--target', '10%', 'bazel', input=self.subset_input) + result = self.cli( + 'subset', + 'bazel', + '--session', + self.session, + '--target', + '10%', + input=self.subset_input) self.assert_success(result) subset_payload = json.loads(gzip.decompress(self.find_request('/subset').request.body).decode()) - result = self.cli('record', 'tests', 'bazel', str(self.test_files_dir) + "/") + result = self.cli('record', 'test', 'bazel', '--session', self.session, str(self.test_files_dir) + "/") self.assert_success(result) record_payload = json.loads(gzip.decompress(self.find_request('/events').request.body).decode()) diff --git a/tests/test_runners/test_behave.py b/tests/test_runners/test_behave.py index e217bb9f5..5c1107083 100644 --- a/tests/test_runners/test_behave.py +++ b/tests/test_runners/test_behave.py @@ -8,17 +8,23 @@ class BehaveTest(CliTestCase): @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_subset(self): pipe = "tutorial.feature" - result = self.cli('subset', '--target', '10%', '--session', self.session, 'behave', input=pipe) + result = self.cli( + 'subset', + 'behave', + '--session', self.session, + '--target', + '10%', + input=pipe) self.assert_success(result) self.assert_subset_payload('subset_result.json') @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_record_test(self): - result = self.cli('record', 'tests', '--session', self.session, 'behave', + result = self.cli('record', 'test', 'behave', '--session', self.session, str(self.test_files_dir) + "/reports/report.xml") self.assert_success(result) self.assert_record_tests_payload("record_test_result.json") diff --git a/tests/test_runners/test_ctest.py b/tests/test_runners/test_ctest.py index 3e41ef124..3ede5e84e 100644 --- a/tests/test_runners/test_ctest.py +++ b/tests/test_runners/test_ctest.py @@ -5,19 +5,17 @@ import responses # type: ignore -from launchable.utils.http_client import get_base_url -from launchable.utils.session import read_session, write_build +from smart_tests.utils.http_client import get_base_url from tests.cli_test_case import CliTestCase class CTestTest(CliTestCase): @responses.activate - 
@mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_subset_multiple_files(self): responses.replace( responses.POST, - "{}/intake/organizations/{}/workspaces/{}/subset".format( - get_base_url(), self.organization, self.workspace), + f"{get_base_url()}/intake/organizations/{self.organization}/workspaces/{self.workspace}/subset", json={ "testPaths": [ [{'type': 'testcase', 'name': 'FooTest.Bar'}], @@ -38,11 +36,7 @@ def test_subset_multiple_files(self): with tempfile.TemporaryDirectory() as tempdir: # Use a non-existing dir to check it creates a dir. output_dir = os.path.join(tempdir, 'subdir') - - # emulate launchable record build - write_build(self.build_name) - - result = self.cli('subset', '--target', '10%', 'ctest', + result = self.cli('subset', 'ctest', '--session', self.session, '--target', '10%', '--output-regex-files', '--output-regex-files-dir=' + output_dir, '--output-regex-files-size=32', @@ -74,23 +68,16 @@ def test_subset_multiple_files(self): self.assertEqual(rest_files, ['^FooTest\\.Baz$']) @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_subset_without_session(self): - # emulate launchable record build - write_build(self.build_name) - - result = self.cli('subset', '--target', '10%', 'ctest', str(self.test_files_dir.joinpath("ctest_list.json"))) + result = self.cli('subset', 'ctest', '--session', self.session, '--target', '10%', + str(self.test_files_dir.joinpath("ctest_list.json"))) self.assert_success(result) self.assert_subset_payload('subset_result.json') @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_record_test(self): - # emulate launchable record build - write_build(self.build_name) - - result = self.cli('record', 'tests', 'ctest', str(self.test_files_dir) + "/Testing/**/Test.xml") + result = self.cli('record', 'test', 'ctest', '--session', self.session, str(self.test_files_dir) + "/Testing/**/Test.xml") self.assert_success(result) - - self.assertEqual(read_session(self.build_name), self.session) self.assert_record_tests_payload('record_test_result.json') diff --git a/tests/test_runners/test_cts.py b/tests/test_runners/test_cts.py index 3bb2ad230..ebf824e16 100644 --- a/tests/test_runners/test_cts.py +++ b/tests/test_runners/test_cts.py @@ -3,13 +3,13 @@ import responses -from launchable.utils.http_client import get_base_url +from smart_tests.utils.http_client import get_base_url from tests.cli_test_case import CliTestCase class CtsTest(CliTestCase): @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_subset(self): pipe = """ # noqa: E501 ================== @@ -45,20 +45,18 @@ def test_subset(self): "isObservation": False, } - responses.replace(responses.POST, "{}/intake/organizations/{}/workspaces/{}/subset".format( - get_base_url(), - self.organization, - self.workspace), - json=mock_response, - status=200) + responses.replace(responses.POST, + f"{get_base_url()}/intake/organizations/{self.organization}/workspaces/{self.workspace}/subset", + json=mock_response, + status=200) result = self.cli( "subset", + "cts", 
"--target", "30%", "--session", self.session, - "cts", input=pipe, mix_stderr=False) self.assert_success(result) @@ -69,12 +67,12 @@ def test_subset(self): result = self.cli( "subset", + "cts", "--target", "30%", "--session", self.session, "--output-exclusion-rules", - "cts", input=pipe, mix_stderr=False) @@ -84,9 +82,9 @@ def test_subset(self): self.assertEqual(output, result.stdout) @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_record_tests(self): - result = self.cli('record', 'tests', '--session', self.session, - 'cts', str(self.test_files_dir) + "/test_result.xml") + result = self.cli('record', 'test', 'cts', '--session', self.session, + str(self.test_files_dir) + "/test_result.xml") self.assert_success(result) self.assert_record_tests_payload('record_test_result.json') diff --git a/tests/test_runners/test_cucumber.py b/tests/test_runners/test_cucumber.py index 2b24b759c..6a970a5d7 100644 --- a/tests/test_runners/test_cucumber.py +++ b/tests/test_runners/test_cucumber.py @@ -4,39 +4,35 @@ import responses # type: ignore -from launchable.test_runners.cucumber import _create_file_candidate_list, clean_uri -from launchable.utils.session import write_build +from smart_tests.test_runners.cucumber import _create_file_candidate_list, clean_uri from tests.cli_test_case import CliTestCase class CucumberTest(CliTestCase): @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_record_test(self): reports = [] for f in glob.iglob(str(self.test_files_dir.joinpath("report/*.xml")), recursive=True): reports.append(f) - - # emulate launchable record build - write_build(self.build_name) - - result = self.cli('record', 'tests', '--base', str(self.test_files_dir), 'cucumber', *reports) - + result = self.cli('record', 'test', 'cucumber', '--session', self.session, '--base', str(self.test_files_dir), *reports) self.assert_success(result) self.assert_record_tests_payload('record_test_result.json') @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_record_test_from_json(self): reports = [] for f in glob.iglob(str(self.test_files_dir.joinpath("report/*.json")), recursive=True): reports.append(f) - - # emulate launchable record build - write_build(self.build_name) - - result = self.cli('record', 'tests', 'cucumber', "--json", *reports) - + result = self.cli( + 'record', + 'test', + 'cucumber', + '--session', + self.session, + "--json", + *reports) self.assert_success(result) self.assert_record_tests_payload('record_test_json_result.json') diff --git a/tests/test_runners/test_cypress.py b/tests/test_runners/test_cypress.py index 287c1133a..8e43a71cf 100644 --- a/tests/test_runners/test_cypress.py +++ b/tests/test_runners/test_cypress.py @@ -8,31 +8,38 @@ class CypressTest(CliTestCase): @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_record_test_cypress(self): # test-result.xml was generated used to cypress-io/cypress-example-kitchensink # cypress run --reporter junit report.xml - result = self.cli('record', 'tests', '--session', 
self.session, - 'cypress', str(self.test_files_dir) + "/test-result.xml") + result = self.cli('record', 'test', 'cypress', '--session', self.session, + str(self.test_files_dir) + "/test-result.xml") self.assert_success(result) self.assert_record_tests_payload('record_test_result.json') @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_subset_cypress(self): # test-report.xml was produced by # cypress/integration/examples/window.spec.js, so pass that spec as input pipe = "cypress/integration/examples/window.spec.js" - result = self.cli('subset', '--target', '10%', '--session', self.session, 'cypress', input=pipe) + result = self.cli( + 'subset', + 'cypress', + '--session', + self.session, + '--target', + '10%', + input=pipe) self.assert_success(result) self.assert_subset_payload('subset_result.json') @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_empty_xml(self): # parse empty test report XML - result = self.cli('record', 'tests', '--session', self.session, - 'cypress', str(self.test_files_dir) + "/empty.xml") + result = self.cli('record', 'test', 'cypress', '--session', self.session, + str(self.test_files_dir) + "/empty.xml") self.assert_success(result) for call in responses.calls: self.assertFalse(call.request.url.endswith('/events'), 'there should be no calls to the /events endpoint') diff --git a/tests/test_runners/test_dotnet.py b/tests/test_runners/test_dotnet.py index 8a3187a83..8a7a0a09c 100644 --- a/tests/test_runners/test_dotnet.py +++ b/tests/test_runners/test_dotnet.py @@ -3,14 +3,13 @@ import responses # type: ignore -from launchable.utils.http_client import get_base_url +from smart_tests.utils.http_client import get_base_url from tests.cli_test_case import CliTestCase -from tests.helper import ignore_warnings class DotnetTest(CliTestCase): @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_subset(self): mock_response = { "testPaths": [ @@ -50,25 +49,20 @@ "isObservation": False, } - responses.replace(responses.POST, "{}/intake/organizations/{}/workspaces/{}/subset".format( - get_base_url(), - self.organization, - self.workspace), - json=mock_response, - status=200) + responses.replace(responses.POST, + f"{get_base_url()}/intake/organizations/{self.organization}/workspaces/{self.workspace}/subset", + json=mock_response, + status=200) # the dotnet profile requires Zero Input Subsetting - result = self.cli('subset', '--target', '25%', '--session', self.session, 'dotnet') + result = self.cli('subset', 'dotnet', '--session', self.session, '--target', '25%') self.assert_exit_code(result, 1) result = self.cli( - 'subset', - '--target', - '25%', - '--session', - self.session, + 'subset', 'dotnet', + '--session', self.session, + '--target', '25%', '--get-tests-from-previous-sessions', - 'dotnet', mix_stderr=False) self.assert_success(result) @@ -76,14 +70,11 @@ self.assertEqual(result.output, output) result = self.cli( - 'subset', - '--target', - '25%', - '--session', - self.session, + 'subset', 'dotnet', + '--session', self.session, + '--target', '25%', '--get-tests-from-previous-sessions', '--output-exclusion-rules', -
'dotnet', mix_stderr=False) self.assert_success(result) @@ -91,7 +82,7 @@ def test_subset(self): self.assertEqual(result.output, output) @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_subset_with_bare_option(self): mock_response = { "testPaths": [ @@ -131,21 +122,16 @@ def test_subset_with_bare_option(self): "isObservation": False, } - responses.replace(responses.POST, "{}/intake/organizations/{}/workspaces/{}/subset".format( - get_base_url(), - self.organization, - self.workspace), - json=mock_response, - status=200) + responses.replace(responses.POST, + f"{get_base_url()}/intake/organizations/{self.organization}/workspaces/{self.workspace}/subset", + json=mock_response, + status=200) result = self.cli( - 'subset', - '--target', - '25%', - '--session', - self.session, + 'subset', 'dotnet', + '--session', self.session, + '--target', '25%', '--get-tests-from-previous-sessions', - 'dotnet', '--bare', mix_stderr=False) self.assert_success(result) @@ -154,14 +140,11 @@ def test_subset_with_bare_option(self): self.assertEqual(result.output, output) result = self.cli( - 'subset', - '--target', - '25%', - '--session', - self.session, + 'subset', 'dotnet', + '--session', self.session, + '--target', '25%', '--get-tests-from-previous-sessions', '--output-exclusion-rules', - 'dotnet', '--bare', mix_stderr=False) self.assert_success(result) @@ -169,60 +152,9 @@ def test_subset_with_bare_option(self): output = "rocket_car_dotnet.ExampleTest.TestAdd\nrocket_car_dotnet.ExampleTest.TestDiv\n" self.assertEqual(result.output, output) - @ignore_warnings @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) - def test_split_subset(self): - responses.replace( - responses.POST, "{}/intake/organizations/{}/workspaces/{}/subset/456/slice".format( - get_base_url(), self.organization, self.workspace), json={ - "testPaths": [ - [ - {"type": "Assembly", "name": "rocket-car-dotnet.dll"}, - {"type": "TestSuite", "name": "rocket_car_dotnet"}, - {"type": "TestSuite", "name": "ExampleTest"}, - {"type": "TestCase", "name": "TestSub"}, - ], - ], - "rest": [ - [ - {"type": "Assembly", "name": "rocket-car-dotnet.dll"}, - {"type": "TestSuite", "name": "rocket_car_dotnet"}, - {"type": "TestSuite", "name": "ExampleTest"}, - {"type": "TestCase", "name": "TestAdd"}, - ], - ], - 'subsettingId': 456, - 'summary': { - 'subset': { - 'duration': 8, 'candidates': 1, 'rate': 50, - }, - 'rest': { - 'duration': 7, 'candidates': 1, 'rate': 50, - }, - }, - }, - status=200) - - result = self.cli('split-subset', '--subset-id', 'subset/456', - '--bin', '1/2', 'dotnet') - - self.assert_success(result) - - output = "FullyQualifiedName=rocket_car_dotnet.ExampleTest.TestSub\n" # noqa: E501 - self.assertEqual(result.output, output) - - result = self.cli('split-subset', '--subset-id', 'subset/456', - '--bin', '1/2', '--output-exclusion-rules', 'dotnet') - self.assert_success(result) - - output = "FullyQualifiedName!=rocket_car_dotnet.ExampleTest.TestAdd\n" # noqa: E501 - self.assertEqual(result.output, output) - - @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_record_tests(self): - result = self.cli('record', 'tests', '--session', self.session, - 'dotnet', str(self.test_files_dir) + "/test-result.xml") + 
result = self.cli('record', 'test', 'dotnet', '--session', self.session, str(self.test_files_dir) + "/test-result.xml") self.assert_success(result) self.assert_record_tests_payload("record_test_result.json") diff --git a/tests/test_runners/test_go_test.py b/tests/test_runners/test_go_test.py index 066e5bbe0..2948f9781 100644 --- a/tests/test_runners/test_go_test.py +++ b/tests/test_runners/test_go_test.py @@ -1,110 +1,40 @@ import os -import tempfile from unittest import mock import responses # type: ignore -from launchable.utils.http_client import get_base_url -from launchable.utils.session import read_session, write_build from tests.cli_test_case import CliTestCase class GoTestTest(CliTestCase): @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_subset_with_session(self): pipe = "TestExample1\nTestExample2\nTestExample3\nTestExample4\n" \ "ok github.com/launchableinc/rocket-car-gotest 0.268s" - result = self.cli('subset', '--target', '10%', '--session', self.session, 'go-test', input=pipe) - self.assert_success(result) - self.assert_subset_payload('subset_result.json') - - @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) - def test_subset_without_session(self): - # emulate launchable record build - write_build(self.build_name) - - pipe = "TestExample1\nTestExample2\nTestExample3\nTestExample4\n" \ - "ok github.com/launchableinc/rocket-car-gotest 0.268s" - result = self.cli('subset', '--target', '10%', 'go-test', input=pipe) - + result = self.cli( + 'subset', + 'go-test', + '--session', + self.session, + '--target', + '10%', + input=pipe) self.assert_success(result) - - self.assertEqual(read_session(self.build_name), self.session) self.assert_subset_payload('subset_result.json') @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_record_tests_with_session(self): - result = self.cli('record', 'tests', '--session', - self.session, 'go-test', str(self.test_files_dir.joinpath('reportv1')) + "/") + result = self.cli('record', 'test', 'go-test', '--session', self.session, + str(self.test_files_dir.joinpath('reportv1')) + "/") self.assert_success(result) self.assert_record_tests_payload('record_test_result.json') @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) - def test_record_tests_without_session(self): - # emulate launchable record build - write_build(self.build_name) - - result = self.cli('record', 'tests', 'go-test', str(self.test_files_dir.joinpath('reportv1')) + "/") - self.assert_success(result) - - self.assertEqual(read_session(self.build_name), self.session) - self.assert_record_tests_payload('record_test_result.json') - - @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_record_tests_v2(self): - result = self.cli('record', 'tests', '--session', - self.session, 'go-test', str(self.test_files_dir.joinpath('reportv2')) + "/") + result = self.cli('record', 'test', 'go-test', '--session', self.session, + str(self.test_files_dir.joinpath('reportv2')) + "/") self.assert_success(result) self.assert_record_tests_payload('record_test_result.json') - - 
@responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) - def test_split_subset_with_same_bin(self): - responses.replace( - responses.POST, - "{}/intake/organizations/{}/workspaces/{}/subset/456/slice".format( - get_base_url(), - self.organization, - self.workspace, - ), - json={ - 'testPaths': [ - [ - {'type': 'class', 'name': 'rocket-car-gotest'}, - {'type': 'testcase', 'name': 'TestExample1'}, - ], - [ - {'type': 'class', 'name': 'rocket-car-gotest'}, - {'type': 'testcase', 'name': 'TestExample2'}, - ], - ], - "rest": [], - }, - status=200, - ) - - same_bin_file = tempfile.NamedTemporaryFile(delete=False) - same_bin_file.write( - b'rocket-car-gotest.TestExample1\n' - b'rocket-car-gotest.TestExample2') - result = self.cli( - 'split-subset', - '--subset-id', - 'subset/456', - '--bin', - '1/2', - "--same-bin", - same_bin_file.name, - 'go-test', - ) - - self.assert_success(result) - - self.assertEqual("^TestExample1$|^TestExample2$\n", result.output) - same_bin_file.close() - os.unlink(same_bin_file.name) diff --git a/tests/test_runners/test_googletest.py b/tests/test_runners/test_googletest.py index 775233bcc..440b96219 100644 --- a/tests/test_runners/test_googletest.py +++ b/tests/test_runners/test_googletest.py @@ -8,7 +8,7 @@ class GoogleTestTest(CliTestCase): @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_subset(self): # I use "ctest -N" to get this list. pipe = """FooTest. @@ -16,32 +16,37 @@ def test_subset(self): Baz Foo """ - result = self.cli('subset', '--target', '10%', '--session', self.session, 'googletest', input=pipe) + result = self.cli( + 'subset', + 'googletest', + '--session', + self.session, + '--target', + '10%', + input=pipe) self.assert_success(result) self.assert_subset_payload('subset_result.json') @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_record_test_googletest(self): - result = self.cli('record', 'tests', '--session', self.session, - 'googletest', str(self.test_files_dir) + "/") + result = self.cli('record', 'test', 'googletest', '--session', self.session, str(self.test_files_dir) + "/") self.assert_success(result) self.assert_record_tests_payload('record_test_result.json') @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_record_failed_test_googletest(self): # ./test_a --gtest_output=xml:output.xml - result = self.cli('record', 'tests', '--session', self.session, - 'googletest', str(self.test_files_dir) + "/fail/") + result = self.cli('record', 'test', 'googletest', '--session', self.session, + str(self.test_files_dir) + "/fail/") self.assert_success(result) self.assert_record_tests_payload('fail/record_test_result.json') @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_record_empty_dir(self): path = 'latest/gtest_*_results.xml' - result = self.cli('record', 'tests', '--session', self.session, - 'googletest', path) - self.assertEqual(result.output.rstrip('\n'), "No matches found: {}".format(path)) + result = self.cli('record', 
'test', 'googletest', '--session', self.session, path) + self.assertEqual(result.output.rstrip('\n'), f"No matches found: {path}") self.assert_success(result) diff --git a/tests/test_runners/test_gradle.py b/tests/test_runners/test_gradle.py index 1affb5275..8043a2fbe 100644 --- a/tests/test_runners/test_gradle.py +++ b/tests/test_runners/test_gradle.py @@ -5,8 +5,7 @@ import responses # type: ignore -from launchable.utils.http_client import get_base_url -from launchable.utils.session import write_build +from smart_tests.utils.http_client import get_base_url from tests.cli_test_case import CliTestCase from tests.helper import ignore_warnings @@ -14,14 +13,11 @@ class GradleTest(CliTestCase): @ignore_warnings @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_subset_without_session(self): responses.replace( responses.POST, - "{}/intake/organizations/{}/workspaces/{}/subset".format( - get_base_url(), - self.organization, - self.workspace), + f"{get_base_url()}/intake/organizations/{self.organization}/workspaces/{self.workspace}/subset", json={ "testPaths": [ [{'name': 'com.launchableinc.rocket_car_gradle.App2Test'}], @@ -37,13 +33,8 @@ def test_subset_without_session(self): }, "isBrainless": False}, status=200) - - # emulate launchable record build - write_build(self.build_name) - - result = self.cli( - 'subset', '--target', '10%', 'gradle', - str(self.test_files_dir.joinpath('java/app/src/test').resolve())) + result = self.cli('subset', 'gradle', '--session', self.session, '--target', '10%', + str(self.test_files_dir.joinpath('java/app/src/test').resolve())) # TODO: we need to assert on the request payload to make sure it found # test list all right self.assert_success(result) @@ -56,15 +47,11 @@ def test_subset_without_session(self): @ignore_warnings @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_subset_rest(self): responses.replace( responses.POST, - "{}/intake/organizations/{}/workspaces/{}/subset".format( - get_base_url(), - self.organization, - self.workspace, - ), + f"{get_base_url()}/intake/organizations/{self.organization}/workspaces/{self.workspace}/subset", json={ "testPaths": [ [{'type': 'class', @@ -85,13 +72,8 @@ def test_subset_rest(self): status=200) rest = tempfile.NamedTemporaryFile(delete=False) - - # emulate launchable record build - write_build(self.build_name) - - result = self.cli( - 'subset', '--target', '10%', '--rest', rest.name, 'gradle', - str(self.test_files_dir.joinpath('java/app/src/test/java').resolve())) + result = self.cli('subset', 'gradle', '--session', self.session, '--target', + '10%', '--rest', rest.name, str(self.test_files_dir.joinpath('java/app/src/test/java').resolve())) self.assert_success(result) @@ -105,14 +87,11 @@ def test_subset_rest(self): @ignore_warnings @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_subset_zero_input_subsetting(self): responses.replace( responses.POST, - "{}/intake/organizations/{}/workspaces/{}/subset".format( - get_base_url(), - self.organization, - self.workspace), + f"{get_base_url()}/intake/organizations/{self.organization}/workspaces/{self.workspace}/subset", json={ "testPaths": 
[ [{'type': 'class', @@ -132,18 +111,11 @@ def test_subset_zero_input_subsetting(self): "isBrainless": False, }, status=200) - - # emulate launchable record build - write_build(self.build_name) - - result = self.cli( - 'subset', - '--target', - '10%', - '--get-tests-from-previous-sessions', - '--output-exclusion-rules', - 'gradle', - mix_stderr=False) + result = self.cli('subset', 'gradle', '--session', self.session, '--target', + '10%', + '--get-tests-from-previous-sessions', + '--output-exclusion-rules', + mix_stderr=False) if result.exit_code != 0: self.assertEqual( @@ -158,14 +130,11 @@ def test_subset_zero_input_subsetting(self): @ignore_warnings @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_subset_zero_input_subsetting_observation(self): responses.replace( responses.POST, - "{}/intake/organizations/{}/workspaces/{}/subset".format( - get_base_url(), - self.organization, - self.workspace), + f"{get_base_url()}/intake/organizations/{self.organization}/workspaces/{self.workspace}/subset", json={ "testPaths": [ [{'type': 'class', @@ -186,18 +155,11 @@ def test_subset_zero_input_subsetting_observation(self): "isObservation": True, }, status=200) - - # emulate launchable record build - write_build(self.build_name) - - result = self.cli( - 'subset', - '--target', - '10%', - '--get-tests-from-previous-sessions', - '--output-exclusion-rules', - 'gradle', - mix_stderr=False) + result = self.cli('subset', 'gradle', '--session', self.session, '--target', + '10%', + '--get-tests-from-previous-sessions', + '--output-exclusion-rules', + mix_stderr=False) if result.exit_code != 0: self.assertEqual( @@ -209,14 +171,11 @@ def test_subset_zero_input_subsetting_observation(self): @ignore_warnings @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_subset_zero_input_subsetting_source_root(self): responses.replace( responses.POST, - "{}/intake/organizations/{}/workspaces/{}/subset".format( - get_base_url(), - self.organization, - self.workspace), + f"{get_base_url()}/intake/organizations/{self.organization}/workspaces/{self.workspace}/subset", json={ "testPaths": [ [{'type': 'class', @@ -237,19 +196,12 @@ def test_subset_zero_input_subsetting_source_root(self): "isObservation": True, }, status=200) - - # emulate launchable record build - write_build(self.build_name) - - result = self.cli( - 'subset', - '--target', - '10%', - '--get-tests-from-previous-sessions', - '--output-exclusion-rules', - 'gradle', - str(self.test_files_dir.joinpath('java/app/src/test').resolve()), - mix_stderr=False) + result = self.cli('subset', 'gradle', '--session', self.session, '--target', + '10%', + '--get-tests-from-previous-sessions', + '--output-exclusion-rules', + str(self.test_files_dir.joinpath('java/app/src/test').resolve()), + mix_stderr=False) if result.exit_code != 0: self.assertEqual( @@ -260,15 +212,25 @@ def test_subset_zero_input_subsetting_source_root(self): body = gzip.decompress(self.find_request('/subset').request.body).decode('utf8') self.assertNotIn("java.com.launchableinc.rocket_car_gradle.App2Test", body) + # TODO(Konboi): The split subset isn't supported for the smart-tests initial release + """ @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + 
@mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_subset_split(self): + # Override session name lookup to allow session resolution + responses.replace( + responses.GET, + f"{get_base_url()}/intake/organizations/{self.organization}/workspaces/" + f"{self.workspace}/builds/{self.build_name}/test_session_names/{self.session_name}", + json={ + 'id': self.session_id, + 'isObservation': False, + }, + status=200) + responses.replace( responses.POST, - "{}/intake/organizations/{}/workspaces/{}/subset".format( - get_base_url(), - self.organization, - self.workspace), + f"{get_base_url()}/intake/organizations/{self.organization}/workspaces/{self.workspace}/subset", json={ "testPaths": [ [{'type': 'class', @@ -287,123 +249,19 @@ def test_subset_split(self): "isBrainless": False, }, status=200) - - # emulate launchable record build - write_build(self.build_name) - - result = self.cli( - 'subset', - '--target', - '10%', - '--split', - 'gradle', - str(self.test_files_dir.joinpath('java/app/src/test/java').resolve())) + result = self.cli('subset', 'gradle', '--session', self.session_name, '--build', self.build_name, '--target', + '10%', + '--split', + str(self.test_files_dir.joinpath('java/app/src/test/java').resolve())) self.assert_success(result) self.assertIn("subset/123", result.output.rstrip('\n')) - - @ignore_warnings - @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) - def test_split_subset(self): - responses.replace( - responses.POST, - "{}/intake/organizations/{}/workspaces/{}/subset/456/slice".format( - get_base_url(), - self.organization, - self.workspace), - json={ - 'testPaths': [ - [{'type': 'class', - 'name': 'com.launchableinc.rocket_car_gradle.App2Test'}], - [{'type': 'class', - 'name': 'com.launchableinc.rocket_car_gradle.AppTest'}], - [{'type': 'class', - 'name': 'com.launchableinc.rocket_car_gradle.utils.UtilsTest'}], - ], - "rest": [[{'name': 'com.launchableinc.rocket_car_gradle.sub.App3Test'}]], - }, - status=200) - - rest = tempfile.NamedTemporaryFile(delete=False) - result = self.cli( - 'split-subset', - '--subset-id', - 'subset/456', - '--bin', - '1/2', - '--rest', - rest.name, - 'gradle') - - self.assert_success(result) - - self.assertIn( - "--tests com.launchableinc.rocket_car_gradle.App2Test " - "--tests com.launchableinc.rocket_car_gradle.AppTest " - "--tests com.launchableinc.rocket_car_gradle.utils.UtilsTest", - result.output.rstrip('\n')) - self.assertEqual(rest.read().decode(), '--tests com.launchableinc.rocket_car_gradle.sub.App3Test') - rest.close() - os.unlink(rest.name) - - @ignore_warnings - @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) - def test_split_subset_with_same_bin(self): - responses.replace( - responses.POST, - "{}/intake/organizations/{}/workspaces/{}/subset/456/slice".format( - get_base_url(), - self.organization, - self.workspace, - ), - json={ - 'testPaths': [ - [{'type': 'class', - 'name': 'com.launchableinc.rocket_car_gradle.App2Test'}], - [{'type': 'class', - 'name': 'com.launchableinc.rocket_car_gradle.AppTest'}], - [{'type': 'class', - 'name': 'com.launchableinc.rocket_car_gradle.utils.UtilsTest'}], - ], - "rest": [], - }, - status=200, - ) - - same_bin_file = tempfile.NamedTemporaryFile(delete=False) - same_bin_file.write( - b'com.launchableinc.rocket_car_gradle.App2Test\n' - b'com.launchableinc.rocket_car_gradle.AppTest\n' - b'com.launchableinc.rocket_car_gradle.utils.UtilsTest') - result = 
self.cli( - 'split-subset', - '--subset-id', - 'subset/456', - '--bin', - '1/2', - "--same-bin", - same_bin_file.name, - 'gradle', - ) - - self.assert_success(result) - - self.assertIn( - "--tests com.launchableinc.rocket_car_gradle.App2Test " - "--tests com.launchableinc.rocket_car_gradle.AppTest " - "--tests com.launchableinc.rocket_car_gradle.utils.UtilsTest", - result.output.rstrip('\n'), - ) - same_bin_file.close() - os.unlink(same_bin_file.name) - + """ @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_record_test_gradle(self): - result = self.cli('record', 'tests', '--session', self.session, - 'gradle', str(self.test_files_dir) + "/**/reports") + result = self.cli('record', 'test', 'gradle', '--session', self.session, + str(self.test_files_dir) + "/**/reports") self.assert_success(result) self.assert_record_tests_payload('recursion/expected.json') diff --git a/tests/test_runners/test_jest.py b/tests/test_runners/test_jest.py index efa15c7dd..2d864b6f0 100644 --- a/tests/test_runners/test_jest.py +++ b/tests/test_runners/test_jest.py @@ -1,13 +1,9 @@ import os -from pathlib import Path from unittest import mock import responses # type: ignore -from launchable.utils.http_client import get_base_url -from launchable.utils.session import write_build from tests.cli_test_case import CliTestCase -from tests.helper import ignore_warnings class JestTest(CliTestCase): @@ -30,25 +26,30 @@ class JestTest(CliTestCase): """.format(*(os.getcwd() for _ in range(10))) @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_subset(self): - # emulate launchable record build - write_build(self.build_name) - - result = self.cli('subset', '--target', '10%', '--base', - os.getcwd(), 'jest', input=self.subset_input) + result = self.cli( + 'subset', + 'jest', + '--session', + self.session, + '--target', + '10%', + '--base', + os.getcwd(), + input=self.subset_input) self.assert_success(result) self.assert_subset_payload('subset_result.json') + # TODO(Konboi): The split subset isn't supported for the smart-tests initial release + """ @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) @ignore_warnings def test_subset_split(self): - test_path = Path("{}/components/layouts/modal/snapshot.test.tsx".format(os.getcwd())) + test_path = Path(f"{os.getcwd()}/components/layouts/modal/snapshot.test.tsx") responses.replace(responses.POST, - "{}/intake/organizations/{}/workspaces/{}/subset".format(get_base_url(), - self.organization, - self.workspace), + f"{get_base_url()}/intake/organizations/{self.organization}/workspaces/{self.workspace}/subset", json={'testPaths': [[{'name': str(test_path)}]], 'rest': [], 'subsettingId': 123, @@ -61,23 +62,18 @@ def test_subset_split(self): "isBrainless": False, }, status=200) - - # emulate launchable record build - write_build(self.build_name) - - result = self.cli('subset', '--target', '20%', '--base', os.getcwd(), '--split', - 'jest', input=self.subset_input) + result = self.cli('subset', 'jest', '--session', self.session, + '--target', '20%', '--base', os.getcwd(), '--split', input=self.subset_input) self.assert_success(result) self.assertIn('subset/123', 
result.output) + """ @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_record_test(self): - # emulate launchable record build - write_build(self.build_name) - - result = self.cli('record', 'tests', 'jest', str(self.test_files_dir.joinpath("junit.xml"))) + result = self.cli('record', 'test', 'jest', '--session', self.session, + str(self.test_files_dir.joinpath('junit.xml'))) self.assert_success(result) self.assert_record_tests_payload('record_test_result.json') diff --git a/tests/test_runners/test_maven.py b/tests/test_runners/test_maven.py index c61c413bd..ab4b314a1 100644 --- a/tests/test_runners/test_maven.py +++ b/tests/test_runners/test_maven.py @@ -5,22 +5,21 @@ import responses # type: ignore -from launchable.test_runners import maven -from launchable.utils.http_client import get_base_url +from smart_tests.test_runners import maven from tests.cli_test_case import CliTestCase class MavenTest(CliTestCase): @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_subset(self): - result = self.cli('subset', '--target', '10%', '--session', - self.session, 'maven', str(self.test_files_dir.joinpath('java/test/src/java/').resolve())) + result = self.cli('subset', 'maven', '--session', self.session, '--target', '10%', + str(self.test_files_dir.joinpath('java/test/src/java/').resolve())) self.assert_success(result) self.assert_subset_payload('subset_result.json') @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_subset_from_file(self): # if we prepare listed file with slash e.g) com/example/launchable/model/aModelATest.class # the test will be failed at Windows environment. 
So, we generate the list files with os.path.sep. @@ -28,31 +27,26 @@ def test_subset_from_file(self): def save_file(list, file_name): file = str(self.test_files_dir.joinpath(file_name)) with open(file, 'w+') as file: - for l in list: - file.write(l.replace(".", os.path.sep) + ".class\n") + for test_class in list: + file.write(test_class.replace(".", os.path.sep) + ".class\n") - list_1 = ["com.example.launchable.model.a.ModelATest", - "com.example.launchable.model.b.ModelBTest", - "com.example.launchable.model.b.ModelBTest$SomeInner", - "com.example.launchable.model.c.ModelCTest", + list_1 = ["com.example.sampleapp.model.a.ModelATest", + "com.example.sampleapp.model.b.ModelBTest", + "com.example.sampleapp.model.b.ModelBTest$SomeInner", + "com.example.sampleapp.model.c.ModelCTest", ] - list_2 = ["com.example.launchable.service.ServiceATest", - "com.example.launchable.service.ServiceATest$Inner1$Inner2", - "com.example.launchable.service.ServiceBTest", - "com.example.launchable.service.ServiceCTest", + list_2 = ["com.example.sampleapp.service.ServiceATest", + "com.example.sampleapp.service.ServiceATest$Inner1$Inner2", + "com.example.sampleapp.service.ServiceBTest", + "com.example.sampleapp.service.ServiceCTest", ] save_file(list_1, "createdFile_1.lst") save_file(list_2, "createdFile_2.lst") - result = self.cli('subset', - '--target', - '10%', - '--session', - self.session, - 'maven', + result = self.cli('subset', 'maven', '--session', self.session, '--target', '10%', "--test-compile-created-file", str(self.test_files_dir.joinpath("createdFile_1.lst")), "--test-compile-created-file", @@ -61,14 +55,14 @@ def save_file(list, file_name): self.assert_subset_payload('subset_from_file_result.json') @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_scan_test_compile_lst(self): list = [ - "com.example.launchable.service.ServiceATest", - "com.example.launchable.service.ServiceATest$Inner1$Inner2", - "com.example.launchable.service.ServiceBTest", - "com.example.launchable.service.ServiceCTest", + "com.example.sampleapp.service.ServiceATest", + "com.example.sampleapp.service.ServiceATest$Inner1$Inner2", + "com.example.sampleapp.service.ServiceBTest", + "com.example.sampleapp.service.ServiceCTest", ] base_tmp_dir = os.path.join(".", "tmp-maven-scan/") @@ -79,16 +73,10 @@ def test_scan_test_compile_lst(self): file = os.path.join(temp_dir, 'testCompile', 'default-testCompile', 'createdFiles.lst') with open(file, 'w+') as file: - for l in list: - file.write(l.replace(".", os.path.sep) + ".class\n") - - result = self.cli('subset', - '--target', - '10%', - '--session', - self.session, - 'maven', - "--scan-test-compile-lst") + for test_class in list: + file.write(test_class.replace(".", os.path.sep) + ".class\n") + + result = self.cli('subset', 'maven', '--session', self.session, '--target', '10%', "--scan-test-compile-lst") # clean up test directory shutil.rmtree(base_tmp_dir) @@ -96,77 +84,30 @@ def test_scan_test_compile_lst(self): self.assert_subset_payload('subset_scan_test_compile_lst_result.json') @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_subset_by_absolute_time(self): - result = self.cli('subset', '--time', '1h30m', '--session', - self.session, 'maven', str(self.test_files_dir.joinpath('java/test/src/java/').resolve())) + result =
self.cli('subset', 'maven', '--session', self.session, '--time', '1h30m', + str(self.test_files_dir.joinpath('java/test/src/java/').resolve())) self.assert_success(result) self.assert_subset_payload('subset_by_absolute_time_result.json') @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_subset_by_confidence(self): - result = self.cli('subset', '--confidence', '90%', '--session', - self.session, 'maven', str(self.test_files_dir.joinpath('java/test/src/java/').resolve())) + result = self.cli('subset', 'maven', '--session', self.session, '--confidence', '90%', + str(self.test_files_dir.joinpath('java/test/src/java/').resolve())) self.assert_success(result) self.assert_subset_payload('subset_by_confidence_result.json') @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) - def test_split_subset_with_same_bin(self): - responses.replace( - responses.POST, - "{}/intake/organizations/{}/workspaces/{}/subset/456/slice".format( - get_base_url(), - self.organization, - self.workspace, - ), - json={ - 'testPaths': [ - [{'type': 'class', - 'name': 'com.launchableinc.example.App2Test'}], - [{'type': 'class', - 'name': 'com.launchableinc.example.AppTest'}], - ], - "rest": [], - }, - status=200, - ) - - same_bin_file = tempfile.NamedTemporaryFile(delete=False) - same_bin_file.write( - b'com.launchableinc.example.AppTest\n' - b'com.launchableinc.example.App2Test\n' - ) - result = self.cli( - 'split-subset', - '--subset-id', - 'subset/456', - '--bin', - '1/2', - "--same-bin", - same_bin_file.name, - 'maven', - ) - - self.assert_success(result) - - self.assertIn( - "com.launchableinc.example.App2Test\n" - "com.launchableinc.example.AppTest", - result.output.rstrip("\n") - ) - - @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_record_test_maven(self): - result = self.cli('record', 'tests', '--session', self.session, - 'maven', str(self.test_files_dir) + "/**/reports") + result = self.cli('record', 'test', 'maven', '--session', self.session, str(self.test_files_dir) + "/**/reports") self.assert_success(result) self.assert_record_tests_payload("record_test_result.json") @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_record_test_maven_with_nested_class(self): """Verify that class names containing $ (inner class marker) are processed correctly during test recording""" # Test the path_builder function directly by extracting it from the maven module @@ -201,8 +142,7 @@ def default_path_builder(case, suite, report_file): self.assertNotIn("$", result_path[0]["name"]) # Now run the actual CLI command to ensure integration works - result = self.cli('record', 'tests', '--session', self.session, - 'maven', + result = self.cli('record', 'test', 'maven', '--session', self.session, str(self.test_files_dir) + "/maven/reports/TEST-1.xml", str(self.test_files_dir) + "/maven/reports/TEST-2.xml", str(self.test_files_dir) + "/maven/reports/TEST-nested.xml") diff --git a/tests/test_runners/test_minitest.py b/tests/test_runners/test_minitest.py index ce46ebafb..4b63a9057 100644 --- a/tests/test_runners/test_minitest.py +++ 
b/tests/test_runners/test_minitest.py @@ -1,30 +1,29 @@ import gzip import json import os -import tempfile from pathlib import Path from unittest import mock import responses # type: ignore -from launchable.utils.http_client import get_base_url +from smart_tests.utils.http_client import get_base_url from tests.cli_test_case import CliTestCase from tests.helper import ignore_warnings class MinitestTest(CliTestCase): @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_record_test_minitest(self): - result = self.cli('record', 'tests', '--session', self.session, 'minitest', str(self.test_files_dir) + "/") + result = self.cli('record', 'test', 'minitest', '--session', self.session, str(self.test_files_dir) + "/") self.assert_success(result) self.assert_record_tests_payload('record_test_result.json') @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_record_test_minitest_chunked(self): - result = self.cli('record', 'tests', '--session', self.session, - '--post-chunk', 5, 'minitest', str(self.test_files_dir) + "/") + result = self.cli('record', 'test', 'minitest', '--session', self.session, + '--post-chunk', 5, str(self.test_files_dir) + "/") self.assert_success(result) payload1 = json.loads(gzip.decompress(self.find_request('/events').request.body).decode()) @@ -38,14 +37,12 @@ def test_record_test_minitest_chunked(self): payload1['events'] + payload2['events']) @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) @ignore_warnings def test_subset(self): test_path = Path("test", "example_test.rb") responses.replace(responses.POST, - "{}/intake/organizations/{}/workspaces/{}/subset".format(get_base_url(), - self.organization, - self.workspace), + f"{get_base_url()}/intake/organizations/{self.organization}/workspaces/{self.workspace}/subset", json={'testPaths': [[{'name': str(test_path)}]], 'rest': [], 'subsettingId': 123, @@ -62,8 +59,8 @@ def test_subset(self): }, status=200) - result = self.cli('subset', '--target', '20%', '--session', self.session, '--base', str(self.test_files_dir), - 'minitest', str(self.test_files_dir) + "/test/**/*.rb") + result = self.cli('subset', 'minitest', '--session', self.session, + '--target', '20%', '--base', str(self.test_files_dir), str(self.test_files_dir) + "/test/**/*.rb") self.assert_success(result) @@ -71,24 +68,23 @@ def test_subset(self): self.assertIn(str(output), result.output) @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_subset_with_invalid_path(self): - result = self.cli('subset', '--target', '20%', '--session', self.session, '--base', str(self.test_files_dir), - 'minitest', str(self.test_files_dir) + "/dummy") + result = self.cli('subset', 'minitest', '--session', self.session, + '--target', '20%', '--base', str(self.test_files_dir), str(self.test_files_dir) + "/dummy") self.assert_success(result) - self.assertTrue("Error: no tests found matching the path." 
in result.output) + # TODO(Konboi): split subset isn't supported at smart-tests initial release + """ @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) @ignore_warnings def test_subset_split(self): test_path = Path("test", "example_test.rb") responses.replace(responses.POST, - "{}/intake/organizations/{}/workspaces/{}/subset".format(get_base_url(), - self.organization, - self.workspace), + f"{get_base_url()}/intake/organizations/{self.organization}/workspaces/{self.workspace}/subset", json={'testPaths': [[{'name': str(test_path)}]], 'rest': [], 'subsettingId': 123, @@ -102,35 +98,18 @@ def test_subset_split(self): }, status=200) - result = self.cli('subset', '--target', '20%', '--session', self.session, '--base', - str(self.test_files_dir), '--split', 'minitest', str(self.test_files_dir) + "/test/**/*.rb") + result = self.cli('subset', + 'minitest', + '--session', + self.session, + '--target', + '20%', + '--base', + str(self.test_files_dir), + '--split', + str(self.test_files_dir) + "/test/**/*.rb") self.assert_success(result) self.assertIn('subset/123', result.output) - - @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) - @ignore_warnings - def test_split_subset(self): - test_path = Path("test", "example_test.rb") - responses.replace(responses.POST, - "{}/intake/organizations/{}/workspaces/{}/subset/456/slice".format(get_base_url(), - self.organization, - self.workspace), - json={'testPaths': [[{'name': str(test_path)}]], - 'rest': [], - 'subsettingId': 123}, - status=200) - - rest = tempfile.NamedTemporaryFile(delete=False) - result = self.cli('split-subset', '--subset-id', 'subset/456', '--base', str(self.test_files_dir), - '--bin', '2/2', '--rest', rest.name, 'minitest') - - self.assert_success(result) - - output = Path(self.test_files_dir, "test", "example_test.rb") - self.assertEqual(str(output), result.output.rstrip("\n")) - self.assertEqual(rest.read().decode().rstrip("\n"), "") - rest.close() - os.unlink(rest.name) + """ diff --git a/tests/test_runners/test_nunit.py b/tests/test_runners/test_nunit.py index a59ab01e6..70eec7fda 100644 --- a/tests/test_runners/test_nunit.py +++ b/tests/test_runners/test_nunit.py @@ -1,80 +1,37 @@ import os -import tempfile from unittest import mock import responses # type: ignore -from launchable.utils.http_client import get_base_url +from smart_tests.utils.http_client import get_base_url from tests.cli_test_case import CliTestCase -from tests.helper import ignore_warnings class NUnitTest(CliTestCase): @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_subset(self): responses.replace( - responses.POST, "{}/intake/organizations/{}/workspaces/{}/subset".format( - get_base_url(), self.organization, self.workspace), json={ - 'testPaths': [ - [ - { - "type": "Assembly", - "name": "calc.dll", - }, - { - "type": "TestSuite", - "name": "ParameterizedTests", - }, - { - "type": "TestFixture", - "name": "MyTests", - }, - { - "type": "ParameterizedMethod", - "name": "DivideTest", - }, - { - "type": "TestCase", - "name": "DivideTest(12,3)", - }, - ], - [ - { - "type": "Assembly", - "name": "calc.dll", - }, - { - "type": "TestSuite", - "name": "calc", - }, - { - "type": "TestFixture", - "name": "Tests1", - }, - { - "type": 
"TestCase", - "name": "Test1", - }, - ], - ], + responses.POST, f"{get_base_url()}/intake/organizations/{self.organization}/workspaces/{self.workspace}/subset", + json={ + 'testPaths': + [[{"type": "Assembly", "name": "calc.dll", }, + {"type": "TestSuite", "name": "ParameterizedTests", }, + {"type": "TestFixture", "name": "MyTests", }, + {"type": "ParameterizedMethod", "name": "DivideTest", }, + {"type": "TestCase", "name": "DivideTest(12,3)", },], + [{"type": "Assembly", "name": "calc.dll", }, + {"type": "TestSuite", "name": "calc", }, + {"type": "TestFixture", "name": "Tests1", }, + {"type": "TestCase", "name": "Test1", },],], 'rest': [], 'subsettingId': 123, - 'summary': { - 'subset': { - 'duration': 15, - 'candidates': 2, - 'rate': 100, - }, - 'rest': { - 'duration': 0, - 'candidates': 0, - 'rate': 0, - }, - }, - }, status=200) + 'summary': + {'subset': {'duration': 15, 'candidates': 2, 'rate': 100, }, + 'rest': {'duration': 0, 'candidates': 0, 'rate': 0, }, }, }, + status=200) - result = self.cli('subset', '--target', '10%', '--session', self.session, 'nunit', + result = self.cli('subset', 'nunit', '--session', self.session, '--target', '10%', str(self.test_files_dir) + "/list.xml") self.assert_success(result) self.assert_subset_payload('subset_result.json') @@ -82,94 +39,26 @@ def test_subset(self): output = 'ParameterizedTests.MyTests.DivideTest(12,3)\ncalc.Tests1.Test1' self.assertIn(output, result.output) - @ignore_warnings - @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) - def test_split_subset(self): - responses.replace( - responses.POST, "{}/intake/organizations/{}/workspaces/{}/subset/456/slice".format( - get_base_url(), self.organization, self.workspace), json={ - 'testPaths': [ - [ - { - "type": "Assembly", "name": "calc.dll", - }, - { - "type": "TestSuite", "name": "ParameterizedTests", - }, - { - "type": "TestFixture", "name": "MyTests", - }, - { - "type": "ParameterizedMethod", "name": "DivideTest", - }, - { - "type": "TestCase", "name": "DivideTest(12,3)", - }, - ], - ], - 'rest': [ - [ - { - "type": "Assembly", "name": "calc.dll", - }, - { - "type": "TestSuite", "name": "calc", - }, - { - "type": "TestFixture", "name": "Tests1", - }, - { - "type": "TestCase", "name": "Test1", - }, - ], - ], - 'subsettingId': 456, - 'summary': { - 'subset': { - 'duration': 8, 'candidates': 1, 'rate': 50, - }, - 'rest': { - 'duration': 7, 'candidates': 1, 'rate': 50, - }, - }, - }, - status=200) - - rest = tempfile.NamedTemporaryFile(delete=False) - result = self.cli('split-subset', '--subset-id', 'subset/456', - '--bin', '1/2', '--rest', rest.name, 'nunit') - - self.assert_success(result) - - self.assertIn('ParameterizedTests.MyTests.DivideTest(12,3)', result.output) - - self.assertEqual(rest.read().decode(), 'calc.Tests1.Test1') - rest.close() - os.unlink(rest.name) - @responses.activate @mock.patch.dict(os.environ, - {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_record_test_on_linux(self): - result = self.cli('record', 'tests', '--session', self.session, - 'nunit', str(self.test_files_dir) + "/output-linux.xml") + result = self.cli('record', 'test', 'nunit', '--session', self.session, str(self.test_files_dir) + "/output-linux.xml") self.assert_success(result) self.assert_record_tests_payload("record_test_result-linux.json") @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, 
{"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_record_test_on_windows(self): - result = self.cli('record', 'tests', '--session', self.session, - 'nunit', str(self.test_files_dir) + "/output-windows.xml") + result = self.cli('record', 'test', 'nunit', '--session', self.session, str(self.test_files_dir) + "/output-windows.xml") self.assert_success(result) self.assert_record_tests_payload("record_test_result-windows.json") @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_record_test_with_nunit_reporter_bug(self): - result = self.cli('record', 'tests', '--session', self.session, - 'nunit', str(self.test_files_dir) + "/nunit-reporter-bug-with-nested-type.xml") + result = self.cli('record', 'test', 'nunit', '--session', self.session, + str(self.test_files_dir) + "/nunit-reporter-bug-with-nested-type.xml") self.assert_success(result) # turns out we collapse all TestFixtures to TestSuitest so the golden file has TestSuite=Outer+Inner, # not TestFixture=Outer+Inner diff --git a/tests/test_runners/test_playwright.py b/tests/test_runners/test_playwright.py index 95ab8751d..241f169a7 100644 --- a/tests/test_runners/test_playwright.py +++ b/tests/test_runners/test_playwright.py @@ -7,18 +7,18 @@ import responses # type: ignore -from launchable.commands.record.case_event import CaseEvent -from launchable.testpath import unparse_test_path +from smart_tests.commands.record.case_event import CaseEvent +from smart_tests.testpath import unparse_test_path from tests.cli_test_case import CliTestCase class PlaywrightTest(CliTestCase): @responses.activate @mock.patch.dict(os.environ, - {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_record_test(self): - result = self.cli('record', 'tests', '--session', self.session, - 'playwright', str(self.test_files_dir.joinpath("report.xml"))) + result = self.cli('record', 'test', 'playwright', '--session', self.session, + str(self.test_files_dir.joinpath("report.xml"))) self.assert_success(result) self.assert_record_tests_payload('record_test_result.json') @@ -29,18 +29,18 @@ def test_record_test(self): ) @responses.activate @mock.patch.dict(os.environ, - {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_record_test_with_json_option(self): # report.json was created by `launchableinc/example/playwright`` project - result = self.cli('record', 'tests', '--session', self.session, - 'playwright', '--json', str(self.test_files_dir.joinpath("report.json"))) + result = self.cli('record', 'test', 'playwright', '--session', self.session, + '--json', str(self.test_files_dir.joinpath("report.json"))) self.assert_success(result) self.assert_record_tests_payload('record_test_result_with_json.json') @responses.activate @mock.patch.dict(os.environ, - {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_record_test_timedOut_status(self): def _test_test_path_status(payload, test_path: str, status: CaseEvent) -> bool: checked = False @@ -54,13 +54,13 @@ def _test_test_path_status(payload, test_path: str, status: CaseEvent) -> bool: target_test_path = "file=tests/timeout-example.spec.ts#testcase=time-out" # XML Report Case - self.cli('record', 'tests', '--session', self.session, 'playwright', 
str(self.test_files_dir.joinpath("report.xml"))) + self.cli('record', 'test', 'playwright', '--session', self.session, str(self.test_files_dir.joinpath("report.xml"))) xml_payload = json.loads(gzip.decompress(self.find_request('/events').request.body).decode()) self.assertEqual(_test_test_path_status(xml_payload, target_test_path, CaseEvent.TEST_FAILED), True) # JSON Report Case - self.cli('record', 'tests', '--session', self.session, - 'playwright', '--json', str(self.test_files_dir.joinpath("report.json"))) + self.cli('record', 'test', 'playwright', '--session', self.session, + '--json', str(self.test_files_dir.joinpath("report.json"))) json_payload = json.loads(gzip.decompress(self.find_request('/events', 1).request.body).decode()) self.assertEqual(_test_test_path_status(json_payload, target_test_path, CaseEvent.TEST_FAILED), True) diff --git a/tests/test_runners/test_prove.py b/tests/test_runners/test_prove.py index efc0b42f1..b48c87fbe 100644 --- a/tests/test_runners/test_prove.py +++ b/tests/test_runners/test_prove.py @@ -3,8 +3,7 @@ import responses # type: ignore -from launchable.test_runners.prove import remove_leading_number_and_dash -from launchable.utils.session import read_session, write_build +from smart_tests.test_runners.prove import remove_leading_number_and_dash from tests.cli_test_case import CliTestCase @@ -30,13 +29,9 @@ def test_remove_leading_number_and_dash(self): class ProveTestTest(CliTestCase): @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_record_tests(self): - # emulate launchable record build - write_build(self.build_name) - - result = self.cli('record', 'tests', 'prove', str(self.test_files_dir.joinpath('report.xml'))) + result = self.cli('record', 'test', 'prove', '--session', self.session, + str(self.test_files_dir.joinpath('report.xml'))) self.assert_success(result) - - self.assertEqual(read_session(self.build_name), self.session) self.assert_record_tests_payload('record_test_result.json') diff --git a/tests/test_runners/test_pytest.py b/tests/test_runners/test_pytest.py index 5cfe90790..c40619f7a 100644 --- a/tests/test_runners/test_pytest.py +++ b/tests/test_runners/test_pytest.py @@ -6,7 +6,7 @@ import responses # type: ignore -from launchable.test_runners.pytest import PytestJSONReportParser, _parse_pytest_nodeid +from smart_tests.test_runners.pytest import PytestJSONReportParser, _parse_pytest_nodeid from tests.cli_test_case import CliTestCase @@ -25,10 +25,9 @@ class PytestTest(CliTestCase): ''' @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_subset(self): - result = self.cli('subset', '--target', '10%', '--session', - self.session, 'pytest', input=self.subset_input) + result = self.cli('subset', 'pytest', '--target', '10%', '--session', self.session, input=self.subset_input) self.assert_success(result) payload = json.loads(gzip.decompress(self.find_request('/subset').request.body).decode()) @@ -38,19 +37,19 @@ def test_subset(self): self.assertEqual(expected, payload) @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_record_test_pytest(self): - result = self.cli('record', 'tests', '--session', self.session, - 
'pytest', str(self.test_files_dir.joinpath("report.xml"))) + result = self.cli('record', 'test', 'pytest', '--session', self.session, + str(self.test_files_dir.joinpath("report.xml"))) self.assert_success(result) self.assert_record_tests_payload('record_test_result.json') @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_record_test_with_json_option(self): - result = self.cli('record', 'tests', '--session', self.session, - 'pytest', '--json', str(self.test_files_dir.joinpath("report.json"))) + result = self.cli('record', 'test', 'pytest', '--session', self.session, + '--json', str(self.test_files_dir.joinpath("report.json"))) self.assert_success(result) self.assert_record_tests_payload('record_test_result_json.json') diff --git a/tests/test_runners/test_raw.py b/tests/test_runners/test_raw.py index fdaf60c73..7c4cf4609 100644 --- a/tests/test_runners/test_raw.py +++ b/tests/test_runners/test_raw.py @@ -8,19 +8,17 @@ import responses # type: ignore from dateutil.tz import tzlocal -from launchable.utils.http_client import get_base_url -from launchable.utils.session import write_build +from smart_tests.utils.http_client import get_base_url from tests.cli_test_case import CliTestCase -from tests.helper import ignore_warnings class RawTest(CliTestCase): @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_subset(self): responses.replace( responses.POST, - "{}/intake/organizations/{}/workspaces/{}/subset".format(get_base_url(), self.organization, self.workspace), + f"{get_base_url()}/intake/organizations/{self.organization}/workspaces/{self.workspace}/subset", json={ "testPaths": [ [{'type': 'testcase', 'name': 'FooTest.Bar'}], @@ -47,12 +45,8 @@ def test_subset(self): '# This is a comment', 'testcase=FooTest.Baz', ]) + '\n') - - # emulate launchable record build - write_build(self.build_name) - - result = self.cli('subset', '--target', '10%', - 'raw', test_path_file, mix_stderr=False) + result = self.cli('subset', 'raw', '--session', self.session, '--target', '10%', + test_path_file, mix_stderr=False) self.assert_success(result) # Check request body @@ -76,12 +70,11 @@ def test_subset(self): ]) + '\n') @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_subset_get_tests_from_previous_sessions(self): responses.replace( responses.POST, - "{}/intake/organizations/{}/workspaces/{}/subset".format( - get_base_url(), self.organization, self.workspace), + f"{get_base_url()}/intake/organizations/{self.organization}/workspaces/{self.workspace}/subset", json={ "testPaths": [ [{'type': 'testcase', 'name': 'FooTest.Bar'}], @@ -99,19 +92,17 @@ def test_subset_get_tests_from_previous_sessions(self): "isBrainless": False }, status=200) - - # emulate launchable record build - write_build(self.build_name) - - # Don't use with for Windows environment rest = tempfile.NamedTemporaryFile(mode="+w", encoding="utf-8", delete=False) result = self.cli( 'subset', + 'raw', + '--session', + self.session, + '--get-tests-from-previous-sessions', '--target', '10%', - '--get-tests-from-previous-sessions', - "--rest", rest.name, - 'raw', + "--rest", + rest.name, mix_stderr=False) 
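+ # With --get-tests-from-previous-sessions, no test paths are piped on stdin; the subset request is built from previously recorded sessions, so only the mocked /subset response above is exercised here.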
self.assert_success(result) @@ -134,7 +125,7 @@ def test_subset_get_tests_from_previous_sessions(self): os.unlink(rest.name) @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_record_tests(self): with tempfile.TemporaryDirectory() as tempdir: test_path_file = os.path.join(tempdir, 'tests.json') @@ -216,11 +207,8 @@ def test_record_tests(self): ' ]', '}', ]) + '\n') - - # emulate launchable record build - write_build(self.build_name) - - result = self.cli('record', 'tests', 'raw', test_path_file, test_path_file2, test_path_file3, mix_stderr=False) + result = self.cli('record', 'test', 'raw', '--session', self.session, + test_path_file, test_path_file2, test_path_file3, mix_stderr=False) self.assert_success(result) # Check request body @@ -290,7 +278,7 @@ def test_record_tests(self): }) @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_record_tests_junit_xml(self): with tempfile.TemporaryDirectory() as tempdir: test_path_file = os.path.join(tempdir, 'tests.xml') @@ -319,11 +307,8 @@ def test_record_tests_junit_xml(self): ' ', '', ]) + '\n') - - # emulate launchable record build - write_build(self.build_name) - - result = self.cli('record', 'tests', 'raw', test_path_file, test_path_file2, mix_stderr=False) + result = self.cli('record', 'test', 'raw', '--session', self.session, + test_path_file, test_path_file2, mix_stderr=False) if result.exit_code != 0: self.assertEqual( result.exit_code, @@ -368,81 +353,3 @@ def test_record_tests_junit_xml(self): "flavors": [], "testSuite": "", }) - - @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) - @ignore_warnings - def test_split_subset(self): - responses.replace( - responses.POST, - "{}/intake/organizations/{}/workspaces/{}/subset/456/slice".format( - get_base_url(), self.organization, self.workspace), - json={ - "testPaths": [ - [{'type': 'testcase', 'name': 'FooTest.Bar'}], - [{'type': 'testcase', 'name': 'FooTest.Foo'}], - ], - "rest": [[{'type': 'testcase', 'name': 'FooTest.Baz'}]], - }, - status=200) - - rest = tempfile.NamedTemporaryFile(delete=False) - result = self.cli( - 'split-subset', - '--subset-id', - 'subset/456', - '--bin', - '1/2', - '--rest', - rest.name, - 'raw') - - self.assert_success(result) - - self.assertEqual( - result.stdout, - '\n'.join([ - 'testcase=FooTest.Bar', - 'testcase=FooTest.Foo\n', - ])) - self.assertEqual( - rest.read().decode(), - 'testcase=FooTest.Baz', - ) - rest.close() - os.unlink(rest.name) - - @responses.activate - @mock.patch.dict(os.environ, - {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) - def test_split_subset_with_same_bin(self): - # This test must raise error. 
- responses.replace( - responses.POST, - "{}/intake/organizations/{}/workspaces/{}/subset/456/slice".format( - get_base_url(), self.organization, self.workspace), - json={ - "testPaths": [ - [{'type': 'testcase', 'name': 'FooTest.Bar'}], - [{'type': 'testcase', 'name': 'FooTest.Foo'}], - ], - "rest": [], - }, - status=200) - - same_bin_file = tempfile.NamedTemporaryFile(delete=False) - same_bin_file.write( - b'FooTest.Bar\n' - b'FooTest.Foo') - result = self.cli( - 'split-subset', - '--subset-id', - 'subset/456', - '--bin', - '1/2', - "--same-bin", - same_bin_file.name, - 'raw') - self.assertTrue("--same-bin option is supported only for gradle test and go-test." in result.stdout) - same_bin_file.close() - os.unlink(same_bin_file.name) diff --git a/tests/test_runners/test_robot.py b/tests/test_runners/test_robot.py index 7b18d49a8..4dabe6282 100644 --- a/tests/test_runners/test_robot.py +++ b/tests/test_runners/test_robot.py @@ -8,28 +8,24 @@ class RobotTest(CliTestCase): @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_subset(self): - result = self.cli('subset', '--target', '10%', '--session', - self.session, 'robot', str(self.test_files_dir) + "/dryrun.xml") + result = self.cli('subset', 'robot', '--target', '10%', '--session', + self.session, str(self.test_files_dir) + "/dryrun.xml") self.assert_success(result) self.assert_subset_payload('subset_result.json') @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_record_test(self): - - result = self.cli('record', 'tests', '--session', self.session, - 'robot', str(self.test_files_dir) + "/output.xml") + result = self.cli('record', 'test', 'robot', '--session', self.session, str(self.test_files_dir) + "/output.xml") self.assert_success(result) self.assert_record_tests_payload("record_test_result.json") # for #637 @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_record_test_executed_only_one_file(self): - - result = self.cli('record', 'tests', '--session', self.session, - 'robot', str(self.test_files_dir) + "/single-output.xml") + result = self.cli('record', 'test', 'robot', '--session', self.session, str(self.test_files_dir) + "/single-output.xml") self.assert_success(result) self.assert_record_tests_payload("record_test_executed_only_one_file_result.json") diff --git a/tests/test_runners/test_rspec.py b/tests/test_runners/test_rspec.py index b927b7335..a233317ba 100644 --- a/tests/test_runners/test_rspec.py +++ b/tests/test_runners/test_rspec.py @@ -9,9 +9,9 @@ class RspecTest(CliTestCase): @responses.activate @mock.patch.dict(os.environ, - {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_record_test_rspec(self): - result = self.cli('record', 'tests', '--session', self.session, - 'rspec', str(self.test_files_dir.joinpath("rspec.xml"))) + result = self.cli('record', 'test', 'rspec', '--session', self.session, + str(self.test_files_dir.joinpath("rspec.xml"))) self.assert_success(result) self.assert_record_tests_payload('record_test_result.json') diff --git a/tests/test_runners/test_vitest.py b/tests/test_runners/test_vitest.py index 
050a48226..a71420c43 100644 --- a/tests/test_runners/test_vitest.py +++ b/tests/test_runners/test_vitest.py @@ -8,9 +8,8 @@ class VitestTest(CliTestCase): @responses.activate - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_record_tests(self): - result = self.cli('record', 'tests', '--session', - self.session, 'vitest', str(self.test_files_dir.joinpath("report.xml"))) + result = self.cli('record', 'test', 'vitest', '--session', self.session, str(self.test_files_dir.joinpath("report.xml"))) self.assert_success(result) self.assert_record_tests_payload('record_test_result.json') diff --git a/tests/test_runners/test_xctest.py b/tests/test_runners/test_xctest.py index 27c161c95..47faae3fd 100644 --- a/tests/test_runners/test_xctest.py +++ b/tests/test_runners/test_xctest.py @@ -3,24 +3,35 @@ import responses # type: ignore -from launchable.utils.http_client import get_base_url +from smart_tests.utils.http_client import get_base_url from tests.cli_test_case import CliTestCase class XCTestTest(CliTestCase): @responses.activate @mock.patch.dict(os.environ, - {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_record_test(self): - result = self.cli('record', 'tests', '--session', self.session, - 'xctest', str(self.test_files_dir.joinpath("junit.xml"))) + # Override session name lookup to allow session resolution + responses.replace( + responses.GET, + f"{get_base_url()}/intake/organizations/{self.organization}/workspaces/" + f"{self.workspace}/builds/{self.build_name}/test_session_names/{self.session_name}", + json={ + 'id': self.session_id, + 'isObservation': False, + }, + status=200) + + result = self.cli('record', 'test', 'xctest', '--session', self.session, + str(self.test_files_dir.joinpath("junit.xml"))) self.assert_success(result) self.assert_record_tests_payload('record_test_result.json') @responses.activate @mock.patch.dict(os.environ, - {"LAUNCHABLE_TOKEN": CliTestCase.launchable_token}) + {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) def test_subset(self): mock_response = { "testPaths": [ @@ -40,18 +51,14 @@ def test_subset(self): "isObservation": False, } - responses.replace(responses.POST, "{}/intake/organizations/{}/workspaces/{}/subset".format( - get_base_url(), - self.organization, - self.workspace), - json=mock_response, - status=200) + responses.replace(responses.POST, + f"{get_base_url()}/intake/organizations/{self.organization}/workspaces/{self.workspace}/subset", + json=mock_response, + status=200) - result = self.cli('subset', - '--session', self.session, + result = self.cli('subset', 'xctest', '--session', self.session, '--get-tests-from-previous-sessions', '--output-exclusion-rules', - 'xctest', mix_stderr=False) self.assert_success(result) diff --git a/tests/test_session.py b/tests/test_session.py deleted file mode 100644 index a1d1c578c..000000000 --- a/tests/test_session.py +++ /dev/null @@ -1,61 +0,0 @@ -import os -import shutil -import tempfile -from unittest import TestCase - -from launchable.utils.exceptions import ParseSessionException -from launchable.utils.session import (SESSION_DIR_KEY, clean_session_files, parse_session, read_build, read_session, - remove_session, validate_session_format, write_build, write_session) - - -class SessionTestClass(TestCase): - build_name = '123' - session_id = 
'/intake/organizations/launchableinc/workspaces/mothership/builds/123/test_sessions/13' - - def setUp(self): - self.dir = tempfile.mkdtemp() - os.environ[SESSION_DIR_KEY] = self.dir - - def tearDown(self): - clean_session_files() - del os.environ[SESSION_DIR_KEY] - shutil.rmtree(self.dir) - - def test_write_read_build(self): - self.assertEqual(read_build(), None) - write_build(self.build_name) - self.assertEqual(read_build(), self.build_name) - - def test_write_read_remove_session(self): - write_build(self.build_name) - write_session(self.build_name, self.session_id) - self.assertEqual(read_session(self.build_name), self.session_id) - - remove_session() - self.assertEqual(read_session(self.build_name), None) - - def test_read_before_write(self): - self.assertEqual(read_session(self.build_name), None) - - def test_parse_session(self): - session = "builds/build-name/test_sessions/123" - build_name, session_id = parse_session(session) - self.assertEqual(build_name, "build-name") - self.assertEqual(session_id, "123") - - with self.assertRaises(Exception): - parse_session("hoge/fuga") - - def test_validate_session_format(self): - # Test with a valid session format - validate_session_format("builds/build-name/test_sessions/123") - - # Test with invalid session formats - invalid_sessions = [ - "123", # Only id - "workspaces/mothership/builds/123/test_sessions/13" # Too many parts - ] - - for invalid_session in invalid_sessions: - with self.assertRaises(ParseSessionException): - validate_session_format(invalid_session) diff --git a/tests/test_testpath.py b/tests/test_testpath.py index d5942f558..d42628d5e 100644 --- a/tests/test_testpath.py +++ b/tests/test_testpath.py @@ -5,7 +5,7 @@ import tempfile import unittest -from launchable.testpath import FilePathNormalizer, parse_test_path, unparse_test_path +from smart_tests.testpath import FilePathNormalizer, parse_test_path, unparse_test_path class TestPathEncodingTest(unittest.TestCase): diff --git a/tests/test_version.py b/tests/test_version.py index fc15dcf00..886f34980 100644 --- a/tests/test_version.py +++ b/tests/test_version.py @@ -1,9 +1,9 @@ from unittest import TestCase -from click.testing import CliRunner # type: ignore +from typer.testing import CliRunner # type: ignore -from launchable.__main__ import main -from launchable.version import __version__ +from smart_tests.__main__ import main +from smart_tests.version import __version__ class VersionTest(TestCase): @@ -11,4 +11,4 @@ def test_version(self): runner = CliRunner() result = runner.invoke(main, ['--version']) self.assertEqual(result.exit_code, 0) - self.assertEqual(result.output, 'launchable-cli, version {}\n'.format(__version__)) + self.assertIn(__version__, result.stdout) diff --git a/tests/utils/test_authentication.py b/tests/utils/test_authentication.py index 9106b0bc5..a060d976d 100644 --- a/tests/utils/test_authentication.py +++ b/tests/utils/test_authentication.py @@ -1,7 +1,7 @@ import os from unittest import TestCase, mock -from launchable.utils.authentication import authentication_headers, get_org_workspace +from smart_tests.utils.authentication import authentication_headers, get_org_workspace class AuthenticationTest(TestCase): @@ -11,25 +11,25 @@ def test_get_org_workspace_no_environment_variables(self): self.assertIsNone(org) self.assertIsNone(workspace) - @mock.patch.dict(os.environ, {"LAUNCHABLE_TOKEN": "invalid"}) - def test_get_org_workspace_invalid_LAUNCHABLE_TOKEN(self): + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": "invalid"}) + def 
test_get_org_workspace_invalid_SMART_TESTS_TOKEN(self): org, workspace = get_org_workspace() self.assertIsNone(org) self.assertIsNone(workspace) @mock.patch.dict(os.environ, - {"LAUNCHABLE_TOKEN": "v1:launchableinc/test:token"}) - def test_get_org_workspace_valid_LAUNCHABLE_TOKEN(self): + {"SMART_TESTS_TOKEN": "v1:launchableinc/test:token"}) + def test_get_org_workspace_valid_SMART_TESTS_TOKEN(self): org, workspace = get_org_workspace() self.assertEqual("launchableinc", org) self.assertEqual("test", workspace) @mock.patch.dict( os.environ, - {"LAUNCHABLE_ORGANIZATION": "launchableinc", "LAUNCHABLE_WORKSPACE": "test"}, + {"SMART_TESTS_ORGANIZATION": "launchableinc", "SMART_TESTS_WORKSPACE": "test"}, clear=True, ) - def test_get_org_workspace_LAUNCHABLE_ORGANIZATION_and_LAUNCHABLE_WORKSPACE( + def test_get_org_workspace_SMART_TESTS_ORGANIZATION_and_SMART_TESTS_WORKSPACE( self): org, workspace = get_org_workspace() self.assertEqual("launchableinc", org) @@ -37,10 +37,10 @@ def test_get_org_workspace_LAUNCHABLE_ORGANIZATION_and_LAUNCHABLE_WORKSPACE( @mock.patch.dict( os.environ, - {"LAUNCHABLE_TOKEN": "v1:token_org/token_wp:token", - "LAUNCHABLE_ORGANIZATION": "org", "LAUNCHABLE_WORKSPACE": "wp"}, + {"SMART_TESTS_TOKEN": "v1:token_org/token_wp:token", + "SMART_TESTS_ORGANIZATION": "org", "SMART_TESTS_WORKSPACE": "wp"}, ) - def test_get_org_workspace_LAUNCHABLE_TOKEN_and_LAUNCHABLE_ORGANIZATION_and_LAUNCHABLE_WORKSPACE( + def test_get_org_workspace_SMART_TESTS_TOKEN_and_SMART_TESTS_ORGANIZATION_and_SMART_TESTS_WORKSPACE( self): org, workspace = get_org_workspace() self.assertEqual("token_org", org) @@ -52,8 +52,8 @@ def test_authentication_headers_empty(self): self.assertEqual(len(header), 0) @mock.patch.dict(os.environ, - {"LAUNCHABLE_TOKEN": "v1:launchableinc/test:token"}) - def test_authentication_headers_LAUNCHABLE_TOKEN(self): + {"SMART_TESTS_TOKEN": "v1:launchableinc/test:token"}) + def test_authentication_headers_SMART_TESTS_TOKEN(self): header = authentication_headers() self.assertEqual(len(header), 1) self.assertEqual( @@ -99,12 +99,12 @@ def test_authentication_headers_GitHub_Actions_without_PR_head(self): @mock.patch.dict( os.environ, - {"LAUNCHABLE_TOKEN": "v1:launchableinc/test:token", "GITHUB_ACTIONS": "true", "GITHUB_RUN_ID": "1", + {"SMART_TESTS_TOKEN": "v1:launchableinc/test:token", "GITHUB_ACTIONS": "true", "GITHUB_RUN_ID": "1", "GITHUB_REPOSITORY": "launchableinc/test", "GITHUB_WORKFLOW": "build", "GITHUB_RUN_NUMBER": "1", "GITHUB_EVENT_NAME": "push", "GITHUB_SHA": "test"}, clear=True, ) - def test_authentication_headers_LAUNCHABLE_TOKEN_and_GitHub_Actions(self): + def test_authentication_headers_SMART_TESTS_TOKEN_and_GitHub_Actions(self): header = authentication_headers() self.assertEqual(len(header), 1) self.assertEqual( diff --git a/tests/utils/test_click.py b/tests/utils/test_click.py deleted file mode 100644 index 9ee41600f..000000000 --- a/tests/utils/test_click.py +++ /dev/null @@ -1,117 +0,0 @@ -import datetime -import sys -from datetime import timezone -from typing import Sequence, Tuple -from unittest import TestCase - -import click -from click.testing import CliRunner -from dateutil.tz import tzlocal - -from launchable.utils.click import DATETIME_WITH_TZ, KEY_VALUE, PercentageType, convert_to_seconds - - -class PercentageTypeTest(TestCase): - ERROR_MSG = "Expected percentage like 50% but got" - WINDOWS_ERROR_MSG = "please write '50%%' to pass in '50%'" - - def test_invalid_value_windows(self): - pct = PercentageType() - orig_platform = sys.platform - sys.platform = 
"win32" - try: - with self.assertRaises(click.BadParameter) as cm: - pct.convert("50", None, None) - msg = str(cm.exception) - self.assertIn(self.ERROR_MSG + " '50'", msg) - self.assertIn(self.WINDOWS_ERROR_MSG, msg) - finally: - sys.platform = orig_platform - - def test_invalid_value_non_windows(self): - pct = PercentageType() - orig_platform = sys.platform - sys.platform = "linux" - try: - with self.assertRaises(click.BadParameter) as cm: - pct.convert("50", None, None) - msg = str(cm.exception) - self.assertIn(self.ERROR_MSG + " '50'", msg) - self.assertNotIn(self.WINDOWS_ERROR_MSG, msg) - finally: - sys.platform = orig_platform - - def test_invalid_float(self): - pct = PercentageType() - with self.assertRaises(click.BadParameter) as cm: - pct.convert("abc%", None, None) - msg = str(cm.exception) - self.assertIn(self.ERROR_MSG + " 'abc%'", msg) - - def test_valid(self): - pct = PercentageType() - self.assertEqual(pct.convert("50%", None, None), 0.5) - self.assertEqual(pct.convert("0%", None, None), 0.0) - self.assertEqual(pct.convert("100%", None, None), 1.0) - - -class DurationTypeTest(TestCase): - def test_convert_to_seconds(self): - self.assertEqual(convert_to_seconds('30s'), 30) - self.assertEqual(convert_to_seconds('5m'), 300) - self.assertEqual(convert_to_seconds('1h30m'), 5400) - self.assertEqual(convert_to_seconds('1d10h15m'), 123300) - self.assertEqual(convert_to_seconds('15m 1d 10h'), 123300) - - with self.assertRaises(ValueError): - convert_to_seconds('1h30k') - - -class KeyValueTypeTest(TestCase): - def test_conversion(self): - def scenario(expected: Sequence[Tuple[str, str]], *args): - actual: Sequence[Tuple[str, str]] = [] - - @click.command() - @click.option( - '-f', - 'args', - multiple=True, - type=KEY_VALUE, - ) - def hello(args: Sequence[Tuple[str, str]]): - nonlocal actual - actual = args - - result = CliRunner().invoke(hello, args) - self.assertEqual(0, result.exit_code, result.stdout) - self.assertSequenceEqual(expected, actual) - - scenario([]) - scenario([('bar', 'zot')], '-f', 'bar=zot') - scenario([('bar', 'zot'), ('a', 'b')], '-f', 'bar=zot', '-f', 'a=b') - - -class TimestampTypeTest(TestCase): - def test_conversion(self): - def scenario(expected: str, *args): - actual: datetime.datetime = datetime.datetime.now() - - @click.command() - @click.option( - '-t', - 'timestamp', - type=DATETIME_WITH_TZ, - ) - def time(timestamp: datetime.datetime): - nonlocal actual - actual = timestamp - - result = CliRunner().invoke(time, args) - - self.assertEqual(0, result.exit_code, result.stdout) - self.assertEqual(expected, actual) - - scenario(datetime.datetime(2023, 10, 1, 12, 0, 0, tzinfo=tzlocal()), '-t', '2023-10-01 12:00:00') - scenario(datetime.datetime(2023, 10, 1, 20, 0, 0, tzinfo=timezone.utc), '-t', '2023-10-01 20:00:00+00:00') - scenario(datetime.datetime(2023, 10, 1, 20, 0, 0, tzinfo=timezone.utc), '-t', '2023-10-01T20:00:00Z') diff --git a/tests/utils/test_fail_fast_mode.py b/tests/utils/test_fail_fast_mode.py index c150e5f8f..8fc2c8437 100644 --- a/tests/utils/test_fail_fast_mode.py +++ b/tests/utils/test_fail_fast_mode.py @@ -1,8 +1,8 @@ import io from contextlib import contextmanager, redirect_stderr -from launchable.utils.commands import Command -from launchable.utils.fail_fast_mode import FailFastModeValidateParams, fail_fast_mode_validate +from smart_tests.utils.commands import Command +from smart_tests.utils.fail_fast_mode import FailFastModeValidateParams, fail_fast_mode_validate from tests.cli_test_case import CliTestCase @@ -30,7 +30,7 @@ def 
test_fail_fast_mode_validate(self): @contextmanager def tmp_set_fail_fast_mode(enabled: bool): - from launchable.utils.fail_fast_mode import _fail_fast_mode_cache, set_fail_fast_mode + from smart_tests.utils.fail_fast_mode import _fail_fast_mode_cache, set_fail_fast_mode original = _fail_fast_mode_cache try: set_fail_fast_mode(enabled) diff --git a/tests/utils/test_file_name_pattern.py b/tests/utils/test_file_name_pattern.py index 5fcf62677..1b55ac56d 100644 --- a/tests/utils/test_file_name_pattern.py +++ b/tests/utils/test_file_name_pattern.py @@ -1,6 +1,6 @@ from unittest import TestCase -from launchable.utils.file_name_pattern import jvm_test_pattern +from smart_tests.utils.file_name_pattern import jvm_test_pattern class FileNameHeuristicTest(TestCase): diff --git a/tests/utils/test_git_log_parser.py b/tests/utils/test_git_log_parser.py index eca319b02..4a4903634 100644 --- a/tests/utils/test_git_log_parser.py +++ b/tests/utils/test_git_log_parser.py @@ -3,7 +3,7 @@ from dateutil.parser import parse -from launchable.utils.git_log_parser import ChangedFile, GitCommit, parse_git_log +from smart_tests.utils.git_log_parser import ChangedFile, GitCommit, parse_git_log class GitLogParserTest(TestCase): diff --git a/tests/utils/test_glob.py b/tests/utils/test_glob.py index 4cb776128..1f5c4229a 100644 --- a/tests/utils/test_glob.py +++ b/tests/utils/test_glob.py @@ -1,6 +1,6 @@ from unittest import TestCase -from launchable.utils.glob import compile +from smart_tests.utils.glob import compile class GlobTest(TestCase): diff --git a/tests/utils/test_gzipgen.py b/tests/utils/test_gzipgen.py index de286acf6..14fc61b2e 100644 --- a/tests/utils/test_gzipgen.py +++ b/tests/utils/test_gzipgen.py @@ -1,7 +1,7 @@ import gzip from unittest import TestCase -from launchable.utils.gzipgen import compress +from smart_tests.utils.gzipgen import compress class GzippenTest(TestCase): diff --git a/tests/utils/test_http_client.py b/tests/utils/test_http_client.py index a565e2286..ac569e6b9 100644 --- a/tests/utils/test_http_client.py +++ b/tests/utils/test_http_client.py @@ -2,16 +2,14 @@ import platform from unittest import TestCase, mock -from requests import Session - -from launchable.utils.http_client import _HttpClient -from launchable.version import __version__ +from smart_tests.utils.http_client import _HttpClient +from smart_tests.version import __version__ class HttpClientTest(TestCase): @mock.patch.dict( os.environ, - {"LAUNCHABLE_ORGANIZATION": "launchableinc", "LAUNCHABLE_WORKSPACE": "test"}, + {"SMART_TESTS_ORGANIZATION": "launchableinc", "SMART_TESTS_WORKSPACE": "test"}, clear=True, ) def test_header(self): @@ -19,45 +17,17 @@ def test_header(self): self.assertEqual(cli._headers(True), { 'Content-Encoding': 'gzip', 'Content-Type': 'application/json', - "User-Agent": "Launchable/{} (Python {}, {})".format( - __version__, - platform.python_version(), - platform.platform(), - ), + "User-Agent": f"Launchable/{__version__} (Python {platform.python_version()}, {platform.platform()})", }) self.assertEqual(cli._headers(False), { 'Content-Type': 'application/json', - "User-Agent": "Launchable/{} (Python {}, {})".format( - __version__, - platform.python_version(), - platform.platform(), - ), + "User-Agent": f"Launchable/{__version__} (Python {platform.python_version()}, {platform.platform()})", }) cli = _HttpClient("/test", test_runner="dummy") self.assertEqual(cli._headers(False), { 'Content-Type': 'application/json', - "User-Agent": "Launchable/{} (Python {}, {}) TestRunner/{}".format( - __version__, - 
platform.python_version(), - platform.platform(), - "dummy", - ), + "User-Agent": f"Launchable/{__version__} (Python {platform.python_version()}, " + f"{platform.platform()}) TestRunner/dummy", }) - - def test_reason(self): - '''make sure we correctly propagate error message from the server''' - - # use new session to disable retry - cli = _HttpClient(session=Session()) - # /error is an actual endpoint that exists on our service to test the behavior - res = cli.request("GET", "intake/error") - self.assertEqual(res.status_code, 500) - self.assertEqual(res.reason, "Welp") - - try: - res.raise_for_status() - self.fail("should have raised") - except Exception as e: - self.assertIn("Welp", str(e)) diff --git a/tests/utils/test_link.py b/tests/utils/test_link.py index 3ee302b1c..188b59063 100644 --- a/tests/utils/test_link.py +++ b/tests/utils/test_link.py @@ -1,6 +1,6 @@ from unittest import TestCase -from launchable.utils.link import LinkKind, capture_link +from smart_tests.utils.link import LinkKind, capture_link class LinkTest(TestCase): diff --git a/tests/utils/test_logger.py b/tests/utils/test_logger.py index 706bbd68f..c081043b5 100644 --- a/tests/utils/test_logger.py +++ b/tests/utils/test_logger.py @@ -4,8 +4,8 @@ from unittest import TestCase from unittest.mock import patch -import launchable.utils.logger as logger -from launchable.utils.logger import Logger +import smart_tests.utils.logger as logger +from smart_tests.utils.logger import Logger class LoggerTest(TestCase): @@ -24,22 +24,22 @@ def tearDown(self): @patch("sys.stderr", new_callable=StringIO) def test_logging_default(self, mock_err): logging.basicConfig(level=logger.LOG_LEVEL_DEFAULT) - l = Logger() - l.audit("audit") - l.info("info") - l.warning("warn") - l.debug("debug") + logger_instance = Logger() + logger_instance.audit("audit") + logger_instance.info("info") + logger_instance.warning("warn") + logger_instance.debug("debug") self.assertEqual(mock_err.getvalue(), "WARNING:launchable:warn\n") @patch("sys.stderr", new_callable=StringIO) def test_log_level_audit(self, mock_err): logging.basicConfig(level=logger.LOG_LEVEL_AUDIT) - l = Logger() - l.audit("audit") - l.critical("critical") - l.error("error") - l.warning("warn") - l.info("info") - l.debug("debug") + logger_instance = Logger() + logger_instance.audit("audit") + logger_instance.critical("critical") + logger_instance.error("error") + logger_instance.warning("warn") + logger_instance.info("info") + logger_instance.debug("debug") self.assertEqual(mock_err.getvalue( ), "AUDIT:launchable:audit\nCRITICAL:launchable:critical\nERROR:launchable:error\nWARNING:launchable:warn\n") diff --git a/tests/utils/test_session.py b/tests/utils/test_session.py new file mode 100644 index 000000000..097632378 --- /dev/null +++ b/tests/utils/test_session.py @@ -0,0 +1,48 @@ +import os +from unittest import mock + +import responses + +from smart_tests.utils.http_client import get_base_url +from smart_tests.utils.session import TestSession, get_session +from smart_tests.utils.smart_tests_client import SmartTestsClient +from tests.cli_test_case import CliTestCase + + +class TestTestSession(CliTestCase): + @mock.patch.dict(os.environ, {"SMART_TESTS_TOKEN": CliTestCase.smart_tests_token}) + @responses.activate + def test_get_session(self): + client = SmartTestsClient(base_url=get_base_url()) + responses.replace( + responses.GET, + f"{get_base_url()}/intake/organizations/{self.organization}/workspaces/{self.workspace}" + f"/builds/{self.build_name}/test_sessions/{self.session_id}", + json={ + 
'id': self.session_id, + 'buildId': 456, + 'buildNumber': self.build_name, + 'isObservation': True, + 'name': 'dummy-name', + }, + status=200) + + test_session = get_session(self.session, client) + self.assertEqual(test_session, TestSession( + id=self.session_id, + build_id=456, + build_name=self.build_name, + observation_mode=True, + name='dummy-name')) + + # not found test session case + responses.replace( + responses.GET, + f"{get_base_url()}/intake/organizations/{self.organization}/workspaces/{self.workspace}" + f"/builds/{self.build_name}/test_sessions/{self.session_id}", + json={}, + status=404) + + with self.assertRaises(SystemExit) as cm: + get_session(self.session, client) + self.assertEqual(cm.exception.code, 1) diff --git a/tests/utils/test_typer.py b/tests/utils/test_typer.py new file mode 100644 index 000000000..9e4c14909 --- /dev/null +++ b/tests/utils/test_typer.py @@ -0,0 +1,59 @@ +import datetime +from datetime import timezone +from unittest import TestCase + +import typer +from dateutil.tz import tzlocal + +from smart_tests.utils.typer_types import (DATETIME_WITH_TZ, KEY_VALUE, convert_to_seconds, + validate_datetime_with_tz, validate_key_value) + + +class DurationTypeTest(TestCase): + def test_convert_to_seconds(self): + self.assertEqual(convert_to_seconds('30s'), 30) + self.assertEqual(convert_to_seconds('5m'), 300) + self.assertEqual(convert_to_seconds('1h30m'), 5400) + self.assertEqual(convert_to_seconds('1d10h15m'), 123300) + self.assertEqual(convert_to_seconds('15m 1d 10h'), 123300) + + with self.assertRaises(ValueError): + convert_to_seconds('1h30k') + + +class KeyValueTypeTest(TestCase): + def test_conversion(self): + # Test the validate_key_value function directly + self.assertEqual(validate_key_value('bar=zot'), ('bar', 'zot')) + self.assertEqual(validate_key_value('a=b'), ('a', 'b')) + self.assertEqual(validate_key_value('key:value'), ('key', 'value')) + + with self.assertRaises(typer.BadParameter): + validate_key_value('invalid') + + # Test the parser class + parser = KEY_VALUE + self.assertEqual(parser('bar=zot'), ('bar', 'zot')) + self.assertEqual(parser('a=b'), ('a', 'b')) + + +class TimestampTypeTest(TestCase): + def test_conversion(self): + # Test the validate_datetime_with_tz function directly + result1 = validate_datetime_with_tz('2023-10-01 12:00:00') + expected1 = datetime.datetime(2023, 10, 1, 12, 0, 0, tzinfo=tzlocal()) + self.assertEqual(result1, expected1) + + result2 = validate_datetime_with_tz('2023-10-01 20:00:00+00:00') + expected2 = datetime.datetime(2023, 10, 1, 20, 0, 0, tzinfo=timezone.utc) + self.assertEqual(result2, expected2) + + result3 = validate_datetime_with_tz('2023-10-01T20:00:00Z') + expected3 = datetime.datetime(2023, 10, 1, 20, 0, 0, tzinfo=timezone.utc) + self.assertEqual(result3, expected3) + + # Test the parser class + parser = DATETIME_WITH_TZ + result4 = parser('2023-10-01 12:00:00') + expected4 = datetime.datetime(2023, 10, 1, 12, 0, 0, tzinfo=tzlocal()) + self.assertEqual(result4, expected4) diff --git a/tests/utils/test_typer_types.py b/tests/utils/test_typer_types.py new file mode 100644 index 000000000..9b605e33e --- /dev/null +++ b/tests/utils/test_typer_types.py @@ -0,0 +1,251 @@ +import datetime +import sys +from datetime import timezone +from unittest import TestCase + +import typer +from dateutil.tz import tzlocal + +from smart_tests.utils.typer_types import (DATETIME_WITH_TZ, EMOJI, KEY_VALUE, DateTimeWithTimezone, Duration, Fraction, + KeyValue, Percentage, convert_to_seconds, emoji, 
parse_datetime_with_timezone, + parse_duration, parse_fraction, parse_key_value, parse_percentage, + validate_datetime_with_tz, validate_key_value, validate_past_datetime) + + +class PercentageTest(TestCase): + def test_parse_valid_percentage(self): + pct = parse_percentage("50%") + self.assertIsInstance(pct, Percentage) + self.assertEqual(pct.value, 0.5) + self.assertEqual(float(pct), 0.5) + self.assertEqual(str(pct), "50.0%") + + def test_parse_edge_cases(self): + # Test 0% and 100% + self.assertEqual(parse_percentage("0%").value, 0.0) + self.assertEqual(parse_percentage("100%").value, 1.0) + + # Test decimal percentages + self.assertEqual(parse_percentage("25.5%").value, 0.255) + + def test_parse_invalid_percentage_missing_percent(self): + orig_platform = sys.platform + try: + # Test Windows behavior + sys.platform = "win32" + with self.assertRaises(typer.BadParameter) as cm: + parse_percentage("50") + msg = str(cm.exception) + self.assertIn("Expected percentage like 50% but got '50'", msg) + self.assertIn("please write '50%%' to pass in '50%'", msg) + + # Test non-Windows behavior + sys.platform = "linux" + with self.assertRaises(typer.BadParameter) as cm: + parse_percentage("50") + msg = str(cm.exception) + self.assertIn("Expected percentage like 50% but got '50'", msg) + self.assertNotIn("please write '50%%' to pass in '50%'", msg) + finally: + sys.platform = orig_platform + + def test_parse_invalid_percentage_non_numeric(self): + with self.assertRaises(typer.BadParameter) as cm: + parse_percentage("abc%") + msg = str(cm.exception) + self.assertIn("Expected percentage like 50% but got 'abc%'", msg) + + def test_percentage_class_methods(self): + pct = Percentage(0.75) + self.assertEqual(str(pct), "75.0%") + self.assertEqual(float(pct), 0.75) + + +class DurationTest(TestCase): + def test_convert_to_seconds(self): + self.assertEqual(convert_to_seconds('30s'), 30) + self.assertEqual(convert_to_seconds('5m'), 300) + self.assertEqual(convert_to_seconds('1h30m'), 5400) + self.assertEqual(convert_to_seconds('1d10h15m'), 123300) + self.assertEqual(convert_to_seconds('15m 1d 10h'), 123300) + self.assertEqual(convert_to_seconds('1w'), 604800) # 7 days + + # Test numeric only + self.assertEqual(convert_to_seconds('3600'), 3600) + + def test_convert_to_seconds_invalid(self): + with self.assertRaises(ValueError): + convert_to_seconds('1h30k') + + def test_parse_duration(self): + duration = parse_duration("30s") + self.assertIsInstance(duration, Duration) + self.assertEqual(duration.seconds, 30) + self.assertEqual(float(duration), 30) + self.assertEqual(str(duration), "30.0s") + + def test_parse_duration_invalid(self): + # Note: convert_to_seconds returns 0.0 for a string with no recognizable duration tokens (e.g. "invalid"), + # whereas a recognized number with an unknown unit such as '1h30k' raises ValueError (see test_convert_to_seconds_invalid above) + # So parse_duration returns Duration(0.0) here + duration = parse_duration("invalid") + self.assertEqual(duration.seconds, 0.0) + + +class KeyValueTest(TestCase): + def test_parse_key_value_equals(self): + kv = parse_key_value("key=value") + self.assertIsInstance(kv, KeyValue) + self.assertEqual(kv.key, "key") + self.assertEqual(kv.value, "value") + self.assertEqual(str(kv), "key=value") + + # Test tuple-like behavior + self.assertEqual(kv[0], "key") + self.assertEqual(kv[1], "value") + self.assertEqual(list(kv), ["key", "value"]) + + def test_parse_key_value_colon(self): + kv = parse_key_value("key:value") + self.assertEqual(kv.key, "key") + self.assertEqual(kv.value, "value") + + def test_parse_key_value_with_spaces(self): + kv = parse_key_value(" key = value ") +
self.assertEqual(kv.key, "key") + self.assertEqual(kv.value, "value") + + def test_parse_key_value_with_multiple_delimiters(self): + # Should split on first occurrence only + kv = parse_key_value("key=value=extra") + self.assertEqual(kv.key, "key") + self.assertEqual(kv.value, "value=extra") + + def test_parse_key_value_invalid(self): + with self.assertRaises(typer.BadParameter) as cm: + parse_key_value("invalid") + msg = str(cm.exception) + self.assertIn("Expected a key-value pair formatted as --option key=value, but got 'invalid'", msg) + + def test_validate_key_value_compat(self): + # Test backward compatibility function + result = validate_key_value("key=value") + self.assertEqual(result, ("key", "value")) + + def test_key_value_compat_function(self): + # Test the KEY_VALUE constant + result = KEY_VALUE("key=value") + self.assertEqual(result, ("key", "value")) + + +class FractionTest(TestCase): + def test_parse_fraction(self): + frac = parse_fraction("3/4") + self.assertIsInstance(frac, Fraction) + self.assertEqual(frac.numerator, 3) + self.assertEqual(frac.denominator, 4) + self.assertEqual(str(frac), "3/4") + self.assertEqual(float(frac), 0.75) + + # Test tuple-like behavior + self.assertEqual(frac[0], 3) + self.assertEqual(frac[1], 4) + self.assertEqual(list(frac), [3, 4]) + + def test_parse_fraction_with_spaces(self): + frac = parse_fraction(" 1 / 2 ") + self.assertEqual(frac.numerator, 1) + self.assertEqual(frac.denominator, 2) + + def test_parse_fraction_invalid(self): + with self.assertRaises(typer.BadParameter) as cm: + parse_fraction("invalid") + msg = str(cm.exception) + self.assertIn("Expected fraction like 1/2 but got 'invalid'", msg) + + def test_parse_fraction_invalid_numbers(self): + with self.assertRaises(typer.BadParameter): + parse_fraction("a/b") + + +class DateTimeWithTimezoneTest(TestCase): + def test_parse_datetime_with_timezone(self): + dt_str = "2023-10-01 12:00:00+00:00" + dt_obj = parse_datetime_with_timezone(dt_str) + self.assertIsInstance(dt_obj, DateTimeWithTimezone) + self.assertEqual(dt_obj.dt.year, 2023) + self.assertEqual(dt_obj.dt.month, 10) + self.assertEqual(dt_obj.dt.day, 1) + self.assertEqual(dt_obj.dt.hour, 12) + # dateutil.parser creates tzutc() which is equivalent to but not equal to timezone.utc + self.assertEqual(dt_obj.dt.utcoffset(), timezone.utc.utcoffset(None)) + + def test_parse_datetime_without_timezone(self): + dt_str = "2023-10-01 12:00:00" + dt_obj = parse_datetime_with_timezone(dt_str) + self.assertEqual(dt_obj.dt.tzinfo, tzlocal()) + + def test_parse_datetime_iso_format(self): + dt_str = "2023-10-01T20:00:00Z" + dt_obj = parse_datetime_with_timezone(dt_str) + # dateutil.parser creates tzutc() which is equivalent to but not equal to timezone.utc + self.assertEqual(dt_obj.dt.utcoffset(), timezone.utc.utcoffset(None)) + + def test_parse_datetime_invalid(self): + with self.assertRaises(typer.BadParameter) as cm: + parse_datetime_with_timezone("invalid") + msg = str(cm.exception) + self.assertIn("Expected datetime like 2023-10-01T12:00:00 but got 'invalid'", msg) + + def test_datetime_with_timezone_methods(self): + dt_obj = parse_datetime_with_timezone("2023-10-01T12:00:00Z") + self.assertEqual(dt_obj.datetime(), dt_obj.dt) + # Test string representation + self.assertIn("2023-10-01T12:00:00", str(dt_obj)) + + def test_validate_datetime_with_tz_compat(self): + # Test backward compatibility function + result = validate_datetime_with_tz("2023-10-01T12:00:00Z") + self.assertIsInstance(result, datetime.datetime) + # dateutil.parser creates 
tzutc() which is equivalent to but not equal to timezone.utc + self.assertEqual(result.utcoffset(), timezone.utc.utcoffset(None)) + + def test_datetime_with_tz_compat_function(self): + # Test the DATETIME_WITH_TZ constant + result = DATETIME_WITH_TZ("2023-10-01T12:00:00Z") + self.assertIsInstance(result, datetime.datetime) + + def test_validate_past_datetime(self): + # Test with None + self.assertIsNone(validate_past_datetime(None)) + + # Test with past datetime + past_dt = datetime.datetime(2020, 1, 1, tzinfo=tzlocal()) + self.assertEqual(validate_past_datetime(past_dt), past_dt) + + # Test with future datetime + future_dt = datetime.datetime(2030, 1, 1, tzinfo=tzlocal()) + with self.assertRaises(typer.BadParameter) as cm: + validate_past_datetime(future_dt) + msg = str(cm.exception) + self.assertIn("The provided timestamp must be in the past", msg) + + # Test with non-datetime object + with self.assertRaises(typer.BadParameter) as cm: + validate_past_datetime("not a datetime") + msg = str(cm.exception) + self.assertIn("Expected a datetime object", msg) + + +class EmojiTest(TestCase): + def test_emoji_function(self): + # Test with fallback + result = emoji("🎉", "!") + self.assertIn(result, ["🎉", "!"]) # Depends on system capability + + # Test without fallback + result = emoji("🎉") + self.assertIn(result, ["🎉", ""]) # Depends on system capability + + def test_emoji_constant(self): + # EMOJI should be a boolean + self.assertIsInstance(EMOJI, bool) diff --git a/uv.lock b/uv.lock new file mode 100644 index 000000000..ef798c53d --- /dev/null +++ b/uv.lock @@ -0,0 +1,573 @@ +version = 1 +revision = 2 +requires-python = ">=3.13" + +[[package]] +name = "autopep8" +version = "2.3.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pycodestyle" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/50/d8/30873d2b7b57dee9263e53d142da044c4600a46f2d28374b3e38b023df16/autopep8-2.3.2.tar.gz", hash = "sha256:89440a4f969197b69a995e4ce0661b031f455a9f776d2c5ba3dbd83466931758", size = 92210, upload-time = "2025-01-14T14:46:18.454Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/9e/43/53afb8ba17218f19b77c7834128566c5bbb100a0ad9ba2e8e89d089d7079/autopep8-2.3.2-py2.py3-none-any.whl", hash = "sha256:ce8ad498672c845a0c3de2629c15b635ec2b05ef8177a6e7c91c74f3e9b51128", size = 45807, upload-time = "2025-01-14T14:46:15.466Z" }, +] + +[[package]] +name = "certifi" +version = "2025.6.15" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/73/f7/f14b46d4bcd21092d7d3ccef689615220d8a08fb25e564b65d20738e672e/certifi-2025.6.15.tar.gz", hash = "sha256:d747aa5a8b9bbbb1bb8c22bb13e22bd1f18e9796defa16bab421f7f7a317323b", size = 158753, upload-time = "2025-06-15T02:45:51.329Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/84/ae/320161bd181fc06471eed047ecce67b693fd7515b16d495d8932db763426/certifi-2025.6.15-py3-none-any.whl", hash = "sha256:2e0c7ce7cb5d8f8634ca55d2ba7e6ec2689a2fd6537d8dec1296a477a4910057", size = 157650, upload-time = "2025-06-15T02:45:49.977Z" }, +] + +[[package]] +name = "cfgv" +version = "3.4.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/11/74/539e56497d9bd1d484fd863dd69cbbfa653cd2aa27abfe35653494d85e94/cfgv-3.4.0.tar.gz", hash = "sha256:e52591d4c5f5dead8e0f673fb16db7949d2cfb3f7da4582893288f0ded8fe560", size = 7114, upload-time = "2023-08-12T20:38:17.776Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/c5/55/51844dd50c4fc7a33b653bfaba4c2456f06955289ca770a5dbd5fd267374/cfgv-3.4.0-py2.py3-none-any.whl", hash = "sha256:b7265b1f29fd3316bfcd2b330d63d024f2bfd8bcb8b0272f8e19a504856c48f9", size = 7249, upload-time = "2023-08-12T20:38:16.269Z" }, +] + +[[package]] +name = "charset-normalizer" +version = "3.4.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e4/33/89c2ced2b67d1c2a61c19c6751aa8902d46ce3dacb23600a283619f5a12d/charset_normalizer-3.4.2.tar.gz", hash = "sha256:5baececa9ecba31eff645232d59845c07aa030f0c81ee70184a90d35099a0e63", size = 126367, upload-time = "2025-05-02T08:34:42.01Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ea/12/a93df3366ed32db1d907d7593a94f1fe6293903e3e92967bebd6950ed12c/charset_normalizer-3.4.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:926ca93accd5d36ccdabd803392ddc3e03e6d4cd1cf17deff3b989ab8e9dbcf0", size = 199622, upload-time = "2025-05-02T08:32:56.363Z" }, + { url = "https://files.pythonhosted.org/packages/04/93/bf204e6f344c39d9937d3c13c8cd5bbfc266472e51fc8c07cb7f64fcd2de/charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eba9904b0f38a143592d9fc0e19e2df0fa2e41c3c3745554761c5f6447eedabf", size = 143435, upload-time = "2025-05-02T08:32:58.551Z" }, + { url = "https://files.pythonhosted.org/packages/22/2a/ea8a2095b0bafa6c5b5a55ffdc2f924455233ee7b91c69b7edfcc9e02284/charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3fddb7e2c84ac87ac3a947cb4e66d143ca5863ef48e4a5ecb83bd48619e4634e", size = 153653, upload-time = "2025-05-02T08:33:00.342Z" }, + { url = "https://files.pythonhosted.org/packages/b6/57/1b090ff183d13cef485dfbe272e2fe57622a76694061353c59da52c9a659/charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:98f862da73774290f251b9df8d11161b6cf25b599a66baf087c1ffe340e9bfd1", size = 146231, upload-time = "2025-05-02T08:33:02.081Z" }, + { url = "https://files.pythonhosted.org/packages/e2/28/ffc026b26f441fc67bd21ab7f03b313ab3fe46714a14b516f931abe1a2d8/charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c9379d65defcab82d07b2a9dfbfc2e95bc8fe0ebb1b176a3190230a3ef0e07c", size = 148243, upload-time = "2025-05-02T08:33:04.063Z" }, + { url = "https://files.pythonhosted.org/packages/c0/0f/9abe9bd191629c33e69e47c6ef45ef99773320e9ad8e9cb08b8ab4a8d4cb/charset_normalizer-3.4.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e635b87f01ebc977342e2697d05b56632f5f879a4f15955dfe8cef2448b51691", size = 150442, upload-time = "2025-05-02T08:33:06.418Z" }, + { url = "https://files.pythonhosted.org/packages/67/7c/a123bbcedca91d5916c056407f89a7f5e8fdfce12ba825d7d6b9954a1a3c/charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:1c95a1e2902a8b722868587c0e1184ad5c55631de5afc0eb96bc4b0d738092c0", size = 145147, upload-time = "2025-05-02T08:33:08.183Z" }, + { url = "https://files.pythonhosted.org/packages/ec/fe/1ac556fa4899d967b83e9893788e86b6af4d83e4726511eaaad035e36595/charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ef8de666d6179b009dce7bcb2ad4c4a779f113f12caf8dc77f0162c29d20490b", size = 153057, upload-time = "2025-05-02T08:33:09.986Z" }, + { url = 
"https://files.pythonhosted.org/packages/2b/ff/acfc0b0a70b19e3e54febdd5301a98b72fa07635e56f24f60502e954c461/charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:32fc0341d72e0f73f80acb0a2c94216bd704f4f0bce10aedea38f30502b271ff", size = 156454, upload-time = "2025-05-02T08:33:11.814Z" }, + { url = "https://files.pythonhosted.org/packages/92/08/95b458ce9c740d0645feb0e96cea1f5ec946ea9c580a94adfe0b617f3573/charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:289200a18fa698949d2b39c671c2cc7a24d44096784e76614899a7ccf2574b7b", size = 154174, upload-time = "2025-05-02T08:33:13.707Z" }, + { url = "https://files.pythonhosted.org/packages/78/be/8392efc43487ac051eee6c36d5fbd63032d78f7728cb37aebcc98191f1ff/charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4a476b06fbcf359ad25d34a057b7219281286ae2477cc5ff5e3f70a246971148", size = 149166, upload-time = "2025-05-02T08:33:15.458Z" }, + { url = "https://files.pythonhosted.org/packages/44/96/392abd49b094d30b91d9fbda6a69519e95802250b777841cf3bda8fe136c/charset_normalizer-3.4.2-cp313-cp313-win32.whl", hash = "sha256:aaeeb6a479c7667fbe1099af9617c83aaca22182d6cf8c53966491a0f1b7ffb7", size = 98064, upload-time = "2025-05-02T08:33:17.06Z" }, + { url = "https://files.pythonhosted.org/packages/e9/b0/0200da600134e001d91851ddc797809e2fe0ea72de90e09bec5a2fbdaccb/charset_normalizer-3.4.2-cp313-cp313-win_amd64.whl", hash = "sha256:aa6af9e7d59f9c12b33ae4e9450619cf2488e2bbe9b44030905877f0b2324980", size = 105641, upload-time = "2025-05-02T08:33:18.753Z" }, + { url = "https://files.pythonhosted.org/packages/20/94/c5790835a017658cbfabd07f3bfb549140c3ac458cfc196323996b10095a/charset_normalizer-3.4.2-py3-none-any.whl", hash = "sha256:7f56930ab0abd1c45cd15be65cc741c28b1c9a34876ce8c17a2fa107810c0af0", size = 52626, upload-time = "2025-05-02T08:34:40.053Z" }, +] + +[[package]] +name = "click" +version = "8.1.8" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b9/2e/0090cbf739cee7d23781ad4b89a9894a41538e4fcf4c31dcdd705b78eb8b/click-8.1.8.tar.gz", hash = "sha256:ed53c9d8990d83c2a27deae68e4ee337473f6330c040a31d4225c9574d16096a", size = 226593, upload-time = "2024-12-21T18:38:44.339Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7e/d4/7ebdbd03970677812aac39c869717059dbb71a4cfc033ca6e5221787892c/click-8.1.8-py3-none-any.whl", hash = "sha256:63c132bbbed01578a06712a2d1f497bb62d9c1c0d329b7903a866228027263b2", size = 98188, upload-time = "2024-12-21T18:38:41.666Z" }, +] + +[[package]] +name = "colorama" +version = "0.4.6" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697, upload-time = "2022-10-25T02:36:22.414Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335, upload-time = "2022-10-25T02:36:20.889Z" }, +] + +[[package]] +name = "distlib" +version = "0.3.9" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/0d/dd/1bec4c5ddb504ca60fc29472f3d27e8d4da1257a854e1d96742f15c1d02d/distlib-0.3.9.tar.gz", hash = "sha256:a60f20dea646b8a33f3e7772f74dc0b2d0772d2837ee1342a00645c81edf9403", size = 613923, upload-time = "2024-10-09T18:35:47.551Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/91/a1/cf2472db20f7ce4a6be1253a81cfdf85ad9c7885ffbed7047fb72c24cf87/distlib-0.3.9-py2.py3-none-any.whl", hash = "sha256:47f8c22fd27c27e25a65601af709b38e4f0a45ea4fc2e710f65755fa8caaaf87", size = 468973, upload-time = "2024-10-09T18:35:44.272Z" }, +] + +[[package]] +name = "filelock" +version = "3.18.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/0a/10/c23352565a6544bdc5353e0b15fc1c563352101f30e24bf500207a54df9a/filelock-3.18.0.tar.gz", hash = "sha256:adbc88eabb99d2fec8c9c1b229b171f18afa655400173ddc653d5d01501fb9f2", size = 18075, upload-time = "2025-03-14T07:11:40.47Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4d/36/2a115987e2d8c300a974597416d9de88f2444426de9571f4b59b2cca3acc/filelock-3.18.0-py3-none-any.whl", hash = "sha256:c401f4f8377c4464e6db25fff06205fd89bdd83b65eb0488ed1b160f780e21de", size = 16215, upload-time = "2025-03-14T07:11:39.145Z" }, +] + +[[package]] +name = "flake8" +version = "7.2.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "mccabe" }, + { name = "pycodestyle" }, + { name = "pyflakes" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/e7/c4/5842fc9fc94584c455543540af62fd9900faade32511fab650e9891ec225/flake8-7.2.0.tar.gz", hash = "sha256:fa558ae3f6f7dbf2b4f22663e5343b6b6023620461f8d4ff2019ef4b5ee70426", size = 48177, upload-time = "2025-03-29T20:08:39.329Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/83/5c/0627be4c9976d56b1217cb5187b7504e7fd7d3503f8bfd312a04077bd4f7/flake8-7.2.0-py2.py3-none-any.whl", hash = "sha256:93b92ba5bdb60754a6da14fa3b93a9361fd00a59632ada61fd7b130436c40343", size = 57786, upload-time = "2025-03-29T20:08:37.902Z" }, +] + +[[package]] +name = "identify" +version = "2.6.12" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a2/88/d193a27416618628a5eea64e3223acd800b40749a96ffb322a9b55a49ed1/identify-2.6.12.tar.gz", hash = "sha256:d8de45749f1efb108badef65ee8386f0f7bb19a7f26185f74de6367bffbaf0e6", size = 99254, upload-time = "2025-05-23T20:37:53.3Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7a/cd/18f8da995b658420625f7ef13f037be53ae04ec5ad33f9b718240dcfd48c/identify-2.6.12-py2.py3-none-any.whl", hash = "sha256:ad9672d5a72e0d2ff7c5c8809b62dfa60458626352fb0eb7b55e69bdc45334a2", size = 99145, upload-time = "2025-05-23T20:37:51.495Z" }, +] + +[[package]] +name = "idna" +version = "3.10" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f1/70/7703c29685631f5a7590aa73f1f1d3fa9a380e654b86af429e0934a32f7d/idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9", size = 190490, upload-time = "2024-09-15T18:07:39.745Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/76/c6/c88e154df9c4e1a2a66ccf0005a88dfb2650c1dffb6f5ce603dfbd452ce3/idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3", size = 70442, upload-time = "2024-09-15T18:07:37.964Z" }, +] + +[[package]] +name = "isort" +version = "6.0.1" +source = { registry = 
"https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b8/21/1e2a441f74a653a144224d7d21afe8f4169e6c7c20bb13aec3a2dc3815e0/isort-6.0.1.tar.gz", hash = "sha256:1cb5df28dfbc742e490c5e41bad6da41b805b0a8be7bc93cd0fb2a8a890ac450", size = 821955, upload-time = "2025-02-26T21:13:16.955Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c1/11/114d0a5f4dabbdcedc1125dee0888514c3c3b16d3e9facad87ed96fad97c/isort-6.0.1-py3-none-any.whl", hash = "sha256:2dc5d7f65c9678d94c88dfc29161a320eec67328bc97aad576874cb4be1e9615", size = 94186, upload-time = "2025-02-26T21:13:14.911Z" }, +] + +[[package]] +name = "junitparser" +version = "4.0.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/aa/97/954ee1ef04e50d8494e9f5d82d4051ed71a7618aa2c1514c1b3f24691174/junitparser-4.0.2.tar.gz", hash = "sha256:d5d07cece6d4a600ff3b7b96c8db5ffa45a91eed695cb86c45c3db113c1ca0f8", size = 25646, upload-time = "2025-06-24T04:37:32.664Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/44/87/b444f934f62ee2a1be45bb52563cf17a66b0d790eba43af4df9929e7107f/junitparser-4.0.2-py3-none-any.whl", hash = "sha256:94c3570e41fcaedc64cc3c634ca99457fe41a84dd1aa8ff74e9e12e66223a155", size = 14592, upload-time = "2025-06-24T04:37:31.322Z" }, +] + +[[package]] +name = "lxml" +version = "5.4.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/76/3d/14e82fc7c8fb1b7761f7e748fd47e2ec8276d137b6acfe5a4bb73853e08f/lxml-5.4.0.tar.gz", hash = "sha256:d12832e1dbea4be280b22fd0ea7c9b87f0d8fc51ba06e92dc62d52f804f78ebd", size = 3679479, upload-time = "2025-04-23T01:50:29.322Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/87/cb/2ba1e9dd953415f58548506fa5549a7f373ae55e80c61c9041b7fd09a38a/lxml-5.4.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:773e27b62920199c6197130632c18fb7ead3257fce1ffb7d286912e56ddb79e0", size = 8110086, upload-time = "2025-04-23T01:46:52.218Z" }, + { url = "https://files.pythonhosted.org/packages/b5/3e/6602a4dca3ae344e8609914d6ab22e52ce42e3e1638c10967568c5c1450d/lxml-5.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:ce9c671845de9699904b1e9df95acfe8dfc183f2310f163cdaa91a3535af95de", size = 4404613, upload-time = "2025-04-23T01:46:55.281Z" }, + { url = "https://files.pythonhosted.org/packages/4c/72/bf00988477d3bb452bef9436e45aeea82bb40cdfb4684b83c967c53909c7/lxml-5.4.0-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9454b8d8200ec99a224df8854786262b1bd6461f4280064c807303c642c05e76", size = 5012008, upload-time = "2025-04-23T01:46:57.817Z" }, + { url = "https://files.pythonhosted.org/packages/92/1f/93e42d93e9e7a44b2d3354c462cd784dbaaf350f7976b5d7c3f85d68d1b1/lxml-5.4.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cccd007d5c95279e529c146d095f1d39ac05139de26c098166c4beb9374b0f4d", size = 4760915, upload-time = "2025-04-23T01:47:00.745Z" }, + { url = "https://files.pythonhosted.org/packages/45/0b/363009390d0b461cf9976a499e83b68f792e4c32ecef092f3f9ef9c4ba54/lxml-5.4.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0fce1294a0497edb034cb416ad3e77ecc89b313cff7adbee5334e4dc0d11f422", size = 5283890, upload-time = "2025-04-23T01:47:04.702Z" }, + { url = 
"https://files.pythonhosted.org/packages/19/dc/6056c332f9378ab476c88e301e6549a0454dbee8f0ae16847414f0eccb74/lxml-5.4.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:24974f774f3a78ac12b95e3a20ef0931795ff04dbb16db81a90c37f589819551", size = 4812644, upload-time = "2025-04-23T01:47:07.833Z" }, + { url = "https://files.pythonhosted.org/packages/ee/8a/f8c66bbb23ecb9048a46a5ef9b495fd23f7543df642dabeebcb2eeb66592/lxml-5.4.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:497cab4d8254c2a90bf988f162ace2ddbfdd806fce3bda3f581b9d24c852e03c", size = 4921817, upload-time = "2025-04-23T01:47:10.317Z" }, + { url = "https://files.pythonhosted.org/packages/04/57/2e537083c3f381f83d05d9b176f0d838a9e8961f7ed8ddce3f0217179ce3/lxml-5.4.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:e794f698ae4c5084414efea0f5cc9f4ac562ec02d66e1484ff822ef97c2cadff", size = 4753916, upload-time = "2025-04-23T01:47:12.823Z" }, + { url = "https://files.pythonhosted.org/packages/d8/80/ea8c4072109a350848f1157ce83ccd9439601274035cd045ac31f47f3417/lxml-5.4.0-cp313-cp313-manylinux_2_28_ppc64le.whl", hash = "sha256:2c62891b1ea3094bb12097822b3d44b93fc6c325f2043c4d2736a8ff09e65f60", size = 5289274, upload-time = "2025-04-23T01:47:15.916Z" }, + { url = "https://files.pythonhosted.org/packages/b3/47/c4be287c48cdc304483457878a3f22999098b9a95f455e3c4bda7ec7fc72/lxml-5.4.0-cp313-cp313-manylinux_2_28_s390x.whl", hash = "sha256:142accb3e4d1edae4b392bd165a9abdee8a3c432a2cca193df995bc3886249c8", size = 4874757, upload-time = "2025-04-23T01:47:19.793Z" }, + { url = "https://files.pythonhosted.org/packages/2f/04/6ef935dc74e729932e39478e44d8cfe6a83550552eaa072b7c05f6f22488/lxml-5.4.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:1a42b3a19346e5601d1b8296ff6ef3d76038058f311902edd574461e9c036982", size = 4947028, upload-time = "2025-04-23T01:47:22.401Z" }, + { url = "https://files.pythonhosted.org/packages/cb/f9/c33fc8daa373ef8a7daddb53175289024512b6619bc9de36d77dca3df44b/lxml-5.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:4291d3c409a17febf817259cb37bc62cb7eb398bcc95c1356947e2871911ae61", size = 4834487, upload-time = "2025-04-23T01:47:25.513Z" }, + { url = "https://files.pythonhosted.org/packages/8d/30/fc92bb595bcb878311e01b418b57d13900f84c2b94f6eca9e5073ea756e6/lxml-5.4.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:4f5322cf38fe0e21c2d73901abf68e6329dc02a4994e483adbcf92b568a09a54", size = 5381688, upload-time = "2025-04-23T01:47:28.454Z" }, + { url = "https://files.pythonhosted.org/packages/43/d1/3ba7bd978ce28bba8e3da2c2e9d5ae3f8f521ad3f0ca6ea4788d086ba00d/lxml-5.4.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:0be91891bdb06ebe65122aa6bf3fc94489960cf7e03033c6f83a90863b23c58b", size = 5242043, upload-time = "2025-04-23T01:47:31.208Z" }, + { url = "https://files.pythonhosted.org/packages/ee/cd/95fa2201041a610c4d08ddaf31d43b98ecc4b1d74b1e7245b1abdab443cb/lxml-5.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:15a665ad90054a3d4f397bc40f73948d48e36e4c09f9bcffc7d90c87410e478a", size = 5021569, upload-time = "2025-04-23T01:47:33.805Z" }, + { url = "https://files.pythonhosted.org/packages/2d/a6/31da006fead660b9512d08d23d31e93ad3477dd47cc42e3285f143443176/lxml-5.4.0-cp313-cp313-win32.whl", hash = "sha256:d5663bc1b471c79f5c833cffbc9b87d7bf13f87e055a5c86c363ccd2348d7e82", size = 3485270, upload-time = "2025-04-23T01:47:36.133Z" }, + { url = 
"https://files.pythonhosted.org/packages/fc/14/c115516c62a7d2499781d2d3d7215218c0731b2c940753bf9f9b7b73924d/lxml-5.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:bcb7a1096b4b6b24ce1ac24d4942ad98f983cd3810f9711bcd0293f43a9d8b9f", size = 3814606, upload-time = "2025-04-23T01:47:39.028Z" }, +] + +[[package]] +name = "markdown-it-py" +version = "3.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "mdurl" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/38/71/3b932df36c1a044d397a1f92d1cf91ee0a503d91e470cbd670aa66b07ed0/markdown-it-py-3.0.0.tar.gz", hash = "sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb", size = 74596, upload-time = "2023-06-03T06:41:14.443Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/42/d7/1ec15b46af6af88f19b8e5ffea08fa375d433c998b8a7639e76935c14f1f/markdown_it_py-3.0.0-py3-none-any.whl", hash = "sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1", size = 87528, upload-time = "2023-06-03T06:41:11.019Z" }, +] + +[[package]] +name = "mccabe" +version = "0.7.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e7/ff/0ffefdcac38932a54d2b5eed4e0ba8a408f215002cd178ad1df0f2806ff8/mccabe-0.7.0.tar.gz", hash = "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325", size = 9658, upload-time = "2022-01-24T01:14:51.113Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/27/1a/1f68f9ba0c207934b35b86a8ca3aad8395a3d6dd7921c0686e23853ff5a9/mccabe-0.7.0-py2.py3-none-any.whl", hash = "sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e", size = 7350, upload-time = "2022-01-24T01:14:49.62Z" }, +] + +[[package]] +name = "mdurl" +version = "0.1.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d6/54/cfe61301667036ec958cb99bd3efefba235e65cdeb9c84d24a8293ba1d90/mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba", size = 8729, upload-time = "2022-08-14T12:40:10.846Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b3/38/89ba8ad64ae25be8de66a6d463314cf1eb366222074cfda9ee839c56a4b4/mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8", size = 9979, upload-time = "2022-08-14T12:40:09.779Z" }, +] + +[[package]] +name = "more-itertools" +version = "10.7.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ce/a0/834b0cebabbfc7e311f30b46c8188790a37f89fc8d756660346fe5abfd09/more_itertools-10.7.0.tar.gz", hash = "sha256:9fddd5403be01a94b204faadcff459ec3568cf110265d3c54323e1e866ad29d3", size = 127671, upload-time = "2025-04-22T14:17:41.838Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2b/9f/7ba6f94fc1e9ac3d2b853fdff3035fb2fa5afbed898c4a72b8a020610594/more_itertools-10.7.0-py3-none-any.whl", hash = "sha256:d43980384673cb07d2f7d2d918c616b30c659c089ee23953f601d6609c67510e", size = 65278, upload-time = "2025-04-22T14:17:40.49Z" }, +] + +[[package]] +name = "mypy" +version = "1.15.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "mypy-extensions" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ce/43/d5e49a86afa64bd3839ea0d5b9c7103487007d728e1293f52525d6d5486a/mypy-1.15.0.tar.gz", hash = 
"sha256:404534629d51d3efea5c800ee7c42b72a6554d6c400e6a79eafe15d11341fd43", size = 3239717, upload-time = "2025-02-05T03:50:34.655Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6a/9b/fd2e05d6ffff24d912f150b87db9e364fa8282045c875654ce7e32fffa66/mypy-1.15.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:93faf3fdb04768d44bf28693293f3904bbb555d076b781ad2530214ee53e3445", size = 10788592, upload-time = "2025-02-05T03:48:55.789Z" }, + { url = "https://files.pythonhosted.org/packages/74/37/b246d711c28a03ead1fd906bbc7106659aed7c089d55fe40dd58db812628/mypy-1.15.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:811aeccadfb730024c5d3e326b2fbe9249bb7413553f15499a4050f7c30e801d", size = 9753611, upload-time = "2025-02-05T03:48:44.581Z" }, + { url = "https://files.pythonhosted.org/packages/a6/ac/395808a92e10cfdac8003c3de9a2ab6dc7cde6c0d2a4df3df1b815ffd067/mypy-1.15.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:98b7b9b9aedb65fe628c62a6dc57f6d5088ef2dfca37903a7d9ee374d03acca5", size = 11438443, upload-time = "2025-02-05T03:49:25.514Z" }, + { url = "https://files.pythonhosted.org/packages/d2/8b/801aa06445d2de3895f59e476f38f3f8d610ef5d6908245f07d002676cbf/mypy-1.15.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c43a7682e24b4f576d93072216bf56eeff70d9140241f9edec0c104d0c515036", size = 12402541, upload-time = "2025-02-05T03:49:57.623Z" }, + { url = "https://files.pythonhosted.org/packages/c7/67/5a4268782eb77344cc613a4cf23540928e41f018a9a1ec4c6882baf20ab8/mypy-1.15.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:baefc32840a9f00babd83251560e0ae1573e2f9d1b067719479bfb0e987c6357", size = 12494348, upload-time = "2025-02-05T03:48:52.361Z" }, + { url = "https://files.pythonhosted.org/packages/83/3e/57bb447f7bbbfaabf1712d96f9df142624a386d98fb026a761532526057e/mypy-1.15.0-cp313-cp313-win_amd64.whl", hash = "sha256:b9378e2c00146c44793c98b8d5a61039a048e31f429fb0eb546d93f4b000bedf", size = 9373648, upload-time = "2025-02-05T03:49:11.395Z" }, + { url = "https://files.pythonhosted.org/packages/09/4e/a7d65c7322c510de2c409ff3828b03354a7c43f5a8ed458a7a131b41c7b9/mypy-1.15.0-py3-none-any.whl", hash = "sha256:5469affef548bd1895d86d3bf10ce2b44e33d86923c29e4d675b3e323437ea3e", size = 2221777, upload-time = "2025-02-05T03:50:08.348Z" }, +] + +[[package]] +name = "mypy-extensions" +version = "1.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a2/6e/371856a3fb9d31ca8dac321cda606860fa4548858c0cc45d9d1d4ca2628b/mypy_extensions-1.1.0.tar.gz", hash = "sha256:52e68efc3284861e772bbcd66823fde5ae21fd2fdb51c62a211403730b916558", size = 6343, upload-time = "2025-04-22T14:54:24.164Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/79/7b/2c79738432f5c924bef5071f933bcc9efd0473bac3b4aa584a6f7c1c8df8/mypy_extensions-1.1.0-py3-none-any.whl", hash = "sha256:1be4cccdb0f2482337c4743e60421de3a356cd97508abadd57d47403e94f5505", size = 4963, upload-time = "2025-04-22T14:54:22.983Z" }, +] + +[[package]] +name = "nodeenv" +version = "1.9.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/43/16/fc88b08840de0e0a72a2f9d8c6bae36be573e475a6326ae854bcc549fc45/nodeenv-1.9.1.tar.gz", hash = "sha256:6ec12890a2dab7946721edbfbcd91f3319c6ccc9aec47be7c7e6b7011ee6645f", size = 47437, upload-time = "2024-06-04T18:44:11.171Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/d2/1d/1b658dbd2b9fa9c4c9f32accbfc0205d532c8c6194dc0f2a4c0428e7128a/nodeenv-1.9.1-py2.py3-none-any.whl", hash = "sha256:ba11c9782d29c27c70ffbdda2d7415098754709be8a7056d79a737cd901155c9", size = 22314, upload-time = "2024-06-04T18:44:08.352Z" }, +] + +[[package]] +name = "pastel" +version = "0.2.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/76/f1/4594f5e0fcddb6953e5b8fe00da8c317b8b41b547e2b3ae2da7512943c62/pastel-0.2.1.tar.gz", hash = "sha256:e6581ac04e973cac858828c6202c1e1e81fee1dc7de7683f3e1ffe0bfd8a573d", size = 7555, upload-time = "2020-09-16T19:21:12.43Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/aa/18/a8444036c6dd65ba3624c63b734d3ba95ba63ace513078e1580590075d21/pastel-0.2.1-py2.py3-none-any.whl", hash = "sha256:4349225fcdf6c2bb34d483e523475de5bb04a5c10ef711263452cb37d7dd4364", size = 5955, upload-time = "2020-09-16T19:21:11.409Z" }, +] + +[[package]] +name = "platformdirs" +version = "4.3.8" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/fe/8b/3c73abc9c759ecd3f1f7ceff6685840859e8070c4d947c93fae71f6a0bf2/platformdirs-4.3.8.tar.gz", hash = "sha256:3d512d96e16bcb959a814c9f348431070822a6496326a4be0911c40b5a74c2bc", size = 21362, upload-time = "2025-05-07T22:47:42.121Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fe/39/979e8e21520d4e47a0bbe349e2713c0aac6f3d853d0e5b34d76206c439aa/platformdirs-4.3.8-py3-none-any.whl", hash = "sha256:ff7059bb7eb1179e2685604f4aaf157cfd9535242bd23742eadc3c13542139b4", size = 18567, upload-time = "2025-05-07T22:47:40.376Z" }, +] + +[[package]] +name = "poethepoet" +version = "0.35.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pastel" }, + { name = "pyyaml" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/d6/b1/d4f4361b278fae10f6074675385ce3acf53c647f8e6eeba22c652f8ba985/poethepoet-0.35.0.tar.gz", hash = "sha256:b396ae862d7626e680bbd0985b423acf71634ce93a32d8b5f38340f44f5fbc3e", size = 66006, upload-time = "2025-06-09T12:58:18.849Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/38/08/abc2d7e2400dd8906e3208f9b88ac610f097d7ee0c7a1fa4a157b49a9e86/poethepoet-0.35.0-py3-none-any.whl", hash = "sha256:bed5ae1fd63f179dfa67aabb93fa253d79695c69667c927d8b24ff378799ea75", size = 87164, upload-time = "2025-06-09T12:58:17.084Z" }, +] + +[[package]] +name = "pre-commit" +version = "4.2.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cfgv" }, + { name = "identify" }, + { name = "nodeenv" }, + { name = "pyyaml" }, + { name = "virtualenv" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/08/39/679ca9b26c7bb2999ff122d50faa301e49af82ca9c066ec061cfbc0c6784/pre_commit-4.2.0.tar.gz", hash = "sha256:601283b9757afd87d40c4c4a9b2b5de9637a8ea02eaff7adc2d0fb4e04841146", size = 193424, upload-time = "2025-03-18T21:35:20.987Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/88/74/a88bf1b1efeae488a0c0b7bdf71429c313722d1fc0f377537fbe554e6180/pre_commit-4.2.0-py2.py3-none-any.whl", hash = "sha256:a009ca7205f1eb497d10b845e52c838a98b6cdd2102a6c8e4540e94ee75c58bd", size = 220707, upload-time = "2025-03-18T21:35:19.343Z" }, +] + +[[package]] +name = "pycodestyle" +version = "2.13.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/04/6e/1f4a62078e4d95d82367f24e685aef3a672abfd27d1a868068fed4ed2254/pycodestyle-2.13.0.tar.gz", hash = "sha256:c8415bf09abe81d9c7f872502a6eee881fbe85d8763dd5b9924bb0a01d67efae", size = 39312, upload-time = "2025-03-29T17:33:30.669Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/07/be/b00116df1bfb3e0bb5b45e29d604799f7b91dd861637e4d448b4e09e6a3e/pycodestyle-2.13.0-py2.py3-none-any.whl", hash = "sha256:35863c5974a271c7a726ed228a14a4f6daf49df369d8c50cd9a6f58a5e143ba9", size = 31424, upload-time = "2025-03-29T17:33:29.405Z" }, +] + +[[package]] +name = "pyflakes" +version = "3.3.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/af/cc/1df338bd7ed1fa7c317081dcf29bf2f01266603b301e6858856d346a12b3/pyflakes-3.3.2.tar.gz", hash = "sha256:6dfd61d87b97fba5dcfaaf781171ac16be16453be6d816147989e7f6e6a9576b", size = 64175, upload-time = "2025-03-31T13:21:20.34Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/15/40/b293a4fa769f3b02ab9e387c707c4cbdc34f073f945de0386107d4e669e6/pyflakes-3.3.2-py2.py3-none-any.whl", hash = "sha256:5039c8339cbb1944045f4ee5466908906180f13cc99cc9949348d10f82a5c32a", size = 63164, upload-time = "2025-03-31T13:21:18.503Z" }, +] + +[[package]] +name = "pygments" +version = "2.19.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/7c/2d/c3338d48ea6cc0feb8446d8e6937e1408088a72a39937982cc6111d17f84/pygments-2.19.1.tar.gz", hash = "sha256:61c16d2a8576dc0649d9f39e089b5f02bcd27fba10d8fb4dcc28173f7a45151f", size = 4968581, upload-time = "2025-01-06T17:26:30.443Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8a/0b/9fcc47d19c48b59121088dd6da2488a49d5f72dacf8262e2790a1d2c7d15/pygments-2.19.1-py3-none-any.whl", hash = "sha256:9ea1544ad55cecf4b8242fab6dd35a93bbce657034b0611ee383099054ab6d8c", size = 1225293, upload-time = "2025-01-06T17:26:25.553Z" }, +] + +[[package]] +name = "python-dateutil" +version = "2.9.0.post0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "six" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/66/c0/0c8b6ad9f17a802ee498c46e004a0eb49bc148f2fd230864601a86dcf6db/python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3", size = 342432, upload-time = "2024-03-01T18:36:20.211Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ec/57/56b9bcc3c9c6a792fcbaf139543cee77261f3651ca9da0c93f5c1221264b/python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427", size = 229892, upload-time = "2024-03-01T18:36:18.57Z" }, +] + +[[package]] +name = "pyyaml" +version = "6.0.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/54/ed/79a089b6be93607fa5cdaedf301d7dfb23af5f25c398d5ead2525b063e17/pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e", size = 130631, upload-time = "2024-08-06T20:33:50.674Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ef/e3/3af305b830494fa85d95f6d95ef7fa73f2ee1cc8ef5b495c7c3269fb835f/PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba", size = 181309, upload-time = "2024-08-06T20:32:43.4Z" }, + { url = 
"https://files.pythonhosted.org/packages/45/9f/3b1c20a0b7a3200524eb0076cc027a970d320bd3a6592873c85c92a08731/PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1", size = 171679, upload-time = "2024-08-06T20:32:44.801Z" }, + { url = "https://files.pythonhosted.org/packages/7c/9a/337322f27005c33bcb656c655fa78325b730324c78620e8328ae28b64d0c/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133", size = 733428, upload-time = "2024-08-06T20:32:46.432Z" }, + { url = "https://files.pythonhosted.org/packages/a3/69/864fbe19e6c18ea3cc196cbe5d392175b4cf3d5d0ac1403ec3f2d237ebb5/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484", size = 763361, upload-time = "2024-08-06T20:32:51.188Z" }, + { url = "https://files.pythonhosted.org/packages/04/24/b7721e4845c2f162d26f50521b825fb061bc0a5afcf9a386840f23ea19fa/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5", size = 759523, upload-time = "2024-08-06T20:32:53.019Z" }, + { url = "https://files.pythonhosted.org/packages/2b/b2/e3234f59ba06559c6ff63c4e10baea10e5e7df868092bf9ab40e5b9c56b6/PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc", size = 726660, upload-time = "2024-08-06T20:32:54.708Z" }, + { url = "https://files.pythonhosted.org/packages/fe/0f/25911a9f080464c59fab9027482f822b86bf0608957a5fcc6eaac85aa515/PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652", size = 751597, upload-time = "2024-08-06T20:32:56.985Z" }, + { url = "https://files.pythonhosted.org/packages/14/0d/e2c3b43bbce3cf6bd97c840b46088a3031085179e596d4929729d8d68270/PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183", size = 140527, upload-time = "2024-08-06T20:33:03.001Z" }, + { url = "https://files.pythonhosted.org/packages/fa/de/02b54f42487e3d3c6efb3f89428677074ca7bf43aae402517bc7cca949f3/PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563", size = 156446, upload-time = "2024-08-06T20:33:04.33Z" }, +] + +[[package]] +name = "requests" +version = "2.32.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "certifi" }, + { name = "charset-normalizer" }, + { name = "idna" }, + { name = "urllib3" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/e1/0a/929373653770d8a0d7ea76c37de6e41f11eb07559b103b1c02cafb3f7cf8/requests-2.32.4.tar.gz", hash = "sha256:27d0316682c8a29834d3264820024b62a36942083d52caf2f14c0591336d3422", size = 135258, upload-time = "2025-06-09T16:43:07.34Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7c/e4/56027c4a6b4ae70ca9de302488c5ca95ad4a39e190093d6c1a8ace08341b/requests-2.32.4-py3-none-any.whl", hash = "sha256:27babd3cda2a6d50b30443204ee89830707d396671944c998b5975b031ac2b2c", size = 64847, upload-time = "2025-06-09T16:43:05.728Z" }, +] + +[[package]] +name = "responses" +version = "0.25.7" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pyyaml" }, + { name = "requests" }, + { name = "urllib3" }, +] +sdist = { 
url = "https://files.pythonhosted.org/packages/81/7e/2345ac3299bd62bd7163216702bbc88976c099cfceba5b889f2a457727a1/responses-0.25.7.tar.gz", hash = "sha256:8ebae11405d7a5df79ab6fd54277f6f2bc29b2d002d0dd2d5c632594d1ddcedb", size = 79203, upload-time = "2025-03-11T15:36:16.624Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e4/fc/1d20b64fa90e81e4fa0a34c9b0240a6cfb1326b7e06d18a5432a9917c316/responses-0.25.7-py3-none-any.whl", hash = "sha256:92ca17416c90fe6b35921f52179bff29332076bb32694c0df02dcac2c6bc043c", size = 34732, upload-time = "2025-03-11T15:36:14.589Z" }, +] + +[[package]] +name = "rich" +version = "14.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "markdown-it-py" }, + { name = "pygments" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a1/53/830aa4c3066a8ab0ae9a9955976fb770fe9c6102117c8ec4ab3ea62d89e8/rich-14.0.0.tar.gz", hash = "sha256:82f1bc23a6a21ebca4ae0c45af9bdbc492ed20231dcb63f297d6d1021a9d5725", size = 224078, upload-time = "2025-03-30T14:15:14.23Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0d/9b/63f4c7ebc259242c89b3acafdb37b41d1185c07ff0011164674e9076b491/rich-14.0.0-py3-none-any.whl", hash = "sha256:1c9491e1951aac09caffd42f448ee3d04e58923ffe14993f6e83068dc395d7e0", size = 243229, upload-time = "2025-03-30T14:15:12.283Z" }, +] + +[[package]] +name = "shellingham" +version = "1.5.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/58/15/8b3609fd3830ef7b27b655beb4b4e9c62313a4e8da8c676e142cc210d58e/shellingham-1.5.4.tar.gz", hash = "sha256:8dbca0739d487e5bd35ab3ca4b36e11c4078f3a234bfce294b0a0291363404de", size = 10310, upload-time = "2023-10-24T04:13:40.426Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e0/f9/0595336914c5619e5f28a1fb793285925a8cd4b432c9da0a987836c7f822/shellingham-1.5.4-py2.py3-none-any.whl", hash = "sha256:7ecfff8f2fd72616f7481040475a65b2bf8af90a56c89140852d1120324e8686", size = 9755, upload-time = "2023-10-24T04:13:38.866Z" }, +] + +[[package]] +name = "six" +version = "1.17.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/94/e7/b2c673351809dca68a0e064b6af791aa332cf192da575fd474ed7d6f16a2/six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81", size = 34031, upload-time = "2024-12-04T17:35:28.174Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b7/ce/149a00dd41f10bc29e5921b496af8b574d8413afcd5e30dfa0ed46c2cc5e/six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274", size = 11050, upload-time = "2024-12-04T17:35:26.475Z" }, +] + +[[package]] +name = "smart-tests-cli" +source = { editable = "." 
} +dependencies = [ + { name = "junitparser" }, + { name = "more-itertools" }, + { name = "python-dateutil" }, + { name = "requests" }, + { name = "tabulate" }, + { name = "typer" }, + { name = "urllib3" }, +] + +[package.dev-dependencies] +dev = [ + { name = "autopep8" }, + { name = "flake8" }, + { name = "isort" }, + { name = "lxml" }, + { name = "mypy" }, + { name = "poethepoet" }, + { name = "pre-commit" }, + { name = "responses" }, + { name = "types-pkg-resources" }, + { name = "types-python-dateutil" }, + { name = "types-requests" }, + { name = "types-tabulate" }, + { name = "unittest-xml-reporting" }, +] + +[package.metadata] +requires-dist = [ + { name = "junitparser", specifier = ">=4.0.0" }, + { name = "more-itertools", specifier = ">=7.1.0" }, + { name = "python-dateutil" }, + { name = "requests", specifier = ">=2.25" }, + { name = "tabulate" }, + { name = "typer", specifier = ">=0.9.0" }, + { name = "urllib3", specifier = ">=1.26" }, +] + +[package.metadata.requires-dev] +dev = [ + { name = "autopep8", specifier = ">=2.0.0" }, + { name = "flake8" }, + { name = "isort" }, + { name = "lxml" }, + { name = "mypy" }, + { name = "poethepoet" }, + { name = "pre-commit" }, + { name = "responses" }, + { name = "types-pkg-resources" }, + { name = "types-python-dateutil" }, + { name = "types-requests" }, + { name = "types-tabulate" }, + { name = "unittest-xml-reporting" }, +] + +[[package]] +name = "tabulate" +version = "0.9.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ec/fe/802052aecb21e3797b8f7902564ab6ea0d60ff8ca23952079064155d1ae1/tabulate-0.9.0.tar.gz", hash = "sha256:0095b12bf5966de529c0feb1fa08671671b3368eec77d7ef7ab114be2c068b3c", size = 81090, upload-time = "2022-10-06T17:21:48.54Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/40/44/4a5f08c96eb108af5cb50b41f76142f0afa346dfa99d5296fe7202a11854/tabulate-0.9.0-py3-none-any.whl", hash = "sha256:024ca478df22e9340661486f85298cff5f6dcdba14f3813e8830015b9ed1948f", size = 35252, upload-time = "2022-10-06T17:21:44.262Z" }, +] + +[[package]] +name = "typer" +version = "0.16.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "click" }, + { name = "rich" }, + { name = "shellingham" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c5/8c/7d682431efca5fd290017663ea4588bf6f2c6aad085c7f108c5dbc316e70/typer-0.16.0.tar.gz", hash = "sha256:af377ffaee1dbe37ae9440cb4e8f11686ea5ce4e9bae01b84ae7c63b87f1dd3b", size = 102625, upload-time = "2025-05-26T14:30:31.824Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/76/42/3efaf858001d2c2913de7f354563e3a3a2f0decae3efe98427125a8f441e/typer-0.16.0-py3-none-any.whl", hash = "sha256:1f79bed11d4d02d4310e3c1b7ba594183bcedb0ac73b27a9e5f28f6fb5b98855", size = 46317, upload-time = "2025-05-26T14:30:30.523Z" }, +] + +[[package]] +name = "types-pkg-resources" +version = "0.1.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a4/5c/5e04590a4b9749a33a6b8b93784270a65f8ebdec55a0b7742bb30e2075bb/types-pkg_resources-0.1.3.tar.gz", hash = "sha256:834a9b8d3dbea343562fd99d5d3359a726f6bf9d3733bccd2b4f3096fbab9dae", size = 4204, upload-time = "2021-06-17T15:01:28.046Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e9/97/a24ffd614ac2962dabbd599afbed00adf6464604a53b96b5f48301518a5f/types_pkg_resources-0.1.3-py2.py3-none-any.whl", hash = 
"sha256:0cb9972cee992249f93fff1a491bf2dc3ce674e5a1926e27d4f0866f7d9b6d9c", size = 4810, upload-time = "2021-06-17T15:01:26.789Z" }, +] + +[[package]] +name = "types-python-dateutil" +version = "2.9.0.20250516" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ef/88/d65ed807393285204ab6e2801e5d11fbbea811adcaa979a2ed3b67a5ef41/types_python_dateutil-2.9.0.20250516.tar.gz", hash = "sha256:13e80d6c9c47df23ad773d54b2826bd52dbbb41be87c3f339381c1700ad21ee5", size = 13943, upload-time = "2025-05-16T03:06:58.385Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c5/3f/b0e8db149896005adc938a1e7f371d6d7e9eca4053a29b108978ed15e0c2/types_python_dateutil-2.9.0.20250516-py3-none-any.whl", hash = "sha256:2b2b3f57f9c6a61fba26a9c0ffb9ea5681c9b83e69cd897c6b5f668d9c0cab93", size = 14356, upload-time = "2025-05-16T03:06:57.249Z" }, +] + +[[package]] +name = "types-requests" +version = "2.32.4.20250611" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "urllib3" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/6d/7f/73b3a04a53b0fd2a911d4ec517940ecd6600630b559e4505cc7b68beb5a0/types_requests-2.32.4.20250611.tar.gz", hash = "sha256:741c8777ed6425830bf51e54d6abe245f79b4dcb9019f1622b773463946bf826", size = 23118, upload-time = "2025-06-11T03:11:41.272Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/3d/ea/0be9258c5a4fa1ba2300111aa5a0767ee6d18eb3fd20e91616c12082284d/types_requests-2.32.4.20250611-py3-none-any.whl", hash = "sha256:ad2fe5d3b0cb3c2c902c8815a70e7fb2302c4b8c1f77bdcd738192cdb3878072", size = 20643, upload-time = "2025-06-11T03:11:40.186Z" }, +] + +[[package]] +name = "types-tabulate" +version = "0.9.0.20241207" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/3f/43/16030404a327e4ff8c692f2273854019ed36718667b2993609dc37d14dd4/types_tabulate-0.9.0.20241207.tar.gz", hash = "sha256:ac1ac174750c0a385dfd248edc6279fa328aaf4ea317915ab879a2ec47833230", size = 8195, upload-time = "2024-12-07T02:54:42.554Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5e/86/a9ebfd509cbe74471106dffed320e208c72537f9aeb0a55eaa6b1b5e4d17/types_tabulate-0.9.0.20241207-py3-none-any.whl", hash = "sha256:b8dad1343c2a8ba5861c5441370c3e35908edd234ff036d4298708a1d4cf8a85", size = 8307, upload-time = "2024-12-07T02:54:41.031Z" }, +] + +[[package]] +name = "typing-extensions" +version = "4.14.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d1/bc/51647cd02527e87d05cb083ccc402f93e441606ff1f01739a62c8ad09ba5/typing_extensions-4.14.0.tar.gz", hash = "sha256:8676b788e32f02ab42d9e7c61324048ae4c6d844a399eebace3d4979d75ceef4", size = 107423, upload-time = "2025-06-02T14:52:11.399Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/69/e0/552843e0d356fbb5256d21449fa957fa4eff3bbc135a74a691ee70c7c5da/typing_extensions-4.14.0-py3-none-any.whl", hash = "sha256:a1514509136dd0b477638fc68d6a91497af5076466ad0fa6c338e44e359944af", size = 43839, upload-time = "2025-06-02T14:52:10.026Z" }, +] + +[[package]] +name = "unittest-xml-reporting" +version = "3.2.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "lxml" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ed/40/3bf1afc96e93c7322520981ac4593cbb29daa21b48d32746f05ab5563dca/unittest-xml-reporting-3.2.0.tar.gz", hash = 
"sha256:edd8d3170b40c3a81b8cf910f46c6a304ae2847ec01036d02e9c0f9b85762d28", size = 18002, upload-time = "2022-01-20T19:09:55.76Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/39/88/f6e9b87428584a3c62cac768185c438ca6d561367a5d267b293259d76075/unittest_xml_reporting-3.2.0-py2.py3-none-any.whl", hash = "sha256:f3d7402e5b3ac72a5ee3149278339db1a8f932ee405f48bcb9c681372f2717d5", size = 20936, upload-time = "2022-01-20T19:09:53.824Z" }, +] + +[[package]] +name = "urllib3" +version = "2.5.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/15/22/9ee70a2574a4f4599c47dd506532914ce044817c7752a79b6a51286319bc/urllib3-2.5.0.tar.gz", hash = "sha256:3fc47733c7e419d4bc3f6b3dc2b4f890bb743906a30d56ba4a5bfa4bbff92760", size = 393185, upload-time = "2025-06-18T14:07:41.644Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a7/c2/fe1e52489ae3122415c51f387e221dd0773709bad6c6cdaa599e8a2c5185/urllib3-2.5.0-py3-none-any.whl", hash = "sha256:e6b01673c0fa6a13e374b50871808eb3bf7046c4b125b216f6bf1cc604cff0dc", size = 129795, upload-time = "2025-06-18T14:07:40.39Z" }, +] + +[[package]] +name = "virtualenv" +version = "20.31.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "distlib" }, + { name = "filelock" }, + { name = "platformdirs" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/56/2c/444f465fb2c65f40c3a104fd0c495184c4f2336d65baf398e3c75d72ea94/virtualenv-20.31.2.tar.gz", hash = "sha256:e10c0a9d02835e592521be48b332b6caee6887f332c111aa79a09b9e79efc2af", size = 6076316, upload-time = "2025-05-08T17:58:23.811Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f3/40/b1c265d4b2b62b58576588510fc4d1fe60a86319c8de99fd8e9fec617d2c/virtualenv-20.31.2-py3-none-any.whl", hash = "sha256:36efd0d9650ee985f0cad72065001e66d49a6f24eb44d98980f630686243cf11", size = 6057982, upload-time = "2025-05-08T17:58:21.15Z" }, +]