diff --git a/.githooks/pre-commit b/.githooks/pre-commit new file mode 100644 index 00000000000..e1063a389b2 --- /dev/null +++ b/.githooks/pre-commit @@ -0,0 +1,12 @@ +#!/usr/bin/env sh +set -eu + +# ensure gitleaks is available +if ! command -v gitleaks >/dev/null 2>&1; then + echo "Error: gitleaks is not installed or not in PATH." >&2 + echo "Install: https://github.com/gitleaks/gitleaks#install" >&2 + exit 1 +fi + +# scan for secrets before commit +gitleaks detect --no-git --verbose \ No newline at end of file diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS new file mode 100644 index 00000000000..bc798da640f --- /dev/null +++ b/.github/CODEOWNERS @@ -0,0 +1,42 @@ +# Log Management Codeowners rules +* @centreon/log-management +/stacks/** @centreon/log-management +/infra/** @centreon/log-management +/tools/** @centreon/log-management + +# Rust Codeowners rules +*.rs @centreon/owners-rust +Cargo.toml @centreon/owners-rust +Cargo.lock @centreon/owners-rust +/lib/crates/**/README.md @centreon/owners-rust +/lib/crates/**/docs/ @centreon/owners-rust +**/crate/**/README.md @centreon/owners-rust +**/crate/**/docs/ @centreon/owners-rust + +# Python Codeowners rules +*.py @centreon/owners-python +pyproject.toml @centreon/owners-python +uv.lock @centreon/owners-python +/lib/pip/**/README.md @centreon/owners-python +/lib/pip/**/docs/ @centreon/owners-python +**/pip/**/README.md @centreon/owners-python +**/pip/**/docs/ @centreon/owners-python + +# JS Codeowners rules +*.js @centreon/owners-react +*.ts @centreon/owners-react +*.tsx @centreon/owners-react +*.css @centreon/owners-react +**/pnpm-lock.yaml @centreon/owners-react +**/pnpm-workspace.yaml @centreon/owners-react +**/cypress @centreon/owners-react + +# Pipelines Codeowners rules +.github/** @centreon/owners-pipelines +.yamlfix.toml @centreon/owners-pipelines + +# Security Codeowners rules +.gitleaks.toml @centreon/owners-security +.gitleaksignore @centreon/owners-security +.githooks/pre-commit 
@centreon/owners-security +**/secu-*.yml @centreon/owners-security \ No newline at end of file diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index e4a12d69521..85093c255f6 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -55,16 +55,24 @@ jobs: - name: Cleanup Disk Space run: | df -h - sudo rm -rf /usr/share/dotnet - sudo rm -rf /usr/local/lib/android - sudo rm -rf /usr/share/swift - sudo rm -rf /usr/local/.ghcup - sudo rm -rf /opt/hostedtoolcache/CodeQL - df -h + + if [ "$(df -BG / | awk 'NR==2 {gsub("G","",$4); print $4}')" -lt 30 ]; then + echo "Less than 30GiB available. Running cleanup..." + sudo rm -rf /usr/share/dotnet + sudo rm -rf /usr/local/lib/android + sudo rm -rf /usr/share/swift + sudo rm -rf /usr/local/.ghcup + sudo rm -rf /opt/hostedtoolcache/CodeQL + df -h + else + echo "30GiB or more available. Skipping cleanup." + fi - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - name: Install Ubuntu packages - run: sudo apt-get -y install protobuf-compiler + run: | + sudo apt-get update + sudo apt-get -y install protobuf-compiler - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v.6.1.0 with: python-version: '3.11' @@ -88,9 +96,10 @@ jobs: if: steps.modified.outputs.rust_src == 'true' with: workspaces: "./quickwit -> target" + shared-key: "quickwit-cargo" - name: Install nextest if: always() && steps.modified.outputs.rust_src == 'true' - uses: taiki-e/cache-cargo-install-action@34ce5120836e5f9f1508d8713d7fdea0e8facd6f # v3.0.1 + uses: taiki-e/install-action@aba36d755ec7ca22d38b12111787c26115943952 with: tool: cargo-nextest - name: cargo build @@ -132,7 +141,9 @@ jobs: - .github/workflows/ci.yml - name: Install Ubuntu packages if: always() && steps.modified.outputs.rust_src == 'true' - run: sudo apt-get -y install protobuf-compiler + run: | + sudo apt-get update + sudo apt-get -y install protobuf-compiler - name: Setup nightly Rust Toolchain (for rustfmt) if: 
steps.modified.outputs.rust_src == 'true' uses: dtolnay/rust-toolchain@f7ccc83f9ed1e5b9c81d8a67d7ad1a747e22a561 # master @@ -149,6 +160,7 @@ jobs: uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2 with: workspaces: "./quickwit -> target" + shared-key: "quickwit-cargo" - name: Install cargo deny if: always() && steps.modified.outputs.rust_src == 'true' uses: taiki-e/cache-cargo-install-action@34ce5120836e5f9f1508d8713d7fdea0e8facd6f # v3.0.1 diff --git a/.github/workflows/secu-dependency-scan.yml b/.github/workflows/secu-dependency-scan.yml new file mode 100644 index 00000000000..5f9dc58fb93 --- /dev/null +++ b/.github/workflows/secu-dependency-scan.yml @@ -0,0 +1,17 @@ +name: dependency-scan + +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + +on: + pull_request: + paths: + - '**' + - '!lib/crates/**' + - '!tests/**' + + workflow_dispatch: + +jobs: + dependency-scan: + uses: centreon/security-tools/.github/workflows/dependency-analysis.yml@main diff --git a/.github/workflows/secu-secret-scan.yml b/.github/workflows/secu-secret-scan.yml new file mode 100644 index 00000000000..b6dc4bb06eb --- /dev/null +++ b/.github/workflows/secu-secret-scan.yml @@ -0,0 +1,12 @@ +name: secrets-scan + +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + +on: + pull_request: + workflow_dispatch: + +jobs: + secrets-scan: + uses: centreon/security-tools/.github/workflows/gitleaks-analysis.yml@main diff --git a/.github/workflows/ui-ci.yml b/.github/workflows/ui-ci.yml index 3b1d298f971..ba6cbca14b5 100644 --- a/.github/workflows/ui-ci.yml +++ b/.github/workflows/ui-ci.yml @@ -18,32 +18,36 @@ permissions: contents: read jobs: - tests: - name: ${{ matrix.task.name }} + checks: + name: Lint, type check & unit tests + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + - uses: actions/setup-node@395ad3262231945c25e8478fd5baf05154b1d79f 
# v6.1.0 + with: + node-version: 24 + cache: "yarn" + cache-dependency-path: quickwit/quickwit-ui/yarn.lock + - name: Install JS dependencies + run: yarn --cwd quickwit-ui install + working-directory: ./quickwit + - name: Lint + run: yarn --cwd quickwit-ui lint + working-directory: ./quickwit + - name: Type check + run: yarn --cwd quickwit-ui type + working-directory: ./quickwit + - name: Unit tests + run: yarn --cwd quickwit-ui test + working-directory: ./quickwit + + e2e: + name: Playwright e2e runs-on: ubuntu-latest permissions: contents: read actions: write - strategy: - fail-fast: false - matrix: - task: - - name: Cypress run - command: | - sudo apt-get -y install protobuf-compiler - CI=false yarn --cwd quickwit-ui build - RUSTFLAGS="--cfg tokio_unstable" cargo build --features=postgres - mkdir qwdata - RUSTFLAGS="--cfg tokio_unstable" cargo run --features=postgres -- run --service searcher --service metastore --config ../config/quickwit.yaml & - yarn --cwd quickwit-ui cypress run - - name: Lint - command: yarn --cwd quickwit-ui lint - - name: Check type consistency - command: yarn --cwd quickwit-ui type - - name: Unit Test - command: yarn --cwd quickwit-ui test services: - # PostgreSQL service container postgres: image: postgres:latest ports: @@ -52,7 +56,6 @@ jobs: POSTGRES_USER: quickwit-dev POSTGRES_PASSWORD: quickwit-dev POSTGRES_DB: quickwit-metastore-dev - # Set health checks to wait until postgres has started options: >- --health-cmd pg_isready --health-interval 10s @@ -61,7 +64,7 @@ jobs: env: CARGO_INCREMENTAL: 0 RUST_BACKTRACE: 1 - RUSTFLAGS: -Dwarnings -C lto=off + RUSTFLAGS: -Dwarnings --cfg tokio_unstable RUSTDOCFLAGS: -Dwarnings -Arustdoc::private_intra_doc_links QW_TEST_DATABASE_URL: postgres://quickwit-dev:quickwit-dev@postgres:5432/quickwit-metastore-dev steps: @@ -75,14 +78,28 @@ jobs: uses: dtolnay/rust-toolchain@f7ccc83f9ed1e5b9c81d8a67d7ad1a747e22a561 # master with: toolchain: stable - - name: Install JS dependencies - run: yarn --cwd 
quickwit-ui install - working-directory: ./quickwit - name: Setup Rust cache - if: matrix.task.name == 'Cypress run' uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2 with: workspaces: "./quickwit -> target" - - name: ${{ matrix.task.name }} - run: ${{ matrix.task.command }} + shared-key: "quickwit-cargo" + - name: Install JS dependencies + run: yarn --cwd quickwit-ui install + working-directory: ./quickwit + - name: Install Playwright browsers + run: npx playwright install chromium --with-deps --only-shell + working-directory: ./quickwit/quickwit-ui + - name: Build UI + run: CI=false yarn --cwd quickwit-ui build + working-directory: ./quickwit + - name: Build Quickwit + run: | + sudo apt-get update && sudo apt-get -y install protobuf-compiler + cargo build --features=postgres + working-directory: ./quickwit + - name: Run e2e tests + run: | + mkdir -p qwdata + cargo run --features=postgres -- run --service searcher --service metastore --config ../config/quickwit.yaml & + yarn --cwd quickwit-ui e2e-test working-directory: ./quickwit diff --git a/LICENSE-3rdparty.csv b/LICENSE-3rdparty.csv index b5c3eaf3c34..d290cf36c9f 100644 --- a/LICENSE-3rdparty.csv +++ b/LICENSE-3rdparty.csv @@ -281,6 +281,7 @@ oneshot,https://github.com/faern/oneshot,MIT OR Apache-2.0,Linus Färnstrand openssl-probe,https://github.com/alexcrichton/openssl-probe,MIT OR Apache-2.0,Alex Crichton opentelemetry,https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry,Apache-2.0,The opentelemetry Authors +opentelemetry-appender-tracing,https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-appender-tracing,Apache-2.0,The opentelemetry-appender-tracing Authors opentelemetry-http,https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-http,Apache-2.0,The opentelemetry-http Authors opentelemetry-otlp,https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-otlp,Apache-2.0,The opentelemetry-otlp 
Authors opentelemetry-proto,https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-proto,Apache-2.0,The opentelemetry-proto Authors diff --git a/docs/reference/es_compatible_api.md b/docs/reference/es_compatible_api.md index 32cbdafd761..28ba1aa7eb2 100644 --- a/docs/reference/es_compatible_api.md +++ b/docs/reference/es_compatible_api.md @@ -365,6 +365,79 @@ Example response: [HTTP accept header]: https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html + +### `_field_caps`   Field capabilities API + +``` +GET api/v1/_elastic//_field_caps +``` +``` +POST api/v1/_elastic//_field_caps +``` +``` +GET api/v1/_elastic/_field_caps +``` +``` +POST api/v1/_elastic/_field_caps +``` + +The [field capabilities API](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-field-caps.html) returns information about the capabilities of fields among multiple indices. + +#### Supported Query string parameters + +| Variable | Type | Description | Default value | +| --------------------- | ---------- | ------------------------------------------------------------------------------ | ------------- | +| `fields` | `String` | Comma-separated list of fields to retrieve capabilities for. Supports wildcards (`*`). | (Optional) | +| `allow_no_indices` | `Boolean` | If `true`, missing or closed indices are not an error. | (Optional) | +| `expand_wildcards` | `String` | Controls what kind of indices that wildcard patterns can match. | (Optional) | +| `ignore_unavailable` | `Boolean` | If `true`, unavailable indices are ignored. | (Optional) | +| `start_timestamp` | `Integer` | *(Quickwit-specific)* If set, restricts splits to documents with a timestamp range start >= `start_timestamp` (seconds since epoch). | (Optional) | +| `end_timestamp` | `Integer` | *(Quickwit-specific)* If set, restricts splits to documents with a timestamp range end < `end_timestamp` (seconds since epoch). 
| (Optional) | + +#### Supported Request Body parameters + +| Variable | Type | Description | Default value | +| ------------------ | ------------- | --------------------------------------------------------------------------- | ------------- | +| `index_filter` | `Json object` | A query to filter indices. If provided, only fields from indices that can potentially match the filter are returned. See [index_filter](#index_filter). | (Optional) | +| `runtime_mappings` | `Json object` | Accepted but not supported. | (Optional) | + +#### `index_filter` + +The `index_filter` parameter allows you to filter which indices contribute to the field capabilities response. When provided, Quickwit uses the filter query to prune indices (splits) that cannot match the filter, and only returns field capabilities for the remaining ones. + +Like Elasticsearch, this is a **best-effort** approach: Quickwit may return field capabilities from indices that do not actually contain any matching documents. In Quickwit, the filtering is limited to the existing split-pruning based on metadata: + +- **Time pruning**: Range queries on the timestamp field can eliminate splits whose time range does not overlap with the filter. +- **Tag pruning**: Term queries on [tag fields](../configuration/index-config.md#tag-fields) can eliminate splits that do not contain the requested tag value. + +Other filter types (e.g. full-text queries or term queries on non-tag fields) are accepted but will not prune any splits — all indices will be returned as if no filter was specified. In particular, Quickwit does not check whether terms are present in the term dictionary. 
+ +#### Request Body example + +```json +{ + "index_filter": { + "range": { + "timestamp": { + "gte": "2024-01-01T00:00:00Z", + "lt": "2024-02-01T00:00:00Z" + } + } + } +} +``` + +```json +{ + "index_filter": { + "term": { + "status": "active" + } + } +} +``` + + ## Query DSL [Elasticsearch Query DSL reference](https://www.elastic.co/guide/en/elasticsearch/reference/8.8/query-dsl.html). diff --git a/docs/reference/metrics.md b/docs/reference/metrics.md index 9df72335a05..9e854f8f1f6 100644 --- a/docs/reference/metrics.md +++ b/docs/reference/metrics.md @@ -42,8 +42,8 @@ Currently Quickwit exposes metrics for three caches: `fastfields`, `shortlived`, | Namespace | Metric Name | Description | Type | | --------- | ----------- | ----------- | ---- | -| `quickwit_ingest` | `ingested_num_bytes` | Total size of the docs ingested in bytes | `counter` | -| `quickwit_ingest` | `ingested_num_docs` | Number of docs received to be ingested | `counter` | +| `quickwit_ingest` | `docs_bytes_total` | Total size of the docs ingested, measured in ingester's leader, after validation and before persistence/replication | `counter` | +| `quickwit_ingest` | `docs_total` | Total number of the docs ingested, measured in ingester's leader, after validation and before persistence/replication | `counter` | | `quickwit_ingest` | `queue_count` | Number of queues currently active | `counter` | ## Metastore Metrics diff --git a/quickwit/Cargo.lock b/quickwit/Cargo.lock index ce03131e6e2..fae390095d9 100644 --- a/quickwit/Cargo.lock +++ b/quickwit/Cargo.lock @@ -5355,6 +5355,18 @@ dependencies = [ "tracing", ] +[[package]] +name = "opentelemetry-appender-tracing" +version = "0.31.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef6a1ac5ca3accf562b8c306fa8483c85f4390f768185ab775f242f7fe8fdcc2" +dependencies = [ + "opentelemetry", + "tracing", + "tracing-core", + "tracing-subscriber", +] + [[package]] name = "opentelemetry-http" version = "0.31.0" @@ -6603,6 +6615,7 
@@ dependencies = [ "once_cell", "openssl-probe 0.1.6", "opentelemetry", + "opentelemetry-appender-tracing", "opentelemetry-otlp", "opentelemetry_sdk", "predicates", diff --git a/quickwit/Cargo.toml b/quickwit/Cargo.toml index 3ffdcb2f5a6..7eac3d317b1 100644 --- a/quickwit/Cargo.toml +++ b/quickwit/Cargo.toml @@ -164,6 +164,7 @@ oneshot = "0.1" openssl = { version = "0.10", default-features = false } openssl-probe = "0.1" opentelemetry = "0.31" +opentelemetry-appender-tracing = "0.31" opentelemetry_sdk = { version = "0.31", features = ["rt-tokio"] } opentelemetry-otlp = { version = "0.31", features = ["grpc-tonic"] } ouroboros = "0.18" @@ -393,7 +394,7 @@ sasl2-sys = { git = "https://github.com/quickwit-oss/rust-sasl/", rev = "085a4c7 #tracing-subscriber = { git = "https://github.com/trinity-1686a/tracing.git", rev = "6806cac3" } [profile.dev] -debug = false +debug = true [profile.release] lto = "thin" diff --git a/quickwit/quickwit-cli/Cargo.toml b/quickwit/quickwit-cli/Cargo.toml index 8819d92ec97..5d9dc955107 100644 --- a/quickwit/quickwit-cli/Cargo.toml +++ b/quickwit/quickwit-cli/Cargo.toml @@ -37,6 +37,7 @@ numfmt = { workspace = true } once_cell = { workspace = true } openssl-probe = { workspace = true, optional = true } opentelemetry = { workspace = true } +opentelemetry-appender-tracing = { workspace = true } opentelemetry_sdk = { workspace = true } opentelemetry-otlp = { workspace = true } reqwest = { workspace = true } @@ -101,6 +102,7 @@ release-feature-set = [ "quickwit-indexing/pulsar", "quickwit-indexing/sqs", "quickwit-indexing/vrl", + "quickwit-serve/lambda", "quickwit-storage/azure", "quickwit-storage/gcs", "quickwit-metastore/postgres", @@ -114,6 +116,7 @@ release-feature-vendored-set = [ "quickwit-indexing/sqs", "quickwit-indexing/vrl", "quickwit-indexing/vendored-kafka", + "quickwit-serve/lambda", "quickwit-storage/azure", "quickwit-storage/gcs", "quickwit-metastore/postgres", @@ -126,6 +129,7 @@ release-macos-feature-vendored-set = [ 
"quickwit-indexing/sqs", "quickwit-indexing/vrl", "quickwit-indexing/vendored-kafka-macos", + "quickwit-serve/lambda", "quickwit-storage/azure", "quickwit-storage/gcs", "quickwit-metastore/postgres", diff --git a/quickwit/quickwit-cli/src/logger.rs b/quickwit/quickwit-cli/src/logger.rs index 6388d4ed78d..e1e60a14f93 100644 --- a/quickwit/quickwit-cli/src/logger.rs +++ b/quickwit/quickwit-cli/src/logger.rs @@ -18,6 +18,8 @@ use std::{env, fmt}; use anyhow::Context; use opentelemetry::trace::TracerProvider; use opentelemetry::{KeyValue, global}; +use opentelemetry_appender_tracing::layer::OpenTelemetryTracingBridge; +use opentelemetry_sdk::logs::SdkLoggerProvider; use opentelemetry_sdk::propagation::TraceContextPropagator; use opentelemetry_sdk::trace::{BatchConfigBuilder, SdkTracerProvider}; use opentelemetry_sdk::{Resource, trace}; @@ -56,7 +58,10 @@ pub fn setup_logging_and_tracing( level: Level, ansi_colors: bool, build_info: &BuildInfo, -) -> anyhow::Result<(EnvFilterReloadFn, Option)> { +) -> anyhow::Result<( + EnvFilterReloadFn, + Option<(SdkTracerProvider, SdkLoggerProvider)>, +)> { #[cfg(feature = "tokio-console")] { if get_bool_from_env(QW_ENABLE_TOKIO_CONSOLE_ENV_KEY, false) { @@ -93,11 +98,11 @@ pub fn setup_logging_and_tracing( // Note on disabling ANSI characters: setting the ansi boolean on event format is insufficient. 
// It is thus set on layers, see https://github.com/tokio-rs/tracing/issues/1817 let provider_opt = if get_bool_from_env(QW_ENABLE_OPENTELEMETRY_OTLP_EXPORTER_ENV_KEY, false) { - let otlp_exporter = opentelemetry_otlp::SpanExporter::builder() + let span_exporter = opentelemetry_otlp::SpanExporter::builder() .with_tonic() .build() .context("failed to initialize OpenTelemetry OTLP exporter")?; - let batch_processor = trace::BatchSpanProcessor::builder(otlp_exporter) + let span_processor = trace::BatchSpanProcessor::builder(span_exporter) .with_batch_config( BatchConfigBuilder::default() // Quickwit can generate a lot of spans, especially in debug mode, and the @@ -112,17 +117,33 @@ pub fn setup_logging_and_tracing( .with_attribute(KeyValue::new("service.version", build_info.version.clone())) .build(); - let provider = opentelemetry_sdk::trace::SdkTracerProvider::builder() - .with_span_processor(batch_processor) + let logs_exporter = opentelemetry_otlp::LogExporter::builder() + .with_tonic() + .build() + .context("failed to initialize OpenTelemetry OTLP logs")?; + + let logger_provider = SdkLoggerProvider::builder() + .with_resource(resource.clone()) + .with_batch_exporter(logs_exporter) + .build(); + + let tracing_provider = opentelemetry_sdk::trace::SdkTracerProvider::builder() + .with_span_processor(span_processor) .with_resource(resource) .build(); - let tracer = provider.tracer("quickwit"); + + let tracer = tracing_provider.tracer("quickwit"); let telemetry_layer = tracing_opentelemetry::layer().with_tracer(tracer); + + // Bridge between tracing logs and otel tracing events + let logs_otel_layer = OpenTelemetryTracingBridge::new(&logger_provider); + registry .with(telemetry_layer) + .with(logs_otel_layer) .try_init() .context("failed to register tracing subscriber")?; - Some(provider) + Some((tracing_provider, logger_provider)) } else { registry .try_init() diff --git a/quickwit/quickwit-cli/src/main.rs b/quickwit/quickwit-cli/src/main.rs index 
518cf6518e4..4a1f9ce036e 100644 --- a/quickwit/quickwit-cli/src/main.rs +++ b/quickwit/quickwit-cli/src/main.rs @@ -113,10 +113,13 @@ async fn main_impl() -> anyhow::Result<()> { 0 }; - if let Some(provider) = tracer_provider_opt { - provider + if let Some((trace_provider, logs_provider)) = tracer_provider_opt { + trace_provider .shutdown() .context("failed to shutdown OpenTelemetry tracer provider")?; + logs_provider + .shutdown() + .context("failed to shutdown OpenTelemetry logs provider")?; } std::process::exit(return_code) diff --git a/quickwit/quickwit-cli/src/tool.rs b/quickwit/quickwit-cli/src/tool.rs index 324e0d99da7..0f34a3017e6 100644 --- a/quickwit/quickwit-cli/src/tool.rs +++ b/quickwit/quickwit-cli/src/tool.rs @@ -45,6 +45,7 @@ use quickwit_indexing::models::{ use quickwit_ingest::IngesterPool; use quickwit_metastore::IndexMetadataResponseExt; use quickwit_proto::indexing::CpuCapacity; +use quickwit_proto::ingest::ingester::IngesterStatus; use quickwit_proto::metastore::{IndexMetadataRequest, MetastoreService, MetastoreServiceClient}; use quickwit_proto::search::{CountHits, SearchResponse}; use quickwit_proto::types::{IndexId, PipelineUid, SourceId, SplitId}; @@ -936,8 +937,9 @@ async fn create_empty_cluster(config: &NodeConfig) -> anyhow::Result { enabled_services: HashSet::new(), gossip_advertise_addr: config.gossip_advertise_addr, grpc_advertise_addr: config.grpc_advertise_addr, - indexing_cpu_capacity: CpuCapacity::zero(), indexing_tasks: Vec::new(), + indexing_cpu_capacity: CpuCapacity::zero(), + ingester_status: IngesterStatus::default(), availability_zone: None, }; let client_grpc_config = make_client_grpc_config(&config.grpc_config)?; diff --git a/quickwit/quickwit-cluster/src/change.rs b/quickwit/quickwit-cluster/src/change.rs index 1491212281c..77d3165f5f5 100644 --- a/quickwit/quickwit-cluster/src/change.rs +++ b/quickwit/quickwit-cluster/src/change.rs @@ -35,7 +35,10 @@ use crate::member::NodeStateExt; #[derive(Debug, Clone)] pub enum 
ClusterChange { Add(ClusterNode), - Update(ClusterNode), + Update { + previous: ClusterNode, + updated: ClusterNode, + }, Remove(ClusterNode), } @@ -246,7 +249,10 @@ async fn compute_cluster_change_events_on_updated( ); Some(ClusterChange::Remove(updated_node)) } else if previous_node.is_ready() && updated_node.is_ready() { - Some(ClusterChange::Update(updated_node)) + Some(ClusterChange::Update { + previous: previous_node, + updated: updated_node, + }) } else { None } @@ -681,16 +687,16 @@ pub(crate) mod tests { .await .unwrap(); - let ClusterChange::Update(node) = event else { + let ClusterChange::Update { updated, .. } = event else { panic!("expected `ClusterChange::Remove` event, got `{event:?}`"); }; - assert_eq!(node.chitchat_id(), &updated_chitchat_id); - assert_eq!(node.grpc_advertise_addr(), grpc_advertise_addr); - assert!(!node.is_self_node()); - assert!(node.is_ready()); + assert_eq!(updated.chitchat_id(), &updated_chitchat_id); + assert_eq!(updated.grpc_advertise_addr(), grpc_advertise_addr); + assert!(!updated.is_self_node()); + assert!(updated.is_ready()); assert_eq!( previous_nodes.get(&updated_chitchat_id.node_id).unwrap(), - &node + &updated ); } { @@ -1009,7 +1015,7 @@ pub(crate) mod tests { .await; assert_eq!(events.len(), 1); - let ClusterChange::Update(_node) = events[0].clone() else { + let ClusterChange::Update { .. 
} = events[0].clone() else { panic!( "Expected `ClusterChange::Update` event, got `{:?}`", events[0] diff --git a/quickwit/quickwit-cluster/src/cluster.rs b/quickwit/quickwit-cluster/src/cluster.rs index e3244746e0a..0ba54f7f71f 100644 --- a/quickwit/quickwit-cluster/src/cluster.rs +++ b/quickwit/quickwit-cluster/src/cluster.rs @@ -697,6 +697,7 @@ pub async fn create_cluster_for_test_with_id( self_node_readiness: bool, ) -> anyhow::Result { use quickwit_proto::indexing::PIPELINE_FULL_CAPACITY; + use quickwit_proto::ingest::ingester::IngesterStatus; let gossip_advertise_addr: SocketAddr = ([127, 0, 0, 1], gossip_advertise_port).into(); let self_node = ClusterMember { node_id, @@ -707,6 +708,7 @@ pub async fn create_cluster_for_test_with_id( grpc_advertise_addr: grpc_addr_from_listen_addr_for_test(gossip_advertise_addr), indexing_tasks: Vec::new(), indexing_cpu_capacity: PIPELINE_FULL_CAPACITY, + ingester_status: IngesterStatus::default(), availability_zone: None, }; let failure_detector_config = create_failure_detector_config_for_test(); diff --git a/quickwit/quickwit-cluster/src/lib.rs b/quickwit/quickwit-cluster/src/lib.rs index 30bb03e7f02..0f2dbebf749 100644 --- a/quickwit/quickwit-cluster/src/lib.rs +++ b/quickwit/quickwit-cluster/src/lib.rs @@ -36,6 +36,7 @@ use quickwit_common::tower::ClientGrpcConfig; use quickwit_config::service::QuickwitService; use quickwit_config::{GrpcConfig, NodeConfig, TlsConfig}; use quickwit_proto::indexing::CpuCapacity; +use quickwit_proto::ingest::ingester::IngesterStatus; use quickwit_proto::tonic::transport::{Certificate, ClientTlsConfig, Identity}; use time::OffsetDateTime; @@ -143,6 +144,7 @@ pub async fn start_cluster_service(node_config: &NodeConfig) -> anyhow::Result usize; + fn ingester_status(&self) -> IngesterStatus; + fn availability_zone(&self) -> Option; } @@ -79,6 +83,12 @@ impl NodeStateExt for NodeState { .sum() } + fn ingester_status(&self) -> IngesterStatus { + self.get(INGESTER_STATUS_KEY) + 
.and_then(IngesterStatus::from_json_str_name) + .unwrap_or_default() + } + fn availability_zone(&self) -> Option { self.get(AVAILABILITY_ZONE_KEY).map(|az| az.to_string()) } @@ -108,6 +118,10 @@ pub struct ClusterMember { pub indexing_tasks: Vec, /// Indexing cpu capacity of the node expressed in milli cpu. pub indexing_cpu_capacity: CpuCapacity, + /// Status of the ingester service running on the node. `IngesterStatus::Unspecified` if the + /// node is not an ingester. + pub ingester_status: IngesterStatus, + /// Whether the node is ready to serve requests. pub is_ready: bool, /// Availability zone the node is running in, if enabled. pub availability_zone: Option, @@ -159,10 +173,12 @@ pub(crate) fn build_cluster_member( .map(|enabled_services_str| { parse_enabled_services_str(enabled_services_str, &chitchat_id.node_id) })?; - let availability_zone = node_state.availability_zone(); let grpc_advertise_addr = node_state.grpc_advertise_addr()?; let indexing_tasks = parse_indexing_tasks(node_state); let indexing_cpu_capacity = parse_indexing_cpu_capacity(node_state); + let ingester_status = node_state.ingester_status(); + let availability_zone = node_state.availability_zone(); + let member = ClusterMember { node_id: chitchat_id.node_id.into(), generation_id: chitchat_id.generation_id.into(), @@ -172,6 +188,7 @@ pub(crate) fn build_cluster_member( grpc_advertise_addr, indexing_tasks, indexing_cpu_capacity, + ingester_status, availability_zone, }; Ok(member) diff --git a/quickwit/quickwit-cluster/src/node.rs b/quickwit/quickwit-cluster/src/node.rs index 4a8b11dbafc..b83e354c93d 100644 --- a/quickwit/quickwit-cluster/src/node.rs +++ b/quickwit/quickwit-cluster/src/node.rs @@ -20,6 +20,7 @@ use std::sync::Arc; use chitchat::{ChitchatId, NodeState}; use quickwit_config::service::QuickwitService; use quickwit_proto::indexing::{CpuCapacity, IndexingTask}; +use quickwit_proto::ingest::ingester::IngesterStatus; use quickwit_proto::types::NodeIdRef; use 
tonic::transport::Channel; @@ -46,6 +47,7 @@ impl ClusterNode { grpc_advertise_addr: member.grpc_advertise_addr, indexing_tasks: member.indexing_tasks, indexing_capacity: member.indexing_cpu_capacity, + ingester_status: member.ingester_status, is_ready: member.is_ready, is_self_node, }; @@ -62,7 +64,9 @@ impl ClusterNode { is_self_node: bool, enabled_services: &[&str], indexing_tasks: &[IndexingTask], + ingester_status: IngesterStatus, ) -> Self { + use quickwit_common::shared_consts::INGESTER_STATUS_KEY; use quickwit_common::tower::{ClientGrpcConfig, make_channel}; use crate::cluster::set_indexing_tasks_in_node_state; @@ -75,6 +79,7 @@ impl ClusterNode { let mut node_state = NodeState::for_test(); node_state.set(ENABLED_SERVICES_KEY, enabled_services.join(",")); node_state.set(GRPC_ADVERTISE_ADDR_KEY, grpc_advertise_addr.to_string()); + node_state.set(INGESTER_STATUS_KEY, ingester_status.as_json_str_name()); set_indexing_tasks_in_node_state(indexing_tasks, &mut node_state); Self::try_new(chitchat_id, &node_state, channel, is_self_node).unwrap() } @@ -125,6 +130,10 @@ impl ClusterNode { self.inner.indexing_capacity } + pub fn ingester_status(&self) -> IngesterStatus { + self.inner.ingester_status + } + pub fn is_ready(&self) -> bool { self.inner.is_ready } @@ -163,6 +172,7 @@ struct InnerNode { grpc_advertise_addr: SocketAddr, indexing_tasks: Vec, indexing_capacity: CpuCapacity, + ingester_status: IngesterStatus, is_ready: bool, is_self_node: bool, } diff --git a/quickwit/quickwit-common/src/rate_limited_tracing.rs b/quickwit/quickwit-common/src/rate_limited_tracing.rs index c9a323f9ec2..198c2bf8bdd 100644 --- a/quickwit/quickwit-common/src/rate_limited_tracing.rs +++ b/quickwit/quickwit-common/src/rate_limited_tracing.rs @@ -179,12 +179,13 @@ fn _check_macro_works() { #[doc(hidden)] pub use coarsetime::Instant as CoarsetimeInstant; +pub use rate_limited_debug; +pub use rate_limited_error; +pub use rate_limited_info; +pub use rate_limited_trace; #[doc(hidden)] pub 
use rate_limited_tracing; -pub use { - rate_limited_debug, rate_limited_error, rate_limited_info, rate_limited_trace, - rate_limited_warn, -}; +pub use rate_limited_warn; #[cfg(test)] mod tests { diff --git a/quickwit/quickwit-common/src/shared_consts.rs b/quickwit/quickwit-common/src/shared_consts.rs index 9923705f0b2..d46cbc535ca 100644 --- a/quickwit/quickwit-common/src/shared_consts.rs +++ b/quickwit/quickwit-common/src/shared_consts.rs @@ -61,9 +61,12 @@ pub fn split_deletion_grace_period() -> Duration { /// being requested. pub const SCROLL_BATCH_LEN: usize = 1_000; -/// Prefix used in chitchat to broadcast the list of primary shards hosted by a leader. +/// Key prefix used in chitchat to broadcast the list of primary shards hosted by a leader. pub const INGESTER_PRIMARY_SHARDS_PREFIX: &str = "ingester.primary_shards:"; +/// Key used in chitchat to broadcast the status of an ingester. +pub const INGESTER_STATUS_KEY: &str = "ingester.status"; + /// File name for the encoded list of fields in the split pub const SPLIT_FIELDS_FILE_NAME: &str = "split_fields"; diff --git a/quickwit/quickwit-common/src/tower/pool.rs b/quickwit/quickwit-common/src/tower/pool.rs index 87a6f15e879..cfc1de93249 100644 --- a/quickwit/quickwit-common/src/tower/pool.rs +++ b/quickwit/quickwit-common/src/tower/pool.rs @@ -109,6 +109,16 @@ where .collect() } + /// Returns all the key-value pairs in the pool. + pub fn keys_values(&self) -> Vec<(K, V)> { + self.pool + .read() + .expect("lock should not be poisoned") + .iter() + .map(|(key, value)| (key.clone(), value.clone())) + .collect() + } + /// Returns all the values in the pool. 
pub fn values(&self) -> Vec { self.pool diff --git a/quickwit/quickwit-control-plane/src/control_plane.rs b/quickwit/quickwit-control-plane/src/control_plane.rs index e4c6995d639..1185f8a7bbc 100644 --- a/quickwit/quickwit-control-plane/src/control_plane.rs +++ b/quickwit/quickwit-control-plane/src/control_plane.rs @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +use std::collections::btree_map::Entry; use std::collections::{BTreeMap, BTreeSet}; use std::fmt; use std::fmt::Formatter; @@ -27,14 +28,11 @@ use quickwit_actors::{ Actor, ActorContext, ActorExitStatus, ActorHandle, DeferableReplyHandler, Handler, Mailbox, Supervisor, Universe, WeakMailbox, }; -use quickwit_cluster::{ - ClusterChange, ClusterChangeStream, ClusterChangeStreamFactory, ClusterNode, -}; +use quickwit_cluster::{ClusterChange, ClusterChangeStream, ClusterChangeStreamFactory}; use quickwit_common::pretty::PrettyDisplay; use quickwit_common::pubsub::EventSubscriber; use quickwit_common::uri::Uri; use quickwit_common::{Progress, shared_consts}; -use quickwit_config::service::QuickwitService; use quickwit_config::{ClusterConfig, IndexConfig, IndexTemplate, SourceConfig}; use quickwit_ingest::{IngesterPool, LocalShardsUpdate}; use quickwit_metastore::{CreateIndexRequestExt, CreateIndexResponseExt, IndexMetadataResponseExt}; @@ -43,6 +41,7 @@ use quickwit_proto::control_plane::{ GetOrCreateOpenShardsRequest, GetOrCreateOpenShardsResponse, GetOrCreateOpenShardsSubrequest, }; use quickwit_proto::indexing::ShardPositionsUpdate; +use quickwit_proto::ingest::ingester::IngesterStatus; use quickwit_proto::metastore::{ AddSourceRequest, CreateIndexRequest, CreateIndexResponse, DeleteIndexRequest, DeleteShardsRequest, DeleteSourceRequest, EmptyResponse, FindIndexTemplateMatchesRequest, @@ -350,6 +349,28 @@ impl ControlPlane { } fn debug_info(&self) -> JsonValue { + // Build the union of ingesters tracked by ingester pool and the model. 
+ let mut ingesters: BTreeMap = BTreeMap::new(); + + for (ingester_id, ingester) in self.ingest_controller.ingester_pool.keys_values() { + let ingester_json = json!({ + "available": true, + "status": ingester.status.as_json_str_name(), + }); + ingesters.insert(ingester_id.clone(), ingester_json); + } + for shard in self.model.all_shards() { + let ingester_id = NodeId::from(shard.leader_id.clone()); + + if let Entry::Vacant(entry) = ingesters.entry(ingester_id.clone()) { + let ingester_json = json!({ + "available": false, + "status": IngesterStatus::default(), + }); + entry.insert(ingester_json); + } + } + let physical_indexing_plan: Vec = self .indexing_scheduler .observable_state() @@ -392,6 +413,7 @@ impl ControlPlane { } } json!({ + "ingesters": ingesters, "physical_indexing_plan": physical_indexing_plan, "shard_table": per_index_and_leader_shards_json, }) @@ -1040,61 +1062,25 @@ fn apply_index_template_match( Ok(index_config) } -/// The indexer joined the cluster. #[derive(Debug)] -struct IndexerJoined(ClusterNode); +struct RebalanceShards; #[async_trait] -impl Handler for ControlPlane { +impl Handler for ControlPlane { type Reply = (); async fn handle( &mut self, - message: IndexerJoined, + _message: RebalanceShards, ctx: &ActorContext, ) -> Result { - info!( - "indexer `{}` joined the cluster: rebalancing shards and rebuilding indexing plan", - message.0.node_id() - ); - // TODO: Update shard table. - if let Err(metastore_error) = self + if let Err(error) = self .ingest_controller .rebalance_shards(&mut self.model, ctx.mailbox(), ctx.progress()) .await { - return convert_metastore_error::<()>(metastore_error).map(|_| ()); - } - self.indexing_scheduler.rebuild_plan(&self.model); - Ok(()) - } -} - -/// The indexer left the cluster. 
-#[derive(Debug)] -struct IndexerLeft(ClusterNode); - -#[async_trait] -impl Handler for ControlPlane { - type Reply = (); - - async fn handle( - &mut self, - message: IndexerLeft, - ctx: &ActorContext, - ) -> Result { - info!( - "indexer `{}` left the cluster: rebalancing shards and rebuilding indexing plan", - message.0.node_id() - ); - // TODO: Update shard table. - if let Err(metastore_error) = self - .ingest_controller - .rebalance_shards(&mut self.model, ctx.mailbox(), ctx.progress()) - .await - { - return convert_metastore_error::<()>(metastore_error).map(|_| ()); - } + return convert_metastore_error::<()>(error).map(|_| ()); + }; self.indexing_scheduler.rebuild_plan(&self.model); Ok(()) } @@ -1120,7 +1106,7 @@ impl Handler for ControlPlane { }; self.model.close_shards(&source_uid, &[shard_id]); } - // We drop the rebalance guard explicitly here to put some emphasis on where a the rebalance + // We drop the rebalance guard explicitly here to put some emphasis on where the rebalance // lock is released. drop(message.rebalance_guard); Ok(()) @@ -1142,24 +1128,50 @@ async fn watcher_indexers( let Some(mailbox) = weak_mailbox.upgrade() else { return; }; + + // Ingesters have two readiness levels: + // 1. Cluster connectivity: node is up and can reach the metastore (similar to other nodes) + // 2. Shard readiness: IngesterStatus::Ready indicates the ingester can accept new shards + // We rebalance shards when either readiness level changes. 
+ let mut trigger_rebalance = false; match cluster_change { - ClusterChange::Add(node) => { - if node.enabled_services().contains(&QuickwitService::Indexer) - && let Err(error) = mailbox.send_message(IndexerJoined(node)).await - { - error!(%error, "failed to forward `IndexerJoined` event to control plane"); + ClusterChange::Add(node) if node.is_indexer() => { + if node.ingester_status().is_ready() { + info!( + "indexer `{}` with status `{}` joined the cluster: rebalancing shards and \ + rebuilding indexing plan", + node.node_id(), + node.ingester_status().as_json_str_name() + ); + trigger_rebalance = true; } } - ClusterChange::Remove(node) => { - if node.enabled_services().contains(&QuickwitService::Indexer) - && let Err(error) = mailbox.send_message(IndexerLeft(node)).await - { - error!(%error, "failed to forward `IndexerLeft` event to control plane"); - } + ClusterChange::Remove(node) if node.is_indexer() => { + info!( + "indexer `{}` left the cluster: rebalancing shards and rebuilding indexing \ + plan", + node.node_id() + ); + trigger_rebalance = true } - ClusterChange::Update(_) => { - // We are not interested in updates (yet). 
+ ClusterChange::Update { previous, updated } if updated.is_indexer() => { + let was_ready = previous.ingester_status().is_ready(); + let is_ready = updated.ingester_status().is_ready(); + + if was_ready ^ is_ready { + info!( + "indexer `{}` status changed to `{}`: rebalancing shards and rebuilding \ + indexing plan", + updated.node_id(), + updated.ingester_status().as_json_str_name() + ); + trigger_rebalance = true; + } } + _ => {} + } + if trigger_rebalance && mailbox.send_message(RebalanceShards).await.is_err() { + return; } } } @@ -1171,11 +1183,12 @@ mod tests { use mockall::Sequence; use quickwit_actors::{AskError, Observe, SupervisorMetrics}; - use quickwit_cluster::ClusterChangeStreamFactoryForTest; + use quickwit_cluster::{ClusterChangeStreamFactoryForTest, ClusterNode}; use quickwit_config::{ CLI_SOURCE_ID, INGEST_V2_SOURCE_ID, IndexConfig, KafkaSourceParams, SourceParams, }; use quickwit_indexing::IndexingService; + use quickwit_ingest::IngesterPoolEntry; use quickwit_metastore::{ CreateIndexRequestExt, IndexMetadata, ListIndexesMetadataResponseExt, }; @@ -1187,8 +1200,8 @@ mod tests { MockIndexingService, }; use quickwit_proto::ingest::ingester::{ - IngesterServiceClient, InitShardSuccess, InitShardsResponse, MockIngesterService, - RetainShardsResponse, + IngesterServiceClient, IngesterStatus, InitShardSuccess, InitShardsResponse, + MockIngesterService, RetainShardsResponse, }; use quickwit_proto::ingest::{Shard, ShardPKey, ShardState}; use quickwit_proto::metastore::{ @@ -2210,7 +2223,11 @@ mod tests { assert!(&retain_shards_for_source.shard_ids.is_empty()); Ok(RetainShardsResponse {}) }); - let ingester = IngesterServiceClient::from_mock(mock_ingester); + let client = IngesterServiceClient::from_mock(mock_ingester); + let ingester = IngesterPoolEntry { + client, + status: IngesterStatus::Ready, + }; ingester_pool.insert("node1".into(), ingester); let cluster_config = ClusterConfig::for_test(); @@ -2256,7 +2273,11 @@ mod tests { ); 
Ok(RetainShardsResponse {}) }); - let ingester = IngesterServiceClient::from_mock(mock_ingester); + let client = IngesterServiceClient::from_mock(mock_ingester); + let ingester = IngesterPoolEntry { + client, + status: IngesterStatus::Ready, + }; ingester_pool.insert("node1".into(), ingester); let mut index_0 = IndexMetadata::for_test("test-index-0", "ram:///test-index-0"); @@ -2443,24 +2464,96 @@ mod tests { let cluster_change_stream_tx = cluster_change_stream_factory.change_stream_tx(); - let metastore_node = - ClusterNode::for_test("test-metastore", 1337, false, &["metastore"], &[]).await; + // a non-indexer node status change doesn't trigger a shard rebalancing. + let metastore_node = ClusterNode::for_test( + "test-metastore", + 1515, + false, + &["metastore"], + &[], + IngesterStatus::Unspecified, + ) + .await; let cluster_change = ClusterChange::Add(metastore_node); cluster_change_stream_tx.send(cluster_change).unwrap(); - let indexer_node = - ClusterNode::for_test("test-indexer", 1515, false, &["indexer"], &[]).await; + tokio::time::sleep(Duration::from_millis(1)).await; + assert!( + control_plane_inbox + .drain_for_test_typed::() + .is_empty() + ); + + // an indexer initializing doesn't trigger a shard rebalancing. + let indexer_node_initializing: ClusterNode = ClusterNode::for_test( + "test-indexer", + 1515, + false, + &["indexer"], + &[], + IngesterStatus::Initializing, + ) + .await; + let cluster_change = ClusterChange::Add(indexer_node_initializing); + cluster_change_stream_tx.send(cluster_change).unwrap(); + + tokio::time::sleep(Duration::from_millis(1)).await; + assert!( + control_plane_inbox + .drain_for_test_typed::() + .is_empty() + ); + + // an indexer ready triggers a shard rebalancing. 
+ let indexer_node: ClusterNode = ClusterNode::for_test( + "test-indexer", + 1515, + false, + &["indexer"], + &[], + IngesterStatus::Ready, + ) + .await; let cluster_change = ClusterChange::Add(indexer_node.clone()); cluster_change_stream_tx.send(cluster_change).unwrap(); + tokio::time::sleep(Duration::from_millis(1)).await; + let RebalanceShards = control_plane_inbox.recv_typed_message().await.unwrap(); + + // removing an indexer node triggers a shard rebalancing. let cluster_change = ClusterChange::Remove(indexer_node.clone()); cluster_change_stream_tx.send(cluster_change).unwrap(); - let IndexerJoined(joined) = control_plane_inbox.recv_typed_message().await.unwrap(); - assert_eq!(joined.grpc_advertise_addr().port(), 1516); + tokio::time::sleep(Duration::from_millis(1)).await; + let RebalanceShards = control_plane_inbox.recv_typed_message().await.unwrap(); + + // a change in IngesterStatus readiness triggers a shard rebalancing. + let node_ready = ClusterNode::for_test( + "test-indexer", + 1515, + false, + &["indexer"], + &[], + IngesterStatus::Ready, + ) + .await; + let node_retiring = ClusterNode::for_test( + "test-indexer", + 1515, + false, + &["indexer"], + &[], + IngesterStatus::Retiring, + ) + .await; + let cluster_change = ClusterChange::Update { + previous: node_ready, + updated: node_retiring, + }; + cluster_change_stream_tx.send(cluster_change).unwrap(); - let IndexerLeft(left) = control_plane_inbox.recv_typed_message().await.unwrap(); - assert_eq!(left.grpc_advertise_addr().port(), 1516); + tokio::time::sleep(Duration::from_millis(1)).await; + let RebalanceShards = control_plane_inbox.recv_typed_message().await.unwrap(); universe.assert_quit().await; } @@ -2493,8 +2586,15 @@ mod tests { disable_control_loop, ); let cluster_change_stream_tx = cluster_change_stream_factory.change_stream_tx(); - let indexer_node = - ClusterNode::for_test("test-indexer", 1515, false, &["indexer"], &[]).await; + let indexer_node: ClusterNode = ClusterNode::for_test( + 
"test-indexer", + 1515, + false, + &["indexer"], + &[], + IngesterStatus::Ready, + ) + .await; let cluster_change = ClusterChange::Add(indexer_node.clone()); cluster_change_stream_tx.send(cluster_change).unwrap(); @@ -2552,7 +2652,11 @@ mod tests { }; Ok(response) }); - let ingester = IngesterServiceClient::from_mock(mock_ingester); + let client = IngesterServiceClient::from_mock(mock_ingester); + let ingester = IngesterPoolEntry { + client, + status: IngesterStatus::Ready, + }; ingester_pool.insert(ingester_id, ingester); let mut mock_metastore = MockMetastoreService::new(); @@ -2706,7 +2810,11 @@ mod tests { }; Ok(response) }); - let ingester = IngesterServiceClient::from_mock(mock_ingester); + let client = IngesterServiceClient::from_mock(mock_ingester); + let ingester = IngesterPoolEntry { + client, + status: IngesterStatus::Ready, + }; ingester_pool.insert(ingester_id, ingester); let mut mock_metastore = MockMetastoreService::new(); diff --git a/quickwit/quickwit-control-plane/src/ingest/ingest_controller.rs b/quickwit/quickwit-control-plane/src/ingest/ingest_controller.rs index 63295d61eca..18c9c6486e1 100644 --- a/quickwit/quickwit-control-plane/src/ingest/ingest_controller.rs +++ b/quickwit/quickwit-control-plane/src/ingest/ingest_controller.rs @@ -13,7 +13,7 @@ // limitations under the License. 
use std::collections::btree_map::Entry; -use std::collections::{BTreeMap, BTreeSet, HashMap}; +use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet}; use std::fmt; use std::future::Future; use std::num::NonZeroUsize; @@ -34,7 +34,7 @@ use quickwit_proto::control_plane::{ GetOrCreateOpenShardsSuccess, }; use quickwit_proto::ingest::ingester::{ - CloseShardsRequest, CloseShardsResponse, IngesterService, InitShardFailure, + CloseShardsRequest, CloseShardsResponse, IngesterService, IngesterStatus, InitShardFailure, InitShardSubrequest, InitShardsRequest, InitShardsResponse, RetainShardsForSource, RetainShardsRequest, }; @@ -52,8 +52,7 @@ use rand::seq::SliceRandom; use rand::{Rng, RngCore, rng}; use serde::{Deserialize, Serialize}; use tokio::sync::{Mutex, OwnedMutexGuard}; -use tokio::task::JoinHandle; -use tracing::{Level, debug, enabled, error, info, warn}; +use tracing::{Level, debug, enabled, error, info, instrument, warn}; use ulid::Ulid; use super::scaling_arbiter::ScalingArbiter; @@ -202,12 +201,12 @@ pub struct IngestControllerStats { } pub struct IngestController { - ingester_pool: IngesterPool, + pub(crate) ingester_pool: IngesterPool, + pub(crate) stats: IngestControllerStats, metastore: MetastoreServiceClient, replication_factor: usize, // This lock ensures that only one rebalance operation is performed at a time. rebalance_lock: Arc>, - pub stats: IngestControllerStats, scaling_arbiter: ScalingArbiter, } @@ -350,8 +349,10 @@ impl IngestController { let operation: String = format!("retain shards `{ingester}`"); fire_and_forget( async move { - if let Err(retain_shards_err) = - ingester_client.retain_shards(retain_shards_req).await + if let Err(retain_shards_err) = ingester_client + .client + .retain_shards(retain_shards_req) + .await { error!(%retain_shards_err, "retain shards error"); } @@ -548,10 +549,12 @@ impl IngestController { // shards). 
let mut per_node_num_open_shards: HashMap = self .ingester_pool - .keys() + .keys_values() .into_iter() - .filter(|ingester| !unavailable_leaders.contains(ingester)) - .map(|ingester| (ingester, 0)) + .filter(|(ingester_id, ingester)| { + ingester.status.is_ready() && !unavailable_leaders.contains(ingester_id) + }) + .map(|(ingester_id, _)| (ingester_id, 0)) .collect(); let num_ingesters = per_node_num_open_shards.len(); @@ -648,7 +651,7 @@ impl IngestController { let init_shards_future = async move { let init_shards_result = tokio::time::timeout( INIT_SHARDS_REQUEST_TIMEOUT, - leader.init_shards(init_shards_request), + leader.client.init_shards(init_shards_request), ) .await; (leader_id.clone(), init_shards_result, init_shard_failures) @@ -910,7 +913,7 @@ impl IngestController { let close_shards_request = CloseShardsRequest { shard_pkeys }; if let Err(error) = progress - .protect_future(ingester.close_shards(close_shards_request)) + .protect_future(ingester.client.close_shards(close_shards_request)) .await { warn!("failed to scale down number of shards: {error}"); @@ -1008,15 +1011,16 @@ impl IngestController { /// /// This method is guarded by a lock to ensure that only one rebalance operation is performed at /// a time. 
+ #[instrument(skip_all)] pub(crate) async fn rebalance_shards( &mut self, model: &mut ControlPlaneModel, mailbox: &Mailbox, progress: &Progress, - ) -> MetastoreResult>> { + ) -> MetastoreResult { let Ok(rebalance_guard) = self.rebalance_lock.clone().try_lock_owned() else { debug!("skipping rebalance: another rebalance is already in progress"); - return Ok(None); + return Ok(0); }; self.stats.num_rebalance_shards_ops += 1; @@ -1027,7 +1031,8 @@ impl IngestController { .set(shards_to_rebalance.len() as i64); if shards_to_rebalance.is_empty() { - return Ok(None); + debug!("skipping rebalance: no shards to rebalance"); + return Ok(0); } let mut per_source_num_shards_to_open: HashMap = HashMap::new(); @@ -1036,7 +1041,6 @@ impl IngestController { .entry(shard.source_uid()) .or_default() += 1; } - let mut per_source_num_opened_shards: HashMap = self .try_open_shards( per_source_num_shards_to_open, @@ -1067,6 +1071,7 @@ impl IngestController { // Close as many shards as we opened. Because `try_open_shards` might fail partially, we // must only close the shards that we successfully opened. 
let mut shards_to_close = Vec::with_capacity(shards_to_rebalance.len()); + for shard in shards_to_rebalance { let source_uid = shard.source_uid(); let Some(num_open_shards) = per_source_num_opened_shards.get_mut(&source_uid) else { @@ -1078,10 +1083,8 @@ impl IngestController { *num_open_shards -= 1; shards_to_close.push(shard); } - - let mailbox_clone = mailbox.clone(); - let close_shards_fut = self.close_shards(shards_to_close); + let mailbox_clone = mailbox.clone(); let close_shards_and_send_callback_fut = async move { // We wait for a few seconds before closing the shards to give the ingesters some time @@ -1099,8 +1102,12 @@ impl IngestController { }; let _ = mailbox_clone.send_message(callback).await; }; + tokio::spawn(close_shards_and_send_callback_fut); - Ok(Some(tokio::spawn(close_shards_and_send_callback_fut))) + if num_opened_shards > 0 { + info!("rebalance opened {num_opened_shards} new shards"); + } + Ok(num_opened_shards) } /// Computes shards that need to be rebalanced. @@ -1114,31 +1121,41 @@ impl IngestController { /// The closing operation can only be done by the leader of that shard. /// For these reason, we exclude these shards from the rebalance process. 
fn compute_shards_to_rebalance(&self, model: &ControlPlaneModel) -> Vec { - let mut per_available_ingester_shards: HashMap> = self - .ingester_pool - .keys() - .into_iter() - .map(|ingester_id| (ingester_id, Vec::new())) - .collect(); + let mut per_ready_ingester_shards: HashMap> = HashMap::new(); + let mut retiring_ingesters: HashSet = HashSet::new(); + + for (ingester_id, ingester) in self.ingester_pool.keys_values() { + if ingester.status.is_ready() { + per_ready_ingester_shards.insert(ingester_id, Vec::new()); + } else if ingester.status == IngesterStatus::Retiring { + retiring_ingesters.insert(ingester_id); + } + } + + let mut shards_to_rebalance: Vec = Vec::new(); + let mut num_ready_shards: usize = 0; - let mut num_available_shards: usize = 0; for shard in model.all_shards() { if !shard.is_open() { continue; } let leader_id_ref = NodeIdRef::from_str(&shard.leader_id); - if let Some(shards) = per_available_ingester_shards.get_mut(leader_id_ref) { - // We only consider shards that are on available ingesters - // because we won't be able to move shards that are not reachable. - num_available_shards += 1; - shards.push(&shard.shard) + + if let Some(shards) = per_ready_ingester_shards.get_mut(leader_id_ref) { + // Shards on ready ingesters participate in the balancing logic. + num_ready_shards += 1; + shards.push(&shard.shard); + } else if retiring_ingesters.contains(leader_id_ref) { + // All open shards on retiring ingesters must be rebalanced. 
+ shards_to_rebalance.push(shard.shard.clone()); } } - let num_available_ingesters = per_available_ingester_shards.len(); + let num_retiring_shards = shards_to_rebalance.len(); + let num_ready_ingesters = per_ready_ingester_shards.len(); let mut rng = rng(); - let mut per_leader_open_shards_shuffled: Vec> = per_available_ingester_shards + let mut per_leader_open_shards_shuffled: Vec> = per_ready_ingester_shards .into_values() .map(|mut shards| { shards.shuffle(&mut rng); @@ -1146,11 +1163,9 @@ impl IngestController { }) .collect(); - let mut shards_to_rebalance: Vec = Vec::new(); - // This is more of a loop-loop, but since we know it should exit before - // `num_available_ingesters`, we defensively use a for-loop. - for _ in 0..num_available_shards { + // `num_ready_shards`, we defensively use a for-loop. + for _ in 0..num_ready_shards { let MinMaxResult::MinMax(min_shards, max_shards) = per_leader_open_shards_shuffled .iter_mut() .minmax_by_key(|shards| shards.len()) @@ -1177,13 +1192,13 @@ impl IngestController { debug!("no shards to rebalance"); } else { info!( - num_available_shards, - num_available_ingesters, + num_ready_shards, + num_ready_ingesters, + num_retiring_shards, num_shards_to_rebalance = shards_to_rebalance.len(), "rebalancing shards" ); } - shards_to_rebalance } @@ -1219,7 +1234,7 @@ impl IngestController { let close_shards_future = async move { tokio::time::timeout( CLOSE_SHARDS_REQUEST_TIMEOUT, - ingester.close_shards(shards_to_close_request), + ingester.client.close_shards(shards_to_close_request), ) .await }; @@ -1313,12 +1328,12 @@ mod tests { use quickwit_common::shared_consts::DEFAULT_SHARD_THROUGHPUT_LIMIT; use quickwit_common::tower::DelayLayer; use quickwit_config::{DocMapping, INGEST_V2_SOURCE_ID, SourceConfig}; - use quickwit_ingest::{RateMibPerSec, ShardInfo}; + use quickwit_ingest::{IngesterPoolEntry, RateMibPerSec, ShardInfo}; use quickwit_metastore::IndexMetadata; use quickwit_proto::control_plane::GetOrCreateOpenShardsSubrequest; 
use quickwit_proto::ingest::ingester::{ - CloseShardsResponse, IngesterServiceClient, InitShardSuccess, InitShardsResponse, - MockIngesterService, RetainShardsResponse, + CloseShardsResponse, IngesterServiceClient, IngesterStatus, InitShardSuccess, + InitShardsResponse, MockIngesterService, RetainShardsResponse, }; use quickwit_proto::ingest::{IngestV2Error, Shard, ShardState}; use quickwit_proto::metastore::{ @@ -1382,10 +1397,14 @@ mod tests { let metastore = MetastoreServiceClient::from_mock(mock_metastore); let mock_ingester = MockIngesterService::new(); - let ingester = IngesterServiceClient::from_mock(mock_ingester); + let client = IngesterServiceClient::from_mock(mock_ingester); let ingester_pool = IngesterPool::default(); - ingester_pool.insert(NodeId::from("test-ingester-1"), ingester.clone()); + let ingester = IngesterPoolEntry { + client, + status: IngesterStatus::Ready, + }; + ingester_pool.insert(NodeId::from("test-ingester-1"), ingester); let mut mock_ingester = MockIngesterService::new(); let index_uid_1_clone = index_uid_1.clone(); @@ -1412,8 +1431,12 @@ mod tests { }; Ok(response) }); - let ingester = IngesterServiceClient::from_mock(mock_ingester); - ingester_pool.insert(NodeId::from("test-ingester-2"), ingester.clone()); + let client = IngesterServiceClient::from_mock(mock_ingester); + let ingester = IngesterPoolEntry { + client, + status: IngesterStatus::Ready, + }; + ingester_pool.insert(NodeId::from("test-ingester-2"), ingester); let replication_factor = 2; let mut controller = IngestController::new( @@ -1596,10 +1619,14 @@ mod tests { }; Ok(response) }); - let ingester = IngesterServiceClient::from_mock(mock_ingester); + let client = IngesterServiceClient::from_mock(mock_ingester); let ingester_pool = IngesterPool::default(); - ingester_pool.insert(NodeId::from("test-ingester-1"), ingester.clone()); + let ingester = IngesterPoolEntry { + client, + status: IngesterStatus::Ready, + }; + ingester_pool.insert(NodeId::from("test-ingester-1"), 
ingester); let replication_factor = 1; let mut controller = IngestController::new( @@ -1710,10 +1737,12 @@ mod tests { controller.allocate_shards(0, &FnvHashSet::default(), &model); assert!(leader_follower_pairs_opt.is_none()); - ingester_pool.insert( - NodeId::from("test-ingester-1"), - IngesterServiceClient::mocked(), - ); + let client_1 = IngesterServiceClient::mocked(); + let ingester_1 = IngesterPoolEntry { + client: client_1, + status: IngesterStatus::Ready, + }; + ingester_pool.insert(NodeId::from("test-ingester-1"), ingester_1); let leader_follower_pairs_opt = controller.allocate_shards(0, &FnvHashSet::default(), &model); @@ -1722,7 +1751,12 @@ mod tests { // find any solution. assert!(leader_follower_pairs_opt.is_none()); - ingester_pool.insert("test-ingester-2".into(), IngesterServiceClient::mocked()); + let client_2 = IngesterServiceClient::mocked(); + let ingester_2 = IngesterPoolEntry { + client: client_2, + status: IngesterStatus::Ready, + }; + ingester_pool.insert("test-ingester-2".into(), ingester_2); let leader_follower_pairs = controller .allocate_shards(0, &FnvHashSet::default(), &model) @@ -1841,7 +1875,12 @@ mod tests { Some(NodeId::from("test-ingester-1")) ); - ingester_pool.insert("test-ingester-3".into(), IngesterServiceClient::mocked()); + let client = IngesterServiceClient::mocked(); + let ingester = IngesterPoolEntry { + client, + status: IngesterStatus::Ready, + }; + ingester_pool.insert("test-ingester-3".into(), ingester); let unavailable_leaders = FnvHashSet::from_iter([NodeId::from("test-ingester-2")]); let leader_follower_pairs = controller .allocate_shards(4, &unavailable_leaders, &model) @@ -1933,7 +1972,11 @@ mod tests { }; Ok(response) }); - let ingester_0 = IngesterServiceClient::from_mock(mock_ingester_0); + let client_0 = IngesterServiceClient::from_mock(mock_ingester_0); + let ingester_0 = IngesterPoolEntry { + client: client_0, + status: IngesterStatus::Ready, + }; ingester_pool.insert(ingester_id_0, ingester_0); let 
ingester_id_1 = NodeId::from("test-ingester-1"); @@ -1955,16 +1998,24 @@ mod tests { Err(IngestV2Error::Internal("internal error".to_string())) }); - let ingester_1 = IngesterServiceClient::from_mock(mock_ingester_1); + let client_1 = IngesterServiceClient::from_mock(mock_ingester_1); + let ingester_1 = IngesterPoolEntry { + client: client_1, + status: IngesterStatus::Ready, + }; ingester_pool.insert(ingester_id_1, ingester_1); let ingester_id_2 = NodeId::from("test-ingester-2"); let mut mock_ingester_2 = MockIngesterService::new(); mock_ingester_2.expect_init_shards().never(); - let ingester_2 = IngesterServiceClient::tower() + let client_2 = IngesterServiceClient::tower() .stack_init_shards_layer(DelayLayer::new(INIT_SHARDS_REQUEST_TIMEOUT * 2)) .build_from_mock(mock_ingester_2); + let ingester_2 = IngesterPoolEntry { + client: client_2, + status: IngesterStatus::Ready, + }; ingester_pool.insert(ingester_id_2, ingester_2); let init_shards_response = controller @@ -2171,10 +2222,12 @@ mod tests { Ok(response) }); - ingester_pool.insert( - NodeId::from("test-ingester-1"), - IngesterServiceClient::from_mock(mock_ingester), - ); + let client = IngesterServiceClient::from_mock(mock_ingester); + let ingester = IngesterPoolEntry { + client, + status: IngesterStatus::Ready, + }; + ingester_pool.insert(NodeId::from("test-ingester-1"), ingester); let source_uids: HashMap = HashMap::from_iter([(source_uid.clone(), 1)]); let unavailable_leaders = FnvHashSet::default(); let progress = Progress::default(); @@ -2343,7 +2396,11 @@ mod tests { "failed to close shards".to_string(), )) }); - let ingester = IngesterServiceClient::from_mock(mock_ingester); + let client = IngesterServiceClient::from_mock(mock_ingester); + let ingester = IngesterPoolEntry { + client, + status: IngesterStatus::Ready, + }; ingester_pool.insert("test-ingester".into(), ingester); let shard_infos = BTreeSet::from_iter([ @@ -2496,7 +2553,11 @@ mod tests { }, ); - let ingester = 
IngesterServiceClient::from_mock(mock_ingester); + let client = IngesterServiceClient::from_mock(mock_ingester); + let ingester = IngesterPoolEntry { + client, + status: IngesterStatus::Ready, + }; ingester_pool.insert("test-ingester".into(), ingester); let shard_infos = BTreeSet::from_iter([ShardInfo { @@ -2642,7 +2703,11 @@ mod tests { }; Ok(response) }); - let ingester = IngesterServiceClient::from_mock(mock_ingester); + let client = IngesterServiceClient::from_mock(mock_ingester); + let ingester = IngesterPoolEntry { + client, + status: IngesterStatus::Ready, + }; ingester_pool.insert("test-ingester".into(), ingester); // Test failed to open shards. @@ -2764,7 +2829,11 @@ mod tests { }; Ok(response) }); - let ingester = IngesterServiceClient::from_mock(mock_ingester); + let client = IngesterServiceClient::from_mock(mock_ingester); + let ingester = IngesterPoolEntry { + client, + status: IngesterStatus::Ready, + }; ingester_pool.insert("test-ingester".into(), ingester); // Test failed to close shard. 
@@ -2994,18 +3063,24 @@ mod tests { count_calls_clone.fetch_add(1, Ordering::Release); Ok(RetainShardsResponse {}) }); - ingester_pool.insert( - "node-1".into(), - IngesterServiceClient::from_mock(mock_ingester_1), - ); - ingester_pool.insert( - "node-2".into(), - IngesterServiceClient::from_mock(mock_ingester_2), - ); - ingester_pool.insert( - "node-3".into(), - IngesterServiceClient::from_mock(mock_ingester_3), - ); + let client_1 = IngesterServiceClient::from_mock(mock_ingester_1); + let ingester_1 = IngesterPoolEntry { + client: client_1, + status: IngesterStatus::Ready, + }; + ingester_pool.insert("node-1".into(), ingester_1); + let client_2 = IngesterServiceClient::from_mock(mock_ingester_2); + let ingester_2 = IngesterPoolEntry { + client: client_2, + status: IngesterStatus::Ready, + }; + ingester_pool.insert("node-2".into(), ingester_2); + let client_3 = IngesterServiceClient::from_mock(mock_ingester_3); + let ingester_3 = IngesterPoolEntry { + client: client_3, + status: IngesterStatus::Ready, + }; + ingester_pool.insert("node-3".into(), ingester_3); let node_id = "node-1".into(); let wait_handle = controller.sync_with_ingester(&node_id, &model); wait_handle.wait().await; @@ -3133,7 +3208,11 @@ mod tests { }; Ok(response) }); - let ingester_0 = IngesterServiceClient::from_mock(mock_ingester_0); + let client_0 = IngesterServiceClient::from_mock(mock_ingester_0); + let ingester_0 = IngesterPoolEntry { + client: client_0, + status: IngesterStatus::Ready, + }; ingester_pool.insert(ingester_id_0.clone(), ingester_0); let ingester_id_1 = NodeId::from("test-ingester-1"); @@ -3151,16 +3230,24 @@ mod tests { Err(IngestV2Error::Internal("internal error".to_string())) }); - let ingester_1 = IngesterServiceClient::from_mock(mock_ingester_1); + let client_1 = IngesterServiceClient::from_mock(mock_ingester_1); + let ingester_1 = IngesterPoolEntry { + client: client_1, + status: IngesterStatus::Ready, + }; ingester_pool.insert(ingester_id_1.clone(), ingester_1); let 
ingester_id_2 = NodeId::from("test-ingester-2"); let mut mock_ingester_2 = MockIngesterService::new(); mock_ingester_2.expect_close_shards().never(); - let ingester_2 = IngesterServiceClient::tower() + let client_2 = IngesterServiceClient::tower() .stack_close_shards_layer(DelayLayer::new(CLOSE_SHARDS_REQUEST_TIMEOUT * 2)) .build_from_mock(mock_ingester_2); + let ingester_2 = IngesterPoolEntry { + client: client_2, + status: IngesterStatus::Ready, + }; ingester_pool.insert(ingester_id_2.clone(), ingester_2); // In this test: @@ -3261,11 +3348,11 @@ mod tests { let (control_plane_mailbox, control_plane_inbox) = universe.create_test_mailbox(); let progress = Progress::default(); - let close_shards_task_opt = controller + let num_opened_shards = controller .rebalance_shards(&mut model, &control_plane_mailbox, &progress) .await .unwrap(); - assert!(close_shards_task_opt.is_none()); + assert_eq!(num_opened_shards, 0); let index_metadata = IndexMetadata::for_test("test-index", "ram://indexes/test-index"); let index_uid = index_metadata.index_uid.clone(); @@ -3340,7 +3427,11 @@ mod tests { }; Ok(response) }); - let ingester_0 = IngesterServiceClient::from_mock(mock_ingester_0); + let client_0 = IngesterServiceClient::from_mock(mock_ingester_0); + let ingester_0 = IngesterPoolEntry { + client: client_0, + status: IngesterStatus::Ready, + }; ingester_pool.insert(ingester_id_0.clone(), ingester_0); let ingester_id_1 = NodeId::from("test-ingester-1"); @@ -3382,24 +3473,26 @@ mod tests { }; Ok(response) }); - let ingester_1 = IngesterServiceClient::from_mock(mock_ingester_1); + let client_1 = IngesterServiceClient::from_mock(mock_ingester_1); + let ingester_1 = IngesterPoolEntry { + client: client_1, + status: IngesterStatus::Ready, + }; ingester_pool.insert(ingester_id_1.clone(), ingester_1); - let close_shards_task = controller + let num_opened_shards = controller .rebalance_shards(&mut model, &control_plane_mailbox, &progress) .await - .unwrap() .unwrap(); + 
assert_eq!(num_opened_shards, 1); - tokio::time::timeout(CLOSE_SHARDS_REQUEST_TIMEOUT * 2, close_shards_task) - .await - .unwrap() - .unwrap(); - - let callbacks: Vec = control_plane_inbox.drain_for_test_typed(); - assert_eq!(callbacks.len(), 1); - - let callback = &callbacks[0]; + let callback: RebalanceShardsCallback = tokio::time::timeout( + CLOSE_SHARDS_REQUEST_TIMEOUT * 2, + control_plane_inbox.recv_typed_message(), + ) + .await + .unwrap() + .unwrap(); assert_eq!(callback.closed_shards.len(), 1); } @@ -3544,8 +3637,9 @@ mod tests { /// - `available_ingester_shards`: open shards per available ingester /// - `unavailable_ingester_shards`: open shards on unavailable ingesters fn test_compute_shards_to_rebalance_aux( - available_ingester_shards: &[usize], + ready_ingester_shards: &[usize], unavailable_ingester_shards: &[usize], + retiring_ingester_shards: &[usize], ) { let index_id = "test-index"; let index_metadata = IndexMetadata::for_test(index_id, "ram://indexes/test-index"); @@ -3563,28 +3657,44 @@ mod tests { let mock_ingester = MockIngesterService::new(); let ingester_client = IngesterServiceClient::from_mock(mock_ingester); - let active_ids: Vec = (0..available_ingester_shards.len()) - .map(|i| format!("active-ingester-{}", i)) + let ready_ids: Vec = (0..ready_ingester_shards.len()) + .map(|i| format!("ready-ingester-{}", i)) .collect(); - for ingester_id in &active_ids { - ingester_pool.insert(NodeId::from(ingester_id.clone()), ingester_client.clone()); + for ingester_id in &ready_ids { + let ingester = IngesterPoolEntry { + client: ingester_client.clone(), + status: IngesterStatus::Ready, + }; + ingester_pool.insert(NodeId::from(ingester_id.clone()), ingester); } - let inactive_ids: Vec = (0..unavailable_ingester_shards.len()) - .map(|i| format!("inactive-ingester-{}", i)) + let unavailable_ids: Vec = (0..unavailable_ingester_shards.len()) + .map(|i| format!("unavailable-ingester-{}", i)) + .collect(); + + let retiring_ids: Vec = 
(0..retiring_ingester_shards.len()) + .map(|i| format!("retiring-ingester-{}", i)) .collect(); + for ingester_id in &retiring_ids { + let ingester = IngesterPoolEntry { + client: ingester_client.clone(), + status: IngesterStatus::Retiring, + }; + ingester_pool.insert(NodeId::from(ingester_id.clone()), ingester); + } + let mut shards: Vec = Vec::new(); let mut shard_id: u64 = 0; - for (idx, &num_shards) in available_ingester_shards.iter().enumerate() { + for (idx, &num_shards) in ready_ingester_shards.iter().enumerate() { for _ in 0..num_shards { shards.push(Shard { index_uid: Some(index_uid.clone()), source_id: source_id.clone(), shard_id: Some(ShardId::from(shard_id)), - leader_id: active_ids[idx].clone(), + leader_id: ready_ids[idx].clone(), shard_state: ShardState::Open as i32, ..Default::default() }); @@ -3599,7 +3709,24 @@ mod tests { index_uid: Some(index_uid.clone()), source_id: source_id.clone(), shard_id: Some(ShardId::from(shard_id)), - leader_id: inactive_ids[idx].clone(), + leader_id: unavailable_ids[idx].clone(), + shard_state: ShardState::Open as i32, + ..Default::default() + }); + shard_id += 1; + } + } + + let num_retiring_shards: usize = retiring_ingester_shards.iter().sum(); + + // Shards on retiring ingesters - all of these should be rebalanced + for (idx, &num_shards) in retiring_ingester_shards.iter().enumerate() { + for _ in 0..num_shards { + shards.push(Shard { + index_uid: Some(index_uid.clone()), + source_id: source_id.clone(), + shard_id: Some(ShardId::from(shard_id)), + leader_id: retiring_ids[idx].clone(), shard_state: ShardState::Open as i32, ..Default::default() }); @@ -3618,6 +3745,13 @@ mod tests { ); let shards_to_rebalance = controller.compute_shards_to_rebalance(&model); + // All shards on retiring ingesters must be rebalanced. 
+ let num_retiring_shards_to_rebalance = shards_to_rebalance + .iter() + .filter(|shard| shard.leader_id.starts_with("retiring-")) + .count(); + assert_eq!(num_retiring_shards_to_rebalance, num_retiring_shards); + let source_uid = SourceUid { index_uid: index_uid.clone(), source_id: source_id.clone(), @@ -3630,9 +3764,9 @@ mod tests { let closed_shard_ids = model.close_shards(&source_uid, &shard_ids_to_rebalance); assert_eq!(closed_shard_ids.len(), shards_to_rebalance.len()); - let mut per_available_ingester_num_shards: HashMap<&str, usize> = active_ids + let mut per_ready_ingester_num_shards: HashMap<&str, usize> = ready_ids .iter() - .map(|active_id| (active_id.as_str(), 0)) + .map(|ready_id| (ready_id.as_str(), 0)) .collect(); for shard in model.all_shards() { @@ -3640,68 +3774,85 @@ mod tests { continue; } if let Some(count_shard) = - per_available_ingester_num_shards.get_mut(shard.leader_id.as_str()) + per_ready_ingester_num_shards.get_mut(shard.leader_id.as_str()) { *count_shard += 1; } } - // Now we move the different shards. - let mut per_ingester_num_shards_sorted: BTreeSet<(usize, &str)> = - per_available_ingester_num_shards - .into_iter() - .map(|(ingester_id, num_shards)| (num_shards, ingester_id)) - .collect(); - let mut opened_shards: Vec = Vec::new(); - for _ in 0..shards_to_rebalance.len() { - let (num_shards, ingester_id) = per_ingester_num_shards_sorted.pop_first().unwrap(); - let opened_shard = Shard { - index_uid: Some(index_uid.clone()), - source_id: source_id.to_string(), - shard_id: Some(ShardId::from(shard_id)), - leader_id: ingester_id.to_string(), - shard_state: ShardState::Open as i32, - ..Default::default() - }; - per_ingester_num_shards_sorted.insert((num_shards + 1, ingester_id)); - opened_shards.push(opened_shard); - shard_id += 1; - } + // Now we move the different shards to ready ingesters (not retiring ones). + // We can only simulate this if there are ready ingesters to receive shards. 
+ if !ready_ids.is_empty() { + let mut per_ingester_num_shards_sorted: BTreeSet<(usize, &str)> = + per_ready_ingester_num_shards + .into_iter() + .map(|(ingester_id, num_shards)| (num_shards, ingester_id)) + .collect(); + let mut opened_shards: Vec = Vec::new(); + for _ in 0..shards_to_rebalance.len() { + let (num_shards, ingester_id) = per_ingester_num_shards_sorted.pop_first().unwrap(); + let opened_shard = Shard { + index_uid: Some(index_uid.clone()), + source_id: source_id.to_string(), + shard_id: Some(ShardId::from(shard_id)), + leader_id: ingester_id.to_string(), + shard_state: ShardState::Open as i32, + ..Default::default() + }; + per_ingester_num_shards_sorted.insert((num_shards + 1, ingester_id)); + opened_shards.push(opened_shard); + shard_id += 1; + } - if let Some((min_shards, max_shards)) = per_ingester_num_shards_sorted - .iter() - .map(|(num_shards, _)| num_shards) - .copied() - .minmax() - .into_option() - { - assert!(min_shards + min_shards.div_ceil(10).max(2) >= max_shards); - } + if let Some((min_shards, max_shards)) = per_ingester_num_shards_sorted + .iter() + .map(|(num_shards, _)| num_shards) + .copied() + .minmax() + .into_option() + { + assert!(min_shards + min_shards.div_ceil(10).max(2) >= max_shards); + } - // Test stability of the algorithm - model.insert_shards(&index_uid, &source_id, opened_shards); + // Test stability of the algorithm: mark the retiring ingesters as + // decommissioned, insert the new shards, and verify no further rebalance is + // needed among the ready ingesters. 
+ for ingester_id in &retiring_ids { + let ingester = IngesterPoolEntry { + client: ingester_client.clone(), + status: IngesterStatus::Decommissioned, + }; + ingester_pool.insert(NodeId::from(ingester_id.clone()), ingester); + } + model.insert_shards(&index_uid, &source_id, opened_shards); - let shards_to_rebalance = controller.compute_shards_to_rebalance(&model); - assert!(shards_to_rebalance.is_empty()); + let shards_to_rebalance = controller.compute_shards_to_rebalance(&model); + assert!(shards_to_rebalance.is_empty()); + } } proptest! { #[test] fn test_compute_shards_to_rebalance_proptest( - active_shards in proptest::collection::vec(0..13usize, 0..13usize), - inactive_shards in proptest::collection::vec(0..13usize, 0..5usize), + ready_shards in proptest::collection::vec(0..13usize, 0..13usize), + unavailable_shards in proptest::collection::vec(0..13usize, 0..5usize), + retiring_shards in proptest::collection::vec(0..5usize, 0..5usize), ) { - test_compute_shards_to_rebalance_aux(&active_shards, &inactive_shards); + test_compute_shards_to_rebalance_aux(&ready_shards, &unavailable_shards, &retiring_shards); } } #[test] fn test_compute_shards_to_rebalance() { - test_compute_shards_to_rebalance_aux(&[], &[]); - test_compute_shards_to_rebalance_aux(&[0], &[]); - test_compute_shards_to_rebalance_aux(&[1], &[]); - test_compute_shards_to_rebalance_aux(&[0, 1], &[]); - test_compute_shards_to_rebalance_aux(&[0, 1], &[1]); - test_compute_shards_to_rebalance_aux(&[0, 1, 2], &[3, 4]); + test_compute_shards_to_rebalance_aux(&[], &[], &[]); + test_compute_shards_to_rebalance_aux(&[0], &[], &[]); + test_compute_shards_to_rebalance_aux(&[1], &[], &[]); + test_compute_shards_to_rebalance_aux(&[0, 1], &[], &[]); + test_compute_shards_to_rebalance_aux(&[0, 1], &[1], &[]); + test_compute_shards_to_rebalance_aux(&[0, 1, 2], &[3, 4], &[]); + // Retiring ingesters: all their shards must be rebalanced + test_compute_shards_to_rebalance_aux(&[1, 1], &[], &[3]); + 
test_compute_shards_to_rebalance_aux(&[0, 0, 0], &[], &[5]); + test_compute_shards_to_rebalance_aux(&[2], &[], &[1, 2]); } } diff --git a/quickwit/quickwit-indexing/src/source/ingest/mod.rs b/quickwit/quickwit-indexing/src/source/ingest/mod.rs index 63c746aabe0..273f2df5b37 100644 --- a/quickwit/quickwit-indexing/src/source/ingest/mod.rs +++ b/quickwit/quickwit-indexing/src/source/ingest/mod.rs @@ -361,6 +361,7 @@ impl IngestSource { }; for num_attempts in 1..=retry_params.max_attempts { let Err(error) = ingester + .client .truncate_shards(truncate_shards_request.clone()) .await else { @@ -672,9 +673,11 @@ mod tests { use quickwit_common::metrics::MEMORY_METRICS; use quickwit_common::stream_utils::InFlightValue; use quickwit_config::{IndexingSettings, SourceConfig, SourceParams}; + use quickwit_ingest::IngesterPoolEntry; use quickwit_proto::indexing::IndexingPipelineId; use quickwit_proto::ingest::ingester::{ - FetchMessage, IngesterServiceClient, MockIngesterService, TruncateShardsResponse, + FetchMessage, IngesterServiceClient, IngesterStatus, MockIngesterService, + TruncateShardsResponse, }; use quickwit_proto::ingest::{IngestV2Error, MRecordBatch, Shard, ShardState}; use quickwit_proto::metastore::{AcquireShardsResponse, MockMetastoreService}; @@ -929,8 +932,12 @@ mod tests { Ok(response) }); - let ingester_0 = IngesterServiceClient::from_mock(mock_ingester_0); - ingester_pool.insert("test-ingester-0".into(), ingester_0.clone()); + let client_0 = IngesterServiceClient::from_mock(mock_ingester_0); + let ingester_0 = IngesterPoolEntry { + client: client_0, + status: IngesterStatus::Ready, + }; + ingester_pool.insert("test-ingester-0".into(), ingester_0); let event_broker = EventBroker::default(); @@ -1126,8 +1133,12 @@ mod tests { Ok(response) }); - let ingester_0 = IngesterServiceClient::from_mock(mock_ingester_0); - ingester_pool.insert("test-ingester-0".into(), ingester_0.clone()); + let client_0 = IngesterServiceClient::from_mock(mock_ingester_0); + let 
ingester_0 = IngesterPoolEntry { + client: client_0, + status: IngesterStatus::Ready, + }; + ingester_pool.insert("test-ingester-0".into(), ingester_0); let event_broker = EventBroker::default(); let (shard_positions_update_tx, mut shard_positions_update_rx) = @@ -1291,8 +1302,12 @@ mod tests { Ok(response) }); - let ingester_0 = IngesterServiceClient::from_mock(mock_ingester_0); - ingester_pool.insert("test-ingester-0".into(), ingester_0.clone()); + let client_0 = IngesterServiceClient::from_mock(mock_ingester_0); + let ingester_0 = IngesterPoolEntry { + client: client_0, + status: IngesterStatus::Ready, + }; + ingester_pool.insert("test-ingester-0".into(), ingester_0); let event_broker = EventBroker::default(); let (shard_positions_update_tx, mut shard_positions_update_rx) = @@ -1599,8 +1614,12 @@ mod tests { }) }); - let ingester_0 = IngesterServiceClient::from_mock(mock_ingester_0); - ingester_pool.insert("test-ingester-0".into(), ingester_0.clone()); + let client_0 = IngesterServiceClient::from_mock(mock_ingester_0); + let ingester_0 = IngesterPoolEntry { + client: client_0, + status: IngesterStatus::Ready, + }; + ingester_pool.insert("test-ingester-0".into(), ingester_0); let event_broker = EventBroker::default(); let source_runtime = SourceRuntime { @@ -1699,8 +1718,12 @@ mod tests { Ok(TruncateShardsResponse {}) }); - let ingester_0 = IngesterServiceClient::from_mock(mock_ingester_0); - ingester_pool.insert("test-ingester-0".into(), ingester_0.clone()); + let client_0 = IngesterServiceClient::from_mock(mock_ingester_0); + let ingester_0 = IngesterPoolEntry { + client: client_0, + status: IngesterStatus::Ready, + }; + ingester_pool.insert("test-ingester-0".into(), ingester_0); let mut mock_ingester_1 = MockIngesterService::new(); mock_ingester_1 @@ -1726,8 +1749,12 @@ mod tests { Ok(TruncateShardsResponse {}) }); - let ingester_1 = IngesterServiceClient::from_mock(mock_ingester_1); - ingester_pool.insert("test-ingester-1".into(), ingester_1.clone()); + let 
client_1 = IngesterServiceClient::from_mock(mock_ingester_1); + let ingester_1 = IngesterPoolEntry { + client: client_1, + status: IngesterStatus::Ready, + }; + ingester_pool.insert("test-ingester-1".into(), ingester_1); let mut mock_ingester_3 = MockIngesterService::new(); mock_ingester_3 @@ -1746,8 +1773,12 @@ mod tests { Ok(TruncateShardsResponse {}) }); - let ingester_3 = IngesterServiceClient::from_mock(mock_ingester_3); - ingester_pool.insert("test-ingester-3".into(), ingester_3.clone()); + let client_3 = IngesterServiceClient::from_mock(mock_ingester_3); + let ingester_3 = IngesterPoolEntry { + client: client_3, + status: IngesterStatus::Ready, + }; + ingester_pool.insert("test-ingester-3".into(), ingester_3); let event_broker = EventBroker::default(); let (shard_positions_update_tx, mut shard_positions_update_rx) = diff --git a/quickwit/quickwit-ingest/src/ingest_v2/broadcast.rs b/quickwit/quickwit-ingest/src/ingest_v2/broadcast.rs index 9bbbe94bb47..98be3df36a1 100644 --- a/quickwit/quickwit-ingest/src/ingest_v2/broadcast.rs +++ b/quickwit/quickwit-ingest/src/ingest_v2/broadcast.rs @@ -562,7 +562,7 @@ mod tests { let cluster = create_cluster_for_test(Vec::new(), &["indexer"], &transport, true) .await .unwrap(); - let (_temp_dir, state) = IngesterState::for_test().await; + let (_temp_dir, state) = IngesterState::for_test(cluster.clone()).await; let weak_state = state.weak(); let mut task = BroadcastLocalShardsTask { cluster, diff --git a/quickwit/quickwit-ingest/src/ingest_v2/fetch.rs b/quickwit/quickwit-ingest/src/ingest_v2/fetch.rs index 6e8d085e35d..e20ad123a2e 100644 --- a/quickwit/quickwit-ingest/src/ingest_v2/fetch.rs +++ b/quickwit/quickwit-ingest/src/ingest_v2/fetch.rs @@ -495,7 +495,11 @@ async fn fault_tolerant_fetch_stream( shard_id: Some(shard_id.clone()), from_position_exclusive: Some(from_position_exclusive.clone()), }; - let mut fetch_stream = match ingester.open_fetch_stream(open_fetch_stream_request).await { + let mut fetch_stream = match 
ingester + .client + .open_fetch_stream(open_fetch_stream_request) + .await + { Ok(fetch_stream) => fetch_stream, Err(not_found_error @ IngestV2Error::ShardNotFound { .. }) => { error!( @@ -623,12 +627,15 @@ pub(super) mod tests { use bytes::Bytes; use quickwit_proto::ingest::ShardState; - use quickwit_proto::ingest::ingester::{IngesterServiceClient, MockIngesterService}; + use quickwit_proto::ingest::ingester::{ + IngesterServiceClient, IngesterStatus, MockIngesterService, + }; use quickwit_proto::types::queue_id; use tokio::time::timeout; use super::*; use crate::MRecord; + use crate::ingest_v2::IngesterPoolEntry; pub fn into_fetch_payload(fetch_message: FetchMessage) -> FetchPayload { match fetch_message.message.unwrap() { @@ -1325,8 +1332,11 @@ pub(super) mod tests { Ok(service_stream_1) }); - let ingester_1 = IngesterServiceClient::from_mock(mock_ingester_1); - + let client_1 = IngesterServiceClient::from_mock(mock_ingester_1); + let ingester_1 = IngesterPoolEntry { + client: client_1, + status: IngesterStatus::Ready, + }; ingester_pool.insert("test-ingester-1".into(), ingester_1); let fetch_payload = FetchPayload { @@ -1425,7 +1435,11 @@ pub(super) mod tests { "open fetch stream error".to_string(), )) }); - let ingester_0 = IngesterServiceClient::from_mock(mock_ingester_0); + let client_0 = IngesterServiceClient::from_mock(mock_ingester_0); + let ingester_0 = IngesterPoolEntry { + client: client_0, + status: IngesterStatus::Ready, + }; let mut mock_ingester_1 = MockIngesterService::new(); let index_uid_clone = index_uid.clone(); @@ -1440,7 +1454,11 @@ pub(super) mod tests { Ok(service_stream_1) }); - let ingester_1 = IngesterServiceClient::from_mock(mock_ingester_1); + let client_1 = IngesterServiceClient::from_mock(mock_ingester_1); + let ingester_1 = IngesterPoolEntry { + client: client_1, + status: IngesterStatus::Ready, + }; ingester_pool.insert("test-ingester-0".into(), ingester_0); ingester_pool.insert("test-ingester-1".into(), ingester_1); @@ -1540,7 
+1558,11 @@ pub(super) mod tests { Ok(service_stream_0) }); - let ingester_0 = IngesterServiceClient::from_mock(mock_ingester_0); + let client_0 = IngesterServiceClient::from_mock(mock_ingester_0); + let ingester_0 = IngesterPoolEntry { + client: client_0, + status: IngesterStatus::Ready, + }; let mut mock_ingester_1 = MockIngesterService::new(); let index_uid_clone = index_uid.clone(); @@ -1555,7 +1577,11 @@ pub(super) mod tests { Ok(service_stream_1) }); - let ingester_1 = IngesterServiceClient::from_mock(mock_ingester_1); + let client_1 = IngesterServiceClient::from_mock(mock_ingester_1); + let ingester_1 = IngesterPoolEntry { + client: client_1, + status: IngesterStatus::Ready, + }; ingester_pool.insert("test-ingester-0".into(), ingester_0); ingester_pool.insert("test-ingester-1".into(), ingester_1); @@ -1658,7 +1684,11 @@ pub(super) mod tests { shard_id: ShardId::from(1), }) }); - let ingester_0 = IngesterServiceClient::from_mock(mock_ingester_0); + let client_0 = IngesterServiceClient::from_mock(mock_ingester_0); + let ingester_0 = IngesterPoolEntry { + client: client_0, + status: IngesterStatus::Ready, + }; ingester_pool.insert("test-ingester-0".into(), ingester_0); fault_tolerant_fetch_stream( @@ -1746,8 +1776,11 @@ pub(super) mod tests { Ok(service_stream_2) }); - let ingester = IngesterServiceClient::from_mock(mock_ingester); - + let client = IngesterServiceClient::from_mock(mock_ingester); + let ingester = IngesterPoolEntry { + client, + status: IngesterStatus::Ready, + }; ingester_pool.insert("test-ingester".into(), ingester); let fetch_payload = FetchPayload { diff --git a/quickwit/quickwit-ingest/src/ingest_v2/helpers.rs b/quickwit/quickwit-ingest/src/ingest_v2/helpers.rs new file mode 100644 index 00000000000..5422d6c7ebc --- /dev/null +++ b/quickwit/quickwit-ingest/src/ingest_v2/helpers.rs @@ -0,0 +1,273 @@ +// Copyright 2021-Present Datadog, Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use std::time::{Duration, Instant}; + +use anyhow::{Context, anyhow, bail}; +use futures::StreamExt; +use quickwit_common::pretty::PrettyDisplay; +use quickwit_proto::ingest::ingester::{ + DecommissionRequest, IngesterService, IngesterStatus, OpenObservationStreamRequest, +}; +use tracing::info; + +/// Tries to get the current status of an ingester by opening an observation stream +/// and reading the first message. +/// +/// # Errors +/// +/// Returns an error if: +/// - The observation stream fails to open +/// - The stream ends without producing a message +/// - The stream ends after returning an error +pub async fn try_get_ingester_status( + ingester: &impl IngesterService, +) -> anyhow::Result { + let mut observation_stream = ingester + .open_observation_stream(OpenObservationStreamRequest {}) + .await + .context("failed to open observation stream")?; + + let next_observation_message = observation_stream + .next() + .await + .context("observation stream ended")? + .context("observation stream failed")?; + + Ok(next_observation_message.status()) +} + +/// Waits for an ingester to reach a specific status by monitoring its observation stream. +/// +/// This function continuously polls the observation stream until the ingester reaches +/// the desired status. 
+/// +/// # Errors +/// +/// Returns an error if: +/// - The observation stream fails to open +/// - The stream ends without producing a message +/// - The stream ends after returning an error +/// - The timeout is exceeded +pub async fn wait_for_ingester_status( + ingester: &impl IngesterService, + status: IngesterStatus, + timeout_after: Duration, +) -> anyhow::Result<()> { + debug_assert!( + timeout_after > Duration::ZERO, + "timeout_after should be greater than zero" + ); + tokio::time::timeout( + timeout_after, + wait_for_ingester_status_inner(ingester, status), + ) + .await + .with_context(|| { + format!( + "timed out waiting for ingester to transition to status {status} after {} seconds", + timeout_after.as_secs(), + ) + })? +} + +async fn wait_for_ingester_status_inner( + ingester: &impl IngesterService, + status: IngesterStatus, +) -> anyhow::Result<()> { + let mut observation_stream = ingester + .open_observation_stream(OpenObservationStreamRequest {}) + .await + .context("failed to open observation stream")?; + + loop { + match observation_stream.next().await { + Some(Ok(observation_message)) => { + if observation_message.status() == status { + return Ok(()); + } + } + Some(Err(error)) => { + return Err(anyhow!(error).context("observation stream failed")); + } + None => { + bail!("observation stream ended"); + } + } + } +} + +/// Initiates decommission of an ingester and waits for it to complete. +/// +/// This function sends a decommission request to the ingester and then waits +/// for it to reach the `Decommissioned` status. 
+/// +/// # Errors +/// +/// Returns an error if: +/// - The decommission request fails +/// - The observation stream fails to open +/// - The stream ends without producing a message +/// - The stream ends after returning an error +/// - The timeout is exceeded +pub async fn wait_for_ingester_decommission( + ingester: &impl IngesterService, + timeout_after: Duration, +) -> anyhow::Result<()> { + let now = Instant::now(); + + ingester + .decommission(DecommissionRequest {}) + .await + .context("failed to initiate ingester decommission")?; + + wait_for_ingester_status( + ingester, + IngesterStatus::Decommissioned, + timeout_after.saturating_sub(now.elapsed()), + ) + .await?; + + info!( + "successfully decommissioned ingester in {}", + now.elapsed().pretty_display() + ); + Ok(()) +} + +#[cfg(test)] +mod tests { + + use std::time::Duration; + + use quickwit_common::ServiceStream; + use quickwit_proto::ingest::ingester::{ + DecommissionResponse, IngesterServiceClient, MockIngesterService, ObservationMessage, + }; + + use super::*; + + #[tokio::test] + async fn test_try_get_ingester_status() { + let mut mock_ingester = MockIngesterService::new(); + mock_ingester + .expect_open_observation_stream() + .once() + .returning(|_| { + let (service_stream_tx, service_stream) = ServiceStream::new_bounded(1); + let message = ObservationMessage { + node_id: "test-ingester".to_string(), + status: IngesterStatus::Initializing as i32, + }; + service_stream_tx.try_send(Ok(message)).unwrap(); + Ok(service_stream) + }); + let ingester = IngesterServiceClient::from_mock(mock_ingester); + let status = try_get_ingester_status(&ingester).await.unwrap(); + assert_eq!(status, IngesterStatus::Initializing); + } + + #[tokio::test] + async fn test_wait_for_ingester_status() { + let mut mock_ingester = MockIngesterService::new(); + mock_ingester + .expect_open_observation_stream() + .once() + .returning(|_| { + let (service_stream_tx, service_stream) = ServiceStream::new_bounded(2); + let message 
= ObservationMessage { + node_id: "test-ingester".to_string(), + status: IngesterStatus::Initializing as i32, + }; + service_stream_tx.try_send(Ok(message)).unwrap(); + + let message = ObservationMessage { + node_id: "test-ingester".to_string(), + status: IngesterStatus::Ready as i32, + }; + service_stream_tx.try_send(Ok(message)).unwrap(); + Ok(service_stream) + }); + let ingester = IngesterServiceClient::from_mock(mock_ingester); + wait_for_ingester_status(&ingester, IngesterStatus::Ready, Duration::from_secs(1)) + .await + .unwrap(); + } + + #[tokio::test] + async fn test_wait_for_ingester_decommission_elapsed_timeout_not_zero() { + let mut mock_ingester = MockIngesterService::new(); + mock_ingester + .expect_open_observation_stream() + .once() + .returning(|_| { + let (service_stream_tx, service_stream) = ServiceStream::new_bounded(1); + // Simulate the ingester transitioning to Decommissioned after 50ms. + tokio::spawn(async move { + tokio::time::sleep(Duration::from_millis(50)).await; + let message = ObservationMessage { + node_id: "test-ingester".to_string(), + status: IngesterStatus::Decommissioned as i32, + }; + service_stream_tx.try_send(Ok(message)).unwrap(); + }); + Ok(service_stream) + }); + mock_ingester + .expect_decommission() + .once() + .returning(|_| Ok(DecommissionResponse {})); + let ingester = IngesterServiceClient::from_mock(mock_ingester); + wait_for_ingester_decommission(&ingester, Duration::from_secs(1)) + .await + .unwrap(); + } + + #[tokio::test] + async fn test_wait_for_ingester_decommission() { + let mut mock_ingester = MockIngesterService::new(); + mock_ingester + .expect_open_observation_stream() + .once() + .returning(|_| { + let (service_stream_tx, service_stream) = ServiceStream::new_bounded(3); + let message = ObservationMessage { + node_id: "test-ingester".to_string(), + status: IngesterStatus::Ready as i32, + }; + service_stream_tx.try_send(Ok(message)).unwrap(); + + let message = ObservationMessage { + node_id: 
"test-ingester".to_string(), + status: IngesterStatus::Decommissioning as i32, + }; + service_stream_tx.try_send(Ok(message)).unwrap(); + + let message = ObservationMessage { + node_id: "test-ingester".to_string(), + status: IngesterStatus::Decommissioned as i32, + }; + service_stream_tx.try_send(Ok(message)).unwrap(); + Ok(service_stream) + }); + mock_ingester + .expect_decommission() + .once() + .returning(|_| Ok(DecommissionResponse {})); + let ingester = IngesterServiceClient::from_mock(mock_ingester); + wait_for_ingester_decommission(&ingester, Duration::from_secs(1)) + .await + .unwrap(); + } +} diff --git a/quickwit/quickwit-ingest/src/ingest_v2/idle.rs b/quickwit/quickwit-ingest/src/ingest_v2/idle.rs index 14f535be4f2..5b91dc8025e 100644 --- a/quickwit/quickwit-ingest/src/ingest_v2/idle.rs +++ b/quickwit/quickwit-ingest/src/ingest_v2/idle.rs @@ -78,7 +78,8 @@ impl CloseIdleShardsTask { #[cfg(test)] mod tests { - + use quickwit_cluster::{ChannelTransport, create_cluster_for_test}; + use quickwit_config::service::QuickwitService; use quickwit_proto::types::{IndexUid, ShardId}; use super::*; @@ -87,7 +88,15 @@ mod tests { #[tokio::test] async fn test_close_idle_shards_run() { - let (_temp_dir, state) = IngesterState::for_test().await; + let cluster = create_cluster_for_test( + Vec::new(), + &[QuickwitService::Indexer.as_str()], + &ChannelTransport::default(), + true, + ) + .await + .unwrap(); + let (_temp_dir, state) = IngesterState::for_test(cluster).await; let weak_state = state.weak(); let idle_shard_timeout = RUN_INTERVAL_PERIOD * 4; let join_handle = CloseIdleShardsTask::spawn(weak_state, idle_shard_timeout); diff --git a/quickwit/quickwit-ingest/src/ingest_v2/ingester.rs b/quickwit/quickwit-ingest/src/ingest_v2/ingester.rs index 65c268881ac..3258a528fbd 100644 --- a/quickwit/quickwit-ingest/src/ingest_v2/ingester.rs +++ b/quickwit/quickwit-ingest/src/ingest_v2/ingester.rs @@ -19,7 +19,6 @@ use std::path::Path; use std::sync::Arc; use 
std::time::{Duration, Instant}; -use anyhow::Context; use async_trait::async_trait; use bytesize::ByteSize; use futures::StreamExt; @@ -31,7 +30,6 @@ use quickwit_common::metrics::{GaugeGuard, MEMORY_METRICS}; use quickwit_common::pretty::PrettyDisplay; use quickwit_common::pubsub::{EventBroker, EventSubscriber}; use quickwit_common::rate_limiter::{RateLimiter, RateLimiterSettings}; -use quickwit_common::tower::Pool; use quickwit_common::{ServiceStream, rate_limited_error, rate_limited_warn}; use quickwit_proto::control_plane::{ AdviseResetShardsRequest, ControlPlaneService, ControlPlaneServiceClient, @@ -39,13 +37,13 @@ use quickwit_proto::control_plane::{ use quickwit_proto::indexing::ShardPositionsUpdate; use quickwit_proto::ingest::ingester::{ AckReplicationMessage, CloseShardsRequest, CloseShardsResponse, DecommissionRequest, - DecommissionResponse, FetchMessage, IngesterService, IngesterServiceClient, - IngesterServiceStream, IngesterStatus, InitShardFailure, InitShardSuccess, InitShardsRequest, - InitShardsResponse, ObservationMessage, OpenFetchStreamRequest, OpenObservationStreamRequest, - OpenReplicationStreamRequest, OpenReplicationStreamResponse, PersistFailure, - PersistFailureReason, PersistRequest, PersistResponse, PersistSuccess, ReplicateFailureReason, - ReplicateSubrequest, RetainShardsForSource, RetainShardsRequest, RetainShardsResponse, - SynReplicationMessage, TruncateShardsRequest, TruncateShardsResponse, + DecommissionResponse, FetchMessage, IngesterService, IngesterServiceStream, IngesterStatus, + InitShardFailure, InitShardSuccess, InitShardsRequest, InitShardsResponse, ObservationMessage, + OpenFetchStreamRequest, OpenObservationStreamRequest, OpenReplicationStreamRequest, + OpenReplicationStreamResponse, PersistFailure, PersistFailureReason, PersistRequest, + PersistResponse, PersistSuccess, ReplicateFailureReason, ReplicateSubrequest, + RetainShardsForSource, RetainShardsRequest, RetainShardsResponse, SynReplicationMessage, + 
TruncateShardsRequest, TruncateShardsResponse, }; use quickwit_proto::ingest::{ CommitTypeV2, DocBatchV2, IngestV2Error, IngestV2Result, ParseFailure, Shard, ShardIds, @@ -132,7 +130,7 @@ impl Ingester { pub async fn try_new( cluster: Cluster, control_plane: ControlPlaneServiceClient, - ingester_pool: Pool, + ingester_pool: IngesterPool, wal_dir_path: &Path, disk_capacity: ByteSize, memory_capacity: ByteSize, @@ -141,7 +139,7 @@ impl Ingester { idle_shard_timeout: Duration, ) -> IngestV2Result { let self_node_id: NodeId = cluster.self_node_id().into(); - let state = IngesterState::load(wal_dir_path, rate_limiter_settings); + let state = IngesterState::load(cluster.clone(), wal_dir_path, rate_limiter_settings).await; let weak_state = state.weak(); BroadcastLocalShardsTask::spawn(cluster, weak_state.clone()); @@ -164,12 +162,12 @@ impl Ingester { } /// Checks whether the ingester is fully decommissioned and updates its status accordingly. - fn check_decommissioning_status(&self, state: &mut InnerIngesterState) { + async fn check_decommissioning_status(&self, state: &mut InnerIngesterState) { if state.status() != IngesterStatus::Decommissioning { return; } if state.shards.values().all(|shard| shard.is_indexed()) { - state.set_status(IngesterStatus::Decommissioned); + state.set_status(IngesterStatus::Decommissioned).await; } } @@ -398,6 +396,7 @@ impl Ingester { IngestV2Error::Unavailable(message) })?; let mut ack_replication_stream = ingester + .client .open_replication_stream(syn_replication_stream) .await?; ack_replication_stream @@ -454,30 +453,17 @@ impl Ingester { let commit_type = persist_request.commit_type(); let force_commit = commit_type == CommitTypeV2::Force; - let leader_id: NodeId = persist_request.leader_id.into(); let mut state_guard = with_lock_metrics!(self.state.lock_fully().await, "persist", "write")?; - - if state_guard.status() != IngesterStatus::Ready { - persist_failures.reserve_exact(persist_request.subrequests.len()); - - for subrequest in 
persist_request.subrequests { - let persist_failure = PersistFailure { - subrequest_id: subrequest.subrequest_id, - index_uid: subrequest.index_uid, - source_id: subrequest.source_id, - shard_id: subrequest.shard_id, - reason: PersistFailureReason::ShardClosed as i32, - }; - persist_failures.push(persist_failure); - } - let persist_response = PersistResponse { - leader_id: leader_id.into(), - successes: Vec::new(), - failures: persist_failures, - }; - return Ok(persist_response); + let status = state_guard.status(); + + if !status.accepts_write_requests() { + let error = IngestV2Error::Unavailable(format!( + "ingester {} is not ready: {status}", + self.self_node_id + )); + return Err(error); } // first verify if we would locally accept each subrequest { @@ -844,9 +830,13 @@ impl Ingester { let follower_id: NodeId = open_replication_stream_request.follower_id.into(); let mut state_guard = self.state.lock_partially().await?; + let status = state_guard.status(); - if state_guard.status() != IngesterStatus::Ready { - return Err(IngestV2Error::Internal("node decommissioned".to_string())); + if !status.accepts_write_requests() { + let error = IngestV2Error::Unavailable(format!( + "ingester {follower_id} is not ready: {status}", + )); + return Err(error); } let Entry::Vacant(entry) = state_guard.replication_tasks.entry(leader_id.clone()) else { return Err(IngestV2Error::Internal(format!( @@ -929,9 +919,14 @@ impl Ingester { ) -> IngestV2Result { let mut state_guard = with_lock_metrics!(self.state.lock_fully().await, "init_shards", "write")?; - - if state_guard.status() != IngesterStatus::Ready { - return Err(IngestV2Error::Internal("node decommissioned".to_string())); + let status = state_guard.status(); + + if !status.accepts_write_requests() { + let error = IngestV2Error::Unavailable(format!( + "ingester {} is not ready: {status}", + self.self_node_id + )); + return Err(error); } let mut successes = Vec::with_capacity(init_shards_request.subrequests.len()); let mut 
failures = Vec::new(); @@ -1000,7 +995,7 @@ impl Ingester { let wal_usage = state_guard.mrecordlog.resource_usage(); report_wal_usage(wal_usage); - self.check_decommissioning_status(&mut state_guard); + self.check_decommissioning_status(&mut state_guard).await; let truncate_response = TruncateShardsResponse {}; Ok(truncate_response) } @@ -1027,22 +1022,6 @@ impl Ingester { Ok(response) } - async fn decommission_inner( - &self, - _decommission_request: DecommissionRequest, - ) -> IngestV2Result { - info!("decommissioning ingester"); - let mut state_guard = self.state.lock_partially().await?; - - for shard in state_guard.shards.values_mut() { - shard.close(); - } - state_guard.set_status(IngesterStatus::Decommissioning); - self.check_decommissioning_status(&mut state_guard); - - Ok(DecommissionResponse {}) - } - pub async fn debug_info(&self) -> JsonValue { let state_guard = match self.state.lock_fully().await { Ok(state_guard) => state_guard, @@ -1179,7 +1158,7 @@ impl IngesterService for Ingester { .delete_shard(&queue_id, "control-plane-retain-shards-rpc") .await; } - self.check_decommissioning_status(&mut state_guard); + self.check_decommissioning_status(&mut state_guard).await; Ok(RetainShardsResponse {}) } @@ -1199,9 +1178,50 @@ impl IngesterService for Ingester { async fn decommission( &self, - decommission_request: DecommissionRequest, + _decommission_request: DecommissionRequest, ) -> IngestV2Result { - self.decommission_inner(decommission_request).await + // Retire the ingester immediately by setting its status to `Retiring`. + info!("retiring ingester"); + let mut state_guard = self.state.lock_partially().await?; + state_guard.set_status(IngesterStatus::Retiring).await; + drop(state_guard); // Dropping explicitly for readability. + + // Drain write requests by scheduling the decommissioning of the ingester after a delay + // allowing the propagation of the `Retiring` status to other nodes. 
+ let self_clone = self.clone(); + tokio::spawn(async move { + const DECOMMISSION_DELAY: Duration = if cfg!(any(test, feature = "testsuite")) { + Duration::from_millis(100) + } else { + // Having to wait for 10s is not great but we can live with it. During this time, we + // still make progress towards decommissioning because we gradually receive less + // write requests and indexing is still ongoing. However, it sets a floor on the + // amount of time with which we can fully decommission an ingester. This will be + // most noticeable when using Quickwit locally. + Duration::from_secs(10) + }; + tokio::time::sleep(DECOMMISSION_DELAY).await; + + info!("decommissioning ingester"); + let mut state_guard = match self_clone.state.lock_partially().await { + Ok(state_guard) => state_guard, + Err(error) => { + error!(%error, "failed to decommission ingester"); + return; + } + }; + state_guard + .set_status(IngesterStatus::Decommissioning) + .await; + + for shard in state_guard.shards.values_mut() { + shard.close(); + } + self_clone + .check_decommissioning_status(&mut state_guard) + .await; + }); + Ok(DecommissionResponse {}) } } @@ -1234,43 +1254,6 @@ impl EventSubscriber for WeakIngesterState { } } -pub async fn wait_for_ingester_status( - ingester: impl IngesterService, - status: IngesterStatus, -) -> anyhow::Result<()> { - let mut observation_stream = ingester - .open_observation_stream(OpenObservationStreamRequest {}) - .await - .context("failed to open observation stream")?; - - while let Some(observation_message_result) = observation_stream.next().await { - let observation_message = - observation_message_result.context("observation stream ended unexpectedly")?; - - if observation_message.status() == status { - break; - } - } - Ok(()) -} - -pub async fn wait_for_ingester_decommission(ingester: Ingester) -> anyhow::Result<()> { - let now = Instant::now(); - - ingester - .decommission(DecommissionRequest {}) - .await - .context("failed to initiate ingester 
decommission")?; - - wait_for_ingester_status(ingester, IngesterStatus::Decommissioned).await?; - - info!( - "successfully decommissioned ingester in {}", - now.elapsed().pretty_display() - ); - Ok(()) -} - struct PendingPersistSubrequest { queue_id: QueueId, subrequest_id: u32, @@ -1298,8 +1281,8 @@ mod tests { use quickwit_config::service::QuickwitService; use quickwit_proto::control_plane::{AdviseResetShardsResponse, MockControlPlaneService}; use quickwit_proto::ingest::ingester::{ - IngesterServiceGrpcServer, IngesterServiceGrpcServerAdapter, InitShardSubrequest, - PersistSubrequest, TruncateShardsSubrequest, + IngesterServiceClient, IngesterServiceGrpcServer, IngesterServiceGrpcServerAdapter, + IngesterStatus, InitShardSubrequest, PersistSubrequest, TruncateShardsSubrequest, }; use quickwit_proto::ingest::{ DocBatchV2, ParseFailureReason, ShardIdPosition, ShardIdPositions, ShardIds, ShardPKey, @@ -1312,10 +1295,11 @@ mod tests { use super::*; use crate::MRecord; - use crate::ingest_v2::DEFAULT_IDLE_SHARD_TIMEOUT; use crate::ingest_v2::broadcast::ShardInfos; use crate::ingest_v2::doc_mapper::try_build_doc_mapper; use crate::ingest_v2::fetch::tests::{into_fetch_eof, into_fetch_payload}; + use crate::ingest_v2::helpers::wait_for_ingester_status; + use crate::ingest_v2::{DEFAULT_IDLE_SHARD_TIMEOUT, IngesterPoolEntry}; const MAX_GRPC_MESSAGE_SIZE: ByteSize = ByteSize::mib(1); @@ -1431,7 +1415,7 @@ mod tests { .await .unwrap(); - wait_for_ingester_status(ingester.clone(), IngesterStatus::Ready) + wait_for_ingester_status(&ingester, IngesterStatus::Ready, Duration::from_secs(1)) .await .unwrap(); @@ -1460,9 +1444,10 @@ mod tests { let mut state_guard = ingester.state.lock_fully().await.unwrap(); let index_uid = IndexUid::for_test("test-index", 0); - let queue_id_01 = queue_id(&index_uid, "test-source", &ShardId::from(1)); - let queue_id_02 = queue_id(&index_uid, "test-source", &ShardId::from(2)); - let queue_id_03 = queue_id(&index_uid, "test-source", 
&ShardId::from(3)); + let source_id = SourceId::from("test-source"); + let queue_id_01 = queue_id(&index_uid, &source_id, &ShardId::from(1)); + let queue_id_02 = queue_id(&index_uid, &source_id, &ShardId::from(2)); + let queue_id_03 = queue_id(&index_uid, &source_id, &ShardId::from(3)); state_guard .mrecordlog @@ -1514,7 +1499,7 @@ mod tests { .await .unwrap(); - state_guard.set_status(IngesterStatus::Initializing); + state_guard.set_status(IngesterStatus::Initializing).await; drop(state_guard); @@ -1546,22 +1531,15 @@ mod tests { let mut state_guard = ingester.state.lock_fully().await.unwrap(); let index_uid = IndexUid::for_test("test-index", 0); + let source_id = SourceId::from("test-source"); - let shard_00 = IngesterShard::new_solo( - index_uid.clone(), - "test-source".to_string(), - ShardId::from(0), - ) - .build(); + let shard_00 = + IngesterShard::new_solo(index_uid.clone(), source_id.clone(), ShardId::from(0)).build(); state_guard.shards.insert(shard_00.queue_id(), shard_00); - let shard_01 = IngesterShard::new_solo( - index_uid.clone(), - "test-source".to_string(), - ShardId::from(1), - ) - .advertisable() - .build(); + let shard_01 = IngesterShard::new_solo(index_uid.clone(), source_id, ShardId::from(1)) + .advertisable() + .build(); let queue_id_01 = shard_01.queue_id(); state_guard.shards.insert(queue_id_01.clone(), shard_01); drop(state_guard); @@ -1615,6 +1593,7 @@ mod tests { let (ingester_ctx, ingester) = IngesterForTest::default().build().await; let index_uid = IndexUid::for_test("test-index", 0); + let source_id = SourceId::from("test-source"); let doc_mapping_uid = DocMappingUid::random(); let doc_mapping_json = format!( @@ -1628,7 +1607,7 @@ mod tests { ); let primary_shard = Shard { index_uid: Some(index_uid.clone()), - source_id: "test-source".to_string(), + source_id: source_id.clone(), shard_id: Some(ShardId::from(1)), shard_state: ShardState::Open as i32, leader_id: ingester_ctx.node_id.to_string(), @@ -1649,7 +1628,7 @@ mod tests { .await 
.unwrap(); - let queue_id = queue_id(&index_uid, "test-source", &ShardId::from(1)); + let queue_id = queue_id(&index_uid, &source_id, &ShardId::from(1)); let shard = state_guard.shards.get(&queue_id).unwrap(); shard.assert_is_solo(); shard.assert_is_open(); @@ -1663,6 +1642,7 @@ mod tests { let (ingester_ctx, ingester) = IngesterForTest::default().build().await; let index_uid = IndexUid::for_test("test-index", 0); + let source_id = SourceId::from("test-source"); let doc_mapping_uid = DocMappingUid::random(); let doc_mapping_json = format!( @@ -1672,7 +1652,7 @@ mod tests { ); let shard = Shard { index_uid: Some(index_uid.clone()), - source_id: "test-source".to_string(), + source_id: source_id.clone(), shard_id: Some(ShardId::from(1)), shard_state: ShardState::Open as i32, leader_id: ingester_ctx.node_id.to_string(), @@ -1700,7 +1680,7 @@ mod tests { let state_guard = ingester.state.lock_fully().await.unwrap(); - let queue_id = queue_id(&index_uid, "test-source", &ShardId::from(1)); + let queue_id = queue_id(&index_uid, &source_id, &ShardId::from(1)); let shard = state_guard.shards.get(&queue_id).unwrap(); shard.assert_is_solo(); shard.assert_is_open(); @@ -1714,8 +1694,9 @@ mod tests { async fn test_ingester_persist() { let (ingester_ctx, ingester) = IngesterForTest::default().build().await; - let index_uid = IndexUid::for_test("test-index", 0); - let index_uid2: IndexUid = IndexUid::for_test("test-index", 1); + let index_uid_0 = IndexUid::for_test("test-index", 0); + let index_uid_1 = IndexUid::for_test("test-index", 1); + let source_id = SourceId::from("test-source"); let doc_mapping_uid = DocMappingUid::random(); let doc_mapping_json = format!( @@ -1728,8 +1709,8 @@ mod tests { InitShardSubrequest { subrequest_id: 0, shard: Some(Shard { - index_uid: Some(index_uid.clone()), - source_id: "test-source".to_string(), + index_uid: Some(index_uid_0.clone()), + source_id: source_id.clone(), shard_id: Some(ShardId::from(1)), shard_state: ShardState::Open as i32, 
leader_id: ingester_ctx.node_id.to_string(), @@ -1742,8 +1723,8 @@ mod tests { InitShardSubrequest { subrequest_id: 1, shard: Some(Shard { - index_uid: Some(index_uid2.clone()), - source_id: "test-source".to_string(), + index_uid: Some(index_uid_1.clone()), + source_id: source_id.clone(), shard_id: Some(ShardId::from(1)), shard_state: ShardState::Open as i32, leader_id: ingester_ctx.node_id.to_string(), @@ -1763,15 +1744,15 @@ mod tests { subrequests: vec![ PersistSubrequest { subrequest_id: 0, - index_uid: Some(index_uid.clone()), - source_id: "test-source".to_string(), + index_uid: Some(index_uid_0.clone()), + source_id: source_id.clone(), shard_id: Some(ShardId::from(1)), doc_batch: Some(DocBatchV2::for_test([r#"{"doc": "test-doc-010"}"#])), }, PersistSubrequest { subrequest_id: 1, - index_uid: Some(index_uid2.clone()), - source_id: "test-source".to_string(), + index_uid: Some(index_uid_1.clone()), + source_id: source_id.clone(), shard_id: Some(ShardId::from(1)), doc_batch: Some(DocBatchV2::for_test([ r#"{"doc": "test-doc-110"}"#, @@ -1787,7 +1768,7 @@ mod tests { let persist_success_0 = &persist_response.successes[0]; assert_eq!(persist_success_0.subrequest_id, 0); - assert_eq!(persist_success_0.index_uid(), &index_uid); + assert_eq!(persist_success_0.index_uid(), &index_uid_0); assert_eq!(persist_success_0.source_id, "test-source"); assert_eq!(persist_success_0.shard_id(), ShardId::from(1)); assert_eq!( @@ -1797,7 +1778,7 @@ mod tests { let persist_success_1 = &persist_response.successes[1]; assert_eq!(persist_success_1.subrequest_id, 1); - assert_eq!(persist_success_1.index_uid(), &index_uid2); + assert_eq!(persist_success_1.index_uid(), &index_uid_1); assert_eq!(persist_success_1.source_id, "test-source"); assert_eq!(persist_success_1.shard_id(), ShardId::from(1)); assert_eq!( @@ -1808,7 +1789,7 @@ mod tests { let state_guard = ingester.state.lock_fully().await.unwrap(); assert_eq!(state_guard.shards.len(), 2); - let queue_id_01 = queue_id(&index_uid, 
"test-source", &ShardId::from(1)); + let queue_id_01 = queue_id(&index_uid_0, &source_id, &ShardId::from(1)); let solo_shard_01 = state_guard.shards.get(&queue_id_01).unwrap(); solo_shard_01.assert_is_solo(); solo_shard_01.assert_is_open(); @@ -1820,7 +1801,7 @@ mod tests { &[(0, [0, 0], r#"{"doc": "test-doc-010"}"#), (1, [0, 1], "")], ); - let queue_id_11 = queue_id(&index_uid2, "test-source", &ShardId::from(1)); + let queue_id_11 = queue_id(&index_uid_1, &source_id, &ShardId::from(1)); let solo_shard_11 = state_guard.shards.get(&queue_id_11).unwrap(); solo_shard_11.assert_is_solo(); solo_shard_11.assert_is_open(); @@ -1842,6 +1823,7 @@ mod tests { let (ingester_ctx, ingester) = IngesterForTest::default().build().await; let index_uid = IndexUid::for_test("test-index", 0); + let source_id = SourceId::from("test-source"); let doc_mapping_uid = DocMappingUid::random(); let doc_mapping_json = format!( @@ -1854,7 +1836,7 @@ mod tests { subrequest_id: 0, shard: Some(Shard { index_uid: Some(index_uid.clone()), - source_id: "test-source".to_string(), + source_id: source_id.clone(), shard_id: Some(ShardId::from(0)), shard_state: ShardState::Open as i32, leader_id: ingester_ctx.node_id.to_string(), @@ -1885,7 +1867,7 @@ mod tests { subrequests: vec![PersistSubrequest { subrequest_id: 0, index_uid: Some(index_uid.clone()), - source_id: "test-source".to_string(), + source_id, shard_id: Some(ShardId::from(0)), doc_batch: None, }], @@ -1911,6 +1893,7 @@ mod tests { let (ingester_ctx, ingester) = IngesterForTest::default().build().await; let index_uid = IndexUid::for_test("test-index", 0); + let source_id = SourceId::from("test-source"); let doc_mapping_uid = DocMappingUid::random(); let doc_mapping_json = format!( @@ -1925,7 +1908,7 @@ mod tests { subrequest_id: 0, shard: Some(Shard { index_uid: Some(index_uid.clone()), - source_id: "test-source".to_string(), + source_id: source_id.clone(), shard_id: Some(ShardId::from(0)), shard_state: ShardState::Open as i32, leader_id: 
ingester_ctx.node_id.to_string(), @@ -1946,7 +1929,7 @@ mod tests { subrequests: vec![PersistSubrequest { subrequest_id: 0, index_uid: Some(index_uid.clone()), - source_id: "test-source".to_string(), + source_id, shard_id: Some(ShardId::from(0)), doc_batch: Some(DocBatchV2::for_test([ "", // invalid @@ -1986,6 +1969,7 @@ mod tests { let (ingester_ctx, ingester) = IngesterForTest::default().build().await; let index_uid = IndexUid::for_test("test-index", 0); + let source_id = SourceId::from("test-source"); let doc_mapping_uid = DocMappingUid::random(); let doc_mapping_json = format!( @@ -2000,7 +1984,7 @@ mod tests { subrequest_id: 0, shard: Some(Shard { index_uid: Some(index_uid.clone()), - source_id: "test-source".to_string(), + source_id: source_id.clone(), shard_id: Some(ShardId::from(0)), shard_state: ShardState::Open as i32, leader_id: ingester_ctx.node_id.to_string(), @@ -2021,7 +2005,7 @@ mod tests { subrequests: vec![PersistSubrequest { subrequest_id: 0, index_uid: Some(index_uid.clone()), - source_id: "test-source".to_string(), + source_id, shard_id: Some(ShardId::from(0)), doc_batch: Some(DocBatchV2::for_test([ "", // invalid @@ -2049,6 +2033,7 @@ mod tests { .await; let index_uid = IndexUid::for_test("test-index", 0); + let source_id = SourceId::from("test-source"); let doc_mapping_uid = DocMappingUid::random(); let doc_mapping_json = format!( @@ -2063,7 +2048,7 @@ mod tests { subrequest_id: 0, shard: Some(Shard { index_uid: Some(index_uid.clone()), - source_id: "test-source".to_string(), + source_id: source_id.clone(), shard_id: Some(ShardId::from(0)), shard_state: ShardState::Open as i32, leader_id: ingester_ctx.node_id.to_string(), @@ -2084,7 +2069,7 @@ mod tests { subrequests: vec![PersistSubrequest { subrequest_id: 0, index_uid: Some(index_uid.clone()), - source_id: "test-source".to_string(), + source_id, shard_id: Some(ShardId::from(0)), doc_batch: Some(DocBatchV2::for_test(["", "[]", r#"{"foo": "bar"}"#])), }], @@ -2110,6 +2095,7 @@ mod tests { 
.await; let index_uid = IndexUid::for_test("test-index", 0); + let source_id = SourceId::from("test-source"); let doc_mapping_uid = DocMappingUid::random(); let doc_mapping_json = format!( @@ -2124,7 +2110,7 @@ mod tests { subrequest_id: 0, shard: Some(Shard { index_uid: Some(index_uid.clone()), - source_id: "test-source".to_string(), + source_id: source_id.clone(), shard_id: Some(ShardId::from(0)), shard_state: ShardState::Open as i32, leader_id: ingester_ctx.node_id.to_string(), @@ -2145,7 +2131,7 @@ mod tests { subrequests: vec![PersistSubrequest { subrequest_id: 0, index_uid: Some(index_uid.clone()), - source_id: "test-source".to_string(), + source_id, shard_id: Some(ShardId::from(0)), doc_batch: Some(DocBatchV2::for_test(["", "[]", r#"{"foo": "bar"}"#])), }], @@ -2177,13 +2163,10 @@ mod tests { let mut state_guard = ingester.state.lock_fully().await.unwrap(); let index_uid = IndexUid::for_test("test-index", 0); - let queue_id = queue_id(&index_uid, "test-source", &ShardId::from(1)); - let solo_shard = IngesterShard::new_solo( - index_uid.clone(), - "test-source".to_string(), - ShardId::from(1), - ) - .build(); + let source_id = SourceId::from("test-source"); + let solo_shard = + IngesterShard::new_solo(index_uid.clone(), source_id, ShardId::from(1)).build(); + let queue_id = solo_shard.queue_id(); state_guard.shards.insert(queue_id.clone(), solo_shard); state_guard @@ -2206,7 +2189,7 @@ mod tests { subrequests: vec![PersistSubrequest { subrequest_id: 0, index_uid: Some(index_uid.clone()), - source_id: "test-source".to_string(), + source_id: source_id, shard_id: Some(ShardId::from(1)), doc_batch: Some(DocBatchV2::for_test([r#"test-doc-foo"#])), }], @@ -2236,17 +2219,15 @@ mod tests { let mut state_guard = ingester.state.lock_fully().await.unwrap(); let index_uid = IndexUid::for_test("test-index", 0); + let source_id = SourceId::from("test-source"); let doc_mapper = try_build_doc_mapper("{}").unwrap(); // Insert a dangling shard, i.e. 
a shard without a corresponding queue. - let solo_shard = IngesterShard::new_solo( - index_uid.clone(), - "test-source".to_string(), - ShardId::from(1), - ) - .with_doc_mapper(doc_mapper) - .build(); + let solo_shard = + IngesterShard::new_solo(index_uid.clone(), source_id.clone(), ShardId::from(1)) + .with_doc_mapper(doc_mapper) + .build(); state_guard.shards.insert(solo_shard.queue_id(), solo_shard); drop(state_guard); @@ -2256,7 +2237,7 @@ mod tests { subrequests: vec![PersistSubrequest { subrequest_id: 0, index_uid: Some(index_uid.clone()), - source_id: "test-source".to_string(), + source_id, shard_id: Some(ShardId::from(1)), doc_batch: Some(DocBatchV2::for_test([r#"{"doc": "test-doc-foo"}"#])), }], @@ -2297,11 +2278,15 @@ mod tests { leader_ctx.ingester_pool.insert( follower_ctx.node_id.clone(), - IngesterServiceClient::new(follower.clone()), + IngesterPoolEntry { + client: IngesterServiceClient::new(follower.clone()), + status: IngesterStatus::Ready, + }, ); let index_uid = IndexUid::for_test("test-index", 0); - let index_uid2: IndexUid = IndexUid::for_test("test-index", 1); + let index_uid2 = IndexUid::for_test("test-index", 1); + let source_id = SourceId::from("test-source"); let doc_mapping_uid = DocMappingUid::random(); let doc_mapping_json = format!( @@ -2315,7 +2300,7 @@ mod tests { subrequest_id: 0, shard: Some(Shard { index_uid: Some(index_uid.clone()), - source_id: "test-source".to_string(), + source_id: source_id.clone(), shard_id: Some(ShardId::from(1)), shard_state: ShardState::Open as i32, leader_id: leader_ctx.node_id.to_string(), @@ -2330,7 +2315,7 @@ mod tests { subrequest_id: 1, shard: Some(Shard { index_uid: Some(index_uid2.clone()), - source_id: "test-source".to_string(), + source_id: source_id.clone(), shard_id: Some(ShardId::from(1)), shard_state: ShardState::Open as i32, leader_id: leader_ctx.node_id.to_string(), @@ -2352,14 +2337,14 @@ mod tests { PersistSubrequest { subrequest_id: 0, index_uid: Some(index_uid.clone()), - source_id: 
"test-source".to_string(), + source_id: source_id.clone(), shard_id: Some(ShardId::from(1)), doc_batch: Some(DocBatchV2::for_test([r#"{"doc": "test-doc-010"}"#])), }, PersistSubrequest { subrequest_id: 1, index_uid: Some(index_uid2.clone()), - source_id: "test-source".to_string(), + source_id: source_id.clone(), shard_id: Some(ShardId::from(1)), doc_batch: Some(DocBatchV2::for_test([ r#"{"doc": "test-doc-110"}"#, @@ -2396,7 +2381,7 @@ mod tests { let leader_state_guard = leader.state.lock_fully().await.unwrap(); assert_eq!(leader_state_guard.shards.len(), 2); - let queue_id_01 = queue_id(&index_uid, "test-source", &ShardId::from(1)); + let queue_id_01 = queue_id(&index_uid, &source_id, &ShardId::from(1)); let primary_shard_01 = leader_state_guard.shards.get(&queue_id_01).unwrap(); primary_shard_01.assert_is_primary(); primary_shard_01.assert_is_open(); @@ -2408,7 +2393,7 @@ mod tests { &[(0, [0, 0], r#"{"doc": "test-doc-010"}"#), (1, [0, 1], "")], ); - let queue_id_11 = queue_id(&index_uid2, "test-source", &ShardId::from(1)); + let queue_id_11 = queue_id(&index_uid2, &source_id, &ShardId::from(1)); let primary_shard_11 = leader_state_guard.shards.get(&queue_id_11).unwrap(); primary_shard_11.assert_is_primary(); primary_shard_11.assert_is_open(); @@ -2497,19 +2482,23 @@ mod tests { } }); let follower_channel = Endpoint::from_static("http://127.0.0.1:7777").connect_lazy(); - let follower_grpc_client = IngesterServiceClient::from_channel( + let follower_client = IngesterServiceClient::from_channel( "127.0.0.1:7777".parse().unwrap(), follower_channel, MAX_GRPC_MESSAGE_SIZE, None, ); - + let follower_ingester = IngesterPoolEntry { + client: follower_client, + status: IngesterStatus::Ready, + }; leader_ctx .ingester_pool - .insert(follower_ctx.node_id.clone(), follower_grpc_client); + .insert(follower_ctx.node_id.clone(), follower_ingester); let index_uid = IndexUid::for_test("test-index", 0); - let index_uid2: IndexUid = IndexUid::for_test("test-index", 1); + let 
index_uid2 = IndexUid::for_test("test-index", 1); + let source_id = SourceId::from("test-source"); let doc_mapping_uid = DocMappingUid::random(); let doc_mapping_json = format!( @@ -2523,7 +2512,7 @@ mod tests { subrequest_id: 0, shard: Some(Shard { index_uid: Some(index_uid.clone()), - source_id: "test-source".to_string(), + source_id: source_id.clone(), shard_id: Some(ShardId::from(1)), shard_state: ShardState::Open as i32, leader_id: leader_ctx.node_id.to_string(), @@ -2538,7 +2527,7 @@ mod tests { subrequest_id: 1, shard: Some(Shard { index_uid: Some(index_uid2.clone()), - source_id: "test-source".to_string(), + source_id: source_id.clone(), shard_id: Some(ShardId::from(1)), shard_state: ShardState::Open as i32, leader_id: leader_ctx.node_id.to_string(), @@ -2560,14 +2549,14 @@ mod tests { PersistSubrequest { subrequest_id: 0, index_uid: Some(index_uid.clone()), - source_id: "test-source".to_string(), + source_id: source_id.clone(), shard_id: Some(ShardId::from(1)), doc_batch: Some(DocBatchV2::for_test([r#"{"doc": "test-doc-010"}"#])), }, PersistSubrequest { subrequest_id: 1, index_uid: Some(index_uid2.clone()), - source_id: "test-source".to_string(), + source_id: source_id.clone(), shard_id: Some(ShardId::from(1)), doc_batch: Some(DocBatchV2::for_test([ r#"{"doc": "test-doc-110"}"#, @@ -2604,7 +2593,7 @@ mod tests { let leader_state_guard = leader.state.lock_fully().await.unwrap(); assert_eq!(leader_state_guard.shards.len(), 2); - let queue_id_01 = queue_id(&index_uid, "test-source", &ShardId::from(1)); + let queue_id_01 = queue_id(&index_uid, &source_id, &ShardId::from(1)); let primary_shard_01 = leader_state_guard.shards.get(&queue_id_01).unwrap(); primary_shard_01.assert_is_primary(); primary_shard_01.assert_is_open(); @@ -2616,7 +2605,7 @@ mod tests { &[(0, [0, 0], r#"{"doc": "test-doc-010"}"#)], ); - let queue_id_11 = queue_id(&index_uid2, "test-source", &ShardId::from(1)); + let queue_id_11 = queue_id(&index_uid2, &source_id, &ShardId::from(1)); let 
primary_shard_11 = leader_state_guard.shards.get(&queue_id_11).unwrap(); primary_shard_11.assert_is_primary(); primary_shard_11.assert_is_open(); @@ -2664,13 +2653,11 @@ mod tests { async fn test_ingester_persist_no_available_shards() { let (ingester_ctx, ingester) = IngesterForTest::default().build().await; let index_uid = IndexUid::for_test("test-index", 0); - let solo_shard = IngesterShard::new_solo( - index_uid.clone(), - "test-source".to_string(), - ShardId::from(1), - ) - .with_state(ShardState::Closed) - .build(); + let source_id = SourceId::from("test-source"); + let solo_shard = + IngesterShard::new_solo(index_uid.clone(), source_id.clone(), ShardId::from(1)) + .with_state(ShardState::Closed) + .build(); let queue_id = solo_shard.queue_id(); ingester .state @@ -2686,7 +2673,7 @@ mod tests { subrequests: vec![PersistSubrequest { subrequest_id: 0, index_uid: Some(index_uid.clone()), - source_id: "test-source".to_string(), + source_id, shard_id: Some(ShardId::from(1)), doc_batch: Some(DocBatchV2::for_test([r#"{"doc": "test-doc-010"}"#])), }], @@ -2727,6 +2714,7 @@ mod tests { .await; let index_uid = IndexUid::for_test("test-index", 0); + let source_id = SourceId::from("test-source"); let doc_mapping_uid = DocMappingUid::random(); let doc_mapping_json = format!( @@ -2736,7 +2724,7 @@ mod tests { ); let primary_shard = Shard { index_uid: Some(index_uid.clone()), - source_id: "test-source".to_string(), + source_id: source_id.clone(), shard_id: Some(ShardId::from(1)), shard_state: ShardState::Open as i32, leader_id: ingester_ctx.node_id.to_string(), @@ -2765,7 +2753,7 @@ mod tests { subrequests: vec![PersistSubrequest { subrequest_id: 0, index_uid: Some(index_uid.clone()), - source_id: "test-source".to_string(), + source_id: source_id.clone(), shard_id: Some(ShardId::from(1)), doc_batch: Some(DocBatchV2::for_test([r#"{"doc": "test-doc-010"}"#])), }], @@ -2788,7 +2776,7 @@ mod tests { let state_guard = ingester.state.lock_fully().await.unwrap(); 
assert_eq!(state_guard.shards.len(), 1); - let queue_id_01 = queue_id(&index_uid, "test-source", &ShardId::from(1)); + let queue_id_01 = queue_id(&index_uid, &source_id, &ShardId::from(1)); let solo_shard_01 = state_guard.shards.get(&queue_id_01).unwrap(); solo_shard_01.assert_is_solo(); @@ -2808,6 +2796,7 @@ mod tests { .await; let index_uid = IndexUid::for_test("test-index", 0); + let source_id = SourceId::from("test-source"); let doc_mapping_uid = DocMappingUid::random(); let doc_mapping_json = format!( @@ -2817,7 +2806,7 @@ mod tests { ); let primary_shard = Shard { index_uid: Some(index_uid.clone()), - source_id: "test-source".to_string(), + source_id: source_id.clone(), shard_id: Some(ShardId::from(1)), shard_state: ShardState::Open as i32, leader_id: ingester_ctx.node_id.to_string(), @@ -2846,7 +2835,7 @@ mod tests { subrequests: vec![PersistSubrequest { subrequest_id: 0, index_uid: Some(index_uid.clone()), - source_id: "test-source".to_string(), + source_id: source_id.clone(), shard_id: Some(ShardId::from(1)), doc_batch: Some(DocBatchV2::for_test([r#"{"doc": "test-doc-010"}"#])), }], @@ -2866,7 +2855,7 @@ mod tests { let state_guard = ingester.state.lock_fully().await.unwrap(); assert_eq!(state_guard.shards.len(), 1); - let queue_id_01 = queue_id(&index_uid, "test-source", &ShardId::from(1)); + let queue_id_01 = queue_id(&index_uid, &source_id, &ShardId::from(1)); let solo_shard_01 = state_guard.shards.get(&queue_id_01).unwrap(); solo_shard_01.assert_is_solo(); solo_shard_01.assert_is_open(); @@ -2916,10 +2905,11 @@ mod tests { let (_ingester_ctx, ingester) = IngesterForTest::default().build().await; let index_uid = IndexUid::for_test("test-index", 0); + let source_id = SourceId::from("test-source"); let open_fetch_stream_request = OpenFetchStreamRequest { client_id: "test-client".to_string(), index_uid: Some(index_uid.clone()), - source_id: "test-source".to_string(), + source_id: source_id.clone(), shard_id: Some(ShardId::from(1337)), 
from_position_exclusive: Some(Position::Beginning), }; @@ -2939,13 +2929,13 @@ mod tests { ); let shard = Shard { index_uid: Some(index_uid.clone()), - source_id: "test-source".to_string(), + source_id: source_id.clone(), shard_id: Some(ShardId::from(1)), shard_state: ShardState::Open as i32, doc_mapping_uid: Some(doc_mapping_uid), ..Default::default() }; - let queue_id = queue_id(&index_uid, "test-source", &ShardId::from(1)); + let queue_id = queue_id(&index_uid, &source_id, &ShardId::from(1)); let mut state_guard = ingester.state.lock_fully().await.unwrap(); @@ -2974,7 +2964,7 @@ mod tests { let open_fetch_stream_request = OpenFetchStreamRequest { client_id: "test-client".to_string(), index_uid: Some(index_uid.clone()), - source_id: "test-source".to_string(), + source_id, shard_id: Some(ShardId::from(1)), from_position_exclusive: Some(Position::Beginning), }; @@ -3039,8 +3029,9 @@ mod tests { let (ingester_ctx, ingester) = IngesterForTest::default().build().await; let index_uid = IndexUid::for_test("test-index", 0); - let queue_id_01 = queue_id(&index_uid, "test-source", &ShardId::from(1)); - let queue_id_02 = queue_id(&index_uid, "test-source", &ShardId::from(2)); + let source_id = SourceId::from("test-source"); + let queue_id_01 = queue_id(&index_uid, &source_id, &ShardId::from(1)); + let queue_id_02 = queue_id(&index_uid, &source_id, &ShardId::from(2)); let doc_mapping_uid_01 = DocMappingUid::random(); let doc_mapping_json_01 = format!( @@ -3050,7 +3041,7 @@ mod tests { ); let shard_01 = Shard { index_uid: Some(index_uid.clone()), - source_id: "test-source".to_string(), + source_id: source_id.clone(), shard_id: Some(ShardId::from(1)), shard_state: ShardState::Open as i32, doc_mapping_uid: Some(doc_mapping_uid_01), @@ -3065,7 +3056,7 @@ mod tests { ); let shard_02 = Shard { index_uid: Some(index_uid.clone()), - source_id: "test-source".to_string(), + source_id: source_id.clone(), shard_id: Some(ShardId::from(2)), shard_state: ShardState::Closed as i32, 
doc_mapping_uid: Some(doc_mapping_uid_02), @@ -3127,19 +3118,19 @@ mod tests { subrequests: vec![ TruncateShardsSubrequest { index_uid: Some(index_uid.clone()), - source_id: "test-source".to_string(), + source_id: source_id.clone(), shard_id: Some(ShardId::from(1)), truncate_up_to_position_inclusive: Some(Position::offset(0u64)), }, TruncateShardsSubrequest { index_uid: Some(index_uid.clone()), - source_id: "test-source".to_string(), + source_id: source_id.clone(), shard_id: Some(ShardId::from(2)), truncate_up_to_position_inclusive: Some(Position::eof(0u64)), }, TruncateShardsSubrequest { index_uid: Some(IndexUid::for_test("test-index", 1337)), - source_id: "test-source".to_string(), + source_id, shard_id: Some(ShardId::from(1337)), truncate_up_to_position_inclusive: Some(Position::offset(1337u64)), }, @@ -3174,14 +3165,11 @@ mod tests { let (ingester_ctx, ingester) = IngesterForTest::default().build().await; let index_uid = IndexUid::for_test("test-index", 0); + let source_id = SourceId::from("test-source"); let mut state_guard = ingester.state.lock_fully().await.unwrap(); - let solo_shard = IngesterShard::new_solo( - index_uid.clone(), - "test-source".to_string(), - ShardId::from(1), - ) - .build(); + let solo_shard = + IngesterShard::new_solo(index_uid.clone(), source_id.clone(), ShardId::from(1)).build(); state_guard.shards.insert(solo_shard.queue_id(), solo_shard); drop(state_guard); @@ -3189,7 +3177,7 @@ mod tests { ingester_id: ingester_ctx.node_id.to_string(), subrequests: vec![TruncateShardsSubrequest { index_uid: Some(index_uid.clone()), - source_id: "test-source".to_string(), + source_id, shard_id: Some(ShardId::from(1)), truncate_up_to_position_inclusive: Some(Position::offset(0u64)), }], @@ -3249,6 +3237,7 @@ mod tests { .await; let index_uid = IndexUid::for_test("test-index", 0); + let source_id = SourceId::from("test-source"); let doc_mapping_uid = DocMappingUid::random(); let doc_mapping_json = format!( @@ -3258,7 +3247,7 @@ mod tests { ); let 
shard_01 = Shard { index_uid: Some(index_uid.clone()), - source_id: "test-source".to_string(), + source_id: source_id.clone(), shard_id: Some(ShardId::from(1)), shard_state: ShardState::Open as i32, doc_mapping_uid: Some(doc_mapping_uid), @@ -3266,13 +3255,13 @@ mod tests { }; let shard_02 = Shard { index_uid: Some(index_uid.clone()), - source_id: "test-source".to_string(), + source_id: source_id.clone(), shard_id: Some(ShardId::from(2)), shard_state: ShardState::Open as i32, doc_mapping_uid: Some(doc_mapping_uid), ..Default::default() }; - let queue_id_02 = queue_id(&index_uid, "test-source", &ShardId::from(2)); + let queue_id_02 = queue_id(&index_uid, &source_id, &ShardId::from(2)); let mut state_guard = ingester.state.lock_fully().await.unwrap(); let now = Instant::now(); @@ -3328,6 +3317,7 @@ mod tests { let (_ingester_ctx, ingester) = IngesterForTest::default().build().await; let index_uid = IndexUid::for_test("test-index", 0); + let source_id = SourceId::from("test-source"); let doc_mapping_uid = DocMappingUid::random(); let doc_mapping_json = format!( @@ -3337,7 +3327,7 @@ mod tests { ); let shard_17 = Shard { index_uid: Some(index_uid.clone()), - source_id: "test-source".to_string(), + source_id: source_id.clone(), shard_id: Some(ShardId::from(17)), shard_state: ShardState::Open as i32, doc_mapping_uid: Some(doc_mapping_uid), @@ -3346,7 +3336,7 @@ mod tests { let shard_18 = Shard { index_uid: Some(index_uid.clone()), - source_id: "test-source".to_string(), + source_id: source_id.clone(), shard_id: Some(ShardId::from(18)), shard_state: ShardState::Closed as i32, doc_mapping_uid: Some(doc_mapping_uid), @@ -3395,7 +3385,7 @@ mod tests { let retain_shards_request = RetainShardsRequest { retain_shards_for_sources: vec![RetainShardsForSource { index_uid: Some(index_uid.clone()), - source_id: "test-source".to_string(), + source_id, shard_ids: vec![ShardId::from(17u64)], }], }; @@ -3413,7 +3403,8 @@ mod tests { let (_ingester_ctx, ingester) = 
IngesterForTest::default().build().await; let index_uid = IndexUid::for_test("test-index", 0); - let queue_id = queue_id(&index_uid, "test-source", &ShardId::from(1)); + let source_id = SourceId::from("test-source"); + let queue_id = queue_id(&index_uid, &source_id, &ShardId::from(1)); let doc_mapping_uid = DocMappingUid::random(); let doc_mapping_json = format!( @@ -3423,7 +3414,7 @@ mod tests { ); let shard = Shard { index_uid: Some(index_uid.clone()), - source_id: "test-source".to_string(), + source_id: source_id.clone(), shard_id: Some(ShardId::from(1)), shard_state: ShardState::Open as i32, doc_mapping_uid: Some(doc_mapping_uid), @@ -3447,7 +3438,7 @@ mod tests { let open_fetch_stream_request = OpenFetchStreamRequest { client_id: "test-client".to_string(), index_uid: Some(index_uid.clone()), - source_id: "test-source".to_string(), + source_id: source_id.clone(), shard_id: Some(ShardId::from(1)), from_position_exclusive: Some(Position::Beginning), }; @@ -3460,12 +3451,12 @@ mod tests { shard_pkeys: vec![ ShardPKey { index_uid: Some(index_uid.clone()), - source_id: "test-source".to_string(), + source_id: source_id.clone(), shard_id: Some(ShardId::from(1)), }, ShardPKey { index_uid: Some(index_uid.clone()), - source_id: "test-source".to_string(), + source_id, shard_id: Some(ShardId::from(1337)), }, ], @@ -3514,7 +3505,9 @@ mod tests { assert_eq!(observation.status(), IngesterStatus::Ready); let mut state_guard = ingester.state.lock_fully().await.unwrap(); - state_guard.set_status(IngesterStatus::Decommissioning); + state_guard + .set_status(IngesterStatus::Decommissioning) + .await; drop(state_guard); let observation = observation_stream.next().await.unwrap().unwrap(); @@ -3527,40 +3520,87 @@ mod tests { assert!(observation_opt.is_none()); } + #[tokio::test] + async fn test_ingester_decommission() { + let (_ingester_ctx, ingester) = IngesterForTest::default().build().await; + + let mut state_guard = ingester.state.lock_fully().await.unwrap(); + let index_uid = 
IndexUid::for_test("test-index", 0); + let source_id = SourceId::from("test-source"); + + let shard = IngesterShard::new_solo(index_uid, source_id, ShardId::from(1)).build(); + let queue_id = shard.queue_id(); + + state_guard.shards.insert(queue_id.clone(), shard); + drop(state_guard); + + let mut observation_stream = ingester + .open_observation_stream(OpenObservationStreamRequest {}) + .await + .unwrap(); + + ingester.decommission(DecommissionRequest {}).await.unwrap(); + + let next_observation = observation_stream.next().await.unwrap().unwrap(); + let next_status = next_observation.status(); + assert_eq!(next_status, IngesterStatus::Retiring); + + wait_for_ingester_status( + &ingester, + IngesterStatus::Decommissioning, + Duration::from_secs(1), + ) + .await + .unwrap(); + + let state_guard = ingester.state.lock_fully().await.unwrap(); + let shard = state_guard.shards.get(&queue_id).unwrap(); + shard.assert_is_closed(); + } + #[tokio::test] async fn test_check_decommissioning_status() { let (_ingester_ctx, ingester) = IngesterForTest::default().build().await; let mut state_guard = ingester.state.lock_fully().await.unwrap(); - ingester.check_decommissioning_status(&mut state_guard); + ingester + .check_decommissioning_status(&mut state_guard) + .await; assert_eq!(state_guard.status(), IngesterStatus::Ready); - state_guard.set_status(IngesterStatus::Decommissioning); - ingester.check_decommissioning_status(&mut state_guard); + state_guard + .set_status(IngesterStatus::Decommissioning) + .await; + ingester + .check_decommissioning_status(&mut state_guard) + .await; assert_eq!(state_guard.status(), IngesterStatus::Decommissioned); - state_guard.set_status(IngesterStatus::Decommissioning); + state_guard + .set_status(IngesterStatus::Decommissioning) + .await; let index_uid = IndexUid::for_test("test-index", 0); + let source_id = SourceId::from("test-source"); - let solo_shard = IngesterShard::new_solo( - index_uid.clone(), - "test-source".to_string(), - 
ShardId::from(1), - ) - .with_state(ShardState::Closed) - .with_replication_position_inclusive(Position::offset(12u64)) - .build(); + let solo_shard = IngesterShard::new_solo(index_uid.clone(), source_id, ShardId::from(1)) + .with_state(ShardState::Closed) + .with_replication_position_inclusive(Position::offset(12u64)) + .build(); let queue_id = solo_shard.queue_id(); state_guard.shards.insert(queue_id.clone(), solo_shard); - ingester.check_decommissioning_status(&mut state_guard); + ingester + .check_decommissioning_status(&mut state_guard) + .await; assert_eq!(state_guard.status(), IngesterStatus::Decommissioning); let shard = state_guard.shards.get_mut(&queue_id).unwrap(); shard.truncation_position_inclusive = Position::Beginning.as_eof(); - ingester.check_decommissioning_status(&mut state_guard); + ingester + .check_decommissioning_status(&mut state_guard) + .await; assert_eq!(state_guard.status(), IngesterStatus::Decommissioned); } @@ -3571,6 +3611,7 @@ mod tests { ingester.subscribe(&event_broker); let index_uid = IndexUid::for_test("test-index", 0); + let source_id = SourceId::from("test-source"); let doc_mapping_uid = DocMappingUid::random(); let doc_mapping_json = format!( @@ -3580,23 +3621,23 @@ mod tests { ); let shard_01 = Shard { index_uid: Some(index_uid.clone()), - source_id: "test-source".to_string(), + source_id: source_id.clone(), shard_id: Some(ShardId::from(1)), shard_state: ShardState::Open as i32, doc_mapping_uid: Some(doc_mapping_uid), ..Default::default() }; - let queue_id_01 = queue_id(&index_uid, "test-source", &ShardId::from(1)); + let queue_id_01 = queue_id(&index_uid, &source_id, &ShardId::from(1)); let shard_02 = Shard { index_uid: Some(index_uid.clone()), - source_id: "test-source".to_string(), + source_id: source_id.clone(), shard_id: Some(ShardId::from(2)), shard_state: ShardState::Closed as i32, doc_mapping_uid: Some(doc_mapping_uid), ..Default::default() }; - let queue_id_02 = queue_id(&index_uid, "test-source", 
&ShardId::from(2)); + let queue_id_02 = queue_id(&index_uid, &source_id, &ShardId::from(2)); let mut state_guard = ingester.state.lock_fully().await.unwrap(); let now = Instant::now(); @@ -3649,7 +3690,7 @@ mod tests { let shard_position_update = ShardPositionsUpdate { source_uid: SourceUid { index_uid: index_uid.clone(), - source_id: "test-source".to_string(), + source_id, }, updated_shard_positions: vec![ (ShardId::from(1), Position::offset(0u64)), @@ -3689,7 +3730,8 @@ mod tests { .await; let index_uid = IndexUid::for_test("test-index", 0); - let queue_id_01 = queue_id(&index_uid, "test-source", &ShardId::from(1)); + let source_id = SourceId::from("test-source"); + let queue_id_01 = queue_id(&index_uid, &source_id, &ShardId::from(1)); let doc_mapping_uid = DocMappingUid::random(); let doc_mapping_json = format!( @@ -3699,7 +3741,7 @@ mod tests { ); let shard_01 = Shard { index_uid: Some(index_uid.clone()), - source_id: "test-source".to_string(), + source_id, shard_id: Some(ShardId::from(1)), shard_state: ShardState::Open as i32, doc_mapping_uid: Some(doc_mapping_uid), @@ -3742,6 +3784,7 @@ mod tests { let index_uid_0: IndexUid = IndexUid::for_test("test-index-0", 0); let index_uid_1: IndexUid = IndexUid::for_test("test-index-1", 0); + let source_id = SourceId::from("test-source"); let doc_mapping_uid = DocMappingUid::random(); let doc_mapping_json = format!( @@ -3751,7 +3794,7 @@ mod tests { ); let shard_01 = Shard { index_uid: Some(index_uid_0.clone()), - source_id: "test-source".to_string(), + source_id: source_id.clone(), shard_id: Some(ShardId::from(1)), shard_state: ShardState::Open as i32, doc_mapping_uid: Some(doc_mapping_uid), @@ -3759,7 +3802,7 @@ mod tests { }; let shard_02 = Shard { index_uid: Some(index_uid_0.clone()), - source_id: "test-source".to_string(), + source_id: source_id.clone(), shard_id: Some(ShardId::from(2)), shard_state: ShardState::Closed as i32, doc_mapping_uid: Some(doc_mapping_uid), @@ -3767,7 +3810,7 @@ mod tests { }; let shard_03 
= Shard { index_uid: Some(index_uid_1.clone()), - source_id: "test-source".to_string(), + source_id, shard_id: Some(ShardId::from(3)), shard_state: ShardState::Closed as i32, doc_mapping_uid: Some(doc_mapping_uid), diff --git a/quickwit/quickwit-ingest/src/ingest_v2/mod.rs b/quickwit/quickwit-ingest/src/ingest_v2/mod.rs index c8543faf793..977b50581b7 100644 --- a/quickwit/quickwit-ingest/src/ingest_v2/mod.rs +++ b/quickwit/quickwit-ingest/src/ingest_v2/mod.rs @@ -16,6 +16,7 @@ mod broadcast; mod debouncing; mod doc_mapper; mod fetch; +mod helpers; mod idle; mod ingester; mod metrics; @@ -41,7 +42,7 @@ use bytes::buf::Writer; use bytes::{BufMut, BytesMut}; use bytesize::ByteSize; use quickwit_common::tower::Pool; -use quickwit_proto::ingest::ingester::IngesterServiceClient; +use quickwit_proto::ingest::ingester::{IngesterServiceClient, IngesterStatus}; use quickwit_proto::ingest::router::{IngestRequestV2, IngestSubrequest}; use quickwit_proto::ingest::{CommitTypeV2, DocBatchV2}; use quickwit_proto::types::{DocUid, DocUidGenerator, IndexId, NodeId, SubrequestId}; @@ -50,12 +51,21 @@ use tracing::{error, info}; use workbench::pending_subrequests; pub use self::fetch::{FetchStreamError, MultiFetchStream}; -pub use self::ingester::{Ingester, wait_for_ingester_decommission, wait_for_ingester_status}; +pub use self::helpers::{ + try_get_ingester_status, wait_for_ingester_decommission, wait_for_ingester_status, +}; +pub use self::ingester::Ingester; use self::mrecord::MRECORD_HEADER_LEN; pub use self::mrecord::{MRecord, decoded_mrecords}; pub use self::router::IngestRouter; -pub type IngesterPool = Pool; +#[derive(Clone)] +pub struct IngesterPoolEntry { + pub client: IngesterServiceClient, + pub status: IngesterStatus, +} + +pub type IngesterPool = Pool; /// Identifies an ingester client, typically a source, for logging and debugging purposes. 
pub type ClientId = String; diff --git a/quickwit/quickwit-ingest/src/ingest_v2/replication.rs b/quickwit/quickwit-ingest/src/ingest_v2/replication.rs index 0071f685044..bbf0cd037c5 100644 --- a/quickwit/quickwit-ingest/src/ingest_v2/replication.rs +++ b/quickwit/quickwit-ingest/src/ingest_v2/replication.rs @@ -761,6 +761,8 @@ impl Drop for ReplicationTaskHandle { #[cfg(test)] mod tests { + use quickwit_cluster::{ChannelTransport, create_cluster_for_test}; + use quickwit_config::service::QuickwitService; use quickwit_proto::ingest::ingester::{ReplicateSubrequest, ReplicateSuccess}; use quickwit_proto::ingest::{DocBatchV2, Shard}; use quickwit_proto::types::{IndexUid, Position, ShardId, queue_id}; @@ -1036,7 +1038,15 @@ mod tests { async fn test_replication_task_happy_path() { let leader_id: NodeId = "test-leader".into(); let follower_id: NodeId = "test-follower".into(); - let (_temp_dir, state) = IngesterState::for_test().await; + let cluster = create_cluster_for_test( + Vec::new(), + &[QuickwitService::Indexer.as_str()], + &ChannelTransport::default(), + true, + ) + .await + .unwrap(); + let (_temp_dir, state) = IngesterState::for_test(cluster).await; let (syn_replication_stream_tx, syn_replication_stream) = ServiceStream::new_bounded(SYN_REPLICATION_STREAM_CAPACITY); let (ack_replication_stream_tx, mut ack_replication_stream) = @@ -1299,7 +1309,15 @@ mod tests { async fn test_replication_task_shard_closed() { let leader_id: NodeId = "test-leader".into(); let follower_id: NodeId = "test-follower".into(); - let (_temp_dir, state) = IngesterState::for_test().await; + let cluster = create_cluster_for_test( + Vec::new(), + &[QuickwitService::Indexer.as_str()], + &ChannelTransport::default(), + true, + ) + .await + .unwrap(); + let (_temp_dir, state) = IngesterState::for_test(cluster).await; let (syn_replication_stream_tx, syn_replication_stream) = ServiceStream::new_bounded(SYN_REPLICATION_STREAM_CAPACITY); let (ack_replication_stream_tx, mut ack_replication_stream) = 
@@ -1376,7 +1394,15 @@ mod tests { async fn test_replication_task_deletes_dangling_shard() { let leader_id: NodeId = "test-leader".into(); let follower_id: NodeId = "test-follower".into(); - let (_temp_dir, state) = IngesterState::for_test().await; + let cluster = create_cluster_for_test( + Vec::new(), + &[QuickwitService::Indexer.as_str()], + &ChannelTransport::default(), + true, + ) + .await + .unwrap(); + let (_temp_dir, state) = IngesterState::for_test(cluster).await; let (syn_replication_stream_tx, syn_replication_stream) = ServiceStream::new_bounded(SYN_REPLICATION_STREAM_CAPACITY); let (ack_replication_stream_tx, mut ack_replication_stream) = @@ -1464,7 +1490,15 @@ mod tests { let leader_id: NodeId = "test-leader".into(); let follower_id: NodeId = "test-follower".into(); - let (_temp_dir, state) = IngesterState::for_test().await; + let cluster = create_cluster_for_test( + Vec::new(), + &[QuickwitService::Indexer.as_str()], + &ChannelTransport::default(), + true, + ) + .await + .unwrap(); + let (_temp_dir, state) = IngesterState::for_test(cluster).await; let (syn_replication_stream_tx, syn_replication_stream) = ServiceStream::new_bounded(SYN_REPLICATION_STREAM_CAPACITY); let (ack_replication_stream_tx, mut ack_replication_stream) = @@ -1553,7 +1587,15 @@ mod tests { async fn test_replication_task_resource_exhausted() { let leader_id: NodeId = "test-leader".into(); let follower_id: NodeId = "test-follower".into(); - let (_temp_dir, state) = IngesterState::for_test().await; + let cluster = create_cluster_for_test( + Vec::new(), + &[QuickwitService::Indexer.as_str()], + &ChannelTransport::default(), + true, + ) + .await + .unwrap(); + let (_temp_dir, state) = IngesterState::for_test(cluster).await; let (syn_replication_stream_tx, syn_replication_stream) = ServiceStream::new_bounded(SYN_REPLICATION_STREAM_CAPACITY); let (ack_replication_stream_tx, mut ack_replication_stream) = diff --git a/quickwit/quickwit-ingest/src/ingest_v2/router.rs 
b/quickwit/quickwit-ingest/src/ingest_v2/router.rs index 67ad31a2722..12a031960ab 100644 --- a/quickwit/quickwit-ingest/src/ingest_v2/router.rs +++ b/quickwit/quickwit-ingest/src/ingest_v2/router.rs @@ -434,7 +434,7 @@ impl IngestRouter { let persist_future = async move { let persist_result = tokio::time::timeout( PERSIST_REQUEST_TIMEOUT, - ingester.persist(persist_request), + ingester.client.persist(persist_request), ) .await .unwrap_or_else(|_| { @@ -709,7 +709,8 @@ mod tests { GetOrCreateOpenShardsResponse, GetOrCreateOpenShardsSuccess, MockControlPlaneService, }; use quickwit_proto::ingest::ingester::{ - IngesterServiceClient, MockIngesterService, PersistFailure, PersistResponse, PersistSuccess, + IngesterServiceClient, IngesterStatus, MockIngesterService, PersistFailure, + PersistResponse, PersistSuccess, }; use quickwit_proto::ingest::router::IngestSubrequest; use quickwit_proto::ingest::{ @@ -720,6 +721,7 @@ mod tests { use super::*; use crate::RateMibPerSec; + use crate::ingest_v2::IngesterPoolEntry; use crate::ingest_v2::broadcast::ShardInfo; use crate::ingest_v2::routing_table::{RoutingEntry, RoutingTableEntry}; use crate::ingest_v2::workbench::SubworkbenchFailure; @@ -841,7 +843,12 @@ mod tests { drop(rendezvous_1); drop(rendezvous_2); - ingester_pool.insert("test-ingester-0".into(), IngesterServiceClient::mocked()); + let client_0 = IngesterServiceClient::mocked(); + let ingester_0 = IngesterPoolEntry { + client: client_0, + status: IngesterStatus::Ready, + }; + ingester_pool.insert("test-ingester-0".into(), ingester_0); { // Ingester-0 has been marked as unavailable due to the previous requests. 
let (get_or_create_open_shard_request_opt, _rendezvous) = router @@ -1359,8 +1366,19 @@ mod tests { let control_plane = ControlPlaneServiceClient::from_mock(MockControlPlaneService::new()); let ingester_pool = IngesterPool::default(); - ingester_pool.insert("test-ingester-0".into(), IngesterServiceClient::mocked()); - ingester_pool.insert("test-ingester-1".into(), IngesterServiceClient::mocked()); + let client_0 = IngesterServiceClient::mocked(); + let ingester_0 = IngesterPoolEntry { + client: client_0, + status: IngesterStatus::Ready, + }; + ingester_pool.insert("test-ingester-0".into(), ingester_0); + + let client_1 = IngesterServiceClient::mocked(); + let ingester_1 = IngesterPoolEntry { + client: client_1, + status: IngesterStatus::Ready, + }; + ingester_pool.insert("test-ingester-1".into(), ingester_1); let replication_factor = 1; let router = IngestRouter::new( @@ -1589,7 +1607,11 @@ mod tests { }; Ok(response) }); - let ingester_0 = IngesterServiceClient::from_mock(mock_ingester_0); + let client_0 = IngesterServiceClient::from_mock(mock_ingester_0); + let ingester_0 = IngesterPoolEntry { + client: client_0, + status: IngesterStatus::Ready, + }; ingester_pool.insert("test-ingester-0".into(), ingester_0.clone()); let mut mock_ingester_1 = MockIngesterService::new(); @@ -1626,7 +1648,11 @@ mod tests { }; Ok(response) }); - let ingester_1 = IngesterServiceClient::from_mock(mock_ingester_1); + let client_1 = IngesterServiceClient::from_mock(mock_ingester_1); + let ingester_1 = IngesterPoolEntry { + client: client_1, + status: IngesterStatus::Ready, + }; ingester_pool.insert("test-ingester-1".into(), ingester_1); let ingest_request = IngestRequestV2 { @@ -1774,7 +1800,11 @@ mod tests { }; Ok(response) }); - let ingester_0 = IngesterServiceClient::from_mock(mock_ingester_0); + let client_0 = IngesterServiceClient::from_mock(mock_ingester_0); + let ingester_0 = IngesterPoolEntry { + client: client_0, + status: IngesterStatus::Ready, + }; 
ingester_pool.insert("test-ingester-0".into(), ingester_0.clone()); let ingest_request = IngestRequestV2 { @@ -2077,7 +2107,11 @@ mod tests { }) .in_sequence(&mut seq); - let ingester_0 = IngesterServiceClient::from_mock(mock_ingester_0); + let client_0 = IngesterServiceClient::from_mock(mock_ingester_0); + let ingester_0 = IngesterPoolEntry { + client: client_0, + status: IngesterStatus::Ready, + }; ingester_pool.insert("test-ingester-0".into(), ingester_0.clone()); let ingest_request = IngestRequestV2 { @@ -2153,7 +2187,11 @@ mod tests { }; Ok(response) }); - let ingester_0 = IngesterServiceClient::from_mock(mock_ingester_0); + let client_0 = IngesterServiceClient::from_mock(mock_ingester_0); + let ingester_0 = IngesterPoolEntry { + client: client_0, + status: IngesterStatus::Ready, + }; ingester_pool.insert("test-ingester-0".into(), ingester_0.clone()); let ingest_request = IngestRequestV2 { diff --git a/quickwit/quickwit-ingest/src/ingest_v2/routing_table.rs b/quickwit/quickwit-ingest/src/ingest_v2/routing_table.rs index 987d754ed69..3eef8788f64 100644 --- a/quickwit/quickwit-ingest/src/ingest_v2/routing_table.rs +++ b/quickwit/quickwit-ingest/src/ingest_v2/routing_table.rs @@ -126,12 +126,17 @@ impl RoutingTableEntry { if unavailable_leaders.contains(&shard.leader_id) { continue; } - if ingester_pool.contains_key(&shard.leader_id) { - return true; - } else { + let Some(ingester) = ingester_pool.get(&shard.leader_id) else { let leader_id: NodeId = shard.leader_id.clone(); unavailable_leaders.insert(leader_id); + continue; + }; + if !ingester.status.is_ready() { + let leader_id: NodeId = shard.leader_id.clone(); + unavailable_leaders.insert(leader_id); + continue; } + return true; } } } @@ -164,7 +169,9 @@ impl RoutingTableEntry { error = NextOpenShardError::RateLimited; continue; } - if ingester_pool.contains_key(&shard_routing_entry.leader_id) { + if let Some(ingester) = ingester_pool.get(&shard_routing_entry.leader_id) + && ingester.status.is_ready() + { 
return Ok(shard_routing_entry); } } @@ -497,15 +504,16 @@ impl RoutingTable { #[cfg(test)] mod tests { use quickwit_proto::ingest::ShardState; - use quickwit_proto::ingest::ingester::IngesterServiceClient; + use quickwit_proto::ingest::ingester::{IngesterServiceClient, IngesterStatus}; use super::*; + use crate::IngesterPoolEntry; #[test] fn test_routing_table_entry_new() { let self_node_id: NodeId = "test-node-0".into(); let index_uid = IndexUid::for_test("test-index", 0); - let source_id: SourceId = "test-source".into(); + let source_id = SourceId::from("test-source"); let table_entry = RoutingTableEntry::new( &self_node_id, index_uid.clone(), @@ -518,7 +526,7 @@ mod tests { let shards = vec![ Shard { index_uid: Some(index_uid.clone()), - source_id: "test-source".to_string(), + source_id: source_id.clone(), shard_id: Some(ShardId::from(3)), shard_state: ShardState::Open as i32, leader_id: "test-node-0".to_string(), @@ -526,7 +534,7 @@ mod tests { }, Shard { index_uid: Some(index_uid.clone()), - source_id: "test-source".to_string(), + source_id: source_id.clone(), shard_id: Some(ShardId::from(1)), shard_state: ShardState::Open as i32, leader_id: "test-node-0".to_string(), @@ -534,7 +542,7 @@ mod tests { }, Shard { index_uid: Some(index_uid.clone()), - source_id: "test-source".to_string(), + source_id: source_id.clone(), shard_id: Some(ShardId::from(2)), shard_state: ShardState::Open as i32, leader_id: "test-node-1".to_string(), @@ -542,7 +550,7 @@ mod tests { }, Shard { index_uid: Some(index_uid.clone()), - source_id: "test-source".to_string(), + source_id: source_id.clone(), shard_id: Some(ShardId::from(1)), shard_state: ShardState::Open as i32, leader_id: "test-node-0".to_string(), @@ -550,7 +558,7 @@ mod tests { }, Shard { index_uid: Some(index_uid.clone()), - source_id: "test-source".to_string(), + source_id: source_id.clone(), shard_id: Some(ShardId::from(4)), shard_state: ShardState::Closed as i32, leader_id: "test-node-0".to_string(), @@ -569,7 +577,7 @@ 
mod tests { #[test] fn test_routing_table_entry_has_open_shards() { let index_uid = IndexUid::for_test("test-index", 0); - let source_id: SourceId = "test-source".into(); + let source_id = SourceId::from("test-source"); let table_entry = RoutingTableEntry::empty(index_uid.clone(), source_id.clone()); let mut closed_shard_ids = Vec::new(); @@ -584,88 +592,114 @@ mod tests { assert!(closed_shard_ids.is_empty()); assert!(unavailable_leaders.is_empty()); - ingester_pool.insert("test-ingester-0".into(), IngesterServiceClient::mocked()); - ingester_pool.insert("test-ingester-1".into(), IngesterServiceClient::mocked()); - + // Ingester 0 is ready, but shard 0 is closed. + ingester_pool.insert( + "test-ingester-0".into(), + IngesterPoolEntry { + client: IngesterServiceClient::mocked(), + status: IngesterStatus::Ready, + }, + ); + // Ingester 1 is ready, and shard 1 is open, but it is declared as unavailable by the caller + // via `unavailable_leaders`. + ingester_pool.insert( + "test-ingester-1".into(), + IngesterPoolEntry { + client: IngesterServiceClient::mocked(), + status: IngesterStatus::Ready, + }, + ); + // Ingester 2 is unavailable, i.e. not in the ingester pool. + // Ingester 3 is retiring. + ingester_pool.insert( + "test-ingester-3".into(), + IngesterPoolEntry { + client: IngesterServiceClient::mocked(), + status: IngesterStatus::Retiring, + }, + ); + // Ingester 4 is ready, and shard 4 is open. 
+ ingester_pool.insert( + "test-ingester-4".into(), + IngesterPoolEntry { + client: IngesterServiceClient::mocked(), + status: IngesterStatus::Ready, + }, + ); let table_entry = RoutingTableEntry { index_uid: index_uid.clone(), source_id: source_id.clone(), local_shards: vec![ RoutingEntry { index_uid: index_uid.clone(), - source_id: "test-source".to_string(), - shard_id: ShardId::from(1), + source_id: source_id.clone(), + shard_id: ShardId::from(0), shard_state: ShardState::Closed, leader_id: "test-ingester-0".into(), }, RoutingEntry { index_uid: index_uid.clone(), - source_id: "test-source".to_string(), - shard_id: ShardId::from(2), - shard_state: ShardState::Open, - leader_id: "test-ingester-0".into(), - }, - ], - local_round_robin_idx: AtomicUsize::default(), - remote_shards: Vec::new(), - remote_round_robin_idx: AtomicUsize::default(), - }; - assert!(table_entry.has_open_shards( - &ingester_pool, - &mut closed_shard_ids, - &mut unavailable_leaders - )); - assert_eq!(closed_shard_ids.len(), 1); - assert_eq!(closed_shard_ids[0], ShardId::from(1)); - assert!(unavailable_leaders.is_empty()); - - closed_shard_ids.clear(); - - let table_entry = RoutingTableEntry { - index_uid: index_uid.clone(), - source_id, - local_shards: Vec::new(), - local_round_robin_idx: AtomicUsize::default(), - remote_shards: vec![ - RoutingEntry { - index_uid: index_uid.clone(), - source_id: "test-source".to_string(), + source_id: source_id.clone(), shard_id: ShardId::from(1), - shard_state: ShardState::Closed, + shard_state: ShardState::Open, leader_id: "test-ingester-1".into(), }, RoutingEntry { index_uid: index_uid.clone(), - source_id: "test-source".to_string(), + source_id: source_id.clone(), shard_id: ShardId::from(2), shard_state: ShardState::Open, leader_id: "test-ingester-2".into(), }, RoutingEntry { index_uid: index_uid.clone(), - source_id: "test-source".to_string(), + source_id: source_id.clone(), shard_id: ShardId::from(3), shard_state: ShardState::Open, - leader_id: 
"test-ingester-1".into(), + leader_id: "test-ingester-3".into(), + }, + RoutingEntry { + index_uid: index_uid.clone(), + source_id: source_id.clone(), + shard_id: ShardId::from(4), + shard_state: ShardState::Open, + leader_id: "test-ingester-4".into(), + }, + RoutingEntry { + index_uid: index_uid.clone(), + source_id: source_id.clone(), + shard_id: ShardId::from(4), + shard_state: ShardState::Open, + leader_id: "test-ingester-4".into(), }, ], + local_round_robin_idx: AtomicUsize::default(), + remote_shards: Vec::new(), remote_round_robin_idx: AtomicUsize::default(), }; + unavailable_leaders.insert("test-ingester-1".into()); + assert!(table_entry.has_open_shards( &ingester_pool, &mut closed_shard_ids, &mut unavailable_leaders )); - assert_eq!(closed_shard_ids.len(), 1); - assert_eq!(closed_shard_ids[0], ShardId::from(1)); - assert_eq!(unavailable_leaders.len(), 1); - assert!(unavailable_leaders.contains("test-ingester-2")); + assert_eq!(closed_shard_ids, vec![ShardId::from(0)]); + + assert_eq!( + unavailable_leaders, + HashSet::from_iter([ + "test-ingester-1".into(), + "test-ingester-2".into(), + "test-ingester-3".into(), + ]) + ); } #[test] fn test_routing_table_entry_next_open_shard_round_robin() { let index_uid = IndexUid::for_test("test-index", 0); - let source_id: SourceId = "test-source".into(); + let source_id = SourceId::from("test-source"); let table_entry = RoutingTableEntry::empty(index_uid.clone(), source_id.clone()); let ingester_pool = IngesterPool::default(); let mut rate_limited_shards = HashSet::new(); @@ -675,8 +709,20 @@ mod tests { .unwrap_err(); assert_eq!(error, NextOpenShardError::NoShardsAvailable); - ingester_pool.insert("test-ingester-0".into(), IngesterServiceClient::mocked()); - ingester_pool.insert("test-ingester-1".into(), IngesterServiceClient::mocked()); + ingester_pool.insert( + "test-ingester-0".into(), + IngesterPoolEntry { + client: IngesterServiceClient::mocked(), + status: IngesterStatus::Ready, + }, + ); + ingester_pool.insert( 
+ "test-ingester-1".into(), + IngesterPoolEntry { + client: IngesterServiceClient::mocked(), + status: IngesterStatus::Ready, + }, + ); let table_entry = RoutingTableEntry { index_uid: index_uid.clone(), @@ -684,21 +730,21 @@ mod tests { local_shards: vec![ RoutingEntry { index_uid: index_uid.clone(), - source_id: "test-source".to_string(), + source_id: source_id.clone(), shard_id: ShardId::from(1), shard_state: ShardState::Closed, leader_id: "test-ingester-0".into(), }, RoutingEntry { index_uid: index_uid.clone(), - source_id: "test-source".to_string(), + source_id: source_id.clone(), shard_id: ShardId::from(2), shard_state: ShardState::Open, leader_id: "test-ingester-0".into(), }, RoutingEntry { index_uid: index_uid.clone(), - source_id: "test-source".to_string(), + source_id: source_id.clone(), shard_id: ShardId::from(3), shard_state: ShardState::Open, leader_id: "test-ingester-0".into(), @@ -728,7 +774,7 @@ mod tests { source_id: source_id.clone(), local_shards: vec![RoutingEntry { index_uid: index_uid.clone(), - source_id: "test-source".to_string(), + source_id: source_id.clone(), shard_id: ShardId::from(1), shard_state: ShardState::Closed, leader_id: "test-ingester-0".into(), @@ -737,28 +783,28 @@ mod tests { remote_shards: vec![ RoutingEntry { index_uid: index_uid.clone(), - source_id: "test-source".to_string(), + source_id: source_id.clone(), shard_id: ShardId::from(2), shard_state: ShardState::Open, leader_id: "test-ingester-1".into(), }, RoutingEntry { index_uid: index_uid.clone(), - source_id: "test-source".to_string(), + source_id: source_id.clone(), shard_id: ShardId::from(3), shard_state: ShardState::Closed, leader_id: "test-ingester-1".into(), }, RoutingEntry { index_uid: index_uid.clone(), - source_id: "test-source".to_string(), + source_id: source_id.clone(), shard_id: ShardId::from(4), shard_state: ShardState::Open, leader_id: "test-ingester-2".into(), }, RoutingEntry { index_uid: index_uid.clone(), - source_id: "test-source".to_string(), + 
source_id: source_id.clone(), shard_id: ShardId::from(5), shard_state: ShardState::Open, leader_id: "test-ingester-1".into(), @@ -792,10 +838,16 @@ mod tests { #[test] fn test_routing_table_entry_next_open_shard_round_robin_rate_limited_error() { let index_uid = IndexUid::for_test("test-index", 0); - let source_id: SourceId = "test-source".into(); + let source_id = SourceId::from("test-source"); let ingester_pool = IngesterPool::default(); - ingester_pool.insert("test-ingester-0".into(), IngesterServiceClient::mocked()); + ingester_pool.insert( + "test-ingester-0".into(), + IngesterPoolEntry { + client: IngesterServiceClient::mocked(), + status: IngesterStatus::Ready, + }, + ); let rate_limited_shards = HashSet::from_iter([ShardId::from(1)]); @@ -804,7 +856,7 @@ mod tests { source_id: source_id.clone(), local_shards: vec![RoutingEntry { index_uid: index_uid.clone(), - source_id: "test-source".to_string(), + source_id: source_id.clone(), shard_id: ShardId::from(1), shard_state: ShardState::Open, leader_id: "test-ingester-0".into(), @@ -822,7 +874,7 @@ mod tests { #[test] fn test_routing_table_entry_insert_open_shards() { let index_uid_0 = IndexUid::for_test("test-index", 0); - let source_id: SourceId = "test-source".into(); + let source_id = SourceId::from("test-source"); let mut table_entry = RoutingTableEntry::empty(index_uid_0.clone(), source_id.clone()); let local_node_id: NodeId = "test-ingester-0".into(); @@ -931,7 +983,7 @@ mod tests { #[test] fn test_routing_table_entry_close_shards() { let index_uid = IndexUid::for_test("test-index", 0); - let source_id: SourceId = "test-source".into(); + let source_id = SourceId::from("test-source"); let mut table_entry = RoutingTableEntry::empty(index_uid.clone(), source_id.clone()); table_entry.close_shards(&index_uid, &[]); @@ -945,21 +997,21 @@ mod tests { local_shards: vec![ RoutingEntry { index_uid: index_uid.clone(), - source_id: "test-source".to_string(), + source_id: source_id.clone(), shard_id: ShardId::from(1), 
shard_state: ShardState::Open, leader_id: "test-ingester-0".into(), }, RoutingEntry { index_uid: index_uid.clone(), - source_id: "test-source".to_string(), + source_id: source_id.clone(), shard_id: ShardId::from(2), shard_state: ShardState::Open, leader_id: "test-ingester-0".into(), }, RoutingEntry { index_uid: index_uid.clone(), - source_id: "test-source".to_string(), + source_id: source_id.clone(), shard_id: ShardId::from(3), shard_state: ShardState::Open, leader_id: "test-ingester-0".into(), @@ -969,21 +1021,21 @@ mod tests { remote_shards: vec![ RoutingEntry { index_uid: index_uid.clone(), - source_id: "test-source".to_string(), + source_id: source_id.clone(), shard_id: ShardId::from(5), shard_state: ShardState::Open, leader_id: "test-ingester-1".into(), }, RoutingEntry { index_uid: index_uid.clone(), - source_id: "test-source".to_string(), + source_id: source_id.clone(), shard_id: ShardId::from(6), shard_state: ShardState::Open, leader_id: "test-ingester-1".into(), }, RoutingEntry { index_uid: index_uid.clone(), - source_id: "test-source".to_string(), + source_id: source_id.clone(), shard_id: ShardId::from(7), shard_state: ShardState::Open, leader_id: "test-ingester-1".into(), @@ -1012,7 +1064,7 @@ mod tests { #[test] fn test_routing_table_entry_delete_shards() { let index_uid = IndexUid::for_test("test-index", 0); - let source_id: SourceId = "test-source".into(); + let source_id = SourceId::from("test-source"); let mut table_entry = RoutingTableEntry::empty(index_uid.clone(), source_id.clone()); table_entry.delete_shards(&index_uid, &[]); @@ -1026,21 +1078,21 @@ mod tests { local_shards: vec![ RoutingEntry { index_uid: index_uid.clone(), - source_id: "test-source".to_string(), + source_id: source_id.clone(), shard_id: ShardId::from(1), shard_state: ShardState::Open, leader_id: "test-ingester-0".into(), }, RoutingEntry { index_uid: index_uid.clone(), - source_id: "test-source".to_string(), + source_id: source_id.clone(), shard_id: ShardId::from(2), 
shard_state: ShardState::Open, leader_id: "test-ingester-0".into(), }, RoutingEntry { index_uid: index_uid.clone(), - source_id: "test-source".to_string(), + source_id: source_id.clone(), shard_id: ShardId::from(3), shard_state: ShardState::Open, leader_id: "test-ingester-0".into(), @@ -1050,21 +1102,21 @@ mod tests { remote_shards: vec![ RoutingEntry { index_uid: index_uid.clone(), - source_id: "test-source".to_string(), + source_id: source_id.clone(), shard_id: ShardId::from(5), shard_state: ShardState::Open, leader_id: "test-ingester-1".into(), }, RoutingEntry { index_uid: index_uid.clone(), - source_id: "test-source".to_string(), + source_id: source_id.clone(), shard_id: ShardId::from(6), shard_state: ShardState::Open, leader_id: "test-ingester-1".into(), }, RoutingEntry { index_uid: index_uid.clone(), - source_id: "test-source".to_string(), + source_id: source_id.clone(), shard_id: ShardId::from(7), shard_state: ShardState::Open, leader_id: "test-ingester-1".into(), diff --git a/quickwit/quickwit-ingest/src/ingest_v2/state.rs b/quickwit/quickwit-ingest/src/ingest_v2/state.rs index 591ef4f704f..a19e4eb509d 100644 --- a/quickwit/quickwit-ingest/src/ingest_v2/state.rs +++ b/quickwit/quickwit-ingest/src/ingest_v2/state.rs @@ -20,8 +20,10 @@ use std::sync::{Arc, Weak}; use std::time::{Duration, Instant}; use mrecordlog::error::{DeleteQueueError, TruncateError}; +use quickwit_cluster::Cluster; use quickwit_common::pretty::PrettyDisplay; use quickwit_common::rate_limiter::{RateLimiter, RateLimiterSettings}; +use quickwit_common::shared_consts::INGESTER_STATUS_KEY; use quickwit_doc_mapper::DocMapper; use quickwit_proto::control_plane::AdviseResetShardsResponse; use quickwit_proto::ingest::ingester::IngesterStatus; @@ -58,6 +60,7 @@ pub(super) struct InnerIngesterState { pub replication_streams: HashMap, // Replication tasks running for each replication stream opened with leaders. 
pub replication_tasks: HashMap, + cluster: Cluster, status: IngesterStatus, status_tx: watch::Sender, } @@ -67,9 +70,12 @@ impl InnerIngesterState { self.status } - pub fn set_status(&mut self, status: IngesterStatus) { + pub async fn set_status(&mut self, status: IngesterStatus) { self.status = status; self.status_tx.send(status).expect("channel should be open"); + self.cluster + .set_self_key_value(INGESTER_STATUS_KEY, status.as_json_str_name()) + .await; } /// Returns the shard with the most available permits for this index and source. @@ -90,17 +96,22 @@ impl InnerIngesterState { } impl IngesterState { - fn new() -> Self { + async fn create(cluster: Cluster) -> Self { let status = IngesterStatus::Initializing; let (status_tx, status_rx) = watch::channel(status); - let inner = InnerIngesterState { + let mut inner = InnerIngesterState { shards: Default::default(), doc_mappers: Default::default(), replication_streams: Default::default(), replication_tasks: Default::default(), + cluster, status, status_tx, }; + // We call `set_status` here instead of setting it directly because it also updates the + // ingester status in chitchat. 
+ inner.set_status(IngesterStatus::Initializing).await; + let inner = Arc::new(Mutex::new(inner)); let mrecordlog = Arc::new(RwLock::new(None)); @@ -111,8 +122,12 @@ impl IngesterState { } } - pub fn load(wal_dir_path: &Path, rate_limiter_settings: RateLimiterSettings) -> Self { - let state = Self::new(); + pub async fn load( + cluster: Cluster, + wal_dir_path: &Path, + rate_limiter_settings: RateLimiterSettings, + ) -> Self { + let state = Self::create(cluster).await; let state_clone = state.clone(); let wal_dir_path = wal_dir_path.to_path_buf(); @@ -125,15 +140,12 @@ impl IngesterState { } #[cfg(test)] - pub async fn for_test() -> (tempfile::TempDir, Self) { + pub async fn for_test(cluster: Cluster) -> (tempfile::TempDir, Self) { let temp_dir = tempfile::tempdir().unwrap(); - let mut state = IngesterState::load(temp_dir.path(), RateLimiterSettings::default()); + let mut state = + IngesterState::load(cluster, temp_dir.path(), RateLimiterSettings::default()).await; - state - .status_rx - .wait_for(|status| *status == IngesterStatus::Ready) - .await - .unwrap(); + state.wait_for_ready().await; (temp_dir, state) } @@ -168,7 +180,7 @@ impl IngesterState { } Err(error) => { error!("failed to open WAL: {error}"); - inner_guard.set_status(IngesterStatus::Failed); + inner_guard.set_status(IngesterStatus::Failed).await; return; } }; @@ -229,7 +241,7 @@ impl IngesterState { info!("deleted {num_deleted_shards} empty shard(s)"); } mrecordlog_guard.replace(mrecordlog); - inner_guard.set_status(IngesterStatus::Ready); + inner_guard.set_status(IngesterStatus::Ready).await; } pub async fn wait_for_ready(&mut self) { @@ -386,38 +398,32 @@ impl FullyLockedIngesterState<'_> { truncate_up_to_position_inclusive: Position, initiator: &'static str, ) { - // TODO: Replace with if-let-chains when stabilized. 
- let Some(truncate_up_to_offset_inclusive) = truncate_up_to_position_inclusive.as_u64() - else { - return; - }; - let Some(shard) = self.inner.shards.get_mut(queue_id) else { - return; - }; - if shard.truncation_position_inclusive >= truncate_up_to_position_inclusive { - return; - } - match self - .mrecordlog - .truncate(queue_id, truncate_up_to_offset_inclusive) - .await + if let Some(truncate_up_to_offset_inclusive) = truncate_up_to_position_inclusive.as_u64() + && let Some(shard) = self.inner.shards.get_mut(queue_id) + && shard.truncation_position_inclusive < truncate_up_to_position_inclusive { - Ok(_) => { - info!( - "truncated shard `{queue_id}` at {truncate_up_to_position_inclusive} \ - initiated via `{initiator}`" - ); - shard.truncation_position_inclusive = truncate_up_to_position_inclusive; - } - Err(TruncateError::MissingQueue(_)) => { - error!("failed to truncate shard `{queue_id}`: WAL queue not found"); - self.shards.remove(queue_id); - info!("deleted dangling shard `{queue_id}`"); - } - Err(TruncateError::IoError(io_error)) => { - error!("failed to truncate shard `{queue_id}`: {io_error}"); - } - }; + match self + .mrecordlog + .truncate(queue_id, truncate_up_to_offset_inclusive) + .await + { + Ok(_) => { + info!( + "truncated shard `{queue_id}` at {truncate_up_to_position_inclusive} \ + initiated via `{initiator}`" + ); + shard.truncation_position_inclusive = truncate_up_to_position_inclusive; + } + Err(TruncateError::MissingQueue(_)) => { + error!("failed to truncate shard `{queue_id}`: WAL queue not found"); + self.shards.remove(queue_id); + info!("deleted dangling shard `{queue_id}`"); + } + Err(TruncateError::IoError(io_error)) => { + error!("failed to truncate shard `{queue_id}`: {io_error}"); + } + }; + } } /// Deletes and truncates the shards as directed by the `advise_reset_shards_response` returned @@ -467,6 +473,8 @@ impl WeakIngesterState { #[cfg(test)] mod tests { use bytesize::ByteSize; + use quickwit_cluster::{ChannelTransport, 
create_cluster_for_test}; + use quickwit_config::service::QuickwitService; use quickwit_proto::types::ShardId; use tokio::time::timeout; @@ -474,7 +482,15 @@ mod tests { #[tokio::test] async fn test_ingester_state_does_not_lock_while_initializing() { - let state = IngesterState::new(); + let cluster = create_cluster_for_test( + Vec::new(), + &[QuickwitService::Indexer.as_str()], + &ChannelTransport::default(), + true, + ) + .await + .unwrap(); + let state = IngesterState::create(cluster.clone()).await; let inner_guard = state.inner.lock().await; assert_eq!(inner_guard.status(), IngesterStatus::Initializing); @@ -489,9 +505,22 @@ mod tests { #[tokio::test] async fn test_ingester_state_failed() { - let state = IngesterState::new(); + let cluster = create_cluster_for_test( + Vec::new(), + &[QuickwitService::Indexer.as_str()], + &ChannelTransport::default(), + true, + ) + .await + .unwrap(); + let state = IngesterState::create(cluster.clone()).await; - state.inner.lock().await.set_status(IngesterStatus::Failed); + state + .inner + .lock() + .await + .set_status(IngesterStatus::Failed) + .await; let error = state.lock_partially().await.unwrap_err().to_string(); assert!(error.to_string().ends_with("failed to initialize ingester")); @@ -502,7 +531,15 @@ mod tests { #[tokio::test] async fn test_ingester_state_init() { - let mut state = IngesterState::new(); + let cluster = create_cluster_for_test( + Vec::new(), + &[QuickwitService::Indexer.as_str()], + &ChannelTransport::default(), + true, + ) + .await + .unwrap(); + let mut state = IngesterState::create(cluster.clone()).await; let temp_dir = tempfile::tempdir().unwrap(); state @@ -539,7 +576,15 @@ mod tests { #[tokio::test] async fn test_find_most_capacity_shard_returns_shard_with_least_used_capacity() { - let (_temp_dir, state) = IngesterState::for_test().await; + let cluster = create_cluster_for_test( + Vec::new(), + &[QuickwitService::Indexer.as_str()], + &ChannelTransport::default(), + true, + ) + .await + .unwrap(); 
+ let (_temp_dir, state) = IngesterState::for_test(cluster).await; let mut state_guard = state.lock_partially().await.unwrap(); let index_uid = IndexUid::for_test("test-index", 0); @@ -577,7 +622,15 @@ mod tests { #[tokio::test] async fn test_find_most_capacity_shard_skips_closed_shards() { - let (_temp_dir, state) = IngesterState::for_test().await; + let cluster = create_cluster_for_test( + Vec::new(), + &[QuickwitService::Indexer.as_str()], + &ChannelTransport::default(), + true, + ) + .await + .unwrap(); + let (_temp_dir, state) = IngesterState::for_test(cluster).await; let mut locked_state = state.lock_partially().await.unwrap(); let index_uid = IndexUid::for_test("test-index", 0); @@ -619,7 +672,15 @@ mod tests { #[tokio::test] async fn test_find_most_capacity_shard_returns_none_for_unknown_index_or_source() { - let (_temp_dir, state) = IngesterState::for_test().await; + let cluster = create_cluster_for_test( + Vec::new(), + &[QuickwitService::Indexer.as_str()], + &ChannelTransport::default(), + true, + ) + .await + .unwrap(); + let (_temp_dir, state) = IngesterState::for_test(cluster).await; let mut locked_state = state.lock_partially().await.unwrap(); let index_uid = IndexUid::for_test("test-index", 0); @@ -642,4 +703,34 @@ mod tests { locked_state.find_most_capacity_shard_mut(&index_uid, &SourceId::from("other-source")); assert!(shard_opt.is_none()); } + + #[tokio::test] + async fn test_ingester_state_set_status() { + let cluster = create_cluster_for_test( + Vec::new(), + &[QuickwitService::Indexer.as_str()], + &ChannelTransport::default(), + true, + ) + .await + .unwrap(); + let state = IngesterState::create(cluster.clone()).await; + let temp_dir = tempfile::tempdir().unwrap(); + + state + .init(temp_dir.path(), RateLimiterSettings::default()) + .await; + + let mut state_guard = state.lock_fully().await.unwrap(); + state_guard.set_status(IngesterStatus::Failed).await; + assert_eq!(state_guard.status(), IngesterStatus::Failed); + 
assert_eq!(*state.status_rx.borrow(), IngesterStatus::Failed); + + let status_json_str = cluster + .get_self_key_value(INGESTER_STATUS_KEY) + .await + .unwrap(); + let status = IngesterStatus::from_json_str_name(&status_json_str).unwrap(); + assert_eq!(status, IngesterStatus::Failed); + } } diff --git a/quickwit/quickwit-integration-tests/src/test_utils/cluster_sandbox.rs b/quickwit/quickwit-integration-tests/src/test_utils/cluster_sandbox.rs index 5277e0ac740..a7385ca0946 100644 --- a/quickwit/quickwit-integration-tests/src/test_utils/cluster_sandbox.rs +++ b/quickwit/quickwit-integration-tests/src/test_utils/cluster_sandbox.rs @@ -56,6 +56,39 @@ pub struct TestNodeConfig { pub enable_otlp: bool, } +impl TestNodeConfig { + async fn build_node_config( + &self, + node_idx: usize, + cluster_id: String, + temp_dir: &TempDir, + unique_dir_name: String, + tcp_listener_resolver: &TestTcpListenerResolver, + ) -> NodeConfig { + let socket: SocketAddr = ([127, 0, 0, 1], 0u16).into(); + let rest_tcp_listener = TcpListener::bind(socket).await.unwrap(); + let grpc_tcp_listener = TcpListener::bind(socket).await.unwrap(); + let mut config = NodeConfig::for_test_from_ports( + rest_tcp_listener.local_addr().unwrap().port(), + grpc_tcp_listener.local_addr().unwrap().port(), + ); + tcp_listener_resolver.add_listener(rest_tcp_listener).await; + tcp_listener_resolver.add_listener(grpc_tcp_listener).await; + config.indexer_config.enable_otlp_endpoint = self.enable_otlp; + config.enabled_services.clone_from(&self.services); + config.jaeger_config.enable_endpoint = true; + config.cluster_id.clone_from(&cluster_id); + config.node_id = NodeId::new(format!("test-node-{node_idx}")); + let root_data_dir = temp_dir.path().to_path_buf(); + config.data_dir_path = root_data_dir.join(config.node_id.as_str()); + config.metastore_uri = + QuickwitUri::from_str(&format!("ram:///{unique_dir_name}/metastore")).unwrap(); + config.default_index_root_uri = + 
QuickwitUri::from_str(&format!("ram:///{unique_dir_name}/indexes")).unwrap(); + config + } +} + pub struct ClusterSandboxBuilder { temp_dir: TempDir, node_configs: Vec, @@ -106,32 +139,21 @@ impl ClusterSandboxBuilder { /// - `default_index_root_uri` defined by `root_data_dir/indexes`. /// - `peers` defined by others nodes `gossip_advertise_addr`. pub async fn build_config(self) -> ResolvedClusterConfig { - let root_data_dir = self.temp_dir.path().to_path_buf(); let cluster_id = new_coolid("test-cluster"); let mut resolved_node_configs = Vec::new(); let mut peers: Vec = Vec::new(); let unique_dir_name = new_coolid("test-dir"); let tcp_listener_resolver = TestTcpListenerResolver::default(); for (node_idx, node_builder) in self.node_configs.iter().enumerate() { - let socket: SocketAddr = ([127, 0, 0, 1], 0u16).into(); - let rest_tcp_listener = TcpListener::bind(socket).await.unwrap(); - let grpc_tcp_listener = TcpListener::bind(socket).await.unwrap(); - let mut config = NodeConfig::for_test_from_ports( - rest_tcp_listener.local_addr().unwrap().port(), - grpc_tcp_listener.local_addr().unwrap().port(), - ); - tcp_listener_resolver.add_listener(rest_tcp_listener).await; - tcp_listener_resolver.add_listener(grpc_tcp_listener).await; - config.indexer_config.enable_otlp_endpoint = node_builder.enable_otlp; - config.enabled_services.clone_from(&node_builder.services); - config.jaeger_config.enable_endpoint = true; - config.cluster_id.clone_from(&cluster_id); - config.node_id = NodeId::new(format!("test-node-{node_idx}")); - config.data_dir_path = root_data_dir.join(config.node_id.as_str()); - config.metastore_uri = - QuickwitUri::from_str(&format!("ram:///{unique_dir_name}/metastore")).unwrap(); - config.default_index_root_uri = - QuickwitUri::from_str(&format!("ram:///{unique_dir_name}/indexes")).unwrap(); + let config = node_builder + .build_node_config( + node_idx, + cluster_id.clone(), + &self.temp_dir, + unique_dir_name.clone(), + &tcp_listener_resolver, + ) + .await; 
peers.push(config.gossip_advertise_addr.to_string()); resolved_node_configs.push((config, node_builder.services.clone())); } @@ -143,7 +165,9 @@ impl ClusterSandboxBuilder { .collect_vec(); } ResolvedClusterConfig { + cluster_id, temp_dir: self.temp_dir, + unique_dir_name, node_configs: resolved_node_configs, tcp_listener_resolver, } @@ -167,7 +191,9 @@ impl ClusterSandboxBuilder { /// Intermediate state where the ports of all the test cluster nodes have /// been reserved and the configurations have been generated. pub struct ResolvedClusterConfig { + cluster_id: String, temp_dir: TempDir, + unique_dir_name: String, pub node_configs: Vec<(NodeConfig, HashSet)>, tcp_listener_resolver: TestTcpListenerResolver, } @@ -176,49 +202,23 @@ impl ResolvedClusterConfig { /// Start a cluster using this config and waits for the nodes to be ready pub async fn start(self) -> ClusterSandbox { quickwit_cli::install_default_crypto_ring_provider(); - let mut node_shutdown_handles = Vec::new(); - let runtimes_config = RuntimesConfig::light_for_tests(); - let storage_resolver = StorageResolver::unconfigured(); - let metastore_resolver = MetastoreResolver::unconfigured(); - let cluster_size = self.node_configs.len(); - for node_config in self.node_configs.iter() { - let mut shutdown_handler = - NodeShutdownHandle::new(node_config.0.node_id.clone(), node_config.1.clone()); - let shutdown_signal = shutdown_handler.shutdown_signal(); - let join_handle = tokio::spawn({ - let node_config = node_config.0.clone(); - let node_id = node_config.node_id.clone(); - let services = node_config.enabled_services.clone(); - let metastore_resolver = metastore_resolver.clone(); - let storage_resolver = storage_resolver.clone(); - let tcp_listener_resolver = self.tcp_listener_resolver.clone(); - - async move { - let result = serve_quickwit( - node_config, - runtimes_config, - metastore_resolver, - storage_resolver, - tcp_listener_resolver, - shutdown_signal, - 
quickwit_serve::do_nothing_env_filter_reload_fn(), - ) - .await?; - debug!("{node_id} stopped successfully ({:?})", services); - Result::<_, anyhow::Error>::Ok(result) - } - }); - shutdown_handler.set_node_join_handle(join_handle); - node_shutdown_handles.push(shutdown_handler); - } - let sandbox = ClusterSandbox { - node_configs: self.node_configs, - _temp_dir: self.temp_dir, - node_shutdown_handles, + let mut sandbox = ClusterSandbox { + cluster_id: self.cluster_id, + node_configs: Vec::new(), + temp_dir: self.temp_dir, + unique_dir_name: self.unique_dir_name, + node_shutdown_handles: Vec::new(), + tcp_listener_resolver: self.tcp_listener_resolver, + storage_resolver: StorageResolver::unconfigured(), + metastore_resolver: MetastoreResolver::unconfigured(), }; + for (config, services) in &self.node_configs { + sandbox.spawn_node(config.clone(), services.clone()); + } + sandbox.node_configs = self.node_configs; sandbox - .wait_for_cluster_num_ready_nodes(cluster_size) + .wait_for_cluster_num_ready_nodes(sandbox.node_configs.len()) .await .unwrap(); sandbox @@ -257,12 +257,74 @@ pub(crate) async fn ingest( /// A test environment where you can start a Quickwit cluster and use the gRPC /// or REST clients to test it. 
pub struct ClusterSandbox { + cluster_id: String, pub node_configs: Vec<(NodeConfig, HashSet)>, - _temp_dir: TempDir, + unique_dir_name: String, + temp_dir: TempDir, node_shutdown_handles: Vec, + tcp_listener_resolver: TestTcpListenerResolver, + storage_resolver: StorageResolver, + metastore_resolver: MetastoreResolver, } impl ClusterSandbox { + fn spawn_node(&mut self, config: NodeConfig, services: HashSet) { + let mut shutdown_handle = NodeShutdownHandle::new(config.node_id.clone(), services.clone()); + let shutdown_signal = shutdown_handle.shutdown_signal(); + let runtimes_config = RuntimesConfig::light_for_tests(); + let join_handle = tokio::spawn({ + let node_id = config.node_id.clone(); + let metastore_resolver = self.metastore_resolver.clone(); + let storage_resolver = self.storage_resolver.clone(); + let tcp_listener_resolver = self.tcp_listener_resolver.clone(); + async move { + let result = serve_quickwit( + config, + runtimes_config, + metastore_resolver, + storage_resolver, + tcp_listener_resolver, + shutdown_signal, + quickwit_serve::do_nothing_env_filter_reload_fn(), + ) + .await?; + debug!("{node_id} stopped successfully ({services:?})"); + Result::<_, anyhow::Error>::Ok(result) + } + }); + shutdown_handle.set_node_join_handle(join_handle); + self.node_shutdown_handles.push(shutdown_handle); + } + + /// Dynamically adds a node to the cluster. Does not wait for readiness. 
+ pub async fn add_node(&mut self, services: impl IntoIterator) { + self.add_node_inner(TestNodeConfig { + services: HashSet::from_iter(services), + enable_otlp: false, + }) + .await; + } + + async fn add_node_inner(&mut self, config_builder: TestNodeConfig) { + let mut config = config_builder + .build_node_config( + self.node_configs.len() + 1, + self.cluster_id.clone(), + &self.temp_dir, + self.unique_dir_name.clone(), + &self.tcp_listener_resolver, + ) + .await; + config.peer_seeds = self + .node_configs + .iter() + .map(|config| config.0.gossip_advertise_addr.to_string()) + .collect_vec(); + self.spawn_node(config.clone(), config_builder.services.clone()); + self.node_configs + .push((config, config_builder.services.clone())); + } + fn find_node_for_service(&self, service: QuickwitService) -> NodeConfig { self.node_configs .iter() @@ -351,7 +413,7 @@ impl ClusterSandbox { TraceServiceClient::new(self.channel(QuickwitService::Indexer)) } - async fn wait_for_cluster_num_ready_nodes( + pub async fn wait_for_cluster_num_ready_nodes( &self, expected_num_ready_nodes: usize, ) -> anyhow::Result<()> { @@ -596,4 +658,48 @@ impl ClusterSandbox { self.shutdown_services(QuickwitService::supported_services()) .await } + + /// Remove a node from the sandbox and return its shutdown handle. + /// After this call, `rest_client` and other lookup methods skip the removed + /// node, so callers can trigger shutdown concurrently with other sandbox + /// operations. + pub fn remove_node_with_service(&mut self, service: QuickwitService) -> NodeShutdownHandle { + let idx = self + .node_shutdown_handles + .iter() + .position(|h| h.node_services.contains(&service)) + .unwrap_or_else(|| panic!("no node with service {service:?}")); + self.node_configs.remove(idx); + self.node_shutdown_handles.remove(idx) + } +} + +/// We don't usually test the tests, but the complexity of the sandbox setup code justifies it here. 
+#[tokio::test] +async fn test_sandbox_happy_path() { + let sandbox = ClusterSandboxBuilder::default() + .add_node([QuickwitService::ControlPlane, QuickwitService::Metastore]) + .add_node([QuickwitService::Searcher]) + .add_node([QuickwitService::Indexer]) + .build_and_start() + .await; + + sandbox.wait_for_cluster_num_ready_nodes(3).await.unwrap(); + sandbox.shutdown().await.unwrap(); +} + +#[tokio::test] +async fn test_sandbox_add_node_dynamically() { + let mut sandbox = ClusterSandboxBuilder::default() + .add_node([QuickwitService::ControlPlane, QuickwitService::Metastore]) + .add_node([QuickwitService::Searcher]) + .build_and_start() + .await; + sandbox.wait_for_cluster_num_ready_nodes(2).await.unwrap(); + + // Later, add an indexer node to the running cluster + sandbox.add_node([QuickwitService::Indexer]).await; + + sandbox.wait_for_cluster_num_ready_nodes(3).await.unwrap(); + sandbox.shutdown().await.unwrap(); } diff --git a/quickwit/quickwit-integration-tests/src/tests/ingest_v2_tests.rs b/quickwit/quickwit-integration-tests/src/tests/ingest_v2_tests.rs index 4a5e29c9565..9c3de3a6b85 100644 --- a/quickwit/quickwit-integration-tests/src/tests/ingest_v2_tests.rs +++ b/quickwit/quickwit-integration-tests/src/tests/ingest_v2_tests.rs @@ -856,3 +856,147 @@ async fn test_shutdown_indexer_first() { .unwrap() .unwrap(); } + +/// Tests that the graceful shutdown sequence works correctly in a multi-indexer +/// cluster: shutting down one indexer does NOT cause 500 errors or data loss, +/// and the cluster eventually rebalances. see #6158 +/// +/// We start with a single indexer so the shard for this index is guaranteed to +/// live on it. After ingesting, we dynamically add a second indexer, then shut +/// down the first one. This proves the decommission sequence correctly drains +/// in-flight data even when the shard owner is the node being removed. 
+#[tokio::test] +async fn test_graceful_shutdown_no_data_loss() { + let mut sandbox = ClusterSandboxBuilder::default() + .add_node([QuickwitService::Indexer]) + .add_node([ + QuickwitService::ControlPlane, + QuickwitService::Searcher, + QuickwitService::Metastore, + QuickwitService::Janitor, + ]) + .build_and_start() + .await; + let index_id = "test_graceful_shutdown_no_data_loss"; + + // Create index with a long commit timeout so documents stay uncommitted + // in the ingesters' WAL. The decommission sequence should commit + // them before the indexer quits. + sandbox + .rest_client(QuickwitService::Indexer) + .indexes() + .create( + format!( + r#" + version: 0.8 + index_id: {index_id} + doc_mapping: + field_mappings: + - name: body + type: text + indexing_settings: + commit_timeout_secs: 5 + "# + ), + ConfigFormat::Yaml, + false, + ) + .await + .unwrap(); + + // Ingest docs with auto-commit. With a 5s commit timeout, these documents + // sit uncommitted in the ingesters' WAL - exactly the in-flight state we + // want to exercise during draining. + ingest( + &sandbox.rest_client(QuickwitService::Indexer), + index_id, + ingest_json!({"body": "before-shutdown-1"}), + CommitType::Auto, + ) + .await + .unwrap(); + + ingest( + &sandbox.rest_client(QuickwitService::Indexer), + index_id, + ingest_json!({"body": "before-shutdown-2"}), + CommitType::Auto, + ) + .await + .unwrap(); + + // Add a second indexer after the shard has been created on the first one. + sandbox.add_node([QuickwitService::Indexer]).await; + + // Remove the first indexer (the shard owner) from the sandbox and get its + // shutdown handle. After this call, rest_client(Indexer) returns the + // second (surviving) indexer. + let shutdown_handle = sandbox.remove_node_with_service(QuickwitService::Indexer); + + // Concurrently: shut down the removed indexer AND ingest more data via the + // surviving indexer. 
This verifies the cluster stays operational and the + // router on the surviving node does not return 500 errors while one indexer + // is decommissioning. The control plane excludes the decommissioning + // ingester from shard allocation, so new shards go to the surviving one. + let ingest_client = sandbox.rest_client(QuickwitService::Indexer); + let (shutdown_result, ingest_result) = tokio::join!( + async { + tokio::time::timeout(Duration::from_secs(30), shutdown_handle.shutdown()) + .await + .expect("indexer shutdown timed out — decommission may be stuck") + }, + async { + // Small delay so the decommission sequence has started before we ingest. + tokio::time::sleep(Duration::from_millis(200)).await; + ingest( + &ingest_client, + index_id, + ingest_json!({"body": "during-shutdown"}), + CommitType::Auto, + ) + .await + }, + ); + shutdown_result.expect("indexer shutdown failed"); + ingest_result.expect("ingest during shutdown should succeed (no 500 errors)"); + + // All 3 documents should eventually be searchable. Documents 1 & 2 were + // in-flight on the decommissioning indexer and should have been committed during + // the decommission step. Document 3 was ingested to the surviving indexer. + wait_until_predicate( + || async { + match sandbox + .rest_client(QuickwitService::Searcher) + .search( + index_id, + quickwit_serve::SearchRequestQueryString { + query: "*".to_string(), + max_hits: 10, + ..Default::default() + }, + ) + .await + { + Ok(resp) => resp.num_hits == 3, + Err(_) => false, + } + }, + Duration::from_secs(30), + Duration::from_millis(500), + ) + .await + .expect("expected 3 documents after decommission shutdown, some data may have been lost"); + + // Verify the cluster sees 2 ready nodes (the surviving indexer + the + // control-plane/searcher/metastore/janitor node). + sandbox + .wait_for_cluster_num_ready_nodes(2) + .await + .expect("cluster should see 2 ready nodes after indexer shutdown"); + + // Clean shutdown of the remaining nodes. 
+ tokio::time::timeout(Duration::from_secs(15), sandbox.shutdown()) + .await + .unwrap() + .unwrap(); +} diff --git a/quickwit/quickwit-lambda-client/src/deploy.rs b/quickwit/quickwit-lambda-client/src/deploy.rs index e189e4a7ec0..aacef0929ea 100644 --- a/quickwit/quickwit-lambda-client/src/deploy.rs +++ b/quickwit/quickwit-lambda-client/src/deploy.rs @@ -26,7 +26,7 @@ //! - Old versions are garbage collected (keep current + top 5 most recent) use std::collections::HashMap; -use std::sync::{Arc, OnceLock}; +use std::sync::OnceLock; use anyhow::{Context, anyhow}; use aws_sdk_lambda::Client as LambdaClient; @@ -108,7 +108,7 @@ fn version_description(deploy_config_opt: Option<&LambdaDeployConfig>) -> String /// ensuring the deployed Lambda matches the embedded binary. pub async fn try_get_or_deploy_invoker( lambda_config: &LambdaConfig, -) -> anyhow::Result> { +) -> anyhow::Result { let aws_config = aws_config::load_defaults(aws_config::BehaviorVersion::latest()).await; let client = LambdaClient::new(&aws_config); let function_name = &lambda_config.function_name; diff --git a/quickwit/quickwit-lambda-client/src/invoker.rs b/quickwit/quickwit-lambda-client/src/invoker.rs index 7f29a64ba60..c8ffa0716a0 100644 --- a/quickwit/quickwit-lambda-client/src/invoker.rs +++ b/quickwit/quickwit-lambda-client/src/invoker.rs @@ -12,22 +12,96 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use std::sync::Arc; +use std::time::Duration; use anyhow::Context as _; use async_trait::async_trait; use aws_sdk_lambda::Client as LambdaClient; +use aws_sdk_lambda::error::{DisplayErrorContext, SdkError}; +use aws_sdk_lambda::operation::invoke::InvokeError; use aws_sdk_lambda::primitives::Blob; use aws_sdk_lambda::types::InvocationType; use base64::prelude::*; use prost::Message; +use quickwit_common::retry::RetryParams; use quickwit_lambda_server::{LambdaSearchRequestPayload, LambdaSearchResponsePayload}; use quickwit_proto::search::{LambdaSearchResponses, LambdaSingleSplitResult, LeafSearchRequest}; use quickwit_search::{LambdaLeafSearchInvoker, SearchError}; -use tracing::{debug, info, instrument}; +use tracing::{debug, info, instrument, warn}; use crate::metrics::LAMBDA_METRICS; +/// Upper bound on the retry-after hint we will honor from Lambda rate-limit responses. +const MAX_RETRY_AFTER: Duration = Duration::from_secs(10); + +/// Richer error type used internally by the invoker so that rate-limit retry-after hints +/// are not lost before the retry loop can consume them. +enum LambdaInvokeError { +    /// Lambda returned a throttling error. The optional duration is the `Retry-After` hint +    /// provided by Lambda; `None` means no hint was present. +    RateLimited(Option<Duration>), +    /// The invocation timed out. +    Timeout(String), +    /// A non-retryable error. 
+    Permanent(SearchError), +} + +impl LambdaInvokeError { +    fn into_search_error(self) -> SearchError { +        match self { +            Self::RateLimited(_) => SearchError::TooManyRequests, +            Self::Timeout(msg) => SearchError::Timeout(msg), +            Self::Permanent(err) => err, +        } +    } +} + +impl From<SearchError> for LambdaInvokeError { +    fn from(err: SearchError) -> Self { +        LambdaInvokeError::Permanent(err) +    } +} + +fn invoke_error_to_lambda_error(error: SdkError<InvokeError>) -> LambdaInvokeError { +    if let SdkError::ServiceError(ref service_error) = error { +        match service_error.err() { +            InvokeError::TooManyRequestsException(exc) => { +                let retry_after = exc +                    .retry_after_seconds() +                    .and_then(|raw| raw.parse::<f64>().ok()) +                    .filter(|secs| secs.is_finite() && *secs > 0.0) +                    .map(|secs| Duration::from_secs_f64(secs).min(MAX_RETRY_AFTER)); +                return LambdaInvokeError::RateLimited(retry_after); +            } +            InvokeError::EniLimitReachedException(_) +            | InvokeError::SubnetIpAddressLimitReachedException(_) +            | InvokeError::Ec2ThrottledException(_) +            | InvokeError::ResourceConflictException(_) => { +                return LambdaInvokeError::RateLimited(None); +            } +            _ => {} +        } +    } + +    let is_timeout = match &error { +        SdkError::TimeoutError(_) => true, +        SdkError::DispatchFailure(failure) => failure.is_timeout(), +        SdkError::ServiceError(service_error) => matches!( +            service_error.err(), +            InvokeError::EfsMountTimeoutException(_) | InvokeError::SnapStartTimeoutException(_) +        ), +        _ => false, +    }; + +    let error_msg = format!("lambda invocation failed: {}", DisplayErrorContext(&error)); + +    if is_timeout { +        LambdaInvokeError::Timeout(error_msg) +    } else { +        LambdaInvokeError::Permanent(SearchError::Internal(error_msg)) +    } +} + /// Create a Lambda invoker for a specific version. 
/// /// The version number is used as the qualifier when invoking, ensuring we call @@ -35,7 +109,7 @@ use crate::metrics::LAMBDA_METRICS; pub(crate) async fn create_lambda_invoker_for_version( function_name: String, version: String, -) -> anyhow::Result> { +) -> anyhow::Result { let aws_config = aws_config::load_defaults(aws_config::BehaviorVersion::latest()).await; let client = LambdaClient::new(&aws_config); let invoker = AwsLambdaInvoker { @@ -44,11 +118,11 @@ pub(crate) async fn create_lambda_invoker_for_version( version, }; invoker.validate().await?; - Ok(Arc::new(invoker)) + Ok(invoker) } /// AWS Lambda implementation of RemoteFunctionInvoker. -struct AwsLambdaInvoker { +pub(crate) struct AwsLambdaInvoker { client: LambdaClient, function_name: String, /// The version number to invoke (e.g., "7", "12"). @@ -79,6 +153,13 @@ impl AwsLambdaInvoker { } } +/// Retry parameters used for exponential backoff when no `Retry-After` hint is available. +const LAMBDA_RETRY_PARAMS: RetryParams = RetryParams { + base_delay: Duration::from_secs(1), + max_delay: Duration::from_secs(10), + max_attempts: 3, +}; + #[async_trait] impl LambdaLeafSearchInvoker for AwsLambdaInvoker { #[instrument(skip(self, request), fields(function_name = %self.function_name, version = %self.version))] @@ -87,9 +168,7 @@ impl LambdaLeafSearchInvoker for AwsLambdaInvoker { request: LeafSearchRequest, ) -> Result, SearchError> { let start = std::time::Instant::now(); - - let result = self.invoke_leaf_search_inner(request).await; - + let result = self.invoke_leaf_search_with_retry(request).await; let elapsed = start.elapsed().as_secs_f64(); let status = if result.is_ok() { "success" } else { "error" }; LAMBDA_METRICS @@ -100,16 +179,50 @@ impl LambdaLeafSearchInvoker for AwsLambdaInvoker { .leaf_search_duration_seconds .with_label_values([status]) .observe(elapsed); - result } } impl AwsLambdaInvoker { - async fn invoke_leaf_search_inner( + async fn invoke_leaf_search_with_retry( &self, request: 
LeafSearchRequest, ) -> Result, SearchError> { + let mut error = match self.invoke_leaf_search_once(request.clone()).await { + Ok(results) => return Ok(results), + Err(error) => error, + }; + + for num_attempts in 1..LAMBDA_RETRY_PARAMS.max_attempts { + // Determine whether to retry and how long to wait. + let delay = match &error { + LambdaInvokeError::RateLimited(retry_after) => { + retry_after.unwrap_or_else(|| LAMBDA_RETRY_PARAMS.compute_delay(num_attempts)) + } + LambdaInvokeError::Timeout(_) => LAMBDA_RETRY_PARAMS.compute_delay(num_attempts), + LambdaInvokeError::Permanent(_) => return Err(error.into_search_error()), + }; + + warn!( + num_attempts = num_attempts, + delay_ms = delay.as_millis(), + "lambda invocation failed, retrying" + ); + tokio::time::sleep(delay).await; + + match self.invoke_leaf_search_once(request.clone()).await { + Ok(results) => return Ok(results), + Err(e) => error = e, + }; + } + + Err(error.into_search_error()) + } + + async fn invoke_leaf_search_once( + &self, + request: LeafSearchRequest, + ) -> Result, LambdaInvokeError> { // Serialize request to protobuf bytes, then base64 encode let request_bytes = request.encode_to_vec(); let payload = LambdaSearchRequestPayload { @@ -141,7 +254,7 @@ impl AwsLambdaInvoker { let response = invoke_builder .send() .await - .map_err(|e| SearchError::Internal(format!("Lambda invocation error: {}", e)))?; + .map_err(invoke_error_to_lambda_error)?; // Check for function error if let Some(error) = response.function_error() { @@ -152,7 +265,8 @@ impl AwsLambdaInvoker { return Err(SearchError::Internal(format!( "lambda function error: {}: {}", error, error_payload - ))); + )) + .into()); } // Deserialize response diff --git a/quickwit/quickwit-lambda-server/src/context.rs b/quickwit/quickwit-lambda-server/src/context.rs index 879572e5f8d..d3b9167414f 100644 --- a/quickwit/quickwit-lambda-server/src/context.rs +++ b/quickwit/quickwit-lambda-server/src/context.rs @@ -33,7 +33,8 @@ impl 
LambdaSearcherContext { info!("initializing lambda searcher context"); let searcher_config = try_searcher_config_from_env()?; - let searcher_context = Arc::new(SearcherContext::new(searcher_config, None, None)); + let searcher_context = + Arc::new(SearcherContext::new_without_invoker(searcher_config, None)); let storage_resolver = StorageResolver::configured(&Default::default()); Ok(Self { diff --git a/quickwit/quickwit-proto/protos/quickwit/ingester.proto b/quickwit/quickwit-proto/protos/quickwit/ingester.proto index a5b651d94d8..f7341b12862 100644 --- a/quickwit/quickwit-proto/protos/quickwit/ingester.proto +++ b/quickwit/quickwit-proto/protos/quickwit/ingester.proto @@ -300,6 +300,8 @@ enum IngesterStatus { INGESTER_STATUS_INITIALIZING = 1; // The ingester is ready and accepts read and write requests. INGESTER_STATUS_READY = 2; + // The ingester is about to be decommissioned. It still accepts read and write requests, but will not accept write requests in a few seconds and should be avoided by future write requests. + INGESTER_STATUS_RETIRING = 6; // The ingester is being decommissioned. It accepts read requests but rejects write requests // (open shards, persist, and replicate requests). It will transition to `Decommissioned` once // all shards are fully indexed. diff --git a/quickwit/quickwit-proto/protos/quickwit/search.proto b/quickwit/quickwit-proto/protos/quickwit/search.proto index ae3442fe1aa..32c1aa92540 100644 --- a/quickwit/quickwit-proto/protos/quickwit/search.proto +++ b/quickwit/quickwit-proto/protos/quickwit/search.proto @@ -125,6 +125,11 @@ message ListFieldsRequest { optional int64 start_timestamp = 3; optional int64 end_timestamp = 4; + // JSON-serialized QueryAst for index_filter support. + // When provided, only fields from documents matching this query are returned. 
+ optional string query_ast = 5; + optional uint64 max_fields = 6; + uint64 start_offset = 7; // Control if the request will fail if split_ids contains a split that does not exist. // optional bool fail_on_missing_index = 6; } @@ -141,11 +146,11 @@ message LeafListFieldsRequest { // Optional limit query to a list of fields // Wildcard expressions are supported. repeated string fields = 4; - } message ListFieldsResponse { repeated ListFieldsEntryResponse fields = 1; + uint64 num_fields = 2; } message ListFieldsEntryResponse { diff --git a/quickwit/quickwit-proto/src/codegen/quickwit/quickwit.ingest.ingester.rs b/quickwit/quickwit-proto/src/codegen/quickwit/quickwit.ingest.ingester.rs index 018e19a39a9..ae434b45529 100644 --- a/quickwit/quickwit-proto/src/codegen/quickwit/quickwit.ingest.ingester.rs +++ b/quickwit/quickwit-proto/src/codegen/quickwit/quickwit.ingest.ingester.rs @@ -470,11 +470,14 @@ impl ReplicateFailureReason { #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] pub enum IngesterStatus { + /// For nodes without an ingester. Unspecified = 0, - /// The ingester is live but not ready yet to accept requests. + /// The ingester is live but not ready yet to accept requests (i.e. the Write-Ahead Log is not loaded yet). Initializing = 1, /// The ingester is ready and accepts read and write requests. Ready = 2, + /// The ingester is about to be decommissioned. It still accepts read and write requests, but will not accept write requests in a few seconds and should be avoided by future write requests. + Retiring = 6, /// The ingester is being decommissioned. It accepts read requests but rejects write requests /// (open shards, persist, and replicate requests). It will transition to `Decommissioned` once /// all shards are fully indexed. 
@@ -495,6 +498,7 @@ impl IngesterStatus { Self::Unspecified => "INGESTER_STATUS_UNSPECIFIED", Self::Initializing => "INGESTER_STATUS_INITIALIZING", Self::Ready => "INGESTER_STATUS_READY", + Self::Retiring => "INGESTER_STATUS_RETIRING", Self::Decommissioning => "INGESTER_STATUS_DECOMMISSIONING", Self::Decommissioned => "INGESTER_STATUS_DECOMMISSIONED", Self::Failed => "INGESTER_STATUS_FAILED", @@ -506,6 +510,7 @@ impl IngesterStatus { "INGESTER_STATUS_UNSPECIFIED" => Some(Self::Unspecified), "INGESTER_STATUS_INITIALIZING" => Some(Self::Initializing), "INGESTER_STATUS_READY" => Some(Self::Ready), + "INGESTER_STATUS_RETIRING" => Some(Self::Retiring), "INGESTER_STATUS_DECOMMISSIONING" => Some(Self::Decommissioning), "INGESTER_STATUS_DECOMMISSIONED" => Some(Self::Decommissioned), "INGESTER_STATUS_FAILED" => Some(Self::Failed), diff --git a/quickwit/quickwit-proto/src/codegen/quickwit/quickwit.search.rs b/quickwit/quickwit-proto/src/codegen/quickwit/quickwit.search.rs index 1e933055cd3..bfe89674f78 100644 --- a/quickwit/quickwit-proto/src/codegen/quickwit/quickwit.search.rs +++ b/quickwit/quickwit-proto/src/codegen/quickwit/quickwit.search.rs @@ -70,6 +70,14 @@ pub struct ListFieldsRequest { pub start_timestamp: ::core::option::Option, #[prost(int64, optional, tag = "4")] pub end_timestamp: ::core::option::Option, + /// JSON-serialized QueryAst for index_filter support. + /// When provided, only fields from documents matching this query are returned. 
+    #[prost(string, optional, tag = "5")] +    pub query_ast: ::core::option::Option<::prost::alloc::string::String>, +    #[prost(uint64, optional, tag = "6")] +    pub max_fields: ::core::option::Option<u64>, +    #[prost(uint64, tag = "7")] +    pub start_offset: u64, } #[derive(serde::Serialize, serde::Deserialize, utoipa::ToSchema)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -94,6 +102,8 @@ pub struct LeafListFieldsRequest { pub struct ListFieldsResponse { #[prost(message, repeated, tag = "1")] pub fields: ::prost::alloc::vec::Vec<ListFieldsEntryResponse>, +    #[prost(uint64, tag = "2")] +    pub num_fields: u64, } #[derive(serde::Serialize, serde::Deserialize, utoipa::ToSchema)] #[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] diff --git a/quickwit/quickwit-proto/src/ingest/ingester.rs b/quickwit/quickwit-proto/src/ingest/ingester.rs index d2da3f8d9bd..0236380f8f5 100644 --- a/quickwit/quickwit-proto/src/ingest/ingester.rs +++ b/quickwit/quickwit-proto/src/ingest/ingester.rs @@ -72,11 +72,39 @@ impl IngesterStatus { Self::Unspecified => "unspecified", Self::Initializing => "initializing", Self::Ready => "ready", +            Self::Retiring => "retiring", Self::Decommissioning => "decommissioning", Self::Decommissioned => "decommissioned", Self::Failed => "failed", } } + +    pub fn from_json_str_name(value: &str) -> Option<Self> { +        match value { +            "unspecified" => Some(Self::Unspecified), +            "initializing" => Some(Self::Initializing), +            "ready" => Some(Self::Ready), +            "retiring" => Some(Self::Retiring), +            "decommissioning" => Some(Self::Decommissioning), +            "decommissioned" => Some(Self::Decommissioned), +            "failed" => Some(Self::Failed), +            _ => None, +        } +    } + +    pub fn is_ready(&self) -> bool { +        matches!(self, Self::Ready) +    } + +    pub fn accepts_write_requests(&self) -> bool { +        matches!(self, Self::Ready | Self::Retiring) +    } +} + +impl std::fmt::Display for IngesterStatus { +    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { +        write!(f, "{}", self.as_json_str_name()) +    } } impl 
OpenFetchStreamRequest { diff --git a/quickwit/quickwit-proto/src/lib.rs b/quickwit/quickwit-proto/src/lib.rs index f4ddb734d2a..f89fdb97687 100644 --- a/quickwit/quickwit-proto/src/lib.rs +++ b/quickwit/quickwit-proto/src/lib.rs @@ -28,7 +28,8 @@ use tracing_opentelemetry::OpenTelemetrySpanExt; pub mod cluster; pub mod control_plane; -pub use {bytes, tonic}; +pub use bytes; +pub use tonic; pub mod developer; pub mod error; mod getters; diff --git a/quickwit/quickwit-query/src/elastic_query_dsl/bool_query.rs b/quickwit/quickwit-query/src/elastic_query_dsl/bool_query.rs index da87d498139..a744f0b3801 100644 --- a/quickwit/quickwit-query/src/elastic_query_dsl/bool_query.rs +++ b/quickwit/quickwit-query/src/elastic_query_dsl/bool_query.rs @@ -21,10 +21,9 @@ use crate::not_nan_f32::NotNaNf32; use crate::query_ast::{self, QueryAst}; /// # Unsupported features -/// - minimum_should_match /// - named queries #[serde_as] -#[derive(Deserialize, Debug, PartialEq, Eq, Clone)] +#[derive(Deserialize, Debug, PartialEq, Clone)] #[serde(deny_unknown_fields)] pub struct BoolQuery { #[serde_as(deserialize_as = "DefaultOnNull>")] @@ -43,8 +42,14 @@ pub struct BoolQuery { pub boost: Option, #[serde(default)] pub minimum_should_match: Option, + #[serde(alias = "adjust_pure_negative", default, skip_serializing)] + _adjust_pure_negative: Option, } +// `IgnoredAny` implements `PartialEq` but not `Eq`, so we derive `PartialEq` +// and manually assert `Eq` (safe because `IgnoredAny` is a unit struct). 
+impl Eq for BoolQuery {} + #[derive(Deserialize, Debug, Eq, PartialEq, Clone)] #[serde(untagged)] pub enum MinimumShouldMatch { @@ -126,6 +131,7 @@ impl BoolQuery { filter: Vec::new(), boost: None, minimum_should_match: None, + _adjust_pure_negative: None, } } } @@ -199,7 +205,8 @@ mod tests { should: Vec::new(), filter: Vec::new(), boost: None, - minimum_should_match: None + minimum_should_match: None, + _adjust_pure_negative: None, } ); } @@ -220,6 +227,7 @@ mod tests { filter: vec![term_query_from_field_value("product_id", "2").into(),], boost: None, minimum_should_match: None, + _adjust_pure_negative: None, } ); } @@ -243,10 +251,25 @@ mod tests { filter: Vec::new(), boost: None, minimum_should_match: None, + _adjust_pure_negative: None, } ); } + #[test] + fn test_dsl_bool_query_deserialize_adjust_pure_negative() { + let bool_query_json = r#"{ + "must": [ + { "term": {"product_id": {"value": "1" }} } + ], + "adjust_pure_negative": true + }"#; + let bool_query: BoolQuery = serde_json::from_str(bool_query_json).unwrap(); + assert!(bool_query._adjust_pure_negative.is_some()); + assert_eq!(bool_query.must.len(), 1); + bool_query.convert_to_query_ast().unwrap(); + } + #[test] fn test_dsl_bool_query_deserialize_minimum_should_match() { let bool_query: super::BoolQuery = serde_json::from_str( diff --git a/quickwit/quickwit-query/src/elastic_query_dsl/range_query.rs b/quickwit/quickwit-query/src/elastic_query_dsl/range_query.rs index c28fde94ee0..4a062d08fcf 100644 --- a/quickwit/quickwit-query/src/elastic_query_dsl/range_query.rs +++ b/quickwit/quickwit-query/src/elastic_query_dsl/range_query.rs @@ -39,6 +39,14 @@ pub struct RangeQueryParams { boost: Option, #[serde(default)] format: Option, + #[serde(default)] + from: Option, + #[serde(default)] + to: Option, + #[serde(default)] + include_lower: Option, + #[serde(default)] + include_upper: Option, } pub type RangeQuery = OneFieldMap; @@ -53,7 +61,34 @@ impl ConvertibleToQueryAst for RangeQuery { lte, boost, format, 
+ from, + to, + include_lower, + include_upper, } = self.value; + + let (mut gt, mut gte, mut lt, mut lte) = (gt, gte, lt, lte); + if let Some(from_val) = from + && gt.is_none() + && gte.is_none() + { + if include_lower.unwrap_or(true) { + gte = Some(from_val); + } else { + gt = Some(from_val); + } + } + if let Some(to_val) = to + && lt.is_none() + && lte.is_none() + { + if include_upper.unwrap_or(true) { + lte = Some(to_val); + } else { + lt = Some(to_val); + } + } + let (gt, gte, lt, lte) = if let Some(JsonLiteral::String(java_date_format)) = format { let parser = StrptimeParser::from_java_datetime_format(&java_date_format) .map_err(|err| anyhow::anyhow!("failed to parse range query date format. {err}"))?; @@ -121,6 +156,7 @@ mod tests { lte: None, boost: None, format: JsonLiteral::String("yyyy-MM-dd['T'HH:mm:ss]".to_string()).into(), + ..Default::default() }; let range_query: ElasticRangeQuery = ElasticRangeQuery { field: "date".to_string(), @@ -138,6 +174,51 @@ mod tests { )); } + fn into_json_number(n: u64) -> JsonLiteral { + JsonLiteral::Number(serde_json::Number::from(n)) + } + + #[test] + fn test_range_query_with_from_to_inclusive() { + let range_json = + r#"{"score": {"from": 50, "to": 100, "include_lower": true, "include_upper": true}}"#; + let range_query: ElasticRangeQuery = serde_json::from_str(range_json).unwrap(); + let ast = range_query.convert_to_query_ast().unwrap(); + let QueryAst::Range(rq) = ast else { + panic!("expected Range, got {ast:?}"); + }; + assert_eq!(rq.field, "score"); + assert_eq!(rq.lower_bound, Bound::Included(into_json_number(50))); + assert_eq!(rq.upper_bound, Bound::Included(into_json_number(100))); + } + + #[test] + fn test_range_query_with_from_to_exclusive() { + let range_json = + r#"{"score": {"from": 50, "to": 100, "include_lower": false, "include_upper": false}}"#; + let range_query: ElasticRangeQuery = serde_json::from_str(range_json).unwrap(); + let ast = range_query.convert_to_query_ast().unwrap(); + let 
QueryAst::Range(rq) = ast else { + panic!("expected Range, got {ast:?}"); + }; + assert_eq!(rq.field, "score"); + assert_eq!(rq.lower_bound, Bound::Excluded(into_json_number(50))); + assert_eq!(rq.upper_bound, Bound::Excluded(into_json_number(100))); + } + + #[test] + fn test_range_query_with_from_to_defaults() { + let range_json = r#"{"score": {"from": 50, "to": 100}}"#; + let range_query: ElasticRangeQuery = serde_json::from_str(range_json).unwrap(); + let ast = range_query.convert_to_query_ast().unwrap(); + let QueryAst::Range(rq) = ast else { + panic!("expected Range, got {ast:?}"); + }; + assert_eq!(rq.field, "score"); + assert_eq!(rq.lower_bound, Bound::Included(into_json_number(50))); + assert_eq!(rq.upper_bound, Bound::Included(into_json_number(100))); + } + #[test] fn test_date_range_query_with_strict_date_optional_time_format() { let range_query_params = ElasticRangeQueryParams { @@ -147,6 +228,7 @@ mod tests { lte: Some(JsonLiteral::String("2024-09-28T10:22:55.797Z".to_string())), boost: None, format: JsonLiteral::String("strict_date_optional_time".to_string()).into(), + ..Default::default() }; let range_query: ElasticRangeQuery = ElasticRangeQuery { field: "timestamp".to_string(), diff --git a/quickwit/quickwit-search/src/error.rs b/quickwit/quickwit-search/src/error.rs index 21141a3035a..073a1ddedc3 100644 --- a/quickwit/quickwit-search/src/error.rs +++ b/quickwit/quickwit-search/src/error.rs @@ -14,6 +14,7 @@ use itertools::Itertools; use quickwit_common::rate_limited_error; +use quickwit_common::retry::Retryable; use quickwit_doc_mapper::QueryParserError; use quickwit_proto::error::grpc_error_to_grpc_status; use quickwit_proto::metastore::{EntityKind, MetastoreError}; @@ -175,6 +176,12 @@ impl From for SearchError { } } +impl Retryable for SearchError { + fn is_retryable(&self) -> bool { + matches!(self, SearchError::TooManyRequests | SearchError::Timeout(_)) + } +} + impl From for SearchError { fn from(join_error: JoinError) -> SearchError { 
SearchError::Internal(format!("spawned task in root join failed: {join_error}")) diff --git a/quickwit/quickwit-search/src/invoker.rs b/quickwit/quickwit-search/src/invoker.rs index 8a5ad183b63..f515bb4a9c7 100644 --- a/quickwit/quickwit-search/src/invoker.rs +++ b/quickwit/quickwit-search/src/invoker.rs @@ -36,3 +36,27 @@ pub trait LambdaLeafSearchInvoker: Send + Sync + 'static { request: LeafSearchRequest, ) -> Result, SearchError>; } + +#[async_trait] +impl LambdaLeafSearchInvoker for Box +where T: LambdaLeafSearchInvoker + ?Sized +{ + async fn invoke_leaf_search( + &self, + request: LeafSearchRequest, + ) -> Result, SearchError> { + (**self).invoke_leaf_search(request).await + } +} + +#[async_trait] +impl LambdaLeafSearchInvoker for std::sync::Arc +where T: LambdaLeafSearchInvoker + ?Sized +{ + async fn invoke_leaf_search( + &self, + request: LeafSearchRequest, + ) -> Result, SearchError> { + (**self).invoke_leaf_search(request).await + } +} diff --git a/quickwit/quickwit-search/src/lib.rs b/quickwit/quickwit-search/src/lib.rs index 89b45a69014..4330b4a6de5 100644 --- a/quickwit/quickwit-search/src/lib.rs +++ b/quickwit/quickwit-search/src/lib.rs @@ -85,7 +85,7 @@ pub use crate::error::{SearchError, parse_grpc_error}; use crate::fetch_docs::fetch_docs; pub use crate::invoker::LambdaLeafSearchInvoker; pub use crate::root::{ - IndexMetasForLeafSearch, SearchJob, check_all_index_metadata_found, jobs_to_leaf_request, + IndexMetasForLeafSearch, SearchJob, ensure_all_indexes_found, jobs_to_leaf_request, root_search, search_plan, }; pub use crate::search_job_placer::{Job, SearchJobPlacer}; @@ -228,7 +228,7 @@ pub async fn resolve_index_patterns( ListIndexesMetadataRequest::all() } else { ListIndexesMetadataRequest { - index_id_patterns: index_id_patterns.to_owned(), + index_id_patterns: index_id_patterns.to_vec(), } }; @@ -238,7 +238,7 @@ pub async fn resolve_index_patterns( .await? 
.deserialize_indexes_metadata() .await?; - check_all_index_metadata_found(&indexes_metadata, index_id_patterns)?; + ensure_all_indexes_found(&indexes_metadata, index_id_patterns)?; Ok(indexes_metadata) } @@ -287,7 +287,7 @@ pub async fn single_node_search( let search_job_placer = SearchJobPlacer::new(searcher_pool.clone()); let cluster_client = ClusterClient::new(search_job_placer); let searcher_config = SearcherConfig::default(); - let searcher_context = Arc::new(SearcherContext::new(searcher_config, None, None)); + let searcher_context = Arc::new(SearcherContext::new_without_invoker(searcher_config, None)); let search_service = Arc::new(SearchServiceImpl::new( metastore.clone(), storage_resolver, diff --git a/quickwit/quickwit-search/src/list_fields.rs b/quickwit/quickwit-search/src/list_fields.rs index f4cf173fe08..0c3faf73009 100644 --- a/quickwit/quickwit-search/src/list_fields.rs +++ b/quickwit/quickwit-search/src/list_fields.rs @@ -15,7 +15,7 @@ use std::collections::{HashMap, HashSet}; use std::path::Path; use std::str::FromStr; -use std::sync::{Arc, LazyLock}; +use std::sync::Arc; use anyhow::Context; use futures::future; @@ -24,6 +24,8 @@ use itertools::Itertools; use quickwit_common::rate_limited_warn; use quickwit_common::shared_consts::{FIELD_PRESENCE_FIELD_NAME, SPLIT_FIELDS_FILE_NAME}; use quickwit_common::uri::Uri; +use quickwit_config::build_doc_mapper; +use quickwit_doc_mapper::tag_pruning::extract_tags_from_query; use quickwit_metastore::SplitMetadata; use quickwit_proto::metastore::MetastoreServiceClient; use quickwit_proto::search::{ @@ -31,6 +33,7 @@ use quickwit_proto::search::{ ListFieldsResponse, SplitIdAndFooterOffsets, deserialize_split_fields, }; use quickwit_proto::types::{IndexId, IndexUid}; +use quickwit_query::query_ast::QueryAst; use quickwit_storage::Storage; use crate::leaf::open_split_bundle; @@ -41,16 +44,6 @@ use crate::{ search_thread_pool, }; -/// QW_FIELD_LIST_SIZE_LIMIT defines a hard limit on the number of fields that -/// 
can be returned (error otherwise). -/// -/// Having many fields can happen when a user is creating fields dynamically in -/// a JSON type with random field names. This leads to huge memory consumption -/// when building the response. This is a workaround until a way is found to -/// prune the long tail of rare fields. -static FIELD_LIST_SIZE_LIMIT: LazyLock = - LazyLock::new(|| quickwit_common::get_from_env("QW_FIELD_LIST_SIZE_LIMIT", 100_000, false)); - const DYNAMIC_FIELD_PREFIX: &str = "_dynamic."; /// Get the list of fields in the given split. @@ -224,48 +217,60 @@ fn merge_same_field_group( } } -/// Merge iterators of ListFieldsEntryResponse into a `Vec`. -/// -/// The iterators need to be sorted by (field_name, fieldtype) -fn merge_leaf_list_fields( - iterators: Vec>, -) -> crate::Result> { - let merged = iterators - .into_iter() - .kmerge_by(|a, b| (&a.field_name, a.field_type) <= (&b.field_name, b.field_type)); - let mut responses = Vec::new(); - - let mut current_group: Vec = Vec::new(); - // Build ListFieldsEntryResponse from current group - let flush_group = |responses: &mut Vec<_>, current_group: &mut Vec| { - let entry = merge_same_field_group(current_group); - responses.push(entry); - current_group.clear(); - }; +struct ListFieldMerger> { + merged: itertools::KMergeBy bool>, + current_group: Vec, +} - for entry in merged { - if let Some(last) = current_group.last() - && (last.field_name != entry.field_name || last.field_type != entry.field_type) - { - flush_group(&mut responses, &mut current_group); - } - if responses.len() >= *FIELD_LIST_SIZE_LIMIT { - return Err(SearchError::Internal(format!( - "list fields response exceeded {} fields", - *FIELD_LIST_SIZE_LIMIT - ))); +impl> ListFieldMerger { + fn new(iterators: impl Iterator) -> Self { + let cmp_fn: fn(&ListFieldsEntryResponse, &ListFieldsEntryResponse) -> bool = + |a, b| field_order(a, b) == std::cmp::Ordering::Less; + + let merged = iterators.kmerge_by(cmp_fn); + Self { + merged, + current_group: 
Vec::new(), } - current_group.push(entry); - } - if !current_group.is_empty() { - flush_group(&mut responses, &mut current_group); } +} + +impl> Iterator for ListFieldMerger { + type Item = ListFieldsEntryResponse; - Ok(responses) + fn next(&mut self) -> Option { + loop { + match self.merged.next() { + Some(entry) => { + if let Some(last) = self.current_group.last() + && (last.field_name != entry.field_name + || last.field_type != entry.field_type) + { + let result = merge_same_field_group(&mut self.current_group); + self.current_group.clear(); + self.current_group.push(entry); + return Some(result); + } + self.current_group.push(entry); + } + None => { + if !self.current_group.is_empty() { + let result = merge_same_field_group(&mut self.current_group); + self.current_group.clear(); + return Some(result); + } + return None; + } + } + } + } } // Returns true if any of the patterns match the field name. fn matches_any_pattern(field_name: &str, field_patterns: &[FieldPattern]) -> bool { + if field_patterns.is_empty() { + return true; + } field_patterns .iter() .any(|pattern| pattern.matches(field_name)) @@ -274,25 +279,37 @@ fn matches_any_pattern(field_name: &str, field_patterns: &[FieldPattern]) -> boo enum FieldPattern { Match { field: String }, Wildcard { prefix: String, suffix: String }, + Contains { infix: String }, } impl FromStr for FieldPattern { type Err = crate::SearchError; fn from_str(field_pattern: &str) -> crate::Result { - match field_pattern.find('*') { + if field_pattern.starts_with('*') && field_pattern.ends_with('*') { + let infix = field_pattern.trim_matches('*').to_string(); + if infix.contains('*') { + return Err(crate::SearchError::InvalidArgument(format!( + "invalid field pattern `{field_pattern}`: 'contains' type patterns can't have a wildcard in the middle" + ))); + } + return Ok(Self::Contains { infix }); + } + + match field_pattern.split_once('*') { None => Ok(FieldPattern::Match { field: field_pattern.to_string(), }), - Some(pos) => { - 
let prefix = field_pattern[..pos].to_string(); - let suffix = field_pattern[pos + 1..].to_string(); + Some((prefix, suffix)) => { if suffix.contains("*") { return Err(crate::SearchError::InvalidArgument(format!( "invalid field pattern `{field_pattern}`: we only support one wildcard" ))); } - Ok(FieldPattern::Wildcard { prefix, suffix }) + Ok(FieldPattern::Wildcard { + prefix: prefix.to_string(), + suffix: suffix.to_string(), + }) } } } @@ -305,11 +322,14 @@ impl FieldPattern { FieldPattern::Wildcard { prefix, suffix } => { field_name.starts_with(prefix) && field_name.ends_with(suffix) } + FieldPattern::Contains { infix } => field_name.contains(infix), } } } /// `leaf` step of list fields. +/// +/// Returns field metadata from the assigned splits. pub async fn leaf_list_fields( index_id: IndexId, index_storage: Arc, @@ -322,6 +342,12 @@ pub async fn leaf_list_fields( .map(|pattern_str| FieldPattern::from_str(pattern_str)) .collect::>()?; + // If no splits, return empty response + if split_ids.is_empty() { + return Ok(ListFieldsResponse::default()); + } + + // Get fields from all splits let single_split_list_fields_futures: Vec<_> = split_ids .iter() .map(|split_id| { @@ -356,26 +382,25 @@ pub async fn leaf_list_fields( } } - let filtered_list_fields_sorted_iters: Vec<_> = single_split_list_fields_vec - .into_iter() - .map(|list_fields_sorted| { - list_fields_sorted.into_iter().filter(|field| { - if field_patterns.is_empty() { - true - } else { - matches_any_pattern(&field.field_name, &field_patterns) - } - }) - }) - .collect(); - merge_leaf_list_fields(filtered_list_fields_sorted_iters) + let filtered_list_fields_sorted_iters = + single_split_list_fields_vec + .into_iter() + .map(|list_fields_sorted| { + list_fields_sorted + .into_iter() + .filter(|field| matches_any_pattern(&field.field_name, &field_patterns)) + }); + + ListFieldMerger::new(filtered_list_fields_sorted_iters).collect::>() }) .await - .context("failed to merge single split list fields")??; - 
Ok(ListFieldsResponse { fields }) + .context("failed to merge single split list fields")?; + + let num_fields = fields.len() as u64; + Ok(ListFieldsResponse { fields, num_fields }) } -/// Index metas needed for executing a leaf search request. +/// Index metas needed for executing a leaf list fields request. #[derive(Clone, Debug)] pub struct IndexMetasForLeafSearch { /// Index id. @@ -397,31 +422,81 @@ pub async fn root_list_fields( resolve_index_patterns(&list_fields_req.index_id_patterns[..], &mut metastore).await?; // The request contains a wildcard, but couldn't find any index. if indexes_metadata.is_empty() { - return Ok(ListFieldsResponse { fields: Vec::new() }); + return Ok(ListFieldsResponse::default()); } - let index_uid_to_index_meta: HashMap = indexes_metadata - .iter() - .map(|index_metadata| { - let index_metadata_for_leaf_search = IndexMetasForLeafSearch { - index_uri: index_metadata.index_uri().clone(), - index_id: index_metadata.index_config.index_id.to_string(), + + // Build index metadata map and extract timestamp field for time range refinement + let mut index_uid_to_index_meta: HashMap = HashMap::new(); + let mut index_uids: Vec = Vec::new(); + let mut timestamp_field_opt: Option = None; + let mut query_ast_resolved_opt: Option = None; + + for index_metadata in indexes_metadata { + let doc_mapper = build_doc_mapper( + &index_metadata.index_config.doc_mapping, + &index_metadata.index_config.search_settings, + )?; + + let query_ast_resolved_for_index: Option = + if let Some(query_str) = &list_fields_req.query_ast { + let query_ast: QueryAst = serde_json::from_str(query_str) + .map_err(|err| SearchError::InvalidQuery(err.to_string()))?; + + let resolved = query_ast + .parse_user_query(doc_mapper.default_search_fields()) + .map_err(|err| SearchError::InvalidQuery(err.to_string()))?; + + Some(resolved) + } else { + None }; - ( - index_metadata.index_uid.clone(), - index_metadata_for_leaf_search, - ) - }) - .collect(); - let index_uids: Vec = 
indexes_metadata - .into_iter() - .map(|index_metadata| index_metadata.index_uid) - .collect(); + // Resolve query and parse AST (use first index's field) + if query_ast_resolved_opt.is_none() && query_ast_resolved_for_index.is_some() { + query_ast_resolved_opt = query_ast_resolved_for_index; + } + + // Extract timestamp field for time range refinement (use first index's field) + if timestamp_field_opt.is_none() && list_fields_req.query_ast.is_some() { + timestamp_field_opt = doc_mapper.timestamp_field_name().map(|s| s.to_string()); + } + + let index_metadata_for_leaf_search = IndexMetasForLeafSearch { + index_uri: index_metadata.index_uri().clone(), + index_id: index_metadata.index_config.index_id.to_string(), + }; + + index_uids.push(index_metadata.index_uid.clone()); + index_uid_to_index_meta.insert( + index_metadata.index_uid.clone(), + index_metadata_for_leaf_search, + ); + } + + // Extract tags and refine time range from query_ast for split pruning + let mut start_timestamp = list_fields_req.start_timestamp; + let mut end_timestamp = list_fields_req.end_timestamp; + let tags_filter_opt = if let Some(query_ast) = query_ast_resolved_opt { + // Refine time range from query AST if timestamp field is available + if let Some(ref timestamp_field) = timestamp_field_opt { + crate::root::refine_start_end_timestamp_from_ast( + &query_ast, + timestamp_field, + &mut start_timestamp, + &mut end_timestamp, + ); + } + + extract_tags_from_query(query_ast) + } else { + None + }; + let split_metadatas: Vec = list_relevant_splits( index_uids, - list_fields_req.start_timestamp, - list_fields_req.end_timestamp, - None, + start_timestamp, + end_timestamp, + tags_filter_opt, &mut metastore, ) .await?; @@ -442,18 +517,31 @@ pub async fn root_list_fields( } } let leaf_list_fields_protos: Vec = try_join_all(leaf_request_tasks).await?; - let fields = search_thread_pool() + let (fields, num_fields) = search_thread_pool() .run_cpu_intensive(move || { - let leaf_list_fields = 
leaf_list_fields_protos + let fields_iter = leaf_list_fields_protos .into_iter() - .map(|leaf_list_fields_proto| leaf_list_fields_proto.fields.into_iter()) + .map(|leaf_list_fields_proto| leaf_list_fields_proto.fields.into_iter()); + let mut fields_iter = ListFieldMerger::new(fields_iter); + let skipped = fields_iter + .by_ref() + .take(list_fields_req.start_offset as usize) + .count(); + + let fields: Vec = fields_iter + .by_ref() + .take(list_fields_req.max_fields.unwrap_or(u64::MAX) as usize) .collect(); - merge_leaf_list_fields(leaf_list_fields) + + let remaining = fields_iter.count(); + let num_fields = (skipped + fields.len() + remaining) as u64; + + (fields, num_fields) }) .await - .context("failed to merge leaf list fields responses")??; + .context("failed to merge leaf list fields responses")?; - Ok(ListFieldsResponse { fields }) + Ok(ListFieldsResponse { fields, num_fields }) } /// Builds a list of [`LeafListFieldsRequest`], one per index, from a list of [`SearchJob`]. @@ -512,11 +600,14 @@ mod tests { non_aggregatable_index_ids: Vec::new(), index_ids: vec!["index1".to_string()], }; - let resp = merge_leaf_list_fields(vec![ - vec![entry1.clone()].into_iter(), - vec![entry2.clone()].into_iter(), - ]) - .unwrap(); + let resp: Vec<_> = ListFieldMerger::new( + vec![ + vec![entry1.clone()].into_iter(), + vec![entry2.clone()].into_iter(), + ] + .into_iter(), + ) + .collect::>(); assert_eq!(resp, vec![entry1]); } #[test] @@ -539,11 +630,14 @@ mod tests { non_aggregatable_index_ids: Vec::new(), index_ids: vec!["index1".to_string()], }; - let resp = merge_leaf_list_fields(vec![ - vec![entry1.clone()].into_iter(), - vec![entry2.clone()].into_iter(), - ]) - .unwrap(); + let resp: Vec<_> = ListFieldMerger::new( + vec![ + vec![entry1.clone()].into_iter(), + vec![entry2.clone()].into_iter(), + ] + .into_iter(), + ) + .collect::>(); assert_eq!(resp, vec![entry1, entry2]); } #[test] @@ -566,11 +660,14 @@ mod tests { non_aggregatable_index_ids: Vec::new(), index_ids: 
vec!["index2".to_string()], }; - let resp = merge_leaf_list_fields(vec![ - vec![entry1.clone()].into_iter(), - vec![entry2.clone()].into_iter(), - ]) - .unwrap(); + let resp: Vec<_> = ListFieldMerger::new( + vec![ + vec![entry1.clone()].into_iter(), + vec![entry2.clone()].into_iter(), + ] + .into_iter(), + ) + .collect::>(); let expected = ListFieldsEntryResponse { field_name: "field1".to_string(), field_type: ListFieldType::Str as i32, @@ -602,11 +699,14 @@ mod tests { non_aggregatable_index_ids: Vec::new(), index_ids: vec!["index2".to_string()], }; - let resp = merge_leaf_list_fields(vec![ - vec![entry1.clone()].into_iter(), - vec![entry2.clone()].into_iter(), - ]) - .unwrap(); + let resp: Vec<_> = ListFieldMerger::new( + vec![ + vec![entry1.clone()].into_iter(), + vec![entry2.clone()].into_iter(), + ] + .into_iter(), + ) + .collect::>(); let expected = ListFieldsEntryResponse { field_name: "field1".to_string(), field_type: ListFieldType::Str as i32, @@ -647,11 +747,14 @@ mod tests { non_aggregatable_index_ids: Vec::new(), index_ids: vec!["index1".to_string()], }; - let resp = merge_leaf_list_fields(vec![ - vec![entry1.clone(), entry2.clone()].into_iter(), - vec![entry3.clone()].into_iter(), - ]) - .unwrap(); + let resp: Vec<_> = ListFieldMerger::new( + vec![ + vec![entry1.clone(), entry2.clone()].into_iter(), + vec![entry3.clone()].into_iter(), + ] + .into_iter(), + ) + .collect::>(); assert_eq!(resp, vec![entry1.clone(), entry3.clone()]); } #[test] @@ -683,11 +786,14 @@ mod tests { non_aggregatable_index_ids: Vec::new(), index_ids: vec!["index1".to_string()], }; - let resp = merge_leaf_list_fields(vec![ - vec![entry1.clone(), entry3.clone()].into_iter(), - vec![entry2.clone()].into_iter(), - ]) - .unwrap(); + let resp: Vec<_> = ListFieldMerger::new( + vec![ + vec![entry1.clone(), entry3.clone()].into_iter(), + vec![entry2.clone()].into_iter(), + ] + .into_iter(), + ) + .collect::>(); assert_eq!(resp, vec![entry1.clone(), entry3.clone()]); } #[test] @@ -719,11 
+825,14 @@ mod tests { non_aggregatable_index_ids: Vec::new(), index_ids: vec!["index1".to_string()], }; - let resp = merge_leaf_list_fields(vec![ - vec![entry1.clone(), entry3.clone()].into_iter(), - vec![entry2.clone()].into_iter(), - ]) - .unwrap(); + let resp: Vec<_> = ListFieldMerger::new( + vec![ + vec![entry1.clone(), entry3.clone()].into_iter(), + vec![entry2.clone()].into_iter(), + ] + .into_iter(), + ) + .collect::>(); assert_eq!(resp, vec![entry1.clone(), entry3.clone()]); } #[test] @@ -750,11 +859,15 @@ mod tests { non_aggregatable_index_ids: Vec::new(), index_ids: vec!["index4".to_string()], }; - let resp = merge_leaf_list_fields(vec![ - vec![entry1.clone()].into_iter(), - vec![entry2.clone()].into_iter(), - ]) - .unwrap(); + let resp: Vec<_> = ListFieldMerger::new( + vec![ + vec![entry1.clone()].into_iter(), + vec![entry2.clone()].into_iter(), + ] + .into_iter(), + ) + .collect::>(); + let expected = ListFieldsEntryResponse { field_name: "field1".to_string(), field_type: ListFieldType::Str as i32, @@ -772,6 +885,141 @@ mod tests { assert_eq!(resp, vec![expected]); } + fn make_entry( + field_name: &str, + field_type: ListFieldType, + index_id: &str, + ) -> ListFieldsEntryResponse { + ListFieldsEntryResponse { + field_name: field_name.to_string(), + field_type: field_type as i32, + searchable: true, + aggregatable: true, + non_searchable_index_ids: Vec::new(), + non_aggregatable_index_ids: Vec::new(), + index_ids: vec![index_id.to_string()], + } + } + + #[test] + fn merge_iter_skip_take_basic_pagination() { + // 4 distinct fields spread across 2 leaves, paginate with skip=1, take=2 + let leaf1 = vec![ + make_entry("aaa", ListFieldType::Str, "idx1"), + make_entry("ccc", ListFieldType::Str, "idx1"), + ]; + let leaf2 = vec![ + make_entry("bbb", ListFieldType::Str, "idx2"), + make_entry("ddd", ListFieldType::Str, "idx2"), + ]; + + let all: Vec<_> = ListFieldMerger::new( + vec![leaf1.clone().into_iter(), leaf2.clone().into_iter()].into_iter(), + ) + 
.collect::>(); + assert_eq!(all.len(), 4); + assert_eq!(all[0].field_name, "aaa"); + assert_eq!(all[1].field_name, "bbb"); + assert_eq!(all[2].field_name, "ccc"); + assert_eq!(all[3].field_name, "ddd"); + + // Page 2: skip 1, take 2 → ["bbb", "ccc"] + let page: Vec<_> = ListFieldMerger::new( + vec![leaf1.clone().into_iter(), leaf2.clone().into_iter()].into_iter(), + ) + .skip(1) + .take(2) + .collect::>(); + assert_eq!(page.len(), 2); + assert_eq!(page[0].field_name, "bbb"); + assert_eq!(page[1].field_name, "ccc"); + } + + #[test] + fn merge_iter_skip_take_with_grouping() { + // Same field appears in multiple leaves — grouping must happen before pagination + let leaf1 = vec![ + make_entry("aaa", ListFieldType::Str, "idx1"), + make_entry("bbb", ListFieldType::Str, "idx1"), + ]; + let leaf2 = vec![ + make_entry("aaa", ListFieldType::Str, "idx2"), + make_entry("ccc", ListFieldType::Str, "idx2"), + ]; + + // Without pagination: 3 merged fields [aaa(idx1+idx2), bbb, ccc] + let all: Vec<_> = ListFieldMerger::new( + vec![leaf1.clone().into_iter(), leaf2.clone().into_iter()].into_iter(), + ) + .collect::>(); + assert_eq!(all.len(), 3); + assert_eq!(all[0].field_name, "aaa"); + assert_eq!( + all[0].index_ids, + vec!["idx1".to_string(), "idx2".to_string()] + ); + assert_eq!(all[1].field_name, "bbb"); + assert_eq!(all[2].field_name, "ccc"); + + // skip=1, take=1 → ["bbb"] (skips the merged "aaa") + let page: Vec<_> = ListFieldMerger::new( + vec![leaf1.clone().into_iter(), leaf2.clone().into_iter()].into_iter(), + ) + .skip(1) + .take(1) + .collect::>(); + assert_eq!(page.len(), 1); + assert_eq!(page[0].field_name, "bbb"); + } + + #[test] + fn merge_iter_skip_beyond_end() { + let leaf1 = vec![make_entry("aaa", ListFieldType::Str, "idx1")]; + let page: Vec<_> = ListFieldMerger::new(vec![leaf1.into_iter()].into_iter()) + .skip(10) + .take(5) + .collect::>(); + assert!(page.is_empty()); + } + + #[test] + fn merge_iter_take_more_than_available() { + let leaf1 = vec![ + 
make_entry("aaa", ListFieldType::Str, "idx1"), + make_entry("bbb", ListFieldType::Str, "idx1"), + ]; + let page: Vec<_> = ListFieldMerger::new(vec![leaf1.into_iter()].into_iter()) + .take(100) + .collect::>(); + assert_eq!(page.len(), 2); + } + + #[test] + fn merge_iter_pagination_with_mixed_types() { + // Same field name but different types → treated as separate entries + let leaf1 = vec![ + make_entry("field", ListFieldType::Str, "idx1"), + make_entry("field", ListFieldType::U64, "idx1"), + ]; + let leaf2 = vec![make_entry("field", ListFieldType::Str, "idx2")]; + + // 2 merged entries: field/Str(idx1+idx2), field/U64(idx1) + let all: Vec<_> = ListFieldMerger::new( + vec![leaf1.clone().into_iter(), leaf2.clone().into_iter()].into_iter(), + ) + .collect::>(); + assert_eq!(all.len(), 2); + + // skip=1 → only field/U64 + let page: Vec<_> = + ListFieldMerger::new(vec![leaf1.into_iter(), leaf2.into_iter()].into_iter()) + .skip(1) + .take(10) + .collect::>(); + assert_eq!(page.len(), 1); + assert_eq!(page[0].field_type, ListFieldType::U64 as i32); + } + #[test] fn test_field_pattern() { let prefix_pattern = FieldPattern::from_str("toto*").unwrap(); @@ -797,6 +1045,15 @@ mod tests { assert!(!inner_pattern.matches("tito")); assert!(inner_pattern.matches("towhateverti")); + let contains_pattern = FieldPattern::from_str("*my_field*").unwrap(); + assert!(!contains_pattern.matches("")); + assert!(!contains_pattern.matches("my_fiel")); + assert!(contains_pattern.matches("my_field")); + assert!(contains_pattern.matches("prefix_my_field")); + assert!(contains_pattern.matches("my_field_suffix")); + assert!(contains_pattern.matches("prefix_my_field_suffix")); + assert!(FieldPattern::from_str("to**").is_err()); + assert!(FieldPattern::from_str("*a*b*").is_err()); } } diff --git a/quickwit/quickwit-search/src/root.rs b/quickwit/quickwit-search/src/root.rs index 5d5610ca4d9..a8ca8a6cffc 100644 --- a/quickwit/quickwit-search/src/root.rs +++ b/quickwit/quickwit-search/src/root.rs @@ 
-1083,36 +1083,34 @@ fn finalize_aggregation_if_any( /// We put this check here and not in the metastore to make sure the logic is independent /// of the metastore implementation, and some different use cases could require different /// behaviors. This specification was principally motivated by #4042. -pub fn check_all_index_metadata_found( - index_metadatas: &[IndexMetadata], +pub fn ensure_all_indexes_found( + indexes_metadata: &[IndexMetadata], index_id_patterns: &[String], ) -> crate::Result<()> { let mut index_ids: HashSet<&str> = index_id_patterns .iter() - .map(|index_ptn| index_ptn.as_str()) - .filter(|index_ptn| !index_ptn.contains('*') && !index_ptn.starts_with('-')) + .filter(|pattern| !pattern.contains('*') && !pattern.starts_with('-')) + .map(|pattern| pattern.as_str()) .collect(); if index_ids.is_empty() { - // All of the patterns are wildcard patterns. + // All the patterns are wildcard or negative patterns. return Ok(()); } - - for index_metadata in index_metadatas { - index_ids.remove(index_metadata.index_uid.index_id.as_str()); + for index_metadata in indexes_metadata { + index_ids.remove(index_metadata.index_id()); } - - if !index_ids.is_empty() { - let missing_index_ids = index_ids - .into_iter() - .map(|missing_index_id| missing_index_id.to_string()) - .collect(); - return Err(SearchError::IndexesNotFound { - index_ids: missing_index_ids, - }); + if index_ids.is_empty() { + return Ok(()); } + let not_found_index_ids = index_ids + .into_iter() + .map(|index_id| index_id.to_string()) + .collect(); - Ok(()) + Err(SearchError::IndexesNotFound { + index_ids: not_found_index_ids, + }) } async fn refine_and_list_matches( @@ -1171,10 +1169,7 @@ async fn plan_splits_for_root_search( .await?; if !search_request.ignore_missing_indexes { - check_all_index_metadata_found( - &indexes_metadata[..], - &search_request.index_id_patterns[..], - )?; + ensure_all_indexes_found(&indexes_metadata[..], &search_request.index_id_patterns[..])?; } if 
indexes_metadata.is_empty() { @@ -1287,10 +1282,7 @@ pub async fn search_plan( .await?; if !search_request.ignore_missing_indexes { - check_all_index_metadata_found( - &indexes_metadata[..], - &search_request.index_id_patterns[..], - )?; + ensure_all_indexes_found(&indexes_metadata[..], &search_request.index_id_patterns[..])?; } if indexes_metadata.is_empty() { return Ok(SearchPlanResponse { diff --git a/quickwit/quickwit-search/src/service.rs b/quickwit/quickwit-search/src/service.rs index 5ee74c16556..55fe014cba7 100644 --- a/quickwit/quickwit-search/src/service.rs +++ b/quickwit/quickwit-search/src/service.rs @@ -436,14 +436,26 @@ impl SearcherContext { #[cfg(test)] pub fn for_test() -> SearcherContext { let searcher_config = SearcherConfig::default(); - SearcherContext::new(searcher_config, None, None) + SearcherContext::new_without_invoker(searcher_config, None) + } + + /// Creates a new searcher context without a lambda invoker. + pub fn new_without_invoker( + searcher_config: SearcherConfig, + split_cache_opt: Option>, + ) -> Self { + Self::new( + searcher_config, + split_cache_opt, + None::>, + ) } /// Creates a new searcher context, given a searcher config, and an optional `SplitCache`. 
pub fn new( searcher_config: SearcherConfig, split_cache_opt: Option>, - lambda_invoker: Option>, + lambda_invoker: Option, ) -> Self { let global_split_footer_cache = MemorySizedCache::from_config( &searcher_config.split_footer_cache, @@ -463,6 +475,9 @@ impl SearcherContext { Some(searcher_config.aggregation_bucket_limit), ); + let lambda_invoker = + lambda_invoker.map(|invoker| Arc::new(invoker) as Arc); + Self { searcher_config, fast_fields_cache: storage_long_term_cache, diff --git a/quickwit/quickwit-search/src/tests.rs b/quickwit/quickwit-search/src/tests.rs index 61751e3d253..c8d851d06cb 100644 --- a/quickwit/quickwit-search/src/tests.rs +++ b/quickwit/quickwit-search/src/tests.rs @@ -1028,8 +1028,10 @@ async fn test_search_util(test_sandbox: &TestSandbox, query: &str) -> Vec { max_hits: 100, ..Default::default() }); - let searcher_context: Arc = - Arc::new(SearcherContext::new(SearcherConfig::default(), None, None)); + let searcher_context: Arc = Arc::new(SearcherContext::new_without_invoker( + SearcherConfig::default(), + None, + )); let search_response = single_doc_mapping_leaf_search( searcher_context, @@ -1666,7 +1668,10 @@ async fn test_single_node_list_terms() -> anyhow::Result<()> { .into_iter() .map(|split| extract_split_and_footer_offsets(&split.split_metadata)) .collect(); - let searcher_context = Arc::new(SearcherContext::new(SearcherConfig::default(), None, None)); + let searcher_context = Arc::new(SearcherContext::new_without_invoker( + SearcherConfig::default(), + None, + )); { let request = ListTermsRequest { diff --git a/quickwit/quickwit-serve/Cargo.toml b/quickwit/quickwit-serve/Cargo.toml index 16f8a36002d..2721aa719f3 100644 --- a/quickwit/quickwit-serve/Cargo.toml +++ b/quickwit/quickwit-serve/Cargo.toml @@ -47,6 +47,7 @@ tokio = { workspace = true } tokio-rustls = { workspace = true } tokio-stream = { workspace = true } tokio-util = { workspace = true } +tonic = { workspace = true } tonic-health = { workspace = true } tonic-reflection 
= { workspace = true } tower = { workspace = true, features = ["limit"] } @@ -72,7 +73,7 @@ quickwit-opentelemetry = { workspace = true } quickwit-proto = { workspace = true } quickwit-query = { workspace = true } quickwit-search = { workspace = true } -quickwit-lambda-client = { workspace = true } +quickwit-lambda-client = { workspace = true, optional = true } quickwit-storage = { workspace = true } quickwit-telemetry = { workspace = true } @@ -115,3 +116,6 @@ sqs-for-tests = [ "quickwit-indexing/sqs", "quickwit-indexing/sqs-test-helpers" ] +lambda = [ + "quickwit-lambda-client" +] diff --git a/quickwit/quickwit-serve/src/developer_api/server.rs b/quickwit/quickwit-serve/src/developer_api/server.rs index 3c8a0dd0982..680953f0509 100644 --- a/quickwit/quickwit-serve/src/developer_api/server.rs +++ b/quickwit/quickwit-serve/src/developer_api/server.rs @@ -76,10 +76,14 @@ impl DeveloperService for DeveloperApiServer { let cluster_snapshot = self.cluster.snapshot().await; + // We must redact sensitive information such as credentials. 
+ let mut node_config = (*self.node_config).clone(); + node_config.redact(); + let mut debug_info = json!({ "build_info": BuildInfo::get(), "runtime_info": RuntimeInfo::get(), - "node_config": self.node_config, + "node_config": node_config, "cluster_membership_info": json!({ "ready_nodes": cluster_snapshot.ready_nodes, "live_nodes": cluster_snapshot.live_nodes, @@ -137,7 +141,10 @@ mod tests { .await .unwrap(); - let node_config = Arc::new(NodeConfig::for_test()); + let mut node_config = NodeConfig::for_test(); + node_config.metastore_uri = + quickwit_common::uri::Uri::for_test("postgresql://username:password@db"); + let node_config = Arc::new(node_config); let developer_api_server = DeveloperApiServer { node_config, @@ -155,6 +162,11 @@ mod tests { assert!(debug_info["node_config"].is_object()); assert!(debug_info["cluster_membership_info"].is_object()); + assert_eq!( + debug_info["node_config"]["metastore_uri"], + "postgresql://username:***redacted***@db" + ); + // TODO: Test control plane and ingester debug info. 
} } diff --git a/quickwit/quickwit-serve/src/elasticsearch_api/filter.rs b/quickwit/quickwit-serve/src/elasticsearch_api/filter.rs index b8d2343f666..5f8ddd69f9f 100644 --- a/quickwit/quickwit-serve/src/elasticsearch_api/filter.rs +++ b/quickwit/quickwit-serve/src/elasticsearch_api/filter.rs @@ -251,6 +251,15 @@ fn merge_scroll_body_params( } } +pub(crate) fn elastic_nodes_filter() -> impl Filter + Clone { + warp::path!("_elastic" / "_nodes" / "http").and(warp::get()) +} + +pub(crate) fn elastic_search_shards_filter() +-> impl Filter + Clone { + warp::path!("_elastic" / String / "_search_shards").and(warp::get()) +} + #[utoipa::path(post, tag = "Search", path = "/_search/scroll")] pub(crate) fn elastic_scroll_filter() -> impl Filter + Clone { @@ -265,3 +274,20 @@ pub(crate) fn elastic_scroll_filter() }, ) } + +pub(crate) fn elastic_delete_scroll_filter() -> impl Filter + Clone +{ + warp::path!("_elastic" / "_search" / "scroll").and(warp::delete()) +} + +pub(crate) fn elastic_aliases_filter() -> impl Filter + Clone { + warp::path!("_elastic" / "_aliases").and(warp::get()) +} + +pub(crate) fn elastic_index_mapping_filter() +-> impl Filter + Clone { + warp::path!("_elastic" / String / "_mapping") + .or(warp::path!("_elastic" / String / "_mappings")) + .unify() + .and(warp::get()) +} diff --git a/quickwit/quickwit-serve/src/elasticsearch_api/mod.rs b/quickwit/quickwit-serve/src/elasticsearch_api/mod.rs index dd189c834b6..683f152ba6e 100644 --- a/quickwit/quickwit-serve/src/elasticsearch_api/mod.rs +++ b/quickwit/quickwit-serve/src/elasticsearch_api/mod.rs @@ -29,19 +29,25 @@ use quickwit_ingest::IngestServiceClient; use quickwit_proto::ingest::router::IngestRouterServiceClient; use quickwit_proto::metastore::MetastoreServiceClient; use quickwit_search::SearchService; -use rest_handler::es_compat_cluster_health_handler; pub use rest_handler::{ es_compat_cat_indices_handler, es_compat_cluster_info_handler, es_compat_delete_index_handler, - 
es_compat_index_cat_indices_handler, es_compat_index_count_handler, - es_compat_index_field_capabilities_handler, es_compat_index_multi_search_handler, - es_compat_index_search_handler, es_compat_index_stats_handler, es_compat_resolve_index_handler, - es_compat_scroll_handler, es_compat_search_handler, es_compat_stats_handler, + es_compat_delete_scroll_handler, es_compat_index_cat_indices_handler, + es_compat_index_count_handler, es_compat_index_field_capabilities_handler, + es_compat_index_multi_search_handler, es_compat_index_search_handler, + es_compat_index_stats_handler, es_compat_resolve_index_handler, es_compat_scroll_handler, + es_compat_search_handler, es_compat_stats_handler, +}; +use rest_handler::{ + es_compat_cluster_health_handler, es_compat_nodes_handler, es_compat_search_shards_handler, }; use serde::{Deserialize, Serialize}; use warp::hyper::StatusCode; use warp::{Filter, Rejection}; use crate::elasticsearch_api::model::ElasticsearchError; +use crate::elasticsearch_api::rest_handler::{ + es_compat_aliases_handler, es_compat_index_mapping_handler, +}; use crate::rest::recover_fn; use crate::rest_api_response::RestApiResponse; use crate::{BodyFormat, BuildInfo}; @@ -63,7 +69,8 @@ pub fn elastic_api_handlers( enable_ingest_v2: bool, ) -> impl Filter + Clone { let ingest_content_length_limit = node_config.ingest_api_config.content_length_limit; - es_compat_cluster_info_handler(node_config, BuildInfo::get()) + es_compat_cluster_info_handler(node_config.clone(), BuildInfo::get()) + .or(es_compat_nodes_handler(node_config.clone())) .or(es_compat_search_handler(search_service.clone())) .or(es_compat_bulk_handler( ingest_service.clone(), @@ -83,6 +90,7 @@ pub fn elastic_api_handlers( .or(es_compat_index_search_handler(search_service.clone())) .or(es_compat_index_count_handler(search_service.clone())) .or(es_compat_scroll_handler(search_service.clone())) + .or(es_compat_delete_scroll_handler()) 
.or(es_compat_index_multi_search_handler(search_service.clone())) .or(es_compat_index_field_capabilities_handler( search_service.clone(), @@ -95,7 +103,17 @@ pub fn elastic_api_handlers( .or(es_compat_index_cat_indices_handler(metastore.clone())) .or(es_compat_cat_indices_handler(metastore.clone())) .or(es_compat_resolve_index_handler(metastore.clone())) + .or(es_compat_aliases_handler()) + .or(es_compat_index_mapping_handler( + metastore.clone(), + search_service.clone(), + )) + .or(es_compat_search_shards_handler(node_config)) .recover(recover_fn) + .with(warp::reply::with::header( + "X-Elastic-Product", + "Elasticsearch", + )) .boxed() // Register newly created handlers here. } @@ -221,7 +239,10 @@ mod tests { .reply(&es_search_api_handler) .await; assert_eq!(resp.status(), 200); - assert!(resp.headers().get("x-elastic-product").is_none(),); + assert_eq!( + resp.headers().get("x-elastic-product").unwrap(), + "Elasticsearch" + ); let string_body = String::from_utf8(resp.body().to_vec()).unwrap(); let es_msearch_response: serde_json::Value = serde_json::from_str(&string_body).unwrap(); let responses = es_msearch_response @@ -518,9 +539,13 @@ mod tests { "cluster_name" : config.cluster_id, "version" : { "distribution" : "quickwit", - "number" : build_info.version, + "number" : "7.17.0", "build_hash" : build_info.commit_hash, "build_date" : build_info.build_date, + "build_snapshot" : false, + "lucene_version" : "8.11.1", + "minimum_wire_compatibility_version" : "6.8.0", + "minimum_index_compatibility_version" : "6.0.0-beta1", } }); assert_json_include!(actual: resp_json, expected: expected_response_json); diff --git a/quickwit/quickwit-serve/src/elasticsearch_api/model/cat_indices.rs b/quickwit/quickwit-serve/src/elasticsearch_api/model/cat_indices.rs index 37c56e73d47..5aa58fbc3d0 100644 --- a/quickwit/quickwit-serve/src/elasticsearch_api/model/cat_indices.rs +++ b/quickwit/quickwit-serve/src/elasticsearch_api/model/cat_indices.rs @@ -83,8 +83,13 @@ impl 
CatIndexQueryParams { if self.v.is_some() { return Err(unsupported_parameter_error("v")); } - if self.s.is_some() { - return Err(unsupported_parameter_error("s")); + if let Some(sort_by) = &self.s { + if sort_by.len() > 1 { + return Err(unsupported_parameter_error("s")); + } + if sort_by[0] != "index" && sort_by[0] != "index:asc" { + return Err(unsupported_parameter_error("s")); + } } Ok(()) } @@ -305,4 +310,42 @@ mod tests { // Add more test cases as needed } + + #[test] + fn test_cat_index_query_params_validate_s_parameter() { + let params = CatIndexQueryParams { + format: Some("json".to_string()), + s: Some(vec!["index:asc".to_string()]), + ..Default::default() + }; + assert!(params.validate().is_ok()); + + let params = CatIndexQueryParams { + format: Some("json".to_string()), + s: Some(vec!["index".to_string()]), + ..Default::default() + }; + assert!(params.validate().is_ok()); + + let params = CatIndexQueryParams { + format: Some("json".to_string()), + s: Some(vec!["index:desc".to_string()]), + ..Default::default() + }; + assert!(params.validate().is_err()); + + let params = CatIndexQueryParams { + format: Some("json".to_string()), + s: Some(vec!["index:asc".to_string(), "docs.count".to_string()]), + ..Default::default() + }; + assert!(params.validate().is_err()); + + let params = CatIndexQueryParams { + format: Some("json".to_string()), + s: None, + ..Default::default() + }; + assert!(params.validate().is_ok()); + } } diff --git a/quickwit/quickwit-serve/src/elasticsearch_api/model/field_capability.rs b/quickwit/quickwit-serve/src/elasticsearch_api/model/field_capability.rs index a382c541dc7..c244772c0a9 100644 --- a/quickwit/quickwit-serve/src/elasticsearch_api/model/field_capability.rs +++ b/quickwit/quickwit-serve/src/elasticsearch_api/model/field_capability.rs @@ -15,7 +15,10 @@ use std::collections::HashMap; use quickwit_proto::search::{ListFieldType, ListFieldsEntryResponse, ListFieldsResponse}; +use quickwit_query::ElasticQueryDsl; +use 
quickwit_query::query_ast::QueryAst; use serde::{Deserialize, Serialize}; +use warp::hyper::StatusCode; use super::ElasticsearchError; use super::search_query_params::*; @@ -173,16 +176,228 @@ pub fn convert_to_es_field_capabilities_response( FieldCapabilityResponse { indices, fields } } +/// Parses an Elasticsearch index_filter JSON value into a Quickwit QueryAst. +/// +/// Returns `Ok(None)` if the index_filter is null. +/// Returns `Ok(Some(QueryAst))` if the index_filter is valid. +/// Returns `Err` if the index_filter is invalid or cannot be converted (including empty object). +#[allow(clippy::result_large_err)] +pub fn parse_index_filter_to_query_ast( + index_filter: serde_json::Value, +) -> Result, ElasticsearchError> { + if index_filter.is_null() { + return Ok(None); + } + + // Parse ES Query DSL to internal QueryAst + let elastic_query_dsl: ElasticQueryDsl = + serde_json::from_value(index_filter).map_err(|err| { + ElasticsearchError::new( + StatusCode::BAD_REQUEST, + format!("Invalid index_filter: {err}"), + None, + ) + })?; + + let query_ast: QueryAst = elastic_query_dsl.try_into().map_err(|err: anyhow::Error| { + ElasticsearchError::new( + StatusCode::BAD_REQUEST, + format!("Failed to convert index_filter: {err}"), + None, + ) + })?; + + Ok(Some(query_ast)) +} + #[allow(clippy::result_large_err)] pub fn build_list_field_request_for_es_api( index_id_patterns: Vec, search_params: FieldCapabilityQueryParams, - _search_body: FieldCapabilityRequestBody, + search_body: FieldCapabilityRequestBody, ) -> Result { + let query_ast = parse_index_filter_to_query_ast(search_body.index_filter)?; + let query_ast_json = query_ast + .map(|ast| serde_json::to_string(&ast).expect("QueryAst should be JSON serializable")); + Ok(quickwit_proto::search::ListFieldsRequest { index_id_patterns, fields: search_params.fields.unwrap_or_default(), start_timestamp: search_params.start_timestamp, end_timestamp: search_params.end_timestamp, + query_ast: query_ast_json, + 
..Default::default() }) } + +#[cfg(test)] +mod tests { + use serde_json::json; + + use super::*; + + #[test] + fn test_build_list_field_request_empty_index_filter() { + let result = build_list_field_request_for_es_api( + vec!["test_index".to_string()], + FieldCapabilityQueryParams::default(), + FieldCapabilityRequestBody::default(), + ) + .unwrap(); + + assert_eq!(result.index_id_patterns, vec!["test_index".to_string()]); + assert!(result.query_ast.is_none()); + } + + #[test] + fn test_build_list_field_request_with_term_index_filter() { + let search_body = FieldCapabilityRequestBody { + index_filter: json!({ + "term": { + "status": "active" + } + }), + runtime_mappings: serde_json::Value::Null, + }; + + let result = build_list_field_request_for_es_api( + vec!["test_index".to_string()], + FieldCapabilityQueryParams::default(), + search_body, + ) + .unwrap(); + + assert_eq!(result.index_id_patterns, vec!["test_index".to_string()]); + assert!(result.query_ast.is_some()); + + // Verify the query_ast is valid JSON + let query_ast: serde_json::Value = + serde_json::from_str(&result.query_ast.unwrap()).unwrap(); + assert!(query_ast.is_object()); + } + + #[test] + fn test_build_list_field_request_with_bool_index_filter() { + let search_body = FieldCapabilityRequestBody { + index_filter: json!({ + "bool": { + "must": [ + { "term": { "status": "active" } } + ], + "filter": [ + { "range": { "age": { "gte": 18 } } } + ] + } + }), + runtime_mappings: serde_json::Value::Null, + }; + + let result = build_list_field_request_for_es_api( + vec!["test_index".to_string()], + FieldCapabilityQueryParams::default(), + search_body, + ) + .unwrap(); + + assert!(result.query_ast.is_some()); + } + + #[test] + fn test_build_list_field_request_with_invalid_index_filter() { + let search_body = FieldCapabilityRequestBody { + index_filter: json!({ + "invalid_query_type": { + "field": "value" + } + }), + runtime_mappings: serde_json::Value::Null, + }; + + let result = 
build_list_field_request_for_es_api( + vec!["test_index".to_string()], + FieldCapabilityQueryParams::default(), + search_body, + ); + + assert!(result.is_err()); + let err = result.unwrap_err(); + assert_eq!(err.status, StatusCode::BAD_REQUEST); + } + + #[test] + fn test_build_list_field_request_with_null_index_filter() { + let search_body = FieldCapabilityRequestBody { + index_filter: serde_json::Value::Null, + runtime_mappings: serde_json::Value::Null, + }; + + let result = build_list_field_request_for_es_api( + vec!["test_index".to_string()], + FieldCapabilityQueryParams::default(), + search_body, + ) + .unwrap(); + + assert!(result.query_ast.is_none()); + } + + #[test] + fn test_build_list_field_request_preserves_other_params() { + let search_params = FieldCapabilityQueryParams { + fields: Some(vec!["field1".to_string(), "field2".to_string()]), + start_timestamp: Some(1000), + end_timestamp: Some(2000), + ..Default::default() + }; + + let search_body = FieldCapabilityRequestBody { + index_filter: json!({ "match_all": {} }), + runtime_mappings: serde_json::Value::Null, + }; + + let result = build_list_field_request_for_es_api( + vec!["test_index".to_string()], + search_params, + search_body, + ) + .unwrap(); + + assert_eq!( + result.fields, + vec!["field1".to_string(), "field2".to_string()] + ); + assert_eq!(result.start_timestamp, Some(1000)); + assert_eq!(result.end_timestamp, Some(2000)); + assert!(result.query_ast.is_some()); + } + + #[test] + fn test_parse_index_filter_to_query_ast_null() { + let result = parse_index_filter_to_query_ast(serde_json::Value::Null).unwrap(); + assert!(result.is_none()); + } + + #[test] + fn test_parse_index_filter_to_query_ast_empty_object() { + // Empty object {} should return error to match ES behavior + let result = parse_index_filter_to_query_ast(json!({})); + assert!(result.is_err()); + } + + #[test] + fn test_parse_index_filter_to_query_ast_valid_term() { + let result = parse_index_filter_to_query_ast(json!({ + "term": { 
"status": "active" } + })) + .unwrap(); + assert!(result.is_some()); + } + + #[test] + fn test_parse_index_filter_to_query_ast_invalid() { + let result = parse_index_filter_to_query_ast(json!({ + "invalid_query_type": { "field": "value" } + })); + assert!(result.is_err()); + } +} diff --git a/quickwit/quickwit-serve/src/elasticsearch_api/model/mappings.rs b/quickwit/quickwit-serve/src/elasticsearch_api/model/mappings.rs new file mode 100644 index 00000000000..1c15dbb4c0e --- /dev/null +++ b/quickwit/quickwit-serve/src/elasticsearch_api/model/mappings.rs @@ -0,0 +1,304 @@ +// Copyright 2021-Present Datadog, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use std::collections::HashMap; + +use quickwit_doc_mapper::{FieldMappingEntry, FieldMappingType}; +use quickwit_metastore::IndexMetadata; +use quickwit_proto::search::{ListFieldType, ListFieldsResponse}; +use serde::ser::SerializeMap; +use serde::{Serialize, Serializer}; + +/// Top-level response for `GET /{index}/_mapping(s)`. +/// +/// Serializes as `{ "": { "mappings": { "properties": { ... } } } }`. 
+pub(crate) struct ElasticsearchMappingsResponse { + indices: HashMap, +} + +impl Serialize for ElasticsearchMappingsResponse { + fn serialize(&self, serializer: S) -> Result { + let mut map = serializer.serialize_map(Some(self.indices.len()))?; + for (index_id, mappings) in &self.indices { + map.serialize_entry(index_id, mappings)?; + } + map.end() + } +} + +#[derive(Debug, Serialize)] +struct IndexMappings { + mappings: MappingProperties, +} + +#[derive(Debug, Serialize)] +struct MappingProperties { + properties: HashMap, +} + +#[derive(Debug, Serialize)] +#[serde(untagged)] +enum FieldMapping { + Leaf { + #[serde(rename = "type")] + typ: &'static str, + }, + Object { + #[serde(rename = "type")] + typ: &'static str, + properties: HashMap, + }, +} + +impl ElasticsearchMappingsResponse { + pub fn from_doc_mapping( + indexes_metadata: Vec, + list_fields_response: Option<&ListFieldsResponse>, + ) -> Self { + let indices = indexes_metadata + .into_iter() + .map(|index_metadata| { + let field_mappings = &index_metadata.index_config.doc_mapping.field_mappings; + let mut properties = build_properties(field_mappings); + if let Some(list_fields) = list_fields_response { + merge_dynamic_fields(&mut properties, list_fields); + } + let index_id = index_metadata.index_id().to_string(); + ( + index_id, + IndexMappings { + mappings: MappingProperties { properties }, + }, + ) + }) + .collect(); + Self { indices } + } +} + +fn build_properties(field_mappings: &[FieldMappingEntry]) -> HashMap { + let mut properties = HashMap::with_capacity(field_mappings.len()); + for entry in field_mappings { + if let Some(field_mapping) = field_mapping_from_entry(entry) { + properties.insert(entry.name.clone(), field_mapping); + } + } + properties +} + +fn field_mapping_from_entry(entry: &FieldMappingEntry) -> Option { + match &entry.mapping_type { + FieldMappingType::Text(..) => Some(FieldMapping::Leaf { typ: "text" }), + FieldMappingType::I64(..) 
=> Some(FieldMapping::Leaf { typ: "long" }), + FieldMappingType::U64(..) => Some(FieldMapping::Leaf { typ: "long" }), + FieldMappingType::F64(..) => Some(FieldMapping::Leaf { typ: "double" }), + FieldMappingType::Bool(..) => Some(FieldMapping::Leaf { typ: "boolean" }), + FieldMappingType::DateTime(..) => Some(FieldMapping::Leaf { typ: "date" }), + FieldMappingType::IpAddr(..) => Some(FieldMapping::Leaf { typ: "ip" }), + FieldMappingType::Bytes(..) => Some(FieldMapping::Leaf { typ: "binary" }), + FieldMappingType::Json(..) => Some(FieldMapping::Leaf { typ: "object" }), + FieldMappingType::Object(options) => { + let properties = build_properties(&options.field_mappings); + Some(FieldMapping::Object { + typ: "object", + properties, + }) + } + FieldMappingType::Concatenate(_) => None, + } +} + +/// Merges dynamic fields from a `ListFieldsResponse` into the properties map. +/// +/// Fields already present in the map (from explicit doc mappings) are skipped, +/// as are internal fields (prefixed with `_`). 
+fn merge_dynamic_fields( + properties: &mut HashMap, + list_fields_response: &ListFieldsResponse, +) { + for field_entry in &list_fields_response.fields { + let field_name = &field_entry.field_name; + if field_name.starts_with('_') { + continue; + } + if properties.contains_key(field_name) { + continue; + } + let Ok(field_type) = ListFieldType::try_from(field_entry.field_type) else { + continue; + }; + if let Some(es_type) = es_type_from_list_field_type(field_type) { + properties.insert(field_name.clone(), FieldMapping::Leaf { typ: es_type }); + } + } +} + +fn es_type_from_list_field_type(field_type: ListFieldType) -> Option<&'static str> { + match field_type { + ListFieldType::Str => Some("keyword"), + ListFieldType::U64 | ListFieldType::I64 => Some("long"), + ListFieldType::F64 => Some("double"), + ListFieldType::Bool => Some("boolean"), + ListFieldType::Date => Some("date"), + ListFieldType::Bytes => Some("binary"), + ListFieldType::IpAddr => Some("ip"), + ListFieldType::Facet | ListFieldType::Json => None, + } +} + +#[cfg(test)] +mod tests { + use serde_json::json; + + use super::*; + + #[test] + fn test_field_mapping_from_entry_bool() { + let entry_json = json!({ "name": "active", "type": "bool" }); + let entry: FieldMappingEntry = serde_json::from_value(entry_json).unwrap(); + let mapping = field_mapping_from_entry(&entry).unwrap(); + let serialized = serde_json::to_value(&mapping).unwrap(); + assert_eq!(serialized, json!({ "type": "boolean" })); + } + + #[test] + fn test_field_mapping_from_entry_text() { + let entry_json = json!({ "name": "message", "type": "text" }); + let entry: FieldMappingEntry = serde_json::from_value(entry_json).unwrap(); + let mapping = field_mapping_from_entry(&entry).unwrap(); + let serialized = serde_json::to_value(&mapping).unwrap(); + assert_eq!(serialized, json!({ "type": "text" })); + } + + #[test] + fn test_field_mapping_from_entry_i64() { + let entry_json = json!({ "name": "count", "type": "i64" }); + let entry: 
FieldMappingEntry = serde_json::from_value(entry_json).unwrap(); + let mapping = field_mapping_from_entry(&entry).unwrap(); + let serialized = serde_json::to_value(&mapping).unwrap(); + assert_eq!(serialized, json!({ "type": "long" })); + } + + #[test] + fn test_field_mapping_from_entry_object() { + let entry_json = json!({ + "name": "nested", + "type": "object", + "field_mappings": [ + { "name": "id", "type": "u64" }, + { "name": "label", "type": "text" } + ] + }); + let entry: FieldMappingEntry = serde_json::from_value(entry_json).unwrap(); + let mapping = field_mapping_from_entry(&entry).unwrap(); + let serialized = serde_json::to_value(&mapping).unwrap(); + assert_eq!( + serialized, + json!({ + "type": "object", + "properties": { + "id": { "type": "long" }, + "label": { "type": "text" } + } + }) + ); + } + + #[test] + fn test_field_mapping_from_entry_concatenate_skipped() { + let entry_json = json!({ + "name": "concat_field", + "type": "concatenate", + "concatenate_fields": ["field_a", "field_b"] + }); + let entry: FieldMappingEntry = serde_json::from_value(entry_json).unwrap(); + assert!(field_mapping_from_entry(&entry).is_none()); + } + + #[test] + fn test_build_properties_all_leaf_types() { + let entries: Vec = serde_json::from_value(json!([ + { "name": "title", "type": "text" }, + { "name": "count", "type": "i64" }, + { "name": "unsigned", "type": "u64" }, + { "name": "score", "type": "f64" }, + { "name": "active", "type": "bool" }, + { "name": "created_at", "type": "datetime" }, + { "name": "ip_field", "type": "ip" }, + { "name": "data", "type": "bytes" }, + { "name": "payload", "type": "json" }, + { + "name": "metadata", + "type": "object", + "field_mappings": [ + { "name": "source", "type": "text" } + ] + } + ])) + .unwrap(); + + let props = build_properties(&entries); + let to_json = |fm: &FieldMapping| serde_json::to_value(fm).unwrap(); + + assert_eq!(to_json(&props["title"]), json!({ "type": "text" })); + assert_eq!(to_json(&props["count"]), json!({ 
"type": "long" })); + assert_eq!(to_json(&props["unsigned"]), json!({ "type": "long" })); + assert_eq!(to_json(&props["score"]), json!({ "type": "double" })); + assert_eq!(to_json(&props["active"]), json!({ "type": "boolean" })); + assert_eq!(to_json(&props["created_at"]), json!({ "type": "date" })); + assert_eq!(to_json(&props["ip_field"]), json!({ "type": "ip" })); + assert_eq!(to_json(&props["data"]), json!({ "type": "binary" })); + assert_eq!(to_json(&props["payload"]), json!({ "type": "object" })); + + let meta = to_json(&props["metadata"]); + assert_eq!(meta["type"], "object"); + assert_eq!(meta["properties"]["source"]["type"], "text"); + } + + #[test] + fn test_merge_dynamic_fields_skips_existing_and_internal() { + use quickwit_proto::search::ListFieldsEntryResponse; + + let mut properties = HashMap::new(); + properties.insert("title".to_string(), FieldMapping::Leaf { typ: "text" }); + + let list_fields = ListFieldsResponse { + num_fields: 3, + fields: vec![ + ListFieldsEntryResponse { + field_name: "title".to_string(), + field_type: ListFieldType::Str as i32, + ..Default::default() + }, + ListFieldsEntryResponse { + field_name: "_timestamp".to_string(), + field_type: ListFieldType::Date as i32, + ..Default::default() + }, + ListFieldsEntryResponse { + field_name: "dynamic_field".to_string(), + field_type: ListFieldType::Str as i32, + ..Default::default() + }, + ], + }; + + merge_dynamic_fields(&mut properties, &list_fields); + + assert_eq!(properties.len(), 2); + assert!(properties.contains_key("title")); + assert!(properties.contains_key("dynamic_field")); + assert!(!properties.contains_key("_timestamp")); + } +} diff --git a/quickwit/quickwit-serve/src/elasticsearch_api/model/mod.rs b/quickwit/quickwit-serve/src/elasticsearch_api/model/mod.rs index ae1f6aa1a41..4351a26b65b 100644 --- a/quickwit/quickwit-serve/src/elasticsearch_api/model/mod.rs +++ b/quickwit/quickwit-serve/src/elasticsearch_api/model/mod.rs @@ -17,6 +17,7 @@ mod bulk_query_params; mod 
cat_indices; mod error; mod field_capability; +mod mappings; mod multi_search; mod scroll; mod search_body; @@ -35,6 +36,7 @@ pub use field_capability::{ FieldCapabilityQueryParams, FieldCapabilityRequestBody, FieldCapabilityResponse, build_list_field_request_for_es_api, convert_to_es_field_capabilities_response, }; +pub(crate) use mappings::ElasticsearchMappingsResponse; pub use multi_search::{ MultiSearchHeader, MultiSearchQueryParams, MultiSearchResponse, MultiSearchSingleResponse, }; diff --git a/quickwit/quickwit-serve/src/elasticsearch_api/model/search_query_params.rs b/quickwit/quickwit-serve/src/elasticsearch_api/model/search_query_params.rs index c4a4a124439..3098f23b964 100644 --- a/quickwit/quickwit-serve/src/elasticsearch_api/model/search_query_params.rs +++ b/quickwit/quickwit-serve/src/elasticsearch_api/model/search_query_params.rs @@ -115,6 +115,8 @@ pub struct SearchQueryParams { #[serde(default)] pub scroll: Option, #[serde(default)] + pub search_type: Option, + #[serde(default)] pub seq_no_primary_term: Option, #[serde(default)] pub size: Option, diff --git a/quickwit/quickwit-serve/src/elasticsearch_api/rest_handler.rs b/quickwit/quickwit-serve/src/elasticsearch_api/rest_handler.rs index 15663a35cee..37f1c6ae80a 100644 --- a/quickwit/quickwit-serve/src/elasticsearch_api/rest_handler.rs +++ b/quickwit/quickwit-serve/src/elasticsearch_api/rest_handler.rs @@ -27,7 +27,7 @@ use quickwit_common::truncate_str; use quickwit_config::{NodeConfig, validate_index_id_pattern}; use quickwit_index_management::IndexService; use quickwit_metastore::*; -use quickwit_proto::metastore::MetastoreServiceClient; +use quickwit_proto::metastore::{IndexMetadataRequest, MetastoreService, MetastoreServiceClient}; use quickwit_proto::search::{ CountHits, ListFieldsResponse, PartialHit, ScrollRequest, SearchResponse, SortByValue, SortDatetimeFormat, @@ -39,18 +39,20 @@ use quickwit_search::{ AggregationResults, SearchError, SearchService, list_all_splits, 
resolve_index_patterns, }; use serde::{Deserialize, Serialize}; -use serde_json::json; +use serde_json::{Map, Value, json}; use warp::hyper::StatusCode; use warp::reply::with_status; use warp::{Filter, Rejection}; use super::filter::{ - elastic_cat_indices_filter, elastic_cluster_health_filter, elastic_cluster_info_filter, - elastic_delete_index_filter, elastic_field_capabilities_filter, - elastic_index_cat_indices_filter, elastic_index_count_filter, - elastic_index_field_capabilities_filter, elastic_index_search_filter, - elastic_index_stats_filter, elastic_multi_search_filter, elastic_resolve_index_filter, - elastic_scroll_filter, elastic_stats_filter, elasticsearch_filter, + elastic_aliases_filter, elastic_cat_indices_filter, elastic_cluster_health_filter, + elastic_cluster_info_filter, elastic_delete_index_filter, elastic_delete_scroll_filter, + elastic_field_capabilities_filter, elastic_index_cat_indices_filter, + elastic_index_count_filter, elastic_index_field_capabilities_filter, + elastic_index_mapping_filter, elastic_index_search_filter, elastic_index_stats_filter, + elastic_multi_search_filter, elastic_nodes_filter, elastic_resolve_index_filter, + elastic_scroll_filter, elastic_search_shards_filter, elastic_stats_filter, + elasticsearch_filter, }; use super::model::{ CatIndexQueryParams, DeleteQueryParams, ElasticsearchCatIndexResponse, ElasticsearchError, @@ -62,6 +64,7 @@ use super::model::{ build_list_field_request_for_es_api, convert_to_es_field_capabilities_response, }; use super::{TrackTotalHits, make_elastic_api_response}; +use crate::elasticsearch_api::model::ElasticsearchMappingsResponse; use crate::format::BodyFormat; use crate::rest::recover_fn; use crate::rest_api_response::{RestApiError, RestApiResponse}; @@ -80,11 +83,17 @@ pub fn es_compat_cluster_info_handler( warp::reply::json(&json!({ "name" : config.node_id, "cluster_name" : config.cluster_id, + "cluster_uuid" : config.cluster_id, + "tagline" : "You Know, for Search", "version" : { 
"distribution" : "quickwit", - "number" : build_info.version, + "number" : "7.17.0", "build_hash" : build_info.commit_hash, "build_date" : build_info.build_date, + "build_snapshot" : false, + "lucene_version" : "8.11.1", + "minimum_wire_compatibility_version" : "6.8.0", + "minimum_index_compatibility_version" : "6.0.0-beta1", } })) }, @@ -92,6 +101,115 @@ pub fn es_compat_cluster_info_handler( .boxed() } +/// GET _elastic/_nodes/http +pub fn es_compat_nodes_handler( + node_config: Arc, +) -> impl Filter + Clone { + elastic_nodes_filter() + .and(with_arg(node_config)) + .then(|config: Arc| async move { + let advertise_addr = std::net::SocketAddr::new( + config.grpc_advertise_addr.ip(), + config.rest_config.listen_addr.port(), + ); + warp::reply::json(&json!({ + "nodes": { + config.node_id.as_str(): { + "roles": ["data", "ingest"], + "http": { + "publish_address": advertise_addr.to_string() + } + } + } + })) + }) + .boxed() +} + +/// GET _elastic/{index}/_search_shards +pub fn es_compat_search_shards_handler( + node_config: Arc, +) -> impl Filter + Clone { + elastic_search_shards_filter() + .and(with_arg(node_config)) + .then(|index_id: String, config: Arc| async move { + warp::reply::json(&json!({ + "shards": [[{ + "index": index_id, + "shard": 0, + "primary": true, + "node": config.node_id.as_str() + }]] + })) + }) + .boxed() +} + +/// GET _elastic/_aliases +pub fn es_compat_aliases_handler() +-> impl Filter + Clone { + elastic_aliases_filter() + .then(|| async { Ok(Value::Object(Map::new())) }) + .map(|result| make_elastic_api_response(result, BodyFormat::default())) + .recover(recover_fn) + .boxed() +} + +/// GET _elastic/{index}/_mapping or _elastic/{index}/_mappings +pub fn es_compat_index_mapping_handler( + metastore: MetastoreServiceClient, + search_service: Arc, +) -> impl Filter + Clone { + elastic_index_mapping_filter() + .and(with_arg(metastore)) + .and(with_arg(search_service)) + .then(es_compat_index_mapping) + .map(|result| 
make_elastic_api_response(result, BodyFormat::default())) + .recover(recover_fn) +} + +async fn get_index_metadata( + index_id: String, + metastore: MetastoreServiceClient, +) -> Result { + let index_metadata_request = IndexMetadataRequest::for_index_id(index_id); + let index_metadata = metastore + .index_metadata(index_metadata_request) + .await? + .deserialize_index_metadata()?; + Ok(index_metadata) +} + +async fn es_compat_index_mapping( + index_id: String, + mut metastore: MetastoreServiceClient, + search_service: Arc, +) -> Result { + let indexes_metadata = if index_id.contains('*') || index_id.contains(',') { + let patterns: Vec = index_id.split(',').map(|s| s.trim().to_string()).collect(); + resolve_index_patterns(&patterns, &mut metastore).await? + } else { + vec![get_index_metadata(index_id.clone(), metastore).await?] + }; + let index_id_patterns: Vec = indexes_metadata + .iter() + .map(|m| m.index_id().to_string()) + .collect(); + let list_fields_request = quickwit_proto::search::ListFieldsRequest { + index_id_patterns, + ..Default::default() + }; + let list_fields_response = search_service + .root_list_fields(list_fields_request) + .await + .ok(); + let response = ElasticsearchMappingsResponse::from_doc_mapping( + indexes_metadata, + list_fields_response.as_ref(), + ); + Ok(response) +} + /// GET or POST _elastic/_search pub fn es_compat_search_handler( _search_service: Arc, @@ -302,6 +420,24 @@ pub fn es_compat_scroll_handler( .boxed() } +/// DELETE _elastic/_search/scroll +/// +/// Clears a scroll context. Quickwit manages scroll lifetime via TTL, +/// so this is a no-op that returns success. 
+pub fn es_compat_delete_scroll_handler() +-> impl Filter + Clone { + elastic_delete_scroll_filter() + .then(|| async { + Ok::<_, ElasticsearchError>(json!({ + "succeeded": true, + "num_freed": 0 + })) + }) + .map(|result| make_elastic_api_response(result, BodyFormat::default())) + .recover(recover_fn) + .boxed() +} + #[allow(clippy::result_large_err)] fn build_request_for_es_api( index_id_patterns: Vec, diff --git a/quickwit/quickwit-serve/src/lib.rs b/quickwit/quickwit-serve/src/lib.rs index dccaecd3912..1137decf466 100644 --- a/quickwit/quickwit-serve/src/lib.rs +++ b/quickwit/quickwit-serve/src/lib.rs @@ -59,7 +59,7 @@ use itertools::Itertools; use once_cell::sync::Lazy; use quickwit_actors::{ActorExitStatus, Mailbox, SpawnContext, Universe}; use quickwit_cluster::{ - Cluster, ClusterChange, ClusterChangeStream, ListenerHandle, start_cluster_service, + Cluster, ClusterChange, ClusterChangeStream, ClusterNode, ListenerHandle, start_cluster_service, }; use quickwit_common::pubsub::{EventBroker, EventSubscriptionHandle}; use quickwit_common::rate_limiter::RateLimiterSettings; @@ -82,8 +82,9 @@ use quickwit_indexing::models::ShardPositionsService; use quickwit_indexing::start_indexing_service; use quickwit_ingest::{ GetMemoryCapacity, IngestRequest, IngestRouter, IngestServiceClient, Ingester, IngesterPool, - LocalShardsUpdate, get_idle_shard_timeout, setup_local_shards_update_listener, - start_ingest_api_service, wait_for_ingester_decommission, wait_for_ingester_status, + IngesterPoolEntry, LocalShardsUpdate, get_idle_shard_timeout, + setup_local_shards_update_listener, start_ingest_api_service, try_get_ingester_status, + wait_for_ingester_decommission, wait_for_ingester_status, }; use quickwit_jaeger::JaegerService; use quickwit_janitor::{JanitorService, start_janitor_service}; @@ -112,6 +113,7 @@ use quickwit_search::{ use quickwit_storage::{SplitCache, StorageResolver}; use tcp_listener::TcpListenerResolver; use tokio::sync::oneshot; +use 
tonic::codec::CompressionEncoding; use tonic_health::ServingStatus; use tonic_health::server::HealthReporter; use tower::ServiceBuilder; @@ -379,8 +381,8 @@ fn start_shard_positions_service( // We spawn a task here, because we need the ingester to be ready before spawning the // the `ShardPositionsService`. If we don't, all the events we emit too early will be dismissed. tokio::spawn(async move { - if let Some(ingester) = ingester_opt - && wait_for_ingester_status(ingester, IngesterStatus::Ready) + if let Some(ingester) = &ingester_opt + && wait_for_ingester_status(ingester, IngesterStatus::Ready, Duration::from_secs(300)) .await .is_err() { @@ -406,8 +408,8 @@ async fn shutdown_signal_handler( shutdown_signal.await; // We must decommission the ingester first before terminating the indexing pipelines that // may consume from it. We also need to keep the gRPC server running while doing so. - if let Some(ingester) = ingester_opt - && let Err(error) = wait_for_ingester_decommission(ingester).await + if let Some(ingester) = &ingester_opt + && let Err(error) = wait_for_ingester_decommission(ingester, Duration::from_secs(300)).await { error!("failed to decommission ingester gracefully: {:?}", error); } @@ -556,10 +558,10 @@ pub async fn serve_quickwit( // Setup the indexer pool to track cluster changes. setup_indexer_pool( - &node_config, cluster.change_stream(), - indexer_pool, indexing_service_opt.clone(), + indexer_pool, + node_config.grpc_config.max_message_size, ); // Setup ingest service v2. @@ -631,25 +633,38 @@ pub async fn serve_quickwit( }; // Initialize Lambda invoker if enabled and searcher service is running - let lambda_invoker_opt = if node_config.is_service_enabled(QuickwitService::Searcher) { + let searcher_context = if node_config.is_service_enabled(QuickwitService::Searcher) { if let Some(lambda_config) = &node_config.searcher_config.lambda { - info!("initializing AWS Lambda invoker for search"); - warn!("offloading to lambda is EXPERIMENTAL. 
Use at your own risk"); - let invoker = quickwit_lambda_client::try_get_or_deploy_invoker(lambda_config).await?; - Some(invoker) + #[cfg(feature = "lambda")] + { + info!("initializing AWS Lambda invoker for search"); + warn!("offloading to lambda is EXPERIMENTAL. Use at your own risk"); + let invoker = + quickwit_lambda_client::try_get_or_deploy_invoker(lambda_config).await?; + Arc::new(SearcherContext::new( + node_config.searcher_config.clone(), + split_cache_opt, + Some(invoker), + )) + } + #[cfg(not(feature = "lambda"))] + { + let _ = lambda_config; + bail!("lambda support is statically disabled, but enabled in configuration"); + } } else { - None + Arc::new(SearcherContext::new_without_invoker( + node_config.searcher_config.clone(), + split_cache_opt, + )) } } else { - None + Arc::new(SearcherContext::new_without_invoker( + node_config.searcher_config.clone(), + split_cache_opt, + )) }; - let searcher_context = Arc::new(SearcherContext::new( - node_config.searcher_config.clone(), - split_cache_opt, - lambda_invoker_opt, - )); - let (search_job_placer, search_service) = setup_searcher( &node_config, cluster.change_stream(), @@ -963,64 +978,127 @@ async fn setup_ingest_v2( } else { None }; - // Setup ingester pool change stream. 
- let ingester_opt_clone = ingester_opt.clone(); - let max_message_size = node_config.grpc_config.max_message_size; - let ingester_change_stream = cluster.change_stream().filter_map(move |cluster_change| { - let ingester_opt_clone_clone = ingester_opt_clone.clone(); + setup_ingester_pool( + cluster.change_stream(), + ingester_opt.clone(), + ingester_pool, + grpc_compression_encoding_opt, + node_config.grpc_config.max_message_size, + ); + Ok((ingest_router, ingest_router_service, ingester_opt)) +} + +fn setup_ingester_pool( + cluster_change_stream: ClusterChangeStream, + ingester_opt: Option, + ingester_pool: IngesterPool, + grpc_compression_encoding_opt: Option, + grpc_max_message_size: ByteSize, +) { + let ingester_change_stream = cluster_change_stream.filter_map(move |cluster_change| { + let ingester_opt_clone = ingester_opt.clone(); Box::pin(async move { match cluster_change { ClusterChange::Add(node) if node.is_indexer() => { - let chitchat_id = node.chitchat_id(); - info!( - node_id = chitchat_id.node_id, - generation_id = chitchat_id.generation_id, - "adding node `{}` to ingester pool", - chitchat_id.node_id, + let change = build_ingester_insert_change( + &node, + ingester_opt_clone, + grpc_max_message_size, + grpc_compression_encoding_opt, ); - let node_id: NodeId = node.node_id().into(); - - if node.is_self_node() { - // Here, since the service is available locally, we bypass the network stack - // and use the instance directly. However, we still want client-side - // metrics, so we use both metrics layers. 
- let ingester = ingester_opt_clone_clone - .expect("ingester service should be initialized"); - let ingester_service = ingester_service_layer_stack( - IngesterServiceClient::tower() - .stack_layer(INGEST_GRPC_CLIENT_METRICS_LAYER.clone()), - ) - .build(ingester); - Some(Change::Insert(node_id, ingester_service)) - } else { - let ingester_service = IngesterServiceClient::tower() - .stack_layer(INGEST_GRPC_CLIENT_METRICS_LAYER.clone()) - .stack_layer(TimeoutLayer::new(GRPC_INGESTER_SERVICE_TIMEOUT)) - .build_from_channel( - node.grpc_advertise_addr(), - node.channel(), - max_message_size, - grpc_compression_encoding_opt, - ); - Some(Change::Insert(node_id, ingester_service)) - } + Some(change) } - ClusterChange::Remove(node) if node.is_indexer() => { - let chitchat_id = node.chitchat_id(); - info!( - node_id = chitchat_id.node_id, - generation_id = chitchat_id.generation_id, - "removing node `{}` from ingester pool", - chitchat_id.node_id, + ClusterChange::Update { previous, updated } + if updated.is_indexer() + && previous.ingester_status() != updated.ingester_status() => + { + // only update the ingester pool when the ingester status changes, to avoid + // unnecessary churn + let change = build_ingester_insert_change( + &updated, + ingester_opt_clone, + grpc_max_message_size, + grpc_compression_encoding_opt, ); - Some(Change::Remove(node.node_id().into())) + Some(change) + } + ClusterChange::Remove(node) if node.is_indexer() => { + let change = build_ingester_remove_change(&node); + Some(change) } _ => None, } }) }); ingester_pool.listen_for_changes(ingester_change_stream); - Ok((ingest_router, ingest_router_service, ingester_opt)) +} + +fn build_ingester_insert_change( + node: &ClusterNode, + ingester_opt: Option, + grpc_max_message_size: ByteSize, + grpc_compression_encoding_opt: Option, +) -> Change { + let chitchat_id = node.chitchat_id(); + info!( + node_id = chitchat_id.node_id, + generation_id = chitchat_id.generation_id, + "adding/updating node `{}` with 
ingester status `{}` to ingester pool", + chitchat_id.node_id, + node.ingester_status(), + ); + let node_id: NodeId = node.node_id().into(); + let ingester_service = build_ingester_service( + node, + ingester_opt, + grpc_max_message_size, + grpc_compression_encoding_opt, + ); + let pool_entry = IngesterPoolEntry { + client: ingester_service, + status: node.ingester_status(), + }; + Change::Insert(node_id, pool_entry) +} + +fn build_ingester_remove_change(node: &ClusterNode) -> Change { + let chitchat_id = node.chitchat_id(); + info!( + node_id = chitchat_id.node_id, + generation_id = chitchat_id.generation_id, + "removing node `{}` from ingester pool", + chitchat_id.node_id, + ); + let node_id: NodeId = node.node_id().into(); + Change::Remove(node_id) +} + +fn build_ingester_service( + node: &ClusterNode, + ingester_opt: Option, + max_message_size: ByteSize, + grpc_compression_encoding_opt: Option, +) -> IngesterServiceClient { + if node.is_self_node() { + // Here, since the service is available locally, we bypass the network stack + // and use the instance directly. However, we still want client-side + // metrics, so we use both metrics layers. 
+ let ingester = ingester_opt.expect("ingester service should be initialized"); + let service = ingester_service_layer_stack( + IngesterServiceClient::tower().stack_layer(INGEST_GRPC_CLIENT_METRICS_LAYER.clone()), + ) + .build(ingester); + return service; + } + IngesterServiceClient::tower() + .stack_layer(INGEST_GRPC_CLIENT_METRICS_LAYER.clone()) + .stack_layer(TimeoutLayer::new(GRPC_INGESTER_SERVICE_TIMEOUT)) + .build_from_channel( + node.grpc_advertise_addr(), + node.channel(), + max_message_size, + grpc_compression_encoding_opt, + ) } async fn setup_searcher( @@ -1144,90 +1222,26 @@ async fn setup_control_plane( } fn setup_indexer_pool( - node_config: &NodeConfig, cluster_change_stream: ClusterChangeStream, - indexer_pool: IndexerPool, indexing_service_opt: Option>, + indexer_pool: IndexerPool, + grpc_max_message_size: ByteSize, ) { - let max_message_size = node_config.grpc_config.max_message_size; let indexer_change_stream = cluster_change_stream.filter_map(move |cluster_change| { let indexing_service_clone_opt = indexing_service_opt.clone(); Box::pin(async move { - match &cluster_change { + match cluster_change { ClusterChange::Add(node) if node.is_indexer() => { - let chitchat_id = node.chitchat_id(); - info!( - node_id = chitchat_id.node_id, - generation_id = chitchat_id.generation_id, - "adding node `{}` to indexer pool", - chitchat_id.node_id, + let change = build_indexer_insert_change( + &node, + indexing_service_clone_opt, + grpc_max_message_size, ); - } - _ => {} - }; - match cluster_change { - ClusterChange::Add(node) | ClusterChange::Update(node) if node.is_indexer() => { - let node_id = node.node_id().to_owned(); - let indexing_tasks = node.indexing_tasks().to_vec(); - let indexing_capacity = node.indexing_capacity(); - - if node.is_self_node() { - // Here, since the service is available locally, we bypass the network stack - // and use the mailbox directly. However, we still want client-side metrics, - // so we use both metrics layers. 
- let indexing_service_mailbox = indexing_service_clone_opt - .expect("indexing service should be initialized"); - // These layers apply to all the RPCs of the indexing service. - let shared_layers = ServiceBuilder::new() - .layer(INDEXING_GRPC_CLIENT_METRICS_LAYER.clone()) - .layer(INDEXING_GRPC_SERVER_METRICS_LAYER.clone()) - .into_inner(); - let client = IndexingServiceClient::tower() - .stack_layer(shared_layers) - .build_from_mailbox(indexing_service_mailbox); - let change = Change::Insert( - node_id.clone(), - IndexerNodeInfo { - node_id, - generation_id: node.chitchat_id().generation_id, - client, - indexing_tasks, - indexing_capacity, - }, - ); - Some(change) - } else { - let client = IndexingServiceClient::tower() - .stack_layer(INDEXING_GRPC_CLIENT_METRICS_LAYER.clone()) - .stack_layer(TimeoutLayer::new(GRPC_INDEXING_SERVICE_TIMEOUT)) - .build_from_channel( - node.grpc_advertise_addr(), - node.channel(), - max_message_size, - None, - ); - let change = Change::Insert( - node_id.clone(), - IndexerNodeInfo { - node_id, - generation_id: node.chitchat_id().generation_id, - client, - indexing_tasks, - indexing_capacity, - }, - ); - Some(change) - } + Some(change) } ClusterChange::Remove(node) if node.is_indexer() => { - let chitchat_id = node.chitchat_id(); - info!( - node_id = chitchat_id.node_id, - generation_id = chitchat_id.generation_id, - "removing node `{}` from indexer pool", - chitchat_id.node_id, - ); - Some(Change::Remove(node.node_id().to_owned())) + let change = build_indexer_remove_change(&node); + Some(change) } _ => None, } @@ -1236,6 +1250,75 @@ fn setup_indexer_pool( indexer_pool.listen_for_changes(indexer_change_stream); } +fn build_indexer_insert_change( + node: &ClusterNode, + indexing_service_opt: Option>, + grpc_max_message_size: ByteSize, +) -> Change { + let chitchat_id = node.chitchat_id(); + info!( + node_id = chitchat_id.node_id, + generation_id = chitchat_id.generation_id, + "adding node `{}` with ingester status `{}` to indexer 
pool", + chitchat_id.node_id, + node.ingester_status() + ); + let node_id: NodeId = node.node_id().into(); + let client = build_indexing_service(node, indexing_service_opt, grpc_max_message_size); + Change::Insert( + node_id.clone(), + IndexerNodeInfo { + node_id, + generation_id: chitchat_id.generation_id, + client, + indexing_tasks: node.indexing_tasks().to_vec(), + indexing_capacity: node.indexing_capacity(), + }, + ) +} + +fn build_indexer_remove_change(node: &ClusterNode) -> Change { + let chitchat_id = node.chitchat_id(); + info!( + node_id = chitchat_id.node_id, + generation_id = chitchat_id.generation_id, + "removing node `{}` from indexer pool", + chitchat_id.node_id, + ); + let node_id: NodeId = node.node_id().into(); + Change::Remove(node_id) +} + +fn build_indexing_service( + node: &ClusterNode, + indexing_service_opt: Option>, + max_message_size: ByteSize, +) -> IndexingServiceClient { + if node.is_self_node() { + // Here, since the service is available locally, we bypass the network stack + // and use the mailbox directly. However, we still want client-side metrics, + // so we use both metrics layers. 
+ let indexing_service_mailbox = + indexing_service_opt.expect("indexing service should be initialized"); + let shared_layers = ServiceBuilder::new() + .layer(INDEXING_GRPC_CLIENT_METRICS_LAYER.clone()) + .layer(INDEXING_GRPC_SERVER_METRICS_LAYER.clone()) + .into_inner(); + return IndexingServiceClient::tower() + .stack_layer(shared_layers) + .build_from_mailbox(indexing_service_mailbox); + } + IndexingServiceClient::tower() + .stack_layer(INDEXING_GRPC_CLIENT_METRICS_LAYER.clone()) + .stack_layer(TimeoutLayer::new(GRPC_INDEXING_SERVICE_TIMEOUT)) + .build_from_channel( + node.grpc_advertise_addr(), + node.channel(), + max_message_size, + None, + ) +} + fn require( val_opt: Option, ) -> impl Filter + Clone { @@ -1284,19 +1367,12 @@ async fn node_readiness_reporting_task( }; info!("REST server is ready"); - if let Some(ingester) = ingester_opt - && let Err(error) = wait_for_ingester_status(ingester, IngesterStatus::Ready).await - { - error!("failed to initialize ingester: {:?}", error); - info!("shutting down"); - return; - } let mut interval = tokio::time::interval(READINESS_REPORTING_INTERVAL); loop { interval.tick().await; - let new_node_ready = match metastore.check_connectivity().await { + let metastore_is_available = match metastore.check_connectivity().await { Ok(()) => { debug!(metastore_endpoints=?metastore.endpoints(), "metastore service is available"); true @@ -1306,6 +1382,23 @@ async fn node_readiness_reporting_task( false } }; + let ingester_is_available = if let Some(ingester) = &ingester_opt { + match try_get_ingester_status(ingester).await { + Ok(status) => { + status == IngesterStatus::Initializing || status != IngesterStatus::Failed + } + Err(error) => { + // If we couldn't get the ingester status, it's not looking good, so we set the + // node to not ready. 
+ error!(%error, "failed to get ingester status"); + false + } + } + } else { + true + }; + let new_node_ready = metastore_is_available && ingester_is_available; + if new_node_ready != node_ready { node_ready = new_node_ready; cluster.set_self_node_readiness(node_ready).await; @@ -1373,10 +1466,8 @@ mod tests { use quickwit_common::{ServiceStream, assert_eventually}; use quickwit_config::SearcherConfig; use quickwit_metastore::{IndexMetadata, metastore_for_test}; - use quickwit_proto::indexing::IndexingTask; use quickwit_proto::ingest::ingester::{MockIngesterService, ObservationMessage}; use quickwit_proto::metastore::{ListIndexesMetadataResponse, MockMetastoreService}; - use quickwit_proto::types::{IndexUid, PipelineUid}; use quickwit_search::Job; use tokio::sync::watch; use tonic::transport::{Channel, Server}; @@ -1513,53 +1604,32 @@ mod tests { ClusterChangeStream::new_unbounded(); let indexer_pool = IndexerPool::default(); setup_indexer_pool( - &node_config, cluster_change_stream, - indexer_pool.clone(), Some(indexing_service_mailbox), + indexer_pool.clone(), + node_config.grpc_config.max_message_size, ); - let new_indexer_node = - ClusterNode::for_test("test-indexer-node", 1, true, &["indexer"], &[]).await; - cluster_change_stream_tx - .send(ClusterChange::Add(new_indexer_node)) - .unwrap(); - tokio::time::sleep(Duration::from_millis(1)).await; - - assert_eq!(indexer_pool.len(), 1); - - let new_indexer_node_info = indexer_pool.get("test-indexer-node").unwrap(); - assert!(new_indexer_node_info.indexing_tasks.is_empty()); - - let new_indexing_task = IndexingTask { - pipeline_uid: Some(PipelineUid::for_test(0u128)), - index_uid: Some(IndexUid::for_test("test-index", 0)), - source_id: "test-source".to_string(), - shard_ids: Vec::new(), - params_fingerprint: 0, - }; - let updated_indexer_node = ClusterNode::for_test( + // adding a indexer node refreshes the indexer pool + let new_indexer_node = ClusterNode::for_test( "test-indexer-node", 1, true, &["indexer"], - 
std::slice::from_ref(&new_indexing_task), + &[], + IngesterStatus::Ready, ) .await; cluster_change_stream_tx - .send(ClusterChange::Update(updated_indexer_node.clone())) + .send(ClusterChange::Add(new_indexer_node.clone())) .unwrap(); tokio::time::sleep(Duration::from_millis(1)).await; - let updated_indexer_node_info = indexer_pool.get("test-indexer-node").unwrap(); - assert_eq!(updated_indexer_node_info.indexing_tasks.len(), 1); - assert_eq!( - updated_indexer_node_info.indexing_tasks[0], - new_indexing_task - ); + assert_eq!(indexer_pool.len(), 1); + // removing an indexer node refreshes the indexer pool cluster_change_stream_tx - .send(ClusterChange::Remove(updated_indexer_node)) + .send(ClusterChange::Remove(new_indexer_node)) .unwrap(); tokio::time::sleep(Duration::from_millis(1)).await; @@ -1569,8 +1639,10 @@ mod tests { #[tokio::test] async fn test_setup_searcher() { let node_config = NodeConfig::for_test(); - let searcher_context = - Arc::new(SearcherContext::new(SearcherConfig::default(), None, None)); + let searcher_context = Arc::new(SearcherContext::new_without_invoker( + SearcherConfig::default(), + None, + )); let metastore = metastore_for_test(); let (change_stream, change_stream_tx) = ClusterChangeStream::new_unbounded(); let storage_resolver = StorageResolver::unconfigured(); @@ -1600,7 +1672,15 @@ mod tests { .await .unwrap_err(); - let self_node = ClusterNode::for_test("node-1", 1337, true, &["searcher"], &[]).await; + let self_node = ClusterNode::for_test( + "node-1", + 1337, + true, + &["searcher"], + &[], + IngesterStatus::Ready, + ) + .await; change_stream_tx .send(ClusterChange::Add(self_node.clone())) .unwrap(); @@ -1616,7 +1696,15 @@ mod tests { .send(ClusterChange::Remove(self_node)) .unwrap(); - let node = ClusterNode::for_test("node-1", 1337, false, &["searcher"], &[]).await; + let node = ClusterNode::for_test( + "node-1", + 1337, + false, + &["searcher"], + &[], + IngesterStatus::Ready, + ) + .await; 
change_stream_tx.send(ClusterChange::Add(node)).unwrap(); tokio::time::sleep(Duration::from_millis(1)).await; @@ -1626,4 +1714,96 @@ mod tests { .unwrap(); assert!(!searcher_client.is_local()); } + + #[tokio::test] + async fn test_setup_ingester_pool() { + let (cluster_change_stream, cluster_change_stream_tx) = + ClusterChangeStream::new_unbounded(); + let ingester_pool = IngesterPool::default(); + setup_ingester_pool( + cluster_change_stream, + None::, + ingester_pool.clone(), + None, + ByteSize::mib(20), + ); + + // Add an indexer node with IngesterStatus::Initializing. + let new_node = ClusterNode::for_test( + "test-ingester-node", + 1, + false, + &["indexer"], + &[], + IngesterStatus::Initializing, + ) + .await; + cluster_change_stream_tx + .send(ClusterChange::Add(new_node.clone())) + .unwrap(); + tokio::time::sleep(Duration::from_millis(1)).await; + + assert_eq!(ingester_pool.len(), 1); + let pool_entry = ingester_pool + .get(&NodeId::from("test-ingester-node")) + .unwrap(); + assert_eq!(pool_entry.status, IngesterStatus::Initializing); + + // Update the node: ingester status transitions from Initializing to Ready. + let updated_node = ClusterNode::for_test( + "test-ingester-node", + 1, + false, + &["indexer"], + &[], + IngesterStatus::Ready, + ) + .await; + cluster_change_stream_tx + .send(ClusterChange::Update { + previous: new_node.clone(), + updated: updated_node.clone(), + }) + .unwrap(); + tokio::time::sleep(Duration::from_millis(1)).await; + + assert_eq!(ingester_pool.len(), 1); + let pool_entry = ingester_pool + .get(&NodeId::from("test-ingester-node")) + .unwrap(); + assert_eq!(pool_entry.status, IngesterStatus::Ready); + + // Update the node: ingester status transitions from Ready to Decommissioning. 
+ let updated_node_2 = ClusterNode::for_test( + "test-ingester-node", + 1, + false, + &["indexer"], + &[], + IngesterStatus::Decommissioning, + ) + .await; + cluster_change_stream_tx + .send(ClusterChange::Update { + previous: updated_node.clone(), + updated: updated_node_2.clone(), + }) + .unwrap(); + tokio::time::sleep(Duration::from_millis(1)).await; + + // The node should still be in the pool with updated status. + assert_eq!(ingester_pool.len(), 1); + let pool_entry = ingester_pool + .get(&NodeId::from("test-ingester-node")) + .unwrap(); + assert_eq!(pool_entry.status, IngesterStatus::Decommissioning); + + // Remove the node. + cluster_change_stream_tx + .send(ClusterChange::Remove(updated_node)) + .unwrap(); + tokio::time::sleep(Duration::from_millis(1)).await; + + assert!(ingester_pool.is_empty()); + } } diff --git a/quickwit/quickwit-serve/src/rest.rs b/quickwit/quickwit-serve/src/rest.rs index 3f193783b04..d5cb8d8e0cb 100644 --- a/quickwit/quickwit-serve/src/rest.rs +++ b/quickwit/quickwit-serve/src/rest.rs @@ -51,7 +51,8 @@ use crate::node_info_handler::node_info_handler; use crate::otlp_api::otlp_ingest_api_handlers; use crate::rest_api_response::{RestApiError, RestApiResponse}; use crate::search_api::{ - search_get_handler, search_plan_get_handler, search_plan_post_handler, search_post_handler, + list_fields_handler, search_get_handler, search_plan_get_handler, search_plan_post_handler, + search_post_handler, }; use crate::template_api::index_template_api_handlers; use crate::ui_handler::ui_handler; @@ -283,6 +284,7 @@ fn search_routes( .or(search_post_handler(search_service.clone())) .or(search_plan_get_handler(search_service.clone())) .or(search_plan_post_handler(search_service.clone())) + .or(list_fields_handler(search_service.clone())) .recover(recover_fn) .boxed() } diff --git a/quickwit/quickwit-serve/src/search_api/mod.rs b/quickwit/quickwit-serve/src/search_api/mod.rs index ef1f643257c..61973a894db 100644 --- 
a/quickwit/quickwit-serve/src/search_api/mod.rs +++ b/quickwit/quickwit-serve/src/search_api/mod.rs @@ -17,7 +17,8 @@ mod rest_handler; pub use self::grpc_adapter::GrpcSearchAdapter; pub use self::rest_handler::{ - SearchApi, SearchRequestQueryString, SortBy, search_get_handler, search_plan_get_handler, - search_plan_post_handler, search_post_handler, search_request_from_api_request, + SearchApi, SearchRequestQueryString, SortBy, list_fields_handler, search_get_handler, + search_plan_get_handler, search_plan_post_handler, search_post_handler, + search_request_from_api_request, }; pub(crate) use self::rest_handler::{extract_index_id_patterns, extract_index_id_patterns_default}; diff --git a/quickwit/quickwit-serve/src/search_api/rest_handler.rs b/quickwit/quickwit-serve/src/search_api/rest_handler.rs index b1400fa12c0..08d174747dd 100644 --- a/quickwit/quickwit-serve/src/search_api/rest_handler.rs +++ b/quickwit/quickwit-serve/src/search_api/rest_handler.rs @@ -17,7 +17,7 @@ use std::sync::Arc; use percent_encoding::percent_decode_str; use quickwit_config::validate_index_id_pattern; -use quickwit_proto::search::{CountHits, SortField, SortOrder}; +use quickwit_proto::search::{CountHits, ListFieldsRequest, SortField, SortOrder}; use quickwit_query::query_ast::query_ast_from_user_text; use quickwit_search::{SearchError, SearchPlanResponseRest, SearchResponseRest, SearchService}; use serde::{Deserialize, Deserializer, Serialize, Serializer}; @@ -36,12 +36,14 @@ use crate::{BodyFormat, with_arg}; search_post_handler, search_plan_get_handler, search_plan_post_handler, + list_fields_handler, ), components(schemas( BodyFormat, SearchRequestQueryString, SearchResponseRest, SearchPlanResponseRest, + ListFieldsQueryString, SortBy, SortField, SortOrder, @@ -240,6 +242,67 @@ mod count_hits_from_bool { } } +/// This struct represents the QueryString passed to +/// the rest API. 
+#[derive( + Debug, Default, Eq, PartialEq, Serialize, Deserialize, utoipa::IntoParams, utoipa::ToSchema, +)] +#[into_params(parameter_in = Query)] +#[serde(deny_unknown_fields)] +pub struct ListFieldsQueryString { + /// Field names to filter on. It can be a comma-separated list of field names. If empty, all + /// fields are returned. It also supports wildcards. + #[serde(default)] + pub fields: String, + /// If set, restrict search to documents with a `timestamp >= start_timestamp`. + /// This timestamp is expressed in seconds. + #[serde(skip_serializing_if = "Option::is_none")] + pub start_timestamp: Option, + /// If set, restrict search to documents with a `timestamp < end_timestamp``. + /// This timestamp is expressed in seconds. + #[serde(skip_serializing_if = "Option::is_none")] + pub end_timestamp: Option, + /// Maximum number of fields to return. + pub max_fields: Option, + /// First hit to return. Together with num_hits, this parameter + /// can be used for pagination. + /// + /// E.g. + /// The results with rank [start_offset..start_offset + max_hits) are returned + #[serde(default)] // Default to 0. (We are 0-indexed) + pub start_offset: u64, + /// Query text. The query language is that of tantivy. + #[serde(default)] + pub query: String, + /// The output format. 
+ #[serde(default)] + pub format: BodyFormat, +} + +pub fn list_fields_request_from_api_request( + index_id_patterns: Vec, + request: ListFieldsQueryString, +) -> Result { + let query_ast = query_ast_from_user_text(&request.query, None); + let query_ast = serde_json::to_string(&query_ast)?; + + let req = ListFieldsRequest { + end_timestamp: request.end_timestamp, + start_timestamp: request.start_timestamp, + fields: if !request.fields.is_empty() { + request.fields.split(',').map(String::from).collect() + } else { + Default::default() + }, + index_id_patterns, + max_fields: request.max_fields, + start_offset: request.start_offset, + query_ast: Some(query_ast), + }; + + Ok(req) +} + pub fn search_request_from_api_request( index_id_patterns: Vec, search_request: SearchRequestQueryString, @@ -328,6 +391,14 @@ fn search_plan_post_filter() .and(warp::body::json()) } +fn list_fields_filter() +-> impl Filter, ListFieldsQueryString), Error = Rejection> + Clone { + warp::path!(String / "list-fields") + .and_then(extract_index_id_patterns) + .and(warp::get()) + .and(warp::query()) +} + async fn search( index_id_patterns: Vec, search_request: SearchRequestQueryString, @@ -355,6 +426,20 @@ async fn search_plan( into_rest_api_response(result, body_format) } +async fn list_fields( + index_id_patterns: Vec, + request: ListFieldsQueryString, + search_service: Arc, +) -> impl warp::Reply { + let result: Result<_, SearchError> = async { + let request = list_fields_request_from_api_request(index_id_patterns, request)?; + search_service.root_list_fields(request).await + } + .await; + + into_rest_api_response(result, BodyFormat::Json) +} + #[utoipa::path( get, tag = "Search", @@ -449,6 +534,27 @@ pub fn search_plan_post_handler( .then(search_plan) } +#[utoipa::path( + get, + tag = "Search", + path = "/{index_id}/list-fields", + responses( + (status = 200, description = "Successfully retrieved list of fields.", body = [String]) + ), + params( + ("index_id" = String, Path, description = 
"The index ID to search."), + ListFieldsQueryString + ) +)] +/// List Fields +pub fn list_fields_handler( + search_service: Arc, +) -> impl Filter + Clone { + list_fields_filter() + .and(with_arg(search_service)) + .then(list_fields) +} + #[cfg(test)] mod tests { use assert_json_diff::{assert_json_eq, assert_json_include}; diff --git a/quickwit/quickwit-ui/cypress.config.js b/quickwit/quickwit-ui/cypress.config.js deleted file mode 100644 index 00266a27db5..00000000000 --- a/quickwit/quickwit-ui/cypress.config.js +++ /dev/null @@ -1,12 +0,0 @@ -const { defineConfig } = require("cypress"); - -module.exports = defineConfig({ - video: false, - screenshotOnRunFailure: false, - - e2e: { - setupNodeEvents(on, config) { - // implement node event listeners here - }, - }, -}); diff --git a/quickwit/quickwit-ui/cypress/e2e/homepage.spec.cy.js b/quickwit/quickwit-ui/cypress/e2e/homepage.spec.cy.js deleted file mode 100644 index f61352f94bd..00000000000 --- a/quickwit/quickwit-ui/cypress/e2e/homepage.spec.cy.js +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2021-Present Datadog, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -describe("Home navigation", () => { - it("Should display sidebar links", () => { - cy.visit("http://127.0.0.1:7280/ui"); - cy.get("a") - .should("be.visible") - .should("contain.text", "Query editor") - .should("contain.text", "Indexes") - .should("contain.text", "Cluster"); - }); - it("Should navigate to cluster state", () => { - cy.visit("http://127.0.0.1:7280/ui"); - cy.get("a").contains("Cluster").click(); - cy.get("p").should("contain.text", "Cluster"); - cy.get("span").should("contain.text", "cluster_id"); - }); - it("Should display otel logs index page", () => { - cy.visit("http://127.0.0.1:7280/ui/indexes/otel-logs-v0_7"); - cy.get("a").should("be.visible").should("contain.text", "Indexes"); - }); -}); diff --git a/quickwit/quickwit-ui/cypress/fixtures/example.json b/quickwit/quickwit-ui/cypress/fixtures/example.json deleted file mode 100644 index 02e4254378e..00000000000 --- a/quickwit/quickwit-ui/cypress/fixtures/example.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "name": "Using fixtures to represent data", - "email": "hello@cypress.io", - "body": "Fixtures are a great way to mock data for responses to routes" -} diff --git a/quickwit/quickwit-ui/cypress/plugins/index.js b/quickwit/quickwit-ui/cypress/plugins/index.js deleted file mode 100644 index 8dd144a6c1a..00000000000 --- a/quickwit/quickwit-ui/cypress/plugins/index.js +++ /dev/null @@ -1,21 +0,0 @@ -/// -// *********************************************************** -// This example plugins/index.js can be used to load plugins -// -// You can change the location of this file or turn off loading -// the plugins file with the 'pluginsFile' configuration option. -// -// You can read more here: -// https://on.cypress.io/plugins-guide -// *********************************************************** - -// This function is called when a project is opened or re-opened (e.g. 
due to -// the project's config changing) - -/** - * @type {Cypress.PluginConfig} - */ -module.exports = (on, config) => { - // `on` is used to hook into various events Cypress emits - // `config` is the resolved Cypress config -}; diff --git a/quickwit/quickwit-ui/cypress/support/commands.js b/quickwit/quickwit-ui/cypress/support/commands.js deleted file mode 100644 index 119ab03f7cd..00000000000 --- a/quickwit/quickwit-ui/cypress/support/commands.js +++ /dev/null @@ -1,25 +0,0 @@ -// *********************************************** -// This example commands.js shows you how to -// create various custom commands and overwrite -// existing commands. -// -// For more comprehensive examples of custom -// commands please read more here: -// https://on.cypress.io/custom-commands -// *********************************************** -// -// -// -- This is a parent command -- -// Cypress.Commands.add('login', (email, password) => { ... }) -// -// -// -- This is a child command -- -// Cypress.Commands.add('drag', { prevSubject: 'element'}, (subject, options) => { ... }) -// -// -// -- This is a dual command -- -// Cypress.Commands.add('dismiss', { prevSubject: 'optional'}, (subject, options) => { ... }) -// -// -// -- This will overwrite an existing command -- -// Cypress.Commands.overwrite('visit', (originalFn, url, options) => { ... }) diff --git a/quickwit/quickwit-ui/cypress/support/e2e.js b/quickwit/quickwit-ui/cypress/support/e2e.js deleted file mode 100644 index 3a252243880..00000000000 --- a/quickwit/quickwit-ui/cypress/support/e2e.js +++ /dev/null @@ -1,20 +0,0 @@ -// *********************************************************** -// This example support/e2e.js is processed and -// loaded automatically before your test files. -// -// This is a great place to put global configuration and -// behavior that modifies Cypress. -// -// You can change the location of this file or turn off -// automatically serving support files with the -// 'supportFile' configuration option. 
-// -// You can read more here: -// https://on.cypress.io/configuration -// *********************************************************** - -// Import commands.js using ES2015 syntax: -import "./commands"; - -// Alternatively you can use CommonJS syntax: -// require('./commands') diff --git a/quickwit/quickwit-ui/cypress/support/index.js b/quickwit/quickwit-ui/cypress/support/index.js deleted file mode 100644 index 7c3a7f398dc..00000000000 --- a/quickwit/quickwit-ui/cypress/support/index.js +++ /dev/null @@ -1,30 +0,0 @@ -// *********************************************************** -// This example support/index.js is processed and -// loaded automatically before your test files. -// -// This is a great place to put global configuration and -// behavior that modifies Cypress. -// -// You can change the location of this file or turn off -// automatically serving support files with the -// 'supportFile' configuration option. -// -// You can read more here: -// https://on.cypress.io/configuration -// *********************************************************** - -// Import commands.js using ES2015 syntax: -import "./commands"; - -// Alternatively you can use CommonJS syntax: -// require('./commands') - -Cypress.on("uncaught:exception", (err, runnable) => { - // we expect a 3rd party library error with message 'list not defined' - // and don't want to fail the test so we return false - if (err.message.includes("monaco-editor")) { - return false; - } - // we still want to ensure there are no other unexpected - // errors, so we let them fail the test -}); diff --git a/quickwit/quickwit-ui/e2e/homepage.spec.ts b/quickwit/quickwit-ui/e2e/homepage.spec.ts new file mode 100644 index 00000000000..73b86b64f06 --- /dev/null +++ b/quickwit/quickwit-ui/e2e/homepage.spec.ts @@ -0,0 +1,40 @@ +// Copyright 2021-Present Datadog, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import { expect, test } from "@playwright/test"; + +test.describe("Home navigation", () => { + test("Should display sidebar links", async ({ page }) => { + await page.goto("/ui"); + await expect(page.locator("a")).toContainText([ + "Query editor", + "Indexes", + "Cluster", + ]); + }); + + test("Should navigate to cluster state", async ({ page }) => { + await page.goto("/ui"); + await page.getByRole("link", { name: "Cluster" }).click(); + await expect(page.getByLabel("breadcrumb")).toContainText("Cluster"); + await expect(page.getByText("cluster_id")).toBeVisible(); + }); + + test("Should display otel logs index page", async ({ page }) => { + await page.goto("/ui/indexes/otel-logs-v0_7"); + await expect( + page.getByLabel("breadcrumb").getByRole("link", { name: "Indexes" }), + ).toBeVisible(); + }); +}); diff --git a/quickwit/quickwit-ui/jest.config.js b/quickwit/quickwit-ui/jest.config.js index 8dfa593da6a..1ed3ed949da 100644 --- a/quickwit/quickwit-ui/jest.config.js +++ b/quickwit/quickwit-ui/jest.config.js @@ -32,5 +32,7 @@ module.exports = { "@mui/x-charts": "/mocks/x-charts.js", }, + testPathIgnorePatterns: ["/node_modules/", "/e2e/"], + resetMocks: true, }; diff --git a/quickwit/quickwit-ui/mocks/monacoMock.js b/quickwit/quickwit-ui/mocks/monacoMock.js index 1ac9215b67b..4c11a2f0ccf 100644 --- a/quickwit/quickwit-ui/mocks/monacoMock.js +++ b/quickwit/quickwit-ui/mocks/monacoMock.js @@ -12,8 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-import * as React from "react"; - // Mock MonocoEditor as the current jest setup does not work when Monaco JS files // are loaded. export const Editor = (props) => { diff --git a/quickwit/quickwit-ui/mocks/swaggerUIMock.js b/quickwit/quickwit-ui/mocks/swaggerUIMock.js index 50d9495839c..3ccd93b6c0b 100644 --- a/quickwit/quickwit-ui/mocks/swaggerUIMock.js +++ b/quickwit/quickwit-ui/mocks/swaggerUIMock.js @@ -12,8 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -import * as React from "react"; - // Mock SwaggerUI as the current jest setup does not work when Monaco JS files // are loaded. export default function SwaggerUI(props) { diff --git a/quickwit/quickwit-ui/package.json b/quickwit/quickwit-ui/package.json index eee5a5e49ed..1f6b30061b5 100644 --- a/quickwit/quickwit-ui/package.json +++ b/quickwit/quickwit-ui/package.json @@ -5,52 +5,49 @@ "private": true, "packageManager": "yarn@1.22.22", "dependencies": { - "@babel/core": "7.28.5", + "@babel/core": "7.29.0", "@babel/runtime": "7.28.6", - "@biomejs/biome": "2.3.5", - "@dr.pogodin/babel-plugin-transform-assets": "1.2.5", + "@biomejs/biome": "2.4.4", + "@dr.pogodin/babel-plugin-transform-assets": "1.2.6", "@emotion/react": "11.14.0", "@emotion/styled": "11.14.1", "@monaco-editor/react": "4.7.0", - "@mui/icons-material": "7.3.5", - "@mui/lab": "7.0.1-beta.19", - "@mui/material": "7.3.5", - "@mui/system": "7.3.5", - "@mui/x-charts": "8.18.0", - "@mui/x-date-pickers": "8.18.0", + "@mui/icons-material": "7.3.8", + "@mui/lab": "7.0.1-beta.22", + "@mui/material": "7.3.8", + "@mui/system": "7.3.8", + "@mui/x-charts": "8.27.0", + "@mui/x-date-pickers": "8.27.2", "@testing-library/dom": "10.4.1", "@testing-library/jest-dom": "6.9.1", - "@testing-library/react": "16.3.0", + "@testing-library/react": "16.3.2", "@testing-library/user-event": "14.6.1", "@types/jest": "30.0.0", "@types/node": "24.10.9", - "@types/react": "19.2.6", + "@types/react": "19.2.14", 
"@types/react-dom": "19.2.3", "@types/swagger-ui-react": "5.18.0", "babel-jest": "30.2.0", "babel-preset-react-app": "10.1.0", - "cypress": "13.3.2", - "dayjs": "1.11.7", + "dayjs": "1.11.19", "jest": "30.2.0", "jest-environment-jsdom": "30.2.0", - "monaco-editor": "0.54.0", - "react": "19.2.0", + "monaco-editor": "0.55.1", + "react": "19.2.4", "react-app-polyfill": "3.0.0", - "react-dom": "19.2.0", - "react-monaco-editor": "0.59.0", + "react-dom": "19.2.4", "react-number-format": "5.4.4", - "react-router": "7.12.0", + "react-router": "7.13.1", "styled-components": "6.1.19", "styled-icons": "10.47.1", - "swagger-ui-react": "5.30.2", + "swagger-ui-react": "5.32.0", "typescript": "5.9.3", - "vite": "7.2.2", - "web-vitals": "2.1.4" + "vite": "7.3.1" }, "resolutions": { - "@types/react": "19.2.6", + "@types/react": "19.2.14", "@types/react-dom": "19.2.3", - "dompurify": "3.2.4", + "dompurify": "3.3.1", "glob": "11.1.0" }, "scripts": { @@ -61,6 +58,9 @@ "lint": "biome check", "format": "biome check --write", "type": "tsc", - "e2e-test": "cypress run" + "e2e-test": "playwright test" + }, + "devDependencies": { + "@playwright/test": "^1.58.2" } } diff --git a/quickwit/quickwit-ui/src/reportWebVitals.ts b/quickwit/quickwit-ui/playwright.config.ts similarity index 58% rename from quickwit/quickwit-ui/src/reportWebVitals.ts rename to quickwit/quickwit-ui/playwright.config.ts index e98fd45dfde..56d41855388 100644 --- a/quickwit/quickwit-ui/src/reportWebVitals.ts +++ b/quickwit/quickwit-ui/playwright.config.ts @@ -12,18 +12,14 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-import { ReportHandler } from "web-vitals"; +import { defineConfig } from "@playwright/test"; -const reportWebVitals = (onPerfEntry?: ReportHandler) => { - if (onPerfEntry && onPerfEntry instanceof Function) { - import("web-vitals").then(({ getCLS, getFID, getFCP, getLCP, getTTFB }) => { - getCLS(onPerfEntry); - getFID(onPerfEntry); - getFCP(onPerfEntry); - getLCP(onPerfEntry); - getTTFB(onPerfEntry); - }); - } -}; - -export default reportWebVitals; +export default defineConfig({ + testDir: "./e2e", + use: { + baseURL: "http://127.0.0.1:7280/ui", + browserName: "chromium", + video: "off", + screenshot: "off", + }, +}); diff --git a/quickwit/quickwit-ui/src/index.tsx b/quickwit/quickwit-ui/src/index.tsx index b2a2d544ce0..8aeb2cbd60b 100644 --- a/quickwit/quickwit-ui/src/index.tsx +++ b/quickwit/quickwit-ui/src/index.tsx @@ -16,7 +16,6 @@ import React from "react"; import { createRoot } from "react-dom/client"; import "./index.css"; import { BrowserRouter } from "react-router"; -import reportWebVitals from "./reportWebVitals"; import App from "./views/App"; const root = createRoot(document.getElementById("root")!); @@ -27,8 +26,3 @@ root.render( , ); - -// If you want to start measuring performance in your app, pass a function -// to log results (for example: reportWebVitals(console.log)) -// or send to an analytics endpoint. 
Learn more: https://bit.ly/CRA-vitals -reportWebVitals(); diff --git a/quickwit/quickwit-ui/yarn.lock b/quickwit/quickwit-ui/yarn.lock index 17d921b76b6..bd05302217f 100644 --- a/quickwit/quickwit-ui/yarn.lock +++ b/quickwit/quickwit-ui/yarn.lock @@ -50,6 +50,15 @@ "@babel/highlight" "^7.22.13" chalk "^2.4.2" +"@babel/code-frame@^7.28.6", "@babel/code-frame@^7.29.0": + version "7.29.0" + resolved "https://registry.yarnpkg.com/@babel/code-frame/-/code-frame-7.29.0.tgz#7cd7a59f15b3cc0dcd803038f7792712a7d0b15c" + integrity sha512-9NhCeYjq9+3uxgdtp20LSiJXJvN0FeCtNGpJxuMFZ1Kv3cWUNb6DOhJwUvcVCzKGR66cw4njwM6hrJLqgOwbcw== + dependencies: + "@babel/helper-validator-identifier" "^7.28.5" + js-tokens "^4.0.0" + picocolors "^1.1.1" + "@babel/compat-data@^7.17.7", "@babel/compat-data@^7.20.1", "@babel/compat-data@^7.20.5": version "7.20.10" resolved "https://registry.yarnpkg.com/@babel/compat-data/-/compat-data-7.20.10.tgz#9d92fa81b87542fff50e848ed585b4212c1d34ec" @@ -60,20 +69,25 @@ resolved "https://registry.yarnpkg.com/@babel/compat-data/-/compat-data-7.28.5.tgz#a8a4962e1567121ac0b3b487f52107443b455c7f" integrity sha512-6uFXyCayocRbqhZOB+6XcuZbkMNimwfVGFji8CTZnCzOHVGvDqzvitu1re2AU5LROliz7eQPhB8CpAMvnx9EjA== -"@babel/core@7.28.5", "@babel/core@^7.23.9", "@babel/core@^7.27.4": - version "7.28.5" - resolved "https://registry.yarnpkg.com/@babel/core/-/core-7.28.5.tgz#4c81b35e51e1b734f510c99b07dfbc7bbbb48f7e" - integrity sha512-e7jT4DxYvIDLk1ZHmU/m/mB19rex9sv0c2ftBtjSBv+kVM/902eh0fINUzD7UwLLNR+jU585GxUJ8/EBfAM5fw== - dependencies: - "@babel/code-frame" "^7.27.1" - "@babel/generator" "^7.28.5" - "@babel/helper-compilation-targets" "^7.27.2" - "@babel/helper-module-transforms" "^7.28.3" - "@babel/helpers" "^7.28.4" - "@babel/parser" "^7.28.5" - "@babel/template" "^7.27.2" - "@babel/traverse" "^7.28.5" - "@babel/types" "^7.28.5" +"@babel/compat-data@^7.28.6": + version "7.29.0" + resolved 
"https://registry.yarnpkg.com/@babel/compat-data/-/compat-data-7.29.0.tgz#00d03e8c0ac24dd9be942c5370990cbe1f17d88d" + integrity sha512-T1NCJqT/j9+cn8fvkt7jtwbLBfLC/1y1c7NtCeXFRgzGTsafi68MRv8yzkYSapBnFA6L3U2VSc02ciDzoAJhJg== + +"@babel/core@7.29.0": + version "7.29.0" + resolved "https://registry.yarnpkg.com/@babel/core/-/core-7.29.0.tgz#5286ad785df7f79d656e88ce86e650d16ca5f322" + integrity sha512-CGOfOJqWjg2qW/Mb6zNsDm+u5vFQ8DxXfbM09z69p5Z6+mE1ikP2jUXw+j42Pf1XTYED2Rni5f95npYeuwMDQA== + dependencies: + "@babel/code-frame" "^7.29.0" + "@babel/generator" "^7.29.0" + "@babel/helper-compilation-targets" "^7.28.6" + "@babel/helper-module-transforms" "^7.28.6" + "@babel/helpers" "^7.28.6" + "@babel/parser" "^7.29.0" + "@babel/template" "^7.28.6" + "@babel/traverse" "^7.29.0" + "@babel/types" "^7.29.0" "@jridgewell/remapping" "^2.3.5" convert-source-map "^2.0.0" debug "^4.1.0" @@ -102,6 +116,27 @@ json5 "^2.2.1" semver "^6.3.0" +"@babel/core@^7.23.9", "@babel/core@^7.27.4": + version "7.28.5" + resolved "https://registry.yarnpkg.com/@babel/core/-/core-7.28.5.tgz#4c81b35e51e1b734f510c99b07dfbc7bbbb48f7e" + integrity sha512-e7jT4DxYvIDLk1ZHmU/m/mB19rex9sv0c2ftBtjSBv+kVM/902eh0fINUzD7UwLLNR+jU585GxUJ8/EBfAM5fw== + dependencies: + "@babel/code-frame" "^7.27.1" + "@babel/generator" "^7.28.5" + "@babel/helper-compilation-targets" "^7.27.2" + "@babel/helper-module-transforms" "^7.28.3" + "@babel/helpers" "^7.28.4" + "@babel/parser" "^7.28.5" + "@babel/template" "^7.27.2" + "@babel/traverse" "^7.28.5" + "@babel/types" "^7.28.5" + "@jridgewell/remapping" "^2.3.5" + convert-source-map "^2.0.0" + debug "^4.1.0" + gensync "^1.0.0-beta.2" + json5 "^2.2.3" + semver "^6.3.1" + "@babel/generator@^7.20.7": version "7.20.7" resolved "https://registry.yarnpkg.com/@babel/generator/-/generator-7.20.7.tgz#f8ef57c8242665c5929fe2e8d82ba75460187b4a" @@ -132,6 +167,17 @@ "@jridgewell/trace-mapping" "^0.3.28" jsesc "^3.0.2" +"@babel/generator@^7.29.0": + version "7.29.1" + resolved 
"https://registry.yarnpkg.com/@babel/generator/-/generator-7.29.1.tgz#d09876290111abbb00ef962a7b83a5307fba0d50" + integrity sha512-qsaF+9Qcm2Qv8SRIMMscAvG4O3lJ0F1GuMo5HR/Bp02LopNgnZBC/EkbevHFeGs4ls/oPz9v+Bsmzbkbe+0dUw== + dependencies: + "@babel/parser" "^7.29.0" + "@babel/types" "^7.29.0" + "@jridgewell/gen-mapping" "^0.3.12" + "@jridgewell/trace-mapping" "^0.3.28" + jsesc "^3.0.2" + "@babel/helper-annotate-as-pure@^7.18.6": version "7.18.6" resolved "https://registry.yarnpkg.com/@babel/helper-annotate-as-pure/-/helper-annotate-as-pure-7.18.6.tgz#eaa49f6f80d5a33f9a5dd2276e6d6e451be0a6bb" @@ -176,6 +222,17 @@ lru-cache "^5.1.1" semver "^6.3.1" +"@babel/helper-compilation-targets@^7.28.6": + version "7.28.6" + resolved "https://registry.yarnpkg.com/@babel/helper-compilation-targets/-/helper-compilation-targets-7.28.6.tgz#32c4a3f41f12ed1532179b108a4d746e105c2b25" + integrity sha512-JYtls3hqi15fcx5GaSNL7SCTJ2MNmjrkHXg4FSpOA/grxK8KwyZ5bubHsCq8FXCkua6xhuaaBit+3b7+VZRfcA== + dependencies: + "@babel/compat-data" "^7.28.6" + "@babel/helper-validator-option" "^7.27.1" + browserslist "^4.24.0" + lru-cache "^5.1.1" + semver "^6.3.1" + "@babel/helper-create-class-features-plugin@^7.18.6", "@babel/helper-create-class-features-plugin@^7.20.5", "@babel/helper-create-class-features-plugin@^7.20.7": version "7.20.7" resolved "https://registry.yarnpkg.com/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.20.7.tgz#d0e1f8d7e4ed5dac0389364d9c0c191d948ade6f" @@ -304,6 +361,14 @@ "@babel/traverse" "^7.27.1" "@babel/types" "^7.27.1" +"@babel/helper-module-imports@^7.28.6": + version "7.28.6" + resolved "https://registry.yarnpkg.com/@babel/helper-module-imports/-/helper-module-imports-7.28.6.tgz#60632cbd6ffb70b22823187201116762a03e2d5c" + integrity sha512-l5XkZK7r7wa9LucGw9LwZyyCUscb4x37JWTPz7swwFE/0FMQAGpiWUZn8u9DzkSBWEcK25jmvubfpw2dnAMdbw== + dependencies: + "@babel/traverse" "^7.28.6" + "@babel/types" "^7.28.6" + 
"@babel/helper-module-transforms@^7.18.6", "@babel/helper-module-transforms@^7.20.11", "@babel/helper-module-transforms@^7.20.7": version "7.20.11" resolved "https://registry.yarnpkg.com/@babel/helper-module-transforms/-/helper-module-transforms-7.20.11.tgz#df4c7af713c557938c50ea3ad0117a7944b2f1b0" @@ -327,6 +392,15 @@ "@babel/helper-validator-identifier" "^7.27.1" "@babel/traverse" "^7.28.3" +"@babel/helper-module-transforms@^7.28.6": + version "7.28.6" + resolved "https://registry.yarnpkg.com/@babel/helper-module-transforms/-/helper-module-transforms-7.28.6.tgz#9312d9d9e56edc35aeb6e95c25d4106b50b9eb1e" + integrity sha512-67oXFAYr2cDLDVGLXTEABjdBJZ6drElUSI7WKp70NrpyISso3plG9SAGEF6y7zbha/wOzUByWWTJvEDVNIUGcA== + dependencies: + "@babel/helper-module-imports" "^7.28.6" + "@babel/helper-validator-identifier" "^7.28.5" + "@babel/traverse" "^7.28.6" + "@babel/helper-optimise-call-expression@^7.18.6": version "7.18.6" resolved "https://registry.yarnpkg.com/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.18.6.tgz#9369aa943ee7da47edab2cb4e838acf09d290ffe" @@ -485,6 +559,14 @@ "@babel/template" "^7.27.2" "@babel/types" "^7.28.4" +"@babel/helpers@^7.28.6": + version "7.28.6" + resolved "https://registry.yarnpkg.com/@babel/helpers/-/helpers-7.28.6.tgz#fca903a313ae675617936e8998b814c415cbf5d7" + integrity sha512-xOBvwq86HHdB7WUDTfKfT/Vuxh7gElQ+Sfti2Cy6yIWNW05P8iUslOVcZ4/sKbE+/jQaukQAdz/gf3724kYdqw== + dependencies: + "@babel/template" "^7.28.6" + "@babel/types" "^7.28.6" + "@babel/highlight@^7.18.6": version "7.18.6" resolved "https://registry.yarnpkg.com/@babel/highlight/-/highlight-7.18.6.tgz#81158601e93e2563795adcbfbdf5d64be3f2ecdf" @@ -520,6 +602,13 @@ dependencies: "@babel/types" "^7.28.5" +"@babel/parser@^7.28.6", "@babel/parser@^7.29.0": + version "7.29.0" + resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.29.0.tgz#669ef345add7d057e92b7ed15f0bac07611831b6" + integrity 
sha512-IyDgFV5GeDUVX4YdF/3CPULtVGSXXMLh1xVIgdCgxApktqnQV0r7/8Nqthg+8YLGaAtdyIlo2qIdZrbCv4+7ww== + dependencies: + "@babel/types" "^7.29.0" + "@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression@^7.18.6": version "7.18.6" resolved "https://registry.yarnpkg.com/@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression/-/plugin-bugfix-safari-id-destructuring-collision-in-function-expression-7.18.6.tgz#da5b8f9a580acdfbe53494dba45ea389fb09a4d2" @@ -1287,7 +1376,7 @@ dependencies: core-js-pure "^3.43.0" -"@babel/runtime@7.28.6", "@babel/runtime@^7.12.5", "@babel/runtime@^7.16.3", "@babel/runtime@^7.18.3", "@babel/runtime@^7.19.0", "@babel/runtime@^7.20.7", "@babel/runtime@^7.28.4", "@babel/runtime@^7.5.5", "@babel/runtime@^7.8.4", "@babel/runtime@^7.8.7": +"@babel/runtime@7.28.6", "@babel/runtime@^7.12.5", "@babel/runtime@^7.16.3", "@babel/runtime@^7.18.3", "@babel/runtime@^7.19.0", "@babel/runtime@^7.20.7", "@babel/runtime@^7.28.4", "@babel/runtime@^7.28.6", "@babel/runtime@^7.5.5", "@babel/runtime@^7.8.4", "@babel/runtime@^7.8.7": version "7.28.6" resolved "https://registry.yarnpkg.com/@babel/runtime/-/runtime-7.28.6.tgz#d267a43cb1836dc4d182cce93ae75ba954ef6d2b" integrity sha512-05WQkdpL9COIMz4LjTxGpPNCdlpyimKppYNoJ5Di5EUObifl8t4tuLuUBBZEpoLYOmfvIWrsp9fCl0HoPRVTdA== @@ -1319,6 +1408,15 @@ "@babel/parser" "^7.27.2" "@babel/types" "^7.27.1" +"@babel/template@^7.28.6": + version "7.28.6" + resolved "https://registry.yarnpkg.com/@babel/template/-/template-7.28.6.tgz#0e7e56ecedb78aeef66ce7972b082fce76a23e57" + integrity sha512-YA6Ma2KsCdGb+WC6UpBVFJGXL58MDA6oyONbjyF/+5sBgxY/dwkhLogbMT2GXXyU84/IhRw/2D1Os1B/giz+BQ== + dependencies: + "@babel/code-frame" "^7.28.6" + "@babel/parser" "^7.28.6" + "@babel/types" "^7.28.6" + "@babel/traverse@^7.20.10", "@babel/traverse@^7.20.5", "@babel/traverse@^7.20.7": version "7.23.2" resolved 
"https://registry.yarnpkg.com/@babel/traverse/-/traverse-7.23.2.tgz#329c7a06735e144a506bdb2cad0268b7f46f4ad8" @@ -1348,6 +1446,19 @@ "@babel/types" "^7.28.5" debug "^4.3.1" +"@babel/traverse@^7.28.6", "@babel/traverse@^7.29.0": + version "7.29.0" + resolved "https://registry.yarnpkg.com/@babel/traverse/-/traverse-7.29.0.tgz#f323d05001440253eead3c9c858adbe00b90310a" + integrity sha512-4HPiQr0X7+waHfyXPZpWPfWL/J7dcN1mx9gL6WdQVMbPnF3+ZhSMs8tCxN7oHddJE9fhNE7+lxdnlyemKfJRuA== + dependencies: + "@babel/code-frame" "^7.29.0" + "@babel/generator" "^7.29.0" + "@babel/helper-globals" "^7.28.0" + "@babel/parser" "^7.29.0" + "@babel/template" "^7.28.6" + "@babel/types" "^7.29.0" + debug "^4.3.1" + "@babel/types@^7.0.0", "@babel/types@^7.18.6", "@babel/types@^7.18.9", "@babel/types@^7.19.0", "@babel/types@^7.20.0", "@babel/types@^7.20.2", "@babel/types@^7.20.5", "@babel/types@^7.20.7", "@babel/types@^7.3.0", "@babel/types@^7.4.4": version "7.20.7" resolved "https://registry.yarnpkg.com/@babel/types/-/types-7.20.7.tgz#54ec75e252318423fc07fb644dc6a58a64c09b7f" @@ -1374,69 +1485,72 @@ "@babel/helper-string-parser" "^7.27.1" "@babel/helper-validator-identifier" "^7.28.5" +"@babel/types@^7.28.6", "@babel/types@^7.29.0": + version "7.29.0" + resolved "https://registry.yarnpkg.com/@babel/types/-/types-7.29.0.tgz#9f5b1e838c446e72cf3cd4b918152b8c605e37c7" + integrity sha512-LwdZHpScM4Qz8Xw2iKSzS+cfglZzJGvofQICy7W7v4caru4EaAmyUuO6BGrbyQ2mYV11W0U8j5mBhd14dd3B0A== + dependencies: + "@babel/helper-string-parser" "^7.27.1" + "@babel/helper-validator-identifier" "^7.28.5" + "@bcoe/v8-coverage@^0.2.3": version "0.2.3" resolved "https://registry.yarnpkg.com/@bcoe/v8-coverage/-/v8-coverage-0.2.3.tgz#75a2e8b51cb758a7553d6804a5932d7aace75c39" integrity sha512-0hYQ8SB4Db5zvZB4axdMHGwEaQjkZzFjQiN9LVYvIFB2nSUHW9tYpxWriPrWDASIxiaXax83REcLxuSdnGPZtw== -"@biomejs/biome@2.3.5": - version "2.3.5" - resolved 
"https://registry.yarnpkg.com/@biomejs/biome/-/biome-2.3.5.tgz#fae9977584fc7fe28f6d4a140982871aa11ae8f6" - integrity sha512-HvLhNlIlBIbAV77VysRIBEwp55oM/QAjQEin74QQX9Xb259/XP/D5AGGnZMOyF1el4zcvlNYYR3AyTMUV3ILhg== +"@biomejs/biome@2.4.4": + version "2.4.4" + resolved "https://registry.yarnpkg.com/@biomejs/biome/-/biome-2.4.4.tgz#009d1751d061946b320e663e5894af60310f355a" + integrity sha512-tigwWS5KfJf0cABVd52NVaXyAVv4qpUXOWJ1rxFL8xF1RVoeS2q/LK+FHgYoKMclJCuRoCWAPy1IXaN9/mS61Q== optionalDependencies: - "@biomejs/cli-darwin-arm64" "2.3.5" - "@biomejs/cli-darwin-x64" "2.3.5" - "@biomejs/cli-linux-arm64" "2.3.5" - "@biomejs/cli-linux-arm64-musl" "2.3.5" - "@biomejs/cli-linux-x64" "2.3.5" - "@biomejs/cli-linux-x64-musl" "2.3.5" - "@biomejs/cli-win32-arm64" "2.3.5" - "@biomejs/cli-win32-x64" "2.3.5" - -"@biomejs/cli-darwin-arm64@2.3.5": - version "2.3.5" - resolved "https://registry.yarnpkg.com/@biomejs/cli-darwin-arm64/-/cli-darwin-arm64-2.3.5.tgz#364de69c055851223d8bab37390ead748e4b208d" - integrity sha512-fLdTur8cJU33HxHUUsii3GLx/TR0BsfQx8FkeqIiW33cGMtUD56fAtrh+2Fx1uhiCsVZlFh6iLKUU3pniZREQw== - -"@biomejs/cli-darwin-x64@2.3.5": - version "2.3.5" - resolved "https://registry.yarnpkg.com/@biomejs/cli-darwin-x64/-/cli-darwin-x64-2.3.5.tgz#6ecfdfb9644e86278801081db88dc4fceb36a666" - integrity sha512-qpT8XDqeUlzrOW8zb4k3tjhT7rmvVRumhi2657I2aGcY4B+Ft5fNwDdZGACzn8zj7/K1fdWjgwYE3i2mSZ+vOA== - -"@biomejs/cli-linux-arm64-musl@2.3.5": - version "2.3.5" - resolved "https://registry.yarnpkg.com/@biomejs/cli-linux-arm64-musl/-/cli-linux-arm64-musl-2.3.5.tgz#cc064d50165687ec5917f0d04d258cc72f6b143f" - integrity sha512-eGUG7+hcLgGnMNl1KHVZUYxahYAhC462jF/wQolqu4qso2MSk32Q+QrpN7eN4jAHAg7FUMIo897muIhK4hXhqg== - -"@biomejs/cli-linux-arm64@2.3.5": - version "2.3.5" - resolved "https://registry.yarnpkg.com/@biomejs/cli-linux-arm64/-/cli-linux-arm64-2.3.5.tgz#b340d9b8b45f568fc719b9c00b3d725a38d2c6be" - integrity 
sha512-u/pybjTBPGBHB66ku4pK1gj+Dxgx7/+Z0jAriZISPX1ocTO8aHh8x8e7Kb1rB4Ms0nA/SzjtNOVJ4exVavQBCw== - -"@biomejs/cli-linux-x64-musl@2.3.5": - version "2.3.5" - resolved "https://registry.yarnpkg.com/@biomejs/cli-linux-x64-musl/-/cli-linux-x64-musl-2.3.5.tgz#097d46b6cac00bd58e41dfcc02afcfaff834e2ab" - integrity sha512-awVuycTPpVTH/+WDVnEEYSf6nbCBHf/4wB3lquwT7puhNg8R4XvonWNZzUsfHZrCkjkLhFH/vCZK5jHatD9FEg== - -"@biomejs/cli-linux-x64@2.3.5": - version "2.3.5" - resolved "https://registry.yarnpkg.com/@biomejs/cli-linux-x64/-/cli-linux-x64-2.3.5.tgz#0c02b06aca6ba905c674175bd6b24d88c862ae9f" - integrity sha512-XrIVi9YAW6ye0CGQ+yax0gLfx+BFOtKaNX74n+xHWla6Cl6huUmcKNO7HPx7BiKnJUzrxXY1qYlm7xMvi08X4g== - -"@biomejs/cli-win32-arm64@2.3.5": - version "2.3.5" - resolved "https://registry.yarnpkg.com/@biomejs/cli-win32-arm64/-/cli-win32-arm64-2.3.5.tgz#c06653bfc77c09aa1a5e714042d737984ea8c1e1" - integrity sha512-DlBiMlBZZ9eIq4H7RimDSGsYcOtfOIfZOaI5CqsWiSlbTfqbPVfWtCf92wNzx8GNMbu1s7/g3ZZESr6+GwM/SA== - -"@biomejs/cli-win32-x64@2.3.5": - version "2.3.5" - resolved "https://registry.yarnpkg.com/@biomejs/cli-win32-x64/-/cli-win32-x64-2.3.5.tgz#8594814adb902c996603c729dda9ca359eeec316" - integrity sha512-nUmR8gb6yvrKhtRgzwo/gDimPwnO5a4sCydf8ZS2kHIJhEmSmk+STsusr1LHTuM//wXppBawvSQi2xFXJCdgKQ== - -"@colors/colors@1.5.0": - version "1.5.0" - resolved "https://registry.yarnpkg.com/@colors/colors/-/colors-1.5.0.tgz#bb504579c1cae923e6576a4f5da43d25f97bdbd9" - integrity sha512-ooWCrlZP11i8GImSjTHYHLkvFDP48nS4+204nGb1RiX/WXYHmJA2III9/e2DWVabCESdW7hBAEzHRqUn9OUVvQ== + "@biomejs/cli-darwin-arm64" "2.4.4" + "@biomejs/cli-darwin-x64" "2.4.4" + "@biomejs/cli-linux-arm64" "2.4.4" + "@biomejs/cli-linux-arm64-musl" "2.4.4" + "@biomejs/cli-linux-x64" "2.4.4" + "@biomejs/cli-linux-x64-musl" "2.4.4" + "@biomejs/cli-win32-arm64" "2.4.4" + "@biomejs/cli-win32-x64" "2.4.4" + +"@biomejs/cli-darwin-arm64@2.4.4": + version "2.4.4" + resolved 
"https://registry.yarnpkg.com/@biomejs/cli-darwin-arm64/-/cli-darwin-arm64-2.4.4.tgz#a8af5254fcc9dda28a1da26125427df083a13579" + integrity sha512-jZ+Xc6qvD6tTH5jM6eKX44dcbyNqJHssfl2nnwT6vma6B1sj7ZLTGIk6N5QwVBs5xGN52r3trk5fgd3sQ9We9A== + +"@biomejs/cli-darwin-x64@2.4.4": + version "2.4.4" + resolved "https://registry.yarnpkg.com/@biomejs/cli-darwin-x64/-/cli-darwin-x64-2.4.4.tgz#6de17d5bb3afaf3275bc602fb7e61fefe31dab84" + integrity sha512-Dh1a/+W+SUCXhEdL7TiX3ArPTFCQKJTI1mGncZNWfO+6suk+gYA4lNyJcBB+pwvF49uw0pEbUS49BgYOY4hzUg== + +"@biomejs/cli-linux-arm64-musl@2.4.4": + version "2.4.4" + resolved "https://registry.yarnpkg.com/@biomejs/cli-linux-arm64-musl/-/cli-linux-arm64-musl-2.4.4.tgz#4c55afe87d555782f46e729212acd4e0411b1703" + integrity sha512-+sPAXq3bxmFwhVFJnSwkSF5Rw2ZAJMH3MF6C9IveAEOdSpgajPhoQhbbAK12SehN9j2QrHpk4J/cHsa/HqWaYQ== + +"@biomejs/cli-linux-arm64@2.4.4": + version "2.4.4" + resolved "https://registry.yarnpkg.com/@biomejs/cli-linux-arm64/-/cli-linux-arm64-2.4.4.tgz#2c8848461bd5195d3e887b0c9fc9c1b441c727ec" + integrity sha512-V/NFfbWhsUU6w+m5WYbBenlEAz8eYnSqRMDMAW3K+3v0tYVkNyZn8VU0XPxk/lOqNXLSCCrV7FmV/u3SjCBShg== + +"@biomejs/cli-linux-x64-musl@2.4.4": + version "2.4.4" + resolved "https://registry.yarnpkg.com/@biomejs/cli-linux-x64-musl/-/cli-linux-x64-musl-2.4.4.tgz#f40336d8c1534887c7ef4198b2b4772aca702ee8" + integrity sha512-gGvFTGpOIQDb5CQ2VC0n9Z2UEqlP46c4aNgHmAMytYieTGEcfqhfCFnhs6xjt0S3igE6q5GLuIXtdQt3Izok+g== + +"@biomejs/cli-linux-x64@2.4.4": + version "2.4.4" + resolved "https://registry.yarnpkg.com/@biomejs/cli-linux-x64/-/cli-linux-x64-2.4.4.tgz#397cadb0ec1e76a993d21ee424bcf2cf7976243e" + integrity sha512-R4+ZCDtG9kHArasyBO+UBD6jr/FcFCTH8QkNTOCu0pRJzCWyWC4EtZa2AmUZB5h3e0jD7bRV2KvrENcf8rndBg== + +"@biomejs/cli-win32-arm64@2.4.4": + version "2.4.4" + resolved "https://registry.yarnpkg.com/@biomejs/cli-win32-arm64/-/cli-win32-arm64-2.4.4.tgz#7d767c15ce20ee39bc1430c2057ccd0208cf12fe" + integrity 
sha512-trzCqM7x+Gn832zZHgr28JoYagQNX4CZkUZhMUac2YxvvyDRLJDrb5m9IA7CaZLlX6lTQmADVfLEKP1et1Ma4Q== + +"@biomejs/cli-win32-x64@2.4.4": + version "2.4.4" + resolved "https://registry.yarnpkg.com/@biomejs/cli-win32-x64/-/cli-win32-x64-2.4.4.tgz#998e87083ff9657a24b4b2f6afcb803a2e820000" + integrity sha512-gnOHKVPFAAPrpoPt2t+Q6FZ7RPry/FDV3GcpU53P3PtLNnQjBmKyN2Vh/JtqXet+H4pme8CC76rScwdjDcT1/A== "@csstools/color-helpers@^5.1.0": version "5.1.0" @@ -1466,44 +1580,12 @@ resolved "https://registry.yarnpkg.com/@csstools/css-tokenizer/-/css-tokenizer-3.0.4.tgz#333fedabc3fd1a8e5d0100013731cf19e6a8c5d3" integrity sha512-Vd/9EVDiu6PPJt9yAh6roZP6El1xHrdvIVGjyBsHR0RYwNHgL7FJPyIIW4fANJNG6FtyZfvlRPpFI4ZM/lubvw== -"@cypress/request@^3.0.0": - version "3.0.1" - resolved "https://registry.yarnpkg.com/@cypress/request/-/request-3.0.1.tgz#72d7d5425236a2413bd3d8bb66d02d9dc3168960" - integrity sha512-TWivJlJi8ZDx2wGOw1dbLuHJKUYX7bWySw377nlnGOW3hP9/MUKIsEdXT/YngWxVdgNCHRBmFlBipE+5/2ZZlQ== - dependencies: - aws-sign2 "~0.7.0" - aws4 "^1.8.0" - caseless "~0.12.0" - combined-stream "~1.0.6" - extend "~3.0.2" - forever-agent "~0.6.1" - form-data "~2.3.2" - http-signature "~1.3.6" - is-typedarray "~1.0.0" - isstream "~0.1.2" - json-stringify-safe "~5.0.1" - mime-types "~2.1.19" - performance-now "^2.1.0" - qs "6.10.4" - safe-buffer "^5.1.2" - tough-cookie "^4.1.3" - tunnel-agent "^0.6.0" - uuid "^8.3.2" - -"@cypress/xvfb@^1.2.4": - version "1.2.4" - resolved "https://registry.yarnpkg.com/@cypress/xvfb/-/xvfb-1.2.4.tgz#2daf42e8275b39f4aa53c14214e557bd14e7748a" - integrity sha512-skbBzPggOVYCbnGgV+0dmBdW/s77ZkAOXIC1knS8NagwDjBrNC1LuXtQJeiN6l+m7lzmHtaoUw/ctJKdqkG57Q== +"@dr.pogodin/babel-plugin-transform-assets@1.2.6": + version "1.2.6" + resolved "https://registry.yarnpkg.com/@dr.pogodin/babel-plugin-transform-assets/-/babel-plugin-transform-assets-1.2.6.tgz#fc93014091bab73a4d38912fa144a364f99002dd" + integrity 
sha512-ZMLKzWr5zCszqOKeEXcDECjGpTozjO+yze+iaH52XlZp83KP5LyE7dVLjjXaJ6LxYe3qWRcCNKl9tr/yxrAdbQ== dependencies: - debug "^3.1.0" - lodash.once "^4.1.1" - -"@dr.pogodin/babel-plugin-transform-assets@1.2.5": - version "1.2.5" - resolved "https://registry.yarnpkg.com/@dr.pogodin/babel-plugin-transform-assets/-/babel-plugin-transform-assets-1.2.5.tgz#18bcfce67ef6340daa4d346f4ee0ed2ca4af96a9" - integrity sha512-6X5rYDXyaLbN9BFoT7Vca6BQnrbJcSHQtw2IgtKo8WXSIdr3GwpgnyLrbLRq8OJqy7+itTE1vLyceUAPXBfNmA== - dependencies: - enhanced-resolve "^5.18.1" + enhanced-resolve "^5.19.0" loader-utils "^3.3.1" "@emnapi/core@^1.4.3": @@ -1652,135 +1734,135 @@ resolved "https://registry.yarnpkg.com/@emotion/weak-memoize/-/weak-memoize-0.4.0.tgz#5e13fac887f08c44f76b0ccaf3370eb00fec9bb6" integrity sha512-snKqtPW01tN0ui7yu9rGv69aJXr/a/Ywvl11sUjNtEcRc+ng/mQriFL0wLXMef74iHa/EkftbDzU9F8iFbH+zg== -"@esbuild/aix-ppc64@0.25.12": - version "0.25.12" - resolved "https://registry.yarnpkg.com/@esbuild/aix-ppc64/-/aix-ppc64-0.25.12.tgz#80fcbe36130e58b7670511e888b8e88a259ed76c" - integrity sha512-Hhmwd6CInZ3dwpuGTF8fJG6yoWmsToE+vYgD4nytZVxcu1ulHpUQRAB1UJ8+N1Am3Mz4+xOByoQoSZf4D+CpkA== - -"@esbuild/android-arm64@0.25.12": - version "0.25.12" - resolved "https://registry.yarnpkg.com/@esbuild/android-arm64/-/android-arm64-0.25.12.tgz#8aa4965f8d0a7982dc21734bf6601323a66da752" - integrity sha512-6AAmLG7zwD1Z159jCKPvAxZd4y/VTO0VkprYy+3N2FtJ8+BQWFXU+OxARIwA46c5tdD9SsKGZ/1ocqBS/gAKHg== - -"@esbuild/android-arm@0.25.12": - version "0.25.12" - resolved "https://registry.yarnpkg.com/@esbuild/android-arm/-/android-arm-0.25.12.tgz#300712101f7f50f1d2627a162e6e09b109b6767a" - integrity sha512-VJ+sKvNA/GE7Ccacc9Cha7bpS8nyzVv0jdVgwNDaR4gDMC/2TTRc33Ip8qrNYUcpkOHUT5OZ0bUcNNVZQ9RLlg== - -"@esbuild/android-x64@0.25.12": - version "0.25.12" - resolved "https://registry.yarnpkg.com/@esbuild/android-x64/-/android-x64-0.25.12.tgz#87dfb27161202bdc958ef48bb61b09c758faee16" - integrity 
sha512-5jbb+2hhDHx5phYR2By8GTWEzn6I9UqR11Kwf22iKbNpYrsmRB18aX/9ivc5cabcUiAT/wM+YIZ6SG9QO6a8kg== - -"@esbuild/darwin-arm64@0.25.12": - version "0.25.12" - resolved "https://registry.yarnpkg.com/@esbuild/darwin-arm64/-/darwin-arm64-0.25.12.tgz#79197898ec1ff745d21c071e1c7cc3c802f0c1fd" - integrity sha512-N3zl+lxHCifgIlcMUP5016ESkeQjLj/959RxxNYIthIg+CQHInujFuXeWbWMgnTo4cp5XVHqFPmpyu9J65C1Yg== - -"@esbuild/darwin-x64@0.25.12": - version "0.25.12" - resolved "https://registry.yarnpkg.com/@esbuild/darwin-x64/-/darwin-x64-0.25.12.tgz#146400a8562133f45c4d2eadcf37ddd09718079e" - integrity sha512-HQ9ka4Kx21qHXwtlTUVbKJOAnmG1ipXhdWTmNXiPzPfWKpXqASVcWdnf2bnL73wgjNrFXAa3yYvBSd9pzfEIpA== - -"@esbuild/freebsd-arm64@0.25.12": - version "0.25.12" - resolved "https://registry.yarnpkg.com/@esbuild/freebsd-arm64/-/freebsd-arm64-0.25.12.tgz#1c5f9ba7206e158fd2b24c59fa2d2c8bb47ca0fe" - integrity sha512-gA0Bx759+7Jve03K1S0vkOu5Lg/85dou3EseOGUes8flVOGxbhDDh/iZaoek11Y8mtyKPGF3vP8XhnkDEAmzeg== - -"@esbuild/freebsd-x64@0.25.12": - version "0.25.12" - resolved "https://registry.yarnpkg.com/@esbuild/freebsd-x64/-/freebsd-x64-0.25.12.tgz#ea631f4a36beaac4b9279fa0fcc6ca29eaeeb2b3" - integrity sha512-TGbO26Yw2xsHzxtbVFGEXBFH0FRAP7gtcPE7P5yP7wGy7cXK2oO7RyOhL5NLiqTlBh47XhmIUXuGciXEqYFfBQ== - -"@esbuild/linux-arm64@0.25.12": - version "0.25.12" - resolved "https://registry.yarnpkg.com/@esbuild/linux-arm64/-/linux-arm64-0.25.12.tgz#e1066bce58394f1b1141deec8557a5f0a22f5977" - integrity sha512-8bwX7a8FghIgrupcxb4aUmYDLp8pX06rGh5HqDT7bB+8Rdells6mHvrFHHW2JAOPZUbnjUpKTLg6ECyzvas2AQ== - -"@esbuild/linux-arm@0.25.12": - version "0.25.12" - resolved "https://registry.yarnpkg.com/@esbuild/linux-arm/-/linux-arm-0.25.12.tgz#452cd66b20932d08bdc53a8b61c0e30baf4348b9" - integrity sha512-lPDGyC1JPDou8kGcywY0YILzWlhhnRjdof3UlcoqYmS9El818LLfJJc3PXXgZHrHCAKs/Z2SeZtDJr5MrkxtOw== - -"@esbuild/linux-ia32@0.25.12": - version "0.25.12" - resolved 
"https://registry.yarnpkg.com/@esbuild/linux-ia32/-/linux-ia32-0.25.12.tgz#b24f8acc45bcf54192c7f2f3be1b53e6551eafe0" - integrity sha512-0y9KrdVnbMM2/vG8KfU0byhUN+EFCny9+8g202gYqSSVMonbsCfLjUO+rCci7pM0WBEtz+oK/PIwHkzxkyharA== - -"@esbuild/linux-loong64@0.25.12": - version "0.25.12" - resolved "https://registry.yarnpkg.com/@esbuild/linux-loong64/-/linux-loong64-0.25.12.tgz#f9cfffa7fc8322571fbc4c8b3268caf15bd81ad0" - integrity sha512-h///Lr5a9rib/v1GGqXVGzjL4TMvVTv+s1DPoxQdz7l/AYv6LDSxdIwzxkrPW438oUXiDtwM10o9PmwS/6Z0Ng== - -"@esbuild/linux-mips64el@0.25.12": - version "0.25.12" - resolved "https://registry.yarnpkg.com/@esbuild/linux-mips64el/-/linux-mips64el-0.25.12.tgz#575a14bd74644ffab891adc7d7e60d275296f2cd" - integrity sha512-iyRrM1Pzy9GFMDLsXn1iHUm18nhKnNMWscjmp4+hpafcZjrr2WbT//d20xaGljXDBYHqRcl8HnxbX6uaA/eGVw== - -"@esbuild/linux-ppc64@0.25.12": - version "0.25.12" - resolved "https://registry.yarnpkg.com/@esbuild/linux-ppc64/-/linux-ppc64-0.25.12.tgz#75b99c70a95fbd5f7739d7692befe60601591869" - integrity sha512-9meM/lRXxMi5PSUqEXRCtVjEZBGwB7P/D4yT8UG/mwIdze2aV4Vo6U5gD3+RsoHXKkHCfSxZKzmDssVlRj1QQA== - -"@esbuild/linux-riscv64@0.25.12": - version "0.25.12" - resolved "https://registry.yarnpkg.com/@esbuild/linux-riscv64/-/linux-riscv64-0.25.12.tgz#2e3259440321a44e79ddf7535c325057da875cd6" - integrity sha512-Zr7KR4hgKUpWAwb1f3o5ygT04MzqVrGEGXGLnj15YQDJErYu/BGg+wmFlIDOdJp0PmB0lLvxFIOXZgFRrdjR0w== - -"@esbuild/linux-s390x@0.25.12": - version "0.25.12" - resolved "https://registry.yarnpkg.com/@esbuild/linux-s390x/-/linux-s390x-0.25.12.tgz#17676cabbfe5928da5b2a0d6df5d58cd08db2663" - integrity sha512-MsKncOcgTNvdtiISc/jZs/Zf8d0cl/t3gYWX8J9ubBnVOwlk65UIEEvgBORTiljloIWnBzLs4qhzPkJcitIzIg== - -"@esbuild/linux-x64@0.25.12": - version "0.25.12" - resolved "https://registry.yarnpkg.com/@esbuild/linux-x64/-/linux-x64-0.25.12.tgz#0583775685ca82066d04c3507f09524d3cd7a306" - integrity sha512-uqZMTLr/zR/ed4jIGnwSLkaHmPjOjJvnm6TVVitAa08SLS9Z0VM8wIRx7gWbJB5/J54YuIMInDquWyYvQLZkgw== - 
-"@esbuild/netbsd-arm64@0.25.12": - version "0.25.12" - resolved "https://registry.yarnpkg.com/@esbuild/netbsd-arm64/-/netbsd-arm64-0.25.12.tgz#f04c4049cb2e252fe96b16fed90f70746b13f4a4" - integrity sha512-xXwcTq4GhRM7J9A8Gv5boanHhRa/Q9KLVmcyXHCTaM4wKfIpWkdXiMog/KsnxzJ0A1+nD+zoecuzqPmCRyBGjg== - -"@esbuild/netbsd-x64@0.25.12": - version "0.25.12" - resolved "https://registry.yarnpkg.com/@esbuild/netbsd-x64/-/netbsd-x64-0.25.12.tgz#77da0d0a0d826d7c921eea3d40292548b258a076" - integrity sha512-Ld5pTlzPy3YwGec4OuHh1aCVCRvOXdH8DgRjfDy/oumVovmuSzWfnSJg+VtakB9Cm0gxNO9BzWkj6mtO1FMXkQ== - -"@esbuild/openbsd-arm64@0.25.12": - version "0.25.12" - resolved "https://registry.yarnpkg.com/@esbuild/openbsd-arm64/-/openbsd-arm64-0.25.12.tgz#6296f5867aedef28a81b22ab2009c786a952dccd" - integrity sha512-fF96T6KsBo/pkQI950FARU9apGNTSlZGsv1jZBAlcLL1MLjLNIWPBkj5NlSz8aAzYKg+eNqknrUJ24QBybeR5A== - -"@esbuild/openbsd-x64@0.25.12": - version "0.25.12" - resolved "https://registry.yarnpkg.com/@esbuild/openbsd-x64/-/openbsd-x64-0.25.12.tgz#f8d23303360e27b16cf065b23bbff43c14142679" - integrity sha512-MZyXUkZHjQxUvzK7rN8DJ3SRmrVrke8ZyRusHlP+kuwqTcfWLyqMOE3sScPPyeIXN/mDJIfGXvcMqCgYKekoQw== - -"@esbuild/openharmony-arm64@0.25.12": - version "0.25.12" - resolved "https://registry.yarnpkg.com/@esbuild/openharmony-arm64/-/openharmony-arm64-0.25.12.tgz#49e0b768744a3924be0d7fd97dd6ce9b2923d88d" - integrity sha512-rm0YWsqUSRrjncSXGA7Zv78Nbnw4XL6/dzr20cyrQf7ZmRcsovpcRBdhD43Nuk3y7XIoW2OxMVvwuRvk9XdASg== - -"@esbuild/sunos-x64@0.25.12": - version "0.25.12" - resolved "https://registry.yarnpkg.com/@esbuild/sunos-x64/-/sunos-x64-0.25.12.tgz#a6ed7d6778d67e528c81fb165b23f4911b9b13d6" - integrity sha512-3wGSCDyuTHQUzt0nV7bocDy72r2lI33QL3gkDNGkod22EsYl04sMf0qLb8luNKTOmgF/eDEDP5BFNwoBKH441w== - -"@esbuild/win32-arm64@0.25.12": - version "0.25.12" - resolved "https://registry.yarnpkg.com/@esbuild/win32-arm64/-/win32-arm64-0.25.12.tgz#9ac14c378e1b653af17d08e7d3ce34caef587323" - integrity 
sha512-rMmLrur64A7+DKlnSuwqUdRKyd3UE7oPJZmnljqEptesKM8wx9J8gx5u0+9Pq0fQQW8vqeKebwNXdfOyP+8Bsg== - -"@esbuild/win32-ia32@0.25.12": - version "0.25.12" - resolved "https://registry.yarnpkg.com/@esbuild/win32-ia32/-/win32-ia32-0.25.12.tgz#918942dcbbb35cc14fca39afb91b5e6a3d127267" - integrity sha512-HkqnmmBoCbCwxUKKNPBixiWDGCpQGVsrQfJoVGYLPT41XWF8lHuE5N6WhVia2n4o5QK5M4tYr21827fNhi4byQ== - -"@esbuild/win32-x64@0.25.12": - version "0.25.12" - resolved "https://registry.yarnpkg.com/@esbuild/win32-x64/-/win32-x64-0.25.12.tgz#9bdad8176be7811ad148d1f8772359041f46c6c5" - integrity sha512-alJC0uCZpTFrSL0CCDjcgleBXPnCrEAhTBILpeAp7M/OFgoqtAetfBzX0xM00MUsVVPpVjlPuMbREqnZCXaTnA== +"@esbuild/aix-ppc64@0.27.3": + version "0.27.3" + resolved "https://registry.yarnpkg.com/@esbuild/aix-ppc64/-/aix-ppc64-0.27.3.tgz#815b39267f9bffd3407ea6c376ac32946e24f8d2" + integrity sha512-9fJMTNFTWZMh5qwrBItuziu834eOCUcEqymSH7pY+zoMVEZg3gcPuBNxH1EvfVYe9h0x/Ptw8KBzv7qxb7l8dg== + +"@esbuild/android-arm64@0.27.3": + version "0.27.3" + resolved "https://registry.yarnpkg.com/@esbuild/android-arm64/-/android-arm64-0.27.3.tgz#19b882408829ad8e12b10aff2840711b2da361e8" + integrity sha512-YdghPYUmj/FX2SYKJ0OZxf+iaKgMsKHVPF1MAq/P8WirnSpCStzKJFjOjzsW0QQ7oIAiccHdcqjbHmJxRb/dmg== + +"@esbuild/android-arm@0.27.3": + version "0.27.3" + resolved "https://registry.yarnpkg.com/@esbuild/android-arm/-/android-arm-0.27.3.tgz#90be58de27915efa27b767fcbdb37a4470627d7b" + integrity sha512-i5D1hPY7GIQmXlXhs2w8AWHhenb00+GxjxRncS2ZM7YNVGNfaMxgzSGuO8o8SJzRc/oZwU2bcScvVERk03QhzA== + +"@esbuild/android-x64@0.27.3": + version "0.27.3" + resolved "https://registry.yarnpkg.com/@esbuild/android-x64/-/android-x64-0.27.3.tgz#d7dcc976f16e01a9aaa2f9b938fbec7389f895ac" + integrity sha512-IN/0BNTkHtk8lkOM8JWAYFg4ORxBkZQf9zXiEOfERX/CzxW3Vg1ewAhU7QSWQpVIzTW+b8Xy+lGzdYXV6UZObQ== + +"@esbuild/darwin-arm64@0.27.3": + version "0.27.3" + resolved 
"https://registry.yarnpkg.com/@esbuild/darwin-arm64/-/darwin-arm64-0.27.3.tgz#9f6cac72b3a8532298a6a4493ed639a8988e8abd" + integrity sha512-Re491k7ByTVRy0t3EKWajdLIr0gz2kKKfzafkth4Q8A5n1xTHrkqZgLLjFEHVD+AXdUGgQMq+Godfq45mGpCKg== + +"@esbuild/darwin-x64@0.27.3": + version "0.27.3" + resolved "https://registry.yarnpkg.com/@esbuild/darwin-x64/-/darwin-x64-0.27.3.tgz#ac61d645faa37fd650340f1866b0812e1fb14d6a" + integrity sha512-vHk/hA7/1AckjGzRqi6wbo+jaShzRowYip6rt6q7VYEDX4LEy1pZfDpdxCBnGtl+A5zq8iXDcyuxwtv3hNtHFg== + +"@esbuild/freebsd-arm64@0.27.3": + version "0.27.3" + resolved "https://registry.yarnpkg.com/@esbuild/freebsd-arm64/-/freebsd-arm64-0.27.3.tgz#b8625689d73cf1830fe58c39051acdc12474ea1b" + integrity sha512-ipTYM2fjt3kQAYOvo6vcxJx3nBYAzPjgTCk7QEgZG8AUO3ydUhvelmhrbOheMnGOlaSFUoHXB6un+A7q4ygY9w== + +"@esbuild/freebsd-x64@0.27.3": + version "0.27.3" + resolved "https://registry.yarnpkg.com/@esbuild/freebsd-x64/-/freebsd-x64-0.27.3.tgz#07be7dd3c9d42fe0eccd2ab9f9ded780bc53bead" + integrity sha512-dDk0X87T7mI6U3K9VjWtHOXqwAMJBNN2r7bejDsc+j03SEjtD9HrOl8gVFByeM0aJksoUuUVU9TBaZa2rgj0oA== + +"@esbuild/linux-arm64@0.27.3": + version "0.27.3" + resolved "https://registry.yarnpkg.com/@esbuild/linux-arm64/-/linux-arm64-0.27.3.tgz#bf31918fe5c798586460d2b3d6c46ed2c01ca0b6" + integrity sha512-sZOuFz/xWnZ4KH3YfFrKCf1WyPZHakVzTiqji3WDc0BCl2kBwiJLCXpzLzUBLgmp4veFZdvN5ChW4Eq/8Fc2Fg== + +"@esbuild/linux-arm@0.27.3": + version "0.27.3" + resolved "https://registry.yarnpkg.com/@esbuild/linux-arm/-/linux-arm-0.27.3.tgz#28493ee46abec1dc3f500223cd9f8d2df08f9d11" + integrity sha512-s6nPv2QkSupJwLYyfS+gwdirm0ukyTFNl3KTgZEAiJDd+iHZcbTPPcWCcRYH+WlNbwChgH2QkE9NSlNrMT8Gfw== + +"@esbuild/linux-ia32@0.27.3": + version "0.27.3" + resolved "https://registry.yarnpkg.com/@esbuild/linux-ia32/-/linux-ia32-0.27.3.tgz#750752a8b30b43647402561eea764d0a41d0ee29" + integrity sha512-yGlQYjdxtLdh0a3jHjuwOrxQjOZYD/C9PfdbgJJF3TIZWnm/tMd/RcNiLngiu4iwcBAOezdnSLAwQDPqTmtTYg== + +"@esbuild/linux-loong64@0.27.3": + 
version "0.27.3" + resolved "https://registry.yarnpkg.com/@esbuild/linux-loong64/-/linux-loong64-0.27.3.tgz#a5a92813a04e71198c50f05adfaf18fc1e95b9ed" + integrity sha512-WO60Sn8ly3gtzhyjATDgieJNet/KqsDlX5nRC5Y3oTFcS1l0KWba+SEa9Ja1GfDqSF1z6hif/SkpQJbL63cgOA== + +"@esbuild/linux-mips64el@0.27.3": + version "0.27.3" + resolved "https://registry.yarnpkg.com/@esbuild/linux-mips64el/-/linux-mips64el-0.27.3.tgz#deb45d7fd2d2161eadf1fbc593637ed766d50bb1" + integrity sha512-APsymYA6sGcZ4pD6k+UxbDjOFSvPWyZhjaiPyl/f79xKxwTnrn5QUnXR5prvetuaSMsb4jgeHewIDCIWljrSxw== + +"@esbuild/linux-ppc64@0.27.3": + version "0.27.3" + resolved "https://registry.yarnpkg.com/@esbuild/linux-ppc64/-/linux-ppc64-0.27.3.tgz#6f39ae0b8c4d3d2d61a65b26df79f6e12a1c3d78" + integrity sha512-eizBnTeBefojtDb9nSh4vvVQ3V9Qf9Df01PfawPcRzJH4gFSgrObw+LveUyDoKU3kxi5+9RJTCWlj4FjYXVPEA== + +"@esbuild/linux-riscv64@0.27.3": + version "0.27.3" + resolved "https://registry.yarnpkg.com/@esbuild/linux-riscv64/-/linux-riscv64-0.27.3.tgz#4c5c19c3916612ec8e3915187030b9df0b955c1d" + integrity sha512-3Emwh0r5wmfm3ssTWRQSyVhbOHvqegUDRd0WhmXKX2mkHJe1SFCMJhagUleMq+Uci34wLSipf8Lagt4LlpRFWQ== + +"@esbuild/linux-s390x@0.27.3": + version "0.27.3" + resolved "https://registry.yarnpkg.com/@esbuild/linux-s390x/-/linux-s390x-0.27.3.tgz#9ed17b3198fa08ad5ccaa9e74f6c0aff7ad0156d" + integrity sha512-pBHUx9LzXWBc7MFIEEL0yD/ZVtNgLytvx60gES28GcWMqil8ElCYR4kvbV2BDqsHOvVDRrOxGySBM9Fcv744hw== + +"@esbuild/linux-x64@0.27.3": + version "0.27.3" + resolved "https://registry.yarnpkg.com/@esbuild/linux-x64/-/linux-x64-0.27.3.tgz#12383dcbf71b7cf6513e58b4b08d95a710bf52a5" + integrity sha512-Czi8yzXUWIQYAtL/2y6vogER8pvcsOsk5cpwL4Gk5nJqH5UZiVByIY8Eorm5R13gq+DQKYg0+JyQoytLQas4dA== + +"@esbuild/netbsd-arm64@0.27.3": + version "0.27.3" + resolved "https://registry.yarnpkg.com/@esbuild/netbsd-arm64/-/netbsd-arm64-0.27.3.tgz#dd0cb2fa543205fcd931df44f4786bfcce6df7d7" + integrity 
sha512-sDpk0RgmTCR/5HguIZa9n9u+HVKf40fbEUt+iTzSnCaGvY9kFP0YKBWZtJaraonFnqef5SlJ8/TiPAxzyS+UoA== + +"@esbuild/netbsd-x64@0.27.3": + version "0.27.3" + resolved "https://registry.yarnpkg.com/@esbuild/netbsd-x64/-/netbsd-x64-0.27.3.tgz#028ad1807a8e03e155153b2d025b506c3787354b" + integrity sha512-P14lFKJl/DdaE00LItAukUdZO5iqNH7+PjoBm+fLQjtxfcfFE20Xf5CrLsmZdq5LFFZzb5JMZ9grUwvtVYzjiA== + +"@esbuild/openbsd-arm64@0.27.3": + version "0.27.3" + resolved "https://registry.yarnpkg.com/@esbuild/openbsd-arm64/-/openbsd-arm64-0.27.3.tgz#e3c16ff3490c9b59b969fffca87f350ffc0e2af5" + integrity sha512-AIcMP77AvirGbRl/UZFTq5hjXK+2wC7qFRGoHSDrZ5v5b8DK/GYpXW3CPRL53NkvDqb9D+alBiC/dV0Fb7eJcw== + +"@esbuild/openbsd-x64@0.27.3": + version "0.27.3" + resolved "https://registry.yarnpkg.com/@esbuild/openbsd-x64/-/openbsd-x64-0.27.3.tgz#c5a4693fcb03d1cbecbf8b422422468dfc0d2a8b" + integrity sha512-DnW2sRrBzA+YnE70LKqnM3P+z8vehfJWHXECbwBmH/CU51z6FiqTQTHFenPlHmo3a8UgpLyH3PT+87OViOh1AQ== + +"@esbuild/openharmony-arm64@0.27.3": + version "0.27.3" + resolved "https://registry.yarnpkg.com/@esbuild/openharmony-arm64/-/openharmony-arm64-0.27.3.tgz#082082444f12db564a0775a41e1991c0e125055e" + integrity sha512-NinAEgr/etERPTsZJ7aEZQvvg/A6IsZG/LgZy+81wON2huV7SrK3e63dU0XhyZP4RKGyTm7aOgmQk0bGp0fy2g== + +"@esbuild/sunos-x64@0.27.3": + version "0.27.3" + resolved "https://registry.yarnpkg.com/@esbuild/sunos-x64/-/sunos-x64-0.27.3.tgz#5ab036c53f929e8405c4e96e865a424160a1b537" + integrity sha512-PanZ+nEz+eWoBJ8/f8HKxTTD172SKwdXebZ0ndd953gt1HRBbhMsaNqjTyYLGLPdoWHy4zLU7bDVJztF5f3BHA== + +"@esbuild/win32-arm64@0.27.3": + version "0.27.3" + resolved "https://registry.yarnpkg.com/@esbuild/win32-arm64/-/win32-arm64-0.27.3.tgz#38de700ef4b960a0045370c171794526e589862e" + integrity sha512-B2t59lWWYrbRDw/tjiWOuzSsFh1Y/E95ofKz7rIVYSQkUYBjfSgf6oeYPNWHToFRr2zx52JKApIcAS/D5TUBnA== + +"@esbuild/win32-ia32@0.27.3": + version "0.27.3" + resolved 
"https://registry.yarnpkg.com/@esbuild/win32-ia32/-/win32-ia32-0.27.3.tgz#451b93dc03ec5d4f38619e6cd64d9f9eff06f55c" + integrity sha512-QLKSFeXNS8+tHW7tZpMtjlNb7HKau0QDpwm49u0vUp9y1WOF+PEzkU84y9GqYaAVW8aH8f3GcBck26jh54cX4Q== + +"@esbuild/win32-x64@0.27.3": + version "0.27.3" + resolved "https://registry.yarnpkg.com/@esbuild/win32-x64/-/win32-x64-0.27.3.tgz#0eaf705c941a218a43dba8e09f1df1d6cd2f1f17" + integrity sha512-4uJGhsxuptu3OcpVAzli+/gWusVGwZZHTlS63hh++ehExkVT8SgiEf7/uC/PclrPPkLhZqGgCTjd0VWLo6xMqA== "@isaacs/balanced-match@^4.0.1": version "4.0.1" @@ -2178,83 +2260,90 @@ dependencies: "@monaco-editor/loader" "^1.5.0" -"@mui/core-downloads-tracker@^7.3.5": - version "7.3.5" - resolved "https://registry.yarnpkg.com/@mui/core-downloads-tracker/-/core-downloads-tracker-7.3.5.tgz#2c7769498a287eb9456269571b328f807b69f731" - integrity sha512-kOLwlcDPnVz2QMhiBv0OQ8le8hTCqKM9cRXlfVPL91l3RGeOsxrIhNRsUt3Xb8wb+pTVUolW+JXKym93vRKxCw== +"@mui/core-downloads-tracker@^7.3.8": + version "7.3.8" + resolved "https://registry.yarnpkg.com/@mui/core-downloads-tracker/-/core-downloads-tracker-7.3.8.tgz#67949c6b3b4b3ae95dde3a9ddfbb7549f3ca56b1" + integrity sha512-s9UHZo7QJVly7gNArEZkbbsimHqJZhElgBpXIJdehZ4OWXt+CCr0SBDgUCDJnQrqpd1dWK2dLq5rmO4mCBmI3w== -"@mui/icons-material@7.3.5": - version "7.3.5" - resolved "https://registry.yarnpkg.com/@mui/icons-material/-/icons-material-7.3.5.tgz#ebb94784fc49ab477f97d4cae097ec3cee32758f" - integrity sha512-LciL1GLMZ+VlzyHAALSVAR22t8IST4LCXmljcUSx2NOutgO2XnxdIp8ilFbeNf9wpo0iUFbAuoQcB7h+HHIf3A== +"@mui/icons-material@7.3.8": + version "7.3.8" + resolved "https://registry.yarnpkg.com/@mui/icons-material/-/icons-material-7.3.8.tgz#a31d9970be5557b73c83a912f2bbfdacd6319fbc" + integrity sha512-88sWg/UJc1X82OMO+ISR4E3P58I3BjFVg0qkmDu7OWlN8VijneZD3ylFA+ImxuPjMHW3SHosfSJYy1fztoz0fw== dependencies: - "@babel/runtime" "^7.28.4" + "@babel/runtime" "^7.28.6" -"@mui/lab@7.0.1-beta.19": - version "7.0.1-beta.19" - resolved 
"https://registry.yarnpkg.com/@mui/lab/-/lab-7.0.1-beta.19.tgz#c429589b9a895004e4f919f322322ae78ce455c3" - integrity sha512-Ekxd2mPnr5iKwrMXjN/y2xgpxPX8ithBBcDenjqNdBt/ZQumrmBl0ifVoqAHsL6lxN6DOgRsWTRc4eOdDiB+0Q== +"@mui/lab@7.0.1-beta.22": + version "7.0.1-beta.22" + resolved "https://registry.yarnpkg.com/@mui/lab/-/lab-7.0.1-beta.22.tgz#2c8c81134af99aedcf40553ad2be558b7b8d23b5" + integrity sha512-NHNIg51/CxiJTMTaWCxmqlqYIUfpXbdVjv/OyhSmuAb0z/lIOCMMu9fz0bIbQnZEh6H5L7DhTblynT7xwkXMIQ== dependencies: - "@babel/runtime" "^7.28.4" - "@mui/system" "^7.3.5" - "@mui/types" "^7.4.8" - "@mui/utils" "^7.3.5" + "@babel/runtime" "^7.28.6" + "@mui/system" "^7.3.8" + "@mui/types" "^7.4.11" + "@mui/utils" "^7.3.8" clsx "^2.1.1" prop-types "^15.8.1" -"@mui/material@7.3.5": - version "7.3.5" - resolved "https://registry.yarnpkg.com/@mui/material/-/material-7.3.5.tgz#2a30e9ed33c58cfa90d8a5d74c12cfa1064f52ef" - integrity sha512-8VVxFmp1GIm9PpmnQoCoYo0UWHoOrdA57tDL62vkpzEgvb/d71Wsbv4FRg7r1Gyx7PuSo0tflH34cdl/NvfHNQ== +"@mui/material@7.3.8": + version "7.3.8" + resolved "https://registry.yarnpkg.com/@mui/material/-/material-7.3.8.tgz#230230b5aa6a791558874ecffe9c50ffa0be6817" + integrity sha512-QKd1RhDXE1hf2sQDNayA9ic9jGkEgvZOf0tTkJxlBPG8ns8aS4rS8WwYURw2x5y3739p0HauUXX9WbH7UufFLw== dependencies: - "@babel/runtime" "^7.28.4" - "@mui/core-downloads-tracker" "^7.3.5" - "@mui/system" "^7.3.5" - "@mui/types" "^7.4.8" - "@mui/utils" "^7.3.5" + "@babel/runtime" "^7.28.6" + "@mui/core-downloads-tracker" "^7.3.8" + "@mui/system" "^7.3.8" + "@mui/types" "^7.4.11" + "@mui/utils" "^7.3.8" "@popperjs/core" "^2.11.8" "@types/react-transition-group" "^4.4.12" clsx "^2.1.1" - csstype "^3.1.3" + csstype "^3.2.3" prop-types "^15.8.1" - react-is "^19.2.0" + react-is "^19.2.3" react-transition-group "^4.4.5" -"@mui/private-theming@^7.3.5": - version "7.3.5" - resolved "https://registry.yarnpkg.com/@mui/private-theming/-/private-theming-7.3.5.tgz#53f9203d7d82e69e94dd8df0a19fd4744a330a8f" - integrity 
sha512-cTx584W2qrLonwhZLbEN7P5pAUu0nZblg8cLBlTrZQ4sIiw8Fbvg7GvuphQaSHxPxrCpa7FDwJKtXdbl2TSmrA== +"@mui/private-theming@^7.3.8": + version "7.3.8" + resolved "https://registry.yarnpkg.com/@mui/private-theming/-/private-theming-7.3.8.tgz#025659d822e92b50586f99bd463dae3857c15feb" + integrity sha512-du5dlPZ9XL3xW2apHoGDXBI+QLtyVJGrXNCfcNYfP/ojkz1RQ0rRV6VG9Rkm1DqEFRG8mjjTL7zmE1Bvn1eR4A== dependencies: - "@babel/runtime" "^7.28.4" - "@mui/utils" "^7.3.5" + "@babel/runtime" "^7.28.6" + "@mui/utils" "^7.3.8" prop-types "^15.8.1" -"@mui/styled-engine@^7.3.5": - version "7.3.5" - resolved "https://registry.yarnpkg.com/@mui/styled-engine/-/styled-engine-7.3.5.tgz#b087d791d85eea97812f0e23e9b9fdeb37abad77" - integrity sha512-zbsZ0uYYPndFCCPp2+V3RLcAN6+fv4C8pdwRx6OS3BwDkRCN8WBehqks7hWyF3vj1kdQLIWrpdv/5Y0jHRxYXQ== +"@mui/styled-engine@^7.3.8": + version "7.3.8" + resolved "https://registry.yarnpkg.com/@mui/styled-engine/-/styled-engine-7.3.8.tgz#0cf761b153061815687488bf4bec5f84ce21e0f9" + integrity sha512-JHAeXQzS0tJ+Fq3C6J4TVDsW+yKhO4uuxuiLaopNStJeQYBIUCXpKYyUCcgXym4AmhbznQnv9RlHywSH6b0FOg== dependencies: - "@babel/runtime" "^7.28.4" + "@babel/runtime" "^7.28.6" "@emotion/cache" "^11.14.0" "@emotion/serialize" "^1.3.3" "@emotion/sheet" "^1.4.0" - csstype "^3.1.3" + csstype "^3.2.3" prop-types "^15.8.1" -"@mui/system@7.3.5", "@mui/system@^7.3.5": - version "7.3.5" - resolved "https://registry.yarnpkg.com/@mui/system/-/system-7.3.5.tgz#ea077787ba9e9efc00a6df4db55a833de6a530fc" - integrity sha512-yPaf5+gY3v80HNkJcPi6WT+r9ebeM4eJzrREXPxMt7pNTV/1eahyODO4fbH3Qvd8irNxDFYn5RQ3idHW55rA6g== +"@mui/system@7.3.8", "@mui/system@^7.3.8": + version "7.3.8" + resolved "https://registry.yarnpkg.com/@mui/system/-/system-7.3.8.tgz#315ee3f4285f8611f32851d3699a5d5e5ef0b818" + integrity sha512-hoFRj4Zw2Km8DPWZp/nKG+ao5Jw5LSk2m/e4EGc6M3RRwXKEkMSG4TgtfVJg7dS2homRwtdXSMW+iRO0ZJ4+IA== dependencies: - "@babel/runtime" "^7.28.4" - "@mui/private-theming" "^7.3.5" - "@mui/styled-engine" "^7.3.5" - 
"@mui/types" "^7.4.8" - "@mui/utils" "^7.3.5" + "@babel/runtime" "^7.28.6" + "@mui/private-theming" "^7.3.8" + "@mui/styled-engine" "^7.3.8" + "@mui/types" "^7.4.11" + "@mui/utils" "^7.3.8" clsx "^2.1.1" - csstype "^3.1.3" + csstype "^3.2.3" prop-types "^15.8.1" +"@mui/types@^7.4.11": + version "7.4.11" + resolved "https://registry.yarnpkg.com/@mui/types/-/types-7.4.11.tgz#d9c8e028a354a52fe86cb8ffbc306faf4c090608" + integrity sha512-fZ2xO9D08IKOxO2oUBi1nnVKH6oJUD+64cnv4YAaFoC0E5+i1+S5AHbNqqvZlYYsbPEQ6qEVwuBqY3jl5W4G+Q== + dependencies: + "@babel/runtime" "^7.28.6" + "@mui/types@^7.4.8": version "7.4.8" resolved "https://registry.yarnpkg.com/@mui/types/-/types-7.4.8.tgz#0c1829353cd7d196be9ac0332a30cdd2792f3558" @@ -2274,68 +2363,87 @@ prop-types "^15.8.1" react-is "^19.2.0" -"@mui/x-charts-vendor@8.18.0": - version "8.18.0" - resolved "https://registry.yarnpkg.com/@mui/x-charts-vendor/-/x-charts-vendor-8.18.0.tgz#abe15a80182e745ef10d5fde1e30fa4ed2a6ad00" - integrity sha512-NFbFMOR8tsa02C3+YKQOdbzPaDtZLJPprsySw9xVxJpcYw5y/v02TuV/yQCIE0Pk1dVGHW2yvCMBs8AS+irjLA== +"@mui/utils@^7.3.8": + version "7.3.8" + resolved "https://registry.yarnpkg.com/@mui/utils/-/utils-7.3.8.tgz#0d6dff4c623682b603f27d6ff37839b3d16832de" + integrity sha512-kZRcE2620CBGr+XI8YMmwPj6WIPwSF7uMJjvSfqd8zXVvlz0MCJbzRRUGNf8NgflCLthdji2DdS643TeyJ3+nA== + dependencies: + "@babel/runtime" "^7.28.6" + "@mui/types" "^7.4.11" + "@types/prop-types" "^15.7.15" + clsx "^2.1.1" + prop-types "^15.8.1" + react-is "^19.2.3" + +"@mui/x-charts-vendor@8.26.0": + version "8.26.0" + resolved "https://registry.yarnpkg.com/@mui/x-charts-vendor/-/x-charts-vendor-8.26.0.tgz#58ab81deeb330b832acc535732dd03eb0e6d7d05" + integrity sha512-R//+WSWvsLJRTjTRN90EKX9sgRzAb4HQBvtUA3cTQpkGrmEjmatD4BJAm3IdRdkSagf6yKWF+ypESctyRhbwnA== dependencies: "@babel/runtime" "^7.28.4" + "@types/d3-array" "^3.2.2" "@types/d3-color" "^3.1.3" + "@types/d3-format" "^3.0.4" "@types/d3-interpolate" "^3.0.4" - "@types/d3-sankey" "^0.12.4" + 
"@types/d3-path" "^3.1.1" "@types/d3-scale" "^4.0.9" "@types/d3-shape" "^3.1.7" "@types/d3-time" "^3.0.4" + "@types/d3-time-format" "^4.0.3" "@types/d3-timer" "^3.0.2" + d3-array "^3.2.4" d3-color "^3.1.0" + d3-format "^3.1.0" d3-interpolate "^3.0.1" - d3-sankey "^0.12.3" + d3-path "^3.1.0" d3-scale "^4.0.2" d3-shape "^3.2.0" d3-time "^3.1.0" + d3-time-format "^4.1.0" d3-timer "^3.0.1" + flatqueue "^3.0.0" + internmap "^2.0.3" -"@mui/x-charts@8.18.0": - version "8.18.0" - resolved "https://registry.yarnpkg.com/@mui/x-charts/-/x-charts-8.18.0.tgz#acc192b7911326b5dd3e08d533b578d1989f51fd" - integrity sha512-3ivGI//EKZaUFDbit85Z+3fM85kU4417uz7xULDO/BBxSHkwlowuHcb5EewDWFb2Rn2Nmstuv0bGYu5N8r6fvA== +"@mui/x-charts@8.27.0": + version "8.27.0" + resolved "https://registry.yarnpkg.com/@mui/x-charts/-/x-charts-8.27.0.tgz#8f769c855414cf0ad4151de3fe553fa0c9eaecdd" + integrity sha512-MzP1jeiEkMOPWQfzRNo11iwbTwXcSDP7hKd3s/mSN0U4aINKpyHEQX6RAM8OkWzqK8ijTXYWRPVKaRYTLBx7+A== dependencies: "@babel/runtime" "^7.28.4" "@mui/utils" "^7.3.5" - "@mui/x-charts-vendor" "8.18.0" - "@mui/x-internal-gestures" "0.3.5" - "@mui/x-internals" "8.18.0" + "@mui/x-charts-vendor" "8.26.0" + "@mui/x-internal-gestures" "0.4.0" + "@mui/x-internals" "8.26.0" bezier-easing "^2.1.0" clsx "^2.1.1" - flatqueue "^3.0.0" prop-types "^15.8.1" reselect "^5.1.1" use-sync-external-store "^1.6.0" -"@mui/x-date-pickers@8.18.0": - version "8.18.0" - resolved "https://registry.yarnpkg.com/@mui/x-date-pickers/-/x-date-pickers-8.18.0.tgz#0be7c502cb6cd46d57ae053688f4a1964b48f7ae" - integrity sha512-lgq60mOhOf5AKfiCl37eOVSkCZQo3sHhE6tbwbcS93aNkdtlsTQlqy/s6O89RoIi8QS3/7rgCKy+WuC6YzwZrA== +"@mui/x-date-pickers@8.27.2": + version "8.27.2" + resolved "https://registry.yarnpkg.com/@mui/x-date-pickers/-/x-date-pickers-8.27.2.tgz#5ada1fb3adffff3e0fd0fee7702fba7f770dca68" + integrity sha512-06LFkHFRXJ2O9DMXtWAA3kY0jpbL7XH8iqa8L5cBlN+8bRx/UVLKlZYlhGv06C88jF9kuZWY1bUgrv/EoY/2Ww== dependencies: "@babel/runtime" "^7.28.4" "@mui/utils" 
"^7.3.5" - "@mui/x-internals" "8.18.0" + "@mui/x-internals" "8.26.0" "@types/react-transition-group" "^4.4.12" clsx "^2.1.1" prop-types "^15.8.1" react-transition-group "^4.4.5" -"@mui/x-internal-gestures@0.3.5": - version "0.3.5" - resolved "https://registry.yarnpkg.com/@mui/x-internal-gestures/-/x-internal-gestures-0.3.5.tgz#2b615135fda6686ab6c1ee3ed3b1483d822b44d4" - integrity sha512-7G3ydqRdBT/mKRSiA/NLDvfmKo/oMN9PtXUv8CtNyEwzyXFWXiv1LvG1pIHS8xSk3lw5dL8tt0Vl4X0sTJMrgA== +"@mui/x-internal-gestures@0.4.0": + version "0.4.0" + resolved "https://registry.yarnpkg.com/@mui/x-internal-gestures/-/x-internal-gestures-0.4.0.tgz#68d0d23ab794de186e02e6f29116d1fe1d9211c8" + integrity sha512-i0W6v9LoiNY8Yf1goOmaygtz/ncPJGBedhpDfvNg/i8BvzPwJcBaeW4rqPucJfVag9KQ8MSssBBrvYeEnrQmhw== dependencies: "@babel/runtime" "^7.28.4" -"@mui/x-internals@8.18.0": - version "8.18.0" - resolved "https://registry.yarnpkg.com/@mui/x-internals/-/x-internals-8.18.0.tgz#480a7f86c09f1fc96bc366bc8e75dddd3b08b485" - integrity sha512-iM2SJALLo4kNqxTel8lfjIymYV9MgTa6021/rAlfdh/vwPMglaKyXQHrxkkWs2Eu/JFKkCKr5Fd34Gsdp63wIg== +"@mui/x-internals@8.26.0": + version "8.26.0" + resolved "https://registry.yarnpkg.com/@mui/x-internals/-/x-internals-8.26.0.tgz#49caacac954c29a1b10425c67418310ceb9c8cfa" + integrity sha512-B9OZau5IQUvIxwpJZhoFJKqRpmWf5r0yMmSXjQuqb5WuqM755EuzWJOenY48denGoENzMLT8hQpA0hRTeU2IPA== dependencies: "@babel/runtime" "^7.28.4" "@mui/utils" "^7.3.5" @@ -2356,6 +2464,13 @@ resolved "https://registry.yarnpkg.com/@pkgr/core/-/core-0.2.9.tgz#d229a7b7f9dac167a156992ef23c7f023653f53b" integrity sha512-QNqXyfVS2wm9hweSYD2O7F0G06uurj9kZ96TRQE5Y9hU7+tgdZwIkbAKc5Ocy1HxEY2kuDQa6cQ1WRs/O5LFKA== +"@playwright/test@^1.58.2": + version "1.58.2" + resolved "https://registry.yarnpkg.com/@playwright/test/-/test-1.58.2.tgz#b0ad585d2e950d690ef52424967a42f40c6d2cbd" + integrity sha512-akea+6bHYBBfA9uQqSYmlJXn61cTa+jbO87xVLCWbTqbWadRVmhxlXATaOjOgcBaWU4ePo0wB41KMFv3o35IXA== + dependencies: + playwright "1.58.2" + 
"@popperjs/core@^2.11.8": version "2.11.8" resolved "https://registry.yarnpkg.com/@popperjs/core/-/core-2.11.8.tgz#6b79032e760a0899cd4204710beede972a3a185f" @@ -2797,26 +2912,26 @@ "@babel/runtime" "^7.19.0" "@styled-icons/styled-icon" "^10.7.0" -"@swagger-api/apidom-ast@^1.0.0-rc.3": - version "1.0.0-rc.3" - resolved "https://registry.yarnpkg.com/@swagger-api/apidom-ast/-/apidom-ast-1.0.0-rc.3.tgz#f269ba89ccd2f2512ce78d30724c5f22469fa81b" - integrity sha512-lGxvtanmQYqepjVWwPROR/97BIP3sUtwzoHbMSMag2/C3+Un8p6Xz8+I+1sPG2UOBlvDsQe3Di0hlSET7EFwAQ== +"@swagger-api/apidom-ast@^1.6.0": + version "1.6.0" + resolved "https://registry.yarnpkg.com/@swagger-api/apidom-ast/-/apidom-ast-1.6.0.tgz#c72f345023435e4033a5c67cdc064a51f75cd799" + integrity sha512-ez1KnBdAzoh5a6ijDXzu5nADkVZXlnL1RkLl8n2u2tjiNg9597xxmFdEHLVa31Vxr1yYj0WtYGLA5e2Kp0KNrQ== dependencies: "@babel/runtime-corejs3" "^7.26.10" - "@swagger-api/apidom-error" "^1.0.0-rc.3" + "@swagger-api/apidom-error" "^1.6.0" "@types/ramda" "~0.30.0" ramda "~0.30.0" ramda-adjunct "^5.0.0" unraw "^3.0.0" -"@swagger-api/apidom-core@^1.0.0-rc.1", "@swagger-api/apidom-core@^1.0.0-rc.3": - version "1.0.0-rc.3" - resolved "https://registry.yarnpkg.com/@swagger-api/apidom-core/-/apidom-core-1.0.0-rc.3.tgz#a350627872a2f7f7ab99dd88f459a0c965fcbf19" - integrity sha512-cRf+HzoXl3iDPc7alVxdPbLb1TqRePqsxI0id2KaB8HYbyxTUy3ygqY/jmxGtfAAK0Ba85Bw8j4N0crw23vLTg== +"@swagger-api/apidom-core@^1.6.0": + version "1.6.0" + resolved "https://registry.yarnpkg.com/@swagger-api/apidom-core/-/apidom-core-1.6.0.tgz#4eb1ff8955ec623d6c1a9e5a5d3fb34714667cb9" + integrity sha512-gA1MVoXe19sjFLKGkWxp5VvSw3Tk0CSChfItJjFeFHpLSGrfm+LlXp37TmNSns53Ky0F7x7TB/5kAX5I/TO4xw== dependencies: "@babel/runtime-corejs3" "^7.26.10" - "@swagger-api/apidom-ast" "^1.0.0-rc.3" - "@swagger-api/apidom-error" "^1.0.0-rc.3" + "@swagger-api/apidom-ast" "^1.6.0" + "@swagger-api/apidom-error" "^1.6.0" "@types/ramda" "~0.30.0" minim "~0.23.8" ramda "~0.30.0" @@ -2824,263 +2939,319 @@ 
short-unique-id "^5.3.2" ts-mixer "^6.0.3" -"@swagger-api/apidom-error@^1.0.0-rc.1", "@swagger-api/apidom-error@^1.0.0-rc.3": - version "1.0.0-rc.3" - resolved "https://registry.yarnpkg.com/@swagger-api/apidom-error/-/apidom-error-1.0.0-rc.3.tgz#311c0655bc1284ee63342f2f7c494a31c4499b52" - integrity sha512-E9WsxzR9wwD4+1zmZm9PVvxXBAYxMtGJjpRYR/FthvxhIwx+Vsey2h5k7FPS8yJsawIrdGPQtdiFMLPvnQXUFg== +"@swagger-api/apidom-error@^1.6.0": + version "1.6.0" + resolved "https://registry.yarnpkg.com/@swagger-api/apidom-error/-/apidom-error-1.6.0.tgz#e813c5491142bbb2518fed71133b28407e0c846a" + integrity sha512-xp/cQ1xQ/4Vd/hhQfONK7ea9oVc3JUXAYyfRzvDR0lxISly/SyD2jMcqXzHtrylBAnv7V2HSsbC1BWo7ZJDLSQ== dependencies: "@babel/runtime-corejs3" "^7.20.7" -"@swagger-api/apidom-json-pointer@^1.0.0-rc.0", "@swagger-api/apidom-json-pointer@^1.0.0-rc.1", "@swagger-api/apidom-json-pointer@^1.0.0-rc.3": - version "1.0.0-rc.3" - resolved "https://registry.yarnpkg.com/@swagger-api/apidom-json-pointer/-/apidom-json-pointer-1.0.0-rc.3.tgz#1698202192a49062836bcfbbc8d53637bbc3fe43" - integrity sha512-cj83L5ntai/RJcZV0++lQiCHPWE6lTy62bGC2lQ0yi/kyCc+Ig+Sn08qpiLSrkQ4OooK85X+wgAy6pMK+Vt/8Q== +"@swagger-api/apidom-json-pointer@^1.6.0": + version "1.6.0" + resolved "https://registry.yarnpkg.com/@swagger-api/apidom-json-pointer/-/apidom-json-pointer-1.6.0.tgz#21aad5634d997a7ad8976f1307393ec4d8c1fef0" + integrity sha512-RO6P5Gt64AnthGXKeqIFjQCLVFbAJvLYAb67TkvRQ9US4lNixFtFsYJnhLCC4ymz4dTT1hacG0cmTRGcEHF9ig== dependencies: "@babel/runtime-corejs3" "^7.26.10" - "@swagger-api/apidom-core" "^1.0.0-rc.3" - "@swagger-api/apidom-error" "^1.0.0-rc.3" + "@swagger-api/apidom-core" "^1.6.0" + "@swagger-api/apidom-error" "^1.6.0" "@swaggerexpert/json-pointer" "^2.10.1" -"@swagger-api/apidom-ns-api-design-systems@^1.0.0-rc.3": - version "1.0.0-rc.3" - resolved "https://registry.yarnpkg.com/@swagger-api/apidom-ns-api-design-systems/-/apidom-ns-api-design-systems-1.0.0-rc.3.tgz#3d10228b140c33c61b5902c66a5a2e7df0bfb656" - 
integrity sha512-JB06VDEKPvyOcJ9qIJmr2vI2FSWjdZh+BiRExZPW4tv/mTvdOxt1n38WA+mKzfFHQuoTR4ork/wR481CjAfGGQ== +"@swagger-api/apidom-ns-api-design-systems@^1.6.0": + version "1.6.0" + resolved "https://registry.yarnpkg.com/@swagger-api/apidom-ns-api-design-systems/-/apidom-ns-api-design-systems-1.6.0.tgz#9d1b17c3b660fa310f31e77da8b3fc53b430ad83" + integrity sha512-EYJfQ4JYuUo2J4QiiLnA/8LmM1k7AQcf1XVE+NrIpZ1160GIzqE+W5uOXkhAOImkP2Cb7EZZdE2cFE/tMYxNvw== + dependencies: + "@babel/runtime-corejs3" "^7.26.10" + "@swagger-api/apidom-core" "^1.6.0" + "@swagger-api/apidom-error" "^1.6.0" + "@swagger-api/apidom-ns-openapi-3-1" "^1.6.0" + "@types/ramda" "~0.30.0" + ramda "~0.30.0" + ramda-adjunct "^5.0.0" + ts-mixer "^6.0.3" + +"@swagger-api/apidom-ns-arazzo-1@^1.6.0": + version "1.6.0" + resolved "https://registry.yarnpkg.com/@swagger-api/apidom-ns-arazzo-1/-/apidom-ns-arazzo-1-1.6.0.tgz#0eb9ef09046e347df120c74f67462b623637c2a5" + integrity sha512-5rF8PyBiIHh6NfC5Y0WypW11X6hQIWr88EKNOQbBuT/nnzAsOznrUCfQ99FYGLucwdOHaMIBn/b/n4ejGBto/A== dependencies: "@babel/runtime-corejs3" "^7.26.10" - "@swagger-api/apidom-core" "^1.0.0-rc.3" - "@swagger-api/apidom-error" "^1.0.0-rc.3" - "@swagger-api/apidom-ns-openapi-3-1" "^1.0.0-rc.3" + "@swagger-api/apidom-core" "^1.6.0" + "@swagger-api/apidom-ns-json-schema-2020-12" "^1.6.0" "@types/ramda" "~0.30.0" ramda "~0.30.0" ramda-adjunct "^5.0.0" ts-mixer "^6.0.3" -"@swagger-api/apidom-ns-arazzo-1@^1.0.0-rc.0", "@swagger-api/apidom-ns-arazzo-1@^1.0.0-rc.3": - version "1.0.0-rc.3" - resolved "https://registry.yarnpkg.com/@swagger-api/apidom-ns-arazzo-1/-/apidom-ns-arazzo-1-1.0.0-rc.3.tgz#482e8a319478a10ccebbf1b04458acca360d3564" - integrity sha512-Um0MGGsGLQWvnASDoguSuE5X/NpS/9RlXlOHHG5nqzG2cdTlifRcN5tiz7H997162+ahEsD5aHD6tUKWOPCLtQ== +"@swagger-api/apidom-ns-asyncapi-2@^1.6.0": + version "1.6.0" + resolved "https://registry.yarnpkg.com/@swagger-api/apidom-ns-asyncapi-2/-/apidom-ns-asyncapi-2-1.6.0.tgz#c5a45530720429c1bc2513b1d9f7704454d4c176" + 
integrity sha512-tOodfX+o7lonEAnSAxet7nCayW+EqtKPegT06WXt7Llq1LS9eYZ9YzXdFgIwCm8UzfEpZdVLqtxbdLX9vuUtSg== dependencies: "@babel/runtime-corejs3" "^7.26.10" - "@swagger-api/apidom-core" "^1.0.0-rc.3" - "@swagger-api/apidom-ns-json-schema-2020-12" "^1.0.0-rc.3" + "@swagger-api/apidom-core" "^1.6.0" + "@swagger-api/apidom-ns-json-schema-draft-7" "^1.6.0" "@types/ramda" "~0.30.0" ramda "~0.30.0" ramda-adjunct "^5.0.0" ts-mixer "^6.0.3" -"@swagger-api/apidom-ns-asyncapi-2@^1.0.0-rc.0", "@swagger-api/apidom-ns-asyncapi-2@^1.0.0-rc.3": - version "1.0.0-rc.3" - resolved "https://registry.yarnpkg.com/@swagger-api/apidom-ns-asyncapi-2/-/apidom-ns-asyncapi-2-1.0.0-rc.3.tgz#a8a6bcf90dd294eefb7611d4da01d417eba4698e" - integrity sha512-UFmnbvEsN7jVvS/8V7X37UPvn8uxdqYBhDzdPSivjxpu/5Ag5Q1P2gHJnO6K2EfTCFL4S1qDObW2TUFdV1b6pg== +"@swagger-api/apidom-ns-asyncapi-3@^1.6.0": + version "1.6.0" + resolved "https://registry.yarnpkg.com/@swagger-api/apidom-ns-asyncapi-3/-/apidom-ns-asyncapi-3-1.6.0.tgz#af66f63137479ffa298f4f4b76a58db690894853" + integrity sha512-lRMvwTdtuPcwJEYLTX/UGtECpHi9UNYeT9rmWMw3LiKZrZzYc2L8q4ipPbpWwH8t7QfsF2u0iggCODU99lXCnw== dependencies: "@babel/runtime-corejs3" "^7.26.10" - "@swagger-api/apidom-core" "^1.0.0-rc.3" - "@swagger-api/apidom-ns-json-schema-draft-7" "^1.0.0-rc.3" + "@swagger-api/apidom-core" "^1.6.0" + "@swagger-api/apidom-ns-asyncapi-2" "^1.6.0" "@types/ramda" "~0.30.0" ramda "~0.30.0" ramda-adjunct "^5.0.0" ts-mixer "^6.0.3" -"@swagger-api/apidom-ns-json-schema-2019-09@^1.0.0-rc.3": - version "1.0.0-rc.3" - resolved "https://registry.yarnpkg.com/@swagger-api/apidom-ns-json-schema-2019-09/-/apidom-ns-json-schema-2019-09-1.0.0-rc.3.tgz#3d462a3235eea31fa40f44fa1238edb9aec5384c" - integrity sha512-fxQo/GK5NGdx4gN2snj4DpBcDc8bORLehTUqcwp33ikJ2PGugtpV3IQrBjxSWP05PyLOZAMpq1SM9gkCPgZNRA== +"@swagger-api/apidom-ns-json-schema-2019-09@^1.6.0": + version "1.6.0" + resolved 
"https://registry.yarnpkg.com/@swagger-api/apidom-ns-json-schema-2019-09/-/apidom-ns-json-schema-2019-09-1.6.0.tgz#439faf74e8ffa1006ae144b00a8dfb89b8cefba9" + integrity sha512-dee1i8wcAFgDEOzTsyoCzQhFLZ2JKzkK5KkRuryabvwS0hG2mKlogToFc8cO2MkkiLSpERm7DREALwSTFVHa0w== dependencies: "@babel/runtime-corejs3" "^7.26.10" - "@swagger-api/apidom-core" "^1.0.0-rc.3" - "@swagger-api/apidom-error" "^1.0.0-rc.3" - "@swagger-api/apidom-ns-json-schema-draft-7" "^1.0.0-rc.3" + "@swagger-api/apidom-core" "^1.6.0" + "@swagger-api/apidom-error" "^1.6.0" + "@swagger-api/apidom-ns-json-schema-draft-7" "^1.6.0" "@types/ramda" "~0.30.0" ramda "~0.30.0" ramda-adjunct "^5.0.0" ts-mixer "^6.0.4" -"@swagger-api/apidom-ns-json-schema-2020-12@^1.0.0-rc.3": - version "1.0.0-rc.3" - resolved "https://registry.yarnpkg.com/@swagger-api/apidom-ns-json-schema-2020-12/-/apidom-ns-json-schema-2020-12-1.0.0-rc.3.tgz#dd9dd0a7bf0b676c48e660810dada58301979ec9" - integrity sha512-iDPbua9HajFwkH9vFUIbkmKVI/VXKuV9G+jLGkyBlF/Zu++1Rv6CstBt+F9CgNThSUqkKt3YA9Rcd82uh1+HnQ== +"@swagger-api/apidom-ns-json-schema-2020-12@^1.6.0": + version "1.6.0" + resolved "https://registry.yarnpkg.com/@swagger-api/apidom-ns-json-schema-2020-12/-/apidom-ns-json-schema-2020-12-1.6.0.tgz#d2e0e1ef89141c0eea013687aebaf2580e8a6c7c" + integrity sha512-ldTxSnnIXskwpN6yCJkasqs32pJXwoXyad95crKT0xjZZr4fTrcAXXIyzdjBubiY9tK6elSrQGQxinJcV7ivWw== dependencies: "@babel/runtime-corejs3" "^7.26.10" - "@swagger-api/apidom-core" "^1.0.0-rc.3" - "@swagger-api/apidom-error" "^1.0.0-rc.3" - "@swagger-api/apidom-ns-json-schema-2019-09" "^1.0.0-rc.3" + "@swagger-api/apidom-core" "^1.6.0" + "@swagger-api/apidom-error" "^1.6.0" + "@swagger-api/apidom-ns-json-schema-2019-09" "^1.6.0" "@types/ramda" "~0.30.0" ramda "~0.30.0" ramda-adjunct "^5.0.0" ts-mixer "^6.0.4" -"@swagger-api/apidom-ns-json-schema-draft-4@^1.0.0-rc.3": - version "1.0.0-rc.3" - resolved 
"https://registry.yarnpkg.com/@swagger-api/apidom-ns-json-schema-draft-4/-/apidom-ns-json-schema-draft-4-1.0.0-rc.3.tgz#6ec561d648e1f49348553bb4000598d0adfffd1c" - integrity sha512-8lft8qCo/KAHqiUpfwUMifP9JDhuhXKMNYSSahP2SN0PnbujoS1h3DOXtpR9/+0N6fKPUT8I6GLEwgq8TX2yvA== +"@swagger-api/apidom-ns-json-schema-draft-4@^1.6.0": + version "1.6.0" + resolved "https://registry.yarnpkg.com/@swagger-api/apidom-ns-json-schema-draft-4/-/apidom-ns-json-schema-draft-4-1.6.0.tgz#57f2f0fb4c4a2b907cd096efc96e0bc9cd3594fc" + integrity sha512-t9HvHwrevEG7usosO6AdXmC8oYqje5nxHpUmODr72tUtCeAeGEGEb9lgqx7fBhjc3BYsRzOL1hX56m1gjEyCog== dependencies: "@babel/runtime-corejs3" "^7.26.10" - "@swagger-api/apidom-ast" "^1.0.0-rc.3" - "@swagger-api/apidom-core" "^1.0.0-rc.3" + "@swagger-api/apidom-ast" "^1.6.0" + "@swagger-api/apidom-core" "^1.6.0" "@types/ramda" "~0.30.0" ramda "~0.30.0" ramda-adjunct "^5.0.0" ts-mixer "^6.0.4" -"@swagger-api/apidom-ns-json-schema-draft-6@^1.0.0-rc.3": - version "1.0.0-rc.3" - resolved "https://registry.yarnpkg.com/@swagger-api/apidom-ns-json-schema-draft-6/-/apidom-ns-json-schema-draft-6-1.0.0-rc.3.tgz#164339fa76a2aedffc339eee52b2aef60043b3ee" - integrity sha512-IDC+98ur+7L3YaZZnnCytx9+cihElj24CcjX/X2mOBqOTaAwZ/Exb7LiBnvUswV1lOE2X2CX4donRemjk+e32Q== +"@swagger-api/apidom-ns-json-schema-draft-6@^1.6.0": + version "1.6.0" + resolved "https://registry.yarnpkg.com/@swagger-api/apidom-ns-json-schema-draft-6/-/apidom-ns-json-schema-draft-6-1.6.0.tgz#6d191a7ebf8c64cb6e29002a4f06ee454b960d64" + integrity sha512-aoyvQWgAOcZGTe5OfJ3r24DvXHHbrkKtAnxTOEdZzV/uOm6/cbuT8m02+aMOqWPxei1naC3ZHW9iHrETtfgV3w== dependencies: "@babel/runtime-corejs3" "^7.26.10" - "@swagger-api/apidom-core" "^1.0.0-rc.3" - "@swagger-api/apidom-error" "^1.0.0-rc.3" - "@swagger-api/apidom-ns-json-schema-draft-4" "^1.0.0-rc.3" + "@swagger-api/apidom-core" "^1.6.0" + "@swagger-api/apidom-error" "^1.6.0" + "@swagger-api/apidom-ns-json-schema-draft-4" "^1.6.0" "@types/ramda" "~0.30.0" ramda "~0.30.0" 
ramda-adjunct "^5.0.0" ts-mixer "^6.0.4" -"@swagger-api/apidom-ns-json-schema-draft-7@^1.0.0-rc.3": - version "1.0.0-rc.3" - resolved "https://registry.yarnpkg.com/@swagger-api/apidom-ns-json-schema-draft-7/-/apidom-ns-json-schema-draft-7-1.0.0-rc.3.tgz#8bc042b7dc6529dbd50d791b5ccb570b92c369c0" - integrity sha512-P0dk9WhH7CINBCh1u8GfcQFycrZcw3qCXug0w6M0wiSrjqZv+Mv/AI68dc0Rb+Dzshe4aZy0bZFjAQb3NHfrSg== +"@swagger-api/apidom-ns-json-schema-draft-7@^1.6.0": + version "1.6.0" + resolved "https://registry.yarnpkg.com/@swagger-api/apidom-ns-json-schema-draft-7/-/apidom-ns-json-schema-draft-7-1.6.0.tgz#5f95e9b055c9c9b1dc00c468a4f88449bdc89865" + integrity sha512-GjmC4+AHQh22fRZOmV+jSYMJTXh243XvdACfIQ//39kQu7gQsimF4PVSY2IgWSvS/I1ukWdPBYmDvOKryBPGrw== dependencies: "@babel/runtime-corejs3" "^7.26.10" - "@swagger-api/apidom-core" "^1.0.0-rc.3" - "@swagger-api/apidom-error" "^1.0.0-rc.3" - "@swagger-api/apidom-ns-json-schema-draft-6" "^1.0.0-rc.3" + "@swagger-api/apidom-core" "^1.6.0" + "@swagger-api/apidom-error" "^1.6.0" + "@swagger-api/apidom-ns-json-schema-draft-6" "^1.6.0" "@types/ramda" "~0.30.0" ramda "~0.30.0" ramda-adjunct "^5.0.0" ts-mixer "^6.0.4" -"@swagger-api/apidom-ns-openapi-2@^1.0.0-rc.0", "@swagger-api/apidom-ns-openapi-2@^1.0.0-rc.3": - version "1.0.0-rc.3" - resolved "https://registry.yarnpkg.com/@swagger-api/apidom-ns-openapi-2/-/apidom-ns-openapi-2-1.0.0-rc.3.tgz#a57ad88857009ba892a60c205b971b5bb4de69cd" - integrity sha512-zwriSfjG+qiPWBHLZRyfdZa305xrB24aZjiAY8r2ikZsdQhC/WHI+e6YqeVCkJwkLzA/oZgrlmyci0mvtkFDQA== +"@swagger-api/apidom-ns-openapi-2@^1.6.0": + version "1.6.0" + resolved "https://registry.yarnpkg.com/@swagger-api/apidom-ns-openapi-2/-/apidom-ns-openapi-2-1.6.0.tgz#2a8fe09b6e063ffe90ad75c31023ccbda558e097" + integrity sha512-xbmYzagnB8rO7sYwNGVyxYbNBkjCWnMhlnMrxkPtfQ/2u2ANAmTnCB/S/cMswX5XofiRJbznKAjLDSKBS+mLpQ== dependencies: "@babel/runtime-corejs3" "^7.26.10" - "@swagger-api/apidom-core" "^1.0.0-rc.3" - "@swagger-api/apidom-error" 
"^1.0.0-rc.3" - "@swagger-api/apidom-ns-json-schema-draft-4" "^1.0.0-rc.3" + "@swagger-api/apidom-core" "^1.6.0" + "@swagger-api/apidom-error" "^1.6.0" + "@swagger-api/apidom-ns-json-schema-draft-4" "^1.6.0" "@types/ramda" "~0.30.0" ramda "~0.30.0" ramda-adjunct "^5.0.0" ts-mixer "^6.0.3" -"@swagger-api/apidom-ns-openapi-3-0@^1.0.0-rc.0", "@swagger-api/apidom-ns-openapi-3-0@^1.0.0-rc.3": - version "1.0.0-rc.3" - resolved "https://registry.yarnpkg.com/@swagger-api/apidom-ns-openapi-3-0/-/apidom-ns-openapi-3-0-1.0.0-rc.3.tgz#03d27e16ade400d4f962ecdb34de429f2da3ab2f" - integrity sha512-RCufXt7ja7fqFS/EqWOMZ54J4uEnqPQkCXMwwCqUrFHXQ7nGN1J9nmwj2hFQUFYraajmtnk2dNByO46+XefV1w== +"@swagger-api/apidom-ns-openapi-3-0@^1.6.0": + version "1.6.0" + resolved "https://registry.yarnpkg.com/@swagger-api/apidom-ns-openapi-3-0/-/apidom-ns-openapi-3-0-1.6.0.tgz#2f838c8ba216541d835793fd5947e1fed7b87445" + integrity sha512-AOvW7a2H27inepcTBAWaBMjJLrCh5IPWD4nTU+gysULC7IW6gphO8hj3iUuTmFBcGh9be89GBbvv2y/EGAfx9w== dependencies: "@babel/runtime-corejs3" "^7.26.10" - "@swagger-api/apidom-core" "^1.0.0-rc.3" - "@swagger-api/apidom-error" "^1.0.0-rc.3" - "@swagger-api/apidom-ns-json-schema-draft-4" "^1.0.0-rc.3" + "@swagger-api/apidom-core" "^1.6.0" + "@swagger-api/apidom-error" "^1.6.0" + "@swagger-api/apidom-ns-json-schema-draft-4" "^1.6.0" "@types/ramda" "~0.30.0" ramda "~0.30.0" ramda-adjunct "^5.0.0" ts-mixer "^6.0.3" -"@swagger-api/apidom-ns-openapi-3-1@^1.0.0-rc.0", "@swagger-api/apidom-ns-openapi-3-1@^1.0.0-rc.1", "@swagger-api/apidom-ns-openapi-3-1@^1.0.0-rc.3": - version "1.0.0-rc.3" - resolved "https://registry.yarnpkg.com/@swagger-api/apidom-ns-openapi-3-1/-/apidom-ns-openapi-3-1-1.0.0-rc.3.tgz#3e3c3be69eaa70a6a14e31f37c91d5a984dcdd8b" - integrity sha512-Nc28G/ikbypcXVricv8+PGEGXKAmOwZjkBxB3wN5D4+D0+AiUy1lV07Z7+xFWdql65Y5WWxxfU2/Ej01Bnqt4Q== +"@swagger-api/apidom-ns-openapi-3-1@^1.6.0": + version "1.6.0" + resolved 
"https://registry.yarnpkg.com/@swagger-api/apidom-ns-openapi-3-1/-/apidom-ns-openapi-3-1-1.6.0.tgz#786698fc0a1c9162782024a66d8be2b4e0e106db" + integrity sha512-jCVypc8503zDSxAQlyV8j1vzwc75VBdWHtE2O0F+q5j9qNtGxw/ekbDkgrydYRaGBl92mf16dtPjtp5LwJD0Hw== dependencies: "@babel/runtime-corejs3" "^7.26.10" - "@swagger-api/apidom-ast" "^1.0.0-rc.3" - "@swagger-api/apidom-core" "^1.0.0-rc.3" - "@swagger-api/apidom-json-pointer" "^1.0.0-rc.3" - "@swagger-api/apidom-ns-json-schema-2020-12" "^1.0.0-rc.3" - "@swagger-api/apidom-ns-openapi-3-0" "^1.0.0-rc.3" + "@swagger-api/apidom-ast" "^1.6.0" + "@swagger-api/apidom-core" "^1.6.0" + "@swagger-api/apidom-json-pointer" "^1.6.0" + "@swagger-api/apidom-ns-json-schema-2020-12" "^1.6.0" + "@swagger-api/apidom-ns-openapi-3-0" "^1.6.0" "@types/ramda" "~0.30.0" ramda "~0.30.0" ramda-adjunct "^5.0.0" ts-mixer "^6.0.3" -"@swagger-api/apidom-parser-adapter-api-design-systems-json@^1.0.0-rc.0": - version "1.0.0-rc.3" - resolved "https://registry.yarnpkg.com/@swagger-api/apidom-parser-adapter-api-design-systems-json/-/apidom-parser-adapter-api-design-systems-json-1.0.0-rc.3.tgz#49b493a0d3822995578dd333589d2bf103500ee0" - integrity sha512-ZXKuMd6nqBrpCqTJmbd2pS46ZmL8bIra1KqWVjcvkA/E032nmgDeaT78Cf0Ulha6j+CAzcwL0AnR7GrtFpSfSw== +"@swagger-api/apidom-ns-openapi-3-2@^1.6.0": + version "1.6.0" + resolved "https://registry.yarnpkg.com/@swagger-api/apidom-ns-openapi-3-2/-/apidom-ns-openapi-3-2-1.6.0.tgz#0ba997d1321a2af1af36ccd30f335b0c9da24753" + integrity sha512-QcFAUucaPaWiOKOEaaGqSfK3OtjeGJodWZLsuBQ0vrHaHkWyQ7jwsM1DJbc1Y8geOBeD2wIwdrdRjoulmqU1SA== dependencies: "@babel/runtime-corejs3" "^7.26.10" - "@swagger-api/apidom-core" "^1.0.0-rc.3" - "@swagger-api/apidom-ns-api-design-systems" "^1.0.0-rc.3" - "@swagger-api/apidom-parser-adapter-json" "^1.0.0-rc.3" + "@swagger-api/apidom-ast" "^1.6.0" + "@swagger-api/apidom-core" "^1.6.0" + "@swagger-api/apidom-json-pointer" "^1.6.0" + "@swagger-api/apidom-ns-json-schema-2020-12" "^1.6.0" + 
"@swagger-api/apidom-ns-openapi-3-0" "^1.6.0" + "@swagger-api/apidom-ns-openapi-3-1" "^1.6.0" "@types/ramda" "~0.30.0" ramda "~0.30.0" ramda-adjunct "^5.0.0" + ts-mixer "^6.0.3" -"@swagger-api/apidom-parser-adapter-api-design-systems-yaml@^1.0.0-rc.0": - version "1.0.0-rc.3" - resolved "https://registry.yarnpkg.com/@swagger-api/apidom-parser-adapter-api-design-systems-yaml/-/apidom-parser-adapter-api-design-systems-yaml-1.0.0-rc.3.tgz#812d835132f676aa847e91836e52e502440c29c7" - integrity sha512-Qg1yTPPzGF3EhlqcxIZeDVBxxvZzylGM6CTHg5cltGOSoFQ7+NJFE9Ktvk0gbVaFUyElFduCno9FvIfzxPlj8g== +"@swagger-api/apidom-parser-adapter-api-design-systems-json@^1.6.0": + version "1.6.0" + resolved "https://registry.yarnpkg.com/@swagger-api/apidom-parser-adapter-api-design-systems-json/-/apidom-parser-adapter-api-design-systems-json-1.6.0.tgz#8495b058dc7962ca2c17b0528aed78dac394f654" + integrity sha512-vz/9k0X/kh6mLm+Fi+LGNk/yyFq28wxI29ZVLW+b7ulcODikv+NaDnyN2n2kLKCvIchPATzAEvqMvVMuuQwWlg== dependencies: "@babel/runtime-corejs3" "^7.26.10" - "@swagger-api/apidom-core" "^1.0.0-rc.3" - "@swagger-api/apidom-ns-api-design-systems" "^1.0.0-rc.3" - "@swagger-api/apidom-parser-adapter-yaml-1-2" "^1.0.0-rc.3" + "@swagger-api/apidom-core" "^1.6.0" + "@swagger-api/apidom-ns-api-design-systems" "^1.6.0" + "@swagger-api/apidom-parser-adapter-json" "^1.6.0" "@types/ramda" "~0.30.0" ramda "~0.30.0" ramda-adjunct "^5.0.0" -"@swagger-api/apidom-parser-adapter-arazzo-json-1@^1.0.0-rc.0": - version "1.0.0-rc.3" - resolved "https://registry.yarnpkg.com/@swagger-api/apidom-parser-adapter-arazzo-json-1/-/apidom-parser-adapter-arazzo-json-1-1.0.0-rc.3.tgz#b996a87586b4cb2f8706042ca153e9215e20fc7b" - integrity sha512-T7MbfTSDqdHgSr+cSC6gcGIsiwK3NXmdo28ZUv6LWsgcWDj2zw2Jie+7rXQaDN3JFEL34M/BIcMLyvrG7gYN/Q== +"@swagger-api/apidom-parser-adapter-api-design-systems-yaml@^1.6.0": + version "1.6.0" + resolved 
"https://registry.yarnpkg.com/@swagger-api/apidom-parser-adapter-api-design-systems-yaml/-/apidom-parser-adapter-api-design-systems-yaml-1.6.0.tgz#a56b618504f0168b3687a42553b5d26860d85adc" + integrity sha512-QAq4H6YzRtysSpvLtlJ8WZ22/1Mht+/iarrUOijxDZQPAGfYeUoIicnCqxkVZYSea85sQl+3kiCCB3nhSH+L0g== dependencies: "@babel/runtime-corejs3" "^7.26.10" - "@swagger-api/apidom-core" "^1.0.0-rc.3" - "@swagger-api/apidom-ns-arazzo-1" "^1.0.0-rc.3" - "@swagger-api/apidom-parser-adapter-json" "^1.0.0-rc.3" + "@swagger-api/apidom-core" "^1.6.0" + "@swagger-api/apidom-ns-api-design-systems" "^1.6.0" + "@swagger-api/apidom-parser-adapter-yaml-1-2" "^1.6.0" "@types/ramda" "~0.30.0" ramda "~0.30.0" ramda-adjunct "^5.0.0" -"@swagger-api/apidom-parser-adapter-arazzo-yaml-1@^1.0.0-rc.0": - version "1.0.0-rc.3" - resolved "https://registry.yarnpkg.com/@swagger-api/apidom-parser-adapter-arazzo-yaml-1/-/apidom-parser-adapter-arazzo-yaml-1-1.0.0-rc.3.tgz#3423f73dcb40fb8ddd58db6c1e2384bf9f31d614" - integrity sha512-mUmxQVXPoemP2ak/77g/o8kpP2DNd1EDjteuyGHyw1EHk/t4xYPAP05rQ2DfIQ5yVHmxBKRDQ15kfVNEpfUfYQ== +"@swagger-api/apidom-parser-adapter-arazzo-json-1@^1.6.0": + version "1.6.0" + resolved "https://registry.yarnpkg.com/@swagger-api/apidom-parser-adapter-arazzo-json-1/-/apidom-parser-adapter-arazzo-json-1-1.6.0.tgz#63790158816fc50d81bcf59631b3536e3af8d5ca" + integrity sha512-syKPG3a9IGRvlGhXIEUzWhwbEuFbj+UwwtqaKu8zu771V+DRtH+wxyOkX54vKAIlApz/FgeUbmlWA1ZtYBlSIQ== + dependencies: + "@babel/runtime-corejs3" "^7.26.10" + "@swagger-api/apidom-core" "^1.6.0" + "@swagger-api/apidom-ns-arazzo-1" "^1.6.0" + "@swagger-api/apidom-parser-adapter-json" "^1.6.0" + "@types/ramda" "~0.30.0" + ramda "~0.30.0" + ramda-adjunct "^5.0.0" + +"@swagger-api/apidom-parser-adapter-arazzo-yaml-1@^1.6.0": + version "1.6.0" + resolved "https://registry.yarnpkg.com/@swagger-api/apidom-parser-adapter-arazzo-yaml-1/-/apidom-parser-adapter-arazzo-yaml-1-1.6.0.tgz#390a74c9cabaf175f35086075e82579edcefeb40" + integrity 
sha512-IVVLn+a8Q1iQcQsm4tXiAPghHJuJSB1rhIlDyHe3tSQgt9HOSiVpbnJDpwE/JBxxDxSAkeT6Ovo+fi2T5AmHYg== dependencies: "@babel/runtime-corejs3" "^7.26.10" - "@swagger-api/apidom-core" "^1.0.0-rc.3" - "@swagger-api/apidom-ns-arazzo-1" "^1.0.0-rc.3" - "@swagger-api/apidom-parser-adapter-yaml-1-2" "^1.0.0-rc.3" + "@swagger-api/apidom-core" "^1.6.0" + "@swagger-api/apidom-ns-arazzo-1" "^1.6.0" + "@swagger-api/apidom-parser-adapter-yaml-1-2" "^1.6.0" "@types/ramda" "~0.30.0" ramda "~0.30.0" ramda-adjunct "^5.0.0" -"@swagger-api/apidom-parser-adapter-asyncapi-json-2@^1.0.0-rc.0": - version "1.0.0-rc.3" - resolved "https://registry.yarnpkg.com/@swagger-api/apidom-parser-adapter-asyncapi-json-2/-/apidom-parser-adapter-asyncapi-json-2-1.0.0-rc.3.tgz#324c612bb8bbf48a4f249d852d61304cb1d8c05f" - integrity sha512-K2BaslenC4ouPyzOQSB7wQPSsIGKGIj4VfP4M9y3fJaX9dIi+z3kzYQV7NFhZHAnq6pVybIDA44FLHF/WLCxUg== +"@swagger-api/apidom-parser-adapter-asyncapi-json-2@^1.6.0": + version "1.6.0" + resolved "https://registry.yarnpkg.com/@swagger-api/apidom-parser-adapter-asyncapi-json-2/-/apidom-parser-adapter-asyncapi-json-2-1.6.0.tgz#3573506e856350ef0a24c64b517c3461eaa64fa7" + integrity sha512-aSUi22ELTDvdCLA3nIUOehuNBcHSeCqU7S7YNiHP/mwE4Q07pwQrYXijH2PROfCdjlZNNN34m6Ptakd92jliJQ== dependencies: "@babel/runtime-corejs3" "^7.26.10" - "@swagger-api/apidom-core" "^1.0.0-rc.3" - "@swagger-api/apidom-ns-asyncapi-2" "^1.0.0-rc.3" - "@swagger-api/apidom-parser-adapter-json" "^1.0.0-rc.3" + "@swagger-api/apidom-core" "^1.6.0" + "@swagger-api/apidom-ns-asyncapi-2" "^1.6.0" + "@swagger-api/apidom-parser-adapter-json" "^1.6.0" "@types/ramda" "~0.30.0" ramda "~0.30.0" ramda-adjunct "^5.0.0" -"@swagger-api/apidom-parser-adapter-asyncapi-yaml-2@^1.0.0-rc.0": - version "1.0.0-rc.3" - resolved "https://registry.yarnpkg.com/@swagger-api/apidom-parser-adapter-asyncapi-yaml-2/-/apidom-parser-adapter-asyncapi-yaml-2-1.0.0-rc.3.tgz#9e04639e08d17329bb92385edb9e6c8308fa7b2b" - integrity 
sha512-xJezoi5d+RtV7sG9VRcfpbLlJwaR6GoJr2S8lbsnMUkk/B2vZGdRbA2Fc67REQIJTEfxXcU8T3+5m8j0WrG9Xw== +"@swagger-api/apidom-parser-adapter-asyncapi-json-3@^1.6.0": + version "1.6.0" + resolved "https://registry.yarnpkg.com/@swagger-api/apidom-parser-adapter-asyncapi-json-3/-/apidom-parser-adapter-asyncapi-json-3-1.6.0.tgz#c18fb9cb2ce69fbe82a4b4fee0bb88bd972956f2" + integrity sha512-Ic53vcFF9zniDyCXOGSwwuAdEBUn5lFEAa0m2i30R36cQFHBCCuvbzbMQjWdr+oML0Aw4XoqOwZCQgkJJICpPA== dependencies: "@babel/runtime-corejs3" "^7.26.10" - "@swagger-api/apidom-core" "^1.0.0-rc.3" - "@swagger-api/apidom-ns-asyncapi-2" "^1.0.0-rc.3" - "@swagger-api/apidom-parser-adapter-yaml-1-2" "^1.0.0-rc.3" + "@swagger-api/apidom-core" "^1.6.0" + "@swagger-api/apidom-ns-asyncapi-3" "^1.6.0" + "@swagger-api/apidom-parser-adapter-json" "^1.6.0" "@types/ramda" "~0.30.0" ramda "~0.30.0" ramda-adjunct "^5.0.0" -"@swagger-api/apidom-parser-adapter-json@^1.0.0-rc.0", "@swagger-api/apidom-parser-adapter-json@^1.0.0-rc.3": - version "1.0.0-rc.3" - resolved "https://registry.yarnpkg.com/@swagger-api/apidom-parser-adapter-json/-/apidom-parser-adapter-json-1.0.0-rc.3.tgz#454dbeef874dc528aeb2ad9e27104c1199a4bdba" - integrity sha512-Y0dfIYvQE+OLjormlx6RjmA6ymNA6+nkqJC/6qkFt+4fSjfOiXwbOOnfZp9pJXb2ssmDDdrPTFc3ninx5k7jNw== +"@swagger-api/apidom-parser-adapter-asyncapi-yaml-2@^1.6.0": + version "1.6.0" + resolved "https://registry.yarnpkg.com/@swagger-api/apidom-parser-adapter-asyncapi-yaml-2/-/apidom-parser-adapter-asyncapi-yaml-2-1.6.0.tgz#209e4ceefa2c70ce0bfd957bf0e786474f763fb9" + integrity sha512-d/w7X+T4vT+KPqb+8xUm6n4pbHsGB28jdxE9rNVbxhu6D3owny2uxfglwaFh4fJG6FQMavCwl/QzfB4newdoKQ== dependencies: "@babel/runtime-corejs3" "^7.26.10" - "@swagger-api/apidom-ast" "^1.0.0-rc.3" - "@swagger-api/apidom-core" "^1.0.0-rc.3" - "@swagger-api/apidom-error" "^1.0.0-rc.3" + "@swagger-api/apidom-core" "^1.6.0" + "@swagger-api/apidom-ns-asyncapi-2" "^1.6.0" + "@swagger-api/apidom-parser-adapter-yaml-1-2" "^1.6.0" + "@types/ramda" 
"~0.30.0" + ramda "~0.30.0" + ramda-adjunct "^5.0.0" + +"@swagger-api/apidom-parser-adapter-asyncapi-yaml-3@^1.6.0": + version "1.6.0" + resolved "https://registry.yarnpkg.com/@swagger-api/apidom-parser-adapter-asyncapi-yaml-3/-/apidom-parser-adapter-asyncapi-yaml-3-1.6.0.tgz#37f0c6d4562d93a910c883392f51ba3e2e004e17" + integrity sha512-Wmf0LY59TZxQhqrJU2pcnUikcChVB4IqGPgjtOFLUoqPpz8FSwYbJ/SPnSMSl+QuncxROheSFsgZ6Tupv0sPHw== + dependencies: + "@babel/runtime-corejs3" "^7.26.10" + "@swagger-api/apidom-core" "^1.6.0" + "@swagger-api/apidom-ns-asyncapi-3" "^1.6.0" + "@swagger-api/apidom-parser-adapter-yaml-1-2" "^1.6.0" + "@types/ramda" "~0.30.0" + ramda "~0.30.0" + ramda-adjunct "^5.0.0" + +"@swagger-api/apidom-parser-adapter-json@^1.6.0": + version "1.6.0" + resolved "https://registry.yarnpkg.com/@swagger-api/apidom-parser-adapter-json/-/apidom-parser-adapter-json-1.6.0.tgz#cb950140c66cb27cc7517f604e50e9c8bb6b2753" + integrity sha512-WdAS+dBAB2t18HuUgSZy5b8JM7uXfn1RlPymJNRMUsrKYCTtPrQ/0q3YfnBjPhtjSSNCp+p1wajxHAFS7cj2VA== + dependencies: + "@babel/runtime-corejs3" "^7.26.10" + "@swagger-api/apidom-ast" "^1.6.0" + "@swagger-api/apidom-core" "^1.6.0" + "@swagger-api/apidom-error" "^1.6.0" "@types/ramda" "~0.30.0" ramda "~0.30.0" ramda-adjunct "^5.0.0" @@ -3088,93 +3259,119 @@ tree-sitter-json "=0.24.8" web-tree-sitter "=0.24.5" -"@swagger-api/apidom-parser-adapter-openapi-json-2@^1.0.0-rc.0": - version "1.0.0-rc.3" - resolved "https://registry.yarnpkg.com/@swagger-api/apidom-parser-adapter-openapi-json-2/-/apidom-parser-adapter-openapi-json-2-1.0.0-rc.3.tgz#67e9b647c57cd3eed5ca818e615b6a925754ffad" - integrity sha512-yaMS11FZVJLF062s+dch1kmUvBqdIS6mwAg/4XUL7XwSYat6pnV2ONCqdcUO9JSc9KJMZQiVAZjAZSj096ssNg== +"@swagger-api/apidom-parser-adapter-openapi-json-2@^1.6.0": + version "1.6.0" + resolved "https://registry.yarnpkg.com/@swagger-api/apidom-parser-adapter-openapi-json-2/-/apidom-parser-adapter-openapi-json-2-1.6.0.tgz#278eac1ec8fec3b0c7e2cad6cc8f75f636ae8a3c" + 
integrity sha512-Q36W1FzdVaY7Oh98533dzCUghwb8k3ZMdlnV37V1H13FlUkj3tVZiWaeaCLwIakzQ7XXYaQTOP+VrRhDRjzhUA== dependencies: "@babel/runtime-corejs3" "^7.26.10" - "@swagger-api/apidom-core" "^1.0.0-rc.3" - "@swagger-api/apidom-ns-openapi-2" "^1.0.0-rc.3" - "@swagger-api/apidom-parser-adapter-json" "^1.0.0-rc.3" + "@swagger-api/apidom-core" "^1.6.0" + "@swagger-api/apidom-ns-openapi-2" "^1.6.0" + "@swagger-api/apidom-parser-adapter-json" "^1.6.0" "@types/ramda" "~0.30.0" ramda "~0.30.0" ramda-adjunct "^5.0.0" -"@swagger-api/apidom-parser-adapter-openapi-json-3-0@^1.0.0-rc.0": - version "1.0.0-rc.3" - resolved "https://registry.yarnpkg.com/@swagger-api/apidom-parser-adapter-openapi-json-3-0/-/apidom-parser-adapter-openapi-json-3-0-1.0.0-rc.3.tgz#60380b22a287026052ecb6505f3ce9215570f378" - integrity sha512-5OdImG3eEgYpFvSo0EiZVvJJahk+f6cm5WZNn9lVdRlmxmtpzKM3UNfIYcBgVcAcLvfi8g6G7xRzD1DshaS8sw== +"@swagger-api/apidom-parser-adapter-openapi-json-3-0@^1.6.0": + version "1.6.0" + resolved "https://registry.yarnpkg.com/@swagger-api/apidom-parser-adapter-openapi-json-3-0/-/apidom-parser-adapter-openapi-json-3-0-1.6.0.tgz#9dfebd27032acef17d1c48820ab6d95db6561910" + integrity sha512-UY+obOLTPHJvnXscdMY9XwZyuqcnBe6cu9TURjJgkO/QpOpPDqqZoRyurKZgRrX0Pv9B1zR3EIzhl01u/jeUaw== dependencies: "@babel/runtime-corejs3" "^7.26.10" - "@swagger-api/apidom-core" "^1.0.0-rc.3" - "@swagger-api/apidom-ns-openapi-3-0" "^1.0.0-rc.3" - "@swagger-api/apidom-parser-adapter-json" "^1.0.0-rc.3" + "@swagger-api/apidom-core" "^1.6.0" + "@swagger-api/apidom-ns-openapi-3-0" "^1.6.0" + "@swagger-api/apidom-parser-adapter-json" "^1.6.0" "@types/ramda" "~0.30.0" ramda "~0.30.0" ramda-adjunct "^5.0.0" -"@swagger-api/apidom-parser-adapter-openapi-json-3-1@^1.0.0-rc.0": - version "1.0.0-rc.3" - resolved "https://registry.yarnpkg.com/@swagger-api/apidom-parser-adapter-openapi-json-3-1/-/apidom-parser-adapter-openapi-json-3-1-1.0.0-rc.3.tgz#dde4d955346fcc6d3e66cfd43220b5d2a36e84a3" - integrity 
sha512-UWlH29DOqKfHF2zwv7r5b7pgrc7Yxdus7FjYWA8p8yoIB02xDwHBaH4KhccIAXkm1qNMo+4TwSKFvO/boE8LMA== +"@swagger-api/apidom-parser-adapter-openapi-json-3-1@^1.6.0": + version "1.6.0" + resolved "https://registry.yarnpkg.com/@swagger-api/apidom-parser-adapter-openapi-json-3-1/-/apidom-parser-adapter-openapi-json-3-1-1.6.0.tgz#2ede833b39831973525f8b45680010e957ead469" + integrity sha512-4ch04/96lYMXQu6odqa6H0aJmV8UefnBJKX1CPuL4qcPSPMFCurcXHGpPHrwMu1p/4Q9H+yRVlYeNQV10xvM0w== dependencies: "@babel/runtime-corejs3" "^7.26.10" - "@swagger-api/apidom-core" "^1.0.0-rc.3" - "@swagger-api/apidom-ns-openapi-3-1" "^1.0.0-rc.3" - "@swagger-api/apidom-parser-adapter-json" "^1.0.0-rc.3" + "@swagger-api/apidom-core" "^1.6.0" + "@swagger-api/apidom-ns-openapi-3-1" "^1.6.0" + "@swagger-api/apidom-parser-adapter-json" "^1.6.0" "@types/ramda" "~0.30.0" ramda "~0.30.0" ramda-adjunct "^5.0.0" -"@swagger-api/apidom-parser-adapter-openapi-yaml-2@^1.0.0-rc.0": - version "1.0.0-rc.3" - resolved "https://registry.yarnpkg.com/@swagger-api/apidom-parser-adapter-openapi-yaml-2/-/apidom-parser-adapter-openapi-yaml-2-1.0.0-rc.3.tgz#a8ae1bb82037a913d3ddfd033026a180893dfc8b" - integrity sha512-kSWzmalm98ScImQHHtpTBDAIEzLsfE24Pe1IIJP1TaI2rk1AuxzaCsqMl6NQIlnIEawghPOXlG0hLsgtswn/Jg== +"@swagger-api/apidom-parser-adapter-openapi-json-3-2@^1.6.0": + version "1.6.0" + resolved "https://registry.yarnpkg.com/@swagger-api/apidom-parser-adapter-openapi-json-3-2/-/apidom-parser-adapter-openapi-json-3-2-1.6.0.tgz#e019e0321928affbdb7e3b581da811b6da9b3665" + integrity sha512-fWR2gjMQg00QIimcXQMSVeLnCH/2iuDD/Dx8TzVHmKV/IKlu+TnmIVosdlDfRmOB+4duwU6/yfoA79IEhFeZdw== dependencies: "@babel/runtime-corejs3" "^7.26.10" - "@swagger-api/apidom-core" "^1.0.0-rc.3" - "@swagger-api/apidom-ns-openapi-2" "^1.0.0-rc.3" - "@swagger-api/apidom-parser-adapter-yaml-1-2" "^1.0.0-rc.3" + "@swagger-api/apidom-core" "^1.6.0" + "@swagger-api/apidom-ns-openapi-3-2" "^1.6.0" + "@swagger-api/apidom-parser-adapter-json" "^1.6.0" "@types/ramda" 
"~0.30.0" ramda "~0.30.0" ramda-adjunct "^5.0.0" -"@swagger-api/apidom-parser-adapter-openapi-yaml-3-0@^1.0.0-rc.0": - version "1.0.0-rc.3" - resolved "https://registry.yarnpkg.com/@swagger-api/apidom-parser-adapter-openapi-yaml-3-0/-/apidom-parser-adapter-openapi-yaml-3-0-1.0.0-rc.3.tgz#bd81a4bc26a459a80b94cbe2f6cb1c1c60776d92" - integrity sha512-IRxjOgmGpaA1ay/NITOqk3TKTXnGiJtNP8KsPm//i+HkGcg87lZEvRDflB2Z70aRofKncXM2rCMAEqFqV7A9ug== +"@swagger-api/apidom-parser-adapter-openapi-yaml-2@^1.6.0": + version "1.6.0" + resolved "https://registry.yarnpkg.com/@swagger-api/apidom-parser-adapter-openapi-yaml-2/-/apidom-parser-adapter-openapi-yaml-2-1.6.0.tgz#6ca83d9027e1adf9a2a8e7ef1f0b4599bcf6fbf0" + integrity sha512-dkEh1Rw9uvuIAOTfKjWRX2rLWP+xJ/Eqdkqeo0I0BWFKXX49YcDpHJV4XHpmd5FbsjJ9vBYr0hAmkbl32TtR4g== dependencies: "@babel/runtime-corejs3" "^7.26.10" - "@swagger-api/apidom-core" "^1.0.0-rc.3" - "@swagger-api/apidom-ns-openapi-3-0" "^1.0.0-rc.3" - "@swagger-api/apidom-parser-adapter-yaml-1-2" "^1.0.0-rc.3" + "@swagger-api/apidom-core" "^1.6.0" + "@swagger-api/apidom-ns-openapi-2" "^1.6.0" + "@swagger-api/apidom-parser-adapter-yaml-1-2" "^1.6.0" "@types/ramda" "~0.30.0" ramda "~0.30.0" ramda-adjunct "^5.0.0" -"@swagger-api/apidom-parser-adapter-openapi-yaml-3-1@^1.0.0-rc.0": - version "1.0.0-rc.3" - resolved "https://registry.yarnpkg.com/@swagger-api/apidom-parser-adapter-openapi-yaml-3-1/-/apidom-parser-adapter-openapi-yaml-3-1-1.0.0-rc.3.tgz#6b1c2df1aeb86431f71cc49916696a9d38b9db7c" - integrity sha512-uvDMPiKt7uZSAOUVe+q/AygTFXw1odxxu5mi5voQM3/0KbR/vlt8f1dO9sQkys+G6ped2nL4r8B0p6bXR8uAMQ== +"@swagger-api/apidom-parser-adapter-openapi-yaml-3-0@^1.6.0": + version "1.6.0" + resolved "https://registry.yarnpkg.com/@swagger-api/apidom-parser-adapter-openapi-yaml-3-0/-/apidom-parser-adapter-openapi-yaml-3-0-1.6.0.tgz#bd9ad26ed181153fb69eb73bda9fca9291b4eaed" + integrity sha512-6azq5YonWdzHcO9llK9zn1a+rGxlTz2Uf8p8NWDQnl2AZ56neDLYEL3mNDlrMXAy8dSJIHw+u9VF1OOzdslIHQ== dependencies: 
"@babel/runtime-corejs3" "^7.26.10" - "@swagger-api/apidom-core" "^1.0.0-rc.3" - "@swagger-api/apidom-ns-openapi-3-1" "^1.0.0-rc.3" - "@swagger-api/apidom-parser-adapter-yaml-1-2" "^1.0.0-rc.3" + "@swagger-api/apidom-core" "^1.6.0" + "@swagger-api/apidom-ns-openapi-3-0" "^1.6.0" + "@swagger-api/apidom-parser-adapter-yaml-1-2" "^1.6.0" "@types/ramda" "~0.30.0" ramda "~0.30.0" ramda-adjunct "^5.0.0" -"@swagger-api/apidom-parser-adapter-yaml-1-2@^1.0.0-rc.0", "@swagger-api/apidom-parser-adapter-yaml-1-2@^1.0.0-rc.3": - version "1.0.0-rc.3" - resolved "https://registry.yarnpkg.com/@swagger-api/apidom-parser-adapter-yaml-1-2/-/apidom-parser-adapter-yaml-1-2-1.0.0-rc.3.tgz#5cd66884d43c22a2f774bec7edd8e2103b751e32" - integrity sha512-IiLIw74NRpRwi2YkV1hzmHC5JvvAm/TdeVYZoYK0QxeT2Ozr6MvhnUnRFjjSL3wcmku9+rLz2d8EGL2kO46qRA== +"@swagger-api/apidom-parser-adapter-openapi-yaml-3-1@^1.6.0": + version "1.6.0" + resolved "https://registry.yarnpkg.com/@swagger-api/apidom-parser-adapter-openapi-yaml-3-1/-/apidom-parser-adapter-openapi-yaml-3-1-1.6.0.tgz#cb9e42504cd38564c7c444b958ccb8656314eac5" + integrity sha512-g2tGCXyIAC0IA6JjA0HVxHWyCovyfAxDQ+pMAJ6qm4PfrZHB+oXKWKZHNNmQaFiKdc/SVdMQq6Up0mXOQs7IOQ== + dependencies: + "@babel/runtime-corejs3" "^7.26.10" + "@swagger-api/apidom-core" "^1.6.0" + "@swagger-api/apidom-ns-openapi-3-1" "^1.6.0" + "@swagger-api/apidom-parser-adapter-yaml-1-2" "^1.6.0" + "@types/ramda" "~0.30.0" + ramda "~0.30.0" + ramda-adjunct "^5.0.0" + +"@swagger-api/apidom-parser-adapter-openapi-yaml-3-2@^1.6.0": + version "1.6.0" + resolved "https://registry.yarnpkg.com/@swagger-api/apidom-parser-adapter-openapi-yaml-3-2/-/apidom-parser-adapter-openapi-yaml-3-2-1.6.0.tgz#5851ff4cfeae7712ffab022b347e4877859ffc17" + integrity sha512-NGkdG9X5Svi89ZBluNseyUBNdgB9MkbTTNmerVKKOmCCHaVbzIb6UFPXf1MifSFyT+wTeGZk6WZLgRIDsTAZ5Q== dependencies: "@babel/runtime-corejs3" "^7.26.10" - "@swagger-api/apidom-ast" "^1.0.0-rc.3" - "@swagger-api/apidom-core" "^1.0.0-rc.3" - 
"@swagger-api/apidom-error" "^1.0.0-rc.3" + "@swagger-api/apidom-core" "^1.6.0" + "@swagger-api/apidom-ns-openapi-3-2" "^1.6.0" + "@swagger-api/apidom-parser-adapter-yaml-1-2" "^1.6.0" + "@types/ramda" "~0.30.0" + ramda "~0.30.0" + ramda-adjunct "^5.0.0" + +"@swagger-api/apidom-parser-adapter-yaml-1-2@^1.6.0": + version "1.6.0" + resolved "https://registry.yarnpkg.com/@swagger-api/apidom-parser-adapter-yaml-1-2/-/apidom-parser-adapter-yaml-1-2-1.6.0.tgz#3bd4c52755f39c76a159645aadc9f96bec9156cd" + integrity sha512-UwSE5pPUJ+ag7ZCbesgx/SJ8zUD3Sx+2U4AD3/1G1EJ+0gb7FMYgihuOT8ujmBfZVGGm3HMIEIa1w3zha08v2g== + dependencies: + "@babel/runtime-corejs3" "^7.26.10" + "@swagger-api/apidom-ast" "^1.6.0" + "@swagger-api/apidom-core" "^1.6.0" + "@swagger-api/apidom-error" "^1.6.0" "@tree-sitter-grammars/tree-sitter-yaml" "=0.7.1" "@types/ramda" "~0.30.0" ramda "~0.30.0" @@ -3182,41 +3379,45 @@ tree-sitter "=0.22.4" web-tree-sitter "=0.24.5" -"@swagger-api/apidom-reference@^1.0.0-rc.1": - version "1.0.0-rc.3" - resolved "https://registry.yarnpkg.com/@swagger-api/apidom-reference/-/apidom-reference-1.0.0-rc.3.tgz#fbce3bf7a1b9a60337cd1c6e82fabe7560df38ba" - integrity sha512-xZ9B6lGpdlHGSZGEhYe/MAyULCN4d+w4LKK5P1C/i6W6AU4iDEMjMjSawRV9ptJcObnu9ArEe92rgI7XS6s0TQ== +"@swagger-api/apidom-reference@^1.6.0": + version "1.6.0" + resolved "https://registry.yarnpkg.com/@swagger-api/apidom-reference/-/apidom-reference-1.6.0.tgz#1060b54d135b1888850678cd35d862ffb1ac6f77" + integrity sha512-gYTDfWQM1heqrCCrCsZH+EWDyAkIGqEJnSJcVWKngwOkXJKeUwat8p1TOW4q3rkaTT+fBaYbrjTr9SkFtVbdMg== dependencies: "@babel/runtime-corejs3" "^7.26.10" - "@swagger-api/apidom-core" "^1.0.0-rc.3" - "@swagger-api/apidom-error" "^1.0.0-rc.3" + "@swagger-api/apidom-core" "^1.6.0" + "@swagger-api/apidom-error" "^1.6.0" "@types/ramda" "~0.30.0" axios "^1.12.2" - minimatch "^7.4.3" - process "^0.11.10" + minimatch "^10.2.1" ramda "~0.30.0" ramda-adjunct "^5.0.0" optionalDependencies: - "@swagger-api/apidom-json-pointer" 
"^1.0.0-rc.0" - "@swagger-api/apidom-ns-arazzo-1" "^1.0.0-rc.0" - "@swagger-api/apidom-ns-asyncapi-2" "^1.0.0-rc.0" - "@swagger-api/apidom-ns-openapi-2" "^1.0.0-rc.0" - "@swagger-api/apidom-ns-openapi-3-0" "^1.0.0-rc.0" - "@swagger-api/apidom-ns-openapi-3-1" "^1.0.0-rc.0" - "@swagger-api/apidom-parser-adapter-api-design-systems-json" "^1.0.0-rc.0" - "@swagger-api/apidom-parser-adapter-api-design-systems-yaml" "^1.0.0-rc.0" - "@swagger-api/apidom-parser-adapter-arazzo-json-1" "^1.0.0-rc.0" - "@swagger-api/apidom-parser-adapter-arazzo-yaml-1" "^1.0.0-rc.0" - "@swagger-api/apidom-parser-adapter-asyncapi-json-2" "^1.0.0-rc.0" - "@swagger-api/apidom-parser-adapter-asyncapi-yaml-2" "^1.0.0-rc.0" - "@swagger-api/apidom-parser-adapter-json" "^1.0.0-rc.0" - "@swagger-api/apidom-parser-adapter-openapi-json-2" "^1.0.0-rc.0" - "@swagger-api/apidom-parser-adapter-openapi-json-3-0" "^1.0.0-rc.0" - "@swagger-api/apidom-parser-adapter-openapi-json-3-1" "^1.0.0-rc.0" - "@swagger-api/apidom-parser-adapter-openapi-yaml-2" "^1.0.0-rc.0" - "@swagger-api/apidom-parser-adapter-openapi-yaml-3-0" "^1.0.0-rc.0" - "@swagger-api/apidom-parser-adapter-openapi-yaml-3-1" "^1.0.0-rc.0" - "@swagger-api/apidom-parser-adapter-yaml-1-2" "^1.0.0-rc.0" + "@swagger-api/apidom-json-pointer" "^1.6.0" + "@swagger-api/apidom-ns-arazzo-1" "^1.6.0" + "@swagger-api/apidom-ns-asyncapi-2" "^1.6.0" + "@swagger-api/apidom-ns-openapi-2" "^1.6.0" + "@swagger-api/apidom-ns-openapi-3-0" "^1.6.0" + "@swagger-api/apidom-ns-openapi-3-1" "^1.6.0" + "@swagger-api/apidom-ns-openapi-3-2" "^1.6.0" + "@swagger-api/apidom-parser-adapter-api-design-systems-json" "^1.6.0" + "@swagger-api/apidom-parser-adapter-api-design-systems-yaml" "^1.6.0" + "@swagger-api/apidom-parser-adapter-arazzo-json-1" "^1.6.0" + "@swagger-api/apidom-parser-adapter-arazzo-yaml-1" "^1.6.0" + "@swagger-api/apidom-parser-adapter-asyncapi-json-2" "^1.6.0" + "@swagger-api/apidom-parser-adapter-asyncapi-json-3" "^1.6.0" + 
"@swagger-api/apidom-parser-adapter-asyncapi-yaml-2" "^1.6.0" + "@swagger-api/apidom-parser-adapter-asyncapi-yaml-3" "^1.6.0" + "@swagger-api/apidom-parser-adapter-json" "^1.6.0" + "@swagger-api/apidom-parser-adapter-openapi-json-2" "^1.6.0" + "@swagger-api/apidom-parser-adapter-openapi-json-3-0" "^1.6.0" + "@swagger-api/apidom-parser-adapter-openapi-json-3-1" "^1.6.0" + "@swagger-api/apidom-parser-adapter-openapi-json-3-2" "^1.6.0" + "@swagger-api/apidom-parser-adapter-openapi-yaml-2" "^1.6.0" + "@swagger-api/apidom-parser-adapter-openapi-yaml-3-0" "^1.6.0" + "@swagger-api/apidom-parser-adapter-openapi-yaml-3-1" "^1.6.0" + "@swagger-api/apidom-parser-adapter-openapi-yaml-3-2" "^1.6.0" + "@swagger-api/apidom-parser-adapter-yaml-1-2" "^1.6.0" "@swaggerexpert/cookie@^2.0.2": version "2.0.2" @@ -3258,10 +3459,10 @@ picocolors "^1.1.1" redent "^3.0.0" -"@testing-library/react@16.3.0": - version "16.3.0" - resolved "https://registry.yarnpkg.com/@testing-library/react/-/react-16.3.0.tgz#3a85bb9bdebf180cd76dba16454e242564d598a6" - integrity sha512-kFSyxiEDwv1WLl2fgsq6pPBbw5aWKrsY2/noi1Id0TK0UParSF62oFQFGHXIyaG4pp2tEub/Zlel+fjjZILDsw== +"@testing-library/react@16.3.2": + version "16.3.2" + resolved "https://registry.yarnpkg.com/@testing-library/react/-/react-16.3.2.tgz#672883b7acb8e775fc0492d9e9d25e06e89786d0" + integrity sha512-XU5/SytQM+ykqMnAnvB2umaJNIOsLF3PVv//1Ew4CTcpz0/BRyy/af40qqrt7SjKpDdT1saBMc42CUok5gaw+g== dependencies: "@babel/runtime" "^7.12.5" @@ -3323,11 +3524,21 @@ dependencies: "@babel/types" "^7.3.0" +"@types/d3-array@^3.2.2": + version "3.2.2" + resolved "https://registry.yarnpkg.com/@types/d3-array/-/d3-array-3.2.2.tgz#e02151464d02d4a1b44646d0fcdb93faf88fde8c" + integrity sha512-hOLWVbm7uRza0BYXpIIW5pxfrKe0W+D5lrFiAEYR+pb6w3N2SwSMaJbXdUfSEv+dT4MfHBLtn5js0LAWaO6otw== + "@types/d3-color@*", "@types/d3-color@^3.1.3": version "3.1.3" resolved "https://registry.yarnpkg.com/@types/d3-color/-/d3-color-3.1.3.tgz#368c961a18de721da8200e80bf3943fb53136af2" 
integrity sha512-iO90scth9WAbmgv7ogoq57O9YpKmFBbmoEoCHDB2xMBY0+/KVrqAaCDyCE16dUspeOvIxFFRI+0sEtqDqy2b4A== +"@types/d3-format@^3.0.4": + version "3.0.4" + resolved "https://registry.yarnpkg.com/@types/d3-format/-/d3-format-3.0.4.tgz#b1e4465644ddb3fdf3a263febb240a6cd616de90" + integrity sha512-fALi2aI6shfg7vM5KiR1wNJnZ7r6UuggVqtDA+xiEdPZQwy/trcQaHnwShLuLdta2rTymCNpxYTiMZX/e09F4g== + "@types/d3-interpolate@^3.0.4": version "3.0.4" resolved "https://registry.yarnpkg.com/@types/d3-interpolate/-/d3-interpolate-3.0.4.tgz#412b90e84870285f2ff8a846c6eb60344f12a41c" @@ -3335,23 +3546,11 @@ dependencies: "@types/d3-color" "*" -"@types/d3-path@*": +"@types/d3-path@*", "@types/d3-path@^3.1.1": version "3.1.1" resolved "https://registry.yarnpkg.com/@types/d3-path/-/d3-path-3.1.1.tgz#f632b380c3aca1dba8e34aa049bcd6a4af23df8a" integrity sha512-VMZBYyQvbGmWyWVea0EHs/BwLgxc+MKi1zLDCONksozI4YJMcTt8ZEuIR4Sb1MMTE8MMW49v0IwI5+b7RmfWlg== -"@types/d3-path@^1": - version "1.0.11" - resolved "https://registry.yarnpkg.com/@types/d3-path/-/d3-path-1.0.11.tgz#45420fee2d93387083b34eae4fe6d996edf482bc" - integrity sha512-4pQMp8ldf7UaB/gR8Fvvy69psNHkTpD/pVw3vmEi8iZAB9EPMBruB1JvHO4BIq9QkUUd2lV1F5YXpMNj7JPBpw== - -"@types/d3-sankey@^0.12.4": - version "0.12.5" - resolved "https://registry.yarnpkg.com/@types/d3-sankey/-/d3-sankey-0.12.5.tgz#9b79ff5768250cfd8c6340b167c409a0374f40bd" - integrity sha512-/3RZSew0cLAtzGQ+C89hq/Rp3H20QJuVRSqFy6RKLe7E0B8kd2iOS1oBsodrgds4PcNVpqWhdUEng/SHvBcJ6Q== - dependencies: - "@types/d3-shape" "^1" - "@types/d3-scale@^4.0.9": version "4.0.9" resolved "https://registry.yarnpkg.com/@types/d3-scale/-/d3-scale-4.0.9.tgz#57a2f707242e6fe1de81ad7bfcccaaf606179afb" @@ -3359,13 +3558,6 @@ dependencies: "@types/d3-time" "*" -"@types/d3-shape@^1": - version "1.3.12" - resolved "https://registry.yarnpkg.com/@types/d3-shape/-/d3-shape-1.3.12.tgz#8f2f9f7a12e631ce6700d6d55b84795ce2c8b259" - integrity 
sha512-8oMzcd4+poSLGgV0R1Q1rOlx/xdmozS4Xab7np0eamFFUYq71AU9pOCJEFnkXW2aI/oXdVYJzw6pssbSut7Z9Q== - dependencies: - "@types/d3-path" "^1" - "@types/d3-shape@^3.1.7": version "3.1.7" resolved "https://registry.yarnpkg.com/@types/d3-shape/-/d3-shape-3.1.7.tgz#2b7b423dc2dfe69c8c93596e673e37443348c555" @@ -3373,6 +3565,11 @@ dependencies: "@types/d3-path" "*" +"@types/d3-time-format@^4.0.3": + version "4.0.3" + resolved "https://registry.yarnpkg.com/@types/d3-time-format/-/d3-time-format-4.0.3.tgz#d6bc1e6b6a7db69cccfbbdd4c34b70632d9e9db2" + integrity sha512-5xg9rC+wWL8kdDj153qZcsJ0FWiFt0J5RB6LYUNZjwSnesfblqrI/bJ1wBdJ8OQfncgbJG5+2F+qfqnqyzYxyg== + "@types/d3-time@*", "@types/d3-time@^3.0.4": version "3.0.4" resolved "https://registry.yarnpkg.com/@types/d3-time/-/d3-time-3.0.4.tgz#8472feecd639691450dd8000eb33edd444e1323f" @@ -3448,11 +3645,6 @@ dependencies: undici-types "~7.16.0" -"@types/node@^18.17.5": - version "18.18.6" - resolved "https://registry.yarnpkg.com/@types/node/-/node-18.18.6.tgz#26da694f75cdb057750f49d099da5e3f3824cb3e" - integrity sha512-wf3Vz+jCmOQ2HV1YUJuCWdL64adYxumkrxtc+H1VUQlnQI04+5HtH+qZCOE21lBE7gIrt+CwX2Wv8Acrw5Ak6w== - "@types/parse-json@^4.0.0": version "4.0.0" resolved "https://registry.yarnpkg.com/@types/parse-json/-/parse-json-4.0.0.tgz#2f8bb441434d163b35fb8ffdccd7138927ffb8c0" @@ -3485,23 +3677,13 @@ resolved "https://registry.yarnpkg.com/@types/react-transition-group/-/react-transition-group-4.4.12.tgz#b5d76568485b02a307238270bfe96cb51ee2a044" integrity sha512-8TV6R3h2j7a91c+1DXdJi3Syo69zzIZbz7Lg5tORM5LEJG7X/E6a1V3drRyBRZq7/utz7A+c4OgYLiLcYGHG6w== -"@types/react@*", "@types/react@19.2.6": - version "19.2.6" - resolved "https://registry.yarnpkg.com/@types/react/-/react-19.2.6.tgz#d27db1ff45012d53980f5589fda925278e1249ca" - integrity sha512-p/jUvulfgU7oKtj6Xpk8cA2Y1xKTtICGpJYeJXz2YVO2UcvjQgeRMLDGfDeqeRW2Ta+0QNFwcc8X3GH8SxZz6w== +"@types/react@*", "@types/react@19.2.14": + version "19.2.14" + resolved 
"https://registry.yarnpkg.com/@types/react/-/react-19.2.14.tgz#39604929b5e3957e3a6fa0001dafb17c7af70bad" + integrity sha512-ilcTH/UniCkMdtexkoCN0bI7pMcJDvmQFPvuPvmEaYA/NSfFTAgdUSLAoVjaRJm7+6PvcM+q1zYOwS4wTYMF9w== dependencies: csstype "^3.2.2" -"@types/sinonjs__fake-timers@8.1.1": - version "8.1.1" - resolved "https://registry.yarnpkg.com/@types/sinonjs__fake-timers/-/sinonjs__fake-timers-8.1.1.tgz#b49c2c70150141a15e0fa7e79cf1f92a72934ce3" - integrity sha512-0kSuKjAS0TrGLJ0M/+8MaFkGsQhZpB6pxOmvS3K8FYI72K//YmdfoW9X2qPsAKh1mkwxGD5zib9s1FIFed6E8g== - -"@types/sizzle@^2.3.2": - version "2.3.3" - resolved "https://registry.yarnpkg.com/@types/sizzle/-/sizzle-2.3.3.tgz#ff5e2f1902969d305225a047c8a0fd5c915cebef" - integrity sha512-JYM8x9EGF163bEyhdJBpR2QX1R5naCJHC8ucJylJ3w9/CVBaskdQ8WqBf8MmQrd1kRvp/a4TS8HJ+bxzR7ZJYQ== - "@types/stack-utils@^2.0.3": version "2.0.3" resolved "https://registry.yarnpkg.com/@types/stack-utils/-/stack-utils-2.0.3.tgz#6209321eb2c1712a7e7466422b8cb1fc0d9dd5d8" @@ -3556,13 +3738,6 @@ dependencies: "@types/yargs-parser" "*" -"@types/yauzl@^2.9.1": - version "2.10.0" - resolved "https://registry.yarnpkg.com/@types/yauzl/-/yauzl-2.10.0.tgz#b3248295276cf8c6f153ebe6a9aba0c988cb2599" - integrity sha512-Cn6WYCm0tXv8p6k+A8PvbDG763EDpBoTzHdA+Q/MF6H3sapGjCm9NzoaJncJS9tUKSuCoDs9XHxYYsQDgxR6kw== - dependencies: - "@types/node" "*" - "@ungap/structured-clone@^1.3.0": version "1.3.0" resolved "https://registry.yarnpkg.com/@ungap/structured-clone/-/structured-clone-1.3.0.tgz#d06bbb384ebcf6c505fde1c3d0ed4ddffe0aaff8" @@ -3670,20 +3845,7 @@ agent-base@^7.1.0, agent-base@^7.1.2: resolved "https://registry.yarnpkg.com/agent-base/-/agent-base-7.1.4.tgz#e3cd76d4c548ee895d3c3fd8dc1f6c5b9032e7a8" integrity sha512-MnA+YT8fwfJPgBx3m60MNqakm30XOkyIoH1y6huTQvC0PwZG7ki8NacLBcrPbNoo8vEZy7Jpuk7+jMO+CUovTQ== -aggregate-error@^3.0.0: - version "3.1.0" - resolved "https://registry.yarnpkg.com/aggregate-error/-/aggregate-error-3.1.0.tgz#92670ff50f5359bdb7a3e0d40d0ec30c5737687a" - 
integrity sha512-4I7Td01quW/RpocfNayFdFVk1qSuoh0E7JrbRJ16nH01HhKFQ88INq9Sd+nd72zqRySlr9BmDA8xlEJ6vJMrYA== - dependencies: - clean-stack "^2.0.0" - indent-string "^4.0.0" - -ansi-colors@^4.1.1: - version "4.1.3" - resolved "https://registry.yarnpkg.com/ansi-colors/-/ansi-colors-4.1.3.tgz#37611340eb2243e70cc604cad35d63270d48781b" - integrity sha512-/6w/C21Pm1A7aZitlI5Ni/2J6FFQN8i1Cvz3kHABAAbw93v/NlvKdVOqz7CCWz/3iv/JplRSEEZ83XION15ovw== - -ansi-escapes@^4.3.0, ansi-escapes@^4.3.2: +ansi-escapes@^4.3.2: version "4.3.2" resolved "https://registry.yarnpkg.com/ansi-escapes/-/ansi-escapes-4.3.2.tgz#6b2291d1db7d98b6521d5f1efa42d0f3a9feb65e" integrity sha512-gKXj5ALrKWQLsYG9jlTRmR/xKluxHV+Z9QEwNIgCfM1/uwPMCuzVVnh5mwTd+OuBZcwSIMbqssNWRm1lE51QaQ== @@ -3737,11 +3899,6 @@ apg-lite@^1.0.3, apg-lite@^1.0.4: resolved "https://registry.yarnpkg.com/apg-lite/-/apg-lite-1.0.5.tgz#17aee0e8452cb4ce7c7018dc54e53046cb31878b" integrity sha512-SlI+nLMQDzCZfS39ihzjGp3JNBQfJXyMi6cg9tkLOCPVErgFsUIAEdO9IezR7kbP5Xd0ozcPNQBkf9TO5cHgWw== -arch@^2.2.0: - version "2.2.0" - resolved "https://registry.yarnpkg.com/arch/-/arch-2.2.0.tgz#1bc47818f305764f23ab3306b0bfc086c5a29d11" - integrity sha512-Of/R0wqp83cgHozfIYLbBMnej79U/SVGOOyuB3VVFv1NRM/PSFMK12x9KVtiYzJqmnU5WR2qp0Z5rHb7sWGnFQ== - argparse@^1.0.10, argparse@^1.0.7: version "1.0.10" resolved "https://registry.yarnpkg.com/argparse/-/argparse-1.0.10.tgz#bcd6791ea5ae09725e17e5ad988134cd40b3d911" @@ -3773,38 +3930,11 @@ asap@~2.0.6: resolved "https://registry.yarnpkg.com/asap/-/asap-2.0.6.tgz#e50347611d7e690943208bbdafebcbc2fb866d46" integrity sha512-BSHWgDSAiKs50o2Re8ppvp3seVHXSRM44cdSsT9FfNEUUZLOGWVCsiWaRPWM1Znn+mqZ1OfVZ3z3DWEzSp7hRA== -asn1@~0.2.3: - version "0.2.6" - resolved "https://registry.yarnpkg.com/asn1/-/asn1-0.2.6.tgz#0d3a7bb6e64e02a90c0303b31f292868ea09a08d" - integrity sha512-ix/FxPn0MDjeyJ7i/yoHGFt/EX6LyNbxSEhPPXODPL+KB0VPk86UYfL0lMdy+KCnv+fmvIzySwaK5COwqVbWTQ== - dependencies: - safer-buffer "~2.1.0" - -assert-plus@1.0.0, 
assert-plus@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/assert-plus/-/assert-plus-1.0.0.tgz#f12e0f3c5d77b0b1cdd9146942e4e96c1e4dd525" - integrity sha512-NfJ4UzBCcQGLDlQq7nHxH+tv3kyZ0hHQqF5BO6J7tNJeP5do1llPr8dZ8zHonfhAu0PHAdMkSo+8o0wxg9lZWw== - -astral-regex@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/astral-regex/-/astral-regex-2.0.0.tgz#483143c567aeed4785759c0865786dc77d7d2e31" - integrity sha512-Z7tMw1ytTXt5jqMcOP+OQteU1VuNK9Y02uuJtKQ1Sv69jXQKKg5cibLwGJow8yzZP+eAc18EmLGPal0bp36rvQ== - -async@^3.2.0: - version "3.2.4" - resolved "https://registry.yarnpkg.com/async/-/async-3.2.4.tgz#2d22e00f8cddeb5fde5dd33522b56d1cf569a81c" - integrity sha512-iAB+JbDEGXhyIUavoDl9WP/Jj106Kz9DEn1DPgYw5ruDn0e3Wgi3sKFm55sASdGBNOQB8F59d9qQ7deqrHA8wQ== - asynckit@^0.4.0: version "0.4.0" resolved "https://registry.yarnpkg.com/asynckit/-/asynckit-0.4.0.tgz#c79ed97f7f34cb8f2ba1bc9790bcc366474b4b79" integrity sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q== -at-least-node@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/at-least-node/-/at-least-node-1.0.0.tgz#602cd4b46e844ad4effc92a8011a3c46e0238dc2" - integrity sha512-+q/t7Ekv1EDY2l6Gda6LLiX14rU9TV20Wa3ofeQmwPFZbOMo9DXrLbOjFaaclkXKWidIaopwAObQDqwWtGUjqg== - autolinker@^3.11.0: version "3.16.2" resolved "https://registry.yarnpkg.com/autolinker/-/autolinker-3.16.2.tgz#6bb4f32432fc111b65659336863e653973bfbcc9" @@ -3824,16 +3954,6 @@ available-typed-arrays@^1.0.7: dependencies: possible-typed-array-names "^1.0.0" -aws-sign2@~0.7.0: - version "0.7.0" - resolved "https://registry.yarnpkg.com/aws-sign2/-/aws-sign2-0.7.0.tgz#b46e890934a9591f2d2f6f86d7e6a9f1b3fe76a8" - integrity sha512-08kcGqnYf/YmjoRhfxyu+CLxBjUtHLXLXX/vUfx9l2LYzG3c1m61nrpyFUZI6zeS+Li/wWMMidD9KgrqtGq3mA== - -aws4@^1.8.0: - version "1.11.0" - resolved "https://registry.yarnpkg.com/aws4/-/aws4-1.11.0.tgz#d61f46d83b2519250e2784daf5b09479a8b41c59" - integrity 
sha512-xh1Rl34h6Fi1DC2WWKfxUTVqRsNnr6LsKz2+hfwDxQJWmrx8+c7ylaqBMcHfl1U1r2dsifOvKX3LQuLNZ+XSvA== - axios@^1.12.2: version "1.13.2" resolved "https://registry.yarnpkg.com/axios/-/axios-1.13.2.tgz#9ada120b7b5ab24509553ec3e40123521117f687" @@ -3969,6 +4089,11 @@ balanced-match@^1.0.0: resolved "https://registry.yarnpkg.com/balanced-match/-/balanced-match-1.0.2.tgz#e83e3a7e3f300b34cb9d87f615fa0cbf357690ee" integrity sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw== +balanced-match@^4.0.2: + version "4.0.4" + resolved "https://registry.yarnpkg.com/balanced-match/-/balanced-match-4.0.4.tgz#bfb10662feed8196a2c62e7c68e17720c274179a" + integrity sha512-BLrgEcRTwX2o6gGxGOCNyMvGSp35YofuYzw9h1IMTRmKqttAZZVU67bdb9Pr2vUHA8+j3i2tJfjO6C6+4myGTA== + base64-js@^1.3.1, base64-js@^1.5.1: version "1.5.1" resolved "https://registry.yarnpkg.com/base64-js/-/base64-js-1.5.1.tgz#1b1b440160a5bf7ad40b650f095963481903930a" @@ -3979,28 +4104,11 @@ baseline-browser-mapping@^2.8.25: resolved "https://registry.yarnpkg.com/baseline-browser-mapping/-/baseline-browser-mapping-2.8.28.tgz#9ef511f5a7c19d74a94cafcbf951608398e9bdb3" integrity sha512-gYjt7OIqdM0PcttNYP2aVrr2G0bMALkBaoehD4BuRGjAOtipg0b6wHg1yNL+s5zSnLZZrGHOw4IrND8CD+3oIQ== -bcrypt-pbkdf@^1.0.0: - version "1.0.2" - resolved "https://registry.yarnpkg.com/bcrypt-pbkdf/-/bcrypt-pbkdf-1.0.2.tgz#a4301d389b6a43f9b67ff3ca11a3f6637e360e9e" - integrity sha512-qeFIXtP4MSoi6NLqO12WfqARWWuCKi2Rn/9hJLEmtB5yTNr9DqFWkJRCf2qShWzPeAMRnOgCrq0sg/KLv5ES9w== - dependencies: - tweetnacl "^0.14.3" - bezier-easing@^2.1.0: version "2.1.0" resolved "https://registry.yarnpkg.com/bezier-easing/-/bezier-easing-2.1.0.tgz#c04dfe8b926d6ecaca1813d69ff179b7c2025d86" integrity sha512-gbIqZ/eslnUFC1tjEvtz0sgx+xTK20wDnYMIA27VA04R7w6xxXQPZDbibjA9DTWZRA2CXtwHykkVzlCaAJAZig== -blob-util@^2.0.2: - version "2.0.2" - resolved "https://registry.yarnpkg.com/blob-util/-/blob-util-2.0.2.tgz#3b4e3c281111bb7f11128518006cdc60b403a1eb" - integrity 
sha512-T7JQa+zsXXEa6/8ZhHcQEW1UFfVM49Ts65uBkFL6fz2QmrElqmbajIDJvuA0tEhRe5eIjpV9ZF+0RfZR9voJFQ== - -bluebird@^3.7.2: - version "3.7.2" - resolved "https://registry.yarnpkg.com/bluebird/-/bluebird-3.7.2.tgz#9f229c15be272454ffa973ace0dbee79a1b0c36f" - integrity sha512-XpNj6GDQzdfW+r2Wnn7xiSAd7TM3jzkxGXBGTtWKuSXv1xUV+azxAm8jdWZN06QTQk+2N2XB9jRDkvbmQmcRtg== - brace-expansion@^1.1.7: version "1.1.11" resolved "https://registry.yarnpkg.com/brace-expansion/-/brace-expansion-1.1.11.tgz#3c7fcbf529d87226f3d2f52b966ff5271eb441dd" @@ -4009,12 +4117,12 @@ brace-expansion@^1.1.7: balanced-match "^1.0.0" concat-map "0.0.1" -brace-expansion@^2.0.1: - version "2.0.1" - resolved "https://registry.yarnpkg.com/brace-expansion/-/brace-expansion-2.0.1.tgz#1edc459e0f0c548486ecf9fc99f2221364b9a0ae" - integrity sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA== +brace-expansion@^5.0.2: + version "5.0.4" + resolved "https://registry.yarnpkg.com/brace-expansion/-/brace-expansion-5.0.4.tgz#614daaecd0a688f660bbbc909a8748c3d80d4336" + integrity sha512-h+DEnpVvxmfVefa4jFbCf5HdH5YMDXRsmKflpf1pILZWRFlTbJpxeU55nJl4Smt5HQaGzg1o6RHFPJaOqnmBDg== dependencies: - balanced-match "^1.0.0" + balanced-match "^4.0.2" braces@^3.0.3: version "3.0.3" @@ -4051,24 +4159,11 @@ bser@2.1.1: dependencies: node-int64 "^0.4.0" -buffer-crc32@~0.2.3: - version "0.2.13" - resolved "https://registry.yarnpkg.com/buffer-crc32/-/buffer-crc32-0.2.13.tgz#0d333e3f00eac50aa1454abd30ef8c2a5d9a7242" - integrity sha512-VO9Ht/+p3SN7SKWqcrgEzjGbRSJYTx+Q1pTQC0wrWqHx0vpJraQ6GtHx8tvcg1rlK1byhU5gccxgOgj7B0TDkQ== - buffer-from@^1.0.0: version "1.1.2" resolved "https://registry.yarnpkg.com/buffer-from/-/buffer-from-1.1.2.tgz#2b146a6fd72e80b4f55d255f35ed59a3a9a41bd5" integrity sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ== -buffer@^5.6.0: - version "5.7.1" - resolved 
"https://registry.yarnpkg.com/buffer/-/buffer-5.7.1.tgz#ba62e7c13133053582197160851a8f648e99eed0" - integrity sha512-EHcyIPBQ4BSGlvjB16k5KgAJ27CIsHY/2JBmCRReo48y9rQ3MaUzWX3KVlBa4U7MyX02HdVj0K7C3WaB3ju7FQ== - dependencies: - base64-js "^1.3.1" - ieee754 "^1.1.13" - buffer@^6.0.3: version "6.0.3" resolved "https://registry.yarnpkg.com/buffer/-/buffer-6.0.3.tgz#2ace578459cc8fbe2a70aaa8f52ee63b6a74c6c6" @@ -4077,11 +4172,6 @@ buffer@^6.0.3: base64-js "^1.3.1" ieee754 "^1.2.1" -cachedir@^2.3.0: - version "2.3.0" - resolved "https://registry.yarnpkg.com/cachedir/-/cachedir-2.3.0.tgz#0c75892a052198f0b21c7c1804d8331edfcae0e8" - integrity sha512-A+Fezp4zxnit6FanDmv9EqXNAi3vt9DWp51/71UEhXukb7QUuvtv9344h91dyAxuTLoSYJFU299qzR3tzwPAhw== - call-bind-apply-helpers@^1.0.0, call-bind-apply-helpers@^1.0.1, call-bind-apply-helpers@^1.0.2: version "1.0.2" resolved "https://registry.yarnpkg.com/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz#4b5428c222be985d79c3d82657479dbe0b59b2d6" @@ -4146,11 +4236,6 @@ caniuse-lite@^1.0.30001754: resolved "https://registry.yarnpkg.com/caniuse-lite/-/caniuse-lite-1.0.30001754.tgz#7758299d9a72cce4e6b038788a15b12b44002759" integrity sha512-x6OeBXueoAceOmotzx3PO4Zpt4rzpeIFsSr6AAePTZxSkXiYDUmpypEl7e2+8NCd9bD7bXjqyef8CJYPC1jfxg== -caseless@~0.12.0: - version "0.12.0" - resolved "https://registry.yarnpkg.com/caseless/-/caseless-0.12.0.tgz#1b681c21ff84033c826543090689420d187151dc" - integrity sha512-4tYFyifaFfGacoiObjJegolkwSU4xQNGbVgUiNYVUxbQ2x2lUsFvY4hVgVzGiIe6WLOPqycWXA40l+PWsxthUw== - chalk@^2.0.0, chalk@^2.4.2: version "2.4.2" resolved "https://registry.yarnpkg.com/chalk/-/chalk-2.4.2.tgz#cd42541677a54333cf541a49108c1432b44c9424" @@ -4160,7 +4245,7 @@ chalk@^2.0.0, chalk@^2.4.2: escape-string-regexp "^1.0.5" supports-color "^5.3.0" -chalk@^4.1.0, chalk@^4.1.2: +chalk@^4.1.2: version "4.1.2" resolved "https://registry.yarnpkg.com/chalk/-/chalk-4.1.2.tgz#aac4e2b7734a740867aeb16bf02aad556a1e7a01" integrity 
sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA== @@ -4188,16 +4273,6 @@ character-reference-invalid@^2.0.0: resolved "https://registry.yarnpkg.com/character-reference-invalid/-/character-reference-invalid-2.0.1.tgz#85c66b041e43b47210faf401278abf808ac45cb9" integrity sha512-iBZ4F4wRbyORVsu0jPV7gXkOsGYjGHPmAyv+HiHG8gi5PtC9KI2j1+v8/tlibRvjoWX027ypmG/n0HtO5t7unw== -check-more-types@^2.24.0: - version "2.24.0" - resolved "https://registry.yarnpkg.com/check-more-types/-/check-more-types-2.24.0.tgz#1420ffb10fd444dcfc79b43891bbfffd32a84600" - integrity sha512-Pj779qHxV2tuapviy1bSZNEL1maXr13bPYpsvSDB68HlYcYuhlDrmGd63i0JHMCLKzc7rUSNIrpdJlhVlNwrxA== - -ci-info@^3.2.0: - version "3.7.0" - resolved "https://registry.yarnpkg.com/ci-info/-/ci-info-3.7.0.tgz#6d01b3696c59915b6ce057e4aa4adfc2fa25f5ef" - integrity sha512-2CpRNYmImPx+RXKLq6jko/L07phmS9I02TyqkcNU20GCF/GgaWvc58hPtjxDX8lPpkdwc9sNh72V9k00S7ezog== - ci-info@^4.2.0: version "4.3.1" resolved "https://registry.yarnpkg.com/ci-info/-/ci-info-4.3.1.tgz#355ad571920810b5623e11d40232f443f16f1daa" @@ -4213,35 +4288,6 @@ classnames@^2.5.1: resolved "https://registry.yarnpkg.com/classnames/-/classnames-2.5.1.tgz#ba774c614be0f016da105c858e7159eae8e7687b" integrity sha512-saHYOzhIQs6wy2sVxTM6bUDsQO4F50V9RQ22qBpEdCW+I+/Wmke2HOl6lS6dTpdxVhb88/I6+Hs+438c3lfUow== -clean-stack@^2.0.0: - version "2.2.0" - resolved "https://registry.yarnpkg.com/clean-stack/-/clean-stack-2.2.0.tgz#ee8472dbb129e727b31e8a10a427dee9dfe4008b" - integrity sha512-4diC9HaTE+KRAMWhDhrGOECgWZxoevMc5TlkObMqNSsVU62PYzXZ/SMTjzyGAFF1YusgxGcSWTEXBhp0CPwQ1A== - -cli-cursor@^3.1.0: - version "3.1.0" - resolved "https://registry.yarnpkg.com/cli-cursor/-/cli-cursor-3.1.0.tgz#264305a7ae490d1d03bf0c9ba7c925d1753af307" - integrity sha512-I/zHAwsKf9FqGoXM4WWRACob9+SNukZTd94DWF57E4toouRulbCxcUh6RKUEOQlYTHJnzkPMySvPNaaSLNfLZw== - dependencies: - restore-cursor "^3.1.0" - -cli-table3@~0.6.1: - version "0.6.3" - resolved 
"https://registry.yarnpkg.com/cli-table3/-/cli-table3-0.6.3.tgz#61ab765aac156b52f222954ffc607a6f01dbeeb2" - integrity sha512-w5Jac5SykAeZJKntOxJCrm63Eg5/4dhMWIcuTbo9rpE+brgaSZo0RuNJZeOyMgsUdhDeojvgyQLmjI+K50ZGyg== - dependencies: - string-width "^4.2.0" - optionalDependencies: - "@colors/colors" "1.5.0" - -cli-truncate@^2.1.0: - version "2.1.0" - resolved "https://registry.yarnpkg.com/cli-truncate/-/cli-truncate-2.1.0.tgz#c39e28bf05edcde5be3b98992a22deed5a2b93c7" - integrity sha512-n8fOixwDD6b/ObinzTrp1ZKFzbgvKZvuz/TvejnLn1aQfC6r52XEx85FmuC+3HI+JM7coBRXUvNqEU2PHVrHpg== - dependencies: - slice-ansi "^3.0.0" - string-width "^4.2.0" - cliui@^8.0.1: version "8.0.1" resolved "https://registry.yarnpkg.com/cliui/-/cliui-8.0.1.tgz#0c04b075db02cbfe60dc8e6cf2f5486b1a3608aa" @@ -4290,12 +4336,7 @@ color-name@~1.1.4: resolved "https://registry.yarnpkg.com/color-name/-/color-name-1.1.4.tgz#c2a09a87acbde69543de6f63fa3995c826c536a2" integrity sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA== -colorette@^2.0.16: - version "2.0.19" - resolved "https://registry.yarnpkg.com/colorette/-/colorette-2.0.19.tgz#cdf044f47ad41a0f4b56b3a0d5b4e6e1a2d5a798" - integrity sha512-3tlv/dIP7FWvj3BsbHrGLJ6l/oKh1O3TcgBqMn+yyCagOxc23fyzDS6HypQbgxWbkpDnf52p1LuR4eWDQ/K9WQ== - -combined-stream@^1.0.6, combined-stream@^1.0.8, combined-stream@~1.0.6: +combined-stream@^1.0.8: version "1.0.8" resolved "https://registry.yarnpkg.com/combined-stream/-/combined-stream-1.0.8.tgz#c3d45a8b34fd730631a110a8a2520682b31d5a7f" integrity sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg== @@ -4307,16 +4348,6 @@ comma-separated-tokens@^2.0.0: resolved "https://registry.yarnpkg.com/comma-separated-tokens/-/comma-separated-tokens-2.0.3.tgz#4e89c9458acb61bc8fef19f4529973b2392839ee" integrity sha512-Fu4hJdvzeylCfQPp9SGWidpzrMs7tTrlu6Vb8XGaRGck8QSNZJJp538Wrb60Lax4fPwR64ViY468OIUTbRlGZg== -commander@^6.2.1: - version "6.2.1" - resolved 
"https://registry.yarnpkg.com/commander/-/commander-6.2.1.tgz#0792eb682dfbc325999bb2b84fddddba110ac73c" - integrity sha512-U7VdrJFnJgo4xjrHpTzu0yrHPGImdsmD95ZlgYSEajAn2JKzDhDTPG9kBTefmObL2w/ngeZnilk+OV9CG3d7UA== - -common-tags@^1.8.0: - version "1.8.2" - resolved "https://registry.yarnpkg.com/common-tags/-/common-tags-1.8.2.tgz#94ebb3c076d26032745fd54face7f688ef5ac9c6" - integrity sha512-gk/Z852D2Wtb//0I+kRFNKKE9dIIVirjoqPoA1wJU+XePVXZfGeBpk45+A1rKO4Q43prqWBNY/MiIeRLbPWUaA== - concat-map@0.0.1: version "0.0.1" resolved "https://registry.yarnpkg.com/concat-map/-/concat-map-0.0.1.tgz#d8a96bd77fd68df7793a73036a3ba0d5405d477b" @@ -4361,11 +4392,6 @@ core-js@^3.19.2: resolved "https://registry.yarnpkg.com/core-js/-/core-js-3.27.1.tgz#23cc909b315a6bb4e418bf40a52758af2103ba46" integrity sha512-GutwJLBChfGCpwwhbYoqfv03LAfmiz7e7D/BNxzeMxwQf10GRSzqiOjx7AmtEk+heiD/JWmBuyBPgFtx0Sg1ww== -core-util-is@1.0.2: - version "1.0.2" - resolved "https://registry.yarnpkg.com/core-util-is/-/core-util-is-1.0.2.tgz#b5fd54220aa2bc5ab57aab7140c940754503c1a7" - integrity sha512-3lqz5YjWTYnW6dlDa5TLaTCcShfar1e40rmcJVwCBJC6mWlFuj0eCHIElmG1g5kyuJ/GD+8Wn4FFCcz4gJPfaQ== - cosmiconfig@^7.0.0: version "7.1.0" resolved "https://registry.yarnpkg.com/cosmiconfig/-/cosmiconfig-7.1.0.tgz#1443b9afa596b670082ea46cbd8f6a62b84635f6" @@ -4377,7 +4403,7 @@ cosmiconfig@^7.0.0: path-type "^4.0.0" yaml "^1.10.0" -cross-spawn@^7.0.0, cross-spawn@^7.0.3, cross-spawn@^7.0.6: +cross-spawn@^7.0.3, cross-spawn@^7.0.6: version "7.0.6" resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-7.0.6.tgz#8a58fe78f00dcd70c370451759dfbfaf03e8ee9f" integrity sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA== @@ -4413,7 +4439,7 @@ cssstyle@^4.2.1: "@asamuzakjp/css-color" "^3.2.0" rrweb-cssom "^0.8.0" -csstype@3.1.3, csstype@^3.1.3: +csstype@3.1.3: version "3.1.3" resolved "https://registry.yarnpkg.com/csstype/-/csstype-3.1.3.tgz#d80ff294d114fb0e6ac500fbf85b60137d7eff81" 
integrity sha512-M1uQkMl8rQK/szD0LNhtqxIPLpimGm8sOBwU7lLnCpSbTyY3yeU1Vc7l4KT5zT4s/yOxHH5O7tIuuLOCnLADRw== @@ -4423,68 +4449,12 @@ csstype@^3.0.2: resolved "https://registry.yarnpkg.com/csstype/-/csstype-3.1.1.tgz#841b532c45c758ee546a11d5bd7b7b473c8c30b9" integrity sha512-DJR/VvkAvSZW9bTouZue2sSxDwdTN92uHjqeKVm+0dAqdfNykRzQ95tay8aXMBAAPpUiq4Qcug2L7neoRh2Egw== -csstype@^3.2.2: +csstype@^3.2.2, csstype@^3.2.3: version "3.2.3" resolved "https://registry.yarnpkg.com/csstype/-/csstype-3.2.3.tgz#ec48c0f3e993e50648c86da559e2610995cf989a" integrity sha512-z1HGKcYy2xA8AGQfwrn0PAy+PB7X/GSj3UVJW9qKyn43xWa+gl5nXmU4qqLMRzWVLFC8KusUX8T/0kCiOYpAIQ== -cypress@13.3.2: - version "13.3.2" - resolved "https://registry.yarnpkg.com/cypress/-/cypress-13.3.2.tgz#b4baa64ce37d7874f6bdd8efbc28a9c722c0686f" - integrity sha512-ArLmZObcLC+xxCp7zJZZbhby9FUf5CueLej9dUM4+5j37FTS4iMSgHxQLDu01PydFUvDXcNoIVRCYrHHxD7Ybg== - dependencies: - "@cypress/request" "^3.0.0" - "@cypress/xvfb" "^1.2.4" - "@types/node" "^18.17.5" - "@types/sinonjs__fake-timers" "8.1.1" - "@types/sizzle" "^2.3.2" - arch "^2.2.0" - blob-util "^2.0.2" - bluebird "^3.7.2" - buffer "^5.6.0" - cachedir "^2.3.0" - chalk "^4.1.0" - check-more-types "^2.24.0" - cli-cursor "^3.1.0" - cli-table3 "~0.6.1" - commander "^6.2.1" - common-tags "^1.8.0" - dayjs "^1.10.4" - debug "^4.3.4" - enquirer "^2.3.6" - eventemitter2 "6.4.7" - execa "4.1.0" - executable "^4.1.1" - extract-zip "2.0.1" - figures "^3.2.0" - fs-extra "^9.1.0" - getos "^3.2.1" - is-ci "^3.0.0" - is-installed-globally "~0.4.0" - lazy-ass "^1.6.0" - listr2 "^3.8.3" - lodash "^4.17.21" - log-symbols "^4.0.0" - minimist "^1.2.8" - ospath "^1.2.2" - pretty-bytes "^5.6.0" - process "^0.11.10" - proxy-from-env "1.0.0" - request-progress "^3.0.0" - semver "^7.5.3" - supports-color "^8.1.1" - tmp "~0.2.1" - untildify "^4.0.0" - yauzl "^2.10.0" - -"d3-array@1 - 2": - version "2.12.1" - resolved 
"https://registry.yarnpkg.com/d3-array/-/d3-array-2.12.1.tgz#e20b41aafcdffdf5d50928004ececf815a465e81" - integrity sha512-B0ErZK/66mHtEsR1TkPEEkwdy+WDesimkM5gpZr5Dsg54BiTA5RXtYW5qTLIAcekaS9xfZrzBLF/OAkB3Qn1YQ== - dependencies: - internmap "^1.0.0" - -"d3-array@2 - 3", "d3-array@2.10.0 - 3": +"d3-array@2 - 3", "d3-array@2.10.0 - 3", d3-array@^3.2.4: version "3.2.4" resolved "https://registry.yarnpkg.com/d3-array/-/d3-array-3.2.4.tgz#15fec33b237f97ac5d7c986dc77da273a8ed0bb5" integrity sha512-tdQAmyA18i4J7wprpYq8ClcxZy3SC31QMeByyCFyRt7BVHdREQZ5lpzoe5mFEYZUWe+oq8HBvk9JjpibyEV4Jg== @@ -4501,6 +4471,11 @@ cypress@13.3.2: resolved "https://registry.yarnpkg.com/d3-format/-/d3-format-3.1.0.tgz#9260e23a28ea5cb109e93b21a06e24e2ebd55641" integrity sha512-YyUI6AEuY/Wpt8KWLgZHsIU86atmikuoOmCfommt0LYHiQSPjvX2AcFc38PX0CBpr2RCyZhjex+NS/LPOv6YqA== +d3-format@^3.1.0: + version "3.1.2" + resolved "https://registry.yarnpkg.com/d3-format/-/d3-format-3.1.2.tgz#01fdb46b58beb1f55b10b42ad70b6e344d5eb2ae" + integrity sha512-AJDdYOdnyRDV5b6ArilzCPPwc1ejkHcoyFarqlPqT7zRYjhavcT3uSrqcMvsgh2CgoPbK3RCwyHaVyxYcP2Arg== + "d3-interpolate@1.2.0 - 3", d3-interpolate@^3.0.1: version "3.0.1" resolved "https://registry.yarnpkg.com/d3-interpolate/-/d3-interpolate-3.0.1.tgz#3c47aa5b32c5b3dfb56ef3fd4342078a632b400d" @@ -4508,24 +4483,11 @@ cypress@13.3.2: dependencies: d3-color "1 - 3" -d3-path@1: - version "1.0.9" - resolved "https://registry.yarnpkg.com/d3-path/-/d3-path-1.0.9.tgz#48c050bb1fe8c262493a8caf5524e3e9591701cf" - integrity sha512-VLaYcn81dtHVTjEHd8B+pbe9yHWpXKZUC87PzoFmsFrJqgFwDe/qxfp5MlfsfM1V5E/iVt0MmEbWQ7FVIXh/bg== - d3-path@^3.1.0: version "3.1.0" resolved "https://registry.yarnpkg.com/d3-path/-/d3-path-3.1.0.tgz#22df939032fb5a71ae8b1800d61ddb7851c42526" integrity sha512-p3KP5HCf/bvjBSSKuXid6Zqijx7wIfNW+J/maPs+iwR35at5JCbLUT0LzF1cnjbCHWhqzQTIN2Jpe8pRebIEFQ== -d3-sankey@^0.12.3: - version "0.12.3" - resolved 
"https://registry.yarnpkg.com/d3-sankey/-/d3-sankey-0.12.3.tgz#b3c268627bd72e5d80336e8de6acbfec9d15d01d" - integrity sha512-nQhsBRmM19Ax5xEIPLMY9ZmJ/cDvd1BG3UVvt5h3WRxKg5zGRbvnteTyWAbzeSvlh3tW7ZEmq4VwR5mB3tutmQ== - dependencies: - d3-array "1 - 2" - d3-shape "^1.2.0" - d3-scale@^4.0.2: version "4.0.2" resolved "https://registry.yarnpkg.com/d3-scale/-/d3-scale-4.0.2.tgz#82b38e8e8ff7080764f8dcec77bd4be393689396" @@ -4537,13 +4499,6 @@ d3-scale@^4.0.2: d3-time "2.1.1 - 3" d3-time-format "2 - 4" -d3-shape@^1.2.0: - version "1.3.7" - resolved "https://registry.yarnpkg.com/d3-shape/-/d3-shape-1.3.7.tgz#df63801be07bc986bc54f63789b4fe502992b5d7" - integrity sha512-EUkvKjqPFUAZyOlhY5gzCxCeI0Aep04LwIRpsZ/mLFelJiUfnK56jo5JMDSE7yyP2kLSb6LtF+S5chMk7uqPqw== - dependencies: - d3-path "1" - d3-shape@^3.2.0: version "3.2.0" resolved "https://registry.yarnpkg.com/d3-shape/-/d3-shape-3.2.0.tgz#a1a839cbd9ba45f28674c69d7f855bcf91dfc6a5" @@ -4551,7 +4506,7 @@ d3-shape@^3.2.0: dependencies: d3-path "^3.1.0" -"d3-time-format@2 - 4": +"d3-time-format@2 - 4", d3-time-format@^4.1.0: version "4.1.0" resolved "https://registry.yarnpkg.com/d3-time-format/-/d3-time-format-4.1.0.tgz#7ab5257a5041d11ecb4fe70a5c7d16a195bb408a" integrity sha512-dJxPBlzC7NugB2PDLwo9Q8JiTR3M3e4/XANkreKSUxF8vvXKqm1Yfq4Q5dl8budlunRVlUUaDUgFt7eA8D6NLg== @@ -4570,13 +4525,6 @@ d3-timer@^3.0.1: resolved "https://registry.yarnpkg.com/d3-timer/-/d3-timer-3.0.1.tgz#6284d2a2708285b1abb7e201eda4380af35e63b0" integrity sha512-ndfJ/JxxMd3nw31uyKoY2naivF+r29V+Lc0svZxe1JvvIRmi8hUsrMvdOwgS1o6uBHmiz91geQ0ylPP0aj1VUA== -dashdash@^1.12.0: - version "1.14.1" - resolved "https://registry.yarnpkg.com/dashdash/-/dashdash-1.14.1.tgz#853cfa0f7cbe2fed5de20326b8dd581035f6e2f0" - integrity sha512-jRFi8UDGo6j+odZiEpjazZaWqEal3w/basFjQHQEwVtZJGDpxbH1MeYluwCS8Xq5wmLJooDlMgvVarmWfGM44g== - dependencies: - assert-plus "^1.0.0" - data-urls@^5.0.0: version "5.0.0" resolved 
"https://registry.yarnpkg.com/data-urls/-/data-urls-5.0.0.tgz#2f76906bce1824429ffecb6920f45a0b30f00dde" @@ -4585,10 +4533,10 @@ data-urls@^5.0.0: whatwg-mimetype "^4.0.0" whatwg-url "^14.0.0" -dayjs@1.11.7, dayjs@^1.10.4: - version "1.11.7" - resolved "https://registry.yarnpkg.com/dayjs/-/dayjs-1.11.7.tgz#4b296922642f70999544d1144a2c25730fce63e2" - integrity sha512-+Yw9U6YO5TQohxLcIkrXBeY73WP3ejHWVvx8XCk3gxvQDCTEmS48ZrSZCKciI7Bhl/uCMyxYtE9UqRILmFphkQ== +dayjs@1.11.19: + version "1.11.19" + resolved "https://registry.yarnpkg.com/dayjs/-/dayjs-1.11.19.tgz#15dc98e854bb43917f12021806af897c58ae2938" + integrity sha512-t5EcLVS6QPBNqM2z8fakk/NKel+Xzshgt8FFKAn+qwlD1pzZWxh0nVCrvFK7ZDb6XucZeF9z8C7CBWTRIVApAw== debug@4, debug@^4.1.0, debug@^4.1.1, debug@^4.3.4: version "4.3.4" @@ -4597,13 +4545,6 @@ debug@4, debug@^4.1.0, debug@^4.1.1, debug@^4.3.4: dependencies: ms "2.1.2" -debug@^3.1.0: - version "3.2.7" - resolved "https://registry.yarnpkg.com/debug/-/debug-3.2.7.tgz#72580b7e9145fb39b6676f9c5e5fb100b934179a" - integrity sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ== - dependencies: - ms "^2.1.1" - debug@^4.3.1: version "4.4.3" resolved "https://registry.yarnpkg.com/debug/-/debug-4.4.3.tgz#c6ae432d9bd9662582fce08709b038c58e9e3d6a" @@ -4714,10 +4655,10 @@ dom-helpers@^5.0.1: "@babel/runtime" "^7.8.7" csstype "^3.0.2" -dompurify@3.1.7, dompurify@3.2.4, dompurify@=3.2.6: - version "3.2.4" - resolved "https://registry.yarnpkg.com/dompurify/-/dompurify-3.2.4.tgz#af5a5a11407524431456cf18836c55d13441cd8e" - integrity sha512-ysFSFEDVduQpyhzAob/kkuJjf5zWkZD8/A9ywSp1byueyuCfHamrCBa14/Oc2iiB0e51B+NpxSl5gmzn+Ms/mg== +dompurify@3.2.7, dompurify@3.3.1, dompurify@=3.2.6: + version "3.3.1" + resolved "https://registry.yarnpkg.com/dompurify/-/dompurify-3.3.1.tgz#c7e1ddebfe3301eacd6c0c12a4af284936dbbb86" + integrity sha512-qkdCKzLNtrgPFP1Vo+98FRzJnBRGe4ffyCea9IwHB1fyxPOeNTHpLKYGd4Uk9xvNoH0ZoOjwZxNptyMwqrId1Q== optionalDependencies: 
"@types/trusted-types" "^2.0.7" @@ -4740,14 +4681,6 @@ eastasianwidth@^0.2.0: resolved "https://registry.yarnpkg.com/eastasianwidth/-/eastasianwidth-0.2.0.tgz#696ce2ec0aa0e6ea93a397ffcf24aa7840c827cb" integrity sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA== -ecc-jsbn@~0.1.1: - version "0.1.2" - resolved "https://registry.yarnpkg.com/ecc-jsbn/-/ecc-jsbn-0.1.2.tgz#3a83a904e54353287874c564b7549386849a98c9" - integrity sha512-eh9O+hwRHNbG4BLTjEl3nw044CkGm5X6LoaCf7LPp7UU8Qrt47JYNi6nPX8xjW97TKGKm1ouctg0QSpZe9qrnw== - dependencies: - jsbn "~0.1.0" - safer-buffer "^2.1.0" - electron-to-chromium@^1.4.251: version "1.4.284" resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.4.284.tgz#61046d1e4cab3a25238f6bf7413795270f125592" @@ -4773,27 +4706,13 @@ emoji-regex@^9.2.2: resolved "https://registry.yarnpkg.com/emoji-regex/-/emoji-regex-9.2.2.tgz#840c8803b0d8047f4ff0cf963176b32d4ef3ed72" integrity sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg== -end-of-stream@^1.1.0: - version "1.4.4" - resolved "https://registry.yarnpkg.com/end-of-stream/-/end-of-stream-1.4.4.tgz#5ae64a5f45057baf3626ec14da0ca5e4b2431eb0" - integrity sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q== - dependencies: - once "^1.4.0" - -enhanced-resolve@^5.18.1: - version "5.18.3" - resolved "https://registry.yarnpkg.com/enhanced-resolve/-/enhanced-resolve-5.18.3.tgz#9b5f4c5c076b8787c78fe540392ce76a88855b44" - integrity sha512-d4lC8xfavMeBjzGr2vECC3fsGXziXZQyJxD868h2M/mBI3PwAuODxAkLkq5HYuvrPYcUtiLzsTo8U3PgX3Ocww== +enhanced-resolve@^5.19.0: + version "5.19.0" + resolved "https://registry.yarnpkg.com/enhanced-resolve/-/enhanced-resolve-5.19.0.tgz#6687446a15e969eaa63c2fa2694510e17ae6d97c" + integrity sha512-phv3E1Xl4tQOShqSte26C7Fl84EwUdZsyOuSSk9qtAGyyQs2s3jJzComh+Abf4g187lUUAvH+H26omrqia2aGg== dependencies: graceful-fs "^4.2.4" - tapable 
"^2.2.0" - -enquirer@^2.3.6: - version "2.3.6" - resolved "https://registry.yarnpkg.com/enquirer/-/enquirer-2.3.6.tgz#2a7fe5dd634a1e4125a975ec994ff5456dc3734d" - integrity sha512-yjNnPr315/FjS4zIsUxYguYUPP2e1NK4d7E7ZOLiyYCcbFBiTMyID+2wvm2w6+pZ/odMA7cRkjhsPbltwBOrLg== - dependencies: - ansi-colors "^4.1.1" + tapable "^2.3.0" entities@^6.0.0: version "6.0.1" @@ -4848,37 +4767,37 @@ es-set-tostringtag@^2.1.0: has-tostringtag "^1.0.2" hasown "^2.0.2" -esbuild@^0.25.0: - version "0.25.12" - resolved "https://registry.yarnpkg.com/esbuild/-/esbuild-0.25.12.tgz#97a1d041f4ab00c2fce2f838d2b9969a2d2a97a5" - integrity sha512-bbPBYYrtZbkt6Os6FiTLCTFxvq4tt3JKall1vRwshA3fdVztsLAatFaZobhkBC8/BrPetoa0oksYoKXoG4ryJg== +esbuild@^0.27.0: + version "0.27.3" + resolved "https://registry.yarnpkg.com/esbuild/-/esbuild-0.27.3.tgz#5859ca8e70a3af956b26895ce4954d7e73bd27a8" + integrity sha512-8VwMnyGCONIs6cWue2IdpHxHnAjzxnw2Zr7MkVxB2vjmQ2ivqGFb4LEG3SMnv0Gb2F/G/2yA8zUaiL1gywDCCg== optionalDependencies: - "@esbuild/aix-ppc64" "0.25.12" - "@esbuild/android-arm" "0.25.12" - "@esbuild/android-arm64" "0.25.12" - "@esbuild/android-x64" "0.25.12" - "@esbuild/darwin-arm64" "0.25.12" - "@esbuild/darwin-x64" "0.25.12" - "@esbuild/freebsd-arm64" "0.25.12" - "@esbuild/freebsd-x64" "0.25.12" - "@esbuild/linux-arm" "0.25.12" - "@esbuild/linux-arm64" "0.25.12" - "@esbuild/linux-ia32" "0.25.12" - "@esbuild/linux-loong64" "0.25.12" - "@esbuild/linux-mips64el" "0.25.12" - "@esbuild/linux-ppc64" "0.25.12" - "@esbuild/linux-riscv64" "0.25.12" - "@esbuild/linux-s390x" "0.25.12" - "@esbuild/linux-x64" "0.25.12" - "@esbuild/netbsd-arm64" "0.25.12" - "@esbuild/netbsd-x64" "0.25.12" - "@esbuild/openbsd-arm64" "0.25.12" - "@esbuild/openbsd-x64" "0.25.12" - "@esbuild/openharmony-arm64" "0.25.12" - "@esbuild/sunos-x64" "0.25.12" - "@esbuild/win32-arm64" "0.25.12" - "@esbuild/win32-ia32" "0.25.12" - "@esbuild/win32-x64" "0.25.12" + "@esbuild/aix-ppc64" "0.27.3" + "@esbuild/android-arm" "0.27.3" + "@esbuild/android-arm64" 
"0.27.3" + "@esbuild/android-x64" "0.27.3" + "@esbuild/darwin-arm64" "0.27.3" + "@esbuild/darwin-x64" "0.27.3" + "@esbuild/freebsd-arm64" "0.27.3" + "@esbuild/freebsd-x64" "0.27.3" + "@esbuild/linux-arm" "0.27.3" + "@esbuild/linux-arm64" "0.27.3" + "@esbuild/linux-ia32" "0.27.3" + "@esbuild/linux-loong64" "0.27.3" + "@esbuild/linux-mips64el" "0.27.3" + "@esbuild/linux-ppc64" "0.27.3" + "@esbuild/linux-riscv64" "0.27.3" + "@esbuild/linux-s390x" "0.27.3" + "@esbuild/linux-x64" "0.27.3" + "@esbuild/netbsd-arm64" "0.27.3" + "@esbuild/netbsd-x64" "0.27.3" + "@esbuild/openbsd-arm64" "0.27.3" + "@esbuild/openbsd-x64" "0.27.3" + "@esbuild/openharmony-arm64" "0.27.3" + "@esbuild/sunos-x64" "0.27.3" + "@esbuild/win32-arm64" "0.27.3" + "@esbuild/win32-ia32" "0.27.3" + "@esbuild/win32-x64" "0.27.3" escalade@^3.1.1: version "3.1.1" @@ -4915,26 +4834,6 @@ esutils@^2.0.2: resolved "https://registry.yarnpkg.com/esutils/-/esutils-2.0.3.tgz#74d2eb4de0b8da1293711910d50775b9b710ef64" integrity sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g== -eventemitter2@6.4.7: - version "6.4.7" - resolved "https://registry.yarnpkg.com/eventemitter2/-/eventemitter2-6.4.7.tgz#a7f6c4d7abf28a14c1ef3442f21cb306a054271d" - integrity sha512-tYUSVOGeQPKt/eC1ABfhHy5Xd96N3oIijJvN3O9+TsC28T5V9yX9oEfEK5faP0EFSNVOG97qtAS68GBrQB2hDg== - -execa@4.1.0: - version "4.1.0" - resolved "https://registry.yarnpkg.com/execa/-/execa-4.1.0.tgz#4e5491ad1572f2f17a77d388c6c857135b22847a" - integrity sha512-j5W0//W7f8UxAn8hXVnwG8tLwdiUy4FJLcSupCg6maBYZDpyBvTApK7KyuI4bKj8KOh1r2YH+6ucuYtJv1bTZA== - dependencies: - cross-spawn "^7.0.0" - get-stream "^5.0.0" - human-signals "^1.1.1" - is-stream "^2.0.0" - merge-stream "^2.0.0" - npm-run-path "^4.0.0" - onetime "^5.1.0" - signal-exit "^3.0.2" - strip-final-newline "^2.0.0" - execa@^5.1.1: version "5.1.1" resolved "https://registry.yarnpkg.com/execa/-/execa-5.1.1.tgz#f80ad9cbf4298f7bd1d4c9555c21e93741c411dd" @@ -4950,13 +4849,6 @@ 
execa@^5.1.1: signal-exit "^3.0.3" strip-final-newline "^2.0.0" -executable@^4.1.1: - version "4.1.1" - resolved "https://registry.yarnpkg.com/executable/-/executable-4.1.1.tgz#41532bff361d3e57af4d763b70582db18f5d133c" - integrity sha512-8iA79xD3uAch729dUG8xaaBBFGaEa0wdD2VkYLFHwlqosEj/jT66AzcreRDSgV7ehnNLBW2WR5jIXwGKjVdTLg== - dependencies: - pify "^2.2.0" - exit-x@^0.2.2: version "0.2.2" resolved "https://registry.yarnpkg.com/exit-x/-/exit-x-0.2.2.tgz#1f9052de3b8d99a696b10dad5bced9bdd5c3aa64" @@ -4974,32 +4866,6 @@ expect@30.2.0, expect@^30.0.0: jest-mock "30.2.0" jest-util "30.2.0" -extend@~3.0.2: - version "3.0.2" - resolved "https://registry.yarnpkg.com/extend/-/extend-3.0.2.tgz#f8b1136b4071fbd8eb140aff858b1019ec2915fa" - integrity sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g== - -extract-zip@2.0.1: - version "2.0.1" - resolved "https://registry.yarnpkg.com/extract-zip/-/extract-zip-2.0.1.tgz#663dca56fe46df890d5f131ef4a06d22bb8ba13a" - integrity sha512-GDhU9ntwuKyGXdZBUgTIe+vXnWj0fppUEtMDL0+idd5Sta8TGpHssn/eusA9mrPr9qNDym6SxAYZjNvCn/9RBg== - dependencies: - debug "^4.1.1" - get-stream "^5.1.0" - yauzl "^2.10.0" - optionalDependencies: - "@types/yauzl" "^2.9.1" - -extsprintf@1.3.0: - version "1.3.0" - resolved "https://registry.yarnpkg.com/extsprintf/-/extsprintf-1.3.0.tgz#96918440e3041a7a414f8c52e3c574eb3c3e1e05" - integrity sha512-11Ndz7Nv+mvAC1j0ktTa7fAb0vLyGGX+rMHNBYQviQDGU0Hw7lhctJANqbPhu9nV9/izT/IntTgZ7Im/9LJs9g== - -extsprintf@^1.2.0: - version "1.4.1" - resolved "https://registry.yarnpkg.com/extsprintf/-/extsprintf-1.4.1.tgz#8d172c064867f235c0c84a596806d279bf4bcc07" - integrity sha512-Wrk35e8ydCKDj/ArClo1VrPVmN8zph5V4AtHwIuHhvMXsKf73UT3BOD+azBIW+3wOJ4FhEH7zyaJCFvChjYvMA== - fast-json-patch@^3.0.0-1: version "3.1.1" resolved "https://registry.yarnpkg.com/fast-json-patch/-/fast-json-patch-3.1.1.tgz#85064ea1b1ebf97a3f7ad01e23f9337e72c66947" @@ -5024,25 +4890,11 @@ fb-watchman@^2.0.2: dependencies: bser 
"2.1.1" -fd-slicer@~1.1.0: - version "1.1.0" - resolved "https://registry.yarnpkg.com/fd-slicer/-/fd-slicer-1.1.0.tgz#25c7c89cb1f9077f8891bbe61d8f390eae256f1e" - integrity sha512-cE1qsB/VwyQozZ+q1dGxR8LBYNZeofhEdUNGSMbQD3Gw2lAzX9Zb3uIU6Ebc/Fmyjo9AWWfnn0AUCHqtevs/8g== - dependencies: - pend "~1.2.0" - fdir@^6.5.0: version "6.5.0" resolved "https://registry.yarnpkg.com/fdir/-/fdir-6.5.0.tgz#ed2ab967a331ade62f18d077dae192684d50d350" integrity sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg== -figures@^3.2.0: - version "3.2.0" - resolved "https://registry.yarnpkg.com/figures/-/figures-3.2.0.tgz#625c18bd293c604dc4a8ddb2febf0c88341746af" - integrity sha512-yaduQFRKLXYOGgEn6AZau90j3ggSOyiqXU0F9JZfeXYhNa+Jk4X+s45A2zg5jns87GAFa34BBm2kXw4XpNcbdg== - dependencies: - escape-string-regexp "^1.0.5" - fill-range@^7.1.1: version "7.1.1" resolved "https://registry.yarnpkg.com/fill-range/-/fill-range-7.1.1.tgz#44265d3cac07e3ea7dc247516380643754a05292" @@ -5095,11 +4947,6 @@ foreground-child@^3.3.1: cross-spawn "^7.0.6" signal-exit "^4.0.1" -forever-agent@~0.6.1: - version "0.6.1" - resolved "https://registry.yarnpkg.com/forever-agent/-/forever-agent-0.6.1.tgz#fbc71f0c41adeb37f96c577ad1ed42d8fdacca91" - integrity sha512-j0KLYPhm6zeac4lz3oJ3o65qvgQCcPubiyotZrXqEaG4hNagNYO8qdlUrX5vwqv9ohqeT/Z3j6+yW067yWWdUw== - form-data@^4.0.4: version "4.0.5" resolved "https://registry.yarnpkg.com/form-data/-/form-data-4.0.5.tgz#b49e48858045ff4cbf6b03e1805cebcad3679053" @@ -5111,40 +4958,21 @@ form-data@^4.0.4: hasown "^2.0.2" mime-types "^2.1.12" -form-data@~2.3.2: - version "2.3.3" - resolved "https://registry.yarnpkg.com/form-data/-/form-data-2.3.3.tgz#dcce52c05f644f298c6a7ab936bd724ceffbf3a6" - integrity sha512-1lLKB2Mu3aGP1Q/2eCOx0fNbRMe7XdwktwOruhfqqd0rIJWwN4Dh+E3hrPSlDCXnSR7UtZ1N38rVXm+6+MEhJQ== - dependencies: - asynckit "^0.4.0" - combined-stream "^1.0.6" - mime-types "^2.1.12" - format@^0.2.0: version "0.2.2" resolved 
"https://registry.yarnpkg.com/format/-/format-0.2.2.tgz#d6170107e9efdc4ed30c9dc39016df942b5cb58b" integrity sha512-wzsgA6WOq+09wrU1tsJ09udeR/YZRaeArL9e1wPbFg3GG2yDnC2ldKpxs4xunpFF9DgqCqOIra3bc1HWrJ37Ww== -fs-extra@^9.1.0: - version "9.1.0" - resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-9.1.0.tgz#5954460c764a8da2094ba3554bf839e6b9a7c86d" - integrity sha512-hcg3ZmepS30/7BSFqRvoo3DOMQu7IjqxO5nCDt+zM9XWjb33Wg7ziNT+Qvqbuc3+gWpzO02JubVyk2G4Zvo1OQ== - dependencies: - at-least-node "^1.0.0" - graceful-fs "^4.2.0" - jsonfile "^6.0.1" - universalify "^2.0.0" +fsevents@2.3.2, fsevents@~2.3.2: + version "2.3.2" + resolved "https://registry.yarnpkg.com/fsevents/-/fsevents-2.3.2.tgz#8a526f78b8fdf4623b709e0b975c52c24c02fd1a" + integrity sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA== fsevents@^2.3.3, fsevents@~2.3.3: version "2.3.3" resolved "https://registry.yarnpkg.com/fsevents/-/fsevents-2.3.3.tgz#cac6407785d03675a2a5e1a5305c697b347d90d6" integrity sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw== -fsevents@~2.3.2: - version "2.3.2" - resolved "https://registry.yarnpkg.com/fsevents/-/fsevents-2.3.2.tgz#8a526f78b8fdf4623b709e0b975c52c24c02fd1a" - integrity sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA== - function-bind@^1.1.1: version "1.1.1" resolved "https://registry.yarnpkg.com/function-bind/-/function-bind-1.1.1.tgz#a56899d3ea3c9bab874bb9773b7c5ede92f4895d" @@ -5208,32 +5036,11 @@ get-proto@^1.0.1: dunder-proto "^1.0.1" es-object-atoms "^1.0.0" -get-stream@^5.0.0, get-stream@^5.1.0: - version "5.2.0" - resolved "https://registry.yarnpkg.com/get-stream/-/get-stream-5.2.0.tgz#4966a1795ee5ace65e706c4b7beb71257d6e22d3" - integrity sha512-nBF+F1rAZVCu/p7rjzgA+Yb4lfYXrpl7a6VmJrU8wF9I1CKvP/QwPNZHnOlwbTkY6dvtFIzFMSyQXbLoTQPRpA== - dependencies: - pump "^3.0.0" - get-stream@^6.0.0: version "6.0.1" resolved 
"https://registry.yarnpkg.com/get-stream/-/get-stream-6.0.1.tgz#a262d8eef67aced57c2852ad6167526a43cbf7b7" integrity sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg== -getos@^3.2.1: - version "3.2.1" - resolved "https://registry.yarnpkg.com/getos/-/getos-3.2.1.tgz#0134d1f4e00eb46144c5a9c0ac4dc087cbb27dc5" - integrity sha512-U56CfOK17OKgTVqozZjUKNdkfEv6jk5WISBJ8SHoagjE6L69zOwl3Z+O8myjY9MEW3i2HPWQBt/LTbCgcC973Q== - dependencies: - async "^3.2.0" - -getpass@^0.1.1: - version "0.1.7" - resolved "https://registry.yarnpkg.com/getpass/-/getpass-0.1.7.tgz#5eff8e3e684d569ae4cb2b1282604e8ba62149fa" - integrity sha512-0fzj9JxOLfJ+XGLhR8ze3unN0KZCgZwiSSDz168VERjK8Wl8kVSdcu2kspd4s4wtAa1y/qrVRiAA0WclVsu0ng== - dependencies: - assert-plus "^1.0.0" - glob@11.1.0, glob@^10.3.10, glob@^7.1.4: version "11.1.0" resolved "https://registry.yarnpkg.com/glob/-/glob-11.1.0.tgz#4f826576e4eb99c7dad383793d2f9f08f67e50a6" @@ -5246,13 +5053,6 @@ glob@11.1.0, glob@^10.3.10, glob@^7.1.4: package-json-from-dist "^1.0.0" path-scurry "^2.0.0" -global-dirs@^3.0.0: - version "3.0.1" - resolved "https://registry.yarnpkg.com/global-dirs/-/global-dirs-3.0.1.tgz#0c488971f066baceda21447aecb1a8b911d22485" - integrity sha512-NBcGGFbBA9s1VzD41QXDG+3++t9Mn5t1FpLdhESY6oKY4gYTFpX4wO3sqGUa0Srjtbfj3szX0RnemmrVRUdULA== - dependencies: - ini "2.0.0" - globals@^11.1.0: version "11.12.0" resolved "https://registry.yarnpkg.com/globals/-/globals-11.12.0.tgz#ab8795338868a0babd8525758018c2a7eb95c42e" @@ -5270,11 +5070,6 @@ gopd@^1.2.0: resolved "https://registry.yarnpkg.com/gopd/-/gopd-1.2.0.tgz#89f56b8217bdbc8802bd299df6d7f1081d7e51a1" integrity sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg== -graceful-fs@^4.1.6, graceful-fs@^4.2.0: - version "4.2.10" - resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-4.2.10.tgz#147d3a006da4ca3ce14728c7aefc287c367d7a6c" - integrity 
sha512-9ByhssR2fPVsNZj478qUUbKfmL0+t5BDVyjShtyZZLiK7ZDAArFFfopyOTj0M05wE2tJPisA4iTnnXl2YoPvOA== - graceful-fs@^4.2.11, graceful-fs@^4.2.4: version "4.2.11" resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-4.2.11.tgz#4183e4e8bf08bb6e05bbb2f7d2e0c8f712ca40e3" @@ -5402,15 +5197,6 @@ http-proxy-agent@^7.0.2: agent-base "^7.1.0" debug "^4.3.4" -http-signature@~1.3.6: - version "1.3.6" - resolved "https://registry.yarnpkg.com/http-signature/-/http-signature-1.3.6.tgz#cb6fbfdf86d1c974f343be94e87f7fc128662cf9" - integrity sha512-3adrsD6zqo4GsTqtO7FyrejHNv+NgiIfAfv68+jVlFmSr9OGy7zrxONceFRLKvnnZA5jbxQBX1u9PpB6Wi32Gw== - dependencies: - assert-plus "^1.0.0" - jsprim "^2.0.2" - sshpk "^1.14.1" - https-proxy-agent@^7.0.6: version "7.0.6" resolved "https://registry.yarnpkg.com/https-proxy-agent/-/https-proxy-agent-7.0.6.tgz#da8dfeac7da130b05c2ba4b59c9b6cd66611a6b9" @@ -5419,11 +5205,6 @@ https-proxy-agent@^7.0.6: agent-base "^7.1.2" debug "4" -human-signals@^1.1.1: - version "1.1.1" - resolved "https://registry.yarnpkg.com/human-signals/-/human-signals-1.1.1.tgz#c5b1cd14f50aeae09ab6c59fe63ba3395fe4dfa3" - integrity sha512-SEQu7vl8KjNL2eoGBLF3+wAjpsNfA9XMlXAYj/3EdaNfAlxKthD1xjEQfGOUhllCGGJVNY34bRr6lPINhNjyZw== - human-signals@^2.1.0: version "2.1.0" resolved "https://registry.yarnpkg.com/human-signals/-/human-signals-2.1.0.tgz#dc91fcba42e4d06e4abaed33b3e7a3c02f514ea0" @@ -5436,7 +5217,7 @@ iconv-lite@0.6.3: dependencies: safer-buffer ">= 2.1.2 < 3.0.0" -ieee754@^1.1.13, ieee754@^1.2.1: +ieee754@^1.2.1: version "1.2.1" resolved "https://registry.yarnpkg.com/ieee754/-/ieee754-1.2.1.tgz#8eb7a10a63fff25d15a57b001586d177d1b0d352" integrity sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA== @@ -5477,21 +5258,11 @@ inherits@^2.0.4: resolved "https://registry.yarnpkg.com/inherits/-/inherits-2.0.4.tgz#0fa2c64f932917c3433a0ded55363aae37416b7c" integrity 
sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ== -ini@2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/ini/-/ini-2.0.0.tgz#e5fd556ecdd5726be978fa1001862eacb0a94bc5" - integrity sha512-7PnF4oN3CvZF23ADhA5wRaYEQpJ8qygSkbtTXWBeXWXmEVRXK+1ITciHWwHhsjv1TmW0MgacIv6hEi5pX5NQdA== - -"internmap@1 - 2": +"internmap@1 - 2", internmap@^2.0.3: version "2.0.3" resolved "https://registry.yarnpkg.com/internmap/-/internmap-2.0.3.tgz#6685f23755e43c524e251d29cbc97248e3061009" integrity sha512-5Hh7Y1wQbvY5ooGgPbDaL5iYLAPzMTUrjMulskHLH6wnv/A+1q5rgEaiuqEjB+oxGXIVZs1FF+R/KPN3ZSQYYg== -internmap@^1.0.0: - version "1.0.1" - resolved "https://registry.yarnpkg.com/internmap/-/internmap-1.0.1.tgz#0017cc8a3b99605f0302f2b198d272e015e5df95" - integrity sha512-lDB5YccMydFBtasVtxnZ3MRBHuaoE8GKsppq+EchKL2U4nK/DmEpPHNH8MZe5HkMtpSiTSOZwfN0tzYjO/lJEw== - invariant@^2.2.2: version "2.2.4" resolved "https://registry.yarnpkg.com/invariant/-/invariant-2.2.4.tgz#610f3c92c9359ce1db616e538008d23ff35158e6" @@ -5545,13 +5316,6 @@ is-callable@^1.1.3, is-callable@^1.2.7: resolved "https://registry.yarnpkg.com/is-callable/-/is-callable-1.2.7.tgz#3bc2a85ea742d9e36205dcacdd72ca1fdc51b055" integrity sha512-1BC0BVFhS/p0qtw6enp8e+8OD0UrK0oFLztSjNzhcKA3WDuJxxAPXzPuPtKkjEY9UUoEWlX/8fgKeu2S8i9JTA== -is-ci@^3.0.0: - version "3.0.1" - resolved "https://registry.yarnpkg.com/is-ci/-/is-ci-3.0.1.tgz#db6ecbed1bd659c43dac0f45661e7674103d1867" - integrity sha512-ZYvCgrefwqoQ6yTyYUbQu64HsITZ3NfKX1lzaEYdkTDcfKzzCI/wthRRYKkdjHKFVgNiXKAKm65Zo1pk2as/QQ== - dependencies: - ci-info "^3.2.0" - is-core-module@^2.9.0: version "2.11.0" resolved "https://registry.yarnpkg.com/is-core-module/-/is-core-module-2.11.0.tgz#ad4cb3e3863e814523c96f3f58d26cc570ff0144" @@ -5586,14 +5350,6 @@ is-hexadecimal@^2.0.0: resolved "https://registry.yarnpkg.com/is-hexadecimal/-/is-hexadecimal-2.0.1.tgz#86b5bf668fca307498d319dfc03289d781a90027" integrity 
sha512-DgZQp241c8oO6cA1SbTEWiXeoxV42vlcJxgH+B3hi1AiqqKruZR3ZGF8In3fj4+/y/7rHvlOZLZtgJ/4ttYGZg== -is-installed-globally@~0.4.0: - version "0.4.0" - resolved "https://registry.yarnpkg.com/is-installed-globally/-/is-installed-globally-0.4.0.tgz#9a0fd407949c30f86eb6959ef1b7994ed0b7b520" - integrity sha512-iwGqO3J21aaSkC7jWnHP/difazwS7SFeIqxv6wEtLU8Y5KlzFTjyqcSIT0d8s4+dDhKytsk9PJZ2BkS5eZwQRQ== - dependencies: - global-dirs "^3.0.0" - is-path-inside "^3.0.2" - is-map@^2.0.1, is-map@^2.0.2: version "2.0.2" resolved "https://registry.yarnpkg.com/is-map/-/is-map-2.0.2.tgz#00922db8c9bf73e81b7a335827bc2a43f2b91127" @@ -5611,11 +5367,6 @@ is-number@^7.0.0: resolved "https://registry.yarnpkg.com/is-number/-/is-number-7.0.0.tgz#7535345b896734d5f80c4d06c50955527a14f12b" integrity sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng== -is-path-inside@^3.0.2: - version "3.0.3" - resolved "https://registry.yarnpkg.com/is-path-inside/-/is-path-inside-3.0.3.tgz#d231362e53a07ff2b0e0ea7fed049161ffd16283" - integrity sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ== - is-potential-custom-element-name@^1.0.1: version "1.0.1" resolved "https://registry.yarnpkg.com/is-potential-custom-element-name/-/is-potential-custom-element-name-1.0.1.tgz#171ed6f19e3ac554394edf78caa05784a45bebb5" @@ -5671,16 +5422,6 @@ is-typed-array@^1.1.14: dependencies: which-typed-array "^1.1.16" -is-typedarray@~1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/is-typedarray/-/is-typedarray-1.0.0.tgz#e479c80858df0c1b11ddda6940f96011fcda4a9a" - integrity sha512-cyA56iCMHAh5CdzjJIa4aohJyeO1YbwLi3Jc35MmRU6poroFjIGZzUzupGiRPOjgHg9TLu43xbpwXk523fMxKA== - -is-unicode-supported@^0.1.0: - version "0.1.0" - resolved "https://registry.yarnpkg.com/is-unicode-supported/-/is-unicode-supported-0.1.0.tgz#3f26c76a809593b52bfa2ecb5710ed2779b522a7" - integrity 
sha512-knxG2q4UC3u8stRGyAVJCOdxFmv5DZiRcdlIaAQXAbSfJya+OhopNotLQrstBhququ4ZpuKbDc/8S6mgXgPFPw== - is-weakmap@^2.0.1: version "2.0.1" resolved "https://registry.yarnpkg.com/is-weakmap/-/is-weakmap-2.0.1.tgz#5008b59bdc43b698201d18f62b37b2ca243e8cf2" @@ -5704,11 +5445,6 @@ isexe@^2.0.0: resolved "https://registry.yarnpkg.com/isexe/-/isexe-2.0.0.tgz#e8fbf374dc556ff8947a10dcb0572d633f2cfa10" integrity sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw== -isstream@~0.1.2: - version "0.1.2" - resolved "https://registry.yarnpkg.com/isstream/-/isstream-0.1.2.tgz#47e63f7af55afa6f92e1500e690eb8b8529c099a" - integrity sha512-Yljz7ffyPbrLpLngrMtZ7NduUgVvi6wG9RJ9IUcyCd59YQ911PBJphODUcbOVbqYfxe1wuYf/LJ8PauMRwsM/g== - istanbul-lib-coverage@^3.0.0, istanbul-lib-coverage@^3.2.0: version "3.2.0" resolved "https://registry.yarnpkg.com/istanbul-lib-coverage/-/istanbul-lib-coverage-3.2.0.tgz#189e7909d0a39fa5a3dfad5b03f71947770191d3" @@ -6146,10 +5882,10 @@ js-file-download@^0.4.12: resolved "https://registry.yarnpkg.com/js-tokens/-/js-tokens-4.0.0.tgz#19203fb59991df98e3a287050d4647cdeaf32499" integrity sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ== -js-yaml@=4.1.0, js-yaml@^4.1.0: - version "4.1.0" - resolved "https://registry.yarnpkg.com/js-yaml/-/js-yaml-4.1.0.tgz#c1fb65f8f5017901cdd2c951864ba18458a10602" - integrity sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA== +js-yaml@=4.1.1: + version "4.1.1" + resolved "https://registry.yarnpkg.com/js-yaml/-/js-yaml-4.1.1.tgz#854c292467705b699476e1a2decc0c8a3458806b" + integrity sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA== dependencies: argparse "^2.0.1" @@ -6161,10 +5897,12 @@ js-yaml@^3.13.1: argparse "^1.0.7" esprima "^4.0.0" -jsbn@~0.1.0: - version "0.1.1" - resolved 
"https://registry.yarnpkg.com/jsbn/-/jsbn-0.1.1.tgz#a5e654c2e5a2deb5f201d96cefbca80c0ef2f513" - integrity sha512-UVU9dibq2JcFWxQPA6KCqj5O42VOmAY3zQUfEKxU0KpTGXwNoCjkX1e13eHNvw/xPynt6pU0rZ1htjWTNTSXsg== +js-yaml@^4.1.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/js-yaml/-/js-yaml-4.1.0.tgz#c1fb65f8f5017901cdd2c951864ba18458a10602" + integrity sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA== + dependencies: + argparse "^2.0.1" jsdom@^26.1.0: version "26.1.0" @@ -6212,16 +5950,6 @@ json-parse-even-better-errors@^2.3.0: resolved "https://registry.yarnpkg.com/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz#7c47805a94319928e05777405dc12e1f7a4ee02d" integrity sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w== -json-schema@0.4.0: - version "0.4.0" - resolved "https://registry.yarnpkg.com/json-schema/-/json-schema-0.4.0.tgz#f7de4cf6efab838ebaeb3236474cbba5a1930ab5" - integrity sha512-es94M3nTIfsEPisRafak+HDLfHXnKBhV3vU5eqPcS3flIWqcxJWgXHXiey3YrpaNsanY5ei1VoYEbOzijuq9BA== - -json-stringify-safe@~5.0.1: - version "5.0.1" - resolved "https://registry.yarnpkg.com/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz#1296a2d58fd45f19a0f6ce01d65701e2c735b6eb" - integrity sha512-ZClg6AaYvamvYEE82d3Iyd3vSSIjQ+odgjaTzRuO3s7toCdFKczob2i0zCh7JE8kWn17yvAWhUVxvqGwUalsRA== - json5@^2.2.1: version "2.2.2" resolved "https://registry.yarnpkg.com/json5/-/json5-2.2.2.tgz#64471c5bdcc564c18f7c1d4df2e2297f2457c5ab" @@ -6232,30 +5960,6 @@ json5@^2.2.3: resolved "https://registry.yarnpkg.com/json5/-/json5-2.2.3.tgz#78cd6f1a19bdc12b73db5ad0c61efd66c1e29283" integrity sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg== -jsonfile@^6.0.1: - version "6.1.0" - resolved "https://registry.yarnpkg.com/jsonfile/-/jsonfile-6.1.0.tgz#bc55b2634793c679ec6403094eb13698a6ec0aae" - integrity 
sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ== - dependencies: - universalify "^2.0.0" - optionalDependencies: - graceful-fs "^4.1.6" - -jsprim@^2.0.2: - version "2.0.2" - resolved "https://registry.yarnpkg.com/jsprim/-/jsprim-2.0.2.tgz#77ca23dbcd4135cd364800d22ff82c2185803d4d" - integrity sha512-gqXddjPqQ6G40VdnI6T6yObEC+pDNvyP95wdQhkWkg7crHH3km5qP1FsOXEkzEQwnz6gz5qGTn1c2Y52wP3OyQ== - dependencies: - assert-plus "1.0.0" - extsprintf "1.3.0" - json-schema "0.4.0" - verror "1.10.0" - -lazy-ass@^1.6.0: - version "1.6.0" - resolved "https://registry.yarnpkg.com/lazy-ass/-/lazy-ass-1.6.0.tgz#7999655e8646c17f089fdd187d150d3324d54513" - integrity sha512-cc8oEVoctTvsFZ/Oje/kGnHbpWHYBe8IAJe4C0QNc3t8uM/0Y8+erSz/7Y1ALuXTEZTMvxXwO6YbX1ey3ujiZw== - leven@^3.1.0: version "3.1.0" resolved "https://registry.yarnpkg.com/leven/-/leven-3.1.0.tgz#77891de834064cccba82ae7842bb6b14a13ed7f2" @@ -6266,20 +5970,6 @@ lines-and-columns@^1.1.6: resolved "https://registry.yarnpkg.com/lines-and-columns/-/lines-and-columns-1.2.4.tgz#eca284f75d2965079309dc0ad9255abb2ebc1632" integrity sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg== -listr2@^3.8.3: - version "3.14.0" - resolved "https://registry.yarnpkg.com/listr2/-/listr2-3.14.0.tgz#23101cc62e1375fd5836b248276d1d2b51fdbe9e" - integrity sha512-TyWI8G99GX9GjE54cJ+RrNMcIFBfwMPxc3XTFiAYGN4s10hWROGtOg7+O6u6LE3mNkyld7RSLE6nrKBvTfcs3g== - dependencies: - cli-truncate "^2.1.0" - colorette "^2.0.16" - log-update "^4.0.0" - p-map "^4.0.0" - rfdc "^1.3.0" - rxjs "^7.5.1" - through "^2.3.8" - wrap-ansi "^7.0.0" - loader-utils@^3.3.1: version "3.3.1" resolved "https://registry.yarnpkg.com/loader-utils/-/loader-utils-3.3.1.tgz#735b9a19fd63648ca7adbd31c2327dfe281304e5" @@ -6297,34 +5987,11 @@ lodash.debounce@^4, lodash.debounce@^4.0.8: resolved "https://registry.yarnpkg.com/lodash.debounce/-/lodash.debounce-4.0.8.tgz#82d79bff30a67c4005ffd5e2515300ad9ca4d7af" 
integrity sha512-FT1yDzDYEoYWhnSGnpE/4Kj1fLZkDFyqRb7fNt6FdYOSxlUWAtp42Eh6Wb0rGIv/m9Bgo7x4GhQbm5Ys4SG5ow== -lodash.once@^4.1.1: - version "4.1.1" - resolved "https://registry.yarnpkg.com/lodash.once/-/lodash.once-4.1.1.tgz#0dd3971213c7c56df880977d504c88fb471a97ac" - integrity sha512-Sb487aTOCr9drQVL8pIxOzVhafOjZN9UU54hiN8PU3uAiSV7lx1yYNpbNmex2PK6dSJoNTSJUUswT651yww3Mg== - lodash@^4.15.0, lodash@^4.17.21: version "4.17.21" resolved "https://registry.yarnpkg.com/lodash/-/lodash-4.17.21.tgz#679591c564c3bffaae8454cf0b3df370c3d6911c" integrity sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg== -log-symbols@^4.0.0: - version "4.1.0" - resolved "https://registry.yarnpkg.com/log-symbols/-/log-symbols-4.1.0.tgz#3fbdbb95b4683ac9fc785111e792e558d4abd503" - integrity sha512-8XPvpAA8uyhfteu8pIvQxpJZ7SYYdpUivZpGy6sFsBuKRY/7rQGavedeB8aK+Zkyq6upMFVL/9AW6vOYzfRyLg== - dependencies: - chalk "^4.1.0" - is-unicode-supported "^0.1.0" - -log-update@^4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/log-update/-/log-update-4.0.0.tgz#589ecd352471f2a1c0c570287543a64dfd20e0a1" - integrity sha512-9fkkDevMefjg0mmzWFBW8YkFP91OrizzkW3diF7CpG+S2EYdy4+TVfGwz1zeF8x7hCx1ovSPTOE9Ngib74qqUg== - dependencies: - ansi-escapes "^4.3.0" - cli-cursor "^3.1.0" - slice-ansi "^4.0.0" - wrap-ansi "^6.2.0" - loose-envify@^1.0.0, loose-envify@^1.4.0: version "1.4.0" resolved "https://registry.yarnpkg.com/loose-envify/-/loose-envify-1.4.0.tgz#71ee51fa7be4caec1a63839f7e682d8132d30caf" @@ -6411,7 +6078,7 @@ mime-db@1.52.0: resolved "https://registry.yarnpkg.com/mime-db/-/mime-db-1.52.0.tgz#bbabcdc02859f4987301c856e3387ce5ec43bf70" integrity sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg== -mime-types@^2.1.12, mime-types@~2.1.19: +mime-types@^2.1.12: version "2.1.35" resolved "https://registry.yarnpkg.com/mime-types/-/mime-types-2.1.35.tgz#381a871b62a734450660ae3deee44813f70d959a" integrity 
sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw== @@ -6442,6 +6109,13 @@ minimatch@^10.1.1: dependencies: "@isaacs/brace-expansion" "^5.0.0" +minimatch@^10.2.1: + version "10.2.4" + resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-10.2.4.tgz#465b3accbd0218b8281f5301e27cedc697f96fde" + integrity sha512-oRjTw/97aTBN0RHbYCdtF1MQfvusSIBQM0IZEgzl6426+8jSC0nF1a/GmnVLpfB9yyr6g6FTqWqiZVbxrtaCIg== + dependencies: + brace-expansion "^5.0.2" + minimatch@^3.0.4: version "3.1.2" resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-3.1.2.tgz#19cd194bfd3e428f049a70817c038d89ab4be35b" @@ -6449,29 +6123,17 @@ minimatch@^3.0.4: dependencies: brace-expansion "^1.1.7" -minimatch@^7.4.3: - version "7.4.6" - resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-7.4.6.tgz#845d6f254d8f4a5e4fd6baf44d5f10c8448365fb" - integrity sha512-sBz8G/YjVniEz6lKPNpKxXwazJe4c19fEfV2GDMX6AjFz+MX9uDWIZW8XreVhkFW3fkIdTv/gxWr/Kks5FFAVw== - dependencies: - brace-expansion "^2.0.1" - -minimist@^1.2.8: - version "1.2.8" - resolved "https://registry.yarnpkg.com/minimist/-/minimist-1.2.8.tgz#c1a464e7693302e082a075cee0c057741ac4772c" - integrity sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA== - minipass@^7.1.2: version "7.1.2" resolved "https://registry.yarnpkg.com/minipass/-/minipass-7.1.2.tgz#93a9626ce5e5e66bd4db86849e7515e92340a707" integrity sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw== -monaco-editor@0.54.0: - version "0.54.0" - resolved "https://registry.yarnpkg.com/monaco-editor/-/monaco-editor-0.54.0.tgz#c0d6ebb46b83f1bef6f67f6aa471e38ba7ef8231" - integrity sha512-hx45SEUoLatgWxHKCmlLJH81xBo0uXP4sRkESUpmDQevfi+e7K1VuiSprK6UpQ8u4zOcKNiH0pMvHvlMWA/4cw== +monaco-editor@0.55.1: + version "0.55.1" + resolved "https://registry.yarnpkg.com/monaco-editor/-/monaco-editor-0.55.1.tgz#e74c6fe5a6bf985b817d2de3eb88d56afc494a1b" + integrity 
sha512-jz4x+TJNFHwHtwuV9vA9rMujcZRb0CEilTEwG2rRSpe/A7Jdkuj8xPKttCgOh+v/lkHy7HsZ64oj+q3xoAFl9A== dependencies: - dompurify "3.1.7" + dompurify "3.2.7" marked "14.0.0" ms@2.1.2: @@ -6479,7 +6141,7 @@ ms@2.1.2: resolved "https://registry.yarnpkg.com/ms/-/ms-2.1.2.tgz#d09d1f357b443f493382a8eb3ccd183872ae6009" integrity sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w== -ms@^2.1.1, ms@^2.1.3: +ms@^2.1.3: version "2.1.3" resolved "https://registry.yarnpkg.com/ms/-/ms-2.1.3.tgz#574c8138ce1d2b5861f0b44579dbadd60c6615b2" integrity sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA== @@ -6552,7 +6214,7 @@ normalize-path@^3.0.0: resolved "https://registry.yarnpkg.com/normalize-path/-/normalize-path-3.0.0.tgz#0dcd69ff23a1c9b11fd0978316644a0388216a65" integrity sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA== -npm-run-path@^4.0.0, npm-run-path@^4.0.1: +npm-run-path@^4.0.1: version "4.0.1" resolved "https://registry.yarnpkg.com/npm-run-path/-/npm-run-path-4.0.1.tgz#b7ecd1e5ed53da8e37a55e1c2269e0b97ed748ea" integrity sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw== @@ -6597,14 +6259,7 @@ object.assign@^4.1.4: has-symbols "^1.0.3" object-keys "^1.1.1" -once@^1.3.1, once@^1.4.0: - version "1.4.0" - resolved "https://registry.yarnpkg.com/once/-/once-1.4.0.tgz#583b1aa775961d4b113ac17d9c50baef9dd76bd1" - integrity sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w== - dependencies: - wrappy "1" - -onetime@^5.1.0, onetime@^5.1.2: +onetime@^5.1.2: version "5.1.2" resolved "https://registry.yarnpkg.com/onetime/-/onetime-5.1.2.tgz#d0e96ebb56b07476df1dd9c4806e5237985ca45e" integrity sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg== @@ -6625,11 +6280,6 @@ openapi-server-url-templating@^1.3.0: dependencies: apg-lite "^1.0.4" 
-ospath@^1.2.2: - version "1.2.2" - resolved "https://registry.yarnpkg.com/ospath/-/ospath-1.2.2.tgz#1276639774a3f8ef2572f7fe4280e0ea4550c07b" - integrity sha512-o6E5qJV5zkAbIDNhGSIlyOhScKXgQrSRMilfph0clDfM0nEnBOlKlH4sWDmG95BW/CvwNz0vmm7dJVtU2KlMiA== - p-limit@^2.2.0: version "2.3.0" resolved "https://registry.yarnpkg.com/p-limit/-/p-limit-2.3.0.tgz#3dd33c647a214fdfffd835933eb086da0dc21db1" @@ -6651,13 +6301,6 @@ p-locate@^4.1.0: dependencies: p-limit "^2.2.0" -p-map@^4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/p-map/-/p-map-4.0.0.tgz#bb2f95a5eda2ec168ec9274e06a747c3e2904d2b" - integrity sha512-/bjOqmgETBYB5BoEeGVea8dmvHb2m9GLy1E9W43yeyfP6QQCZGFNa+XRceJEuDB6zqr+gKpIAmlLebMpykw/MQ== - dependencies: - aggregate-error "^3.0.0" - p-try@^2.0.0: version "2.2.0" resolved "https://registry.yarnpkg.com/p-try/-/p-try-2.2.0.tgz#cb2868540e313d61de58fafbe35ce9004d5540e6" @@ -6733,11 +6376,6 @@ path-type@^4.0.0: resolved "https://registry.yarnpkg.com/path-type/-/path-type-4.0.0.tgz#84ed01c0a7ba380afe09d90a8c180dcd9d03043b" integrity sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw== -pend@~1.2.0: - version "1.2.0" - resolved "https://registry.yarnpkg.com/pend/-/pend-1.2.0.tgz#7a57eb550a6783f9115331fcf4663d5c8e007a50" - integrity sha512-F3asv42UuXchdzt+xXqfW1OGlVBe+mxa2mqI0pg5yAHZPvFmY3Y6drSf/GQ1A86WgWEN9Kzh/WrgKa6iGcHXLg== - performance-now@^2.1.0: version "2.1.0" resolved "https://registry.yarnpkg.com/performance-now/-/performance-now-2.1.0.tgz#6309f4e0e5fa913ec1c69307ae364b4b377c9e7b" @@ -6763,11 +6401,6 @@ picomatch@^4.0.2, picomatch@^4.0.3: resolved "https://registry.yarnpkg.com/picomatch/-/picomatch-4.0.3.tgz#796c76136d1eead715db1e7bad785dedd695a042" integrity sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q== -pify@^2.2.0: - version "2.3.0" - resolved "https://registry.yarnpkg.com/pify/-/pify-2.3.0.tgz#ed141a6ac043a849ea588498e7dca8b15330e90c" - integrity 
sha512-udgsAY+fTnvv7kI7aaxbqwWNb0AHiB0qBO89PZKPkoTmGOgdbrHDKD+0B2X4uTfJ/FT1R09r9gTsjUjNJotuog== - pirates@^4.0.7: version "4.0.7" resolved "https://registry.yarnpkg.com/pirates/-/pirates-4.0.7.tgz#643b4a18c4257c8a65104b73f3049ce9a0a15e22" @@ -6780,6 +6413,20 @@ pkg-dir@^4.2.0: dependencies: find-up "^4.0.0" +playwright-core@1.58.2: + version "1.58.2" + resolved "https://registry.yarnpkg.com/playwright-core/-/playwright-core-1.58.2.tgz#ac5f5b4b10d29bcf934415f0b8d133b34b0dcb13" + integrity sha512-yZkEtftgwS8CsfYo7nm0KE8jsvm6i/PTgVtB8DL726wNf6H2IMsDuxCpJj59KDaxCtSnrWan2AeDqM7JBaultg== + +playwright@1.58.2: + version "1.58.2" + resolved "https://registry.yarnpkg.com/playwright/-/playwright-1.58.2.tgz#afe547164539b0bcfcb79957394a7a3fa8683cfd" + integrity sha512-vA30H8Nvkq/cPBnNw4Q8TWz1EJyqgpuinBcHET0YVJVFldr8JDNiU9LaWAE1KqSkRYazuaBhTpB5ZzShOezQ6A== + dependencies: + playwright-core "1.58.2" + optionalDependencies: + fsevents "2.3.2" + possible-typed-array-names@^1.0.0: version "1.1.0" resolved "https://registry.yarnpkg.com/possible-typed-array-names/-/possible-typed-array-names-1.1.0.tgz#93e3582bc0e5426586d9d07b79ee40fc841de4ae" @@ -6808,11 +6455,6 @@ postcss@^8.5.6: picocolors "^1.1.1" source-map-js "^1.2.1" -pretty-bytes@^5.6.0: - version "5.6.0" - resolved "https://registry.yarnpkg.com/pretty-bytes/-/pretty-bytes-5.6.0.tgz#356256f643804773c82f64723fe78c92c62beaeb" - integrity sha512-FFw039TmrBqFK8ma/7OL3sDz/VytdtJr044/QUJtH0wK9lb9jLq9tJyIxUwtQJHwar2BqtiA4iCWSwo9JLkzFg== - pretty-format@30.2.0, pretty-format@^30.0.0: version "30.2.0" resolved "https://registry.yarnpkg.com/pretty-format/-/pretty-format-30.2.0.tgz#2d44fe6134529aed18506f6d11509d8a62775ebe" @@ -6836,11 +6478,6 @@ prismjs@^1.30.0: resolved "https://registry.yarnpkg.com/prismjs/-/prismjs-1.30.0.tgz#d9709969d9d4e16403f6f348c63553b19f0975a9" integrity sha512-DEvV2ZF2r2/63V+tK8hQvrR2ZGn10srHbXviTlcv7Kpzw8jWiNTqbVgjO3IY8RxrrOUF8VPMQQFysYYYv0YZxw== -process@^0.11.10: - version "0.11.10" - resolved 
"https://registry.yarnpkg.com/process/-/process-0.11.10.tgz#7332300e840161bda3e69a1d1d91a7d4bc16f182" - integrity sha512-cdGef/drWFoydD1JsMzuFf8100nZl+GT+yacc2bEced5f9Rjk4z+WtFUTBu9PhOi9j/jfmBPu0mMEY4wIdAF8A== - promise@^8.1.0: version "8.3.0" resolved "https://registry.yarnpkg.com/promise/-/promise-8.3.0.tgz#8cb333d1edeb61ef23869fbb8a4ea0279ab60e0a" @@ -6862,34 +6499,11 @@ property-information@^7.0.0: resolved "https://registry.yarnpkg.com/property-information/-/property-information-7.1.0.tgz#b622e8646e02b580205415586b40804d3e8bfd5d" integrity sha512-TwEZ+X+yCJmYfL7TPUOcvBZ4QfoT5YenQiJuX//0th53DE6w0xxLEtfK3iyryQFddXuvkIk51EEgrJQ0WJkOmQ== -proxy-from-env@1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/proxy-from-env/-/proxy-from-env-1.0.0.tgz#33c50398f70ea7eb96d21f7b817630a55791c7ee" - integrity sha512-F2JHgJQ1iqwnHDcQjVBsq3n/uoaFL+iPW/eAeL7kVxy/2RrWaN4WroKjjvbsoRtv0ftelNyC01bjRhn/bhcf4A== - proxy-from-env@^1.1.0: version "1.1.0" resolved "https://registry.yarnpkg.com/proxy-from-env/-/proxy-from-env-1.1.0.tgz#e102f16ca355424865755d2c9e8ea4f24d58c3e2" integrity sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg== -psl@^1.1.33: - version "1.9.0" - resolved "https://registry.yarnpkg.com/psl/-/psl-1.9.0.tgz#d0df2a137f00794565fcaf3b2c00cd09f8d5a5a7" - integrity sha512-E/ZsdU4HLs/68gYzgGTkMicWTLPdAftJLfJFlLUAAKZGkStNU72sZjT66SnMDVOfOWY/YAoiD7Jxa9iHvngcag== - -pump@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/pump/-/pump-3.0.0.tgz#b4a2116815bde2f4e1ea602354e8c75565107a64" - integrity sha512-LwZy+p3SFs1Pytd/jYct4wpv49HiYCqd9Rlc5ZVdk0V+8Yzv6jR5Blk3TRmPL1ft69TxP0IMZGJ+WPFU2BFhww== - dependencies: - end-of-stream "^1.1.0" - once "^1.3.1" - -punycode@^2.1.1: - version "2.1.1" - resolved "https://registry.yarnpkg.com/punycode/-/punycode-2.1.1.tgz#b58b010ac40c22c5657616c8d2c2c02c7bf479ec" - integrity sha512-XRsRjdf+j5ml+y/6GKHPZbrF/8p2Yga0JPtdqTIY2Xe5ohJPD9saDJJLPvp9+NSBprVvevdXZybnj2cv8OEd0A== - 
punycode@^2.3.1: version "2.3.1" resolved "https://registry.yarnpkg.com/punycode/-/punycode-2.3.1.tgz#027422e2faec0b25e1549c3e1bd8309b9133b6e5" @@ -6900,13 +6514,6 @@ pure-rand@^7.0.0: resolved "https://registry.yarnpkg.com/pure-rand/-/pure-rand-7.0.1.tgz#6f53a5a9e3e4a47445822af96821ca509ed37566" integrity sha512-oTUZM/NAZS8p7ANR3SHh30kXB+zK2r2BPcEn/awJIbOvq82WoMN4p62AWWp3Hhw50G0xMsw1mhIBLqHw64EcNQ== -qs@6.10.4: - version "6.10.4" - resolved "https://registry.yarnpkg.com/qs/-/qs-6.10.4.tgz#6a3003755add91c0ec9eacdc5f878b034e73f9e7" - integrity sha512-OQiU+C+Ds5qiH91qh/mg0w+8nwQuLjM4F4M/PbmhDOoYehPh+Fb0bDjtR1sOvy7YKxvj28Y/M0PhP5uVX0kB+g== - dependencies: - side-channel "^1.0.4" - querystringify@^2.1.1: version "2.2.0" resolved "https://registry.yarnpkg.com/querystringify/-/querystringify-2.2.0.tgz#3345941b4153cb9d082d8eee4cda2016a9aef7f6" @@ -6972,10 +6579,10 @@ react-debounce-input@=3.3.0: lodash.debounce "^4" prop-types "^15.8.1" -react-dom@19.2.0: - version "19.2.0" - resolved "https://registry.yarnpkg.com/react-dom/-/react-dom-19.2.0.tgz#00ed1e959c365e9a9d48f8918377465466ec3af8" - integrity sha512-UlbRu4cAiGaIewkPyiRGJk0imDN2T3JjieT6spoL2UeSf5od4n5LB/mQ4ejmxhCFT1tYe8IvaFulzynWovsEFQ== +react-dom@19.2.4: + version "19.2.4" + resolved "https://registry.yarnpkg.com/react-dom/-/react-dom-19.2.4.tgz#6fac6bd96f7db477d966c7ec17c1a2b1ad8e6591" + integrity sha512-AXJdLo8kgMbimY95O2aKQqsz2iWi9jMgKJhRBAxECE4IFxfcazB2LmzloIoibJI3C12IlY20+KFaLv+71bUJeQ== dependencies: scheduler "^0.27.0" @@ -7016,10 +6623,10 @@ react-is@^19.2.0: resolved "https://registry.yarnpkg.com/react-is/-/react-is-19.2.0.tgz#ddc3b4a4e0f3336c3847f18b806506388d7b9973" integrity sha512-x3Ax3kNSMIIkyVYhWPyO09bu0uttcAIoecO/um/rKGQ4EltYWVYtyiGkS/3xMynrbVQdS69Jhlv8FXUEZehlzA== -react-monaco-editor@0.59.0: - version "0.59.0" - resolved "https://registry.yarnpkg.com/react-monaco-editor/-/react-monaco-editor-0.59.0.tgz#a3cdef4a47fd0cb899f412c9d66b365c51a76096" - integrity 
sha512-SggqfZCdUauNk7GI0388bk5n25zYsQ1ai1i+VhxAgwbCH+MTGl7L1fBNTJ6V+oXeUApf+bpzikprHJEZm9J/zA== +react-is@^19.2.3: + version "19.2.4" + resolved "https://registry.yarnpkg.com/react-is/-/react-is-19.2.4.tgz#a080758243c572ccd4a63386537654298c99d135" + integrity sha512-W+EWGn2v0ApPKgKKCy/7s7WHXkboGcsrXE+2joLyVxkbyVQfO3MUEaUQDHoSmb8TFFrSKYa9mw64WZHNHSDzYA== react-number-format@5.4.4: version "5.4.4" @@ -7034,10 +6641,10 @@ react-redux@^9.2.0: "@types/use-sync-external-store" "^0.0.6" use-sync-external-store "^1.4.0" -react-router@7.12.0: - version "7.12.0" - resolved "https://registry.yarnpkg.com/react-router/-/react-router-7.12.0.tgz#459a86862abbedd02e76e686751fe71f9fd73a4f" - integrity sha512-kTPDYPFzDVGIIGNLS5VJykK0HfHLY5MF3b+xj0/tTyNYL1gF1qs7u67Z9jEhQk2sQ98SUaHxlG31g1JtF7IfVw== +react-router@7.13.1: + version "7.13.1" + resolved "https://registry.yarnpkg.com/react-router/-/react-router-7.13.1.tgz#5e2b3ebafd6c78d9775e135474bf5060645077f7" + integrity sha512-td+xP4X2/6BJvZoX6xw++A2DdEi++YypA69bJUV5oVvqf6/9/9nNlD70YO1e9d3MyamJEBQFEzk6mbfDYbqrSA== dependencies: cookie "^1.0.1" set-cookie-parser "^2.6.0" @@ -7064,10 +6671,10 @@ react-transition-group@^4.4.5: loose-envify "^1.4.0" prop-types "^15.6.2" -react@19.2.0: - version "19.2.0" - resolved "https://registry.yarnpkg.com/react/-/react-19.2.0.tgz#d33dd1721698f4376ae57a54098cb47fc75d93a5" - integrity sha512-tmbWg6W31tQLeB5cdIBOicJDJRR2KzXsV7uSK9iNfLWQ5bIZfxuPEHp7M8wiHyHnn0DD1i7w3Zmin0FtkrwoCQ== +react@19.2.4: + version "19.2.4" + resolved "https://registry.yarnpkg.com/react/-/react-19.2.4.tgz#438e57baa19b77cb23aab516cf635cd0579ee09a" + integrity sha512-9nfp2hYpCwOjAN+8TZFGhtWEwgvWHXqESH8qT89AT/lWklpLON22Lc8pEtnpsZz7VmawabSU0gCjnj8aC0euHQ== redent@^3.0.0: version "3.0.0" @@ -7167,13 +6774,6 @@ repeat-string@^1.5.2: resolved "https://registry.yarnpkg.com/repeat-string/-/repeat-string-1.6.1.tgz#8dcae470e1c88abc2d600fff4a776286da75e637" integrity 
sha512-PV0dzCYDNfRi1jCDbJzpW7jNNDRuCOG/jI5ctQcGKt/clZD+YcPS3yIlWuTJMmESC8aevCFmWJy5wjAFgNqN6w== -request-progress@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/request-progress/-/request-progress-3.0.0.tgz#4ca754081c7fec63f505e4faa825aa06cd669dbe" - integrity sha512-MnWzEHHaxHO2iWiQuHrUPBi/1WeBf5PkxQqNyNvLl9VAYSdXkP8tQ3pBSeCPD+yw0v0Aq1zosWLz0BdeXpWwZg== - dependencies: - throttleit "^1.0.0" - require-directory@^2.1.1: version "2.1.1" resolved "https://registry.yarnpkg.com/require-directory/-/require-directory-2.1.1.tgz#8c64ad5fd30dab1c976e2344ffe7f792a6a6df42" @@ -7215,24 +6815,11 @@ resolve@^1.14.2, resolve@^1.19.0: path-parse "^1.0.7" supports-preserve-symlinks-flag "^1.0.0" -restore-cursor@^3.1.0: - version "3.1.0" - resolved "https://registry.yarnpkg.com/restore-cursor/-/restore-cursor-3.1.0.tgz#39f67c54b3a7a58cea5236d95cf0034239631f7e" - integrity sha512-l+sSefzHpj5qimhFSE5a8nufZYAM3sBSVMAPtYkmC+4EH2anSGaEMXSD0izRQbu9nfyQ9y5JrVmp7E8oZrUjvA== - dependencies: - onetime "^5.1.0" - signal-exit "^3.0.2" - ret@^0.2.0: version "0.2.2" resolved "https://registry.yarnpkg.com/ret/-/ret-0.2.2.tgz#b6861782a1f4762dce43402a71eb7a283f44573c" integrity sha512-M0b3YWQs7R3Z917WRQy1HHA7Ba7D8hvZg6UE5mLykJxQVE2ju0IXbGlaHPPlkY+WN7wFP+wUMXmBFA0aV6vYGQ== -rfdc@^1.3.0: - version "1.3.0" - resolved "https://registry.yarnpkg.com/rfdc/-/rfdc-1.3.0.tgz#d0b7c441ab2720d05dc4cf26e01c89631d9da08b" - integrity sha512-V2hovdzFbOi77/WajaSMXk2OLm+xNIeQdMMuB7icj7bk6zi2F8GGAxigcnDFpJHbNyNcgyJDiP+8nOrY5cZGrA== - rollup@^4.43.0: version "4.53.2" resolved "https://registry.yarnpkg.com/rollup/-/rollup-4.53.2.tgz#98e73ee51e119cb9d88b07d026c959522416420a" @@ -7269,19 +6856,12 @@ rrweb-cssom@^0.8.0: resolved "https://registry.yarnpkg.com/rrweb-cssom/-/rrweb-cssom-0.8.0.tgz#3021d1b4352fbf3b614aaeed0bc0d5739abe0bc2" integrity sha512-guoltQEx+9aMf2gDZ0s62EcV8lsXR+0w8915TC3ITdn2YueuNjdAYh/levpU9nFaoChh9RUS5ZdQMrKfVEN9tw== -rxjs@^7.5.1: - version "7.8.0" - resolved 
"https://registry.yarnpkg.com/rxjs/-/rxjs-7.8.0.tgz#90a938862a82888ff4c7359811a595e14e1e09a4" - integrity sha512-F2+gxDshqmIub1KdvZkaEfGDwLNpPvk9Fs6LD/MyQxNgMds/WH9OdDDXOmxUZpME+iSK3rQCctkL0DYyytUqMg== - dependencies: - tslib "^2.1.0" - -safe-buffer@^5.0.1, safe-buffer@^5.1.0, safe-buffer@^5.1.2, safe-buffer@^5.2.1: +safe-buffer@^5.1.0, safe-buffer@^5.2.1: version "5.2.1" resolved "https://registry.yarnpkg.com/safe-buffer/-/safe-buffer-5.2.1.tgz#1eaf9fa9bdb1fdd4ec75f58f9cdb4e6b7827eec6" integrity sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ== -"safer-buffer@>= 2.1.2 < 3.0.0", safer-buffer@^2.0.2, safer-buffer@^2.1.0, safer-buffer@~2.1.0: +"safer-buffer@>= 2.1.2 < 3.0.0": version "2.1.2" resolved "https://registry.yarnpkg.com/safer-buffer/-/safer-buffer-2.1.2.tgz#44fa161b0187b9549dd84bb91802f9bd8385cd6a" integrity sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg== @@ -7303,7 +6883,7 @@ semver@^6.0.0, semver@^6.1.1, semver@^6.1.2, semver@^6.3.0, semver@^6.3.1: resolved "https://registry.yarnpkg.com/semver/-/semver-6.3.1.tgz#556d2ef8689146e46dcea4bfdd095f3434dffcb4" integrity sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA== -semver@^7.5.3, semver@^7.5.4: +semver@^7.5.4: version "7.5.4" resolved "https://registry.yarnpkg.com/semver/-/semver-7.5.4.tgz#483986ec4ed38e1c6c48c34894a9182dbff68a6e" integrity sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA== @@ -7379,7 +6959,7 @@ side-channel@^1.0.4: get-intrinsic "^1.0.2" object-inspect "^1.9.0" -signal-exit@^3.0.2, signal-exit@^3.0.3: +signal-exit@^3.0.3: version "3.0.7" resolved "https://registry.yarnpkg.com/signal-exit/-/signal-exit-3.0.7.tgz#a9a1767f8af84155114eaabd73f99273c8f59ad9" integrity sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ== @@ -7394,24 +6974,6 @@ slash@^3.0.0: resolved 
"https://registry.yarnpkg.com/slash/-/slash-3.0.0.tgz#6539be870c165adbd5240220dbe361f1bc4d4634" integrity sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q== -slice-ansi@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/slice-ansi/-/slice-ansi-3.0.0.tgz#31ddc10930a1b7e0b67b08c96c2f49b77a789787" - integrity sha512-pSyv7bSTC7ig9Dcgbw9AuRNUb5k5V6oDudjZoMBSr13qpLBG7tB+zgCkARjq7xIUgdz5P1Qe8u+rSGdouOOIyQ== - dependencies: - ansi-styles "^4.0.0" - astral-regex "^2.0.0" - is-fullwidth-code-point "^3.0.0" - -slice-ansi@^4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/slice-ansi/-/slice-ansi-4.0.0.tgz#500e8dd0fd55b05815086255b3195adf2a45fe6b" - integrity sha512-qMCMfhY040cVHT43K9BFygqYbUPFZKHOg7K73mtTWJRb8pyP3fzf4Ixd5SzdEJQ6MRUg/WBnOLxghZtKKurENQ== - dependencies: - ansi-styles "^4.0.0" - astral-regex "^2.0.0" - is-fullwidth-code-point "^3.0.0" - source-map-js@^1.2.1: version "1.2.1" resolved "https://registry.yarnpkg.com/source-map-js/-/source-map-js-1.2.1.tgz#1ce5650fddd87abc099eda37dcff024c2667ae46" @@ -7445,21 +7007,6 @@ sprintf-js@~1.0.2: resolved "https://registry.yarnpkg.com/sprintf-js/-/sprintf-js-1.0.3.tgz#04e6926f662895354f3dd015203633b857297e2c" integrity sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g== -sshpk@^1.14.1: - version "1.17.0" - resolved "https://registry.yarnpkg.com/sshpk/-/sshpk-1.17.0.tgz#578082d92d4fe612b13007496e543fa0fbcbe4c5" - integrity sha512-/9HIEs1ZXGhSPE8X6Ccm7Nam1z8KcoCqPdI7ecm1N33EzAetWahvQWVqLZtaZQ+IDKX4IyA2o0gBzqIMkAagHQ== - dependencies: - asn1 "~0.2.3" - assert-plus "^1.0.0" - bcrypt-pbkdf "^1.0.0" - dashdash "^1.12.0" - ecc-jsbn "~0.1.1" - getpass "^0.1.1" - jsbn "~0.1.0" - safer-buffer "^2.0.2" - tweetnacl "~0.14.0" - stack-utils@^2.0.6: version "2.0.6" resolved "https://registry.yarnpkg.com/stack-utils/-/stack-utils-2.0.6.tgz#aaf0748169c02fc33c8232abccf933f54a1cc34f" @@ -7645,18 +7192,19 @@ 
supports-preserve-symlinks-flag@^1.0.0: resolved "https://registry.yarnpkg.com/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz#6eda4bd344a3c94aea376d4cc31bc77311039e09" integrity sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w== -swagger-client@^3.36.0: - version "3.36.0" - resolved "https://registry.yarnpkg.com/swagger-client/-/swagger-client-3.36.0.tgz#3880f74c993d45f547b119fc87ea022793bc1d25" - integrity sha512-9fkjxGHXuKy20jj8zwE6RwgFSOGKAyOD5U7aKgW/+/futtHZHOdZeqiEkb97sptk2rdBv7FEiUQDNlWZR186RA== +swagger-client@^3.37.0: + version "3.37.0" + resolved "https://registry.yarnpkg.com/swagger-client/-/swagger-client-3.37.0.tgz#1ac57955d650cf92105e11900ca7b640f5604cc7" + integrity sha512-pzU+B+DkUbrSwlj4/E8sGeP1w84/CFgDJAt80fHu650TxnOHbqFLGQjiE6luvpRxTPdfK2zRHJP7I6CgUkI8yA== dependencies: "@babel/runtime-corejs3" "^7.22.15" "@scarf/scarf" "=1.4.0" - "@swagger-api/apidom-core" "^1.0.0-rc.1" - "@swagger-api/apidom-error" "^1.0.0-rc.1" - "@swagger-api/apidom-json-pointer" "^1.0.0-rc.1" - "@swagger-api/apidom-ns-openapi-3-1" "^1.0.0-rc.1" - "@swagger-api/apidom-reference" "^1.0.0-rc.1" + "@swagger-api/apidom-core" "^1.6.0" + "@swagger-api/apidom-error" "^1.6.0" + "@swagger-api/apidom-json-pointer" "^1.6.0" + "@swagger-api/apidom-ns-openapi-3-1" "^1.6.0" + "@swagger-api/apidom-ns-openapi-3-2" "^1.6.0" + "@swagger-api/apidom-reference" "^1.6.0" "@swaggerexpert/cookie" "^2.0.2" deepmerge "~4.3.0" fast-json-patch "^3.0.0-1" @@ -7669,10 +7217,10 @@ swagger-client@^3.36.0: ramda "^0.30.1" ramda-adjunct "^5.1.0" -swagger-ui-react@5.30.2: - version "5.30.2" - resolved "https://registry.yarnpkg.com/swagger-ui-react/-/swagger-ui-react-5.30.2.tgz#d02fe73e3f895f67d1ab8bc02aadccfad55b1a2b" - integrity sha512-0tS9GOcswKuQrIpCyvDoCDs6xS8B6MRC+iE7P99WfVXDhAIU+U7iFHuS4e7zucSh9qXvcL7KsXs623c+4oBe6w== +swagger-ui-react@5.32.0: + version "5.32.0" + resolved 
"https://registry.yarnpkg.com/swagger-ui-react/-/swagger-ui-react-5.32.0.tgz#fd764a7be746b5fdbff748fee6ef723eb05b4846" + integrity sha512-2mmrtvfp0EA90pdT8qXTMu26ex03TG2bsjvDAwXhdfCm+9foyadYJN+nEvDHM6/c6/xtXbdAsb6cVxBvbltnpw== dependencies: "@babel/runtime-corejs3" "^7.27.1" "@scarf/scarf" "=1.4.0" @@ -7685,7 +7233,7 @@ swagger-ui-react@5.30.2: ieee754 "^1.2.1" immutable "^3.x.x" js-file-download "^0.4.12" - js-yaml "=4.1.0" + js-yaml "=4.1.1" lodash "^4.17.21" prop-types "^15.8.1" randexp "^0.5.3" @@ -7703,7 +7251,7 @@ swagger-ui-react@5.30.2: reselect "^5.1.1" serialize-error "^8.1.0" sha.js "^2.4.12" - swagger-client "^3.36.0" + swagger-client "^3.37.0" url-parse "^1.5.10" xml "=1.0.1" xml-but-prettier "^1.0.1" @@ -7721,7 +7269,7 @@ synckit@^0.11.8: dependencies: "@pkgr/core" "^0.2.9" -tapable@^2.2.0: +tapable@^2.3.0: version "2.3.0" resolved "https://registry.yarnpkg.com/tapable/-/tapable-2.3.0.tgz#7e3ea6d5ca31ba8e078b560f0d83ce9a14aa8be6" integrity sha512-g9ljZiwki/LfxmQADO3dEY1CbpmXT5Hm2fJ+QaGKwSXUylMybePR7/67YW7jOrrvjEgL1Fmz5kzyAjWVWLlucg== @@ -7735,16 +7283,6 @@ test-exclude@^6.0.0: glob "^7.1.4" minimatch "^3.0.4" -throttleit@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/throttleit/-/throttleit-1.0.0.tgz#9e785836daf46743145a5984b6268d828528ac6c" - integrity sha512-rkTVqu6IjfQ/6+uNuuc3sZek4CEYxTJom3IktzgdSxcZqdARuebbA/f4QmAxMQIxqq9ZLEUkSYqvuk1I6VKq4g== - -through@^2.3.8: - version "2.3.8" - resolved "https://registry.yarnpkg.com/through/-/through-2.3.8.tgz#0dd4c9ffaabc357960b1b724115d7e0e86a2e1f5" - integrity sha512-w89qg7PI8wAdvX60bMDP+bFoD5Dvhm9oLheFp5O4a2QF0cSBGsBX4qZmadPMvVqlLJBBci+WqGGOAPvcDeNSVg== - tinyglobby@^0.2.15: version "0.2.15" resolved "https://registry.yarnpkg.com/tinyglobby/-/tinyglobby-0.2.15.tgz#e228dd1e638cea993d2fdb4fcd2d4602a79951c2" @@ -7765,11 +7303,6 @@ tldts@^6.1.32: dependencies: tldts-core "^6.1.86" -tmp@~0.2.1: - version "0.2.5" - resolved 
"https://registry.yarnpkg.com/tmp/-/tmp-0.2.5.tgz#b06bcd23f0f3c8357b426891726d16015abfd8f8" - integrity sha512-voyz6MApa1rQGUxT3E+BK7/ROe8itEx7vD8/HEvt4xwXucvQ5G5oeEiHkmHZJuBO21RpOf+YYm9MOivj709jow== - tmpl@1.0.5: version "1.0.5" resolved "https://registry.yarnpkg.com/tmpl/-/tmpl-1.0.5.tgz#8683e0b902bb9c20c4f726e3c0b69f36518c07cc" @@ -7801,16 +7334,6 @@ toggle-selection@^1.0.6: resolved "https://registry.yarnpkg.com/toggle-selection/-/toggle-selection-1.0.6.tgz#6e45b1263f2017fa0acc7d89d78b15b8bf77da32" integrity sha512-BiZS+C1OS8g/q2RRbJmy59xpyghNBqrr6k5L/uKBGRsTfxmu3ffiRnd8mlGPUVayg8pvfi5urfnu8TU7DVOkLQ== -tough-cookie@^4.1.3: - version "4.1.3" - resolved "https://registry.yarnpkg.com/tough-cookie/-/tough-cookie-4.1.3.tgz#97b9adb0728b42280aa3d814b6b999b2ff0318bf" - integrity sha512-aX/y5pVRkfRnfmuX+OdbSdXvPe6ieKX/G2s7e98f4poJHnqH3281gDPm/metm6E/WRamfx7WC4HUqkWHfQHprw== - dependencies: - psl "^1.1.33" - punycode "^2.1.1" - universalify "^0.2.0" - url-parse "^1.5.3" - tough-cookie@^5.1.1: version "5.1.2" resolved "https://registry.yarnpkg.com/tough-cookie/-/tough-cookie-5.1.2.tgz#66d774b4a1d9e12dc75089725af3ac75ec31bed7" @@ -7864,11 +7387,6 @@ tslib@2.6.2: resolved "https://registry.yarnpkg.com/tslib/-/tslib-2.6.2.tgz#703ac29425e7b37cd6fd456e92404d46d1f3e4ae" integrity sha512-AEYxH93jGFPn/a2iVAwW87VuUIkR1FVUKB77NwMF7nBTDkDrrT/Hpt/IrCJ0QXhW27jTBDcf5ZY7w6RiqTMw2Q== -tslib@^2.1.0: - version "2.4.1" - resolved "https://registry.yarnpkg.com/tslib/-/tslib-2.4.1.tgz#0d0bfbaac2880b91e22df0768e55be9753a5b17e" - integrity sha512-tGyy4dAjRIEwI7BzsB0lynWgOpfqjUdq91XXAlIWD2OwKBH7oCl/GZG/HT4BOHrTlPMOASlMQ7veyTqpmRcrNA== - tslib@^2.3.0: version "2.5.0" resolved "https://registry.yarnpkg.com/tslib/-/tslib-2.5.0.tgz#42bfed86f5787aeb41d031866c8f402429e0fddf" @@ -7879,18 +7397,6 @@ tslib@^2.4.0: resolved "https://registry.yarnpkg.com/tslib/-/tslib-2.8.1.tgz#612efe4ed235d567e8aba5f2a5fab70280ade83f" integrity 
sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w== -tunnel-agent@^0.6.0: - version "0.6.0" - resolved "https://registry.yarnpkg.com/tunnel-agent/-/tunnel-agent-0.6.0.tgz#27a5dea06b36b04a0a9966774b290868f0fc40fd" - integrity sha512-McnNiV1l8RYeY8tBgEpuodCC1mLUdbSN+CYBL7kJsJNInOP8UjDDEwdk6Mw60vdLLrr5NHKZhMAOSrR2NZuQ+w== - dependencies: - safe-buffer "^5.0.1" - -tweetnacl@^0.14.3, tweetnacl@~0.14.0: - version "0.14.5" - resolved "https://registry.yarnpkg.com/tweetnacl/-/tweetnacl-0.14.5.tgz#5ae68177f192d4456269d108afa93ff8743f4f64" - integrity sha512-KXXFFdAbFXY4geFIwoyNK+f5Z1b7swfXABfL7HXCmoIWMKU3dmS26672A4EeQtDzLKy7SXmfBu51JolvEKwtGA== - type-detect@4.0.8: version "4.0.8" resolved "https://registry.yarnpkg.com/type-detect/-/type-detect-4.0.8.tgz#7646fb5f18871cfbb7749e69bd39a6388eb7450c" @@ -7955,16 +7461,6 @@ unicode-property-aliases-ecmascript@^2.0.0: resolved "https://registry.yarnpkg.com/unicode-property-aliases-ecmascript/-/unicode-property-aliases-ecmascript-2.1.0.tgz#43d41e3be698bd493ef911077c9b131f827e8ccd" integrity sha512-6t3foTQI9qne+OZoVQB/8x8rk2k1eVy1gRXhV3oFQ5T6R1dqQ1xtin3XqSlx3+ATBkliTaR/hHyJBm+LVPNM8w== -universalify@^0.2.0: - version "0.2.0" - resolved "https://registry.yarnpkg.com/universalify/-/universalify-0.2.0.tgz#6451760566fa857534745ab1dde952d1b1761be0" - integrity sha512-CJ1QgKmNg3CwvAv/kOFmtnEN05f0D/cn9QntgNOQlQF9dgvVTHj3t+8JPdjqawCHk7V/KA+fbUqzZ9XWhcqPUg== - -universalify@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/universalify/-/universalify-2.0.0.tgz#75a4984efedc4b08975c5aeb73f530d02df25717" - integrity sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ== - unraw@^3.0.0: version "3.0.0" resolved "https://registry.yarnpkg.com/unraw/-/unraw-3.0.0.tgz#73443ed70d2ab09ccbac2b00525602d5991fbbe3" @@ -7997,11 +7493,6 @@ unrs-resolver@^1.7.11: "@unrs/resolver-binding-win32-ia32-msvc" "1.11.1" "@unrs/resolver-binding-win32-x64-msvc" 
"1.11.1" -untildify@^4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/untildify/-/untildify-4.0.0.tgz#2bc947b953652487e4600949fb091e3ae8cd919b" - integrity sha512-KK8xQ1mkzZeg9inewmFVDNkg3l5LUhoq9kN6iWYB/CC9YMG8HA+c1Q8HwDe6dEX7kErrEVNVBO3fWsVq5iDgtw== - update-browserslist-db@^1.0.9: version "1.0.10" resolved "https://registry.yarnpkg.com/update-browserslist-db/-/update-browserslist-db-1.0.10.tgz#0f54b876545726f17d00cd9a2561e6dade943ff3" @@ -8018,7 +7509,7 @@ update-browserslist-db@^1.1.4: escalade "^3.2.0" picocolors "^1.1.1" -url-parse@^1.5.10, url-parse@^1.5.3: +url-parse@^1.5.10: version "1.5.10" resolved "https://registry.yarnpkg.com/url-parse/-/url-parse-1.5.10.tgz#9d3c2f736c1d75dd3bd2be507dcc111f1e2ea9c1" integrity sha512-WypcfiRhfeUP9vvF0j6rw0J3hrWrw6iZv3+22h6iRMJ/8z1Tj6XfLP4DsUix5MhMPnXpiHDoKyoZ/bdCkwBCiQ== @@ -8031,11 +7522,6 @@ use-sync-external-store@^1.4.0, use-sync-external-store@^1.6.0: resolved "https://registry.yarnpkg.com/use-sync-external-store/-/use-sync-external-store-1.6.0.tgz#b174bfa65cb2b526732d9f2ac0a408027876f32d" integrity sha512-Pp6GSwGP/NrPIrxVFAIkOQeyw8lFenOHijQWkUTrDvrF4ALqylP2C/KCkeS9dpUM3KvYRQhna5vt7IL95+ZQ9w== -uuid@^8.3.2: - version "8.3.2" - resolved "https://registry.yarnpkg.com/uuid/-/uuid-8.3.2.tgz#80d5b5ced271bb9af6c445f21a1a04c606cefbe2" - integrity sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg== - v8-to-istanbul@^9.0.1: version "9.1.3" resolved "https://registry.yarnpkg.com/v8-to-istanbul/-/v8-to-istanbul-9.1.3.tgz#ea456604101cd18005ac2cae3cdd1aa058a6306b" @@ -8045,21 +7531,12 @@ v8-to-istanbul@^9.0.1: "@types/istanbul-lib-coverage" "^2.0.1" convert-source-map "^2.0.0" -verror@1.10.0: - version "1.10.0" - resolved "https://registry.yarnpkg.com/verror/-/verror-1.10.0.tgz#3a105ca17053af55d6e270c1f8288682e18da400" - integrity sha512-ZZKSmDAEFOijERBLkmYfJ+vmk3w+7hOLYDNkRCuRuMJGEmqYNCNLyBBFwWKVMhfwaEF3WOd0Zlw86U/WC/+nYw== - dependencies: - assert-plus "^1.0.0" - 
core-util-is "1.0.2" - extsprintf "^1.2.0" - -vite@7.2.2: - version "7.2.2" - resolved "https://registry.yarnpkg.com/vite/-/vite-7.2.2.tgz#17dd62eac2d0ca0fa90131c5f56e4fefb8845362" - integrity sha512-BxAKBWmIbrDgrokdGZH1IgkIk/5mMHDreLDmCJ0qpyJaAteP8NvMhkwr/ZCQNqNH97bw/dANTE9PDzqwJghfMQ== +vite@7.3.1: + version "7.3.1" + resolved "https://registry.yarnpkg.com/vite/-/vite-7.3.1.tgz#7f6cfe8fb9074138605e822a75d9d30b814d6507" + integrity sha512-w+N7Hifpc3gRjZ63vYBXA56dvvRlNWRczTdmCBBa+CotUzAPf5b7YMdMR/8CQoeYE5LX3W4wj6RYTgonm1b9DA== dependencies: - esbuild "^0.25.0" + esbuild "^0.27.0" fdir "^6.5.0" picomatch "^4.0.3" postcss "^8.5.6" @@ -8092,11 +7569,6 @@ web-tree-sitter@=0.24.5: resolved "https://registry.yarnpkg.com/web-tree-sitter/-/web-tree-sitter-0.24.5.tgz#16cea449da63012f23ca7b83bd32817dd0520400" integrity sha512-+J/2VSHN8J47gQUAvF8KDadrfz6uFYVjxoxbKWDoXVsH2u7yLdarCnIURnrMA6uSRkgX3SdmqM5BOoQjPdSh5w== -web-vitals@2.1.4: - version "2.1.4" - resolved "https://registry.yarnpkg.com/web-vitals/-/web-vitals-2.1.4.tgz#76563175a475a5e835264d373704f9dde718290c" - integrity sha512-sVWcwhU5mX6crfI5Vd2dC4qchyTqxV8URinzt25XqVh+bHEPGH4C3NPrNionCP7Obx59wrYEbNlw4Z8sjALzZg== - webidl-conversions@^7.0.0: version "7.0.0" resolved "https://registry.yarnpkg.com/webidl-conversions/-/webidl-conversions-7.0.0.tgz#256b4e1882be7debbf01d05f0aa2039778ea080a" @@ -8189,15 +7661,6 @@ which@^2.0.1: string-width "^4.1.0" strip-ansi "^6.0.0" -wrap-ansi@^6.2.0: - version "6.2.0" - resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-6.2.0.tgz#e9393ba07102e6c91a3b221478f0257cd2856e53" - integrity sha512-r6lPcBGxZXlIcymEu7InxDMhdW0KDxpLgoFLcguasxCaJ/SOIZwINatK9KY/tf+ZrlywOKU0UDj3ATXUBfxJXA== - dependencies: - ansi-styles "^4.0.0" - string-width "^4.1.0" - strip-ansi "^6.0.0" - wrap-ansi@^7.0.0: version "7.0.0" resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-7.0.0.tgz#67e145cff510a6a6984bdf1152911d69d2eb9e43" @@ -8216,11 +7679,6 @@ wrap-ansi@^8.1.0: string-width "^5.0.1" 
strip-ansi "^7.0.1" -wrappy@1: - version "1.0.2" - resolved "https://registry.yarnpkg.com/wrappy/-/wrappy-1.0.2.tgz#b5243d8f3ec1aa35f1364605bc0d1036e30ab69f" - integrity sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ== - write-file-atomic@^5.0.1: version "5.0.1" resolved "https://registry.yarnpkg.com/write-file-atomic/-/write-file-atomic-5.0.1.tgz#68df4717c55c6fa4281a7860b4c2ba0a6d2b11e7" @@ -8294,14 +7752,6 @@ yargs@^17.7.2: y18n "^5.0.5" yargs-parser "^21.1.1" -yauzl@^2.10.0: - version "2.10.0" - resolved "https://registry.yarnpkg.com/yauzl/-/yauzl-2.10.0.tgz#c7eb17c93e112cb1086fa6d8e51fb0667b79a5f9" - integrity sha512-p4a9I6X6nu6IhoGmBqAcbJy1mlC4j27vEPZX9F4L4/vZT3Lyq1VkFHw/V/PUcB9Buo+DG3iHkT0x3Qya58zc3g== - dependencies: - buffer-crc32 "~0.2.3" - fd-slicer "~1.1.0" - yocto-queue@^0.1.0: version "0.1.0" resolved "https://registry.yarnpkg.com/yocto-queue/-/yocto-queue-0.1.0.tgz#0294eb3dee05028d31ee1a5fa2c556a6aaf10a1b" diff --git a/quickwit/rest-api-tests/scenarii/es_compatibility/0032-mappings.yaml b/quickwit/rest-api-tests/scenarii/es_compatibility/0032-mappings.yaml new file mode 100644 index 00000000000..5f241c81dfc --- /dev/null +++ b/quickwit/rest-api-tests/scenarii/es_compatibility/0032-mappings.yaml @@ -0,0 +1,100 @@ +method: [GET] +engines: + - elasticsearch +endpoint: "gharchive/_mappings" +expected: + gharchive: {} + # _all: + # primaries: + # docs: + # count: 100 + # store: + # size_in_bytes: + # $expect: "val > 278300" + # total: + # segments: + # count: 1 + # docs: + # count: 100 + # indices: + # gharchive: + # primaries: + # docs: + # count: 100 + # store: + # size_in_bytes: + # $expect: "val > 278300" + # total: + # segments: + # count: 1 + # docs: + # count: 100 +--- +# method: [GET] +# engines: +# - quickwit +# - elasticsearch +# endpoint: "ghar*/_stats" +# expected: +# _all: +# primaries: +# docs: +# count: 100 +# total: +# segments: +# count: 1 +# docs: +# count: 100 +# indices: +# gharchive: +# 
primaries: +# docs: +# count: 100 +# total: +# segments: +# count: 1 +# docs: +# count: 100 +# --- +# method: [GET] +# engines: +# - quickwit +# endpoint: "_stats" +# expected: +# _all: +# primaries: +# docs: +# count: 102 +# total: +# segments: +# count: 2 +# docs: +# count: 102 +# indices: +# gharchive: +# primaries: +# docs: +# count: 100 +# total: +# segments: +# count: 1 +# docs: +# count: 100 +# fast_only: +# primaries: +# docs: +# count: 2 +# total: +# segments: +# count: 1 +# docs: +# count: 2 +# empty_index: +# primaries: +# docs: +# count: 0 +# total: +# segments: +# count: 0 +# docs: +# count: 0 diff --git a/quickwit/rest-api-tests/scenarii/es_field_capabilities/0001-field-capabilities.yaml b/quickwit/rest-api-tests/scenarii/es_field_capabilities/0001-field-capabilities.yaml index bd3cd917acd..a3c5041926d 100644 --- a/quickwit/rest-api-tests/scenarii/es_field_capabilities/0001-field-capabilities.yaml +++ b/quickwit/rest-api-tests/scenarii/es_field_capabilities/0001-field-capabilities.yaml @@ -31,10 +31,10 @@ expected: searchable: true aggregatable: true mixed: # This is a little weird case (values [5, -5.5]), since coercion happens only on the columnar side. That's why `long` is not aggregatable. - long: + long: metadata_field: false searchable: true - aggregatable: false + aggregatable: false double: metadata_field: false searchable: true @@ -88,10 +88,10 @@ expected: fields: $expect: "not 'id' in val" # Filtered by start_timestamp mixed: # This is a little weird case (values [5, -5.5]), since coercion happens only on the columnar side. That's why `long` is not aggregatable. 
- long: + long: metadata_field: false searchable: true - aggregatable: false + aggregatable: false double: metadata_field: false searchable: true @@ -103,8 +103,6 @@ expected: aggregatable: true --- # Test fields parameter with `.dynamic` suffix -engines: - - quickwit method: [GET] engines: - quickwit @@ -193,9 +191,6 @@ expected: --- # Compare with elastic search method: [GET] -engines: - - quickwit - - elasticsearch endpoint: fieldcaps/_field_caps?fields=nested.*ponse expected: indices: @@ -210,9 +205,6 @@ expected: --- # Compare ip field with elastic search method: [GET] -engines: - - quickwit - - elasticsearch endpoint: fieldcaps*/_field_caps?fields=host expected: indices: @@ -295,9 +287,6 @@ expected: --- # Wildcard on index name + Wildcard without match method: [GET] -engines: - - quickwit - - elasticsearch endpoint: fieldca*,blub*/_field_caps?fields=date expected: indices: @@ -313,24 +302,140 @@ expected: --- # Exact match index + Non matching exact index method: [GET] -engines: - - quickwit - - elasticsearch endpoint: fieldcaps,blub/_field_caps?fields=date status_code: 404 --- # Compare ip field with elastic search method: [GET] -engines: - - quickwit - - elasticsearch endpoint: doesnotexist/_field_caps?fields=date status_code: 404 --- # Compare ip field with elastic search method: [GET] +endpoint: doesno*texist/_field_caps?fields=date +status_code: 200 +--- +# Test _field_caps API with index_filter (term query) +# Note: term queries require exact token match; 'fritz' is lowercase due to default tokenizer +method: [POST] +endpoint: fieldcaps/_field_caps?fields=* +json: + index_filter: + term: + name: "fritz" +expected: + indices: + - fieldcaps + fields: + name: + keyword: + type: keyword + metadata_field: false + searchable: true + aggregatable: true + text: + type: text + metadata_field: false + searchable: true + aggregatable: true +--- +# Test _field_caps API with index_filter (match_all query) +method: [POST] +endpoint: fieldcaps/_field_caps?fields=name 
+json: + index_filter: + match_all: {} +expected: + indices: + - fieldcaps + fields: + name: + keyword: + type: keyword + metadata_field: false + searchable: true + aggregatable: true + text: + type: text + metadata_field: false + searchable: true + aggregatable: true +--- +# Test _field_caps API with index_filter (bool query) +method: [POST] +endpoint: fieldcaps/_field_caps?fields=response,name +json: + index_filter: + bool: + must: + - term: + name: "fritz" + filter: + - range: + response: + gte: 30 +expected: + indices: + - fieldcaps + fields: + response: + long: + type: long + metadata_field: false + searchable: true + aggregatable: true + name: + keyword: + type: keyword + metadata_field: false + searchable: true + aggregatable: true + text: + type: text + metadata_field: false + searchable: true + aggregatable: true +--- +# Test _field_caps API with invalid index_filter +method: [POST] +endpoint: fieldcaps/_field_caps?fields=* +json: + index_filter: + invalid_query_type: + field: "value" +status_code: 400 +--- +# Test _field_caps API with empty index_filter (should return 400 like ES) +method: [POST] engines: - quickwit - elasticsearch -endpoint: doesno*texist/_field_caps?fields=date -status_code: 200 +endpoint: fieldcaps/_field_caps?fields=name +json: + index_filter: {} +status_code: 400 +--- +# Test _field_caps API with index_filter using tag field for split pruning (QW-only) +method: [POST] +engines: + - quickwit +endpoint: fieldcaps/_field_caps?fields=name +json: + index_filter: + term: + tags: "nice" +expected: + indices: + - fieldcaps + fields: + name: + keyword: + type: keyword + metadata_field: false + searchable: true + aggregatable: true + text: + type: text + metadata_field: false + searchable: true + aggregatable: true diff --git a/quickwit/rest-api-tests/scenarii/es_field_capabilities/_setup.quickwit.yaml b/quickwit/rest-api-tests/scenarii/es_field_capabilities/_setup.quickwit.yaml index 8b02ee01882..5576e6cec28 100644 --- 
a/quickwit/rest-api-tests/scenarii/es_field_capabilities/_setup.quickwit.yaml +++ b/quickwit/rest-api-tests/scenarii/es_field_capabilities/_setup.quickwit.yaml @@ -22,6 +22,7 @@ json: tokenizer: default fast: true timestamp_field: date + tag_fields: ["tags"] field_mappings: - name: date type: datetime @@ -32,6 +33,10 @@ json: - name: host type: ip fast: true + - name: tags + type: array + tokenizer: raw + fast: true --- # Create index method: POST