diff --git a/.gitignore b/.gitignore
index b986336..35087fe 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,17 +1,102 @@
-# Binaries for programs and plugins
+# =============================================================================
+# Go
+# =============================================================================
+
+# Binaries
*.exe
*.exe~
*.dll
*.so
*.dylib
-# Test binary, built with `go test -c`
+# Test binaries
*.test
-# Output of the go coverage tool, specifically when used with LiteIDE
+# Coverage output
*.out
+coverage.out
+
+# Go workspace
+go.work
+go.work.sum
+
+# =============================================================================
+# Project
+# =============================================================================
+
+# Environment files (may contain secrets)
+*.env
+.env.local
+
+# Task runner cache
+.task/
+
+# Split SDK local files
+.split
+.splits
+
+# =============================================================================
+# IDE - JetBrains (GoLand, IntelliJ)
+# =============================================================================
+
+.idea/*
+!.idea/codeStyles/
+!.idea/runConfigurations/
+
+*.iml
+*.ipr
+*.iws
+
+# =============================================================================
+# IDE - VS Code
+# =============================================================================
+
+.vscode/*
+!.vscode/settings.json
+!.vscode/tasks.json
+!.vscode/launch.json
+!.vscode/extensions.json
+!.vscode/*.code-snippets
+
+.history/
+*.vsix
+
+# =============================================================================
+# OS - macOS
+# =============================================================================
+
+.DS_Store
+.AppleDouble
+.LSOverride
+._*
+.Spotlight-V100
+.Trashes
+
+# =============================================================================
+# OS - Windows
+# =============================================================================
+
+Thumbs.db
+ehthumbs.db
+Desktop.ini
+$RECYCLE.BIN/
+*.lnk
+
+# =============================================================================
+# OS - Linux
+# =============================================================================
+
+*~
+.directory
+.Trash-*
+.nfs*
-# Dependency directories (remove the comment below to include it)
-# vendor/
+# =============================================================================
+# Git
+# =============================================================================
-.idea/
\ No newline at end of file
+*.orig
+*.BACKUP.*
+*.BASE.*
+*.LOCAL.*
+*.REMOTE.*
diff --git a/.golangci.yml b/.golangci.yml
new file mode 100644
index 0000000..71ada67
--- /dev/null
+++ b/.golangci.yml
@@ -0,0 +1,139 @@
+# golangci-lint configuration for Split OpenFeature Provider
+# Gold Standard Linting Configuration
+
+version: "2"
+
+run:
+ timeout: 5m
+ tests: true
+ modules-download-mode: readonly
+
+linters:
+ enable:
+ # Enabled by default
+ - errcheck # Check for unchecked errors
+ - govet # Go vet
+ - ineffassign # Detect ineffectual assignments
+ - staticcheck # Static analysis
+ - unused # Check for unused code
+
+ # Additional recommended linters
+ - misspell # Check for misspelled words
+ - unconvert # Remove unnecessary type conversions
+ - unparam # Report unused function parameters
+ - prealloc # Find slice declarations that could be preallocated
+ - goconst # Find repeated strings that could be constants
+ - gocyclo # Cyclomatic complexity
+ - gocognit # Cognitive complexity
+ - dupl # Code clone detection
+ - gocritic # Comprehensive checks
+ - revive # Fast, extensible linter
+ - gosec # Security checks
+ - bodyclose # Check HTTP response bodies are closed
+ - noctx # Detect http.Request without context.Context
+ - rowserrcheck # Check sql.Rows.Err is checked
+ - sqlclosecheck # Check sql.Rows and sql.Stmt are closed
+ - errorlint # Error wrapping checks
+ - exhaustive # Check exhaustiveness of enum switch statements
+
+ exclusions:
+ paths:
+ - examples
+ - test
+ - '.*\.pb\.go$'
+
+ rules:
+ # Exclude all linters from test files - focus on production code quality
+ - path: '(.+)_test\.go'
+ linters:
+ - errcheck
+ - gocyclo
+ - gocognit
+ - dupl
+ - gocritic
+ - gosec
+ - goconst
+ - govet
+ - revive
+ - staticcheck
+ - misspell
+ - unconvert
+ - unparam
+ - prealloc
+
+ settings:
+ errcheck:
+ check-type-assertions: true
+ check-blank: true
+
+ govet:
+ enable-all: true
+ disable:
+ - shadow # Too many false positives
+
+ gocyclo:
+ min-complexity: 15
+
+ gocognit:
+ min-complexity: 30
+
+ dupl:
+ threshold: 100
+
+ goconst:
+ min-len: 3
+ min-occurrences: 3
+
+ misspell:
+ locale: US
+
+ staticcheck:
+ checks: [ "all" ]
+
+ revive:
+ confidence: 0.8
+ rules:
+ - name: blank-imports
+ - name: context-as-argument
+ - name: context-keys-type
+ - name: dot-imports
+ - name: error-return
+ - name: error-strings
+ - name: error-naming
+ - name: exported
+ - name: if-return
+ - name: increment-decrement
+ - name: var-declaration
+ - name: package-comments
+ - name: range
+ - name: receiver-naming
+ - name: time-naming
+ - name: unexported-return
+ - name: indent-error-flow
+ - name: errorf
+ - name: empty-block
+ - name: superfluous-else
+ - name: unused-parameter
+ - name: unreachable-code
+ - name: redefines-builtin-id
+
+ gosec:
+ severity: medium
+ confidence: medium
+
+ gocritic:
+ enabled-tags:
+ - diagnostic
+ - performance
+ - style
+ disabled-checks:
+ - commentedOutCode
+ - whyNoLint
+
+ exhaustive:
+ default-signifies-exhaustive: true
+
+ prealloc:
+ simple: true
+ range-loops: true
+ for-loops: false
diff --git a/.mockery.yaml b/.mockery.yaml
new file mode 100644
index 0000000..8a18cc0
--- /dev/null
+++ b/.mockery.yaml
@@ -0,0 +1,14 @@
+# Mockery v3.6.4 configuration
+# See: https://github.com/vektra/mockery/blob/v3/docs/configuration.md
+all: false
+template: testify
+formatter: goimports
+packages:
+ github.com/splitio/split-openfeature-provider-go/v2:
+ interfaces:
+ Client:
+ config:
+ dir: "{{.InterfaceDir}}"
+ filename: "mock_client_test.go"
+ pkgname: "split"
+ structname: "MockClient"
diff --git a/CHANGELOG.md b/CHANGELOG.md
new file mode 100644
index 0000000..ae6d59e
--- /dev/null
+++ b/CHANGELOG.md
@@ -0,0 +1,125 @@
+# Changelog
+
+All notable changes to this project will be documented in this file.
+
+The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
+and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+
+## [2.0.0] - 2025-11-24
+
+**Complete architectural rewrite** with modern SDK support, production-grade lifecycle management, and critical bug
+fixes.
+
+See [MIGRATION.md](MIGRATION.md) for upgrade instructions.
+
+### Breaking Changes
+
+#### SDK Requirements
+
+- **Split Go SDK upgraded to v6** (import: `github.com/splitio/go-client/v6`)
+- **OpenFeature Go SDK upgraded to v1** (import: `github.com/open-feature/go-sdk/openfeature`)
+
+#### API Changes
+
+- **All evaluation methods now require `context.Context` as first parameter**
+- **`Client()` renamed to `Factory()`** for Split SDK factory access
+- **`NewWithClient()` constructor removed** - use `New()` instead
+
+#### Behavioral Changes
+
+- **`ObjectEvaluation()` return structure changed**:
+ - v1: Returns treatment string only
+ - v2: Returns `FlagSetResult` (typed struct with `Treatment` and `Config` fields)
+
+### New Features
+
+#### Context-Aware Lifecycle
+
+- `InitWithContext(ctx)` - Context-aware initialization with timeout and cancellation
+- `ShutdownWithContext(ctx)` - Graceful shutdown with timeout and proper cleanup
+- Idempotent initialization with singleflight (prevents concurrent init races)
+- Provider cannot be reused after shutdown (must create new instance)
+
+#### Event System
+
+- OpenFeature event support:
+ - `PROVIDER_READY` - Provider initialized
+ - `PROVIDER_ERROR` - Initialization or runtime errors
+ - `PROVIDER_CONFIGURATION_CHANGED` - Flag definitions updated (detected via 30s polling)
+- Background monitoring (30s interval) for configuration change detection
+
+#### Event Tracking
+
+- `Track()` method implementing OpenFeature Tracker interface
+- Associates feature flag evaluations with user actions for A/B testing and experimentation
+- Supports custom traffic types via `trafficType` attribute in evaluation context
+- Supports event properties via `TrackingEventDetails.Add()`
+- Events viewable in Split Data Hub
+
+#### Per-Request Evaluation Options
+
+- `WithEvaluationMode(ctx, mode)` - Control per-request object evaluation behavior via `context.Context`
+ - `EvaluationModeIndividual` - Evaluate a single flag (useful in cloud mode to bypass flag set evaluation)
+ - `EvaluationModeSet` - Evaluate a flag set (explicit, same as cloud default; ignored in localhost mode)
+ - `EvaluationModeDefault` - Use provider's default behavior (flag set in cloud, individual in localhost)
+- `WithImpressionDisabled(ctx)` - Forward-looking API for per-evaluation impression control (logged, not yet enforced)
+- `WithEvalOptions(ctx, opts)` - Set multiple evaluation options at once
+
+#### Per-Request Track Options
+
+- `WithoutMetricValue(ctx)` - Send nil value to Split for count-only events, preventing pollution of sum/average metrics
+- `WithTrackOptions(ctx, opts)` - Set multiple tracking options at once
+- `GetTrackOptions(ctx)` - Extract tracking options from context
+
+#### Client Interface
+
+- Extracted `Client` interface for dependency injection and mock generation
+- Enables mockery-based mock generation for unit testing without Split SDK
+
+#### Observability
+
+- Structured logging with `log/slog` throughout provider and Split SDK
+- `Metrics()` method for health status and diagnostics
+- Unified logging via `WithLogger()` option
+
+### Bug Fixes
+
+#### Critical Fixes
+
+- **`ObjectEvaluation()` structure**: Now returns `FlagSetResult` with `Treatment` and `Config` fields (was: treatment
+ string only)
+- **Dynamic Configuration**: All config types (objects, primitives, arrays) consistently accessible via
+ `FlagMetadata["value"]`
+- **Dynamic Configuration JSON parsing**: Supports objects, arrays, and primitives (was: limited support)
+- **Evaluation context attributes**: Now passed to Split SDK for targeting rules (was: ignored)
+- **Shutdown resource cleanup**: Properly cleans up goroutines, channels, and SDK clients (was: resource leaks)
+
+#### Error Handling
+
+- **Shutdown timeout errors**: `ShutdownWithContext()` returns `ctx.Err()` when cleanup times out (was: no error
+ indication)
+- **JSON parse warnings**: Malformed Dynamic Configuration logged instead of silent failures
+- **Targeting key validation**: Non-string keys rejected with clear errors (was: silent failures)
+
+#### Concurrency & Reliability
+
+- **Atomic initialization**: Factory, client, and manager ready together (was: race conditions)
+- **Thread-safe health checks**: Eliminated race conditions in `Status()` and `Metrics()`
+- **Event channel lifecycle**: Properly closed during shutdown (was: potential goroutine leaks)
+- **Panic recovery**: Monitoring goroutine recovers from panics and terminates gracefully
+
+## [1.0.1] - 2022-10-14
+
+- Updated to OpenFeature spec v0.5.0 and OpenFeature Go SDK v0.6.0
+
+## [1.0.0] - 2022-10-03
+
+- Initial release
+- OpenFeature spec v0.5.0 compliance
+- OpenFeature Go SDK v0.5.0 support
+
+[2.0.0]: https://github.com/splitio/split-openfeature-provider-go/compare/v1.0.1...v2.0.0
+
+[1.0.1]: https://github.com/splitio/split-openfeature-provider-go/compare/v1.0.0...v1.0.1
+
+[1.0.0]: https://github.com/splitio/split-openfeature-provider-go/releases/tag/v1.0.0
diff --git a/CHANGES.txt b/CHANGES.txt
deleted file mode 100644
index d70bf22..0000000
--- a/CHANGES.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-1.0.0
-- 10/3/2022. Up to date with spec v0.5.0 and go sdk v0.5.0
-1.0.1
-- 10/14/2022. Up to date with spec v0.5.0 and go sdk v0.6.0
\ No newline at end of file
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 0000000..12aeff0
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,384 @@
+# Contributing to Split OpenFeature Go Provider
+
+We welcome contributions! This guide covers how to build, test, and submit changes.
+
+**Quick Links:**
+
+- [README.md](README.md) - Main documentation
+- [MIGRATION.md](MIGRATION.md) - v1 → v2 migration guide
+- [CHANGELOG.md](CHANGELOG.md) - Version history
+
+---
+
+## Prerequisites
+
+- **Go 1.25.4+**
+- **Task** - [taskfile.dev](https://taskfile.dev)
+- **golangci-lint** - For linting
+
+### Install Task
+
+```bash
+# macOS
+brew install go-task/tap/go-task
+
+# Linux
+sh -c "$(curl --location https://taskfile.dev/install.sh)" -- -d -b ~/.local/bin
+
+# Via Go
+go install github.com/go-task/task/v3/cmd/task@latest
+```
+
+### Install Development Tools
+
+```bash
+task install-tools # Install golangci-lint and other tools
+task check-tools # Verify installation
+```
+
+---
+
+## Development Workflow
+
+### 1. Fork and Clone
+
+```bash
+git clone https://github.com/YOUR_USERNAME/split-openfeature-provider-go.git
+cd split-openfeature-provider-go
+git remote add upstream https://github.com/splitio/split-openfeature-provider-go.git
+```
+
+### 2. Create Feature Branch
+
+```bash
+git fetch upstream
+git checkout -b feat/your-feature-name upstream/main
+```
+
+### 3. Make Changes
+
+**Run tests first:**
+
+```bash
+task test # Run all tests with race detector
+```
+
+**Make your changes:**
+
+- Add tests for new functionality (use testify/assert)
+- Follow Go idioms and best practices
+- Add godoc comments for exported symbols
+- Keep functions focused and small
+
+**Validate:**
+
+```bash
+task # Run lint + test + coverage
+task pre-commit # Quick pre-commit checks
+```
+
+### 4. Write Tests
+
+**Requirements:**
+
+- Use `testify/assert` or `testify/require` for assertions
+- Maintain >70% coverage (`task coverage-check`)
+- Tests must pass race detector
+- Test both success and error cases
+
+**Example:**
+
+```go
+func TestFeatureName(t *testing.T) {
+ provider, err := setupTestProvider(t)
+ require.NoError(t, err, "Setup failed")
+
+ tests := []struct {
+ name string
+ input string
+ expected string
+ wantErr bool
+ }{
+ {"valid input", "test", "expected", false},
+ {"invalid input", "", "", true},
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ result, err := provider.YourMethod(tt.input)
+
+ if tt.wantErr {
+ assert.Error(t, err)
+ } else {
+ assert.NoError(t, err)
+ assert.Equal(t, tt.expected, result)
+ }
+ })
+ }
+}
+```
+
+### 5. Commit and Push
+
+**Use Conventional Commits:**
+
+```bash
+git commit -m "feat: add new feature"
+git commit -m "fix: resolve bug with shutdown"
+git commit -m "docs: update README examples"
+```
+
+**Types:** `feat`, `fix`, `docs`, `test`, `refactor`, `perf`, `chore`
+
+```bash
+git push origin feat/your-feature-name
+```
+
+### 6. Create Pull Request
+
+**PR Checklist:**
+
+- [ ] All tests pass (`task test`)
+- [ ] Linter passes (`task lint`)
+- [ ] Coverage maintained at >70% (`task coverage-check`)
+- [ ] Documentation updated
+- [ ] Godoc comments added
+- [ ] No goroutine leaks
+- [ ] Concurrency safety verified
+
+---
+
+## Testing
+
+### Unit Tests
+
+```bash
+task test # All tests with race detector
+task test-short # Quick test run
+task coverage # View coverage report
+task coverage-check # Verify 70% threshold
+```
+
+### Integration Tests
+
+```bash
+task test-integration # Uses SPLIT_API_KEY if set, otherwise localhost mode
+task test-cloud # Cloud-only features (requires SPLIT_API_KEY)
+```
+
+**Integration Test (`test/integration/`)** - Automated test suite:
+
+- Localhost mode: 85 tests (no API key needed)
+- Cloud mode: 94 tests (requires SPLIT_API_KEY)
+- All evaluation types (boolean, string, int, float, object)
+- Lifecycle management and concurrent evaluations
+- Event handling and dynamic configurations
+
+**Cloud Test (`test/advanced/`)** - Cloud-only features:
+
+- Event tracking (view in Split Data Hub)
+- Configuration change detection
+- Interactive testing for cloud-specific functionality
+
+**Cloud Mode Testing Setup:**
+
+To run integration tests in cloud mode, create the required flags in your Split.io account.
+See `test/cloud_flags.yaml` for the flag definitions:
+
+1. Create 11 flags as documented in `test/cloud_flags.yaml`
+2. Create a flag set named `split_provider_test`
+3. Add `ui_theme` and `api_version` flags to the flag set
+4. Run tests:
+
+```bash
+SPLIT_API_KEY="your-key" task test-integration
+```
+
+**When Are These Tests Executed?**
+
+Neither test suite runs as part of CI (`task ci`). Run manually:
+
+```bash
+# Integration test - localhost mode (no API key)
+task test-integration
+
+# Integration test - cloud mode (requires API key and flags)
+SPLIT_API_KEY="your-key" task test-integration
+
+# Cloud test - cloud mode (requires API key)
+SPLIT_API_KEY="your-key" task test-cloud
+```
+
+**Recommendation:** Run `task test-integration` before submitting PRs that affect:
+
+- Provider initialization/shutdown
+- Flag evaluation logic
+- Event handling
+- Dynamic configuration parsing
+
+---
+
+## Code Quality
+
+### Required Standards
+
+- All exported symbols must have godoc comments
+- golangci-lint must pass
+- Coverage >70%
+- No race conditions
+- No goroutine leaks
+- Thread-safety verified for shared state
+
+### Common Commands
+
+```bash
+# Workflows
+task # Show available tasks
+task check # Run all quality checks
+task pre-commit # Quick pre-commit
+task ci # Full CI suite
+
+# Testing
+task test # Unit tests with race detector
+task test-integration # Integration tests (localhost or cloud)
+task test-cloud # Cloud-only tests (requires API key)
+task coverage # Coverage report
+
+# Code Quality
+task lint # Run linter
+task lint-fix # Auto-fix issues
+task fmt # Format code
+task vet # Run go vet
+
+# Examples
+task example-cloud # Cloud mode (requires SPLIT_API_KEY)
+task example-localhost # Localhost mode (no API key)
+
+# Tools
+task install-tools # Install dev tools
+task clean # Clean artifacts
+```
+
+---
+
+## Project Structure
+
+```
+split-openfeature-provider-go/
+├── provider.go # Core provider
+├── lifecycle.go # Init/Shutdown (context-aware)
+├── events.go # Event system
+├── evaluation.go # Flag evaluations
+├── helpers.go # Helpers and Factory()
+├── logging.go # Slog adapter
+├── constants.go # Constants
+├── provider_test.go # Unit tests
+├── lifecycle_edge_cases_test.go # Concurrency tests
+├── examples/
+│ ├── cloud/ # Cloud mode example
+│ └── localhost/ # Localhost mode example
+└── test/
+ ├── cloud_flags.yaml # Flag definitions for cloud testing
+ ├── integration/ # Integration tests (localhost + cloud)
+ └── advanced/ # Advanced tests (cloud-only features)
+```
+
+---
+
+## v2 Status: Production Ready ✅
+
+- ✅ Context-aware lifecycle with timeouts
+- ✅ Full OpenFeature event compliance
+- ✅ >70% test coverage with race detection
+- ✅ Structured logging with slog
+- ✅ Thread-safe concurrent operations
+
+---
+
+## Known Limitations & Future Enhancements
+
+### PROVIDER_STALE Event Not Emitted
+
+**Status:** Known limitation (Split SDK dependency)
+
+The provider cannot emit `PROVIDER_STALE` events when network connectivity is lost. This is due to a limitation in the
+Split Go SDK:
+
+- `factory.IsReady()` only indicates **initial** readiness after `BlockUntilReady()` completes
+- The method does **not** change when the SDK loses network connectivity during operation
+- Internally, the SDK handles connectivity issues (switching between streaming and polling modes) but does not expose
+ this state through its public API
+
+**Impact:**
+
+- When network connectivity is lost, the SDK continues serving cached data silently
+- Applications cannot detect when they are receiving potentially stale feature flag values
+- The `PROVIDER_CONFIGURATION_CHANGED` event still works correctly when flags are updated
+
+**Potential Future Enhancement:**
+If the Split SDK exposes streaming/connectivity status in a future version, this provider could be updated to:
+
+1. Monitor the streaming status channel for `StatusUp`/`StatusDown` events
+2. Emit `PROVIDER_STALE` when streaming disconnects and polling begins
+3. Emit `PROVIDER_READY` when streaming reconnects
+
+**Workaround for Applications:**
+Applications requiring staleness awareness should implement application-level health checks, such as:
+
+- Periodic test evaluations with known flags
+- Monitoring SDK debug logs for connectivity errors
+- External health check endpoints to Split.io APIs
+
+**References:**
+
+- Split SDK sync manager: `go-split-commons/synchronizer/manager.go`
+- Push status constants: `StatusUp`, `StatusDown`, `StatusRetryableError`, `StatusNonRetryableError`
+- SSE keepAlive timeout: 70 seconds (hardcoded in SDK)
+
+### PROVIDER_CONFIGURATION_CHANGED Detected via Polling
+
+**Status:** Known limitation (Split SDK dependency)
+
+The `PROVIDER_CONFIGURATION_CHANGED` event is detected by polling, not via real-time SSE streaming. The polling interval
+is configurable via `WithMonitoringInterval` (default: 30 seconds, minimum: 5 seconds).
+
+**Why Polling?**
+
+- The Split SDK receives configuration changes instantly via SSE streaming
+- However, the SDK does **not** expose a callback or event for configuration changes
+- The only way to detect changes is by polling `manager.Splits()` and comparing `ChangeNumber` values
+
+**Impact:**
+
+- Flag evaluations reflect changes immediately (SDK updates its cache via SSE)
+- `PROVIDER_CONFIGURATION_CHANGED` events have latency up to the configured monitoring interval
+- Applications relying on this event for cache invalidation may see delayed notifications
+
+**Potential Future Enhancement:**
+If the Split SDK exposes a configuration change callback in a future version, this provider could be updated to:
+
+1. Register a callback for real-time change notifications
+2. Emit `PROVIDER_CONFIGURATION_CHANGED` immediately when changes arrive via SSE
+3. Remove the polling-based detection
+
+---
+
+## Resources
+
+**Documentation:**
+
+- [OpenFeature Specification](https://openfeature.dev/specification/sections/providers)
+- [OpenFeature Go SDK](https://openfeature.dev/docs/reference/sdks/server/go/)
+- [Split Go SDK](https://github.com/splitio/go-client)
+- [Go Code Review Comments](https://go.dev/wiki/CodeReviewComments)
+
+**Help:**
+
+- [GitHub Issues](https://github.com/splitio/split-openfeature-provider-go/issues) - Bug reports and feature requests
+- [Pull Requests](https://github.com/splitio/split-openfeature-provider-go/pulls) - Contributions
+
+---
+
+## License
+
+By contributing, you agree your contributions will be licensed under Apache License 2.0.
diff --git a/CONTRIBUTORS-GUIDE.md b/CONTRIBUTORS-GUIDE.md
deleted file mode 100644
index b5653a6..0000000
--- a/CONTRIBUTORS-GUIDE.md
+++ /dev/null
@@ -1,28 +0,0 @@
-# Contributing to the Split OpenFeature Provider
-
-The Split Provider is an open source project and we welcome feedback and contribution. The information below describes how to build the project with your changes, run the tests, and send the Pull Request(PR).
-
-## Development
-
-### Development process
-
-1. Fork the repository and create a topic branch from `development` branch. Please use a descriptive name for your branch.
-2. While developing, use descriptive messages in your commits. Avoid short or meaningless sentences like "fix bug".
-3. Make sure to add tests for both positive and negative cases.
-4. Run the build script and make sure it runs with no errors.
-5. Run all tests and make sure there are no failures.
-6. `git push` your changes to GitHub within your topic branch.
-7. Open a Pull Request(PR) from your forked repo and into the `development` branch of the original repository.
-8. When creating your PR, please fill out all the fields of the PR template, as applicable, for the project.
-9. Check for conflicts once the pull request is created to make sure your PR can be merged cleanly into `development`.
-10. Keep an eye out for any feedback or comments from the Split team.
-
-### Building the Split Provider
-- `go build`
-
-### Running tests
-- `go test`
-
-# Contact
-
-If you have any other questions or need to contact us directly in a private manner send us a note at sdks@split.io
diff --git a/MIGRATION.md b/MIGRATION.md
new file mode 100644
index 0000000..a2ea07f
--- /dev/null
+++ b/MIGRATION.md
@@ -0,0 +1,258 @@
+# Migration Guide: v1 to v2
+
+## Overview
+
+Version 2.0.0 includes critical bug fixes and SDK upgrades.
+
+### Bug Fixes
+
+- `ObjectEvaluation()` returns structured map with treatment and config fields
+- Dynamic Configuration supports any JSON type (objects, arrays, primitives)
+- Evaluation context attributes passed to Split SDK for targeting rules
+- `Shutdown()` properly cleans up all resources
+- Non-string targeting keys validated and rejected
+
+### SDK Updates
+
+- Split Go SDK updated to v6
+- OpenFeature Go SDK updated to v1
+- Go minimum version: 1.25
+
+## Breaking Changes
+
+### Import Paths
+
+```go
+// v1
+import (
+ "github.com/splitio/go-client/splitio/client"
+ "github.com/open-feature/go-sdk/pkg/openfeature"
+)
+
+// v2
+import (
+ "github.com/splitio/go-client/v6/splitio/client"
+ "github.com/open-feature/go-sdk/openfeature"
+)
+```
+
+### Provider Initialization
+
+Use `SetProviderWithContextAndWait()` for synchronous initialization with timeout:
+
+```go
+// v1
+openfeature.SetProvider(provider)
+
+// v2 - Recommended with context and timeout
+ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
+defer cancel()
+
+err := openfeature.SetProviderWithContextAndWait(ctx, provider)
+if err != nil {
+ log.Fatal(err)
+}
+
+// v2 - Alternative: No timeout (uses default from BlockUntilReady config)
+err = openfeature.SetProviderAndWait(provider)
+if err != nil {
+ log.Fatal(err)
+}
+```
+
+### Context Required
+
+```go
+// v1
+result, _ := client.BooleanValue(nil, "flag-key", false, evalCtx)
+
+// v2
+ctx := context.Background()
+result, _ := client.BooleanValue(ctx, "flag-key", false, evalCtx)
+```
+
+## Migration Steps
+
+### 1. Update Dependencies
+
+```bash
+go get github.com/splitio/split-openfeature-provider-go/v2@latest
+go get github.com/splitio/go-client/v6@latest
+go get github.com/open-feature/go-sdk@latest
+go mod tidy
+```
+
+### 2. Update Imports
+
+```go
+import (
+ "context"
+
+ "github.com/open-feature/go-sdk/openfeature"
+ "github.com/splitio/split-openfeature-provider-go/v2"
+)
+```
+
+### 3. Update Initialization
+
+```go
+provider, err := split.New(apiKey)
+if err != nil {
+ log.Fatal(err)
+}
+
+// Defer shutdown with context
+defer func() {
+ ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+ defer cancel()
+ if err := openfeature.ShutdownWithContext(ctx); err != nil {
+ log.Printf("Shutdown error: %v", err)
+ }
+}()
+
+// Initialize with context and timeout
+ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
+defer cancel()
+
+err = openfeature.SetProviderWithContextAndWait(ctx, provider)
+if err != nil {
+ log.Fatal(err)
+}
+
+client := openfeature.NewClient("my-app")
+```
+
+### 4. Add Context to Evaluations
+
+```go
+ctx := context.Background()
+evalCtx := openfeature.NewEvaluationContext("user-123", map[string]any{
+ "email": "user@example.com",
+})
+result, _ := client.BooleanValue(ctx, "my-feature", false, evalCtx)
+```
+
+## Behavioral Changes
+
+### Dynamic Configurations
+
+v1 returned treatment name. v2 returns structured map with treatment and config:
+
+```go
+result, _ := client.ObjectValue(ctx, "my-flag", split.FlagSetResult{}, evalCtx)
+// v1: "on" (treatment only)
+// v2: {"my-flag": {"treatment": "on", "config": {"feature": "enabled", "limit": 100}}}
+
+// Dynamic Configuration is accessible via FlagMetadata["value"]
+details, _ := client.StringValueDetails(ctx, "my-flag", "default", evalCtx)
+if configValue, ok := details.FlagMetadata["value"]; ok {
+ // All config types wrapped in "value" key for consistent access
+ // Object: configValue.(map[string]any)
+ // Primitive: configValue.(float64), configValue.(string), etc.
+ // Array: configValue.([]any)
+}
+```
+
+### Targeting Rules
+
+v1 ignored evaluation context attributes. v2 passes them correctly:
+
+```go
+evalCtx := openfeature.NewEvaluationContext("user-123", map[string]any{
+ "plan": "premium",
+})
+result, _ := client.BooleanValue(ctx, "premium-feature", false, evalCtx)
+// v1: attributes ignored
+// v2: targeting rules work
+```
+
+### Logging
+
+v1 used plain text logs. v2 uses structured JSON logs with `slog`.
+
+## New Features
+
+### Custom Logger
+
+```go
+logger := slog.New(slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{
+ Level: slog.LevelInfo,
+}))
+slog.SetDefault(logger)
+```
+
+### Health Check
+
+```go
+metrics := provider.Metrics()
+```
+
+### Factory Access
+
+```go
+factory := provider.Factory()
+manager := factory.Manager()
+```
+
+## Compatibility
+
+| Component | v1.x | v2.x |
+|-----------------|-------|-------|
+| Go Version | 1.19+ | 1.25+ |
+| Split SDK | v5/v6 | v6 |
+| OpenFeature SDK | v0 | v1 |
+
+## Complete Example
+
+### v1
+
+```go
+import (
+ "github.com/open-feature/go-sdk/pkg/openfeature"
+ "github.com/splitio/split-openfeature-provider-go"
+)
+
+provider, _ := split.NewProviderSimple("YOUR_API_KEY")
+openfeature.SetProvider(provider)
+client := openfeature.NewClient("my-app")
+
+evalCtx := openfeature.NewEvaluationContext("user-123", nil)
+result, _ := client.BooleanValue(nil, "my-feature", false, evalCtx)
+```
+
+### v2
+
+```go
+import (
+ "context"
+ "log"
+ "time"
+
+ "github.com/open-feature/go-sdk/openfeature"
+ "github.com/splitio/split-openfeature-provider-go/v2"
+)
+
+provider, err := split.New("YOUR_API_KEY")
+if err != nil {
+ log.Fatal(err)
+}
+
+defer func() {
+ ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+ defer cancel()
+	_ = openfeature.ShutdownWithContext(ctx) // error intentionally ignored in this minimal example; see step 3 for logging
+}()
+
+ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
+defer cancel()
+
+err = openfeature.SetProviderWithContextAndWait(ctx, provider)
+if err != nil {
+ log.Fatal(err)
+}
+
+client := openfeature.NewClient("my-app")
+
+evalCtx := openfeature.NewEvaluationContext("user-123", nil)
+result, _ := client.BooleanValue(context.Background(), "my-feature", false, evalCtx)
+```
diff --git a/README.md b/README.md
index 25b6945..a164961 100644
--- a/README.md
+++ b/README.md
@@ -1,112 +1,759 @@
-# Split OpenFeature Provider for Go
-[](https://twitter.com/intent/follow?screen_name=splitsoftware)
+
+
+

+
+# Split OpenFeature Go Provider
+
+[](https://goreportcard.com/report/github.com/splitio/split-openfeature-provider-go)
+[](https://github.com/splitio/split-openfeature-provider-go)
+[](https://pkg.go.dev/github.com/splitio/split-openfeature-provider-go/v2)
+
+**OpenFeature Go Provider for Split.io**
+
+[Installation](#installation) • [Usage](#usage) • [Examples](#examples) • [API](#api) • [Contributing](#contributing)
+
+
+
+---
## Overview
-This Provider is designed to allow the use of OpenFeature with Split, the platform for controlled rollouts, serving features to your users via the Split feature flag to manage your complete customer experience.
-## Compatibility
-This SDK is compatible with Go 1.19 and higher.
+OpenFeature provider for Split.io enabling feature flag evaluation through the OpenFeature SDK with support for
+attribute-based targeting and flag metadata (JSON configurations attached to treatments).
+
+## Features
-## Getting started
-Below is a simple example that describes the instantiation of the Split Provider. Please see the [OpenFeature Documentation](https://docs.openfeature.dev/docs/reference/concepts/evaluation-api) for details on how to use the OpenFeature SDK.
+- All OpenFeature flag types (boolean, string, number, object)
+- Event tracking for experimentation and analytics
+- Attribute-based targeting and flag metadata
+- Configuration change detection via background monitoring
+- Thread-safe concurrent evaluations
+- Structured logging via `slog`
+
+## Installation
+
+```bash
+go get github.com/splitio/split-openfeature-provider-go/v2
+go get github.com/open-feature/go-sdk
+go get github.com/splitio/go-client/v6
+```
+
+## Usage
+
+### Basic Setup
```go
import (
- "github.com/open-feature/go-sdk/pkg/openfeature"
- splitProvider "github.com/splitio/split-openfeature-provider-go"
+ "context"
+ "time"
+
+ "github.com/open-feature/go-sdk/openfeature"
+ "github.com/splitio/split-openfeature-provider-go/v2"
)
-provider, err := splitProvider.NewProviderSimple("YOUR_SDK_TYPE_API_KEY")
+provider, err := split.New("YOUR_API_KEY")
if err != nil {
- // Provider creation error
+ log.Fatal(err)
}
-openfeature.SetProvider(provider)
+defer func() {
+ ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+ defer cancel()
+ if err := openfeature.ShutdownWithContext(ctx); err != nil {
+ log.Printf("Shutdown error: %v", err)
+ }
+}()
+
+ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
+defer cancel()
+
+if err := openfeature.SetProviderWithContextAndWait(ctx, provider); err != nil {
+ log.Fatal(err)
+}
+
+client := openfeature.NewClient("my-app")
```
-If you are more familiar with Split or want access to other initialization options, you can provide a `SplitClient` to the constructor. See the [Split Go SDK Documentation](https://help.split.io/hc/en-us/articles/360020093652-Go-SDK#initialization) for more information.
+### Advanced Setup
+
```go
-import (
- "github.com/open-feature/go-sdk/pkg/openfeature"
- "github.com/splitio/go-client/v6/splitio/client"
- "github.com/splitio/go-client/v6/splitio/conf"
- splitProvider "github.com/splitio/split-openfeature-provider-go"
-)
+import "github.com/splitio/go-client/v6/splitio/conf"
cfg := conf.Default()
-factory, err := client.NewSplitFactory("YOUR_SDK_TYPE_API_KEY", cfg)
-if err != nil {
- // SDK initialization error
+cfg.BlockUntilReady = 15 // Default is 10 seconds
+
+provider, err := split.New("YOUR_API_KEY", split.WithSplitConfig(cfg))
+```
+
+See [examples](./examples/) for complete configuration patterns including logging setup.
+
+### Server-Side Evaluation Pattern
+
+In server-side SDKs, create client once at startup, then evaluate per-request with transaction-specific context:
+
+```go
+// Application startup - create client once
+client := openfeature.NewClient("my-app")
+
+// Per-request handler
+func handleRequest(w http.ResponseWriter, r *http.Request) {
+ // Create evaluation context with targeting key and attributes
+ evalCtx := openfeature.NewEvaluationContext("user-123", map[string]any{
+ "email": "user@example.com",
+ "plan": "premium",
+ })
+
+ // Option 1: Pass evaluation context directly to each call
+ enabled, _ := client.BooleanValue(r.Context(), "new-feature", false, evalCtx)
+
+ // Option 2: Use transaction context propagation (set once, use throughout request)
+ ctx := openfeature.WithTransactionContext(r.Context(), evalCtx)
+ enabled, _ = client.BooleanValue(ctx, "new-feature", false, openfeature.EvaluationContext{})
+ theme, _ := client.StringValue(ctx, "ui-theme", "light", openfeature.EvaluationContext{})
}
+```
-splitClient := factory.Client()
+**Required:** Targeting key in evaluation context.
-err = splitClient.BlockUntilReady(10)
-if err != nil {
- // SDK timeout error
+**Transaction context:** Use `openfeature.WithTransactionContext()` to embed evaluation context in `context.Context`
+once, then reuse across multiple evaluations.
+
+### Domain-Specific Providers
+
+Use named providers for multi-tenant or service-isolated configurations:
+
+```go
+defer func() {
+ ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+ defer cancel()
+ openfeature.ShutdownWithContext(ctx)
+}()
+
+ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
+defer cancel()
+
+tenant1Provider, _ := split.New("TENANT_1_API_KEY")
+openfeature.SetNamedProviderWithContextAndWait(ctx, "tenant-1", tenant1Provider)
+
+tenant2Provider, _ := split.New("TENANT_2_API_KEY")
+openfeature.SetNamedProviderWithContextAndWait(ctx, "tenant-2", tenant2Provider)
+
+// Create clients for each named provider domain
+client1 := openfeature.NewClient("tenant-1")
+client2 := openfeature.NewClient("tenant-2")
+```
+
+### Lifecycle Management
+
+#### Context-Aware Initialization
+
+The provider supports context-aware initialization with timeout and cancellation:
+
+```go
+// Initialization with context (recommended)
+ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
+defer cancel()
+
+if err := openfeature.SetProviderWithContextAndWait(ctx, provider); err != nil {
+ log.Fatal(err)
}
+```
-provider, err := splitProvider.NewProvider(*splitClient)
-if err != nil {
- // Provider creation error
+**Key Behaviors:**
+
+- Respects context deadline (returns error if timeout exceeded)
+- Cancellable via context cancellation
+- Idempotent - safe to call multiple times (fast path if already initialized)
+- Thread-safe - concurrent Init calls use singleflight (only one initialization happens)
+
+#### Graceful Shutdown with Timeout
+
+Shutdown is a graceful best-effort operation that returns an error if cleanup doesn't complete within the context
+deadline:
+
+```go
+ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+defer cancel()
+
+if err := openfeature.ShutdownWithContext(ctx); err != nil {
+ // Error means cleanup timed out, but provider is still logically shut down
+ log.Printf("Shutdown timeout: %v (cleanup continuing in background)", err)
+}
+```
+
+**Shutdown Behavior:**
+
+The provider is **immediately** marked as shut down (all new operations fail with `PROVIDER_NOT_READY`), then cleanup
+happens within the context deadline:
+
+1. **Within Deadline:** Complete cleanup, return `nil`
+2. **After Deadline:** Log warnings, return `ctx.Err()` (context.DeadlineExceeded), continue cleanup in background
+
+**Return Values:**
+
+- `nil` - shutdown completed successfully within timeout
+- `context.DeadlineExceeded` - cleanup timed out (provider still logically shut down)
+- `context.Canceled` - context was cancelled (provider still logically shut down)
+
+**Cleanup Timing:**
+
+- Event channel close: Immediate
+- Monitoring goroutine: Up to 30 seconds to terminate
+- Split SDK Destroy: Up to 1 hour in streaming mode (known SDK limitation)
+
+**Recommended Timeout:** 30 seconds minimum to allow monitoring goroutine to exit cleanly.
+
+**Important:** Even when an error is returned, the provider is logically shut down:
+
+- Provider state is atomically set to "shut down" immediately
+- All new operations (Init, evaluations) will fail with PROVIDER_NOT_READY
+- Background cleanup continues safely even after error is returned
+
+#### Provider Reusability
+
+**Important:** Once shut down, a provider instance cannot be reused. Attempting to initialize after shutdown returns an
+error:
+
+```go
+ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+defer cancel()
+_ = provider.ShutdownWithContext(ctx)
+
+// This will fail with error: "cannot initialize provider after shutdown"
+initCtx, initCancel := context.WithTimeout(context.Background(), 15*time.Second)
+defer initCancel()
+err := openfeature.SetProviderWithContextAndWait(initCtx, provider)
+```
+
+To use a provider again after shutdown, create a new instance:
+
+```go
+newProvider, _ := split.New("YOUR_API_KEY")
+```
+
+#### Thread Safety Guarantees
+
+The provider is fully thread-safe with the following guarantees:
+
+- **Concurrent Evaluations:** Multiple goroutines can safely call evaluation methods simultaneously
+- **Evaluation During Shutdown:** In-flight evaluations complete safely before client destruction
+- **Concurrent Init Calls:** Multiple Init calls use singleflight - only one initialization happens
+- **Status Consistency:** Status() and Metrics() return consistent atomic state even during transitions
+- **Factory Access:** Factory() can be called safely during concurrent operations
+
+### Provider Status
+
+The provider follows OpenFeature's state lifecycle with the following states:
+
+| State | When It Occurs | Evaluations Behavior | Status() Returns |
+|--------------|-------------------------------------------------|-----------------------------------|--------------------|
+| **NotReady** | After `New()`, before `Init()` completes | Return `PROVIDER_NOT_READY` error | `of.NotReadyState` |
+| **Ready** | After successful `Init()` / `BlockUntilReady()` | Execute normally with Split SDK | `of.ReadyState` |
+| **NotReady** | After `Shutdown()` called | Return `PROVIDER_NOT_READY` error | `of.NotReadyState` |
+
+**State Transitions:**
+
+```
+New() → NotReady
+ ↓
+Init() → Ready (if SDK becomes ready)
+ ↓
+ └─→ NotReady (if Shutdown() called)
+ ↓
+ [Terminal State - Cannot re-initialize]
+```
+
+**Important Notes:**
+
+- Once `Shutdown()` is called, the provider **cannot be re-initialized** - create a new instance instead
+- `Init()` can fail due to timeout, invalid API key, or shutdown during initialization
+- State transitions emit OpenFeature events (`PROVIDER_READY`, `PROVIDER_ERROR`, `PROVIDER_CONFIGURATION_CHANGED`)
+
+**Staleness Detection Limitation:**
+The Split SDK's `IsReady()` method only indicates initial readiness and does not change when network connectivity is
+lost. The SDK handles connectivity issues internally (switching between streaming and polling modes) but does not expose
+this state. As a result, `PROVIDER_STALE` events are not emitted. When connectivity is lost, the SDK continues serving
+cached data silently. See [CONTRIBUTING.md](CONTRIBUTING.md) for details on this limitation.
+
+**Check provider readiness:**
+
+```go
+// Check via client (works for both default and named providers)
+client := openfeature.NewClient("my-app") // or named domain like "tenant-1"
+if client.State() == openfeature.ReadyState {
+ // Provider ready for evaluations
+}
+
+// Get provider metadata
+metadata := client.Metadata()
+domain := metadata.Domain() // Client's domain name
+```
+
+**For diagnostics and monitoring:**
+
+```go
+// Provider-specific health metrics
+metrics := provider.Metrics()
+// Returns map with: provider, initialized, status, splits_count, ready
+```
+
+### Known Limitations
+
+**Context Cancellation During Evaluation**
+
+Evaluation methods (`BooleanValue`, `StringValue`, etc.) accept a `context.Context` parameter but **cannot cancel
+in-flight evaluations**. This is because the underlying Split SDK's `TreatmentWithConfig()` method does not support
+context cancellation.
+
+**Impact:**
+
+- Context cancellation/timeout is only checked **before** calling the Split SDK
+- Once evaluation starts, it runs to completion even if context expires
+- In localhost mode: evaluations are fast (~microseconds), low risk
+- In cloud mode: evaluations read from cache, typically <1ms, but network issues could cause delays
+
+**Affected operations:**
+
+- ✅ `InitWithContext` - respects context cancellation
+- ✅ `ShutdownWithContext` - respects context timeout
+- ❌ Flag evaluations - cannot cancel once started
+
+**Workarounds:**
+
+```go
+// Option 1: Use HTTP-level timeouts (recommended)
+cfg := conf.Default()
+cfg.Advanced.HTTPTimeout = 5 * time.Second
+
+// Option 2: Set aggressive evaluation context timeout
+ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
+defer cancel()
+// Note: timeout only applies BEFORE evaluation starts
+value, err := client.BooleanValue(ctx, "flag", false, evalCtx)
+```
+
+**Per-Evaluation Impression Control (Forward-Looking API)**
+
+`WithImpressionDisabled()` and `EvalOptions.ImpressionDisabled` are provided as forward-looking API surface for
+per-evaluation impression control. The Split Go SDK does not currently support disabling impressions on individual
+evaluations - only SDK-level impression modes (`OPTIMIZED`/`DEBUG`/`NONE`) configured at initialization time.
+
+When `ImpressionDisabled` is set, the provider logs a one-time info message per provider instance but does not enforce
+the setting. This API will become functional when the Split Go SDK adds per-evaluation impression support.
+
+**Split SDK Destroy() Blocking (Streaming Mode)**
+
+In cloud/streaming mode, the Split SDK's `Destroy()` method can block for up to 1 hour due to SSE connection handling.
+This is a known Split SDK limitation tracked
+in [splitio/go-client#243](https://github.com/splitio/go-client/issues/243).
+
+**Impact:** During shutdown, cleanup may continue in background if context timeout expires. The provider is logically
+shut down immediately (all new operations return defaults), only cleanup may be delayed.
+
+**Mitigation:** Use appropriate shutdown timeout (30s recommended):
+
+```go
+ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+defer cancel()
+openfeature.ShutdownWithContext(ctx)
+```
+
+## Examples
+
+Complete working examples with detailed code:
+
+- **[localhost/](./examples/localhost/)** - Local development mode (YAML file, no API key required)
+- **[cloud/](./examples/cloud/)** - Cloud mode with streaming updates and all flag types
+- **[test/integration/](./test/integration/)** - Comprehensive integration test suite
+
+Run examples:
+
+```bash
+task example-localhost # No API key needed
+task example-cloud # Requires SPLIT_API_KEY
+task test-integration # Full integration tests
+```
+
+## API
+
+### Flag Evaluation
+
+All methods require targeting key in evaluation context:
+
+```go
+ctx := context.Background()
+evalCtx := openfeature.NewEvaluationContext("user-123", map[string]any{
+ "email": "user@example.com",
+ "plan": "premium",
+})
+
+// Boolean
+enabled, err := client.BooleanValue(ctx, "new-feature", false, evalCtx)
+
+// String
+theme, err := client.StringValue(ctx, "ui-theme", "light", evalCtx)
+
+// Number
+maxRetries, err := client.IntValue(ctx, "max-retries", 3, evalCtx)
+discount, err := client.FloatValue(ctx, "discount-rate", 0.0, evalCtx)
+
+// Object
+result, err := client.ObjectValue(ctx, "flag-key", split.FlagSetResult{}, evalCtx)
+```
+
+### Object Evaluation - Mode-Specific Behavior
+
+Object evaluation returns `split.FlagSetResult` in both modes:
+
+```go
+type FlagResult struct {
+ Config any // Parsed JSON config, or nil
+ Treatment string // Split treatment name (e.g., "on", "off", "v1")
+}
+
+type FlagSetResult map[string]FlagResult
+```
+
+**Cloud Mode** (default: flag set evaluation):
+
+```go
+// Default behavior: treats "my-flag-set" as a flag set name
+result, _ := client.ObjectValue(ctx, "my-flag-set", split.FlagSetResult{}, evalCtx)
+flags := result.(split.FlagSetResult)
+for name, flag := range flags {
+ fmt.Printf("%s: %s\n", name, flag.Treatment)
+}
+
+// Override: evaluate a single flag instead of a flag set
+individualCtx := split.WithEvaluationMode(ctx, split.EvaluationModeIndividual)
+result, _ = client.ObjectValue(individualCtx, "single-flag", split.FlagSetResult{}, evalCtx)
+```
+
+**Localhost Mode** (always individual flag evaluation):
+
+```go
+result, _ := client.ObjectValue(ctx, "single-flag", split.FlagSetResult{}, evalCtx)
+flags := result.(split.FlagSetResult)
+flag := flags["single-flag"]
+fmt.Println(flag.Treatment, flag.Config)
+```
+
+**Note:** Flag sets are NOT supported in localhost mode. `EvaluationModeSet` is silently ignored (always uses individual
+evaluation). See [Evaluation Options](#evaluation-options) for details.
+
+### Evaluation Options
+
+Control per-request evaluation behavior via `context.Context`:
+
+```go
+// Force individual flag evaluation in cloud mode (instead of flag set)
+individualCtx := split.WithEvaluationMode(ctx, split.EvaluationModeIndividual)
+result, _ := client.ObjectValue(individualCtx, "single-flag", split.FlagSetResult{}, evalCtx)
+
+// Force flag set evaluation (explicit, same as cloud default)
+setCtx := split.WithEvaluationMode(ctx, split.EvaluationModeSet)
+result, _ = client.ObjectValue(setCtx, "my-flag-set", split.FlagSetResult{}, evalCtx)
+
+// Set multiple evaluation options at once
+optionsCtx := split.WithEvalOptions(ctx, split.EvalOptions{
+ Mode: split.EvaluationModeIndividual,
+ ImpressionDisabled: true, // Forward-looking API (see note below)
+})
+```
+
+**Evaluation Modes:**
+
+| Mode | Cloud Behavior | Localhost Behavior |
+|-------------------------------|-------------------------|------------------------|
+| `EvaluationModeDefault` (`""`)| Flag set evaluation | Individual evaluation |
+| `EvaluationModeSet` | Flag set evaluation | Ignored (individual) |
+| `EvaluationModeIndividual` | Individual evaluation | Individual evaluation |
+
+**Note:** `EvaluationModeSet` is silently ignored in localhost mode because the Split SDK's localhost mode does not
+support flag sets. A debug-level log message is emitted when this occurs.
+
+**ImpressionDisabled** (forward-looking API): The `WithImpressionDisabled()` helper and `ImpressionDisabled` field are
+provided for future compatibility with Split SDK per-evaluation impression control. Currently logged but not enforced.
+See [Known Limitations](#known-limitations).
+
+### Extracting Configuration Metadata
+
+All `*ValueDetails` methods return evaluation metadata including flag metadata:
+
+```go
+details, err := client.StringValueDetails(ctx, "ui-theme", "light", evalCtx)
+
+// Standard fields
+value := details.Value // Evaluated value: "dark" (for strings, same as treatment)
+treatment := details.Variant // Split treatment name: "dark", "light", etc.
+reason := details.Reason // TARGETING_MATCH, DEFAULT, ERROR
+
+// Extract flag metadata (configurations attached to treatments)
+// All config types are wrapped in FlagMetadata["value"] for consistency
+if configValue, ok := details.FlagMetadata["value"]; ok {
+ // Object config: {"bgColor": "#000", "fontSize": 14}
+ if configMap, ok := configValue.(map[string]any); ok {
+ bgColor := configMap["bgColor"]
+ fontSize := configMap["fontSize"]
+ }
+ // Primitive config: 42
+ if num, ok := configValue.(float64); ok {
+ // Use primitive value
+ }
+ // Array config: ["a", "b", "c"]
+ if arr, ok := configValue.([]any); ok {
+ // Use array
+ }
}
-openfeature.SetProvider(provider)
```
-## Use of OpenFeature with Split
-After the initial setup you can use OpenFeature according to their [documentation](https://docs.openfeature.dev/docs/reference/concepts/evaluation-api/).
+### Evaluation Reasons
+
+| Reason | Description |
+|-------------------|---------------------------------------------------------------------------|
+| `TARGETING_MATCH` | Flag successfully evaluated |
+| `DEFAULT` | Flag not found, returned default value |
+| `ERROR` | Evaluation error (missing targeting key, provider not ready, parse error) |
+
+### Error Codes
+
+Provider implements OpenFeature error codes. All errors return default value:
+
+- `PROVIDER_NOT_READY` - Provider not initialized
+- `FLAG_NOT_FOUND` - Flag doesn't exist in Split
+- `PARSE_ERROR` - Treatment can't parse to requested type
+- `TARGETING_KEY_MISSING` - No targeting key in context
+- `INVALID_CONTEXT` - Malformed evaluation context
+- `GENERAL` - Context canceled/timeout or other errors
+
+### Default Value Behavior
+
+OpenFeature's design philosophy: **evaluations never return Go errors**. Instead, they return the default value you
+provide with resolution details indicating what happened.
+
+**When Split SDK Returns "control" Treatment:**
+
+The Split SDK returns a special `"control"` treatment to indicate evaluation failure. Our provider translates this to
+OpenFeature's default value pattern:
+
+| Condition | Split SDK Returns | Caller Receives | Resolution Details |
+|----------------------------|-------------------|-----------------|---------------------------------------------------|
+| Condition                  | Split SDK Returns | Caller Receives | Resolution Details                                  |
+|----------------------------|-------------------|-----------------|-----------------------------------------------------|
+| Flag doesn't exist         | `"control"`       | Default value   | `Reason: DEFAULT`<br>`Error: FLAG_NOT_FOUND`        |
+| Provider not initialized   | `"control"`       | Default value   | `Reason: ERROR`<br>`Error: PROVIDER_NOT_READY`      |
+| Provider shut down         | `"control"`       | Default value   | `Reason: ERROR`<br>`Error: PROVIDER_NOT_READY`      |
+| Targeting key missing      | `"control"`       | Default value   | `Reason: ERROR`<br>`Error: TARGETING_KEY_MISSING`   |
+| Context canceled           | `"control"`       | Default value   | `Reason: ERROR`<br>`Error: GENERAL`                 |
+| Network error (cloud mode) | `"control"`       | Default value   | `Reason: DEFAULT`<br>`Error: FLAG_NOT_FOUND`        |
+
+**Example:**
+
+```go
+// Flag doesn't exist in Split
+enabled, err := client.BooleanValue(ctx, "nonexistent-flag", false, evalCtx)
+// Result:
+// - enabled = false (your default value)
+// - err = nil (OpenFeature doesn't return errors)
+
+// To check what happened, use *ValueDetails methods:
+details, err := client.BooleanValueDetails(ctx, "nonexistent-flag", false, evalCtx)
+// - details.Value = false
+// - details.Reason = of.DefaultReason
+// - details.ErrorCode = of.FlagNotFoundCode
+// - details.ErrorMessage = "flag not found"
+```
+
+**Key Points:**
+
+- Your application continues running normally with safe default values
+- No panic, no nil pointers, no error handling required for normal operation
+- Use `*ValueDetails` methods when you need to distinguish between success and fallback
+- This design enables graceful degradation during outages or misconfigurations
+
+### Event Tracking
+
+Track user actions for experimentation and analytics:
-One important note is that the Split Provider **requires a targeting key** to be set. Often times this should be set when evaluating the value of a flag by [setting an EvaluationContext](https://docs.openfeature.dev/docs/reference/concepts/evaluation-context) which contains the targeting key. An example flag evaluation is
```go
-client := openfeature.NewClient("CLIENT_NAME");
+evalCtx := openfeature.NewEvaluationContext("user-123", nil)
-evaluationContext := openfeature.NewEvaluationContext("TARGETING_KEY", nil)
-boolValue := client.BooleanValue(nil, "boolFlag", false, evaluationContext)
+// Basic tracking with value
+details := openfeature.NewTrackingEventDetails(99.99)
+client.Track(ctx, "purchase_completed", evalCtx, details)
+
+// Tracking with custom traffic type
+evalCtxAccount := openfeature.NewEvaluationContext("account-456", map[string]any{
+ "trafficType": "account", // Optional, defaults to "user"
+})
+client.Track(ctx, "subscription_created", evalCtxAccount, details)
+
+// Tracking with properties
+purchaseDetails := openfeature.NewTrackingEventDetails(149.99).
+ Add("currency", "USD").
+ Add("item_count", 3).
+ Add("category", "electronics")
+client.Track(ctx, "purchase", evalCtx, purchaseDetails)
```
-If the same targeting key is used repeatedly, the evaluation context may be set at the client level
+
+**Supported Property Types:**
+
+The Split SDK accepts the following property value types:
+
+| Type | Supported | Example |
+|----------------------------|-----------|----------------------------|
+| `string` | ✅ | `Add("currency", "USD")` |
+| `bool` | ✅ | `Add("is_premium", true)` |
+| `int`, `int32`, `int64` | ✅ | `Add("item_count", 3)` |
+| `uint`, `uint32`, `uint64` | ✅ | `Add("quantity", uint(5))` |
+| `float32`, `float64` | ✅ | `Add("price", 99.99)` |
+| `nil` | ✅ | `Add("optional", nil)` |
+| Arrays, maps, structs | ❌ | Silently set to `nil` |
+
+**⚠️ Important:** Unsupported types (arrays, maps, nested objects) are **silently set to `nil`** by the Split SDK - no
+error is returned. Always use primitive types for event properties.
+
+**Parameters:**
+
+- `trackingEventName`: Event name (e.g., "checkout", "signup")
+- `evaluationContext`: Contains targeting key and optional `trafficType` attribute
+- `details`: Event value and custom properties
+
+**Traffic Type:**
+
+- Defaults to `"user"` if not specified
+- Set via `trafficType` attribute in evaluation context
+- Must match a defined traffic type in Split
+
+**Localhost Mode:** Track events are accepted but not persisted (no server to send them to). Code using `Track()` runs
+unchanged in local development.
+
+**View Events:** Track events appear in Split Data Hub (Live Tail tab).
+
+### Track Options
+
+Control per-request tracking behavior via `context.Context`:
+
+```go
+// Track with metric value (standard - value sent to Split for sum/average metrics)
+details := openfeature.NewTrackingEventDetails(99.99)
+client.Track(ctx, "purchase", evalCtx, details)
+
+// Track without metric value (count-only event)
+// Sends nil to Split instead of 0, preventing pollution of sum/average metrics
+noValueCtx := split.WithoutMetricValue(ctx)
+countDetails := openfeature.NewTrackingEventDetails(0) // value ignored due to context option
+client.Track(noValueCtx, "page_view", evalCtx, countDetails)
+```
+
+**Why `WithoutMetricValue`?**
+
+OpenFeature's `NewTrackingEventDetails(value)` requires a numeric value, but Split supports count-only events (no metric
+value). Without this option, passing `0` would pollute sum/average metrics. `WithoutMetricValue` tells the provider to
+send `nil` to Split's Track API, recording only the event count.
+
+### Event Handling
+
+Subscribe to provider lifecycle events:
+
```go
-evaluationContext := openfeature.NewEvaluationContext("TARGETING_KEY", nil)
-client.SetEvaluationContext(context)
+openfeature.AddHandler(openfeature.ProviderReady, func(details openfeature.EventDetails) {
+ log.Println("Provider ready")
+})
+
+openfeature.AddHandler(openfeature.ProviderConfigChange, func(details openfeature.EventDetails) {
+ log.Println("Configuration updated")
+})
```
-or at the OpenFeatureAPI level
+
+**Events:**
+
+- `PROVIDER_READY` - Provider initialized successfully
+- `PROVIDER_CONFIG_CHANGE` - Flag configurations updated (detected via polling, default 30s, configurable via
+ `WithMonitoringInterval`)
+- `PROVIDER_ERROR` - Initialization or runtime error
+
+**Event Limitations:**
+
+- `PROVIDER_STALE` events are not emitted due to Split SDK limitations. See [Provider Status](#provider-status) for
+ details.
+- `PROVIDER_CONFIG_CHANGE` is detected by polling (default 30 seconds, configurable via `WithMonitoringInterval`,
+ minimum
+ 5 seconds), not via real-time SSE streaming. While the Split SDK receives changes instantly via SSE, it doesn't expose
+ a callback for configuration changes, so the provider polls `manager.Splits()` to detect changes. See
+ [CONTRIBUTING.md](CONTRIBUTING.md) for details.
+
+### Direct SDK Access
+
+**⚠️ Advanced Usage Only**
+
+The provider manages the Split SDK lifecycle (initialization, shutdown, cleanup). Direct factory access should only be
+used for Split-specific features not available through OpenFeature.
+
+**Lifecycle Constraints:**
+
+- ❌ **DO NOT** call `factory.Client().Destroy()` - provider owns lifecycle
+- ❌ **DO NOT** call `factory.Client().BlockUntilReady()` - use `client.State()` instead (see [Provider Status](#provider-status))
+- ⚠️ Factory is only valid between `Init` and `Shutdown`
+- ⚠️ After `Shutdown()`, factory and client are destroyed
+
+**Example:**
+
```go
-evaluationContext := openfeature.NewEvaluationContext("TARGETING_KEY", nil)
-openfeature.SetEvaluationContext(context)
-````
-If the context was set at the client or api level, it is not required to provide it during flag evaluation.
+factory := provider.Factory()
+// Use factory for Split-specific features not available in OpenFeature
+```
+
+See [Split Go SDK documentation](https://github.com/splitio/go-client) for available methods.
+
+## Testing
+
+**Unit tests:** Use OpenFeature test provider, not Split provider.
-## Submitting issues
-
-The Split team monitors all issues submitted to this [issue tracker](https://github.com/splitio/split-openfeature-provider-go/issues). We encourage you to use this issue tracker to submit any bug reports, feedback, and feature enhancements. We'll do our best to respond in a timely manner.
+**Integration tests:** Use localhost mode with YAML files. See [test/integration/](./test/integration/).
+
+**Provider tests:**
+
+```bash
+task test # Run all tests
+task test-race # Run with race detection
+task test-coverage # Generate coverage report
+```
+
+## Development
+
+Development workflow managed via [Taskfile](./Taskfile.yml):
+
+```bash
+task # List all tasks
+task example-localhost # Run localhost example
+task example-cloud # Run cloud example
+task test-integration # Run integration tests
+task lint # Run linters
+```
+
+## Logging
+
+Provider uses `slog` for structured logging. Configure via `slog.SetDefault()` or `split.WithLogger()` option.
+
+**Source attribution:**
+
+- `source="split-provider"` - Provider logs
+- `source="split-sdk"` - Split SDK logs
+- `source="openfeature-sdk"` - OpenFeature SDK logs (via hooks)
+
+See [examples/](./examples/) for logging configuration patterns.
## Contributing
-Please see [Contributors Guide](CONTRIBUTORS-GUIDE.md) to find all you need to submit a Pull Request (PR).
+
+Contributions welcome. See [CONTRIBUTING.md](CONTRIBUTING.md) for development setup, testing requirements, and PR
+process.
## License
-Licensed under the Apache License, Version 2.0. See: [Apache License](http://www.apache.org/licenses/).
-
-## About Split
-
-Split is the leading Feature Delivery Platform for engineering teams that want to confidently deploy features as fast as they can develop them. Split’s fine-grained management, real-time monitoring, and data-driven experimentation ensure that new features will improve the customer experience without breaking or degrading performance. Companies like Twilio, Salesforce, GoDaddy and WePay trust Split to power their feature delivery.
-
-To learn more about Split, contact hello@split.io, or get started with feature flags for free at https://www.split.io/signup.
-
-Split has built and maintains SDKs for:
-
-* Java [Github](https://github.com/splitio/java-client) [Docs](https://help.split.io/hc/en-us/articles/360020405151-Java-SDK)
-* Javascript [Github](https://github.com/splitio/javascript-client) [Docs](https://help.split.io/hc/en-us/articles/360020448791-JavaScript-SDK)
-* Node [Github](https://github.com/splitio/javascript-client) [Docs](https://help.split.io/hc/en-us/articles/360020564931-Node-js-SDK)
-* .NET [Github](https://github.com/splitio/dotnet-client) [Docs](https://help.split.io/hc/en-us/articles/360020240172--NET-SDK)
-* Ruby [Github](https://github.com/splitio/ruby-client) [Docs](https://help.split.io/hc/en-us/articles/360020673251-Ruby-SDK)
-* PHP [Github](https://github.com/splitio/php-client) [Docs](https://help.split.io/hc/en-us/articles/360020350372-PHP-SDK)
-* Python [Github](https://github.com/splitio/python-client) [Docs](https://help.split.io/hc/en-us/articles/360020359652-Python-SDK)
-* GO [Github](https://github.com/splitio/go-client) [Docs](https://help.split.io/hc/en-us/articles/360020093652-Go-SDK)
-* Android [Github](https://github.com/splitio/android-client) [Docs](https://help.split.io/hc/en-us/articles/360020343291-Android-SDK)
-* iOS [Github](https://github.com/splitio/ios-client) [Docs](https://help.split.io/hc/en-us/articles/360020401491-iOS-SDK)
-
-For a comprehensive list of open source projects visit our [Github page](https://github.com/splitio?utf8=%E2%9C%93&query=%20only%3Apublic%20).
-
-**Learn more about Split:**
-
-Visit [split.io/product](https://www.split.io/product) for an overview of Split, or visit our documentation at [help.split.io](http://help.split.io) for more detailed information.
+Apache License 2.0. See [LICENSE](http://www.apache.org/licenses/LICENSE-2.0).
+
+## Links
+
+- [Split.io](https://www.split.io/)
+- [OpenFeature](https://openfeature.dev/)
+- [API Documentation](https://pkg.go.dev/github.com/splitio/split-openfeature-provider-go/v2)
+- [Issue Tracker](https://github.com/splitio/split-openfeature-provider-go/issues)
diff --git a/Taskfile.yml b/Taskfile.yml
new file mode 100644
index 0000000..a42c011
--- /dev/null
+++ b/Taskfile.yml
@@ -0,0 +1,366 @@
+version: '3'
+
+# Split OpenFeature Provider - Task Runner
+# Run 'task' or 'task help' for usage information
+
+# =============================================================================
+# Global Configuration
+# =============================================================================
+
+output: prefixed
+silent: true
+dotenv: ['.env.local']
+
+# =============================================================================
+# Variables
+# =============================================================================
+
+vars:
+ COVERAGE_FILE: coverage.out
+ COVERAGE_THRESHOLD: 70
+ LINT_TIMEOUT: 5m
+
+# =============================================================================
+# Default & Help
+# =============================================================================
+
+tasks:
+ default:
+ desc: Show available tasks
+ aliases: [list, ls]
+ cmds:
+ - task --list
+
+ help:
+ desc: Show help and common workflows
+ aliases: [h, '?']
+ cmds:
+ - |
+ echo "Split OpenFeature Provider - Task Runner"
+ echo "========================================"
+ echo ""
+ echo "Quick Start:"
+ echo " task install-tools Install development tools"
+ echo " task test Run tests"
+ echo ""
+ echo "Development Workflow:"
+ echo " task check Run all quality checks (lint + test + coverage)"
+ echo " task pre-commit Quick checks before committing"
+ echo " task ci Full CI checks before PR"
+ echo ""
+ echo "Code Quality:"
+ echo " task lint Run linter"
+ echo " task lint-fix Run linter with auto-fix"
+ echo " task fmt Format code"
+ echo " task vet Run go vet"
+ echo ""
+ echo "Testing:"
+ echo " task test Run unit tests with coverage"
+ echo " task test-race Run with race detection (no coverage)"
+ echo " task test-short Run unit tests (fast mode)"
+ echo " task test-coverage Show coverage report"
+ echo " task coverage-check Verify coverage >= {{.COVERAGE_THRESHOLD}}%"
+ echo ""
+ echo "Examples:"
+ echo " task example-localhost Offline mode with YAML (no account)"
+ echo " task example-cloud Cloud mode (requires SPLIT_API_KEY)"
+ echo ""
+ echo "Integration Testing:"
+ echo " task test-integration Auto-selects localhost or cloud mode"
+ echo " task test-cloud Cloud-only features"
+ echo ""
+ echo "Code Generation:"
+ echo " task generate-mocks Generate mock implementations (mockery v3)"
+ echo ""
+ echo "Run 'task --list' to see all available tasks"
+
+ # ===========================================================================
+ # Workflow Tasks
+ # ===========================================================================
+
+ check:
+ desc: Run all quality checks (lint, test, coverage)
+ aliases: [c]
+ cmds:
+ - task: lint
+ - task: test
+ - task: coverage-check
+
+ ci:
+ desc: Run full CI pipeline (use before submitting PR)
+ cmds:
+ - echo "Running CI checks..."
+ - task: fmt-check
+ - task: lint
+ - task: vet
+ - task: test
+ - task: coverage-check
+ - echo "All CI checks passed!"
+
+ pre-commit:
+ desc: Run pre-commit checks (format, lint, quick test)
+ aliases: [pc]
+ cmds:
+ - task: fmt
+ - task: lint
+ - task: test-short
+
+ pre-push:
+ desc: Run pre-push checks (full CI)
+ aliases: [pp]
+ cmds:
+ - task: ci
+
+ # ===========================================================================
+ # Build Tasks
+ # ===========================================================================
+
+ build:
+ desc: Build the provider
+ aliases: [b]
+ method: checksum
+ sources:
+ - '**/*.go'
+ - go.mod
+ - go.sum
+ cmds:
+ - go build -v ./...
+
+ clean:
+ desc: Clean build artifacts and caches
+ cmds:
+ - rm -f {{.COVERAGE_FILE}}
+ - rm -rf .task/
+ - go clean -cache -testcache
+
+ # ===========================================================================
+ # Testing Tasks
+ # ===========================================================================
+
+ test:
+ desc: Run unit tests with race detector and coverage
+ aliases: [t]
+ cmds:
+ - go test -v -race -coverprofile={{.COVERAGE_FILE}} -covermode=atomic $(go list ./... | grep -v /examples/ | grep -v /test/)
+ - task: _update-coverage-badge
+
+ test-race:
+ desc: Run unit tests with race detector (no coverage)
+ aliases: [tr]
+ cmds:
+ - go test -v -race -count=1 $(go list ./... | grep -v /examples/ | grep -v /test/)
+
+ test-short:
+ desc: Run unit tests in short mode (fast)
+ aliases: [ts]
+ cmds:
+ - go test -v -short $(go list ./... | grep -v /examples/ | grep -v /test/)
+
+ # ===========================================================================
+ # Coverage Tasks
+ # ===========================================================================
+
+ coverage:
+ desc: Generate and display coverage report
+ aliases: [cov, test-coverage]
+ deps: [test]
+ cmds:
+ - go tool cover -func={{.COVERAGE_FILE}}
+
+ coverage-check:
+ desc: Verify coverage meets {{.COVERAGE_THRESHOLD}}% threshold
+ aliases: [cc]
+ deps: [test]
+ cmds:
+ - |
+ COVERAGE=$(go tool cover -func={{.COVERAGE_FILE}} | grep total | awk '{print $3}' | sed 's/%//')
+ echo "Coverage: $COVERAGE% (threshold: {{.COVERAGE_THRESHOLD}}%)"
+ if [ $(echo "$COVERAGE < {{.COVERAGE_THRESHOLD}}" | bc) -eq 1 ]; then
+ echo "FAIL: Coverage is below threshold"
+ exit 1
+ fi
+ echo "PASS: Coverage meets threshold"
+
+ coverage-html:
+ desc: Open coverage report in browser
+ deps: [test]
+ cmds:
+ - go tool cover -html={{.COVERAGE_FILE}}
+
+ # ===========================================================================
+ # Code Quality Tasks
+ # ===========================================================================
+
+ lint:
+ desc: Run golangci-lint
+ aliases: [l]
+ method: checksum
+ sources:
+ - '**/*.go'
+ - .golangci.yml
+ cmds:
+ - golangci-lint run --timeout {{.LINT_TIMEOUT}}
+
+ lint-fix:
+ desc: Run golangci-lint with auto-fix
+ aliases: [lf]
+ cmds:
+ - golangci-lint run --fix --timeout {{.LINT_TIMEOUT}}
+
+ fmt:
+ desc: Format code with gofmt
+ aliases: [f]
+ cmds:
+ - gofmt -s -w .
+
+ fmt-check:
+ desc: Check code formatting (no changes)
+ aliases: [fc]
+ cmds:
+ - |
+ UNFORMATTED=$(gofmt -l .)
+ if [ -n "$UNFORMATTED" ]; then
+ echo "Unformatted files:"
+ echo "$UNFORMATTED"
+ exit 1
+ fi
+ echo "All files formatted correctly"
+
+ vet:
+ desc: Run go vet
+ aliases: [v]
+ method: checksum
+ sources:
+ - '**/*.go'
+ cmds:
+ - go vet ./...
+
+ # ===========================================================================
+ # Example Tasks
+ # ===========================================================================
+
+ example-localhost:
+ desc: Run localhost example (offline YAML flags, no account needed)
+ aliases: [el]
+ dir: examples/localhost
+ cmds:
+ - go run main.go
+
+ example-cloud:
+ desc: Run cloud example (requires SPLIT_API_KEY in .env.local)
+ aliases: [ec]
+ dir: examples/cloud
+ preconditions:
+ - sh: test -n "$SPLIT_API_KEY"
+ msg: "SPLIT_API_KEY not set. Create .env.local with SPLIT_API_KEY=your-key"
+ cmds:
+ - go run main.go
+
+ # ===========================================================================
+ # Integration Testing Tasks
+ # ===========================================================================
+
+ test-integration:
+ desc: Run integration tests (auto-selects localhost or cloud mode)
+ aliases: [ti]
+ dir: test/integration
+ cmds:
+ - go run .
+
+ test-cloud:
+ desc: Run cloud-only integration tests (requires SPLIT_API_KEY)
+ aliases: [tc]
+ dir: test/advanced
+ preconditions:
+ - sh: test -n "$SPLIT_API_KEY"
+ msg: "SPLIT_API_KEY not set. Create .env.local with SPLIT_API_KEY=your-key"
+ cmds:
+ - go run main.go
+
+ # ===========================================================================
+ # Dependency Management Tasks
+ # ===========================================================================
+
+ deps-tidy:
+ desc: Tidy go.mod and go.sum
+ aliases: [tidy]
+ cmds:
+ - go mod tidy
+
+ deps-update:
+ desc: Update all dependencies to latest versions
+ aliases: [update]
+ cmds:
+ - echo "Updating dependencies..."
+ - go get -u ./...
+ - go mod tidy
+ - echo "Done. Run 'task test' to verify."
+
+ # ===========================================================================
+ # Code Generation Tasks
+ # ===========================================================================
+
+ generate-mocks:
+ desc: Generate mock implementations using mockery v3
+ aliases: [mocks]
+ method: checksum
+ sources:
+ - client.go
+ generates:
+ - mock_client_test.go
+ cmds:
+ - echo "Generating mocks..."
+ - mockery
+ - echo "Done. Mock files generated."
+
+ # ===========================================================================
+ # Tool Management Tasks
+ # ===========================================================================
+
+ install-tools:
+ desc: Install required development tools
+ aliases: [tools]
+ cmds:
+ - echo "Installing development tools..."
+ - brew install golangci-lint
+ - brew install mockery
+ - echo "Done!"
+
+ check-tools:
+ desc: Verify required tools are installed
+ cmds:
+ - |
+ echo "Checking tools..."
+ MISSING=""
+ if ! command -v golangci-lint &> /dev/null; then
+ echo "[x] golangci-lint - not installed"
+ MISSING="yes"
+ else
+ echo "[ok] golangci-lint"
+ fi
+ if ! command -v mockery &> /dev/null; then
+ echo "[x] mockery - not installed"
+ MISSING="yes"
+ else
+ echo "[ok] mockery"
+ fi
+ if [ -n "$MISSING" ]; then
+ echo ""
+ echo "Run 'task install-tools' to install missing tools"
+ exit 1
+ fi
+ echo ""
+ echo "All tools installed!"
+
+ # ===========================================================================
+ # Internal Tasks
+ # ===========================================================================
+
+ _update-coverage-badge:
+ internal: true
+ cmds:
+ - |
+ COVERAGE=$(go tool cover -func={{.COVERAGE_FILE}} | grep total | awk '{print $3}' | sed 's/%//')
+ sed -i.bak "s/coverage-[0-9.]*%25/coverage-$COVERAGE%25/g" README.md
+ rm -f README.md.bak
+ echo "Coverage badge updated to $COVERAGE%"
diff --git a/benchmark_test.go b/benchmark_test.go
new file mode 100644
index 0000000..b53b9e1
--- /dev/null
+++ b/benchmark_test.go
@@ -0,0 +1,166 @@
+package split
+
+import (
+ "context"
+ "testing"
+
+ "github.com/open-feature/go-sdk/openfeature"
+ "github.com/splitio/go-client/v6/splitio/conf"
+ "github.com/splitio/go-toolkit/v5/logging"
+)
+
+// BenchmarkBooleanEvaluation benchmarks single boolean flag evaluation performance.
+func BenchmarkBooleanEvaluation(b *testing.B) {
+ cfg := conf.Default()
+ cfg.SplitFile = testSplitFile
+ cfg.LoggerConfig.LogLevel = logging.LevelNone
+ cfg.BlockUntilReady = 10
+
+ provider, err := New("localhost", WithSplitConfig(cfg))
+ if err != nil {
+ b.Fatalf("Failed to create provider: %v", err)
+ }
+ defer func() { _ = provider.ShutdownWithContext(context.Background()) }()
+
+ err = provider.InitWithContext(context.Background(), openfeature.NewEvaluationContext("", nil))
+ if err != nil {
+ b.Fatalf("Failed to initialize provider: %v", err)
+ }
+
+ flatCtx := openfeature.FlattenedContext{
+ openfeature.TargetingKey: "bench-user",
+ }
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _ = provider.BooleanEvaluation(context.TODO(), flagSomeOther, false, flatCtx)
+ }
+}
+
+// BenchmarkStringEvaluation benchmarks single string flag evaluation performance.
+func BenchmarkStringEvaluation(b *testing.B) {
+ cfg := conf.Default()
+ cfg.SplitFile = testSplitFile
+ cfg.LoggerConfig.LogLevel = logging.LevelNone
+ cfg.BlockUntilReady = 10
+
+ provider, err := New("localhost", WithSplitConfig(cfg))
+ if err != nil {
+ b.Fatalf("Failed to create provider: %v", err)
+ }
+ defer func() { _ = provider.ShutdownWithContext(context.Background()) }()
+
+ err = provider.InitWithContext(context.Background(), openfeature.NewEvaluationContext("", nil))
+ if err != nil {
+ b.Fatalf("Failed to initialize provider: %v", err)
+ }
+
+ flatCtx := openfeature.FlattenedContext{
+ openfeature.TargetingKey: "bench-user",
+ }
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _ = provider.StringEvaluation(context.TODO(), flagSomeOther, "default", flatCtx)
+ }
+}
+
+// BenchmarkConcurrentEvaluations benchmarks concurrent flag evaluations.
+func BenchmarkConcurrentEvaluations(b *testing.B) {
+ cfg := conf.Default()
+ cfg.SplitFile = testSplitFile
+ cfg.LoggerConfig.LogLevel = logging.LevelNone
+ cfg.BlockUntilReady = 10
+
+ provider, err := New("localhost", WithSplitConfig(cfg))
+ if err != nil {
+ b.Fatalf("Failed to create provider: %v", err)
+ }
+ defer func() { _ = provider.ShutdownWithContext(context.Background()) }()
+
+ err = provider.InitWithContext(context.Background(), openfeature.NewEvaluationContext("", nil))
+ if err != nil {
+ b.Fatalf("Failed to initialize provider: %v", err)
+ }
+
+ flatCtx := openfeature.FlattenedContext{
+ openfeature.TargetingKey: "bench-user",
+ }
+
+ b.ResetTimer()
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ _ = provider.BooleanEvaluation(context.TODO(), flagSomeOther, false, flatCtx)
+ }
+ })
+}
+
+// BenchmarkProviderInitialization measures provider initialization time.
+func BenchmarkProviderInitialization(b *testing.B) {
+ cfg := conf.Default()
+ cfg.SplitFile = testSplitFile
+ cfg.LoggerConfig.LogLevel = logging.LevelNone
+ cfg.BlockUntilReady = 10
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ provider, err := New("localhost", WithSplitConfig(cfg))
+ if err != nil {
+ b.Fatalf("Failed to create provider: %v", err)
+ }
+
+ err = provider.InitWithContext(context.Background(), openfeature.NewEvaluationContext("", nil))
+ if err != nil {
+ b.Fatalf("Failed to initialize provider: %v", err)
+ }
+
+ _ = provider.ShutdownWithContext(context.Background())
+ }
+}
+
+// BenchmarkAttributeHeavyEvaluation measures evaluation performance with many attributes.
+func BenchmarkAttributeHeavyEvaluation(b *testing.B) {
+ cfg := conf.Default()
+ cfg.SplitFile = testSplitFile
+ cfg.LoggerConfig.LogLevel = logging.LevelNone
+ cfg.BlockUntilReady = 10
+
+ provider, err := New("localhost", WithSplitConfig(cfg))
+ if err != nil {
+ b.Fatalf("Failed to create provider: %v", err)
+ }
+ defer func() { _ = provider.ShutdownWithContext(context.Background()) }()
+
+ err = provider.InitWithContext(context.Background(), openfeature.NewEvaluationContext("", nil))
+ if err != nil {
+ b.Fatalf("Failed to initialize provider: %v", err)
+ }
+
+ flatCtx := openfeature.FlattenedContext{
+ openfeature.TargetingKey: "bench-user",
+ "email": "user@example.com",
+ "plan": "enterprise",
+ "region": "us-east-1",
+ "org_id": "org-12345",
+ "user_id": "user-67890",
+ "account_type": "premium",
+ "feature_flags_enabled": true,
+ "beta_tester": true,
+ "signup_date": "2024-01-15",
+ "last_login": "2025-01-18",
+ "session_count": 42,
+ "total_spend": 1299.99,
+ "conversion_rate": 0.25,
+ "engagement_score": 87.5,
+ "device_type": "desktop",
+ "browser": "chrome",
+ "os": "macos",
+ "language": "en-US",
+ "timezone": "America/New_York",
+ }
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _ = provider.BooleanEvaluation(context.TODO(), flagSomeOther, false, flatCtx)
+ }
+}
diff --git a/client.go b/client.go
new file mode 100644
index 0000000..2fb5704
--- /dev/null
+++ b/client.go
@@ -0,0 +1,26 @@
+package split
+
+import "github.com/splitio/go-client/v6/splitio/client"
+
+// Compile-time check that *client.SplitClient satisfies Client.
+var _ Client = (*client.SplitClient)(nil)
+
+// Client defines the Split SDK client methods used by this provider.
+// This interface enables dependency injection for testing (mock generation via mockery).
+// Method signatures match *client.SplitClient exactly (verified by compile-time check above).
+type Client interface {
+ // TreatmentWithConfig evaluates a single flag and returns the treatment with optional JSON config.
+ TreatmentWithConfig(key interface{}, featureFlagName string, attributes map[string]interface{}) client.TreatmentResult
+
+ // TreatmentsWithConfigByFlagSet evaluates all flags in a flag set.
+ TreatmentsWithConfigByFlagSet(key interface{}, flagSet string, attributes map[string]interface{}) map[string]client.TreatmentResult
+
+ // Track sends a tracking event to Split for analytics.
+ Track(key string, trafficType string, eventType string, value interface{}, properties map[string]interface{}) error
+
+ // BlockUntilReady blocks until the SDK is ready or the timeout (seconds) expires.
+ BlockUntilReady(timer int) error
+
+ // Destroy shuts down the SDK client and releases resources.
+ Destroy()
+}
diff --git a/concurrency_test.go b/concurrency_test.go
new file mode 100644
index 0000000..e8962c6
--- /dev/null
+++ b/concurrency_test.go
@@ -0,0 +1,184 @@
+package split
+
+import (
+ "context"
+ "fmt"
+ "strings"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/open-feature/go-sdk/openfeature"
+ "github.com/splitio/go-client/v6/splitio/conf"
+ "github.com/splitio/go-toolkit/v5/logging"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+// TestConcurrentEvaluations tests thread safety with concurrent evaluations.
+func TestConcurrentEvaluations(t *testing.T) {
+ cfg := conf.Default()
+ cfg.SplitFile = testSplitFile
+ cfg.LoggerConfig.LogLevel = logging.LevelNone
+ cfg.BlockUntilReady = 10
+
+ provider, err := New("localhost", WithSplitConfig(cfg))
+ require.NoError(t, err, "Failed to create provider")
+
+ ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
+ defer cancel()
+ err = openfeature.SetProviderWithContextAndWait(ctx, provider)
+ require.NoError(t, err, "Failed to set provider")
+
+ defer func() {
+ shutdownCtx, shutdownCancel := context.WithTimeout(context.Background(), 30*time.Second)
+ defer shutdownCancel()
+ _ = openfeature.ShutdownWithContext(shutdownCtx)
+ }()
+
+ const numGoroutines = 50
+ const numEvaluations = 100
+
+ var wg sync.WaitGroup
+ errors := make(chan error, numGoroutines)
+
+ for i := 0; i < numGoroutines; i++ {
+ wg.Add(1)
+ go func(id int) {
+ defer wg.Done()
+ ofClient := openfeature.NewClient("concurrent-test")
+ evalCtx := openfeature.NewEvaluationContext("test-user", nil)
+
+ for j := 0; j < numEvaluations; j++ {
+ _, err := ofClient.BooleanValue(
+ context.TODO(),
+ flagSomeOther,
+ false,
+ evalCtx,
+ )
+ if err != nil && !strings.Contains(err.Error(), "FLAG_NOT_FOUND") {
+ errors <- fmt.Errorf("goroutine %d iteration %d: %w", id, j, err)
+ return
+ }
+
+ _, err = ofClient.StringValue(
+ context.TODO(),
+ flagSomeOther,
+ "default",
+ evalCtx,
+ )
+ if err != nil && !strings.Contains(err.Error(), "FLAG_NOT_FOUND") {
+ errors <- fmt.Errorf("goroutine %d iteration %d: %w", id, j, err)
+ return
+ }
+ }
+ }(i)
+ }
+
+ wg.Wait()
+ close(errors)
+
+ for err := range errors {
+ t.Errorf("Concurrent evaluation error: %v", err)
+ }
+}
+
+// TestConcurrentInitShutdown tests race conditions when Init and Shutdown are called concurrently.
+func TestConcurrentInitShutdown(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping in short mode")
+ }
+
+ cfg := conf.Default()
+ cfg.SplitFile = testSplitFile
+ cfg.LoggerConfig.LogLevel = logging.LevelNone
+ cfg.BlockUntilReady = 1
+
+ const iterations = 2
+ for i := 0; i < iterations; i++ {
+ provider, err := New("localhost", WithSplitConfig(cfg))
+ require.NoError(t, err)
+
+ var wg sync.WaitGroup
+ const concurrency = 3
+
+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel()
+
+ for j := 0; j < concurrency; j++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ _ = provider.InitWithContext(ctx, openfeature.NewEvaluationContext("", nil))
+ }()
+ }
+
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ time.Sleep(10 * time.Millisecond)
+ shutdownCtx, shutdownCancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer shutdownCancel()
+ _ = provider.ShutdownWithContext(shutdownCtx)
+ }()
+
+ wg.Wait()
+
+ assert.Equal(t, openfeature.NotReadyState, provider.Status(), "Provider should be NotReady after shutdown")
+ }
+}
+
+// TestEventChannelOverflow tests behavior when event channel buffer is full.
+func TestEventChannelOverflow(t *testing.T) {
+ cfg := conf.Default()
+ cfg.SplitFile = testSplitFile
+ cfg.LoggerConfig.LogLevel = logging.LevelNone
+ cfg.BlockUntilReady = 1
+
+ provider, err := New("localhost", WithSplitConfig(cfg))
+ require.NoError(t, err)
+ defer func() { _ = provider.ShutdownWithContext(context.Background()) }()
+
+ err = provider.InitWithContext(context.Background(), openfeature.NewEvaluationContext("", nil))
+ require.NoError(t, err)
+
+ eventChan := provider.EventChannel()
+
+ const eventsToEmit = 150
+	const bufferSize = 128 // must match eventChannelBuffer in constants.go
+
+ for i := 0; i < eventsToEmit; i++ {
+ select {
+ case <-eventChan:
+ // Drain one event to make room
+ default:
+ // Channel full or empty
+ }
+ }
+
+ done := make(chan bool)
+ go func() {
+ status := provider.Status()
+ assert.Equal(t, openfeature.ReadyState, status, "Provider should still be ready")
+ done <- true
+ }()
+
+ select {
+ case <-done:
+ // Success - operation completed without blocking
+ case <-time.After(2 * time.Second):
+ t.Fatal("Event emission appears to be blocking")
+ }
+
+ drained := 0
+ for {
+ select {
+ case <-eventChan:
+ drained++
+ case <-time.After(10 * time.Millisecond):
+ goto doneLabel
+ }
+ }
+doneLabel:
+ assert.LessOrEqual(t, drained, bufferSize, "Should not have more events than buffer size")
+}
diff --git a/config.go b/config.go
new file mode 100644
index 0000000..04c1024
--- /dev/null
+++ b/config.go
@@ -0,0 +1,51 @@
+package split
+
+import "github.com/splitio/go-client/v6/splitio/conf"
+
+// TestConfig returns an optimized Split SDK configuration for tests and examples.
+// This configuration minimizes timeouts, queue sizes, and sync intervals for faster
+// execution while maintaining full functionality.
+//
+// Optimizations applied:
+// - BlockUntilReady: 5 seconds (faster initialization timeout)
+// - HTTPTimeout: 5 seconds (faster network failure detection)
+// - ImpressionsMode: debug (sends all impressions, not batched)
+// - Queue sizes: Reduced to 100 (faster event/impression flushing)
+// - Bulk sizes: Reduced to 100 (smaller batches, faster submission)
+// - Sync intervals: Set to minimums (faster updates)
+//
+// Usage:
+//
+// cfg := split.TestConfig()
+// cfg.SplitFile = "./split.yaml" // For localhost mode
+// provider, err := split.New(apiKey, split.WithSplitConfig(cfg))
+func TestConfig() *conf.SplitSdkConfig {
+ cfg := conf.Default()
+
+ // Faster initialization timeout
+ cfg.BlockUntilReady = 5
+
+ // Faster network failure detection
+ cfg.Advanced.HTTPTimeout = 5
+
+ // Use debug mode for impression tracking (sends all impressions, 60s sync)
+ // Default "optimized" batches impressions which can delay visibility
+ cfg.ImpressionsMode = "debug"
+
+ // Smaller queues for faster flushing in tests
+ cfg.Advanced.EventsQueueSize = 100
+ cfg.Advanced.ImpressionsQueueSize = 100
+
+ // Smaller batches for faster submission
+ cfg.Advanced.EventsBulkSize = 100
+ cfg.Advanced.ImpressionsBulkSize = 100
+
+ // Minimum sync intervals for faster updates
+ cfg.TaskPeriods.SplitSync = 5 // minimum: 5s
+ cfg.TaskPeriods.SegmentSync = 30 // minimum: 30s
+ cfg.TaskPeriods.ImpressionSync = 60 // minimum: 60s (debug mode)
+ cfg.TaskPeriods.EventsSync = 1 // minimum: 1s
+ cfg.TaskPeriods.TelemetrySync = 60 // reduced from 3600s
+
+ return cfg
+}
diff --git a/constants.go b/constants.go
new file mode 100644
index 0000000..d3c2b75
--- /dev/null
+++ b/constants.go
@@ -0,0 +1,68 @@
+package split
+
+import "time"
+
+const (
+ // SDK Timeouts
+
+ // defaultSDKTimeout is the default timeout in seconds for Split SDK initialization.
+ // Used as the default BlockUntilReady timeout when not configured.
+ defaultSDKTimeout = 10
+
+ // defaultInitTimeout is the default timeout for provider initialization when no BlockUntilReady is configured.
+ // Provides 5 seconds buffer beyond the defaultSDKTimeout (10s SDK + 5s buffer = 15s total).
+ defaultInitTimeout = 15 * time.Second
+
+ // initTimeoutBuffer is added to BlockUntilReady to ensure initialization has time to complete gracefully.
+ initTimeoutBuffer = 5 * time.Second
+
+ // defaultShutdownTimeout is the default timeout for provider shutdown operations.
+ // Allows time for monitoring goroutine cleanup, SDK destroy, and channel closes.
+ defaultShutdownTimeout = 30 * time.Second
+
+ // Event Handling
+
+ // eventChannelBuffer is the buffer size for the provider's event channel.
+ // Events are sent asynchronously to OpenFeature SDK handlers.
+ // Provides headroom for burst events. Overflow events are dropped (logged as warnings).
+ eventChannelBuffer = 128
+
+ // Monitoring
+
+ // defaultMonitoringInterval is the default interval for checking split definition changes.
+ defaultMonitoringInterval = 30 * time.Second
+
+ // minMonitoringInterval is the minimum allowed monitoring interval.
+ minMonitoringInterval = 5 * time.Second
+
+ // Atomic States
+
+ // shutdownStateActive indicates the provider has been shut down (atomic flag = 1).
+ shutdownStateActive = 1
+
+ // shutdownStateInactive indicates the provider is active (atomic flag = 0).
+ shutdownStateInactive = 0
+
+ // Split SDK Constants
+
+ // controlTreatment is the treatment returned by Split SDK when a flag doesn't exist
+ // or evaluation fails. Used to detect missing flags and return defaults.
+ controlTreatment = "control"
+
+ // treatmentOn is the conventional Split treatment for boolean "true".
+ treatmentOn = "on"
+
+ // treatmentOff is the conventional Split treatment for boolean "false".
+ treatmentOff = "off"
+
+ // OpenFeature Context Keys
+
+ // TrafficTypeKey is the evaluation context attribute key for Split traffic type.
+ // Used by Track() to categorize events. Not used for flag evaluations
+ // (traffic type is configured per flag in Split dashboard).
+ TrafficTypeKey = "trafficType"
+
+ // DefaultTrafficType is the default traffic type used when not specified in context.
+ // "user" is the most common traffic type for user-based targeting and tracking.
+ DefaultTrafficType = "user"
+)
diff --git a/doc.go b/doc.go
new file mode 100644
index 0000000..91295e6
--- /dev/null
+++ b/doc.go
@@ -0,0 +1,42 @@
+// Package split provides an OpenFeature provider implementation for Split.io
+// feature flags and A/B testing platform.
+//
+// # Basic Usage
+//
+// provider, err := split.New("YOUR_API_KEY")
+// if err != nil {
+// log.Fatal(err)
+// }
+//
+// ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
+// defer cancel()
+// if err := openfeature.SetProviderWithContextAndWait(ctx, provider); err != nil {
+// log.Fatal(err)
+// }
+//
+// client := openfeature.NewClient("my-app")
+// evalCtx := openfeature.NewEvaluationContext("user-123", map[string]any{
+// "email": "user@example.com",
+// })
+// enabled, _ := client.BooleanValue(context.Background(), "new-feature", false, evalCtx)
+//
+// Evaluations return default values on errors. Use *ValueDetails methods to
+// distinguish success from fallback via Reason and ErrorCode fields.
+//
+// # Configuration
+//
+// cfg := conf.Default()
+// cfg.BlockUntilReady = 15
+//
+// provider, _ := split.New("YOUR_API_KEY",
+// split.WithSplitConfig(cfg),
+// split.WithLogger(logger),
+// )
+//
+// # Concurrency
+//
+// The provider is thread-safe. Multiple goroutines can evaluate flags
+// concurrently. Shutdown waits for in-flight evaluations to complete.
+//
+// See README.md for complete documentation and examples.
+package split
diff --git a/docs/images/of_banner.png b/docs/images/of_banner.png
new file mode 100644
index 0000000..bf51611
Binary files /dev/null and b/docs/images/of_banner.png differ
diff --git a/evaluation.go b/evaluation.go
new file mode 100644
index 0000000..b7ee110
--- /dev/null
+++ b/evaluation.go
@@ -0,0 +1,505 @@
+package split
+
+import (
+ "context"
+ "fmt"
+ "strconv"
+ "sync/atomic"
+
+ of "github.com/open-feature/go-sdk/openfeature"
+)
+
+// FlagResult represents a single flag evaluation result.
+type FlagResult struct {
+ Config any // Parsed JSON config, or nil
+ Treatment string // Split treatment name (e.g., "on", "off", "v1")
+}
+
+// FlagSetResult maps flag names to their evaluation results.
+// Returned by ObjectEvaluation for flag sets and individual flag evaluations.
+type FlagSetResult map[string]FlagResult
+
+// BooleanEvaluation evaluates a feature flag and returns a boolean value.
+//
+// The method converts Split treatments to boolean values:
+// - "on" → true
+// - "off" → false
+// - Other values (including "true", "false", "1", "0") → parse error, returns def
+//
+// A targeting key must be present in ec. Additional attributes in ec
+// are passed to Split for targeting rule evaluation.
+//
+// Context Cancellation Limitation:
+// The ctx parameter is checked BEFORE evaluation starts, but the Split SDK does
+// not support canceling in-flight evaluations. Once evaluation begins, it runs to
+// completion. Evaluations are typically very fast (<1ms from cache), so this is
+// rarely an issue. See README "Known Limitations" for details.
+//
+// Returns the def if:
+// - Context is canceled or deadline exceeded (checked before evaluation)
+// - Targeting key is missing
+// - Flag is not found
+// - Treatment cannot be parsed as boolean
+func (p *Provider) BooleanEvaluation(ctx context.Context, flag string, def bool, ec of.FlattenedContext) of.BoolResolutionDetail {
+ targetingKey, ok := ec[of.TargetingKey].(string)
+ if !ok {
+ targetingKey = ""
+ }
+ p.logger.Debug("evaluating boolean flag", "flag", flag, "targeting_key", targetingKey, "default", def)
+
+ if validationDetail := p.validateEvaluationContext(ctx, ec); validationDetail.Error() != nil {
+ p.logger.Debug("validation failed", "flag", flag, "error", validationDetail.ResolutionError.Error())
+ return of.BoolResolutionDetail{
+ Value: def,
+ ProviderResolutionDetail: validationDetail,
+ }
+ }
+
+ result := p.evaluateTreatmentWithConfig(ctx, flag, ec)
+ p.logger.Debug("Split treatment received", "flag", flag, "treatment", result.Treatment, "has_config", result.Config != nil)
+
+ if noTreatment(result.Treatment) {
+ p.logger.Debug("flag not found or control treatment", "flag", flag, "treatment", result.Treatment)
+ return of.BoolResolutionDetail{
+ Value: def,
+ ProviderResolutionDetail: resolutionDetailNotFound(result.Treatment),
+ }
+ }
+ var value bool
+ switch result.Treatment {
+ case treatmentOn:
+ value = true
+ case treatmentOff:
+ value = false
+ default:
+ p.logger.Warn("cannot parse treatment as boolean", "flag", flag, "treatment", result.Treatment, "returning_default", def)
+ return of.BoolResolutionDetail{
+ Value: def,
+ ProviderResolutionDetail: resolutionDetailParseError(result.Treatment),
+ }
+ }
+ p.logger.Debug("boolean evaluation successful", "flag", flag, "value", value, "treatment", result.Treatment)
+ return of.BoolResolutionDetail{
+ Value: value,
+ ProviderResolutionDetail: p.resolutionDetailWithConfig(flag, result.Treatment, result.Config),
+ }
+}
+
+// StringEvaluation evaluates a feature flag and returns a string value.
+//
+// The method returns the Split treatment directly as a string. This is the most
+// common evaluation type as Split treatments are inherently string-based.
+//
+// A targeting key must be present in ec. Additional attributes in ec
+// are passed to Split for targeting rule evaluation.
+//
+// Context Cancellation Limitation:
+// The ctx parameter is checked BEFORE evaluation starts, but the Split SDK does
+// not support canceling in-flight evaluations. See README "Known Limitations".
+//
+// Returns the def if:
+// - Context is canceled or deadline exceeded (checked before evaluation)
+// - Targeting key is missing
+// - Flag is not found (treatment is "control" or empty)
+func (p *Provider) StringEvaluation(ctx context.Context, flag, def string, ec of.FlattenedContext) of.StringResolutionDetail {
+ targetingKey, ok := ec[of.TargetingKey].(string)
+ if !ok {
+ targetingKey = ""
+ }
+ p.logger.Debug("evaluating string flag", "flag", flag, "targeting_key", targetingKey, "default", def)
+
+ if validationDetail := p.validateEvaluationContext(ctx, ec); validationDetail.Error() != nil {
+ p.logger.Debug("validation failed", "flag", flag, "error", validationDetail.ResolutionError.Error())
+ return of.StringResolutionDetail{
+ Value: def,
+ ProviderResolutionDetail: validationDetail,
+ }
+ }
+
+ result := p.evaluateTreatmentWithConfig(ctx, flag, ec)
+ p.logger.Debug("Split treatment received", "flag", flag, "treatment", result.Treatment, "has_config", result.Config != nil)
+
+ if noTreatment(result.Treatment) {
+ p.logger.Debug("flag not found or control treatment", "flag", flag, "treatment", result.Treatment)
+ return of.StringResolutionDetail{
+ Value: def,
+ ProviderResolutionDetail: resolutionDetailNotFound(result.Treatment),
+ }
+ }
+ p.logger.Debug("string evaluation successful", "flag", flag, "value", result.Treatment, "treatment", result.Treatment)
+ return of.StringResolutionDetail{
+ Value: result.Treatment,
+ ProviderResolutionDetail: p.resolutionDetailWithConfig(flag, result.Treatment, result.Config),
+ }
+}
+
+// FloatEvaluation evaluates a feature flag and returns a float64 value.
+//
+// The method parses the Split treatment as a floating-point number. This is useful
+// for flags that control numeric values like pricing, weights, or percentages.
+//
+// A targeting key must be present in ec. Additional attributes in ec
+// are passed to Split for targeting rule evaluation.
+//
+// Context Cancellation Limitation:
+// The ctx parameter is checked BEFORE evaluation starts, but the Split SDK does
+// not support canceling in-flight evaluations. See README "Known Limitations".
+//
+// Returns the def if:
+// - Context is canceled or deadline exceeded (checked before evaluation)
+// - Targeting key is missing
+// - Flag is not found
+// - Treatment cannot be parsed as a valid float64
+func (p *Provider) FloatEvaluation(ctx context.Context, flag string, def float64, ec of.FlattenedContext) of.FloatResolutionDetail {
+ targetingKey, ok := ec[of.TargetingKey].(string)
+ if !ok {
+ targetingKey = ""
+ }
+ p.logger.Debug("evaluating float flag", "flag", flag, "targeting_key", targetingKey, "default", def)
+
+ if validationDetail := p.validateEvaluationContext(ctx, ec); validationDetail.Error() != nil {
+ p.logger.Debug("validation failed", "flag", flag, "error", validationDetail.ResolutionError.Error())
+ return of.FloatResolutionDetail{
+ Value: def,
+ ProviderResolutionDetail: validationDetail,
+ }
+ }
+
+ result := p.evaluateTreatmentWithConfig(ctx, flag, ec)
+ p.logger.Debug("Split treatment received", "flag", flag, "treatment", result.Treatment, "has_config", result.Config != nil)
+
+ if noTreatment(result.Treatment) {
+ p.logger.Debug("flag not found or control treatment", "flag", flag, "treatment", result.Treatment)
+ return of.FloatResolutionDetail{
+ Value: def,
+ ProviderResolutionDetail: resolutionDetailNotFound(result.Treatment),
+ }
+ }
+ floatEvaluated, parseErr := strconv.ParseFloat(result.Treatment, 64)
+ if parseErr != nil {
+ p.logger.Warn("cannot parse treatment as float", "flag", flag, "treatment", result.Treatment, "error", parseErr, "returning_default", def)
+ return of.FloatResolutionDetail{
+ Value: def,
+ ProviderResolutionDetail: resolutionDetailParseError(result.Treatment),
+ }
+ }
+ p.logger.Debug("float evaluation successful", "flag", flag, "value", floatEvaluated, "treatment", result.Treatment)
+ return of.FloatResolutionDetail{
+ Value: floatEvaluated,
+ ProviderResolutionDetail: p.resolutionDetailWithConfig(flag, result.Treatment, result.Config),
+ }
+}
+
+// IntEvaluation evaluates a feature flag and returns an int64 value.
+//
+// The method parses the Split treatment as a 64-bit integer. This is useful for
+// flags that control counts, limits, timeouts, or other integer-based values.
+//
+// A targeting key must be present in ec. Additional attributes in ec
+// are passed to Split for targeting rule evaluation.
+//
+// Context Cancellation Limitation:
+// The ctx parameter is checked BEFORE evaluation starts, but the Split SDK does
+// not support canceling in-flight evaluations. See README "Known Limitations".
+//
+// Returns the def if:
+// - Context is canceled or deadline exceeded (checked before evaluation)
+// - Targeting key is missing
+// - Flag is not found
+// - Treatment cannot be parsed as a valid int64
+func (p *Provider) IntEvaluation(ctx context.Context, flag string, def int64, ec of.FlattenedContext) of.IntResolutionDetail {
+ targetingKey, ok := ec[of.TargetingKey].(string)
+ if !ok {
+ targetingKey = ""
+ }
+ p.logger.Debug("evaluating int flag", "flag", flag, "targeting_key", targetingKey, "default", def)
+
+ if validationDetail := p.validateEvaluationContext(ctx, ec); validationDetail.Error() != nil {
+ p.logger.Debug("validation failed", "flag", flag, "error", validationDetail.ResolutionError.Error())
+ return of.IntResolutionDetail{
+ Value: def,
+ ProviderResolutionDetail: validationDetail,
+ }
+ }
+
+ result := p.evaluateTreatmentWithConfig(ctx, flag, ec)
+ p.logger.Debug("Split treatment received", "flag", flag, "treatment", result.Treatment, "has_config", result.Config != nil)
+
+ if noTreatment(result.Treatment) {
+ p.logger.Debug("flag not found or control treatment", "flag", flag, "treatment", result.Treatment)
+ return of.IntResolutionDetail{
+ Value: def,
+ ProviderResolutionDetail: resolutionDetailNotFound(result.Treatment),
+ }
+ }
+ intEvaluated, parseErr := strconv.ParseInt(result.Treatment, 10, 64)
+ if parseErr != nil {
+ p.logger.Warn("cannot parse treatment as int", "flag", flag, "treatment", result.Treatment, "error", parseErr, "returning_default", def)
+ return of.IntResolutionDetail{
+ Value: def,
+ ProviderResolutionDetail: resolutionDetailParseError(result.Treatment),
+ }
+ }
+ p.logger.Debug("int evaluation successful", "flag", flag, "value", intEvaluated, "treatment", result.Treatment)
+ return of.IntResolutionDetail{
+ Value: intEvaluated,
+ ProviderResolutionDetail: p.resolutionDetailWithConfig(flag, result.Treatment, result.Config),
+ }
+}
+
+// ObjectEvaluation evaluates feature flags and returns them as a FlagSetResult.
+//
+// Mode of Operation:
+// - Localhost Mode: Always treats flag parameter as a single flag name
+// - Cloud Mode: Treats flag parameter as a flag set name by default;
+// use WithEvaluationMode(EvaluationModeIndividual) to evaluate as a single flag
+//
+// Returns FlagSetResult (map[string]FlagResult) where each FlagResult contains:
+// - Treatment: string (the Split treatment name)
+// - Config: any (parsed JSON config, supports objects/arrays/primitives, or nil)
+//
+// Config values support any valid JSON type. Non-object configs (primitives, arrays)
+// are returned as-is in the Config field.
+//
+// A targeting key must be present in ec. Additional attributes in ec
+// are passed to Split for targeting rule evaluation.
+//
+// Context Cancellation Limitation:
+// The ctx parameter is checked BEFORE evaluation starts, but the Split SDK does
+// not support canceling in-flight evaluations. See README "Known Limitations".
+//
+// Returns def if context canceled (before evaluation), targeting key missing, or flag/flag set not found.
+//
+// Example:
+//
+// evalCtx := openfeature.NewEvaluationContext("user-123", nil)
+// result, _ := client.ObjectValue(ctx, "ui-features", split.FlagSetResult{}, evalCtx)
+// flags := result.(split.FlagSetResult)
+// theme := flags["theme"]
+// fmt.Println(theme.Treatment) // "dark"
+// fmt.Println(theme.Config) // map[string]any{"primary": "#000"}
+func (p *Provider) ObjectEvaluation(ctx context.Context, flag string, def any, ec of.FlattenedContext) of.InterfaceResolutionDetail {
+ targetingKey, ok := ec[of.TargetingKey].(string)
+ if !ok {
+ targetingKey = ""
+ }
+ p.logger.Debug("evaluating object flag", "flag", flag, "targeting_key", targetingKey)
+
+ if validationDetail := p.validateEvaluationContext(ctx, ec); validationDetail.Error() != nil {
+ p.logger.Debug("validation failed", "flag", flag, "error", validationDetail.ResolutionError.Error())
+ return of.InterfaceResolutionDetail{
+ Value: def,
+ ProviderResolutionDetail: validationDetail,
+ }
+ }
+
+ var results FlagSetResult
+
+ // Get evaluation options from context
+ evalOpts := GetEvalOptions(ctx)
+
+ // Determine evaluation mode
+ var mode EvaluationMode
+ if p.isLocalhostMode() {
+ // Localhost mode: always use individual (flag sets not supported in localhost mode)
+ if evalOpts.Mode == EvaluationModeSet {
+ p.logger.Warn("EvaluationModeSet ignored in localhost mode, using individual evaluation",
+ "requested_mode", evalOpts.Mode,
+ "flag", flag)
+ }
+ mode = EvaluationModeIndividual
+ } else {
+ // Cloud mode: respect EvaluationMode option, default to set
+ mode = evalOpts.Mode
+ if mode == EvaluationModeDefault {
+ mode = EvaluationModeSet
+ }
+ }
+
+ // Execute based on resolved mode.
+ switch mode {
+ case EvaluationModeIndividual:
+ p.logger.Debug("evaluating single flag as object", "flag", flag)
+ results = p.evaluateSingleFlagAsObject(ctx, flag, ec)
+ case EvaluationModeSet:
+ p.logger.Debug("evaluating flag set", "flag_set", flag)
+ results = p.evaluateTreatmentsByFlagSet(ctx, flag, ec)
+ default:
+ p.logger.Error("unknown evaluation mode", "mode", mode, "flag", flag)
+ return of.InterfaceResolutionDetail{
+ Value: def,
+ ProviderResolutionDetail: of.ProviderResolutionDetail{
+ ResolutionError: of.NewGeneralResolutionError(fmt.Sprintf("unknown evaluation mode: %s", mode)),
+ Reason: of.ErrorReason,
+ },
+ }
+ }
+
+ if len(results) == 0 {
+ p.logger.Debug("no results returned", "flag", flag, "mode", mode)
+ return of.InterfaceResolutionDetail{
+ Value: def,
+ ProviderResolutionDetail: resolutionDetailNotFound(""),
+ }
+ }
+
+ p.logger.Debug("object evaluation successful", "flag", flag, "flag_count", len(results), "mode", mode)
+ // FlagMetadata is nil because configs are already embedded in each FlagResult.Config
+ // within the FlagSetResult value. Unlike scalar evaluations (where the value is the
+ // treatment and config needs FlagMetadata as a separate channel), ObjectEvaluation
+ // returns the full FlagSetResult containing per-flag configs directly.
+ return of.InterfaceResolutionDetail{
+ Value: results,
+ ProviderResolutionDetail: of.ProviderResolutionDetail{
+ Reason: of.TargetingMatchReason,
+ Variant: flag,
+ },
+ }
+}
+
+// Hooks returns the provider's hooks for OpenFeature lifecycle events.
+//
+// Currently returns nil (no hooks implemented).
+func (p *Provider) Hooks() []of.Hook {
+ return nil
+}
+
+// Track sends a tracking event to Split for experimentation and analytics.
+//
+// This method implements the Tracker interface, enabling the association of
+// feature flag evaluations with subsequent actions or application states.
+// The tracking data is used by Split for:
+// - A/B testing and experimentation
+// - Feature impact analysis
+// - Business metrics correlation
+//
+// Parameters:
+// - ctx: Context for the operation (checked for cancellation before tracking)
+// - trackingEventName: The name of the event to track (e.g., "checkout", "signup")
+// - evaluationContext: Contains the targeting key (user ID) and attributes
+// - details: Optional tracking event details with value and custom attributes
+//
+// Required evaluation context:
+// - targetingKey: The user identifier (required)
+// - trafficType: The Split traffic type (optional, defaults to "user")
+//
+// The trackingEventName must match Split's event type constraints:
+// - Maximum 80 characters
+// - Starts with letter or number
+// - Contains only letters, numbers, hyphens, underscores, periods, or colons
+//
+// The details.Value() is passed as the event value to Split.
+// The details.Attributes() are passed as event properties to Split.
+//
+// Supported property types: string, bool, int, int32, int64, uint, uint32, uint64,
+// float32, float64, and nil. Unsupported types (arrays, maps, structs) are silently
+// set to nil by the Split SDK - no error is returned.
+//
+// If the provider is not ready, context is canceled, or the targeting key is empty,
+// the call is logged and silently ignored (the Tracker interface defines no error return).
+//
+// Localhost Mode: Track events are accepted but not persisted (no server to send
+// them to). This allows code using Track() to run unchanged in local development.
+//
+// Example:
+//
+// evalCtx := openfeature.NewEvaluationContext("user-123", map[string]any{
+// "trafficType": "account", // optional, defaults to "user"
+// })
+// details := openfeature.NewTrackingEventDetails(99.99).
+// Add("currency", "USD").
+// Add("item_count", 3)
+// client.Track(ctx, "purchase", evalCtx, details)
+func (p *Provider) Track(ctx context.Context, trackingEventName string, evaluationContext of.EvaluationContext, details of.TrackingEventDetails) {
+ // Check shutdown first (fast fail to avoid lock overhead during shutdown)
+ if atomic.LoadUint32(&p.shutdown) == shutdownStateActive {
+ p.logger.Debug("tracking event ignored, provider not ready",
+ "event", trackingEventName)
+ return
+ }
+
+ // Check if provider is ready
+ if p.Status() != of.ReadyState {
+ p.logger.Debug("tracking event ignored, provider not ready",
+ "event", trackingEventName)
+ return
+ }
+
+ // Check context cancellation (consistent with evaluation methods)
+ if err := ctx.Err(); err != nil {
+ p.logger.Debug("tracking event ignored, context canceled",
+ "event", trackingEventName,
+ "error", err)
+ return
+ }
+
+ // Get targeting key (user identifier)
+ key := evaluationContext.TargetingKey()
+ if key == "" {
+ p.logger.Warn("tracking event ignored, empty targeting key",
+ "event", trackingEventName,
+ "hint", "ensure evaluationContext has a non-empty TargetingKey")
+ return
+ }
+
+ // Get traffic type from context attributes, default to DefaultTrafficType
+ // Traffic type must match a defined type in Split
+ trafficType := DefaultTrafficType
+ if attrs := evaluationContext.Attributes(); attrs != nil {
+ if tt, ok := attrs[TrafficTypeKey].(string); ok && tt != "" {
+ trafficType = tt
+ }
+ }
+
+ // Get track options from context to check if metric value should be sent
+ trackOpts := GetTrackOptions(ctx)
+
+ // Determine value to send - use nil if MetricValueAbsent to avoid polluting sum/avg metrics
+ var value interface{}
+ if trackOpts.MetricValueAbsent {
+ // Explicitly pass nil - Split excludes nil from sum/average calculations
+ // but includes 0 in sum/average calculations
+ value = nil
+ } else {
+ value = details.Value()
+ }
+
+ // Convert OpenFeature tracking attributes to Split properties
+ var properties map[string]interface{}
+ attrs := details.Attributes()
+ if len(attrs) > 0 {
+ properties = make(map[string]interface{}, len(attrs))
+ for k, v := range attrs {
+ properties[k] = v
+ }
+ }
+
+ // Acquire read lock for client access to prevent concurrent shutdown
+ // This prevents client.Destroy() from being called during Track
+ p.mtx.RLock()
+ defer p.mtx.RUnlock()
+
+ // Double-check shutdown after acquiring lock to prevent nil pointer dereference
+ if atomic.LoadUint32(&p.shutdown) == shutdownStateActive {
+ p.logger.Debug("tracking event ignored, provider shutting down",
+ "event", trackingEventName)
+ return
+ }
+
+ // Call Split SDK's Track method
+ if err := p.client.Track(key, trafficType, trackingEventName, value, properties); err != nil {
+ p.logger.Error("tracking event failed",
+ "event", trackingEventName,
+ "key", key,
+ "traffic_type", trafficType,
+ "error", err)
+ return
+ }
+
+ p.logger.Debug("tracking event sent",
+ "event", trackingEventName,
+ "key", key,
+ "traffic_type", trafficType,
+ "value", value,
+ "value_omitted", trackOpts.MetricValueAbsent)
+}
diff --git a/evaluation_options_test.go b/evaluation_options_test.go
new file mode 100644
index 0000000..eca8997
--- /dev/null
+++ b/evaluation_options_test.go
@@ -0,0 +1,125 @@
+package split
+
+import (
+ "context"
+ "log/slog"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/open-feature/go-sdk/openfeature"
+ "github.com/splitio/go-client/v6/splitio/conf"
+ "github.com/splitio/go-toolkit/v5/logging"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+// setupLocalhostProvider creates a provider in localhost mode for direct provider-level testing.
+// NOTE: This is distinct from the existing `create(t)` helper which returns
+// *openfeature.Client for high-level OpenFeature API testing. This helper returns *Provider
+// directly, needed for testing ObjectEvaluation, Track, and other provider methods with context options.
+// Additional options (e.g. WithLogger) can be passed and are applied during construction,
+// before InitWithContext, to avoid data races with background goroutines.
+func setupLocalhostProvider(t *testing.T, opts ...Option) *Provider {
+ t.Helper()
+ cfg := conf.Default()
+ cfg.SplitFile = testSplitFile
+ cfg.LoggerConfig.LogLevel = logging.LevelNone
+ cfg.BlockUntilReady = 10
+
+ allOpts := append([]Option{WithSplitConfig(cfg)}, opts...)
+ provider, err := New("localhost", allOpts...)
+ require.NoError(t, err)
+
+ err = provider.InitWithContext(context.Background(), openfeature.NewEvaluationContext("", nil))
+ require.NoError(t, err)
+
+ t.Cleanup(func() {
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+ _ = provider.ShutdownWithContext(ctx)
+ })
+ return provider
+}
+
+// testProviderEvalCtx is the standard flattened evaluation context for direct provider method tests.
+var testProviderEvalCtx = openfeature.FlattenedContext{
+ openfeature.TargetingKey: "key",
+}
+
+func TestObjectEvaluation_ModeIndividual_Localhost(t *testing.T) {
+ provider := setupLocalhostProvider(t)
+
+ // EvaluationModeIndividual should work in localhost (it's the default)
+ ctx := WithEvaluationMode(context.Background(), EvaluationModeIndividual)
+
+ result := provider.ObjectEvaluation(ctx, flagObj, nil, testProviderEvalCtx)
+
+ flagSet, ok := result.Value.(FlagSetResult)
+ require.True(t, ok, "Result should be FlagSetResult")
+ assert.Len(t, flagSet, 1)
+ assert.Contains(t, flagSet, flagObj)
+}
+
+func TestObjectEvaluation_ModeSet_IgnoredInLocalhost(t *testing.T) {
+ provider := setupLocalhostProvider(t)
+
+ // Request set mode, but localhost should ignore it and use individual
+ ctx := WithEvaluationMode(context.Background(), EvaluationModeSet)
+
+ result := provider.ObjectEvaluation(ctx, flagObj, nil, testProviderEvalCtx)
+
+ // Should still evaluate single flag (localhost ignores EvaluationModeSet)
+ flagSet, ok := result.Value.(FlagSetResult)
+ require.True(t, ok, "Result should be FlagSetResult")
+ assert.Len(t, flagSet, 1)
+}
+
+func TestObjectEvaluation_DefaultMode_Localhost(t *testing.T) {
+ provider := setupLocalhostProvider(t)
+
+ // Default mode in localhost should use individual
+ result := provider.ObjectEvaluation(context.Background(), flagObj, nil, testProviderEvalCtx)
+
+ flagSet, ok := result.Value.(FlagSetResult)
+ require.True(t, ok, "Result should be FlagSetResult")
+ assert.Len(t, flagSet, 1)
+ assert.Contains(t, flagSet, flagObj)
+}
+
+func TestObjectEvaluation_LocalhostIgnoresSetMode_WithLogging(t *testing.T) {
+ var logBuffer strings.Builder
+ customLogger := slog.New(slog.NewTextHandler(&logBuffer, &slog.HandlerOptions{Level: slog.LevelDebug}))
+ provider := setupLocalhostProvider(t, WithLogger(customLogger))
+
+ // Explicitly request set mode
+ ctx := WithEvaluationMode(context.Background(), EvaluationModeSet)
+
+ // Localhost should IGNORE the set mode and use individual
+ result := provider.ObjectEvaluation(ctx, flagObj, nil, testProviderEvalCtx)
+
+ flagSet, ok := result.Value.(FlagSetResult)
+ require.True(t, ok, "Result should be FlagSetResult")
+ assert.Len(t, flagSet, 1)
+
+ // Verify debug log was emitted about mode override
+ assert.Contains(t, logBuffer.String(), "EvaluationModeSet ignored in localhost mode")
+}
+
+func TestImpressionDisabled_LoggedForObjectEvaluation(t *testing.T) {
+ var logBuffer strings.Builder
+ customLogger := slog.New(slog.NewTextHandler(&logBuffer, &slog.HandlerOptions{Level: slog.LevelInfo}))
+ provider := setupLocalhostProvider(t, WithLogger(customLogger))
+
+ // Set ImpressionDisabled
+ ctx := WithImpressionDisabled(context.Background())
+
+ // Should log "not yet supported" but continue evaluation
+ result := provider.ObjectEvaluation(ctx, flagObj, nil, testProviderEvalCtx)
+
+ // Evaluation should still succeed
+ assert.NotNil(t, result.Value)
+
+	// Verify the "not yet supported" message was logged
+ assert.Contains(t, logBuffer.String(), "not yet supported by Split Go SDK")
+}
diff --git a/evaluation_test.go b/evaluation_test.go
new file mode 100644
index 0000000..2049a29
--- /dev/null
+++ b/evaluation_test.go
@@ -0,0 +1,959 @@
+//nolint:dupl,gocognit // Test patterns: type-specific tests have similar structure, comprehensive tests have higher complexity
+package split
+
+import (
+ "context"
+ "testing"
+
+ "github.com/open-feature/go-sdk/openfeature"
+ "github.com/splitio/go-client/v6/splitio/conf"
+ "github.com/splitio/go-toolkit/v5/logging"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestEvaluationReturnsDefaultValueWhenFlagNotFound(t *testing.T) { // unknown flag must surface FlagNotFoundCode and echo the caller's default
+ ofClient := create(t)
+ flagName := flagNonExistent
+ evalCtx := evaluationContext()
+
+ // Test with default value false
+ result, err := ofClient.BooleanValue(context.TODO(), flagName, false, evalCtx)
+ assert.Error(t, err, "Should return error for non-existent flag")
+ assert.Contains(t, err.Error(), string(openfeature.FlagNotFoundCode), "Error should be FlagNotFoundCode")
+ assert.False(t, result, "Should return default value (false)")
+
+ // Test with default value true
+ result, err = ofClient.BooleanValue(context.TODO(), flagName, true, evalCtx)
+ assert.Error(t, err, "Should return error for non-existent flag")
+ assert.Contains(t, err.Error(), string(openfeature.FlagNotFoundCode), "Error should be FlagNotFoundCode")
+ assert.True(t, result, "Should return default value (true)") // both defaults checked to prove the default is passed through, not hard-coded
+}
+
+func TestMissingTargetingKey(t *testing.T) { // empty targeting key must fail fast with TargetingKeyMissingCode before any flag lookup
+ ofClient := create(t)
+ flagName := flagNonExistent
+
+ result, err := ofClient.BooleanValue(context.TODO(), flagName, false, openfeature.NewEvaluationContext("", nil)) // "" targeting key triggers the error path
+ assert.Error(t, err, "Should return error when targeting key is missing")
+ assert.Contains(t, err.Error(), string(openfeature.TargetingKeyMissingCode), "Error should be TargetingKeyMissingCode")
+ assert.False(t, result, "Should return default value (false)")
+}
+
+func TestBooleanEvaluationReturnsControlVariantForNonExistentFlag(t *testing.T) { // not-found details must carry Split's "control" variant alongside the default value
+ ofClient := create(t)
+ flagName := flagNonExistent // use the shared constant, consistent with the other not-found tests above
+ evalCtx := evaluationContext()
+
+ result, err := ofClient.BooleanValueDetails(context.TODO(), flagName, false, evalCtx)
+ assert.Error(t, err, "Should return error for non-existent flag")
+ assert.Contains(t, err.Error(), string(openfeature.FlagNotFoundCode), "Error should be FlagNotFoundCode")
+ assert.False(t, result.Value, "Should return default value (false)")
+ assert.Equal(t, "control", result.Variant, "Variant should be 'control' for non-existent flag")
+}
+
+func TestBooleanEvaluationReturnsCorrectValue(t *testing.T) { // an "off" treatment must map to boolean false even when the default is true
+ ofClient := create(t)
+ flagName := flagSomeOther
+ evalCtx := evaluationContext()
+
+ result, err := ofClient.BooleanValue(context.TODO(), flagName, true, evalCtx) // default true ensures false comes from the flag, not the default
+ assert.NoError(t, err, "Should not return error for valid flag")
+ assert.False(t, result, "Should return false for 'some_other_feature'")
+}
+
+func TestBooleanEvaluationWithTargetingKey(t *testing.T) { // the targeting key must change the outcome for a flag with per-key targeting rules
+ ofClient := create(t)
+ flagName := flagMyFeature
+ evalCtx := evaluationContext()
+
+ // Test with targeting key "key" - should return true
+ result, err := ofClient.BooleanValue(context.TODO(), flagName, false, evalCtx)
+ assert.NoError(t, err, "Should not return error for valid flag")
+ assert.True(t, result, "Should return true for 'my_feature' with key='key'")
+
+ // Test with different targeting key - should return false
+ evalCtx = openfeature.NewEvaluationContext("randomKey", nil) // non-targeted key should fall to the other treatment
+ result, err = ofClient.BooleanValue(context.TODO(), flagName, true, evalCtx)
+ assert.NoError(t, err, "Should not return error for valid flag")
+ assert.False(t, result, "Should return false for 'my_feature' with key='randomKey'")
+}
+
+func TestStringEvaluationReturnsCorrectValue(t *testing.T) { // string evaluation returns the raw treatment name
+ ofClient := create(t)
+ flagName := flagSomeOther
+ evalCtx := evaluationContext()
+
+ result, err := ofClient.StringValue(context.TODO(), flagName, "on", evalCtx) // default "on" ensures "off" comes from the flag
+ assert.NoError(t, err, "Should not return error for valid flag")
+ assert.Equal(t, treatmentOff, result, "Should return 'off' treatment")
+}
+
+func TestIntEvaluationReturnsCorrectValue(t *testing.T) { // a numeric treatment string must parse to int64
+ ofClient := create(t)
+ flagName := flagInt
+ evalCtx := evaluationContext()
+
+ result, err := ofClient.IntValue(context.TODO(), flagName, 0, evalCtx)
+ assert.NoError(t, err, "Should not return error for valid flag")
+ assert.Equal(t, int64(32), result, "Should return 32") // treatment "32" parsed, not the default 0
+}
+
+func TestObjectEvaluationReturnsCorrectValue(t *testing.T) { // object evaluation returns a FlagSetResult with treatment plus parsed dynamic config
+ ofClient := create(t)
+ flagName := flagObj
+ evalCtx := evaluationContext()
+
+ result, err := ofClient.ObjectValue(context.TODO(), flagName, FlagSetResult{}, evalCtx)
+ assert.NoError(t, err, "Should not return error for valid flag")
+
+ flags, ok := result.(FlagSetResult)
+ require.True(t, ok, "Result should be FlagSetResult")
+ require.Contains(t, flags, "obj_feature", "Should contain obj_feature flag") // require: indexing below would panic on a missing key
+
+ flagResult := flags["obj_feature"]
+ assert.Equal(t, "on", flagResult.Treatment, "Should return correct treatment")
+ assert.Equal(t, map[string]any{"key": "value"}, flagResult.Config, "Should return correct config")
+}
+
+func TestFloatEvaluationReturnsCorrectValue(t *testing.T) { // an integer treatment string must also parse as float64
+ ofClient := create(t)
+ flagName := flagInt
+ evalCtx := evaluationContext()
+
+ result, err := ofClient.FloatValue(context.TODO(), flagName, 0, evalCtx)
+ assert.NoError(t, err, "Should not return error for valid flag")
+ assert.Equal(t, float64(32), result, "Should return 32.0")
+}
+
+// =============================================================================
+// Evaluation Details Tests
+// =============================================================================
+
+func TestBooleanDetails(t *testing.T) { // BooleanValueDetails must populate flag key, reason, variant and leave ErrorCode empty on success
+ ofClient := create(t)
+ flagName := flagSomeOther
+ evalCtx := evaluationContext()
+
+ result, err := ofClient.BooleanValueDetails(context.TODO(), flagName, true, evalCtx)
+ require.NoError(t, err, "Should not return error") // Use require to prevent panic when accessing result fields
+ assert.Equal(t, flagName, result.FlagKey, "Flag key should match")
+ assert.Contains(t, string(result.Reason), string(openfeature.TargetingMatchReason), "Reason should be TargetingMatchReason")
+ assert.False(t, result.Value, "Value should be false")
+ assert.Equal(t, treatmentOff, result.Variant, "Variant should be 'off'") // variant is the raw Split treatment
+ assert.Empty(t, result.ErrorCode, "ErrorCode should be empty")
+}
+
+func TestIntegerDetails(t *testing.T) { // IntValueDetails must parse the treatment and keep the unparsed string as the variant
+ ofClient := create(t)
+ flagName := flagInt
+ evalCtx := evaluationContext()
+
+ result, err := ofClient.IntValueDetails(context.TODO(), flagName, 0, evalCtx)
+ require.NoError(t, err, "Should not return error") // Use require to prevent panic when accessing result fields
+ assert.Equal(t, flagName, result.FlagKey, "Flag key should match")
+ assert.Contains(t, string(result.Reason), string(openfeature.TargetingMatchReason), "Reason should be TargetingMatchReason")
+ assert.Equal(t, int64(32), result.Value, "Value should be 32")
+ assert.Equal(t, "32", result.Variant, "Variant should be '32'") // variant stays the raw treatment string
+ assert.Empty(t, result.ErrorCode, "ErrorCode should be empty")
+}
+
+func TestStringDetails(t *testing.T) { // for strings, value and variant are both the treatment name
+ ofClient := create(t)
+ flagName := flagSomeOther
+ evalCtx := evaluationContext()
+
+ result, err := ofClient.StringValueDetails(context.TODO(), flagName, "blah", evalCtx)
+ require.NoError(t, err, "Should not return error") // Use require to prevent panic when accessing result fields
+ assert.Equal(t, flagName, result.FlagKey, "Flag key should match")
+ assert.Contains(t, string(result.Reason), string(openfeature.TargetingMatchReason), "Reason should be TargetingMatchReason")
+ assert.Equal(t, treatmentOff, result.Value, "Value should be 'off'")
+ assert.Equal(t, treatmentOff, result.Variant, "Variant should be 'off'")
+ assert.Empty(t, result.ErrorCode, "ErrorCode should be empty")
+}
+
+func TestObjectDetails(t *testing.T) { // ObjectValueDetails must use the flag name as variant and wrap results in FlagSetResult
+ ofClient := create(t)
+ flagName := flagObj
+ evalCtx := evaluationContext()
+
+ result, err := ofClient.ObjectValueDetails(context.TODO(), flagName, map[string]any{}, evalCtx)
+ require.NoError(t, err, "Should not return error") // Use require to prevent panic when accessing result fields
+ assert.Equal(t, flagName, result.FlagKey, "Flag key should match")
+ assert.Contains(t, string(result.Reason), string(openfeature.TargetingMatchReason), "Reason should be TargetingMatchReason")
+ assert.Equal(t, flagName, result.Variant, "Variant should be flag name") // object mode: variant is the flag name, not a treatment
+ assert.Empty(t, result.ErrorCode, "ErrorCode should be empty")
+
+ // Verify FlagSetResult structure
+ flags, ok := result.Value.(FlagSetResult)
+ require.True(t, ok, "Value should be FlagSetResult")
+ require.Contains(t, flags, "obj_feature", "Should contain obj_feature flag")
+ assert.Equal(t, "on", flags["obj_feature"].Treatment, "Should return correct treatment")
+ assert.Equal(t, map[string]any{"key": "value"}, flags["obj_feature"].Config, "Should return correct config")
+}
+
+func TestFloatDetails(t *testing.T) { // covers both an integer-valued treatment and a true fractional one
+ ofClient := create(t)
+ flagName := flagInt
+ evalCtx := evaluationContext()
+
+ result, err := ofClient.FloatValueDetails(context.TODO(), flagName, 0, evalCtx)
+ require.NoError(t, err, "Should not return error") // Use require to prevent panic when accessing result fields
+ assert.Equal(t, flagName, result.FlagKey, "Flag key should match")
+ assert.Contains(t, string(result.Reason), string(openfeature.TargetingMatchReason), "Reason should be TargetingMatchReason")
+ assert.Equal(t, float64(32), result.Value, "Value should be 32")
+ assert.Equal(t, "32", result.Variant, "Variant should be '32'")
+ assert.Empty(t, result.ErrorCode, "ErrorCode should be empty")
+
+ // Test with actual float value
+ flagName = "float_feature"
+ result, err = ofClient.FloatValueDetails(context.TODO(), flagName, 0, evalCtx)
+ require.NoError(t, err, "Should not return error")
+ assert.Equal(t, 32.5, result.Value, "Value should be 32.5")
+ assert.Equal(t, "32.5", result.Variant, "Variant should be '32.5'")
+ assert.Empty(t, result.ErrorCode, "ErrorCode should be empty")
+}
+
+// =============================================================================
+// Parse Error Tests
+// =============================================================================
+
+func TestParseErrorHandling(t *testing.T) { // table test: an unparseable treatment must yield ParseErrorCode, ErrorReason, the default value, and the raw treatment as variant
+ ofClient := create(t)
+ evalCtx := evaluationContext()
+
+ tests := []struct { // one optional func pair per numeric/boolean type; only the pair matching `name` is non-nil
+ testBoolFunc func() (bool, error)
+ testBoolDeets func() (openfeature.BooleanEvaluationDetails, error)
+ testIntFunc func() (int64, error)
+ testIntDeets func() (openfeature.IntEvaluationDetails, error)
+ testFloatFunc func() (float64, error)
+ testFloatDeets func() (openfeature.FloatEvaluationDetails, error)
+ name string
+ intDefault int64
+ floatDefault float64
+ boolDefault bool
+ }{
+ {
+ name: "Boolean",
+ testBoolFunc: func() (bool, error) { return ofClient.BooleanValue(context.TODO(), flagUnparseable, false, evalCtx) },
+ testBoolDeets: func() (openfeature.BooleanEvaluationDetails, error) {
+ return ofClient.BooleanValueDetails(context.TODO(), flagUnparseable, false, evalCtx)
+ },
+ boolDefault: false,
+ },
+ {
+ name: "Integer",
+ testIntFunc: func() (int64, error) { return ofClient.IntValue(context.TODO(), flagUnparseable, 10, evalCtx) },
+ testIntDeets: func() (openfeature.IntEvaluationDetails, error) {
+ return ofClient.IntValueDetails(context.TODO(), flagUnparseable, 10, evalCtx)
+ },
+ intDefault: 10,
+ },
+ {
+ name: "Float",
+ testFloatFunc: func() (float64, error) { return ofClient.FloatValue(context.TODO(), flagUnparseable, 10, evalCtx) },
+ testFloatDeets: func() (openfeature.FloatEvaluationDetails, error) {
+ return ofClient.FloatValueDetails(context.TODO(), flagUnparseable, 10, evalCtx)
+ },
+ floatDefault: 10.0,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ // Test Value functions (Boolean, Int, Float)
+ if tt.testBoolFunc != nil {
+ result, err := tt.testBoolFunc()
+ require.Error(t, err, "Should return parse error")
+ assert.Contains(t, err.Error(), string(openfeature.ParseErrorCode), "Error should be ParseErrorCode")
+ assert.Equal(t, tt.boolDefault, result, "Should return default value")
+
+ // Test Details function
+ details, err := tt.testBoolDeets()
+ require.Error(t, err, "Should return parse error")
+ assert.Contains(t, err.Error(), string(openfeature.ParseErrorCode), "Error should be ParseErrorCode")
+ assert.Equal(t, tt.boolDefault, details.Value, "Value should be default")
+ assert.Equal(t, openfeature.ParseErrorCode, details.ErrorCode, "ErrorCode should be ParseErrorCode")
+ assert.Equal(t, openfeature.ErrorReason, details.Reason, "Reason should be ErrorReason")
+ assert.Equal(t, treatmentUnparseable, details.Variant, "Variant should be the treatment string")
+ }
+
+ if tt.testIntFunc != nil {
+ result, err := tt.testIntFunc()
+ require.Error(t, err, "Should return parse error")
+ assert.Contains(t, err.Error(), string(openfeature.ParseErrorCode), "Error should be ParseErrorCode")
+ assert.Equal(t, tt.intDefault, result, "Should return default value")
+
+ // Test Details function
+ details, err := tt.testIntDeets()
+ require.Error(t, err, "Should return parse error")
+ assert.Contains(t, err.Error(), string(openfeature.ParseErrorCode), "Error should be ParseErrorCode")
+ assert.Equal(t, tt.intDefault, details.Value, "Value should be default")
+ assert.Equal(t, openfeature.ParseErrorCode, details.ErrorCode, "ErrorCode should be ParseErrorCode")
+ assert.Equal(t, openfeature.ErrorReason, details.Reason, "Reason should be ErrorReason")
+ assert.Equal(t, treatmentUnparseable, details.Variant, "Variant should be the treatment string")
+ }
+
+ if tt.testFloatFunc != nil {
+ result, err := tt.testFloatFunc()
+ require.Error(t, err, "Should return parse error")
+ assert.Contains(t, err.Error(), string(openfeature.ParseErrorCode), "Error should be ParseErrorCode")
+ assert.Equal(t, tt.floatDefault, result, "Should return default value")
+
+ // Test Details function
+ details, err := tt.testFloatDeets()
+ require.Error(t, err, "Should return parse error")
+ assert.Contains(t, err.Error(), string(openfeature.ParseErrorCode), "Error should be ParseErrorCode")
+ assert.Equal(t, tt.floatDefault, details.Value, "Value should be default")
+ assert.Equal(t, openfeature.ParseErrorCode, details.ErrorCode, "ErrorCode should be ParseErrorCode")
+ assert.Equal(t, openfeature.ErrorReason, details.Reason, "Reason should be ErrorReason")
+ assert.Equal(t, treatmentUnparseable, details.Variant, "Variant should be the treatment string")
+ }
+ })
+ }
+}
+
+// =============================================================================
+// Attributes and Configuration Tests
+// =============================================================================
+
+// TestAttributesPassedToSplit verifies that attributes from the evaluation context
+// are passed to the Split SDK for targeting rules (Bug #2 fix).
+func TestAttributesPassedToSplit(t *testing.T) { // evaluation-context attributes must reach the Split SDK without breaking evaluation (Bug #2 fix)
+ ofClient := create(t)
+
+ evalCtx := openfeature.NewEvaluationContext("key", map[string]any{ // mix of types exercises the attribute conversion path
+ "email": "user@example.com",
+ "age": int64(30),
+ "beta_user": true,
+ "account_type": "premium",
+ "roles": []string{"admin", "user"},
+ })
+
+ // Test boolean evaluation with attributes
+ flagName := flagMyFeature
+ result, err := ofClient.BooleanValue(context.TODO(), flagName, false, evalCtx)
+ require.NoError(t, err, "Attributes should not cause error in BooleanValue")
+ assert.True(t, result, "Should return true for my_feature")
+
+ // Test string evaluation with attributes
+ flagName2 := flagSomeOther // shared constant, consistent with the rest of the file
+ strResult, err := ofClient.StringValue(context.TODO(), flagName2, "default", evalCtx)
+ require.NoError(t, err, "Attributes should not cause error in StringValue")
+ assert.Equal(t, treatmentOff, strResult, "Should return 'off' treatment")
+
+ // Test that attributes don't interfere with existing functionality
+ evalCtxNoAttrs := openfeature.NewEvaluationContext("key", nil)
+ result2, err := ofClient.BooleanValue(context.TODO(), flagName, false, evalCtxNoAttrs)
+ require.NoError(t, err, "Evaluation without attributes should succeed")
+ assert.True(t, result2, "Should return true even without attributes")
+}
+
+// TestDynamicConfiguration verifies that ObjectEvaluation correctly retrieves
+// Dynamic Configuration from the config field.
+func TestDynamicConfiguration(t *testing.T) { // ObjectValue must expose the flag's parsed dynamic config, not just the treatment
+ ofClient := create(t)
+ flagName := flagMyFeature
+ evalCtx := openfeature.NewEvaluationContext("key", nil)
+
+ result, err := ofClient.ObjectValue(context.TODO(), flagName, FlagSetResult{}, evalCtx)
+ require.NoError(t, err, "Dynamic Configuration evaluation should succeed")
+
+ flags, ok := result.(FlagSetResult)
+ require.True(t, ok, "Result should be FlagSetResult")
+ require.Contains(t, flags, "my_feature", "Should contain my_feature flag") // require: indexing below would panic on a missing key
+
+ flagResult := flags["my_feature"]
+ assert.Equal(t, "on", flagResult.Treatment, "Should return correct treatment")
+ assert.Equal(t, map[string]any{"desc": "this applies only to ON treatment"}, flagResult.Config, "Should return parsed config")
+}
+
+// TestMalformedJSONInDynamicConfiguration verifies that malformed JSON in Dynamic Configuration
+// is handled gracefully - config is set to nil and a warning is logged.
+func TestMalformedJSONInDynamicConfiguration(t *testing.T) { // malformed config JSON must degrade to nil/empty metadata, never fail the evaluation
+ ofClient := create(t)
+ evalCtx := openfeature.NewEvaluationContext("key", nil)
+
+ t.Run("StringEvaluation", func(t *testing.T) {
+ details, err := ofClient.StringValueDetails(context.TODO(), flagMalformedJSON, "default", evalCtx)
+ require.NoError(t, err, "Should not return error for valid flag")
+ assert.Equal(t, treatmentOn, details.Value, "Should return treatment")
+ assert.Empty(t, details.FlagMetadata, "FlagMetadata should be empty for malformed JSON")
+ })
+
+ t.Run("BooleanEvaluation", func(t *testing.T) {
+ details, err := ofClient.BooleanValueDetails(context.TODO(), flagMalformedJSON, false, evalCtx)
+ require.NoError(t, err, "Should not return error for valid flag")
+ assert.True(t, details.Value, "Should return true for 'on' treatment")
+ assert.Empty(t, details.FlagMetadata, "FlagMetadata should be empty for malformed JSON")
+ })
+
+ t.Run("ObjectEvaluation", func(t *testing.T) {
+ result, err := ofClient.ObjectValue(context.TODO(), flagMalformedJSON, FlagSetResult{}, evalCtx)
+ require.NoError(t, err, "Should not return error for valid flag")
+
+ flags, ok := result.(FlagSetResult)
+ require.True(t, ok, "Result should be FlagSetResult")
+ flagResult, ok := flags[flagMalformedJSON]
+ require.True(t, ok, "Result should contain flag entry")
+ assert.Equal(t, treatmentOn, flagResult.Treatment, "Should return treatment")
+ assert.Nil(t, flagResult.Config, "Config should be nil for malformed JSON") // object path reports nil config rather than the unparseable string
+ })
+}
+
+// =============================================================================
+// Missing Key and Not Found Tests
+// =============================================================================
+
+// TestEvaluationMissingTargetingKey tests all evaluation types with missing targeting key.
+func TestEvaluationMissingTargetingKey(t *testing.T) { // table test: every value type must return TargetingKeyMissingCode plus its default for an empty key
+ ofClient := create(t)
+
+ tests := []struct { // one optional func per type; only the one matching `name` is non-nil
+ testStrFunc func() (string, error)
+ testFloatFunc func() (float64, error)
+ testIntFunc func() (int64, error)
+ testObjFunc func() (any, error)
+ objDefault any
+ name string
+ strDefault string
+ floatDefault float64
+ intDefault int64
+ }{
+ {
+ name: "String",
+ testStrFunc: func() (string, error) {
+ return ofClient.StringValue(context.TODO(), "str_feature", "default", openfeature.NewEvaluationContext("", nil))
+ },
+ strDefault: "default",
+ },
+ {
+ name: "Float",
+ testFloatFunc: func() (float64, error) {
+ return ofClient.FloatValue(context.TODO(), "float_feature", 3.14, openfeature.NewEvaluationContext("", nil))
+ },
+ floatDefault: 3.14,
+ },
+ {
+ name: "Integer",
+ testIntFunc: func() (int64, error) {
+ return ofClient.IntValue(context.TODO(), flagInt, 42, openfeature.NewEvaluationContext("", nil))
+ },
+ intDefault: 42,
+ },
+ {
+ name: "Object",
+ testObjFunc: func() (any, error) {
+ return ofClient.ObjectValue(context.TODO(), flagObj, FlagSetResult{}, openfeature.NewEvaluationContext("", nil))
+ },
+ objDefault: FlagSetResult{},
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ if tt.testStrFunc != nil {
+ result, err := tt.testStrFunc()
+ assert.Error(t, err, "Should return error when targeting key is missing")
+ assert.Contains(t, err.Error(), string(openfeature.TargetingKeyMissingCode), "Error should be TargetingKeyMissingCode")
+ assert.Equal(t, tt.strDefault, result, "Should return default value")
+ }
+
+ if tt.testFloatFunc != nil {
+ result, err := tt.testFloatFunc()
+ assert.Error(t, err, "Should return error when targeting key is missing")
+ assert.Contains(t, err.Error(), string(openfeature.TargetingKeyMissingCode), "Error should be TargetingKeyMissingCode")
+ assert.Equal(t, tt.floatDefault, result, "Should return default value")
+ }
+
+ if tt.testIntFunc != nil {
+ result, err := tt.testIntFunc()
+ assert.Error(t, err, "Should return error when targeting key is missing")
+ assert.Contains(t, err.Error(), string(openfeature.TargetingKeyMissingCode), "Error should be TargetingKeyMissingCode")
+ assert.Equal(t, tt.intDefault, result, "Should return default value")
+ }
+
+ if tt.testObjFunc != nil {
+ result, err := tt.testObjFunc()
+ assert.Error(t, err, "Should return error when targeting key is missing")
+ assert.Contains(t, err.Error(), string(openfeature.TargetingKeyMissingCode), "Error should be TargetingKeyMissingCode")
+ assert.Equal(t, tt.objDefault, result, "Should return default value")
+ }
+ })
+ }
+}
+
+// TestEvaluationNotFound tests all evaluation types with non-existent flags.
+func TestEvaluationNotFound(t *testing.T) { // table test: every value type must return FlagNotFoundCode plus its default for an unknown flag
+ ofClient := create(t)
+ evalCtx := evaluationContext()
+
+ tests := []struct { // one optional func per type; only the one matching `name` is non-nil
+ testStrFunc func() (string, error)
+ testFloatFunc func() (float64, error)
+ testIntFunc func() (int64, error)
+ testObjFunc func() (any, error)
+ objDefault any
+ name string
+ strDefault string
+ floatDefault float64
+ intDefault int64
+ }{
+ {
+ name: "String",
+ testStrFunc: func() (string, error) {
+ return ofClient.StringValue(context.TODO(), "nonexistent-string-feature", "default", evalCtx)
+ },
+ strDefault: "default",
+ },
+ {
+ name: "Float",
+ testFloatFunc: func() (float64, error) {
+ return ofClient.FloatValue(context.TODO(), "nonexistent-float-feature", 3.14, evalCtx)
+ },
+ floatDefault: 3.14,
+ },
+ {
+ name: "Integer",
+ testIntFunc: func() (int64, error) {
+ return ofClient.IntValue(context.TODO(), "nonexistent-int-feature", 42, evalCtx)
+ },
+ intDefault: 42,
+ },
+ {
+ name: "Object",
+ testObjFunc: func() (any, error) {
+ return ofClient.ObjectValue(context.TODO(), "nonexistent-obj-feature", FlagSetResult{}, evalCtx)
+ },
+ objDefault: FlagSetResult{},
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ if tt.testStrFunc != nil {
+ result, err := tt.testStrFunc()
+ assert.Error(t, err, "Should return error for non-existent flag")
+ assert.Contains(t, err.Error(), string(openfeature.FlagNotFoundCode), "Error should be FlagNotFoundCode")
+ assert.Equal(t, tt.strDefault, result, "Should return default value")
+ }
+
+ if tt.testFloatFunc != nil {
+ result, err := tt.testFloatFunc()
+ assert.Error(t, err, "Should return error for non-existent flag")
+ assert.Contains(t, err.Error(), string(openfeature.FlagNotFoundCode), "Error should be FlagNotFoundCode")
+ assert.Equal(t, tt.floatDefault, result, "Should return default value")
+ }
+
+ if tt.testIntFunc != nil {
+ result, err := tt.testIntFunc()
+ assert.Error(t, err, "Should return error for non-existent flag")
+ assert.Contains(t, err.Error(), string(openfeature.FlagNotFoundCode), "Error should be FlagNotFoundCode")
+ assert.Equal(t, tt.intDefault, result, "Should return default value")
+ }
+
+ if tt.testObjFunc != nil {
+ result, err := tt.testObjFunc()
+ assert.Error(t, err, "Should return error for non-existent flag")
+ assert.Contains(t, err.Error(), string(openfeature.FlagNotFoundCode), "Error should be FlagNotFoundCode")
+ assert.Equal(t, tt.objDefault, result, "Should return default value")
+ }
+ })
+ }
+}
+
+// =============================================================================
+// Edge Case Tests
+// =============================================================================
+
+// TestIntegerEdgeCases tests integer evaluation with boundary values and edge cases.
+func TestIntegerEdgeCases(t *testing.T) { // boundary int64 defaults must round-trip untouched through the not-found path
+ ofClient := create(t)
+ evalCtx := evaluationContext()
+
+ tests := []struct {
+ name string
+ description string
+ defaultValue int64
+ }{
+ {name: "Zero", defaultValue: 0, description: "Test with zero value"},
+ {name: "Negative", defaultValue: -42, description: "Test with negative integer"},
+ {name: "MaxInt64", defaultValue: 9223372036854775807, description: "Test with max int64 value"}, // NOTE(review): math.MaxInt64/MinInt64 would be clearer than raw literals
+ {name: "MinInt64", defaultValue: -9223372036854775808, description: "Test with min int64 value"},
+ {name: "SmallNegative", defaultValue: -1, description: "Test with -1 value"},
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ result, err := ofClient.IntValue(context.TODO(), "nonexistent-int-edge-case", tt.defaultValue, evalCtx)
+ assert.Error(t, err, "Should return error for non-existent flag")
+ assert.Contains(t, err.Error(), string(openfeature.FlagNotFoundCode), "Error should be FlagNotFoundCode")
+ assert.Equal(t, tt.defaultValue, result, "Should return default value: %s", tt.description)
+ })
+ }
+}
+
+// TestFloatEdgeCases tests float evaluation with boundary values and edge cases.
+func TestFloatEdgeCases(t *testing.T) { // extreme float64 defaults must round-trip untouched through the not-found path
+ ofClient := create(t)
+ evalCtx := evaluationContext()
+
+ tests := []struct {
+ name string
+ description string
+ defaultValue float64
+ }{
+ {name: "Zero", defaultValue: 0.0, description: "Test with zero value"},
+ {name: "Negative", defaultValue: -3.14, description: "Test with negative float"},
+ {name: "VerySmall", defaultValue: 1e-10, description: "Test with very small number (scientific notation)"},
+ {name: "VeryLarge", defaultValue: 1e10, description: "Test with very large number"},
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ result, err := ofClient.FloatValue(context.TODO(), "nonexistent-float-edge-case", tt.defaultValue, evalCtx)
+ assert.Error(t, err, "Should return error for non-existent flag")
+ assert.Contains(t, err.Error(), string(openfeature.FlagNotFoundCode), "Error should be FlagNotFoundCode")
+ assert.Equal(t, tt.defaultValue, result, "Should return default value: %s", tt.description) // exact equality is safe: the default is passed through, never recomputed
+ })
+ }
+}
+
+// TestStringEdgeCases tests string evaluation with edge case values.
+func TestStringEdgeCases(t *testing.T) { // unusual string defaults must round-trip untouched through the not-found path
+ ofClient := create(t)
+ evalCtx := evaluationContext()
+
+ tests := []struct {
+ name string
+ flagName string
+ defaultValue string
+ description string
+ }{
+ {name: "EmptyString", flagName: "nonexistent-flag", defaultValue: "", description: "Test with empty string as default value"},
+ {name: "VeryLongString", flagName: "nonexistent-flag", defaultValue: string(make([]byte, 1000)), description: "Test with very long default value (1000+ chars)"}, // NOTE(review): this is 1000 NUL bytes, not printable text
+ {name: "UnicodeChars", flagName: "nonexistent-flag", defaultValue: "hello-世界-🌍", description: "Test with unicode characters"},
+ {name: "SpecialChars", flagName: "nonexistent-flag", defaultValue: "!@#$%^&*()_+-=[]{}|;:',.<>?/~`", description: "Test with special characters"},
+ {name: "Whitespace", flagName: "nonexistent-flag", defaultValue: " \t\n\r ", description: "Test with whitespace characters"},
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ result, err := ofClient.StringValue(context.TODO(), tt.flagName, tt.defaultValue, evalCtx)
+ assert.Error(t, err, "Should return error for non-existent flag")
+ assert.Contains(t, err.Error(), string(openfeature.FlagNotFoundCode), "Error should be FlagNotFoundCode")
+ assert.Equal(t, tt.defaultValue, result, "Should return default value: %s", tt.description)
+ })
+ }
+}
+
+// TestObjectEdgeCases tests object evaluation with edge case structures.
+func TestObjectEdgeCases(t *testing.T) { // structured defaults (nested/mixed/nil-valued maps) must round-trip untouched through the not-found path
+ ofClient := create(t)
+ evalCtx := evaluationContext()
+
+ tests := []struct {
+ name string
+ defaultValue map[string]any
+ description string
+ }{
+ {name: "EmptyObject", defaultValue: map[string]any{}, description: "Test with empty object"},
+ {
+ name: "NestedObject",
+ defaultValue: map[string]any{
+ "level1": map[string]any{
+ "level2": map[string]any{"level3": "deep"},
+ },
+ },
+ description: "Test with deeply nested object",
+ },
+ {
+ name: "ObjectWithArray",
+ defaultValue: map[string]any{
+ "items": []any{"a", "b", "c"},
+ "counts": []int{1, 2, 3},
+ },
+ description: "Test with arrays in object",
+ },
+ {
+ name: "ObjectWithNull",
+ defaultValue: map[string]any{"key": "value", "nullField": nil},
+ description: "Test with null values in object",
+ },
+ {
+ name: "MixedTypes",
+ defaultValue: map[string]any{
+ "string": "text", "number": 42, "float": 3.14, "bool": true,
+ "array": []any{1, "two", 3.0}, "nested": map[string]any{"inner": "value"},
+ },
+ description: "Test with mixed types in object",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ result, err := ofClient.ObjectValue(context.TODO(), "nonexistent-obj-edge-case", tt.defaultValue, evalCtx)
+ assert.Error(t, err, "Should return error for non-existent flag")
+ assert.Contains(t, err.Error(), string(openfeature.FlagNotFoundCode), "Error should be FlagNotFoundCode")
+ assert.Equal(t, tt.defaultValue, result, "Should return default value: %s", tt.description) // deep equality via testify covers the nested structures
+ })
+ }
+}
+
+// TestTargetingKeyEdgeCases tests various edge cases for targeting keys.
+func TestTargetingKeyEdgeCases(t *testing.T) { // unusual-but-valid targeting keys must evaluate without error; only the empty key is rejected
+ ofClient := create(t)
+
+ tests := []struct {
+ name string
+ targetingKey string
+ flagName string
+ description string
+ }{
+ {name: "EmptyTargetingKey", targetingKey: "", flagName: flagSomeOther, description: "Test with empty targeting key"},
+ {name: "VeryLongTargetingKey", targetingKey: string(make([]byte, 1000)), flagName: flagSomeOther, description: "Test with very long targeting key (1000+ chars)"},
+ {name: "UnicodeTargetingKey", targetingKey: "user-世界-🌍", flagName: flagSomeOther, description: "Test with unicode in targeting key"},
+ {name: "SpecialCharsTargetingKey", targetingKey: "user@example.com", flagName: flagSomeOther, description: "Test with email-like targeting key"},
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ evalCtx := openfeature.NewEvaluationContext(tt.targetingKey, nil)
+
+ result, err := ofClient.BooleanValue(context.TODO(), tt.flagName, true, evalCtx)
+
+ if tt.targetingKey == "" {
+ assert.Error(t, err, "Should return error for empty targeting key")
+ assert.Contains(t, err.Error(), string(openfeature.TargetingKeyMissingCode), "Error should be TargetingKeyMissingCode")
+ } else {
+ assert.NoError(t, err, "Valid targeting key should evaluate without error: %s", tt.description) // was `_ = result; _ = err` — the non-empty cases asserted nothing
+ _ = result // treatment outcome is key-dependent; only error-freeness is asserted here
+ }
+ })
+ }
+}
+
+// =============================================================================
+// Context Cancellation Tests
+// =============================================================================
+
+// TestContextCancellation verifies that canceled contexts are respected in all evaluation methods.
+func TestContextCancellation(t *testing.T) { // a pre-canceled ctx must make every evaluation method return its default with ErrorReason
+ cfg := conf.Default()
+ cfg.SplitFile = testSplitFile
+ cfg.LoggerConfig.LogLevel = logging.LevelNone
+ cfg.BlockUntilReady = 10 // seconds to wait for SDK readiness during Init
+
+ provider, err := New("localhost", WithSplitConfig(cfg))
+ require.NoError(t, err, "Failed to create provider")
+ defer func() { _ = provider.ShutdownWithContext(context.Background()) }() // best-effort cleanup; shutdown error is irrelevant to the test
+
+ err = provider.InitWithContext(context.Background(), openfeature.NewEvaluationContext("", nil))
+ require.NoError(t, err, "Init should succeed")
+
+ // Create a canceled context
+ ctx, cancel := context.WithCancel(context.Background())
+ cancel() // Cancel immediately
+
+ flatCtx := openfeature.FlattenedContext{
+ openfeature.TargetingKey: "test-user",
+ }
+
+ // Test Boolean evaluation with canceled context
+ boolResult := provider.BooleanEvaluation(ctx, flagSomeOther, true, flatCtx)
+ assert.Equal(t, true, boolResult.Value, "Should return default value when context is canceled")
+ assert.Equal(t, openfeature.ErrorReason, boolResult.Reason, "Should have error reason")
+ assert.NotNil(t, boolResult.ResolutionError, "Should have resolution error")
+
+ // Test String evaluation with canceled context
+ strResult := provider.StringEvaluation(ctx, flagSomeOther, "default", flatCtx)
+ assert.Equal(t, "default", strResult.Value, "Should return default value when context is canceled")
+ assert.Equal(t, openfeature.ErrorReason, strResult.Reason, "Should have error reason")
+ assert.NotNil(t, strResult.ResolutionError, "Should have resolution error")
+
+ // Test Int evaluation with canceled context
+ intResult := provider.IntEvaluation(ctx, flagInt, 999, flatCtx)
+ assert.Equal(t, int64(999), intResult.Value, "Should return default value when context is canceled")
+ assert.Equal(t, openfeature.ErrorReason, intResult.Reason, "Should have error reason")
+ assert.NotNil(t, intResult.ResolutionError, "Should have resolution error")
+
+ // Test Float evaluation with canceled context
+ floatResult := provider.FloatEvaluation(ctx, "some_flag", 123.45, flatCtx)
+ assert.Equal(t, 123.45, floatResult.Value, "Should return default value when context is canceled")
+ assert.Equal(t, openfeature.ErrorReason, floatResult.Reason, "Should have error reason")
+ assert.NotNil(t, floatResult.ResolutionError, "Should have resolution error")
+
+ // Test Object evaluation with canceled context
+ defaultObj := FlagSetResult{}
+ objResult := provider.ObjectEvaluation(ctx, "some_flag", defaultObj, flatCtx)
+ assert.Equal(t, defaultObj, objResult.Value, "Should return default value when context is canceled")
+ assert.Equal(t, openfeature.ErrorReason, objResult.Reason, "Should have error reason")
+ assert.NotNil(t, objResult.ResolutionError, "Should have resolution error")
+}
+
+// =============================================================================
+// PROVIDER_NOT_READY Evaluation Tests
+// =============================================================================
+
+// TestEvaluationWhenProviderNotReady tests all evaluation types when provider is not initialized.
+//
+// Every evaluation type must fall back to the caller-supplied default value
+// and report a ResolutionError containing ProviderNotReadyCode.
+func TestEvaluationWhenProviderNotReady(t *testing.T) {
+	cfg := conf.Default()
+	cfg.SplitFile = testSplitFile
+	cfg.LoggerConfig.LogLevel = logging.LevelNone
+	cfg.BlockUntilReady = 10
+
+	provider, err := New("localhost", WithSplitConfig(cfg))
+	require.NoError(t, err, "Failed to create provider")
+
+	// Initialize then shut down to reliably put provider in NotReady state.
+	// We can't rely on "not calling Init" because in localhost mode, the Split SDK
+	// factory auto-initializes from the YAML file during New() (factory.IsReady() == true).
+	err = provider.InitWithContext(context.Background(), openfeature.NewEvaluationContext("", nil))
+	require.NoError(t, err)
+	err = provider.ShutdownWithContext(context.Background())
+	require.NoError(t, err)
+	require.Equal(t, openfeature.NotReadyState, provider.Status(), "Provider should be NotReady after shutdown")
+
+	flatCtx := openfeature.FlattenedContext{
+		openfeature.TargetingKey: "test-user",
+	}
+
+	// Each subtest covers one evaluation type; all share the same expectations:
+	// default value back, ErrorReason, and a ProviderNotReadyCode resolution error.
+	t.Run("Boolean", func(t *testing.T) {
+		result := provider.BooleanEvaluation(context.TODO(), flagSomeOther, true, flatCtx)
+		assert.Equal(t, true, result.Value, "Should return default value")
+		assert.Equal(t, openfeature.ErrorReason, result.Reason)
+		assert.Contains(t, result.ResolutionError.Error(), string(openfeature.ProviderNotReadyCode))
+	})
+
+	t.Run("String", func(t *testing.T) {
+		result := provider.StringEvaluation(context.TODO(), flagSomeOther, "default", flatCtx)
+		assert.Equal(t, "default", result.Value, "Should return default value")
+		assert.Equal(t, openfeature.ErrorReason, result.Reason)
+		assert.Contains(t, result.ResolutionError.Error(), string(openfeature.ProviderNotReadyCode))
+	})
+
+	t.Run("Int", func(t *testing.T) {
+		result := provider.IntEvaluation(context.TODO(), flagInt, 42, flatCtx)
+		assert.Equal(t, int64(42), result.Value, "Should return default value")
+		assert.Equal(t, openfeature.ErrorReason, result.Reason)
+		assert.Contains(t, result.ResolutionError.Error(), string(openfeature.ProviderNotReadyCode))
+	})
+
+	t.Run("Float", func(t *testing.T) {
+		result := provider.FloatEvaluation(context.TODO(), flagInt, 3.14, flatCtx)
+		assert.Equal(t, 3.14, result.Value, "Should return default value")
+		assert.Equal(t, openfeature.ErrorReason, result.Reason)
+		assert.Contains(t, result.ResolutionError.Error(), string(openfeature.ProviderNotReadyCode))
+	})
+
+	t.Run("Object", func(t *testing.T) {
+		defaultObj := FlagSetResult{}
+		result := provider.ObjectEvaluation(context.TODO(), flagObj, defaultObj, flatCtx)
+		assert.Equal(t, defaultObj, result.Value, "Should return default value")
+		assert.Equal(t, openfeature.ErrorReason, result.Reason)
+		assert.Contains(t, result.ResolutionError.Error(), string(openfeature.ProviderNotReadyCode))
+	})
+}
+
+// =============================================================================
+// INVALID_CONTEXT Evaluation Tests
+// =============================================================================
+
+// TestEvaluationWithInvalidContext tests evaluation with a non-string targeting key.
+//
+// The targeting key is present but holds an int; every evaluation type must
+// return the default value with a ResolutionError containing InvalidContextCode.
+func TestEvaluationWithInvalidContext(t *testing.T) {
+	cfg := conf.Default()
+	cfg.SplitFile = testSplitFile
+	cfg.LoggerConfig.LogLevel = logging.LevelNone
+	cfg.BlockUntilReady = 10
+
+	provider, err := New("localhost", WithSplitConfig(cfg))
+	require.NoError(t, err, "Failed to create provider")
+	defer func() { _ = provider.ShutdownWithContext(context.Background()) }()
+
+	err = provider.InitWithContext(context.Background(), openfeature.NewEvaluationContext("", nil))
+	require.NoError(t, err, "Init should succeed")
+
+	// Targeting key exists but is NOT a string (integer instead)
+	flatCtx := openfeature.FlattenedContext{
+		openfeature.TargetingKey: 12345,
+	}
+
+	// Each subtest covers one evaluation type; all share the same expectations.
+	t.Run("Boolean", func(t *testing.T) {
+		result := provider.BooleanEvaluation(context.TODO(), flagSomeOther, true, flatCtx)
+		assert.Equal(t, true, result.Value, "Should return default value")
+		assert.Equal(t, openfeature.ErrorReason, result.Reason)
+		assert.Contains(t, result.ResolutionError.Error(), string(openfeature.InvalidContextCode))
+	})
+
+	t.Run("String", func(t *testing.T) {
+		result := provider.StringEvaluation(context.TODO(), flagSomeOther, "default", flatCtx)
+		assert.Equal(t, "default", result.Value, "Should return default value")
+		assert.Equal(t, openfeature.ErrorReason, result.Reason)
+		assert.Contains(t, result.ResolutionError.Error(), string(openfeature.InvalidContextCode))
+	})
+
+	t.Run("Int", func(t *testing.T) {
+		result := provider.IntEvaluation(context.TODO(), flagInt, 42, flatCtx)
+		assert.Equal(t, int64(42), result.Value, "Should return default value")
+		assert.Equal(t, openfeature.ErrorReason, result.Reason)
+		assert.Contains(t, result.ResolutionError.Error(), string(openfeature.InvalidContextCode))
+	})
+
+	t.Run("Float", func(t *testing.T) {
+		result := provider.FloatEvaluation(context.TODO(), flagInt, 3.14, flatCtx)
+		assert.Equal(t, 3.14, result.Value, "Should return default value")
+		assert.Equal(t, openfeature.ErrorReason, result.Reason)
+		assert.Contains(t, result.ResolutionError.Error(), string(openfeature.InvalidContextCode))
+	})
+
+	t.Run("Object", func(t *testing.T) {
+		defaultObj := FlagSetResult{}
+		result := provider.ObjectEvaluation(context.TODO(), flagObj, defaultObj, flatCtx)
+		assert.Equal(t, defaultObj, result.Value, "Should return default value")
+		assert.Equal(t, openfeature.ErrorReason, result.Reason)
+		assert.Contains(t, result.ResolutionError.Error(), string(openfeature.InvalidContextCode))
+	})
+}
+
+// =============================================================================
+// Integration Tests
+// =============================================================================
+
+// TestIntegrationWithOpenFeatureSDK tests integration with the OpenFeature SDK.
+//
+// Expected treatments ("off" for flagSomeOther, 32 for flagInt) presumably come
+// from the localhost split file referenced by testSplitFile — verify there if
+// these assertions start failing.
+func TestIntegrationWithOpenFeatureSDK(t *testing.T) {
+	if testing.Short() {
+		t.Skip("Skipping integration test in short mode")
+	}
+
+	cfg := conf.Default()
+	cfg.SplitFile = testSplitFile
+	cfg.LoggerConfig.LogLevel = logging.LevelNone
+	cfg.BlockUntilReady = 10
+
+	provider, err := New("localhost", WithSplitConfig(cfg))
+	require.NoError(t, err, "Failed to create provider")
+	defer func() { _ = provider.ShutdownWithContext(context.Background()) }()
+
+	err = provider.InitWithContext(context.Background(), openfeature.NewEvaluationContext("", nil))
+	require.NoError(t, err, "Init should succeed")
+
+	flatCtx := openfeature.FlattenedContext{
+		openfeature.TargetingKey: "test-user",
+	}
+
+	// Test boolean evaluation
+	boolResult := provider.BooleanEvaluation(context.TODO(), flagSomeOther, false, flatCtx)
+	assert.Equal(t, openfeature.TargetingMatchReason, boolResult.Reason, "Boolean evaluation should succeed")
+	assert.False(t, boolResult.Value, "Boolean value should be false for flagSomeOther")
+
+	// Test string evaluation
+	strResult := provider.StringEvaluation(context.TODO(), flagSomeOther, "default", flatCtx)
+	assert.Equal(t, openfeature.TargetingMatchReason, strResult.Reason, "String evaluation should succeed")
+	assert.Equal(t, treatmentOff, strResult.Value, "String value should be 'off'")
+
+	// Test integer evaluation
+	intResult := provider.IntEvaluation(context.TODO(), flagInt, 0, flatCtx)
+	assert.Equal(t, openfeature.TargetingMatchReason, intResult.Reason, "Int evaluation should succeed")
+	assert.Equal(t, int64(32), intResult.Value, "Int value should be 32")
+}
diff --git a/events.go b/events.go
new file mode 100644
index 0000000..a5c5a63
--- /dev/null
+++ b/events.go
@@ -0,0 +1,220 @@
+package split
+
+import (
+ "fmt"
+ "sync/atomic"
+ "time"
+
+ of "github.com/open-feature/go-sdk/openfeature"
+ "github.com/splitio/go-client/v6/splitio/client"
+)
+
+// EventChannel returns a channel for receiving provider lifecycle events.
+//
+// This method implements the EventHandler interface. The OpenFeature SDK
+// uses this channel to receive events about provider state changes.
+// The returned channel is receive-only; the provider retains the send side
+// and closes it during shutdown.
+//
+// Events Emitted:
+//   - PROVIDER_READY: Provider initialized successfully
+//   - PROVIDER_ERROR: Provider encountered initialization error
+//   - PROVIDER_CONFIGURATION_CHANGED: Split definitions updated (detected via polling)
+//
+// Configuration Change Detection Limitation:
+// PROVIDER_CONFIGURATION_CHANGED is detected by polling, not via real-time SSE streaming.
+// While the Split SDK receives changes instantly via SSE, it doesn't expose a callback
+// for configuration changes. The provider polls manager.Splits() and compares ChangeNumber
+// values to detect changes. The polling interval is configurable via WithMonitoringInterval
+// (default: 30 seconds, minimum: 5 seconds).
+//
+// Staleness Detection Limitation:
+// PROVIDER_STALE events are NOT currently emitted. The Split SDK's IsReady()
+// method only indicates initial readiness and does not change when network
+// connectivity is lost during operation. The SDK handles connectivity issues
+// internally (switching between streaming and polling modes) but does not
+// expose this state through its public API.
+//
+// When network connectivity is lost, the SDK continues serving cached data
+// silently. Applications requiring staleness awareness should implement
+// application-level health checks or monitor SDK debug logs.
+//
+// See CONTRIBUTING.md for details on this known limitation and potential
+// future enhancements if Split SDK exposes streaming/connectivity status.
+//
+// The channel is buffered (128 events) to prevent blocking event emission.
+// Applications can register handlers via openfeature.AddHandler() to react to events.
+//
+// Example:
+//
+//	openfeature.AddHandler(openfeature.ProviderReady, func(details openfeature.EventDetails) {
+//		log.Println("Split provider is ready!")
+//	})
+//
+//	openfeature.AddHandler(openfeature.ProviderConfigChange, func(details openfeature.EventDetails) {
+//		log.Println("Feature flags updated - may want to re-evaluate")
+//	})
+func (p *Provider) EventChannel() <-chan of.Event {
+	return p.eventStream
+}
+
+// emitEvent delivers an event to the event channel with a non-blocking send.
+//
+// Events are dropped (with a warning) rather than blocking provider
+// operations when the channel buffer is full. Sends after shutdown are
+// silently ignored so a closed channel is never written to.
+//
+// Concurrency Safety Design:
+// A lock-free atomic shutdown check serves as the fast path; the actual send
+// then happens under a short-lived read lock, re-validating shutdown state,
+// which prevents a race with the channel close during Shutdown.
+func (p *Provider) emitEvent(event *of.Event) {
+	// Fast path: bail out immediately once shutdown has begun.
+	if atomic.LoadUint32(&p.shutdown) == shutdownStateActive {
+		return
+	}
+
+	// The read lock spans only the non-blocking select below, guarding the
+	// send against a concurrent close() of the event channel.
+	p.mtx.RLock()
+	defer p.mtx.RUnlock()
+
+	// Re-validate under the lock: shutdown may have started between the
+	// fast-path check and lock acquisition.
+	if atomic.LoadUint32(&p.shutdown) != shutdownStateActive {
+		select {
+		case p.eventStream <- *event:
+		default:
+			p.logger.Warn("event channel full, dropping event", "event_type", event.EventType)
+		}
+	}
+}
+
+// monitorSplitUpdates runs in a background goroutine to monitor Split SDK updates.
+//
+// This goroutine:
+//   - Polls the Split SDK for changes in split definitions
+//   - Emits PROVIDER_CONFIGURATION_CHANGED events when splits are updated
+//   - Gracefully shuts down when stopMonitor channel is closed
+//
+// The monitoring interval is configurable via WithMonitoringInterval (default: 30s, min: 5s).
+//
+// Panic Recovery:
+// If a panic occurs (e.g., nil pointer in SDK), the goroutine recovers, logs the error,
+// and terminates gracefully. This prevents the monitoring goroutine from leaving
+// monitorDone unclosed, which would cause shutdown to hang.
+func (p *Provider) monitorSplitUpdates() {
+	defer func() {
+		if r := recover(); r != nil {
+			p.logger.Error("monitoring goroutine panicked, terminating gracefully",
+				"panic", r,
+				"advice", "this may indicate a bug in Split SDK or provider implementation")
+			p.emitEvent(&of.Event{
+				ProviderName: p.Metadata().Name,
+				EventType:    of.ProviderError,
+				ProviderEventDetails: of.ProviderEventDetails{
+					Message: fmt.Sprintf("monitoring goroutine panicked: %v", r),
+				},
+			})
+		}
+		// Always close monitorDone — on normal return AND after panic recovery —
+		// so that shutdown waiting on this channel can never hang.
+		close(p.monitorDone)
+		p.logger.Debug("monitoring goroutine stopped")
+	}()
+
+	// Snapshot the initial split state; abort (emitting a PROVIDER_ERROR)
+	// if the SDK factory or manager is unavailable.
+	manager, lastKnownSplits, ok := p.initMonitorState()
+	if !ok {
+		p.emitEvent(&of.Event{
+			ProviderName: p.Metadata().Name,
+			EventType:    of.ProviderError,
+			ProviderEventDetails: of.ProviderEventDetails{
+				Message: "monitoring failed to start: factory or manager unavailable",
+			},
+		})
+		return
+	}
+
+	p.logger.Debug("starting background Split monitoring",
+		"interval", p.monitoringInterval,
+		"initial_splits", len(lastKnownSplits))
+
+	// One ticker for the whole loop; Stop releases its resources on exit.
+	ticker := time.NewTicker(p.monitoringInterval)
+	defer ticker.Stop()
+
+	for {
+		select {
+		case <-p.stopMonitor:
+			p.logger.Debug("received shutdown signal, stopping monitoring")
+			return
+
+		case <-ticker.C:
+			// Compare the latest name→ChangeNumber snapshot against the last
+			// known one; any difference means the flag definitions changed.
+			currentSplits := p.pollCurrentSplits(manager)
+			if splitsChanged(lastKnownSplits, currentSplits) {
+				p.logger.Debug("Split definitions changed",
+					"old_count", len(lastKnownSplits),
+					"new_count", len(currentSplits))
+				p.emitEvent(&of.Event{
+					ProviderName: p.Metadata().Name,
+					EventType:    of.ProviderConfigChange,
+					ProviderEventDetails: of.ProviderEventDetails{
+						Message: fmt.Sprintf("Split definitions updated (count: %d)", len(currentSplits)),
+					},
+				})
+				lastKnownSplits = currentSplits
+			}
+		}
+	}
+}
+
+// initMonitorState acquires the factory/manager and captures the initial split
+// state as a name→ChangeNumber snapshot. The third return value is false when
+// the factory or manager is unavailable.
+//
+// The RLock is released via defer so the lock cannot be leaked even if
+// manager.Splits() panics; the panic then propagates to the caller's
+// recovery handler with the lock already released.
+func (p *Provider) initMonitorState() (*client.SplitManager, map[string]int64, bool) {
+	p.mtx.RLock()
+	defer p.mtx.RUnlock()
+
+	if p.factory == nil {
+		p.logger.Error("no factory available for monitoring despite provider reporting ready")
+		return nil, nil, false
+	}
+
+	manager := p.factory.Manager()
+	if manager == nil {
+		p.logger.Error("factory manager is nil, stopping monitoring",
+			"reason", "Split SDK may not be fully initialized or factory is in invalid state")
+		return nil, nil, false
+	}
+
+	splits := manager.Splits()
+	snapshot := make(map[string]int64, len(splits))
+	for _, s := range splits {
+		snapshot[s.Name] = s.ChangeNumber
+	}
+	return manager, snapshot, true
+}
+
+// pollCurrentSplits captures the current split state as a name→ChangeNumber
+// snapshot, holding the provider read lock for the duration of the read.
+// The RLock is released via defer, so it cannot leak if manager.Splits() panics.
+func (p *Provider) pollCurrentSplits(manager *client.SplitManager) map[string]int64 {
+	p.mtx.RLock()
+	defer p.mtx.RUnlock()
+
+	splits := manager.Splits()
+	snapshot := make(map[string]int64, len(splits))
+	for _, s := range splits {
+		snapshot[s.Name] = s.ChangeNumber
+	}
+	return snapshot
+}
+
+// splitsChanged reports whether the current snapshot differs from the previous
+// one — i.e. a split was added, removed, or its change number moved.
+func splitsChanged(old, current map[string]int64) bool {
+	// Different sizes imply an addition or removal.
+	if len(old) != len(current) {
+		return true
+	}
+	// Equal sizes: the maps differ iff some previous entry is missing from,
+	// or stale in, the current snapshot.
+	for name, prev := range old {
+		if cur, ok := current[name]; !ok || cur != prev {
+			return true
+		}
+	}
+	return false
+}
diff --git a/examples/cloud/README.md b/examples/cloud/README.md
new file mode 100644
index 0000000..4e48b4b
--- /dev/null
+++ b/examples/cloud/README.md
@@ -0,0 +1,61 @@
+# Cloud Example
+
+**Cloud mode example** demonstrating Split OpenFeature Provider in streaming/cloud mode.
+
+## What This Demonstrates
+
+- Provider initialization in **streaming/cloud mode** with structured colored logging
+- Boolean, String, Integer, Float, and **Object** flag evaluations
+- Evaluation context with targeting keys and attributes
+- Getting evaluation details (variant, reason, flag metadata)
+- **Flag sets** evaluation (object evaluations in cloud mode)
+- **Flag metadata** (JSON configurations attached to treatments)
+- Provider health checks
+- Source-attributed logs for debugging
+
+**Requires Split API key** - Connects to Split's cloud service for real-time flag updates via streaming.
+
+## Prerequisites
+
+Get your Split API key from [Split.io](https://split.io) (use server-side SDK key).
+
+## Running
+
+```bash
+cd examples/cloud
+export SPLIT_API_KEY="your-server-side-sdk-key"
+go run main.go
+```
+
+The example will:
+
+1. Initialize the Split provider in cloud/streaming mode
+2. Evaluate multiple flag types (boolean, string, int, float, object)
+3. Demonstrate flag sets and flag metadata
+4. Show evaluation details and provider health
+5. Display structured colored logs with source attribution
+
+## Troubleshooting
+
+### "SPLIT_API_KEY environment variable is required"
+
+- Make sure you've set the environment variable: `export SPLIT_API_KEY="your-key"`
+- Verify your key is correct in the Split UI under Admin → API Keys
+
+### Flags returning default values
+
+- This is normal if flags don't exist in Split
+- Create the flags in the Split UI to see different behaviors
+- Check that you're using the correct SDK key (server-side, not client-side)
+
+### Provider initialization timeout
+
+- Check your network connection
+- Verify the API key is valid
+- The SDK needs to download flag definitions on first run
+
+## Learn More
+
+- [Split OpenFeature Go Provider Documentation](../../README.md)
+- [OpenFeature Go SDK](https://openfeature.dev/docs/reference/sdks/server/go)
+- [Split Go SDK](https://github.com/splitio/go-client)
diff --git a/examples/cloud/main.go b/examples/cloud/main.go
new file mode 100644
index 0000000..a43954e
--- /dev/null
+++ b/examples/cloud/main.go
@@ -0,0 +1,214 @@
+// Package main demonstrates cloud mode usage of the Split OpenFeature Provider.
+//
+// This example shows how to:
+// - Create and initialize a Split provider in streaming/cloud mode
+// - Evaluate different flag types (boolean, string, int, float, object)
+// - Get evaluation details (variant, reason, flag metadata)
+// - Monitor provider health
+//
+// This example requires a Split API key and connects to Split's cloud service.
+// Flags that don't exist return their default values - create flags in Split dashboard.
+//
+// Run: SPLIT_API_KEY=your-key-here go run main.go
+package main
+
+import (
+ "context"
+ "log/slog"
+ "os"
+ "time"
+
+ "github.com/lmittmann/tint"
+ "github.com/open-feature/go-sdk/openfeature"
+ "github.com/open-feature/go-sdk/openfeature/hooks"
+
+ "github.com/splitio/split-openfeature-provider-go/v2"
+)
+
+func main() {
+	// Resolve log verbosity from the LOG_LEVEL environment variable;
+	// defaults to INFO when unset or unrecognized.
+	logLevel := slog.LevelInfo
+	if level := os.Getenv("LOG_LEVEL"); level != "" {
+		switch level {
+		case "debug", "DEBUG", "trace", "TRACE":
+			logLevel = slog.LevelDebug
+		case "info", "INFO":
+			logLevel = slog.LevelInfo
+		case "warn", "WARN", "warning", "WARNING":
+			logLevel = slog.LevelWarn
+		case "error", "ERROR":
+			logLevel = slog.LevelError
+		default:
+			logLevel = slog.LevelInfo
+			slog.Warn("invalid LOG_LEVEL, using INFO", "provided", level, "valid", "debug|info|warn|error")
+		}
+	}
+
+	// Colored structured logging via tint; timestamps show time only.
+	baseLogger := slog.New(tint.NewHandler(os.Stderr, &tint.Options{
+		Level:      logLevel,
+		TimeFormat: time.TimeOnly,
+	}))
+
+	// Derive source-attributed loggers so app output is distinguishable
+	// from OpenFeature SDK hook output.
+	appLogger := baseLogger.With("source", "app")
+	ofLogger := baseLogger.With("source", "openfeature-sdk")
+
+	slog.SetDefault(baseLogger)
+
+	apiKey := os.Getenv("SPLIT_API_KEY")
+	if apiKey == "" {
+		appLogger.Error("SPLIT_API_KEY environment variable is required")
+		os.Exit(1)
+	}
+
+	// Use optimized test configuration for faster startup
+	cfg := split.TestConfig()
+
+	provider, err := split.New(apiKey,
+		split.WithLogger(baseLogger),
+		split.WithSplitConfig(cfg),
+	)
+	if err != nil {
+		appLogger.Error("failed to create provider", "error", err)
+		os.Exit(1)
+	}
+
+	// Shut down the OpenFeature API (and the provider) on exit,
+	// bounded by a 10-second timeout.
+	defer func() {
+		shutdownCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+		defer cancel()
+		if err := openfeature.ShutdownWithContext(shutdownCtx); err != nil {
+			appLogger.Error("shutdown error", "error", err)
+		}
+	}()
+
+	// Log every evaluation through the OpenFeature logging hook.
+	openfeature.AddHooks(hooks.NewLoggingHook(false, ofLogger))
+
+	initCtx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
+	defer cancel()
+
+	if err := openfeature.SetNamedProviderWithContextAndWait(initCtx, "cloud-streaming", provider); err != nil {
+		appLogger.Error("failed to initialize provider", "error", err)
+		os.Exit(1)
+	}
+
+	appLogger.Info("Split provider initialized successfully in cloud/streaming mode")
+
+	client := openfeature.NewClient("cloud-streaming")
+	ctx := context.Background()
+
+	// Check provider state
+	if client.State() == openfeature.ReadyState {
+		appLogger.Info("provider is ready for evaluations")
+	}
+
+	// Get client metadata
+	metadata := client.Metadata()
+	appLogger.Info("client metadata", "domain", metadata.Domain())
+
+	// Shared evaluation context: targeting key plus custom attributes.
+	evalCtx := openfeature.NewEvaluationContext("user-123", map[string]any{
+		"email": "user@example.com",
+		"plan":  "premium",
+	})
+
+	// Example 1: Boolean flag evaluation
+	appLogger.Info("boolean flag evaluation")
+	showNewFeature, err := client.BooleanValue(ctx, "feature_boolean_on", false, evalCtx)
+	if err != nil {
+		appLogger.Warn("error evaluating boolean flag", "error", err)
+	}
+	appLogger.Info("flag evaluated", "flag", "feature_boolean_on", "value", showNewFeature, "default", false)
+
+	// Example 2: String flag evaluation
+	appLogger.Info("string flag evaluation")
+	theme, err := client.StringValue(ctx, "ui_theme", "light", evalCtx)
+	if err != nil {
+		appLogger.Warn("error evaluating string flag", "error", err)
+	}
+	appLogger.Info("flag evaluated", "flag", "ui_theme", "value", theme, "default", "light")
+
+	// Example 3: Integer flag evaluation
+	appLogger.Info("integer flag evaluation")
+	maxRetries, err := client.IntValue(ctx, "max_retries", 3, evalCtx)
+	if err != nil {
+		appLogger.Warn("error evaluating integer flag", "error", err)
+	}
+	appLogger.Info("flag evaluated", "flag", "max_retries", "value", maxRetries, "default", 3)
+
+	// Example 4: Float flag evaluation
+	appLogger.Info("float flag evaluation")
+	discountRate, err := client.FloatValue(ctx, "discount_rate", 0.0, evalCtx)
+	if err != nil {
+		appLogger.Warn("error evaluating float flag", "error", err)
+	}
+	appLogger.Info("flag evaluated", "flag", "discount_rate", "value", discountRate, "default", 0.0)
+
+	// Example 5: Object flag evaluation (evaluates flag sets in cloud mode)
+	appLogger.Info("object flag evaluation (flag set)")
+	flagSetData, err := client.ObjectValue(ctx, "split_provider_test", split.FlagSetResult{}, evalCtx)
+	if err != nil {
+		appLogger.Warn("error evaluating object flag", "error", err)
+	} else if flags, ok := flagSetData.(split.FlagSetResult); ok {
+		appLogger.Info("flag set evaluated",
+			"flag_set", "split_provider_test",
+			"flags_count", len(flags))
+		// Access individual flags using the struct
+		if uiTheme, ok := flags["ui_theme"]; ok {
+			appLogger.Info("flag from set", "flag", "ui_theme", "treatment", uiTheme.Treatment)
+		}
+	}
+
+	// Example 6: Get evaluation details with flag metadata
+	appLogger.Info("getting evaluation details with metadata")
+	details, err := client.StringValueDetails(ctx, "ui_theme", "light", evalCtx)
+	if err != nil {
+		appLogger.Warn("error getting evaluation details", "error", err)
+	} else {
+		appLogger.Info("evaluation details",
+			"value", details.Value,
+			"variant", details.Variant,
+			"reason", details.Reason,
+			"flag_key", details.FlagKey,
+			"has_metadata", len(details.FlagMetadata) > 0)
+		if len(details.FlagMetadata) > 0 {
+			appLogger.Info("flag metadata available",
+				"metadata_keys", len(details.FlagMetadata))
+		}
+	}
+
+	// Example 7: Evaluation mode - force individual flag evaluation in cloud mode
+	// By default, ObjectEvaluation in cloud mode evaluates flag sets.
+	// Use WithEvaluationMode to evaluate a single flag as an object instead.
+	appLogger.Info("individual flag evaluation in cloud mode")
+	individualCtx := split.WithEvaluationMode(ctx, split.EvaluationModeIndividual)
+	singleFlag, err := client.ObjectValue(individualCtx, "ui_theme", split.FlagSetResult{}, evalCtx)
+	if err != nil {
+		appLogger.Warn("error evaluating individual flag", "error", err)
+	} else if flags, ok := singleFlag.(split.FlagSetResult); ok {
+		appLogger.Info("individual flag evaluated", "flags_count", len(flags))
+	}
+
+	// Example 8: Track event with metric value
+	appLogger.Info("tracking events")
+	trackDetails := openfeature.NewTrackingEventDetails(99.99).
+		Add("currency", "USD")
+	client.Track(ctx, "purchase", evalCtx, trackDetails)
+	appLogger.Info("tracked event with metric value", "event", "purchase", "value", 99.99)
+
+	// Example 9: Track count-only event (no metric value)
+	// Use WithoutMetricValue to avoid polluting sum/average metrics with zeros
+	noValueCtx := split.WithoutMetricValue(ctx)
+	countDetails := openfeature.NewTrackingEventDetails(0).
+		Add("page", "/home")
+	client.Track(noValueCtx, "page_view", evalCtx, countDetails)
+	appLogger.Info("tracked count-only event (nil value sent to Split)", "event", "page_view")
+
+	// Example 10: Provider health check
+	appLogger.Info("provider health check")
+	metrics := provider.Metrics()
+	appLogger.Info("provider health",
+		"provider", metrics.Provider,
+		"status", metrics.Status,
+		"initialized", metrics.Initialized,
+		"ready", metrics.Ready,
+		"splits_count", metrics.SplitsCount)
+
+	appLogger.Info("example completed successfully")
+}
diff --git a/examples/localhost/README.md b/examples/localhost/README.md
new file mode 100644
index 0000000..65e13a0
--- /dev/null
+++ b/examples/localhost/README.md
@@ -0,0 +1,79 @@
+# Localhost Mode Example
+
+**No Split account needed!** This example demonstrates offline flag evaluation using local YAML files.
+
+## What This Demonstrates
+
+- Localhost mode configuration (no network calls to Split)
+- Loading feature flags from local YAML file
+- User-specific targeting with key-based routing
+- All flag types (Boolean, String, Integer, Float, Object)
+- Flag metadata (JSON configurations attached to treatments)
+- Colored structured logging with source attribution
+- Perfect for CI/CD and integration tests
+
+## Why Use Localhost Mode?
+
+Perfect for:
+
+- Local development without Split account
+- Unit/integration testing with predictable values
+- CI/CD pipelines requiring deterministic behavior
+- Working offline or in restricted networks
+
+**WARNING:** Localhost mode does NOT sync with Split servers. Development/testing only - never use in production.
+
+## Running
+
+```bash
+cd examples/localhost
+go run main.go
+```
+
+No environment variables or API keys needed! The example will:
+
+1. Load flags from `split.yaml`
+2. Evaluate flags for multiple users
+3. Show targeting behavior
+4. Display structured logs with source attribution
+
+## Split File Format
+
+The `split.yaml` file defines feature flags:
+
+```yaml
+- flag_name:
+ treatment: "value"
+ keys: "user-1,user-2" # Optional: target specific users
+ config: '{"key": "value"}' # Optional: JSON configuration
+```
+
+## Limitations
+
+**Flag Sets Not Supported:** Localhost mode does NOT support flag sets for bulk evaluation.
+
+## Troubleshooting
+
+### "File not found" Error
+
+- Ensure `split.yaml` exists in the same directory
+- Use absolute paths if needed: `cfg.SplitFile = "/path/to/split.yaml"`
+
+### Flags Always Return Defaults
+
+- Check YAML syntax (proper indentation)
+- Verify flag names match exactly (case-sensitive)
+- Check the `keys` field if using targeted rollouts
+
+### Invalid YAML Format
+
+- Ensure proper YAML structure
+- Use quotes around string values with special characters
+- Validate YAML with online tools
+
+## Learn More
+
+- [Cloud Example](../cloud/) - Cloud mode with streaming
+- [Split OpenFeature Go Provider Documentation](../../README.md)
+- [OpenFeature Go SDK](https://openfeature.dev/docs/reference/sdks/server/go)
+- [Split Go SDK](https://github.com/splitio/go-client)
diff --git a/examples/localhost/main.go b/examples/localhost/main.go
new file mode 100644
index 0000000..4223168
--- /dev/null
+++ b/examples/localhost/main.go
@@ -0,0 +1,203 @@
+// Package main demonstrates localhost mode usage of the Split OpenFeature Provider.
+//
+// Localhost mode is ideal for:
+// - Development and testing without Split.io account
+// - Testing flag configurations locally before deployment
+// - CI/CD pipelines and integration tests
+//
+// This example shows how to:
+// - Configure Split SDK in localhost mode
+// - Load flags from a local YAML file (split.yaml)
+// - Evaluate flags with different user attributes
+//
+// Run: go run main.go
+package main
+
+import (
+ "context"
+ "log/slog"
+ "os"
+ "time"
+
+ "github.com/lmittmann/tint"
+ "github.com/open-feature/go-sdk/openfeature"
+ "github.com/open-feature/go-sdk/openfeature/hooks"
+
+ "github.com/splitio/split-openfeature-provider-go/v2"
+)
+
+func main() {
+	// Pick the slog level from the LOG_LEVEL env var; unrecognized values fall back to INFO.
+	logLevel := slog.LevelInfo
+	if level := os.Getenv("LOG_LEVEL"); level != "" {
+		switch level {
+		case "debug", "DEBUG", "trace", "TRACE":
+			logLevel = slog.LevelDebug
+		case "info", "INFO":
+			logLevel = slog.LevelInfo
+		case "warn", "WARN", "warning", "WARNING":
+			logLevel = slog.LevelWarn
+		case "error", "ERROR":
+			logLevel = slog.LevelError
+		default:
+			logLevel = slog.LevelInfo
+			slog.Warn("invalid LOG_LEVEL, using INFO", "provided", level, "valid", "debug|info|warn|error")
+		}
+	}
+
+	// tint handler writes structured log records to stderr with a compact time format.
+	baseLogger := slog.New(tint.NewHandler(os.Stderr, &tint.Options{
+		Level:      logLevel,
+		TimeFormat: time.TimeOnly,
+	}))
+
+	// Tag records with their origin so application output and OpenFeature SDK
+	// output are distinguishable in the combined stream.
+	appLogger := baseLogger.With("source", "app")
+	ofLogger := baseLogger.With("source", "openfeature-sdk")
+
+	slog.SetDefault(baseLogger)
+
+	appLogger.Info("Split OpenFeature Provider - localhost mode example")
+	appLogger.Warn("this example runs in LOCALHOST MODE for development/testing")
+	appLogger.Info("reading feature flags from ./split.yaml")
+
+	// Use optimized test configuration for faster startup
+	cfg := split.TestConfig()
+	cfg.SplitFile = "./split.yaml"
+
+	provider, err := split.New("localhost", split.WithSplitConfig(cfg), split.WithLogger(baseLogger))
+	if err != nil {
+		appLogger.Error("failed to create provider", "error", err)
+		os.Exit(1)
+	}
+
+	// Registered before provider initialization so a normal return always
+	// shuts the OpenFeature API (and with it the provider) down cleanly,
+	// bounded by a 10s timeout.
+	defer func() {
+		shutdownCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+		defer cancel()
+		if err := openfeature.ShutdownWithContext(shutdownCtx); err != nil {
+			appLogger.Error("shutdown error", "error", err)
+		}
+	}()
+
+	openfeature.AddHooks(hooks.NewLoggingHook(false, ofLogger))
+
+	initCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+	defer cancel()
+
+	if err := openfeature.SetProviderWithContextAndWait(initCtx, provider); err != nil {
+		appLogger.Error("failed to initialize provider", "error", err)
+		// NOTE(review): os.Exit skips the deferred shutdown above; acceptable
+		// for an example that failed to start, but a real service should
+		// return an error instead of exiting here.
+		os.Exit(1)
+	}
+
+	appLogger.Info("provider initialized in localhost mode")
+
+	// Create default OpenFeature client (uses default provider)
+	ofClient := openfeature.NewDefaultClient()
+	ctx := context.Background()
+
+	// Check provider state
+	if ofClient.State() == openfeature.ReadyState {
+		appLogger.Info("provider is ready for evaluations")
+	}
+
+	// Get client metadata
+	metadata := ofClient.Metadata()
+	appLogger.Info("client metadata", "domain", metadata.Domain())
+
+	// Test with different users to see targeting in action
+	testUsers := []string{"user-123", "user-456", "user-789"}
+
+	for _, userID := range testUsers {
+		appLogger.Info("evaluating flags for user", "user_id", userID)
+		evalCtx := openfeature.NewEvaluationContext(userID, nil)
+
+		// Boolean flag with targeting. Errors are deliberately ignored in this
+		// demo; each call falls back to the provided default on failure.
+		newFeature, _ := ofClient.BooleanValue(ctx, "new_feature", false, evalCtx)
+		appLogger.Info("boolean flag evaluated", "flag", "new_feature", "value", newFeature)
+
+		// String flag
+		theme, _ := ofClient.StringValue(ctx, "ui_theme", "light", evalCtx)
+		appLogger.Info("string flag evaluated", "flag", "ui_theme", "value", theme)
+
+		// Integer flag
+		maxRetries, _ := ofClient.IntValue(ctx, "max_retries", 3, evalCtx)
+		appLogger.Info("integer flag evaluated", "flag", "max_retries", "value", maxRetries)
+
+		// Float flag
+		discount, _ := ofClient.FloatValue(ctx, "discount_rate", 0.0, evalCtx)
+		appLogger.Info("float flag evaluated", "flag", "discount_rate", "value", discount)
+
+		// Object flag with dynamic configuration - returns FlagSetResult
+		premiumFeatures, _ := ofClient.ObjectValue(ctx, "premium_features", split.FlagSetResult{}, evalCtx)
+		if flags, ok := premiumFeatures.(split.FlagSetResult); ok {
+			if flag, ok := flags["premium_features"]; ok {
+				appLogger.Info("object flag evaluated",
+					"flag", "premium_features",
+					"treatment", flag.Treatment,
+					"has_config", flag.Config != nil)
+			}
+		}
+
+		// Get evaluation details to see variant/treatment
+		details, _ := ofClient.BooleanValueDetails(ctx, "new_feature", false, evalCtx)
+		appLogger.Info("flag details", "variant", details.Variant, "reason", details.Reason)
+	}
+
+	// Demonstrate evaluation options
+	// Note: In localhost mode, EvaluationModeSet is ignored (always uses individual).
+	// EvaluationModeIndividual works as expected.
+	appLogger.Info("evaluation mode options (localhost mode)")
+	demoEvalCtx := openfeature.NewEvaluationContext("demo-user", nil)
+	individualCtx := split.WithEvaluationMode(ctx, split.EvaluationModeIndividual)
+	premiumIndividual, _ := ofClient.ObjectValue(individualCtx, "premium_features", split.FlagSetResult{}, demoEvalCtx)
+	if flags, ok := premiumIndividual.(split.FlagSetResult); ok {
+		appLogger.Info("individual mode evaluation", "flags_count", len(flags))
+	}
+
+	// Demonstrate track options
+	appLogger.Info("track options")
+
+	// Track with metric value (purchase amount)
+	purchaseDetails := openfeature.NewTrackingEventDetails(149.99).
+		Add("currency", "USD")
+	ofClient.Track(ctx, "purchase", demoEvalCtx, purchaseDetails)
+	appLogger.Info("tracked with metric value", "event", "purchase", "value", 149.99)
+
+	// Track without metric value (count-only event)
+	// WithoutMetricValue prevents polluting sum/average metrics with zeros
+	noValueCtx := split.WithoutMetricValue(ctx)
+	pageViewDetails := openfeature.NewTrackingEventDetails(0).
+		Add("page", "/dashboard")
+	ofClient.Track(noValueCtx, "page_view", demoEvalCtx, pageViewDetails)
+	appLogger.Info("tracked count-only event (nil value)", "event", "page_view")
+
+	// Demonstrate onboarding flow with configuration
+	appLogger.Info("onboarding flow configuration")
+	evalCtx := openfeature.NewEvaluationContext("new-user", nil)
+	onboardingFlow, _ := ofClient.StringValue(ctx, "onboarding_flow", "v1", evalCtx)
+	appLogger.Info("onboarding flow evaluated", "version", onboardingFlow)
+
+	// Get the configuration
+	details, _ := ofClient.StringValueDetails(ctx, "onboarding_flow", "v1", evalCtx)
+	appLogger.Info("onboarding flow details", "variant", details.Variant)
+
+	// Demonstrate maintenance mode flag
+	appLogger.Info("system flags")
+	maintenanceMode, _ := ofClient.BooleanValue(ctx, "maintenance_mode", false, evalCtx)
+	if maintenanceMode {
+		appLogger.Warn("system is in maintenance mode")
+	} else {
+		appLogger.Info("system is operational")
+	}
+
+	// Show provider health
+	appLogger.Info("provider health")
+	metrics := provider.Metrics()
+	appLogger.Info("health status",
+		"status", metrics.Status,
+		"splits_count", metrics.SplitsCount)
+
+	appLogger.Info("localhost mode example completed successfully")
+	appLogger.Info("tips",
+		"edit_config", "Edit split.yaml to change flag values",
+		"network", "No network connection required",
+		"ci_cd", "Perfect for CI/CD pipelines and unit tests",
+		"docs", "See README.md for YAML format details")
+}
diff --git a/examples/localhost/split.yaml b/examples/localhost/split.yaml
new file mode 100644
index 0000000..baecb2a
--- /dev/null
+++ b/examples/localhost/split.yaml
@@ -0,0 +1,46 @@
+# Split Localhost Mode - Example Flags
+#
+# Format:
+# - flag_name:
+# treatment: "value" # Required: Treatment (must be a string)
+# keys: "key1,key2" # Optional: Comma-separated targeting keys
+# config: '{"key": "value"}' # Optional: Dynamic Configuration (JSON)
+#
+# Documentation: https://developer.harness.io/docs/feature-management-experimentation/sdks-and-infrastructure/server-side-sdks/go-sdk#yaml
+
+- new_feature:
+ treatment: "on"
+ config: '{"rollout_percentage": 100, "description": "New feature enabled"}'
+
+- ui_theme:
+ treatment: "dark"
+
+- premium_features:
+ treatment: "on"
+ config: '{"analytics": true, "ai_assistant": true, "priority_support": true}'
+
+- max_retries:
+ treatment: "5"
+
+- discount_rate:
+ treatment: "0.15"
+
+- beta_rollout:
+ treatment: "off"
+ keys: "beta-tester-001" # Only specific beta testers
+
+- onboarding_flow:
+ treatment: "v2"
+ config: '{"steps": ["welcome", "profile", "preferences", "done"], "skip_allowed": true}'
+
+- rate_limit:
+ treatment: "100"
+ config: '{"per_minute": 100, "burst": 150}'
+
+- experimental_algorithm:
+ treatment: "control"
+    # Serves the baseline "control" treatment (experimental behavior disabled)
+
+- maintenance_mode:
+ treatment: "off"
+ # System-wide maintenance flag
\ No newline at end of file
diff --git a/go.mod b/go.mod
index 859d49d..fa71be4 100644
--- a/go.mod
+++ b/go.mod
@@ -1,20 +1,31 @@
-module github.com/splitio/split-openfeature-provider-go
+module github.com/splitio/split-openfeature-provider-go/v2
-go 1.19
+go 1.25.4
require (
- github.com/open-feature/go-sdk v0.6.0
- github.com/splitio/go-client v6.1.1-0.20210611192632-af2ff877b14a+incompatible
- github.com/splitio/go-toolkit v4.2.1-0.20210714181516-85e7c471376a+incompatible
+ github.com/lmittmann/tint v1.1.2
+ github.com/open-feature/go-sdk v1.17.0
+ github.com/splitio/go-client/v6 v6.8.1
+ github.com/splitio/go-toolkit/v5 v5.4.1
+ github.com/stretchr/testify v1.11.1
+ go.uber.org/goleak v1.3.0
+ golang.org/x/sync v0.17.0
)
require (
- github.com/go-logr/logr v1.2.3 // indirect
- github.com/go-redis/redis v6.15.9+incompatible // indirect
- github.com/onsi/ginkgo v1.16.5 // indirect
- github.com/onsi/gomega v1.20.2 // indirect
- github.com/splitio/go-split-commons v3.1.1-0.20210714173613-90097f92c8af+incompatible // indirect
- golang.org/x/net v0.7.0 // indirect
- golang.org/x/text v0.7.0 // indirect
- gopkg.in/yaml.v2 v2.4.0 // indirect
+ github.com/bits-and-blooms/bitset v1.3.1 // indirect
+ github.com/bits-and-blooms/bloom/v3 v3.3.1 // indirect
+ github.com/cespare/xxhash/v2 v2.2.0 // indirect
+ github.com/davecgh/go-spew v1.1.1 // indirect
+ github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
+ github.com/go-logr/logr v1.4.3 // indirect
+ github.com/hashicorp/errwrap v1.0.0 // indirect
+ github.com/hashicorp/go-multierror v1.1.1 // indirect
+ github.com/pmezard/go-difflib v1.0.0 // indirect
+ github.com/redis/go-redis/v9 v9.0.4 // indirect
+ github.com/splitio/go-split-commons/v8 v8.0.0 // indirect
+ github.com/stretchr/objx v0.5.2 // indirect
+ go.uber.org/mock v0.6.0 // indirect
+ golang.org/x/exp v0.0.0-20231006140011-7918f672742d // indirect
+ gopkg.in/yaml.v3 v3.0.1 // indirect
)
diff --git a/go.sum b/go.sum
index 30f3304..b72f314 100644
--- a/go.sum
+++ b/go.sum
@@ -1,107 +1,59 @@
-github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/bits-and-blooms/bitset v1.3.1 h1:y+qrlmq3XsWi+xZqSaueaE8ry8Y127iMxlMfqcK8p0g=
+github.com/bits-and-blooms/bitset v1.3.1/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
+github.com/bits-and-blooms/bloom/v3 v3.3.1 h1:K2+A19bXT8gJR5mU7y+1yW6hsKfNCjcP2uNfLFKncjQ=
+github.com/bits-and-blooms/bloom/v3 v3.3.1/go.mod h1:bhUUknWd5khVbTe4UgMCSiOOVJzr3tMoijSK3WwvW90=
+github.com/bsm/ginkgo/v2 v2.7.0 h1:ItPMPH90RbmZJt5GtkcNvIRuGEdwlBItdNVoyzaNQao=
+github.com/bsm/ginkgo/v2 v2.7.0/go.mod h1:AiKlXPm7ItEHNc/2+OkrNG4E0ITzojb9/xWzvQ9XZ9w=
+github.com/bsm/gomega v1.26.0 h1:LhQm+AFcgV2M0WyKroMASzAzCAJVpAxQXv4SaI9a69Y=
+github.com/bsm/gomega v1.26.0/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0=
+github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
+github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
-github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
-github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
-github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
-github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-redis/redis v6.15.9+incompatible h1:K0pv1D7EQUjfyoMql+r/jZqCLizCGKFlFgcHWWmHQjg=
-github.com/go-redis/redis v6.15.9+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
-github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
-github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc=
-github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
-github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
-github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
-github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
-github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
-github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
-github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
-github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
-github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
-github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
-github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
-github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
-github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
-github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
-github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
-github.com/onsi/gomega v1.20.2 h1:8uQq0zMgLEfa0vRrrBgaJF2gyW9Da9BmfGV+OyUzfkY=
-github.com/onsi/gomega v1.20.2/go.mod h1:iYAIXgPSaDHak0LCMA+AWBpIKBr8WZicMxnE8luStNc=
-github.com/open-feature/go-sdk v0.4.0 h1:4MC58EBEqsZRPrBfjywTEZXlgiD7lFQVSz0XIJQIRLM=
-github.com/open-feature/go-sdk v0.4.0/go.mod h1:rLTOsXIC5wJ/5iVZ0LOTz3/ahJmzxhzWcJTS81AaSqM=
-github.com/open-feature/go-sdk v0.5.0 h1:1Y3TYoiZn8yhez9SS6VkS0n9WTfIDst1QDGV92WWHeE=
-github.com/open-feature/go-sdk v0.5.0/go.mod h1:5yoSk6QrkAHXKQW9pD+ejxOx3uXUqJwoHmwEK4hlZvk=
-github.com/open-feature/go-sdk v0.5.1 h1:gra5dYqcgz3DuyKuOA3TIXS8MuYqNCTVgJpNGemkAQ8=
-github.com/open-feature/go-sdk v0.5.1/go.mod h1:5yoSk6QrkAHXKQW9pD+ejxOx3uXUqJwoHmwEK4hlZvk=
-github.com/open-feature/go-sdk v0.6.0 h1:/u1XH4msHeChaen65Alfk139/ifu8ZS3mLt37CenR5k=
-github.com/open-feature/go-sdk v0.6.0/go.mod h1:5yoSk6QrkAHXKQW9pD+ejxOx3uXUqJwoHmwEK4hlZvk=
+github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
+github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
+github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
+github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
+github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
+github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
+github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/lmittmann/tint v1.1.2 h1:2CQzrL6rslrsyjqLDwD11bZ5OpLBPU+g3G/r5LSfS8w=
+github.com/lmittmann/tint v1.1.2/go.mod h1:HIS3gSy7qNwGCj+5oRjAutErFBl4BzdQP6cJZ0NfMwE=
+github.com/open-feature/go-sdk v1.17.0 h1:/OUBBw5d9D61JaNZZxb2Nnr5/EJrEpjtKCTY3rspJQk=
+github.com/open-feature/go-sdk v1.17.0/go.mod h1:lPxPSu1UnZ4E3dCxZi5gV3et2ACi8O8P+zsTGVsDZUw=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/splitio/go-client v6.1.1-0.20210611192632-af2ff877b14a+incompatible h1:ahRviKx2RNNwK2b9NQbD9Iv1DLfHn+KHoBXwmbQ1EgY=
-github.com/splitio/go-client v6.1.1-0.20210611192632-af2ff877b14a+incompatible/go.mod h1:dJcPPOO+DlFMELdWAqGUcHTXGvGw0km+UEZJie7Hejk=
-github.com/splitio/go-split-commons v3.1.1-0.20210714173613-90097f92c8af+incompatible h1:jaP0z3iiwOYgneBEL7MGkUZNeQgsDiWqa6EBKBgSpQc=
-github.com/splitio/go-split-commons v3.1.1-0.20210714173613-90097f92c8af+incompatible/go.mod h1:w1uWXr+HcRVJLeoVyZucm+r3dt0W7zj7Sa9H2TCB3kA=
-github.com/splitio/go-toolkit v4.2.1-0.20210714181516-85e7c471376a+incompatible h1:vK8jmQOWqghCU9ZYPjHfrngpugLOFsc4tUMa4OqRk8M=
-github.com/splitio/go-toolkit v4.2.1-0.20210714181516-85e7c471376a+incompatible/go.mod h1:Oygm4Hgf3KotB5ZAaXIluLk5HgH2qu723HEPNvszJi8=
-github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
-github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20220722155237-a158d28d115b h1:PxfKdU9lEEDYjdIzOtC4qFWgkU2rGHdKlKowJSMN9h0=
-golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g=
-golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
-golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f h1:v4INt8xihDGvnrfjMDVXGxw9wrfxYyCjk0KbXjhR55s=
-golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU=
-golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
-golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo=
-golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
-golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
-google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
-google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
-google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
-google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
-google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
+github.com/redis/go-redis/v9 v9.0.4 h1:FC82T+CHJ/Q/PdyLW++GeCO+Ol59Y4T7R4jbgjvktgc=
+github.com/redis/go-redis/v9 v9.0.4/go.mod h1:WqMKv5vnQbRuZstUwxQI195wHy+t4PuXDOjzMvcuQHk=
+github.com/splitio/go-client/v6 v6.8.1 h1:wAOmeqrUz63uFBkd64hyob/y+xPuNQypahtvcmvZxOM=
+github.com/splitio/go-client/v6 v6.8.1/go.mod h1:2qAeh3AsmnkXcRs+vrBJp35MK1bqhB792dCwJTSprbw=
+github.com/splitio/go-split-commons/v8 v8.0.0 h1:wLk5eT6WU2LfxtaWG3ZHlTbNMGWP2eYsZTb1o+tFpkI=
+github.com/splitio/go-split-commons/v8 v8.0.0/go.mod h1:vgRGPn0s4RC9/zp1nIn4KeeIEj/K3iXE2fxYQbCk/WI=
+github.com/splitio/go-toolkit/v5 v5.4.1 h1:srTyvDBJZMUcJ/KiiQDMyjCuELVgTBh2TGRVn0sOXEE=
+github.com/splitio/go-toolkit/v5 v5.4.1/go.mod h1:SifzysrOVDbzMcOE8zjX02+FG5az4FrR3Us/i5SeStw=
+github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
+github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
+github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
+github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
+github.com/twmb/murmur3 v1.1.6 h1:mqrRot1BRxm+Yct+vavLMou2/iJt0tNVTTC0QoIjaZg=
+github.com/twmb/murmur3 v1.1.6/go.mod h1:Qq/R7NUyOfr65zD+6Q5IHKsJLwP7exErjN6lyyq3OSQ=
+go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
+go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
+go.uber.org/mock v0.6.0 h1:hyF9dfmbgIX5EfOdasqLsWD6xqpNZlXblLB/Dbnwv3Y=
+go.uber.org/mock v0.6.0/go.mod h1:KiVJ4BqZJaMj4svdfmHM0AUx4NJYO8ZNpPnZn1Z+BBU=
+golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI=
+golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo=
+golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug=
+golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
+golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k=
+golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
-gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
-gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
-gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
-gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
+gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
+gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/helpers.go b/helpers.go
new file mode 100644
index 0000000..3d54d95
--- /dev/null
+++ b/helpers.go
@@ -0,0 +1,428 @@
+package split
+
import (
	"context"
	"encoding/json"
	"sync/atomic"
	"unicode/utf8"

	of "github.com/open-feature/go-sdk/openfeature"
	"github.com/splitio/go-client/v6/splitio/client"
)
+
+// Factory returns the underlying Split SDK factory for advanced use cases.
+//
+// ⚠️ ADVANCED USAGE - Lifecycle Management Warning:
+//
+// The provider manages the Split SDK lifecycle (initialization, shutdown, cleanup).
+// When using Factory() directly, you must be aware of these constraints:
+//
+// 1. DO NOT call factory.Client().Destroy() - the provider owns SDK lifecycle
+// 2. DO NOT call factory.Client().BlockUntilReady() - use provider.Status() instead
+// 3. The factory is only valid between Init and Shutdown
+// 4. After Shutdown(), the client is destroyed and set to nil - factory.Client() will be invalid
+//
+// See https://github.com/splitio/go-client for Split SDK documentation.
+//
+// Concurrency Safety:
+// Uses read lock for consistency with Status() and Metrics() methods.
+// Even though factory is set once during New(), the client field IS set to nil
+// during shutdown. The lock prevents returning a factory whose client has been
+// destroyed.
+//
+// Example:
+//
+// factory := provider.Factory()
+// // Use factory for Split-specific features not available in OpenFeature
+func (p *Provider) Factory() *client.SplitFactory {
+ p.mtx.RLock()
+ defer p.mtx.RUnlock()
+ return p.factory
+}
+
+// buildSplitAttributes creates an attributes map from FlattenedContext for Split SDK calls.
+// Excludes OpenFeature-specific keys that have dedicated uses:
+// - targetingKey: used as Split's key parameter (user identifier)
+// - trafficType: used for Track() traffic type, not a targeting attribute
+func buildSplitAttributes(ec of.FlattenedContext) map[string]any {
+ attributes := make(map[string]any)
+ for k, v := range ec {
+ if k != of.TargetingKey && k != TrafficTypeKey {
+ attributes[k] = v
+ }
+ }
+ return attributes
+}
+
// evaluateTreatmentWithConfig evaluates a flag and returns the complete treatment result.
// Returns a non-nil *TreatmentResult{Treatment: "control", Config: nil} if the provider
// is shut down, or the targeting key is missing or invalid. Callers treat the
// control treatment as "flag not found" (see the error-code notes below in this file).
//
// Concurrency Safety:
// Uses read lock during client call to prevent race with ShutdownWithContext.
// This ensures the client is not destroyed while an evaluation is in progress.
// Checks shutdown flag atomically before acquiring lock for fast-fail during shutdown.
func (p *Provider) evaluateTreatmentWithConfig(ctx context.Context, flag string, ec of.FlattenedContext) *client.TreatmentResult {
	// Check shutdown first (fast fail to avoid lock overhead during shutdown)
	// If shutdown is in progress, return control treatment immediately
	if atomic.LoadUint32(&p.shutdown) == shutdownStateActive {
		return &client.TreatmentResult{Treatment: controlTreatment, Config: nil}
	}

	// One-time informational log if the caller requested impression disabling.
	p.logImpressionDisabledNotSupported(ctx, flag)

	key, ok := ec[of.TargetingKey]
	if !ok {
		p.logger.Debug("targeting key missing", "flag", flag)
		return &client.TreatmentResult{Treatment: controlTreatment, Config: nil}
	}

	keyStr, ok := key.(string)
	if !ok {
		p.logger.Debug("targeting key not a string", "flag", flag)
		return &client.TreatmentResult{Treatment: controlTreatment, Config: nil}
	}

	// Attributes are built before taking the lock to keep the critical section short.
	attributes := buildSplitAttributes(ec)

	// Acquire read lock for client access to prevent concurrent shutdown
	// This prevents client.Destroy() from being called during evaluation
	p.mtx.RLock()
	defer p.mtx.RUnlock()

	// Double-check shutdown after acquiring lock to prevent nil pointer dereference.
	// ShutdownWithContext sets the flag (CAS) before nilling p.client under the
	// write lock, so an inactive flag here implies p.client is still non-nil
	// for the duration of this read-locked section.
	if atomic.LoadUint32(&p.shutdown) == shutdownStateActive {
		return &client.TreatmentResult{Treatment: controlTreatment, Config: nil}
	}

	result := p.client.TreatmentWithConfig(keyStr, flag, attributes)
	return &result
}
+
// evaluateTreatmentsByFlagSet evaluates all flags in a flag set and returns treatments with configs.
// Returns FlagSetResult (map[flagName]FlagResult).
// Config supports any valid JSON type (objects, arrays, primitives).
// Assumes targeting key validated by caller as string.
//
// Concurrency Safety:
// Uses read lock during client call to prevent race with ShutdownWithContext.
// This ensures the client is not destroyed while an evaluation is in progress.
// Checks shutdown flag atomically before acquiring lock for fast-fail during shutdown.
func (p *Provider) evaluateTreatmentsByFlagSet(ctx context.Context, flagSet string, ec of.FlattenedContext) FlagSetResult {
	// Check shutdown first (fast fail to avoid lock overhead during shutdown)
	// If shutdown is in progress, return empty map immediately
	if atomic.LoadUint32(&p.shutdown) == shutdownStateActive {
		return make(FlagSetResult)
	}

	// One-time informational log if the caller requested impression disabling.
	p.logImpressionDisabledNotSupported(ctx, flagSet)

	// Extract targeting key (already validated by caller as string)
	keyStr, ok := ec[of.TargetingKey].(string)
	if !ok {
		// Defensive: callers validate via validateEvaluationContext, so this
		// indicates a programming error rather than bad user input — hence Error level.
		p.logger.Error("targeting key not a string (validation invariant violated)", "flag_set", flagSet)
		return make(FlagSetResult)
	}

	// Attributes are built before taking the lock to keep the critical section short.
	attributes := buildSplitAttributes(ec)

	// Acquire read lock for client access to prevent concurrent shutdown
	// This prevents client.Destroy() from being called during evaluation
	p.mtx.RLock()
	defer p.mtx.RUnlock()

	// Double-check shutdown after acquiring lock to prevent nil pointer dereference.
	// ShutdownWithContext sets the flag before nilling p.client under the write
	// lock, so an inactive flag here implies p.client is still non-nil.
	if atomic.LoadUint32(&p.shutdown) == shutdownStateActive {
		return make(FlagSetResult)
	}

	results := p.client.TreatmentsWithConfigByFlagSet(keyStr, flagSet, attributes)

	// Transform the results: parse config strings into any valid JSON
	transformed := make(FlagSetResult, len(results))
	for flagName, result := range results {
		flagResult := FlagResult{
			Treatment: result.Treatment,
		}

		// Malformed config JSON yields a nil Config (warning logged inside).
		flagResult.Config = p.parseConfigJSON(flagName, result.Config)
		transformed[flagName] = flagResult
	}

	return transformed
}
+
+// isLocalhostMode checks if the provider is running in localhost mode.
+// Localhost mode is detected by checking the OperationMode set by the Split SDK.
+// When API key is "localhost", Split SDK automatically sets OperationMode to "localhost".
+// This method is concurrent-safe as splitConfig is set once during New() and
+// the OperationMode field is not subsequently modified.
+func (p *Provider) isLocalhostMode() bool {
+ return p.splitConfig != nil && p.splitConfig.OperationMode == "localhost"
+}
+
// evaluateSingleFlagAsObject evaluates a single flag and returns it in flag set structure.
// Returns FlagSetResult (map[flagName]FlagResult) or empty map if flag not found.
// Assumes targeting key validated by caller as string.
//
// Concurrency Safety:
// Uses read lock during client call to prevent race with ShutdownWithContext.
// This ensures the client is not destroyed while an evaluation is in progress.
// Checks shutdown flag atomically before acquiring lock for fast-fail during shutdown.
func (p *Provider) evaluateSingleFlagAsObject(ctx context.Context, flag string, ec of.FlattenedContext) FlagSetResult {
	// Check shutdown first (fast fail to avoid lock overhead during shutdown)
	// If shutdown is in progress, return empty map immediately
	if atomic.LoadUint32(&p.shutdown) == shutdownStateActive {
		return make(FlagSetResult)
	}

	// One-time informational log if the caller requested impression disabling.
	p.logImpressionDisabledNotSupported(ctx, flag)

	// Extract targeting key (already validated by caller as string)
	keyStr, ok := ec[of.TargetingKey].(string)
	if !ok {
		// Defensive: callers validate via validateEvaluationContext, so this
		// indicates a programming error rather than bad user input — hence Error level.
		p.logger.Error("targeting key not a string (validation invariant violated)", "flag", flag)
		return make(FlagSetResult)
	}

	// Attributes are built before taking the lock to keep the critical section short.
	attributes := buildSplitAttributes(ec)

	// Acquire read lock for client access to prevent concurrent shutdown
	// This prevents client.Destroy() from being called during evaluation
	p.mtx.RLock()
	defer p.mtx.RUnlock()

	// Double-check shutdown after acquiring lock to prevent nil pointer dereference.
	// ShutdownWithContext sets the flag before nilling p.client under the write
	// lock, so an inactive flag here implies p.client is still non-nil.
	if atomic.LoadUint32(&p.shutdown) == shutdownStateActive {
		return make(FlagSetResult)
	}

	result := p.client.TreatmentWithConfig(keyStr, flag, attributes)

	// If treatment is control or empty, return empty map (flag not found)
	if noTreatment(result.Treatment) {
		return make(FlagSetResult)
	}

	// Build result in same structure as flag sets
	flagResult := FlagResult{
		Treatment: result.Treatment,
		Config:    p.parseConfigJSON(flag, result.Config),
	}

	// Return single-entry map with flag name as key
	return FlagSetResult{
		flag: flagResult,
	}
}
+
+// validateEvaluationContext validates the context and evaluation context for common error conditions.
+// Returns a ProviderResolutionDetail with an error if validation fails, or an empty detail if valid.
+// The caller should check if Error() is not nil to determine if validation failed.
+func (p *Provider) validateEvaluationContext(ctx context.Context, ec of.FlattenedContext) of.ProviderResolutionDetail {
+ if p.Status() != of.ReadyState {
+ return resolutionDetailProviderNotReady()
+ }
+
+ if err := ctx.Err(); err != nil {
+ return resolutionDetailContextCancelled(err)
+ }
+
+ key, ok := ec[of.TargetingKey]
+ if !ok {
+ return resolutionDetailTargetingKeyMissing()
+ }
+
+ if _, ok := key.(string); !ok {
+ return resolutionDetailInvalidContext("targeting key must be a string")
+ }
+
+ return of.ProviderResolutionDetail{}
+}
+
+// noTreatment checks if a treatment is empty or the control treatment.
+func noTreatment(treatment string) bool {
+ return treatment == "" || treatment == controlTreatment
+}
+
+// ========================================
+// OpenFeature Error Code Implementation
+// ========================================
+//
+// This provider implements all applicable OpenFeature error codes per the spec:
+// https://openfeature.dev/specification/types/#error-code
+//
+// IMPLEMENTED ERROR CODES:
+//
+// 1. PROVIDER_NOT_READY - Provider has not been initialized or is shut down
+// Used in: validateEvaluationContext when p.Status() != ReadyState
+//
+// 2. FLAG_NOT_FOUND - Flag does not exist in Split
+// Used in: All evaluation methods when Split returns "control" treatment
+//
+// 3. PARSE_ERROR - Treatment value cannot be parsed to requested type
+// Used in: Boolean/Int/Float evaluation when strconv.Parse* fails
+// Note: Split treatments are always strings, so this is correct for parse failures
+//
+// 4. TARGETING_KEY_MISSING - Evaluation context has no targeting key
+// Used in: validateEvaluationContext when ec[TargetingKey] is not present
+//
+// 5. INVALID_CONTEXT - Evaluation context is malformed
+// Used in: validateEvaluationContext when targeting key exists but is not a string
+//
+// 6. GENERAL - Context canceled, deadline exceeded, or other errors
+// Used in: validateEvaluationContext when ctx.Err() != nil
+//
+// NOT APPLICABLE ERROR CODES:
+//
+// 7. TYPE_MISMATCH - Flag value type does not match expected type
+// Why not used: Split treatments are untyped strings. We always attempt to parse
+// them to the requested type. When parsing fails, it's a PARSE_ERROR (the string
+// cannot be parsed), not a TYPE_MISMATCH (Split doesn't have a native type system
+// where a flag could be "configured as a boolean" vs "configured as a string").
+// TYPE_MISMATCH would be appropriate for providers with typed flag systems.
+//
+// 8. PROVIDER_FATAL - Provider encountered an unrecoverable error
+// Why not used: Split SDK does not expose fatal runtime errors during evaluation.
+// When the SDK cannot evaluate (auth failure, network issues, SDK destroyed), it
+// returns the "control" treatment which we handle as FLAG_NOT_FOUND. Provider
+// initialization failures are handled by returning errors from New()/Init(), not
+// by returning PROVIDER_FATAL during evaluations.
+
+// resolutionDetailNotFound creates a resolution detail for a flag not found error.
+func resolutionDetailNotFound(variant string) of.ProviderResolutionDetail {
+ return providerResolutionDetailError(
+ of.NewFlagNotFoundResolutionError("flag not found"),
+ of.DefaultReason,
+ variant)
+}
+
+// resolutionDetailParseError creates a resolution detail for a parse error.
+func resolutionDetailParseError(variant string) of.ProviderResolutionDetail {
+ return providerResolutionDetailError(
+ of.NewParseErrorResolutionError("cannot parse treatment to given type"),
+ of.ErrorReason,
+ variant)
+}
+
+// resolutionDetailTargetingKeyMissing creates a resolution detail for missing targeting key.
+func resolutionDetailTargetingKeyMissing() of.ProviderResolutionDetail {
+ return providerResolutionDetailError(
+ of.NewTargetingKeyMissingResolutionError("targeting key missing"),
+ of.ErrorReason,
+ "")
+}
+
+// resolutionDetailContextCancelled creates a resolution detail for canceled context.
+func resolutionDetailContextCancelled(err error) of.ProviderResolutionDetail {
+ return providerResolutionDetailError(
+ of.NewGeneralResolutionError(err.Error()),
+ of.ErrorReason,
+ "")
+}
+
+// resolutionDetailInvalidContext creates a resolution detail for invalid context.
+func resolutionDetailInvalidContext(msg string) of.ProviderResolutionDetail {
+ return providerResolutionDetailError(
+ of.NewInvalidContextResolutionError(msg),
+ of.ErrorReason,
+ "")
+}
+
+// resolutionDetailProviderNotReady creates a resolution detail for provider not ready.
+func resolutionDetailProviderNotReady() of.ProviderResolutionDetail {
+ return providerResolutionDetailError(
+ of.NewProviderNotReadyResolutionError("provider not initialized"),
+ of.ErrorReason,
+ "")
+}
+
+// providerResolutionDetailError creates a resolution detail with an error.
+func providerResolutionDetailError(resErr of.ResolutionError, reason of.Reason, variant string) of.ProviderResolutionDetail {
+ return of.ProviderResolutionDetail{
+ ResolutionError: resErr,
+ Reason: reason,
+ Variant: variant,
+ }
+}
+
+// resolutionDetailWithConfig creates resolution detail with Dynamic Configuration.
+// Parses config JSON and adds to FlagMetadata. All config values are wrapped as
+// {"value": ...} to provide a consistent access pattern via FlagMetadata.
+// This is a receiver method (unlike other resolutionDetail* helpers) to enable logging
+// of malformed JSON warnings.
+//
+// ENHANCEMENT NOTE for Split SDK:
+// OpenFeature defines 8 semantic reason codes to indicate WHY a flag value was returned:
+// - TARGETING_MATCH: Dynamic evaluation based on user targeting rules
+// - SPLIT: Pseudorandom assignment (A/B test, traffic allocation)
+// - STATIC: Static value with no dynamic evaluation
+// - CACHED: Value retrieved from cache
+// - DEFAULT: Flag not found, returned default value
+// - DISABLED: Flag disabled in management system
+// - UNKNOWN: Reason could not be determined
+// - ERROR: Error occurred during evaluation
+//
+// Currently, we use TARGETING_MATCH for ALL successful evaluations because the Split SDK
+// does not expose the evaluation reason in its TreatmentResult. The SDK internally knows
+// whether the treatment came from:
+// - Targeted rule matching (user attributes matched targeting rules) → TARGETING_MATCH
+// - Traffic allocation / A/B test (pseudorandom split) → SPLIT
+// - Default treatment (no targeting, simple value) → STATIC
+// - Cached value (serving from local cache) → CACHED
+//
+// To properly implement OpenFeature reason codes, the Split Go SDK would need to expose
+// this information, perhaps by adding a "Reason" field to the TreatmentResult struct
+// returned by TreatmentWithConfig(). This would enable OpenFeature providers to
+// accurately report the semantic reason for each evaluation.
+func (p *Provider) resolutionDetailWithConfig(flagName, variant string, config *string) of.ProviderResolutionDetail {
+ detail := of.ProviderResolutionDetail{
+ Reason: of.TargetingMatchReason, // See ENHANCEMENT NOTE above
+ Variant: variant,
+ }
+
+ // If Dynamic Configuration is present, parse it and add to FlagMetadata
+ if configData := p.parseConfigJSON(flagName, config); configData != nil {
+ detail.FlagMetadata = of.FlagMetadata{"value": configData}
+ }
+
+ return detail
+}
+
// logImpressionDisabledNotSupported emits a one-time info message when the
// caller set ImpressionDisabled in the evaluation options. The Split Go SDK
// does not yet support per-evaluation impression control, so the option is
// acknowledged but not enforced.
func (p *Provider) logImpressionDisabledNotSupported(ctx context.Context, _ string) {
	if !GetEvalOptions(ctx).ImpressionDisabled {
		return
	}
	p.logOnce("impression_disabled", func() {
		p.logger.Info("ImpressionDisabled option detected but not yet supported by Split Go SDK",
			"note", "will be honored when SDK adds per-evaluation impression control")
	})
}
+
// parseConfigJSON decodes a Split dynamic-configuration string into a Go value.
// Returns nil when config is nil, empty, or not valid JSON. A malformed config
// is logged as a warning (with a truncated preview) because it indicates an
// invalid configuration entered in the Split UI.
func (p *Provider) parseConfigJSON(flagName string, config *string) any {
	if config == nil {
		return nil
	}
	raw := *config
	if raw == "" {
		return nil
	}

	var parsed any
	err := json.Unmarshal([]byte(raw), &parsed)
	if err == nil {
		return parsed
	}

	p.logger.Warn("failed to parse dynamic configuration JSON",
		"flag", flagName,
		"error", err,
		"config_preview", truncateString(raw, 100))
	return nil
}
+
// truncateString shortens s to at most maxLen bytes, appending "..." when
// truncation occurs. The cut point is moved back to the nearest rune boundary
// so a multi-byte UTF-8 character is never split in half (a raw byte slice
// would otherwise emit invalid UTF-8 into the logs). Used for logging previews
// of potentially large config strings.
func truncateString(s string, maxLen int) string {
	if len(s) <= maxLen {
		return s
	}
	if maxLen < 0 {
		maxLen = 0 // defensive: treat negative limits as zero
	}
	cut := maxLen
	// Back up past any UTF-8 continuation bytes to a rune start.
	for cut > 0 && !utf8.RuneStart(s[cut]) {
		cut--
	}
	return s[:cut] + "..."
}
diff --git a/lifecycle.go b/lifecycle.go
new file mode 100644
index 0000000..affc4f2
--- /dev/null
+++ b/lifecycle.go
@@ -0,0 +1,490 @@
+package split
+
+import (
+ "context"
+ "fmt"
+ "sync/atomic"
+ "time"
+
+ of "github.com/open-feature/go-sdk/openfeature"
+)
+
// Init implements StateHandler, which is embedded in ContextAwareStateHandler.
// The OpenFeature SDK never calls it (it calls InitWithContext instead), but
// the interface contract requires it. It delegates to InitWithContext with a
// deadline of BlockUntilReady plus a small buffer for SDK bookkeeping.
func (p *Provider) Init(evaluationContext of.EvaluationContext) error {
	// defaultInitTimeout is a defensive fallback; New() guarantees BlockUntilReady > 0.
	timeout := defaultInitTimeout
	if cfg := p.splitConfig; cfg != nil && cfg.BlockUntilReady > 0 {
		timeout = time.Duration(cfg.BlockUntilReady)*time.Second + initTimeoutBuffer
	}

	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()

	return p.InitWithContext(ctx, evaluationContext)
}
+
// InitWithContext initializes the provider with context support.
//
// This method implements the ContextAwareStateHandler interface and provides
// context-aware initialization that respects cancellation and timeouts.
//
// The context is used to:
//   - Cancel initialization if the caller's deadline is exceeded
//   - Support graceful shutdown during initialization
//   - Propagate cancellation signals from the caller
//
// Returns nil once the Split SDK is ready (or was already ready), an error
// wrapping ctx.Err() on cancellation, and a descriptive error if the SDK
// fails to become ready within its BlockUntilReady window. Emits
// PROVIDER_READY or PROVIDER_ERROR events accordingly.
//
// The OpenFeature SDK calls this method directly since the provider implements
// ContextAwareStateHandler. Init() also delegates here with a derived timeout
// context for direct callers using the StateHandler interface.
func (p *Provider) InitWithContext(ctx context.Context, evaluationContext of.EvaluationContext) error {
	_ = evaluationContext // Intentionally unused; required by ContextAwareStateHandler interface

	// initMu serializes initialization against shutdown's waitForInitGoroutines,
	// which also acquires it before waiting on initWg.
	p.initMu.Lock()
	defer p.initMu.Unlock()

	// Check if provider has been shut down - cannot re-initialize after shutdown
	// Once Shutdown() is called, the Split SDK client is destroyed and cannot be reused
	if atomic.LoadUint32(&p.shutdown) == shutdownStateActive {
		return fmt.Errorf("cannot initialize provider after shutdown: provider has been permanently shut down, create a new provider instance")
	}

	// Fast path: check if already initialized with read lock only
	p.mtx.RLock()
	if p.factory != nil && p.factory.IsReady() {
		p.mtx.RUnlock()
		p.logger.Debug("provider already initialized")
		return nil
	}
	p.mtx.RUnlock()

	// Use singleflight to ensure only one initialization happens
	// All concurrent InitWithContext() calls wait for the same result
	_, err, _ := p.initGroup.Do("init", func() (any, error) {
		// Double-check after acquiring singleflight lock
		p.mtx.RLock()
		if p.factory != nil && p.factory.IsReady() {
			p.mtx.RUnlock()
			p.logger.Debug("provider already initialized (concurrent init detected)")
			return nil, nil
		}
		p.mtx.RUnlock()

		// Block until Split SDK is ready WITH context monitoring
		// This can take 10+ seconds, so we monitor ctx.Done() for cancellation
		p.logger.Debug("waiting for Split SDK to be ready", "timeout_seconds", p.splitConfig.BlockUntilReady)

		// Run BlockUntilReady in goroutine since it doesn't support context.
		// initWg lets ShutdownWithContext wait for this goroutine; the buffered
		// channel ensures the send never blocks even if nobody receives.
		readyErr := make(chan error, 1)
		p.initWg.Add(1)
		go func() {
			defer p.initWg.Done() // Signal goroutine completion
			readyErr <- p.client.BlockUntilReady(p.splitConfig.BlockUntilReady)
		}()

		// Wait for either ready or context cancellation
		select {
		case <-ctx.Done():
			// Context canceled before SDK ready - check if readyErr also completed.
			// The non-blocking inner select disambiguates "SDK finished at roughly
			// the same moment" from "SDK still running".
			select {
			case err := <-readyErr:
				// SDK completed after context canceled - check result
				if err != nil {
					// SDK failed AND context canceled - return SDK error
					errMsg := fmt.Errorf("split SDK failed to become ready within %d seconds: %w",
						p.splitConfig.BlockUntilReady, err)
					p.emitEvent(&of.Event{
						ProviderName: p.Metadata().Name,
						EventType:    of.ProviderError,
						ProviderEventDetails: of.ProviderEventDetails{
							Message: errMsg.Error(),
						},
					})
					return nil, errMsg
				}
				// SDK succeeded even though context canceled - proceed with initialization
				p.logger.Debug("SDK initialized successfully despite context cancellation")
			default:
				// SDK still running, context truly canceled - return context error
				errMsg := fmt.Errorf("initialization canceled: %w", ctx.Err())
				p.emitEvent(&of.Event{
					ProviderName: p.Metadata().Name,
					EventType:    of.ProviderError,
					ProviderEventDetails: of.ProviderEventDetails{
						Message: errMsg.Error(),
					},
				})
				return nil, errMsg
			}
		case err := <-readyErr:
			if err != nil {
				errMsg := fmt.Errorf("split SDK failed to become ready within %d seconds: %w",
					p.splitConfig.BlockUntilReady, err)
				p.emitEvent(&of.Event{
					ProviderName: p.Metadata().Name,
					EventType:    of.ProviderError,
					ProviderEventDetails: of.ProviderEventDetails{
						Message: errMsg.Error(),
					},
				})
				return nil, errMsg
			}
			// SDK succeeded - check if context was canceled during initialization
			// If context canceled but SDK ready, we proceed (SDK is usable)
			p.logger.Debug("SDK became ready successfully")
		}

		// Atomically check shutdown and start monitoring to prevent race condition
		// We hold write lock to ensure:
		// 1. If Shutdown() is closing stopMonitor, we wait then see shutdown flag
		// 2. If we start monitoring, Shutdown() will wait for monitorDone
		// This prevents the deadlock where Shutdown waits for monitorDone that never closes
		p.mtx.Lock()

		// Check if shutdown happened during BlockUntilReady
		// This prevents starting monitoring goroutine after shutdown
		if atomic.LoadUint32(&p.shutdown) == shutdownStateActive {
			p.mtx.Unlock()
			return nil, fmt.Errorf("provider was shut down during initialization")
		}

		// Verify factory is ready (final confirmation that entire SDK is ready)
		// At this point, factory, client, and manager should all be ready
		if !p.factory.IsReady() {
			p.mtx.Unlock()
			err := fmt.Errorf("split SDK BlockUntilReady succeeded but factory not ready")
			p.emitEvent(&of.Event{
				ProviderName: p.Metadata().Name,
				EventType:    of.ProviderError,
				ProviderEventDetails: of.ProviderEventDetails{
					Message: err.Error(),
				},
			})
			return nil, err
		}

		// Get the number of splits loaded for informational logging
		splitCount := 0
		if manager := p.factory.Manager(); manager != nil {
			splitNames := manager.SplitNames()
			splitCount = len(splitNames)
		}

		// Start background monitoring while holding lock (atomic with shutdown check)
		// This guarantees that if we start monitoring, Shutdown() will wait for monitorDone
		go p.monitorSplitUpdates()
		p.mtx.Unlock()

		// Emit PROVIDER_READY event (emitEvent is concurrent-safe)
		p.emitEvent(&of.Event{
			ProviderName: p.Metadata().Name,
			EventType:    of.ProviderReady,
			ProviderEventDetails: of.ProviderEventDetails{
				Message: "Split provider initialized successfully",
			},
		})

		p.logger.Info("Split provider ready", "splits_loaded", splitCount)
		return nil, nil
	})

	return err
}
+
// Shutdown implements StateHandler, which is embedded in ContextAwareStateHandler.
// The OpenFeature SDK never calls it (it calls ShutdownWithContext instead),
// but the interface contract requires it. It delegates to ShutdownWithContext
// using the larger of the default shutdown timeout and the configured
// BlockUntilReady duration.
//
// Shutdown is "best effort" within the timeout:
//   - Provider state is immediately marked as shut down (no new operations allowed)
//   - Cleanup (monitoring stop, SDK destroy, channel close) runs within the timeout
//   - If the timeout expires, cleanup continues in background goroutines
//   - It always returns (never panics or hangs)
//
// See ShutdownWithContext for detailed best effort shutdown semantics.
func (p *Provider) Shutdown() {
	timeout := defaultShutdownTimeout
	if cfg := p.splitConfig; cfg != nil && cfg.BlockUntilReady > 0 {
		if configured := time.Duration(cfg.BlockUntilReady) * time.Second; configured > timeout {
			timeout = configured
		}
	}

	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()

	err := p.ShutdownWithContext(ctx)
	if err == nil {
		return
	}
	// StateHandler.Shutdown() returns nothing, so the error can only be logged.
	p.logger.Warn("shutdown completed with errors",
		"error", err,
		"note", "StateHandler.Shutdown() has no return value, error cannot be propagated")
}
+
// ShutdownWithContext gracefully shuts down the provider with context support.
//
// This method implements the ContextAwareStateHandler interface and provides
// context-aware shutdown that respects cancellation and timeouts from the caller.
//
// # Return Values
//
// Returns nil if shutdown completes successfully within the context deadline.
// Returns ctx.Err() if the context expires before shutdown completes (context.DeadlineExceeded
// or context.Canceled). Note that even when an error is returned, the provider is logically
// shut down - the shutdown flag is set immediately and new operations will fail with
// PROVIDER_NOT_READY.
//
// # Shutdown Behavior
//
// The provider state is atomically set to "shut down" immediately upon entry, preventing
// new operations. Cleanup happens on a best-effort basis within the context deadline.
//
// If the context deadline expires during cleanup:
//  1. Warnings are logged about incomplete operations
//  2. ctx.Err() is returned to indicate timeout/cancellation
//  3. Cleanup continues in background goroutines that will eventually complete
//  4. Provider remains logically shut down (Status() returns NotReadyState)
//
// Cleanup operations and their timeout behavior:
//   - Event channel close: Always completes immediately
//   - Monitoring goroutine: May take up to one monitoring interval to terminate after stopMonitor signal
//   - Split SDK Destroy(): May take up to 1 hour in streaming mode (known SDK issue)
//
// The context is used to:
//   - Respect the caller's shutdown deadline
//   - Cancel long-running cleanup operations
//   - Provide graceful shutdown within time constraints
//
// Recommended minimum timeout: 30 seconds to allow monitoring goroutine to exit cleanly.
func (p *Provider) ShutdownWithContext(ctx context.Context) error {
	// Check if already shut down and set shutdown flag atomically.
	// The CAS makes this method idempotent and establishes the flag BEFORE any
	// cleanup, so evaluation helpers fast-fail from this point on.
	if !atomic.CompareAndSwapUint32(&p.shutdown, shutdownStateInactive, shutdownStateActive) {
		p.logger.Debug("provider already shut down")
		return nil
	}

	p.logger.Debug("shutting down Split provider")

	// Track whether any timeout occurred during shutdown
	var shutdownErr error

	// Stop background monitoring (if it was started)
	// Note: Monitoring only starts after successful initialization
	// Atomically close stopMonitor and check if monitoring was started to prevent race condition
	// We hold write lock to ensure:
	// 1. If Init() is starting monitoring, we wait then close stopMonitor safely
	// 2. Our wasInitialized check happens atomically with stopMonitor close
	// This prevents the deadlock where we wait for monitorDone that was never started
	p.logger.Debug("stopping background monitoring goroutine")
	p.mtx.Lock()
	close(p.stopMonitor)
	wasInitialized := p.factory != nil && p.factory.IsReady()
	p.mtx.Unlock()

	if wasInitialized {
		p.logger.Debug("waiting for background monitoring to stop")
		select {
		case <-p.monitorDone:
			p.logger.Debug("background monitoring stopped")
		case <-ctx.Done():
			// Timed out: record the error but keep going with best-effort cleanup.
			shutdownErr = ctx.Err()
			p.logger.Warn("context deadline exceeded while waiting for monitoring goroutine, forcing shutdown",
				"reason", "monitoring goroutine may still be running",
				"error", shutdownErr)
		}
	} else {
		p.logger.Debug("provider was never initialized, skipping monitoring cleanup")
	}

	// Wait for initialization goroutine(s) to finish
	// This prevents goroutine leak when Init is canceled but BlockUntilReady still running
	// BlockUntilReady has a built-in timeout, so initWg.Wait() is bounded
	// Only the first error encountered is reported (shutdownErr is kept if already set).
	if err := p.waitForInitGoroutines(ctx); err != nil && shutdownErr == nil {
		shutdownErr = err
	}

	// Destroy Split SDK client and close event channel
	// Order is critical: monitoring stopped -> init goroutines done -> NOW safe to close channel and destroy client
	operationMode := "unknown"
	if p.splitConfig != nil {
		operationMode = p.splitConfig.OperationMode
	}
	p.logger.Debug("destroying Split SDK client", "mode", operationMode)

	destroyStart := time.Now()
	destroyDone := make(chan struct{})
	go func() {
		defer close(destroyDone)

		// Nil the client and close the event stream under the write lock so
		// in-flight evaluations (holding the read lock) finish first.
		p.mtx.Lock()
		clientToDestroy := p.client
		p.client = nil
		close(p.eventStream)
		p.mtx.Unlock()

		if clientToDestroy != nil {
			// Destroy() is wrapped so a panic inside the SDK cannot crash the process.
			func() {
				defer func() {
					if r := recover(); r != nil {
						p.logger.Error("panic during Split SDK destroy",
							"panic", r,
							"advice", "this may indicate a bug in Split SDK")
					}
				}()
				clientToDestroy.Destroy()
			}()
		}
		elapsed := time.Since(destroyStart).Milliseconds()
		p.logger.Debug("Split SDK client destroyed", "duration_ms", elapsed)
	}()

	// Wait for either destroy completion or context cancellation
	select {
	case <-destroyDone:
		elapsed := time.Since(destroyStart).Milliseconds()
		p.logger.Debug("Split SDK client destroyed successfully", "duration_ms", elapsed)
	case <-ctx.Done():
		if shutdownErr == nil {
			shutdownErr = ctx.Err()
		}
		elapsed := time.Since(destroyStart).Milliseconds()
		p.logger.Warn("context deadline exceeded during Split SDK destroy, forcing shutdown",
			"elapsed_ms", elapsed,
			"mode", operationMode,
			"reason", "known Split SDK streaming mode issue - SSE connection blocks on read",
			"error", shutdownErr)
	}

	if shutdownErr != nil {
		p.logger.Warn("Split provider shutdown completed with errors",
			"error", shutdownErr,
			"note", "provider is logically shut down but cleanup may be incomplete")
		return shutdownErr
	}

	p.logger.Debug("Split provider shut down successfully")
	return nil
}
+
// waitForInitGoroutines waits for initialization goroutines to complete,
// respecting the context deadline. Returns nil on completion or ctx.Err()
// if the deadline expires first. Acquires initMu so it cannot overlap a
// still-running InitWithContext body.
func (p *Provider) waitForInitGoroutines(ctx context.Context) error {
	p.logger.Debug("waiting for initialization goroutines to complete")
	done := make(chan struct{})
	go func() {
		defer close(done)
		p.initMu.Lock()
		defer p.initMu.Unlock()
		p.initWg.Wait()
	}()

	select {
	case <-done:
		p.logger.Debug("initialization goroutines completed")
		return nil
	case <-ctx.Done():
		p.logger.Warn("context deadline exceeded while waiting for initialization goroutines",
			"error", ctx.Err())
		return ctx.Err()
	}
}
+
// Status returns the current state of the provider.
//
// Used by the OpenFeature SDK to determine provider readiness. Returns one of:
//   - NotReadyState: Provider not initialized or shut down
//   - ReadyState: Provider initialized and ready for evaluations
//
// The shutdown flag and factory are read under a single read lock so the pair
// is consistent, avoiding a TOCTOU race against ShutdownWithContext.
func (p *Provider) Status() of.State {
	p.mtx.RLock()
	isShutdown := atomic.LoadUint32(&p.shutdown) == shutdownStateActive
	factory := p.factory
	p.mtx.RUnlock()

	if !isShutdown && factory != nil && factory.IsReady() {
		return of.ReadyState
	}
	return of.NotReadyState
}
+
// ProviderMetrics contains provider health and diagnostic information.
//
// All fields are always populated. SplitsCount is -1 when the provider
// is not ready (not initialized or shut down).
type ProviderMetrics struct {
	// Provider is the provider name (always "Split").
	Provider string

	// Status is the current provider state (NotReadyState or ReadyState).
	Status of.State

	// SplitsCount is the number of split definitions loaded, as reported by
	// the SDK manager's SplitNames(). Set to -1 when the provider is not ready.
	SplitsCount int

	// Initialized indicates whether the provider is initialized and ready.
	Initialized bool

	// Ready indicates whether the provider is ready for evaluations.
	// Same as Initialized — both derived from factory ready state and shutdown flag.
	Ready bool
}
+
+// Metrics returns the current metrics and status of the provider.
+//
+// Example:
+//
+// m := provider.Metrics()
+// fmt.Printf("Provider: %s, Status: %s, Splits: %d\n",
+// m.Provider, m.Status, m.SplitsCount)
+func (p *Provider) Metrics() ProviderMetrics {
+ // Atomic read of shutdown flag and factory state together
+ // This prevents TOCTOU race condition (consistent with Status())
+ p.mtx.RLock()
+ shutdown := atomic.LoadUint32(&p.shutdown) == shutdownStateActive
+ factory := p.factory
+ p.mtx.RUnlock()
+
+ isReady := !shutdown && factory != nil && factory.IsReady()
+
+ var status of.State
+ if isReady {
+ status = of.ReadyState
+ } else {
+ status = of.NotReadyState
+ }
+
+ m := ProviderMetrics{
+ Provider: "Split",
+ Initialized: isReady,
+ Status: status,
+ Ready: isReady,
+ SplitsCount: -1,
+ }
+
+ if isReady && factory != nil {
+ if manager := factory.Manager(); manager != nil {
+ m.SplitsCount = len(manager.SplitNames())
+ }
+ }
+
+ return m
+}
diff --git a/lifecycle_edge_cases_test.go b/lifecycle_edge_cases_test.go
new file mode 100644
index 0000000..7d30fd6
--- /dev/null
+++ b/lifecycle_edge_cases_test.go
@@ -0,0 +1,915 @@
+package split
+
+import (
+ "context"
+ "testing"
+ "time"
+
+ "github.com/open-feature/go-sdk/openfeature"
+ "github.com/splitio/go-client/v6/splitio/client"
+ "github.com/splitio/go-client/v6/splitio/conf"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+// TestInitWithContextTimeout verifies that InitWithContext respects context timeout
+// when it's shorter than BlockUntilReady configuration.
+//
+// This test addresses the edge case where:
+// - BlockUntilReady is configured for 10 seconds
+// - Context timeout is only 1 second
+// - InitWithContext should return context.DeadlineExceeded after ~1 second, not wait 10 seconds
+func TestInitWithContextTimeout(t *testing.T) {
+ // Use invalid API key to force SDK to timeout
+ // This ensures BlockUntilReady will take the full timeout duration
+ cfg := conf.Default()
+ cfg.BlockUntilReady = 10 // 10 seconds timeout in SDK
+
+ provider, err := New("invalid-key-will-timeout", WithSplitConfig(cfg))
+ require.NoError(t, err, "Provider creation should succeed")
+
+ // Proper cleanup: Shutdown provider to prevent goroutine leak
+ defer func() {
+ shutdownCtx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
+ defer cancel()
+ _ = provider.ShutdownWithContext(shutdownCtx)
+ }()
+
+ // Context with 1 second timeout (shorter than BlockUntilReady)
+ ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
+ defer cancel()
+
+ start := time.Now()
+ err = provider.InitWithContext(ctx, openfeature.NewEvaluationContext("", nil))
+ elapsed := time.Since(start)
+
+ // Should fail with context error
+ // NOTE(review): the substring checks couple this test to the provider's
+ // exact error wording; update them together with the implementation.
+ assert.Error(t, err, "InitWithContext should return error when context times out")
+ assert.Contains(t, err.Error(), "initialization canceled", "Error should indicate cancellation")
+ assert.Contains(t, err.Error(), "deadline exceeded", "Error should contain context.DeadlineExceeded")
+
+ // Should respect context timeout (1s), not wait for BlockUntilReady (10s)
+ // Generous bounds (3s upper / 800ms lower) keep the test stable on slow CI hosts.
+ assert.Less(t, elapsed, 3*time.Second,
+ "InitWithContext should return within ~1s (context timeout), not wait 10s (BlockUntilReady)")
+ assert.Greater(t, elapsed, 800*time.Millisecond,
+ "InitWithContext should actually wait for context timeout, not return immediately")
+}
+
+// TestInitWithContextCancellationDuringBlockUntilReady verifies that context
+// cancellation during BlockUntilReady is handled correctly.
+//
+// This test addresses the edge case where:
+// - InitWithContext is called with a context
+// - Context is cancelled WHILE BlockUntilReady is running
+// - Should return immediately with context.Canceled error
+func TestInitWithContextCancellationDuringBlockUntilReady(t *testing.T) {
+ cfg := conf.Default()
+ cfg.BlockUntilReady = 10 // Long timeout to ensure we can cancel during init
+
+ provider, err := New("invalid-key-will-timeout", WithSplitConfig(cfg))
+ require.NoError(t, err)
+
+ // Proper cleanup: Shutdown provider to prevent goroutine leak
+ defer func() {
+ shutdownCtx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
+ defer cancel()
+ _ = provider.ShutdownWithContext(shutdownCtx)
+ }()
+
+ // cancel is always invoked by the goroutine below, so the context is not leaked.
+ ctx, cancel := context.WithCancel(context.Background())
+
+ // Cancel context after 500ms (while BlockUntilReady is running)
+ // NOTE(review): goroutine scheduling makes the cancel point approximate,
+ // hence the generous elapsed-time bounds below.
+ go func() {
+ time.Sleep(500 * time.Millisecond)
+ cancel()
+ }()
+
+ start := time.Now()
+ err = provider.InitWithContext(ctx, openfeature.NewEvaluationContext("", nil))
+ elapsed := time.Since(start)
+
+ assert.Error(t, err, "Should return error when context cancelled")
+ assert.Contains(t, err.Error(), "initialization canceled", "Should indicate cancellation")
+
+ // Should return shortly after cancellation (~500ms), not wait for BlockUntilReady (10s)
+ assert.Less(t, elapsed, 2*time.Second,
+ "Should return quickly after context cancellation")
+ assert.Greater(t, elapsed, 400*time.Millisecond,
+ "Should actually wait for cancellation, not return immediately")
+}
+
+// TestInitWithContextRaceCondition verifies the fix for the context cancellation race.
+//
+// This test addresses the critical edge case where:
+// - BlockUntilReady completes successfully
+// - Context is cancelled at nearly the same moment
+// - Both readyErr channel and ctx.Done() are ready
+// - select{} randomly chooses which case to execute
+//
+// Expected behavior: If SDK initialized successfully, we should SUCCEED even if
+// context was cancelled, because the SDK is now ready and usable.
+func TestInitWithContextRaceCondition(t *testing.T) {
+ cfg := conf.Default()
+ cfg.SplitFile = "testdata/split.yaml"
+ cfg.BlockUntilReady = 1 // Fast init
+
+ provider, err := New("localhost", WithSplitConfig(cfg))
+ require.NoError(t, err)
+
+ // Create context with very short timeout
+ // Timing is such that context expires RIGHT as BlockUntilReady completes
+ // NOTE(review): 900ms is tuned against the 1s BlockUntilReady above; the
+ // overlap is probabilistic, so a passing run does not exercise the race every time.
+ ctx, cancel := context.WithTimeout(context.Background(), 900*time.Millisecond)
+ defer cancel()
+
+ err = provider.InitWithContext(ctx, openfeature.NewEvaluationContext("", nil))
+
+ // The fix ensures this ALWAYS succeeds (SDK is ready)
+ // Without the fix, this would randomly fail when ctx.Done() is chosen by select
+ assert.NoError(t, err, "Should succeed when SDK initializes, even if context cancelled during init")
+
+ // Verify provider is actually ready
+ assert.Equal(t, openfeature.ReadyState, provider.Status(), "Provider should be in Ready state")
+
+ // Cleanup with timeout
+ shutdownCtx, shutdownCancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer shutdownCancel()
+ _ = provider.ShutdownWithContext(shutdownCtx)
+}
+
+// TestShutdownWithContextTimeout verifies that ShutdownWithContext respects
+// context timeout without failing prematurely.
+//
+// This test addresses the edge case where:
+// - Context timeout is shorter than monitoring goroutine stop time
+// - Previously had hardcoded 5s timeout that would conflict
+// - Should not return error if context times out, just log warning and continue
+func TestShutdownWithContextTimeout(t *testing.T) {
+ cfg := conf.Default()
+ cfg.SplitFile = "testdata/split.yaml"
+ cfg.BlockUntilReady = 1
+
+ provider, err := New("localhost", WithSplitConfig(cfg))
+ require.NoError(t, err)
+
+ // Initialize provider
+ initCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel()
+ err = provider.InitWithContext(initCtx, openfeature.NewEvaluationContext("", nil))
+ require.NoError(t, err, "Init should succeed")
+
+ // Shutdown with extremely short timeout (simulates aggressive shutdown deadline)
+ // In localhost mode, shutdown is very fast, so we need an unrealistically short timeout
+ // to trigger the timeout path
+ shutdownCtx, shutdownCancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
+ defer shutdownCancel()
+
+ // Give the timeout a chance to expire before we call Shutdown
+ // (guarantees shutdownCtx is already past its deadline when observed)
+ time.Sleep(1 * time.Millisecond)
+
+ start := time.Now()
+ err = provider.ShutdownWithContext(shutdownCtx)
+ elapsed := time.Since(start)
+
+ // Should return error when context times out
+ assert.Error(t, err, "ShutdownWithContext should return error when context times out")
+ assert.ErrorIs(t, err, context.DeadlineExceeded,
+ "Error should be context.DeadlineExceeded")
+
+ // Should respect context timeout
+ assert.Less(t, elapsed, 1*time.Second,
+ "Should return quickly when context times out")
+
+ // Verify provider is shut down (logically) even though cleanup may be incomplete
+ assert.Equal(t, openfeature.NotReadyState, provider.Status(),
+ "Provider should be NotReady after shutdown even if context timed out")
+}
+
+// TestShutdownWithContextGracefulStop verifies that ShutdownWithContext
+// waits for monitoring goroutine when context allows sufficient time.
+func TestShutdownWithContextGracefulStop(t *testing.T) {
+ cfg := conf.Default()
+ cfg.SplitFile = "testdata/split.yaml"
+ cfg.BlockUntilReady = 1
+
+ p, err := New("localhost", WithSplitConfig(cfg))
+ require.NoError(t, err)
+
+ // Bring the provider up first.
+ upCtx, upCancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer upCancel()
+ require.NoError(t, p.InitWithContext(upCtx, openfeature.NewEvaluationContext("", nil)))
+
+ // A generous deadline lets the monitoring goroutine stop cleanly.
+ stopCtx, stopCancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer stopCancel()
+
+ assert.NoError(t, p.ShutdownWithContext(stopCtx), "ShutdownWithContext should succeed with sufficient timeout")
+ assert.Equal(t, openfeature.NotReadyState, p.Status(), "Provider should be NotReady")
+}
+
+// TestInitShutdownContextInterplay verifies that Init and Shutdown
+// contexts are independent and don't interfere with each other.
+func TestInitShutdownContextInterplay(t *testing.T) {
+ cfg := conf.Default()
+ cfg.SplitFile = "testdata/split.yaml"
+ cfg.BlockUntilReady = 1
+
+ p, err := New("localhost", WithSplitConfig(cfg))
+ require.NoError(t, err)
+
+ // Initialize with a context that outlives the Init call itself.
+ initCtx, initCancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer initCancel()
+ require.NoError(t, p.InitWithContext(initCtx, openfeature.NewEvaluationContext("", nil)))
+
+ // Cancelling the init context afterwards must not leak into shutdown behavior.
+ initCancel()
+
+ // Shutdown runs under its own, unrelated context.
+ stopCtx, stopCancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer stopCancel()
+ assert.NoError(t, p.ShutdownWithContext(stopCtx), "Shutdown should succeed with its own context")
+}
+
+// TestInitAfterShutdown verifies that Init cannot be called after Shutdown.
+// This ensures the provider cannot be reused after shutdown.
+func TestInitAfterShutdown(t *testing.T) {
+ cfg := conf.Default()
+ cfg.SplitFile = "testdata/split.yaml"
+ cfg.BlockUntilReady = 1
+
+ p, err := New("localhost", WithSplitConfig(cfg))
+ require.NoError(t, err)
+
+ // Bring the provider up, then tear it down.
+ upCtx, upCancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer upCancel()
+ require.NoError(t, p.InitWithContext(upCtx, openfeature.NewEvaluationContext("", nil)),
+ "Initial init should succeed")
+
+ downCtx, downCancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer downCancel()
+ require.NoError(t, p.ShutdownWithContext(downCtx), "Shutdown should succeed")
+
+ // A second Init must be rejected — shutdown is permanent.
+ againCtx, againCancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer againCancel()
+ err = p.InitWithContext(againCtx, openfeature.NewEvaluationContext("", nil))
+
+ assert.Error(t, err, "Init after shutdown should fail")
+ assert.Contains(t, err.Error(), "cannot initialize provider after shutdown",
+ "Error should indicate provider was shut down")
+ assert.Contains(t, err.Error(), "permanently shut down",
+ "Error should indicate shutdown is permanent")
+
+ // The failed re-init leaves the provider in its shut-down state.
+ assert.Equal(t, openfeature.NotReadyState, p.Status(),
+ "Provider should be NotReady after shutdown")
+}
+
+// TestShutdownBeforeInit verifies that shutting down before initialization is safe.
+// This tests the edge case where a provider is created but never initialized.
+func TestShutdownBeforeInit(t *testing.T) {
+ cfg := conf.Default()
+ cfg.SplitFile = "testdata/split.yaml"
+ cfg.BlockUntilReady = 1
+
+ p, err := New("localhost", WithSplitConfig(cfg))
+ require.NoError(t, err)
+
+ // Tear down a provider that was never brought up.
+ downCtx, downCancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer downCancel()
+ assert.NoError(t, p.ShutdownWithContext(downCtx), "Shutdown before init should succeed")
+
+ assert.Equal(t, openfeature.NotReadyState, p.Status(),
+ "Provider should be NotReady after shutdown")
+
+ // Once shut down, initialization is permanently rejected.
+ upCtx, upCancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer upCancel()
+ err = p.InitWithContext(upCtx, openfeature.NewEvaluationContext("", nil))
+
+ assert.Error(t, err, "Init after shutdown should fail")
+ assert.Contains(t, err.Error(), "cannot initialize provider after shutdown",
+ "Error should indicate provider was shut down")
+}
+
+// TestConcurrentEvaluationDuringShutdown verifies that evaluations in progress
+// are safe during shutdown, and shutdown waits for evaluations to complete.
+func TestConcurrentEvaluationDuringShutdown(t *testing.T) {
+ cfg := conf.Default()
+ cfg.SplitFile = "testdata/split.yaml"
+ cfg.BlockUntilReady = 1
+
+ provider, err := New("localhost", WithSplitConfig(cfg))
+ require.NoError(t, err)
+
+ // Initialize provider
+ initCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel()
+ err = provider.InitWithContext(initCtx, openfeature.NewEvaluationContext("", nil))
+ require.NoError(t, err)
+
+ // Start multiple concurrent evaluations
+ // Buffer size matches the goroutine count so no sender can ever block.
+ evaluationsDone := make(chan bool, 10)
+ ctx := context.Background()
+ flatCtx := openfeature.FlattenedContext{
+ openfeature.TargetingKey: "user-123",
+ }
+
+ for i := 0; i < 10; i++ {
+ go func() {
+ // Perform evaluation (should succeed or return PROVIDER_NOT_READY)
+ result := provider.BooleanEvaluation(ctx, "my-feature", false, flatCtx)
+ // Don't assert success - evaluation might fail if shutdown happens first
+ // The important thing is it doesn't panic or hang
+ _ = result
+ evaluationsDone <- true
+ }()
+ }
+
+ // Give evaluations a brief moment to start
+ time.Sleep(10 * time.Millisecond)
+
+ // Shutdown while evaluations are in progress
+ shutdownCtx, shutdownCancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer shutdownCancel()
+ err = provider.ShutdownWithContext(shutdownCtx)
+ assert.NoError(t, err, "Shutdown should succeed even with concurrent evaluations")
+
+ // Wait for all evaluations to complete
+ for i := 0; i < 10; i++ {
+ select {
+ case <-evaluationsDone:
+ // Evaluation completed
+ case <-time.After(2 * time.Second):
+ t.Fatal("Evaluation did not complete within timeout")
+ }
+ }
+
+ // Verify provider is shut down
+ assert.Equal(t, openfeature.NotReadyState, provider.Status())
+}
+
+// TestMetricsBeforeInit verifies Metrics() returns correct state before initialization.
+func TestMetricsBeforeInit(t *testing.T) {
+ cfg := conf.Default()
+ cfg.SplitFile = "testdata/split.yaml"
+ cfg.BlockUntilReady = 1
+
+ provider, err := New("localhost", WithSplitConfig(cfg))
+ require.NoError(t, err)
+
+ // Check metrics before init: everything reports not-ready defaults.
+ metrics := provider.Metrics()
+ assert.Equal(t, "Split", metrics.Provider)
+ assert.False(t, metrics.Initialized)
+ assert.Equal(t, openfeature.NotReadyState, metrics.Status)
+ assert.False(t, metrics.Ready)
+ assert.Equal(t, -1, metrics.SplitsCount, "SplitsCount should be -1 before init")
+}
+
+// TestMetricsAfterInit verifies Metrics() returns correct state after initialization.
+func TestMetricsAfterInit(t *testing.T) {
+ cfg := conf.Default()
+ cfg.SplitFile = "testdata/split.yaml"
+ cfg.BlockUntilReady = 1
+
+ provider, err := New("localhost", WithSplitConfig(cfg))
+ require.NoError(t, err)
+
+ // Initialize
+ initCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel()
+ err = provider.InitWithContext(initCtx, openfeature.NewEvaluationContext("", nil))
+ require.NoError(t, err)
+
+ // Check metrics after init: ready flags set and split count populated.
+ metrics := provider.Metrics()
+ assert.Equal(t, "Split", metrics.Provider)
+ assert.True(t, metrics.Initialized)
+ assert.Equal(t, openfeature.ReadyState, metrics.Status)
+ assert.True(t, metrics.Ready)
+ assert.Greater(t, metrics.SplitsCount, 0, "SplitsCount should be > 0 for testdata")
+
+ // Cleanup
+ shutdownCtx, shutdownCancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer shutdownCancel()
+ _ = provider.ShutdownWithContext(shutdownCtx)
+}
+
+// TestMetricsAfterShutdown verifies Metrics() returns correct state after shutdown.
+func TestMetricsAfterShutdown(t *testing.T) {
+ cfg := conf.Default()
+ cfg.SplitFile = "testdata/split.yaml"
+ cfg.BlockUntilReady = 1
+
+ provider, err := New("localhost", WithSplitConfig(cfg))
+ require.NoError(t, err)
+
+ // Initialize
+ initCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel()
+ err = provider.InitWithContext(initCtx, openfeature.NewEvaluationContext("", nil))
+ require.NoError(t, err)
+
+ // Shutdown
+ shutdownCtx, shutdownCancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer shutdownCancel()
+ err = provider.ShutdownWithContext(shutdownCtx)
+ require.NoError(t, err)
+
+ // Check metrics after shutdown: back to the not-ready defaults.
+ metrics := provider.Metrics()
+ assert.Equal(t, "Split", metrics.Provider)
+ assert.False(t, metrics.Initialized)
+ assert.Equal(t, openfeature.NotReadyState, metrics.Status)
+ assert.False(t, metrics.Ready)
+ assert.Equal(t, -1, metrics.SplitsCount, "SplitsCount should be -1 after shutdown")
+}
+
+// TestStatusAtomicity verifies that Status() reads shutdown flag and factory state atomically.
+// This test runs with -race to detect any race conditions.
+func TestStatusAtomicity(t *testing.T) {
+ cfg := conf.Default()
+ cfg.SplitFile = "testdata/split.yaml"
+ cfg.BlockUntilReady = 1
+
+ provider, err := New("localhost", WithSplitConfig(cfg))
+ require.NoError(t, err)
+
+ // Initialize
+ initCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel()
+ err = provider.InitWithContext(initCtx, openfeature.NewEvaluationContext("", nil))
+ require.NoError(t, err)
+
+ // Concurrently call Status() while shutting down
+ done := make(chan struct{})
+ // Buffered to the goroutine count so finishing goroutines never block.
+ goroutinesDone := make(chan struct{}, 5)
+
+ // Goroutines calling Status() repeatedly
+ for i := 0; i < 5; i++ {
+ go func() {
+ defer func() { goroutinesDone <- struct{}{} }()
+ for {
+ select {
+ case <-done:
+ return
+ default:
+ // Just call Status(), don't store the result
+ _ = provider.Status()
+ // Small sleep to avoid tight loop
+ time.Sleep(1 * time.Millisecond)
+ }
+ }
+ }()
+ }
+
+ // Give Status() calls a moment to start
+ time.Sleep(10 * time.Millisecond)
+
+ // Shutdown
+ shutdownCtx, shutdownCancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer shutdownCancel()
+ err = provider.ShutdownWithContext(shutdownCtx)
+ assert.NoError(t, err)
+
+ // Stop Status() calls and wait for all goroutines to finish
+ close(done)
+ for i := 0; i < 5; i++ {
+ <-goroutinesDone
+ }
+
+ // Verify final status is NotReady
+ finalStatus := provider.Status()
+ assert.Equal(t, openfeature.NotReadyState, finalStatus,
+ "Final status should be NotReady after shutdown")
+
+ // The test passes if no race detector warnings occur
+ // All intermediate statuses should be either Ready or NotReady (no undefined states)
+}
+
+// TestDoubleShutdown verifies that calling Shutdown multiple times is safe.
+func TestDoubleShutdown(t *testing.T) {
+ cfg := conf.Default()
+ cfg.SplitFile = "testdata/split.yaml"
+ cfg.BlockUntilReady = 1
+
+ p, err := New("localhost", WithSplitConfig(cfg))
+ require.NoError(t, err)
+
+ initCtx, initCancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer initCancel()
+ require.NoError(t, p.InitWithContext(initCtx, openfeature.NewEvaluationContext("", nil)))
+
+ // Shut down twice; the repeat must be a silent no-op, never an error.
+ for _, msg := range []string{
+ "First shutdown should succeed",
+ "Second shutdown should succeed (idempotent)",
+ } {
+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ err = p.ShutdownWithContext(ctx)
+ cancel()
+ assert.NoError(t, err, msg)
+ }
+
+ // The provider stays NotReady after repeated shutdowns.
+ assert.Equal(t, openfeature.NotReadyState, p.Status())
+}
+
+// TestInitIdempotency verifies that calling Init when already initialized
+// returns immediately without re-initializing or starting duplicate monitoring goroutines.
+func TestInitIdempotency(t *testing.T) {
+ cfg := conf.Default()
+ cfg.SplitFile = "testdata/split.yaml"
+ cfg.BlockUntilReady = 1
+
+ provider, err := New("localhost", WithSplitConfig(cfg))
+ require.NoError(t, err)
+
+ // First Init
+ initCtx1, cancel1 := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel1()
+ err = provider.InitWithContext(initCtx1, openfeature.NewEvaluationContext("", nil))
+ require.NoError(t, err, "First init should succeed")
+ assert.Equal(t, openfeature.ReadyState, provider.Status())
+
+ // Second Init (should hit fast path, return immediately)
+ initCtx2, cancel2 := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel2()
+ start := time.Now()
+ err = provider.InitWithContext(initCtx2, openfeature.NewEvaluationContext("", nil))
+ elapsed := time.Since(start)
+
+ // Should succeed immediately (fast path)
+ // NOTE(review): the 100ms bound assumes an uncontended fast path; it may
+ // flake on heavily loaded CI hosts.
+ assert.NoError(t, err, "Second init should succeed (idempotent)")
+ assert.Less(t, elapsed, 100*time.Millisecond,
+ "Second init should return immediately via fast path, not wait for BlockUntilReady")
+ assert.Equal(t, openfeature.ReadyState, provider.Status())
+
+ // Third Init (also fast path)
+ initCtx3, cancel3 := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel3()
+ err = provider.InitWithContext(initCtx3, openfeature.NewEvaluationContext("", nil))
+ assert.NoError(t, err, "Third init should succeed (idempotent)")
+
+ // Cleanup
+ shutdownCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel()
+ _ = provider.ShutdownWithContext(shutdownCtx)
+}
+
+// TestConcurrentInit verifies that multiple concurrent Init calls are handled
+// correctly using singleflight - only ONE initialization happens.
+//
+// NOTE(review): the assertions below only observe that every call succeeds;
+// single execution of the underlying init is assumed from the implementation.
+func TestConcurrentInit(t *testing.T) {
+ cfg := conf.Default()
+ cfg.SplitFile = "testdata/split.yaml"
+ cfg.BlockUntilReady = 1
+
+ provider, err := New("localhost", WithSplitConfig(cfg))
+ require.NoError(t, err)
+
+ // Start 10 concurrent Init calls
+ const numGoroutines = 10
+ results := make(chan error, numGoroutines)
+ // start acts as a barrier so all goroutines call Init at the same instant.
+ start := make(chan struct{})
+
+ for i := 0; i < numGoroutines; i++ {
+ go func() {
+ <-start // Synchronize all goroutines to start at once
+ initCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel()
+ err := provider.InitWithContext(initCtx, openfeature.NewEvaluationContext("", nil))
+ results <- err
+ }()
+ }
+
+ // Start all goroutines at once
+ close(start)
+
+ // Collect all results
+ var successCount int
+ for i := 0; i < numGoroutines; i++ {
+ err := <-results
+ if err == nil {
+ successCount++
+ }
+ }
+
+ // All Init calls should succeed (singleflight ensures only one actual init)
+ assert.Equal(t, numGoroutines, successCount, "All Init calls should succeed")
+
+ // Verify provider is ready
+ assert.Equal(t, openfeature.ReadyState, provider.Status())
+
+ // Cleanup
+ shutdownCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel()
+ _ = provider.ShutdownWithContext(shutdownCtx)
+}
+
+// TestShutdownDuringInit verifies that calling Shutdown while Init is in progress
+// is handled safely without panics or hangs.
+func TestShutdownDuringInit(t *testing.T) {
+ cfg := conf.Default()
+ cfg.SplitFile = "testdata/split.yaml"
+ cfg.BlockUntilReady = 2 // Longer to give shutdown time to race
+
+ provider, err := New("localhost", WithSplitConfig(cfg))
+ require.NoError(t, err)
+
+ // Start Init in background
+ initDone := make(chan error, 1)
+ go func() {
+ initCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+ err := provider.InitWithContext(initCtx, openfeature.NewEvaluationContext("", nil))
+ initDone <- err
+ }()
+
+ // Give Init a moment to start BlockUntilReady
+ // (100ms is heuristic; the test remains valid for any interleaving)
+ time.Sleep(100 * time.Millisecond)
+
+ // Call Shutdown while Init is in progress
+ shutdownCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel()
+ err = provider.ShutdownWithContext(shutdownCtx)
+
+ // Shutdown should succeed (may complete before init, or after)
+ assert.NoError(t, err, "Shutdown should succeed even during init")
+
+ // Wait for Init to complete
+ select {
+ case initErr := <-initDone:
+ // Init might succeed (if it completed before shutdown)
+ // or fail (if shutdown happened first)
+ // Either outcome is acceptable - the important thing is no panic/hang
+ _ = initErr
+ case <-time.After(15 * time.Second):
+ t.Fatal("Init did not complete within timeout")
+ }
+
+ // Final status should be NotReady (shutdown completed)
+ assert.Equal(t, openfeature.NotReadyState, provider.Status())
+}
+
+// TestFactoryAccessorDuringShutdown verifies that Factory() accessor is safe
+// to call concurrently with Shutdown().
+func TestFactoryAccessorDuringShutdown(t *testing.T) {
+ cfg := conf.Default()
+ cfg.SplitFile = "testdata/split.yaml"
+ cfg.BlockUntilReady = 1
+
+ provider, err := New("localhost", WithSplitConfig(cfg))
+ require.NoError(t, err)
+
+ // Initialize
+ initCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel()
+ err = provider.InitWithContext(initCtx, openfeature.NewEvaluationContext("", nil))
+ require.NoError(t, err)
+
+ // Concurrently access Factory() while shutting down
+ done := make(chan struct{})
+ goroutinesDone := make(chan int, 5)
+
+ // Goroutines calling Factory() repeatedly
+ for i := 0; i < 5; i++ {
+ go func() {
+ count := 0
+ defer func() { goroutinesDone <- count }()
+ for {
+ select {
+ case <-done:
+ return
+ default:
+ // The explicit type doubles as a compile-time assertion on
+ // Factory()'s return type (and keeps the client import in use).
+ var factory *client.SplitFactory = provider.Factory()
+ if factory != nil {
+ count++
+ }
+ time.Sleep(1 * time.Millisecond)
+ }
+ }
+ }()
+ }
+
+ // Give Factory() calls a moment to start
+ time.Sleep(10 * time.Millisecond)
+
+ // Shutdown
+ shutdownCtx, shutdownCancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer shutdownCancel()
+ err = provider.ShutdownWithContext(shutdownCtx)
+ assert.NoError(t, err)
+
+ // Stop Factory() calls and wait for all goroutines
+ close(done)
+ totalCalls := 0
+ for i := 0; i < 5; i++ {
+ totalCalls += <-goroutinesDone
+ }
+
+ // Verify we got some factory results (test passed if no data race)
+ assert.Greater(t, totalCalls, 0, "Should have retrieved factory at least once")
+}
+
+// TestEventChannelClosedOnShutdown verifies that the event channel is properly
+// closed when the provider is shut down, preventing deadlocks for consumers
+// using for...range loops.
+//
+// This test addresses a critical requirement from the OpenFeature specification:
+// the event channel must be closed during shutdown to signal consumers that
+// no more events will be sent.
+func TestEventChannelClosedOnShutdown(t *testing.T) {
+ cfg := conf.Default()
+ cfg.SplitFile = testSplitFile
+ cfg.BlockUntilReady = 1
+
+ provider, err := New("localhost", WithSplitConfig(cfg))
+ require.NoError(t, err, "Provider creation should succeed")
+
+ // Initialize the provider
+ ctx := context.Background()
+ err = provider.InitWithContext(ctx, openfeature.NewEvaluationContext("test-user", nil))
+ require.NoError(t, err, "Init should succeed")
+
+ // Get the event channel
+ eventChan := provider.EventChannel()
+ require.NotNil(t, eventChan, "EventChannel should not be nil")
+
+ // Start a goroutine that ranges over the event channel
+ // This simulates a typical consumer pattern
+ consumerDone := make(chan struct{})
+ receivedEvents := 0
+
+ go func() {
+ defer close(consumerDone)
+ for range eventChan {
+ receivedEvents++
+ }
+ // When the channel is closed, the range loop exits and we close consumerDone
+ }()
+
+ // Shutdown the provider
+ shutdownCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel()
+ err = provider.ShutdownWithContext(shutdownCtx)
+ assert.NoError(t, err, "Shutdown should succeed")
+
+ // Wait for the consumer goroutine to exit (with timeout)
+ // If the channel is not closed, this will timeout
+ select {
+ case <-consumerDone:
+ // Success - the range loop exited because the channel was closed
+ t.Logf("Consumer goroutine exited cleanly after receiving %d events", receivedEvents)
+ case <-time.After(2 * time.Second):
+ t.Fatal("Consumer goroutine did not exit - event channel was not closed on shutdown")
+ }
+
+ // Verify we received at least the PROVIDER_READY event
+ // Race-free read: the goroutine's writes to receivedEvents happen before
+ // close(consumerDone), which happens before the receive in the select above.
+ assert.Greater(t, receivedEvents, 0, "Should have received at least one event")
+}
+
+// TestEventChannelMultipleConsumers verifies that multiple goroutines
+// ranging over the event channel all exit cleanly when the provider shuts down.
+func TestEventChannelMultipleConsumers(t *testing.T) {
+ cfg := conf.Default()
+ cfg.SplitFile = testSplitFile
+ cfg.BlockUntilReady = 1
+
+ provider, err := New("localhost", WithSplitConfig(cfg))
+ require.NoError(t, err, "Provider creation should succeed")
+
+ // Initialize the provider
+ ctx := context.Background()
+ err = provider.InitWithContext(ctx, openfeature.NewEvaluationContext("test-user", nil))
+ require.NoError(t, err, "Init should succeed")
+
+ // Get the event channel
+ eventChan := provider.EventChannel()
+
+ // Start multiple consumer goroutines
+ // (consumers compete for individual events; only channel *closure* is
+ // guaranteed to be observed by all of them)
+ numConsumers := 5
+ consumersDone := make(chan int, numConsumers)
+
+ for i := 0; i < numConsumers; i++ {
+ go func() {
+ count := 0
+ for range eventChan {
+ count++
+ }
+ consumersDone <- count
+ }()
+ }
+
+ // Give consumers a moment to start
+ time.Sleep(100 * time.Millisecond)
+
+ // Shutdown the provider
+ shutdownCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel()
+ err = provider.ShutdownWithContext(shutdownCtx)
+ assert.NoError(t, err, "Shutdown should succeed")
+
+ // Wait for all consumers to exit (with timeout)
+ // A single shared timer: the 2s budget intentionally covers ALL consumers combined.
+ timeout := time.After(2 * time.Second)
+ for i := 0; i < numConsumers; i++ {
+ select {
+ case count := <-consumersDone:
+ t.Logf("Consumer %d exited cleanly after receiving %d events", i, count)
+ case <-timeout:
+ t.Fatalf("Consumer %d did not exit - event channel was not closed on shutdown", i)
+ }
+ }
+}
+
+// TestEventChannelClosedBeforeInit verifies that shutdown works correctly
+// even when called before initialization (edge case).
+func TestEventChannelClosedBeforeInit(t *testing.T) {
+ cfg := conf.Default()
+ cfg.SplitFile = testSplitFile
+ cfg.BlockUntilReady = 1
+
+ p, err := New("localhost", WithSplitConfig(cfg))
+ require.NoError(t, err, "Provider creation should succeed")
+
+ // Grab the event channel without ever initializing.
+ events := p.EventChannel()
+ require.NotNil(t, events, "EventChannel should not be nil")
+
+ // A consumer draining the channel must unblock once it is closed.
+ exited := make(chan struct{})
+ go func() {
+ defer close(exited)
+ for range events {
+ // Drain until the channel is closed.
+ }
+ }()
+
+ // Shut down the never-initialized provider.
+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel()
+ assert.NoError(t, p.ShutdownWithContext(ctx), "Shutdown should succeed even without init")
+
+ // The consumer must exit promptly once the channel closes.
+ select {
+ case <-exited:
+ // Channel was closed; consumer finished.
+ case <-time.After(2 * time.Second):
+ t.Fatal("Consumer did not exit - event channel was not closed")
+ }
+}
+
+// TestShutdownIdempotencyWithEventChannel verifies that calling shutdown
+// multiple times doesn't cause panics (double-close on channel).
+func TestShutdownIdempotencyWithEventChannel(t *testing.T) {
+ cfg := conf.Default()
+ cfg.SplitFile = testSplitFile
+ cfg.BlockUntilReady = 1
+
+ p, err := New("localhost", WithSplitConfig(cfg))
+ require.NoError(t, err, "Provider creation should succeed")
+
+ require.NoError(t,
+ p.InitWithContext(context.Background(), openfeature.NewEvaluationContext("test-user", nil)),
+ "Init should succeed")
+
+ // Shut down three times; every repeat must be a no-op, never a
+ // double-close panic on the event channel.
+ for _, msg := range []string{
+ "First shutdown should succeed",
+ "Second shutdown should succeed without panic",
+ "Third shutdown should succeed without panic",
+ } {
+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ err = p.ShutdownWithContext(ctx)
+ cancel()
+ assert.NoError(t, err, msg)
+ }
+}
diff --git a/lifecycle_test.go b/lifecycle_test.go
new file mode 100644
index 0000000..8d89ad4
--- /dev/null
+++ b/lifecycle_test.go
@@ -0,0 +1,249 @@
+package split
+
+import (
+ "context"
+ "testing"
+ "time"
+
+ "github.com/open-feature/go-sdk/openfeature"
+ "github.com/splitio/go-client/v6/splitio/conf"
+ "github.com/splitio/go-toolkit/v5/logging"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+// TestProviderInit tests the Init method and lifecycle initialization.
+func TestProviderInit(t *testing.T) {
+ cfg := conf.Default()
+ cfg.SplitFile = testSplitFile
+ cfg.LoggerConfig.LogLevel = logging.LevelNone
+ cfg.BlockUntilReady = 10
+
+ provider, err := New("localhost", WithSplitConfig(cfg))
+ require.NoError(t, err, "Failed to create provider")
+
+ assert.Equal(t, openfeature.NotReadyState, provider.Status(), "Provider should start in NotReady state")
+
+ err = provider.InitWithContext(context.Background(), openfeature.NewEvaluationContext("", nil))
+ require.NoError(t, err, "Init should succeed")
+
+ assert.Equal(t, openfeature.ReadyState, provider.Status(), "Provider should be Ready after Init")
+
+ // Calling Init again should be idempotent
+ err = provider.InitWithContext(context.Background(), openfeature.NewEvaluationContext("", nil))
+ assert.NoError(t, err, "Second Init call should succeed (idempotent)")
+
+ _ = provider.ShutdownWithContext(context.Background())
+}
+
+// TestProviderShutdown tests the Shutdown method and resource cleanup.
+func TestProviderShutdown(t *testing.T) {
+ cfg := conf.Default()
+ cfg.SplitFile = testSplitFile
+ cfg.LoggerConfig.LogLevel = logging.LevelNone
+ cfg.BlockUntilReady = 10
+
+ provider, err := New("localhost", WithSplitConfig(cfg))
+ require.NoError(t, err, "Failed to create provider")
+
+ err = provider.InitWithContext(context.Background(), openfeature.NewEvaluationContext("", nil))
+ require.NoError(t, err, "Init should succeed")
+
+ assert.Equal(t, openfeature.ReadyState, provider.Status(), "Provider should be Ready after Init")
+
+ _ = provider.ShutdownWithContext(context.Background())
+
+ assert.Equal(t, openfeature.NotReadyState, provider.Status(), "Provider should be NotReady after Shutdown")
+
+ // Calling Shutdown again should be idempotent (should not panic)
+ _ = provider.ShutdownWithContext(context.Background())
+}
+
+// TestProviderShutdownTimeout tests that Shutdown completes within reasonable time.
+func TestProviderShutdownTimeout(t *testing.T) {
+ cfg := conf.Default()
+ cfg.SplitFile = testSplitFile
+ cfg.LoggerConfig.LogLevel = logging.LevelNone
+ cfg.BlockUntilReady = 10
+
+ provider, err := New("localhost", WithSplitConfig(cfg))
+ require.NoError(t, err, "Failed to create provider")
+
+ err = provider.InitWithContext(context.Background(), openfeature.NewEvaluationContext("", nil))
+ require.NoError(t, err, "Init should succeed")
+
+ done := make(chan struct{})
+ go func() {
+ _ = provider.ShutdownWithContext(context.Background())
+ close(done)
+ }()
+
+ select {
+ case <-done:
+ // Success - shutdown completed
+ case <-time.After(10 * time.Second):
+ t.Fatal("Shutdown did not complete within 10 seconds")
+ }
+}
+
+// TestProviderEventChannel tests the EventChannel method and event emission.
+func TestProviderEventChannel(t *testing.T) {
+ cfg := conf.Default()
+ cfg.SplitFile = testSplitFile
+ cfg.LoggerConfig.LogLevel = logging.LevelNone
+ cfg.BlockUntilReady = 10
+
+ provider, err := New("localhost", WithSplitConfig(cfg))
+ require.NoError(t, err, "Failed to create provider")
+
+ eventChan := provider.EventChannel()
+ require.NotNil(t, eventChan, "EventChannel() should not return nil")
+
+ events := make([]openfeature.Event, 0)
+ done := make(chan struct{})
+ go func() {
+ for event := range eventChan {
+ events = append(events, event)
+ if event.EventType == openfeature.ProviderReady {
+ close(done)
+ return
+ }
+ }
+ }()
+
+ err = provider.InitWithContext(context.Background(), openfeature.NewEvaluationContext("", nil))
+ require.NoError(t, err, "Init should succeed")
+
+ select {
+ case <-done:
+ // Success - received ProviderReady event
+ case <-time.After(1 * time.Second):
+ t.Error("Timeout waiting for ProviderReady event")
+ }
+
+ assert.NotEmpty(t, events, "Should receive at least one event")
+
+ foundReady := false
+ for _, event := range events {
+ if event.EventType == openfeature.ProviderReady {
+ foundReady = true
+ assert.Equal(t, providerNameSplit, event.ProviderName, "Provider name should be 'Split'")
+ }
+ }
+ assert.True(t, foundReady, "Should receive ProviderReady event")
+
+ _ = provider.ShutdownWithContext(context.Background())
+}
+
// TestProviderHealth tests the provider's health reporting via the Metrics
// method (the body exercises Metrics(), not a separate Health method).
func TestProviderHealth(t *testing.T) {
	cfg := conf.Default()
	cfg.SplitFile = testSplitFile
	cfg.LoggerConfig.LogLevel = logging.LevelNone
	cfg.BlockUntilReady = 10

	provider, err := New("localhost", WithSplitConfig(cfg))
	require.NoError(t, err, "Failed to create provider")

	// Metrics before Init: not initialized, status NOT_READY.
	metrics := provider.Metrics()
	assert.Equal(t, providerNameSplit, metrics.Provider, "Provider name should be 'Split'")
	assert.False(t, metrics.Initialized, "Should not be initialized before Init")
	assert.Equal(t, openfeature.NotReadyState, metrics.Status, "Status should be NOT_READY")

	err = provider.InitWithContext(context.Background(), openfeature.NewEvaluationContext("", nil))
	require.NoError(t, err, "Init should succeed")

	// Metrics after Init: initialized, READY, with splits loaded.
	metrics = provider.Metrics()
	assert.True(t, metrics.Initialized, "Should be initialized after Init")
	assert.Equal(t, openfeature.ReadyState, metrics.Status, "Status should be READY")
	assert.True(t, metrics.Ready, "Should be ready")
	assert.Greater(t, metrics.SplitsCount, 0, "splits_count should be greater than 0")

	_ = provider.ShutdownWithContext(context.Background())
}
+
+// TestInitWrapper tests the Init() method (StateHandler interface).
+// Verifies it delegates to InitWithContext with a derived timeout.
+func TestInitWrapper(t *testing.T) {
+ cfg := conf.Default()
+ cfg.SplitFile = testSplitFile
+ cfg.LoggerConfig.LogLevel = logging.LevelNone
+ cfg.BlockUntilReady = 10
+
+ provider, err := New("localhost", WithSplitConfig(cfg))
+ require.NoError(t, err, "Failed to create provider")
+
+ assert.Equal(t, openfeature.NotReadyState, provider.Status(), "Provider should start in NotReady state")
+
+ // Call Init (non-context version)
+ err = provider.Init(openfeature.NewEvaluationContext("", nil))
+ require.NoError(t, err, "Init should succeed")
+
+ assert.Equal(t, openfeature.ReadyState, provider.Status(), "Provider should be Ready after Init")
+
+ // Calling Init again should be idempotent
+ err = provider.Init(openfeature.NewEvaluationContext("", nil))
+ assert.NoError(t, err, "Second Init call should succeed (idempotent)")
+
+ _ = provider.ShutdownWithContext(context.Background())
+}
+
+// TestShutdownWrapper tests the Shutdown() method (StateHandler interface).
+// Verifies it delegates to ShutdownWithContext with a derived timeout.
+func TestShutdownWrapper(t *testing.T) {
+ cfg := conf.Default()
+ cfg.SplitFile = testSplitFile
+ cfg.LoggerConfig.LogLevel = logging.LevelNone
+ cfg.BlockUntilReady = 10
+
+ provider, err := New("localhost", WithSplitConfig(cfg))
+ require.NoError(t, err, "Failed to create provider")
+
+ err = provider.InitWithContext(context.Background(), openfeature.NewEvaluationContext("", nil))
+ require.NoError(t, err, "Init should succeed")
+
+ assert.Equal(t, openfeature.ReadyState, provider.Status(), "Provider should be Ready after Init")
+
+ // Call Shutdown (non-context version) - should not panic
+ provider.Shutdown()
+
+ assert.Equal(t, openfeature.NotReadyState, provider.Status(), "Provider should be NotReady after Shutdown")
+
+ // Calling Shutdown again should be idempotent (should not panic)
+ provider.Shutdown()
+}
+
+// TestProviderFactoryGetter tests the Factory method.
+func TestProviderFactoryGetter(t *testing.T) {
+ cfg := conf.Default()
+ cfg.SplitFile = testSplitFile
+ cfg.LoggerConfig.LogLevel = logging.LevelNone
+ cfg.BlockUntilReady = 10
+
+ provider, err := New("localhost", WithSplitConfig(cfg))
+ require.NoError(t, err, "Failed to create provider")
+
+ err = provider.InitWithContext(context.Background(), openfeature.NewEvaluationContext("", nil))
+ require.NoError(t, err, "Failed to initialize provider")
+
+ factory := provider.Factory()
+ require.NotNil(t, factory, "Factory should not be nil")
+
+ splitClient := factory.Client()
+ require.NotNil(t, splitClient, "Client should not be nil")
+
+ err = splitClient.Track("test-user", "test-traffic", "test-event", 1.0, nil)
+ assert.NoError(t, err, "Track call should succeed")
+
+ manager := factory.Manager()
+ require.NotNil(t, manager, "Manager should not be nil")
+
+ splitNames := manager.SplitNames()
+ assert.NotEmpty(t, splitNames, "Should have split definitions loaded")
+
+ assert.True(t, factory.IsReady(), "Factory should be ready")
+
+ _ = provider.ShutdownWithContext(context.Background())
+}
diff --git a/logging.go b/logging.go
new file mode 100644
index 0000000..fd386cd
--- /dev/null
+++ b/logging.go
@@ -0,0 +1,105 @@
+package split
+
+import (
+ "fmt"
+ "log/slog"
+)
+
+// SlogToSplitAdapter adapts Go's standard *slog.Logger to Split SDK's LoggerInterface.
+//
+// This adapter allows the Split SDK to use the same logger configured for the application
+// via slog.SetDefault(), ensuring consistent logging across the provider and Split SDK.
+// All Split SDK log levels are mapped directly to slog levels (Error→Error, Warning→Warn, etc.)
+//
+// This type is exported for advanced use cases where you need to configure the Split SDK
+// client directly with structured logging support.
+type SlogToSplitAdapter struct {
+ logger *slog.Logger
+}
+
+// NewSplitLogger creates a Split SDK logger adapter from a slog.Logger.
+//
+// This function allows you to use Go's structured logging (slog) with the Split SDK
+// by configuring the SDK before creating the provider.
+//
+// Example usage with custom logger configuration:
+//
+// import (
+// "log/slog"
+// "github.com/splitio/go-client/v6/splitio/conf"
+// split "github.com/splitio/split-openfeature-provider-go/v2"
+// )
+//
+// // Configure custom slog logger
+// logger := slog.New(slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{
+// Level: slog.LevelInfo,
+// }))
+//
+// // Configure Split SDK with slog adapter
+// cfg := conf.Default()
+// cfg.Logger = split.NewSplitLogger(logger) // Use slog adapter
+// cfg.BlockUntilReady = 10
+//
+// // Create provider with configured logging
+// provider, _ := split.New("YOUR_SDK_KEY", split.WithSplitConfig(cfg))
+// defer provider.Shutdown()
+//
+// For local development/testing, you can use localhost mode with a local splits file.
+//
+// If logger is nil, slog.Default() is used.
+func NewSplitLogger(logger *slog.Logger) *SlogToSplitAdapter {
+ if logger == nil {
+ logger = slog.Default()
+ }
+ return &SlogToSplitAdapter{logger: logger}
+}
+
+// Error logs an error message.
+// If multiple arguments are provided, the first is treated as the message
+// and remaining arguments are logged as structured "details" field.
+func (a *SlogToSplitAdapter) Error(msg ...any) {
+ a.log(a.logger.Error, msg...)
+}
+
+// Warning logs a warning message.
+// If multiple arguments are provided, the first is treated as the message
+// and remaining arguments are logged as structured "details" field.
+func (a *SlogToSplitAdapter) Warning(msg ...any) {
+ a.log(a.logger.Warn, msg...)
+}
+
+// Info logs an informational message.
+// If multiple arguments are provided, the first is treated as the message
+// and remaining arguments are logged as structured "details" field.
+func (a *SlogToSplitAdapter) Info(msg ...any) {
+ a.log(a.logger.Info, msg...)
+}
+
+// Debug logs a debug message.
+// If multiple arguments are provided, the first is treated as the message
+// and remaining arguments are logged as structured "details" field.
+func (a *SlogToSplitAdapter) Debug(msg ...any) {
+ a.log(a.logger.Debug, msg...)
+}
+
+// Verbose logs a verbose message (mapped to Debug level in slog).
+// If multiple arguments are provided, the first is treated as the message
+// and remaining arguments are logged as structured "details" field.
+func (a *SlogToSplitAdapter) Verbose(msg ...any) {
+ a.log(a.logger.Debug, msg...)
+}
+
+// log is a helper that preserves structured logging when multiple arguments are provided.
+// Single argument: logged as message only.
+// Multiple arguments: first as message, rest as structured "details" field.
+func (a *SlogToSplitAdapter) log(logFunc func(string, ...any), msg ...any) {
+ if len(msg) == 0 {
+ logFunc("")
+ return
+ }
+ if len(msg) == 1 {
+ logFunc(fmt.Sprint(msg[0]))
+ return
+ }
+ logFunc(fmt.Sprint(msg[0]), "details", msg[1:])
+}
diff --git a/logging_test.go b/logging_test.go
new file mode 100644
index 0000000..2a99b75
--- /dev/null
+++ b/logging_test.go
@@ -0,0 +1,363 @@
+package split
+
+import (
+ "bytes"
+ "encoding/json"
+ "log/slog"
+ "strings"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+// TestNewSplitLoggerCreatesValidAdapter verifies factory function creates valid adapter.
+func TestNewSplitLoggerCreatesValidAdapter(t *testing.T) {
+ t.Run("with custom logger", func(t *testing.T) {
+ var buf bytes.Buffer
+ logger := slog.New(slog.NewJSONHandler(&buf, nil))
+ adapter := NewSplitLogger(logger)
+
+ require.NotNil(t, adapter)
+ assert.NotNil(t, adapter.logger)
+ })
+
+ t.Run("with nil logger uses default", func(t *testing.T) {
+ adapter := NewSplitLogger(nil)
+
+ require.NotNil(t, adapter)
+ assert.NotNil(t, adapter.logger)
+ assert.Equal(t, slog.Default(), adapter.logger)
+ })
+}
+
+// TestLogAdapterLogsAtCorrectLevel verifies all log levels produce correct output.
+func TestLogAdapterLogsAtCorrectLevel(t *testing.T) {
+ tests := []struct {
+ logFunc func(*SlogToSplitAdapter, ...any)
+ name string
+ expectedLevel string
+ message string
+ slogLevel slog.Level
+ }{
+ {
+ name: "Error level",
+ logFunc: (*SlogToSplitAdapter).Error,
+ expectedLevel: "ERROR",
+ slogLevel: slog.LevelError,
+ message: "test error message",
+ },
+ {
+ name: "Warning level",
+ logFunc: (*SlogToSplitAdapter).Warning,
+ expectedLevel: "WARN",
+ slogLevel: slog.LevelWarn,
+ message: "test warning message",
+ },
+ {
+ name: "Info level",
+ logFunc: (*SlogToSplitAdapter).Info,
+ expectedLevel: "INFO",
+ slogLevel: slog.LevelInfo,
+ message: "test info message",
+ },
+ {
+ name: "Debug level",
+ logFunc: (*SlogToSplitAdapter).Debug,
+ expectedLevel: "DEBUG",
+ slogLevel: slog.LevelDebug,
+ message: "test debug message",
+ },
+ {
+ name: "Verbose maps to Debug level",
+ logFunc: (*SlogToSplitAdapter).Verbose,
+ expectedLevel: "DEBUG",
+ slogLevel: slog.LevelDebug,
+ message: "test verbose message",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ var buf bytes.Buffer
+ logger := slog.New(slog.NewJSONHandler(&buf, &slog.HandlerOptions{
+ Level: tt.slogLevel,
+ }))
+ adapter := NewSplitLogger(logger)
+
+ tt.logFunc(adapter, tt.message)
+
+ logOutput := buf.String()
+ assert.Contains(t, logOutput, tt.expectedLevel)
+ assert.Contains(t, logOutput, tt.message)
+
+ // Verify JSON structure
+ var logEntry map[string]any
+ err := json.Unmarshal([]byte(logOutput), &logEntry)
+ require.NoError(t, err)
+ assert.Equal(t, tt.expectedLevel, logEntry["level"])
+ assert.Equal(t, tt.message, logEntry["msg"])
+ })
+ }
+}
+
+// TestLogAdapterPreservesStructuredData verifies structured logging with multiple arguments.
+func TestLogAdapterPreservesStructuredData(t *testing.T) {
+ var buf bytes.Buffer
+ logger := slog.New(slog.NewJSONHandler(&buf, &slog.HandlerOptions{
+ Level: slog.LevelInfo,
+ }))
+ adapter := NewSplitLogger(logger)
+
+ // Log with structured details
+ adapter.Info("operation completed", "duration_ms", 150, "success", true)
+
+ logOutput := buf.String()
+
+ // Verify JSON structure
+ var logEntry map[string]any
+ err := json.Unmarshal([]byte(logOutput), &logEntry)
+ require.NoError(t, err)
+
+ assert.Equal(t, "INFO", logEntry["level"])
+ assert.Equal(t, "operation completed", logEntry["msg"])
+
+ // Verify structured details field exists
+ require.Contains(t, logEntry, "details")
+ details, ok := logEntry["details"].([]any)
+ require.True(t, ok, "details should be an array")
+
+ // Verify details contain the arguments
+ require.Len(t, details, 4)
+ assert.Equal(t, "duration_ms", details[0])
+ assert.Equal(t, float64(150), details[1]) // JSON numbers are float64
+ assert.Equal(t, "success", details[2])
+ assert.Equal(t, true, details[3])
+}
+
+// TestLogAdapterDoesNotCreateDetailsForSingleArgument verifies single argument behavior.
+func TestLogAdapterDoesNotCreateDetailsForSingleArgument(t *testing.T) {
+ var buf bytes.Buffer
+ logger := slog.New(slog.NewJSONHandler(&buf, &slog.HandlerOptions{
+ Level: slog.LevelInfo,
+ }))
+ adapter := NewSplitLogger(logger)
+
+ adapter.Info("simple message")
+
+ logOutput := buf.String()
+
+ // Verify JSON structure
+ var logEntry map[string]any
+ err := json.Unmarshal([]byte(logOutput), &logEntry)
+ require.NoError(t, err)
+
+ assert.Equal(t, "INFO", logEntry["level"])
+ assert.Equal(t, "simple message", logEntry["msg"])
+
+ // Should NOT have details field for single argument
+ assert.NotContains(t, logEntry, "details")
+}
+
+// TestLogAdapterSupportsStructuredLoggingAtAllLevels verifies all levels support structured data.
+func TestLogAdapterSupportsStructuredLoggingAtAllLevels(t *testing.T) {
+ tests := []struct {
+ name string
+ logFunc func(*SlogToSplitAdapter, ...any)
+ expected string
+ }{
+ {"Error", (*SlogToSplitAdapter).Error, "ERROR"},
+ {"Warning", (*SlogToSplitAdapter).Warning, "WARN"},
+ {"Info", (*SlogToSplitAdapter).Info, "INFO"},
+ {"Debug", (*SlogToSplitAdapter).Debug, "DEBUG"},
+ {"Verbose", (*SlogToSplitAdapter).Verbose, "DEBUG"},
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ var buf bytes.Buffer
+ logger := slog.New(slog.NewJSONHandler(&buf, &slog.HandlerOptions{
+ Level: slog.LevelDebug,
+ }))
+ adapter := NewSplitLogger(logger)
+
+ tt.logFunc(adapter, "message", "key", "value")
+
+ logOutput := buf.String()
+
+ var logEntry map[string]any
+ err := json.Unmarshal([]byte(logOutput), &logEntry)
+ require.NoError(t, err)
+
+ assert.Equal(t, tt.expected, logEntry["level"])
+ assert.Equal(t, "message", logEntry["msg"])
+ assert.Contains(t, logEntry, "details")
+ })
+ }
+}
+
+// TestLogAdapterCreatesStructuredDetailsFromMultipleArguments verifies details array creation.
+func TestLogAdapterCreatesStructuredDetailsFromMultipleArguments(t *testing.T) {
+ var buf bytes.Buffer
+ logger := slog.New(slog.NewJSONHandler(&buf, &slog.HandlerOptions{
+ Level: slog.LevelInfo,
+ }))
+ adapter := NewSplitLogger(logger)
+
+ adapter.Info("message", " ", "with", " ", "multiple", " ", "args")
+
+ logOutput := buf.String()
+
+ // With structured logging, first arg is message, rest are details
+ var logEntry map[string]any
+ err := json.Unmarshal([]byte(logOutput), &logEntry)
+ require.NoError(t, err)
+
+ assert.Equal(t, "message", logEntry["msg"])
+ assert.Contains(t, logEntry, "details")
+ details, ok := logEntry["details"].([]any)
+ require.True(t, ok)
+ assert.Len(t, details, 6) // All the remaining arguments
+}
+
+// TestLogAdapterFiltersLogsByLevel verifies log level filtering works correctly.
+func TestLogAdapterFiltersLogsByLevel(t *testing.T) {
+ var buf bytes.Buffer
+ logger := slog.New(slog.NewJSONHandler(&buf, &slog.HandlerOptions{
+ Level: slog.LevelWarn, // Only warn and above
+ }))
+ adapter := NewSplitLogger(logger)
+
+ adapter.Debug("debug message")
+ adapter.Info("info message")
+ adapter.Warning("warning message")
+ adapter.Error("error message")
+
+ logOutput := buf.String()
+
+ // Debug and Info should be filtered out
+ assert.NotContains(t, logOutput, "debug message")
+ assert.NotContains(t, logOutput, "info message")
+
+ // Warning and Error should be present
+ assert.Contains(t, logOutput, "warning message")
+ assert.Contains(t, logOutput, "error message")
+}
+
+// TestLogAdapterHandlesEmptyMessage verifies empty message logging.
+func TestLogAdapterHandlesEmptyMessage(t *testing.T) {
+ var buf bytes.Buffer
+ logger := slog.New(slog.NewJSONHandler(&buf, &slog.HandlerOptions{
+ Level: slog.LevelInfo,
+ }))
+ adapter := NewSplitLogger(logger)
+
+ adapter.Info()
+
+ logOutput := buf.String()
+ // Empty message should still produce a log entry
+ assert.Contains(t, logOutput, "INFO")
+
+ var logEntry map[string]any
+ err := json.Unmarshal([]byte(logOutput), &logEntry)
+ require.NoError(t, err)
+ assert.Equal(t, "", logEntry["msg"])
+}
+
+// TestLogAdapterEscapesSpecialCharactersInJSON verifies JSON escaping.
+func TestLogAdapterEscapesSpecialCharactersInJSON(t *testing.T) {
+ var buf bytes.Buffer
+ logger := slog.New(slog.NewJSONHandler(&buf, &slog.HandlerOptions{
+ Level: slog.LevelInfo,
+ }))
+ adapter := NewSplitLogger(logger)
+
+ specialMsg := "message with \"quotes\" and \n newlines"
+ adapter.Info(specialMsg)
+
+ logOutput := buf.String()
+
+ // Verify JSON is valid despite special characters
+ var logEntry map[string]any
+ err := json.Unmarshal([]byte(logOutput), &logEntry)
+ require.NoError(t, err)
+
+ // Message should be properly escaped in JSON
+ assert.Contains(t, logEntry["msg"], "quotes")
+}
+
+// TestLogAdapterFormatsNonStringArgumentsCorrectly verifies non-string formatting.
+func TestLogAdapterFormatsNonStringArgumentsCorrectly(t *testing.T) {
+ var buf bytes.Buffer
+ logger := slog.New(slog.NewJSONHandler(&buf, &slog.HandlerOptions{
+ Level: slog.LevelInfo,
+ }))
+ adapter := NewSplitLogger(logger)
+
+ adapter.Info("count:", 42, "enabled:", true, "rate:", 3.14)
+
+ logOutput := buf.String()
+ assert.Contains(t, logOutput, "count:")
+ assert.Contains(t, logOutput, "42")
+ assert.Contains(t, logOutput, "enabled:")
+ assert.Contains(t, logOutput, "true")
+ assert.Contains(t, logOutput, "rate:")
+ assert.Contains(t, logOutput, "3.14")
+}
+
+// TestLogAdapterWorksWithTextHandler verifies text handler compatibility.
+func TestLogAdapterWorksWithTextHandler(t *testing.T) {
+ var buf bytes.Buffer
+ logger := slog.New(slog.NewTextHandler(&buf, &slog.HandlerOptions{
+ Level: slog.LevelInfo,
+ }))
+ adapter := NewSplitLogger(logger)
+
+ adapter.Info("text handler message")
+
+ logOutput := buf.String()
+ assert.Contains(t, logOutput, "level=INFO")
+ assert.Contains(t, logOutput, "text handler message")
+}
+
+// TestLogAdapterIsThreadSafe verifies concurrent logging safety.
+func TestLogAdapterIsThreadSafe(t *testing.T) {
+ var buf bytes.Buffer
+ logger := slog.New(slog.NewJSONHandler(&buf, &slog.HandlerOptions{
+ Level: slog.LevelInfo,
+ }))
+ adapter := NewSplitLogger(logger)
+
+ // Launch 10 goroutines, each logging 10 messages
+ const goroutines = 10
+ const messagesPerGoroutine = 10
+
+ done := make(chan bool)
+ for i := 0; i < goroutines; i++ {
+ go func(id int) {
+ for j := 0; j < messagesPerGoroutine; j++ {
+ adapter.Info("goroutine", id, "message", j)
+ }
+ done <- true
+ }(i)
+ }
+
+ // Wait for all goroutines to complete
+ for i := 0; i < goroutines; i++ {
+ <-done
+ }
+
+ logOutput := buf.String()
+
+ // Count the number of log entries
+ logLines := strings.Split(strings.TrimSpace(logOutput), "\n")
+ expectedLines := goroutines * messagesPerGoroutine
+ assert.Equal(t, expectedLines, len(logLines), "should have %d log lines", expectedLines)
+
+ // Verify all logs are valid JSON
+ for i, line := range logLines {
+ var logEntry map[string]any
+ err := json.Unmarshal([]byte(line), &logEntry)
+ assert.NoError(t, err, "line %d should be valid JSON: %s", i, line)
+ }
+}
diff --git a/mock_client_test.go b/mock_client_test.go
new file mode 100644
index 0000000..f6d681e
--- /dev/null
+++ b/mock_client_test.go
@@ -0,0 +1,324 @@
+// Code generated by mockery; DO NOT EDIT.
+// github.com/vektra/mockery
+// template: testify
+
+package split
+
+import (
+ "github.com/splitio/go-client/v6/splitio/client"
+ mock "github.com/stretchr/testify/mock"
+)
+
// NewMockClient creates a new instance of MockClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewMockClient(t interface {
	mock.TestingT
	Cleanup(func())
}) *MockClient {
	mock := &MockClient{}
	mock.Mock.Test(t)

	// Automatically assert all registered expectations when the test ends.
	t.Cleanup(func() { mock.AssertExpectations(t) })

	return mock
}

// MockClient is an autogenerated mock type for the Client type
type MockClient struct {
	mock.Mock
}

// MockClient_Expecter provides the type-safe EXPECT() recorder API over the
// underlying mock.Mock.
type MockClient_Expecter struct {
	mock *mock.Mock
}

func (_m *MockClient) EXPECT() *MockClient_Expecter {
	return &MockClient_Expecter{mock: &_m.Mock}
}
+
// BlockUntilReady provides a mock function for the type MockClient
func (_mock *MockClient) BlockUntilReady(timer int) error {
	ret := _mock.Called(timer)

	if len(ret) == 0 {
		panic("no return value specified for BlockUntilReady")
	}

	var r0 error
	// Prefer a function return registered via RunAndReturn; otherwise use the
	// static value configured via Return.
	if returnFunc, ok := ret.Get(0).(func(int) error); ok {
		r0 = returnFunc(timer)
	} else {
		r0 = ret.Error(0)
	}
	return r0
}

// MockClient_BlockUntilReady_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BlockUntilReady'
type MockClient_BlockUntilReady_Call struct {
	*mock.Call
}

// BlockUntilReady is a helper method to define mock.On call
// - timer int
func (_e *MockClient_Expecter) BlockUntilReady(timer interface{}) *MockClient_BlockUntilReady_Call {
	return &MockClient_BlockUntilReady_Call{Call: _e.mock.On("BlockUntilReady", timer)}
}

func (_c *MockClient_BlockUntilReady_Call) Run(run func(timer int)) *MockClient_BlockUntilReady_Call {
	_c.Call.Run(func(args mock.Arguments) {
		// nil guard: keep the zero value when the recorded argument is nil,
		// since a type assertion on nil would panic.
		var arg0 int
		if args[0] != nil {
			arg0 = args[0].(int)
		}
		run(
			arg0,
		)
	})
	return _c
}

func (_c *MockClient_BlockUntilReady_Call) Return(err error) *MockClient_BlockUntilReady_Call {
	_c.Call.Return(err)
	return _c
}

func (_c *MockClient_BlockUntilReady_Call) RunAndReturn(run func(timer int) error) *MockClient_BlockUntilReady_Call {
	_c.Call.Return(run)
	return _c
}
+
// Destroy provides a mock function for the type MockClient
func (_mock *MockClient) Destroy() {
	_mock.Called()
	return
}

// MockClient_Destroy_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Destroy'
type MockClient_Destroy_Call struct {
	*mock.Call
}

// Destroy is a helper method to define mock.On call
func (_e *MockClient_Expecter) Destroy() *MockClient_Destroy_Call {
	return &MockClient_Destroy_Call{Call: _e.mock.On("Destroy")}
}

func (_c *MockClient_Destroy_Call) Run(run func()) *MockClient_Destroy_Call {
	_c.Call.Run(func(args mock.Arguments) {
		run()
	})
	return _c
}

func (_c *MockClient_Destroy_Call) Return() *MockClient_Destroy_Call {
	_c.Call.Return()
	return _c
}

// RunAndReturn delegates to Run because Destroy has no return values.
func (_c *MockClient_Destroy_Call) RunAndReturn(run func()) *MockClient_Destroy_Call {
	_c.Run(run)
	return _c
}
+
// Track provides a mock function for the type MockClient
func (_mock *MockClient) Track(key string, trafficType string, eventType string, value interface{}, properties map[string]interface{}) error {
	ret := _mock.Called(key, trafficType, eventType, value, properties)

	if len(ret) == 0 {
		panic("no return value specified for Track")
	}

	var r0 error
	// Prefer a function return registered via RunAndReturn; otherwise use the
	// static value configured via Return.
	if returnFunc, ok := ret.Get(0).(func(string, string, string, interface{}, map[string]interface{}) error); ok {
		r0 = returnFunc(key, trafficType, eventType, value, properties)
	} else {
		r0 = ret.Error(0)
	}
	return r0
}

// MockClient_Track_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Track'
type MockClient_Track_Call struct {
	*mock.Call
}

// Track is a helper method to define mock.On call
// - key string
// - trafficType string
// - eventType string
// - value interface{}
// - properties map[string]interface{}
func (_e *MockClient_Expecter) Track(key interface{}, trafficType interface{}, eventType interface{}, value interface{}, properties interface{}) *MockClient_Track_Call {
	return &MockClient_Track_Call{Call: _e.mock.On("Track", key, trafficType, eventType, value, properties)}
}

func (_c *MockClient_Track_Call) Run(run func(key string, trafficType string, eventType string, value interface{}, properties map[string]interface{})) *MockClient_Track_Call {
	_c.Call.Run(func(args mock.Arguments) {
		// nil guards: keep zero values when a recorded argument is nil, since
		// a type assertion on nil would panic.
		var arg0 string
		if args[0] != nil {
			arg0 = args[0].(string)
		}
		var arg1 string
		if args[1] != nil {
			arg1 = args[1].(string)
		}
		var arg2 string
		if args[2] != nil {
			arg2 = args[2].(string)
		}
		var arg3 interface{}
		if args[3] != nil {
			arg3 = args[3].(interface{})
		}
		var arg4 map[string]interface{}
		if args[4] != nil {
			arg4 = args[4].(map[string]interface{})
		}
		run(
			arg0,
			arg1,
			arg2,
			arg3,
			arg4,
		)
	})
	return _c
}

func (_c *MockClient_Track_Call) Return(err error) *MockClient_Track_Call {
	_c.Call.Return(err)
	return _c
}

func (_c *MockClient_Track_Call) RunAndReturn(run func(key string, trafficType string, eventType string, value interface{}, properties map[string]interface{}) error) *MockClient_Track_Call {
	_c.Call.Return(run)
	return _c
}
+
// TreatmentWithConfig provides a mock function for the type MockClient
func (_mock *MockClient) TreatmentWithConfig(key interface{}, featureFlagName string, attributes map[string]interface{}) client.TreatmentResult {
	ret := _mock.Called(key, featureFlagName, attributes)

	if len(ret) == 0 {
		panic("no return value specified for TreatmentWithConfig")
	}

	var r0 client.TreatmentResult
	// Prefer a function return registered via RunAndReturn; otherwise use the
	// static value configured via Return.
	if returnFunc, ok := ret.Get(0).(func(interface{}, string, map[string]interface{}) client.TreatmentResult); ok {
		r0 = returnFunc(key, featureFlagName, attributes)
	} else {
		r0 = ret.Get(0).(client.TreatmentResult)
	}
	return r0
}

// MockClient_TreatmentWithConfig_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'TreatmentWithConfig'
type MockClient_TreatmentWithConfig_Call struct {
	*mock.Call
}

// TreatmentWithConfig is a helper method to define mock.On call
// - key interface{}
// - featureFlagName string
// - attributes map[string]interface{}
func (_e *MockClient_Expecter) TreatmentWithConfig(key interface{}, featureFlagName interface{}, attributes interface{}) *MockClient_TreatmentWithConfig_Call {
	return &MockClient_TreatmentWithConfig_Call{Call: _e.mock.On("TreatmentWithConfig", key, featureFlagName, attributes)}
}

func (_c *MockClient_TreatmentWithConfig_Call) Run(run func(key interface{}, featureFlagName string, attributes map[string]interface{})) *MockClient_TreatmentWithConfig_Call {
	_c.Call.Run(func(args mock.Arguments) {
		// nil guards: keep zero values when a recorded argument is nil, since
		// a type assertion on nil would panic.
		var arg0 interface{}
		if args[0] != nil {
			arg0 = args[0].(interface{})
		}
		var arg1 string
		if args[1] != nil {
			arg1 = args[1].(string)
		}
		var arg2 map[string]interface{}
		if args[2] != nil {
			arg2 = args[2].(map[string]interface{})
		}
		run(
			arg0,
			arg1,
			arg2,
		)
	})
	return _c
}

func (_c *MockClient_TreatmentWithConfig_Call) Return(treatmentResult client.TreatmentResult) *MockClient_TreatmentWithConfig_Call {
	_c.Call.Return(treatmentResult)
	return _c
}

func (_c *MockClient_TreatmentWithConfig_Call) RunAndReturn(run func(key interface{}, featureFlagName string, attributes map[string]interface{}) client.TreatmentResult) *MockClient_TreatmentWithConfig_Call {
	_c.Call.Return(run)
	return _c
}
+
// TreatmentsWithConfigByFlagSet provides a mock function for the type MockClient
func (_mock *MockClient) TreatmentsWithConfigByFlagSet(key interface{}, flagSet string, attributes map[string]interface{}) map[string]client.TreatmentResult {
	ret := _mock.Called(key, flagSet, attributes)

	if len(ret) == 0 {
		panic("no return value specified for TreatmentsWithConfigByFlagSet")
	}

	var r0 map[string]client.TreatmentResult
	// Prefer a function return registered via RunAndReturn; otherwise use the
	// static value configured via Return.
	if returnFunc, ok := ret.Get(0).(func(interface{}, string, map[string]interface{}) map[string]client.TreatmentResult); ok {
		r0 = returnFunc(key, flagSet, attributes)
	} else {
		// A configured nil map stays nil instead of panicking the assertion.
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(map[string]client.TreatmentResult)
		}
	}
	return r0
}

// MockClient_TreatmentsWithConfigByFlagSet_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'TreatmentsWithConfigByFlagSet'
type MockClient_TreatmentsWithConfigByFlagSet_Call struct {
	*mock.Call
}

// TreatmentsWithConfigByFlagSet is a helper method to define mock.On call
// - key interface{}
// - flagSet string
// - attributes map[string]interface{}
func (_e *MockClient_Expecter) TreatmentsWithConfigByFlagSet(key interface{}, flagSet interface{}, attributes interface{}) *MockClient_TreatmentsWithConfigByFlagSet_Call {
	return &MockClient_TreatmentsWithConfigByFlagSet_Call{Call: _e.mock.On("TreatmentsWithConfigByFlagSet", key, flagSet, attributes)}
}

func (_c *MockClient_TreatmentsWithConfigByFlagSet_Call) Run(run func(key interface{}, flagSet string, attributes map[string]interface{})) *MockClient_TreatmentsWithConfigByFlagSet_Call {
	_c.Call.Run(func(args mock.Arguments) {
		// nil guards: keep zero values when a recorded argument is nil, since
		// a type assertion on nil would panic.
		var arg0 interface{}
		if args[0] != nil {
			arg0 = args[0].(interface{})
		}
		var arg1 string
		if args[1] != nil {
			arg1 = args[1].(string)
		}
		var arg2 map[string]interface{}
		if args[2] != nil {
			arg2 = args[2].(map[string]interface{})
		}
		run(
			arg0,
			arg1,
			arg2,
		)
	})
	return _c
}

func (_c *MockClient_TreatmentsWithConfigByFlagSet_Call) Return(stringToTreatmentResult map[string]client.TreatmentResult) *MockClient_TreatmentsWithConfigByFlagSet_Call {
	_c.Call.Return(stringToTreatmentResult)
	return _c
}

func (_c *MockClient_TreatmentsWithConfigByFlagSet_Call) RunAndReturn(run func(key interface{}, flagSet string, attributes map[string]interface{}) map[string]client.TreatmentResult) *MockClient_TreatmentsWithConfigByFlagSet_Call {
	_c.Call.Return(run)
	return _c
}
diff --git a/mock_evaluation_test.go b/mock_evaluation_test.go
new file mode 100644
index 0000000..808acf9
--- /dev/null
+++ b/mock_evaluation_test.go
@@ -0,0 +1,574 @@
+package split
+
+import (
+ "context"
+ "errors"
+ "log/slog"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/open-feature/go-sdk/openfeature"
+ "github.com/splitio/go-client/v6/splitio/client"
+ "github.com/splitio/go-client/v6/splitio/conf"
+ "github.com/splitio/go-toolkit/v5/logging"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/mock"
+ "github.com/stretchr/testify/require"
+)
+
+// =============================================================================
+// Mock Test Infrastructure
+// =============================================================================
+
+// createTestProvider creates a provider initialized in localhost mode, ready for
+// mock client swapping. Each top-level test should call this once and share the
+// provider across subtests to minimize Split SDK factory goroutine count.
+//
+// The returned provider's client should be swapped with swapMockClient per subtest.
+func createTestProvider(t *testing.T, logBuffer *strings.Builder) *Provider {
+ t.Helper()
+
+ cfg := conf.Default()
+ cfg.SplitFile = testSplitFile
+ cfg.LoggerConfig.LogLevel = logging.LevelNone
+ cfg.BlockUntilReady = 10
+
+ var opts []Option
+ opts = append(opts, WithSplitConfig(cfg))
+ if logBuffer != nil {
+ logger := slog.New(slog.NewTextHandler(logBuffer, &slog.HandlerOptions{Level: slog.LevelDebug}))
+ opts = append(opts, WithLogger(logger))
+ }
+
+ provider, err := New("localhost", opts...)
+ require.NoError(t, err)
+
+ err = provider.InitWithContext(context.Background(), openfeature.NewEvaluationContext("", nil))
+ require.NoError(t, err)
+
+ t.Cleanup(func() {
+ // Swap in a permissive mock for Destroy during shutdown
+ permissive := &MockClient{}
+ permissive.On("Destroy").Maybe()
+ provider.mtx.Lock()
+ provider.client = permissive
+ provider.mtx.Unlock()
+
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+ _ = provider.ShutdownWithContext(ctx)
+ })
+
+ return provider
+}
+
+// swapMockClient creates a fresh MockClient for a subtest and installs it in the
+// shared provider. NewMockClient(t) auto-registers AssertExpectations via t.Cleanup,
+// so all expected calls are verified when the subtest completes.
+func swapMockClient(t *testing.T, provider *Provider) *MockClient {
+ t.Helper()
+ mockClient := NewMockClient(t)
+ provider.mtx.Lock()
+ provider.client = mockClient
+ provider.mtx.Unlock()
+ return mockClient
+}
+
+// createCloudTestProvider creates a test provider that reports as cloud mode
+// by overriding the splitConfig.OperationMode.
+func createCloudTestProvider(t *testing.T, logBuffer *strings.Builder) *Provider {
+ t.Helper()
+ provider := createTestProvider(t, logBuffer)
+ provider.splitConfig.OperationMode = ""
+ return provider
+}
+
+// =============================================================================
+// Track: SDK Call Argument Verification (Table-Driven)
+// =============================================================================
+
+func TestTrackMock_SDKArguments(t *testing.T) {
+ provider := createTestProvider(t, nil)
+
+ tests := []struct {
+ name string
+ targetingKey string
+ evalAttrs map[string]any
+ eventName string
+ detailsValue float64
+ useNoMetricValue bool
+ wantKey string
+ wantTrafficType string
+ wantEvent string
+ wantValue interface{}
+ wantProps interface{} // mock.Anything or specific map
+ }{
+ {
+ name: "default traffic type",
+ targetingKey: "user-123",
+ eventName: "checkout",
+ detailsValue: 42.0,
+ wantKey: "user-123",
+ wantTrafficType: "user",
+ wantEvent: "checkout",
+ wantValue: 42.0,
+ wantProps: mock.Anything,
+ },
+ {
+ name: "custom traffic type from eval context",
+ targetingKey: "acct-456",
+ evalAttrs: map[string]any{"trafficType": "account"},
+ eventName: "upgrade",
+ detailsValue: 99.99,
+ wantKey: "acct-456",
+ wantTrafficType: "account",
+ wantEvent: "upgrade",
+ wantValue: 99.99,
+ wantProps: mock.Anything,
+ },
+ {
+ name: "WithoutMetricValue passes nil instead of 0",
+ targetingKey: "user-123",
+ eventName: "page_view",
+ detailsValue: 0,
+ useNoMetricValue: true,
+ wantKey: "user-123",
+ wantTrafficType: "user",
+ wantEvent: "page_view",
+ wantValue: nil,
+ wantProps: mock.Anything,
+ },
+ {
+ name: "metric value passed through",
+ targetingKey: "user-123",
+ eventName: "purchase",
+ detailsValue: 149.99,
+ wantKey: "user-123",
+ wantTrafficType: "user",
+ wantEvent: "purchase",
+ wantValue: 149.99,
+ wantProps: mock.Anything,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ mockClient := swapMockClient(t, provider)
+
+ mockClient.EXPECT().
+ Track(tt.wantKey, tt.wantTrafficType, tt.wantEvent, tt.wantValue, tt.wantProps).
+ Once().
+ Return(nil)
+
+ ctx := context.Background()
+ if tt.useNoMetricValue {
+ ctx = WithoutMetricValue(ctx)
+ }
+
+ evalCtx := openfeature.NewEvaluationContext(tt.targetingKey, tt.evalAttrs)
+ details := openfeature.NewTrackingEventDetails(tt.detailsValue)
+
+ provider.Track(ctx, tt.eventName, evalCtx, details)
+ // AssertExpectations is called automatically via t.Cleanup
+ })
+ }
+}
+
+func TestTrackMock_PropertiesPassthrough(t *testing.T) {
+ provider := createTestProvider(t, nil)
+ mockClient := swapMockClient(t, provider)
+
+ expectedProps := map[string]interface{}{
+ "currency": "USD",
+ "item_count": 3,
+ }
+
+ mockClient.EXPECT().
+ Track("user-123", "user", "purchase", 99.0, expectedProps).
+ Once().
+ Return(nil)
+
+ evalCtx := openfeature.NewEvaluationContext("user-123", nil)
+ details := openfeature.NewTrackingEventDetails(99.0).
+ Add("currency", "USD").
+ Add("item_count", 3)
+
+ provider.Track(context.Background(), "purchase", evalCtx, details)
+}
+
+// =============================================================================
+// Track: SDK Error Handling
+// =============================================================================
+
+func TestTrackMock_SDKError_LoggedAtError(t *testing.T) {
+ var logBuffer strings.Builder
+ provider := createTestProvider(t, &logBuffer)
+ mockClient := swapMockClient(t, provider)
+
+ mockClient.EXPECT().
+ Track("user-123", "user", "bad_event", 1.0, mock.Anything).
+ Once().
+ Return(errors.New("SDK validation error: event name too long"))
+
+ evalCtx := openfeature.NewEvaluationContext("user-123", nil)
+ details := openfeature.NewTrackingEventDetails(1.0)
+
+ provider.Track(context.Background(), "bad_event", evalCtx, details)
+
+ logOutput := logBuffer.String()
+ assert.Contains(t, logOutput, "tracking event failed")
+ assert.Contains(t, logOutput, "SDK validation error")
+ assert.Contains(t, logOutput, "level=ERROR")
+}
+
+// =============================================================================
+// Track: Precondition Guard Tests (SDK NOT called)
+// =============================================================================
+
+func TestTrackMock_NotCalled_WhenEmptyTargetingKey(t *testing.T) {
+ provider := createTestProvider(t, nil)
+ mockClient := swapMockClient(t, provider)
+
+ evalCtx := openfeature.NewEvaluationContext("", nil)
+ details := openfeature.NewTrackingEventDetails(1.0)
+
+ provider.Track(context.Background(), "ignored_event", evalCtx, details)
+
+ mockClient.AssertNotCalled(t, "Track", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything)
+}
+
+func TestTrackMock_NotCalled_WhenContextCanceled(t *testing.T) {
+ provider := createTestProvider(t, nil)
+ mockClient := swapMockClient(t, provider)
+
+ ctx, cancel := context.WithCancel(context.Background())
+ cancel()
+
+ evalCtx := openfeature.NewEvaluationContext("user-123", nil)
+ details := openfeature.NewTrackingEventDetails(1.0)
+
+ provider.Track(ctx, "canceled_event", evalCtx, details)
+
+ mockClient.AssertNotCalled(t, "Track", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything)
+}
+
+// =============================================================================
+// ObjectEvaluation: Cloud Mode Flag Set (Table-Driven)
+// =============================================================================
+
+func TestObjectEvaluation_CloudMode(t *testing.T) {
+ configJSON := `{"primary":"#000"}`
+ provider := createCloudTestProvider(t, nil)
+
+ tests := []struct {
+ name string
+ mode EvaluationMode
+ flagParam string
+ setupMock func(*MockClient)
+ assertResult func(*testing.T, openfeature.InterfaceResolutionDetail)
+ wantFlagCount int
+ wantNotFound bool
+ }{
+ {
+ name: "default mode uses TreatmentsWithConfigByFlagSet",
+ flagParam: "ui-features",
+ setupMock: func(m *MockClient) {
+ m.EXPECT().
+ TreatmentsWithConfigByFlagSet("key", "ui-features", mock.Anything).
+ Once().
+ Return(map[string]client.TreatmentResult{
+ "theme": {Treatment: "dark", Config: &configJSON},
+ "layout": {Treatment: "grid", Config: nil},
+ })
+ },
+ wantFlagCount: 2,
+ assertResult: func(t *testing.T, result openfeature.InterfaceResolutionDetail) {
+ t.Helper()
+ flagSet := result.Value.(FlagSetResult)
+ assert.Equal(t, "dark", flagSet["theme"].Treatment)
+ assert.Equal(t, map[string]any{"primary": "#000"}, flagSet["theme"].Config)
+ assert.Equal(t, "grid", flagSet["layout"].Treatment)
+ assert.Nil(t, flagSet["layout"].Config)
+ },
+ },
+ {
+ name: "explicit set mode uses TreatmentsWithConfigByFlagSet",
+ mode: EvaluationModeSet,
+ flagParam: "my-set",
+ setupMock: func(m *MockClient) {
+ m.EXPECT().
+ TreatmentsWithConfigByFlagSet("key", "my-set", mock.Anything).
+ Once().
+ Return(map[string]client.TreatmentResult{
+ "flag_a": {Treatment: "on", Config: nil},
+ })
+ },
+ wantFlagCount: 1,
+ },
+ {
+ name: "individual mode uses TreatmentWithConfig",
+ mode: EvaluationModeIndividual,
+ flagParam: "single-flag",
+ setupMock: func(m *MockClient) {
+ m.EXPECT().
+ TreatmentWithConfig("key", "single-flag", mock.Anything).
+ Once().
+ Return(client.TreatmentResult{Treatment: "on", Config: &configJSON})
+ },
+ wantFlagCount: 1,
+ assertResult: func(t *testing.T, result openfeature.InterfaceResolutionDetail) {
+ t.Helper()
+ flagSet := result.Value.(FlagSetResult)
+ assert.Equal(t, "on", flagSet["single-flag"].Treatment)
+ assert.Equal(t, map[string]any{"primary": "#000"}, flagSet["single-flag"].Config)
+ },
+ },
+ {
+ name: "empty flag set returns FLAG_NOT_FOUND",
+ flagParam: "nonexistent-set",
+ setupMock: func(m *MockClient) {
+ m.EXPECT().
+ TreatmentsWithConfigByFlagSet("key", "nonexistent-set", mock.Anything).
+ Once().
+ Return(map[string]client.TreatmentResult{})
+ },
+ wantNotFound: true,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ mockClient := swapMockClient(t, provider)
+ tt.setupMock(mockClient)
+
+ ctx := context.Background()
+ if tt.mode != "" {
+ ctx = WithEvaluationMode(ctx, tt.mode)
+ }
+
+ flatCtx := openfeature.FlattenedContext{
+ openfeature.TargetingKey: "key",
+ }
+
+ result := provider.ObjectEvaluation(ctx, tt.flagParam, FlagSetResult{}, flatCtx)
+
+ if tt.wantNotFound {
+ assert.Contains(t, result.ResolutionError.Error(), string(openfeature.FlagNotFoundCode))
+ return
+ }
+
+ flagSet, ok := result.Value.(FlagSetResult)
+ require.True(t, ok, "Value should be FlagSetResult")
+ assert.Len(t, flagSet, tt.wantFlagCount)
+ assert.Equal(t, openfeature.TargetingMatchReason, result.Reason)
+
+ if tt.assertResult != nil {
+ tt.assertResult(t, result)
+ }
+ })
+ }
+}
+
+// =============================================================================
+// ObjectEvaluation: Cloud Mode JSON Config Parsing
+// =============================================================================
+
+func TestObjectEvaluation_CloudMode_ConfigParsesAllJSONTypes(t *testing.T) {
+ provider := createCloudTestProvider(t, nil)
+ mockClient := swapMockClient(t, provider)
+
+ arrayConfig := `[1,2,3]`
+ stringConfig := `"hello"`
+ numberConfig := `42`
+ boolConfig := `true`
+ nullConfig := `null`
+
+ mockClient.EXPECT().
+ TreatmentsWithConfigByFlagSet("key", "json-types", mock.Anything).
+ Once().
+ Return(map[string]client.TreatmentResult{
+ "array_flag": {Treatment: "on", Config: &arrayConfig},
+ "string_flag": {Treatment: "on", Config: &stringConfig},
+ "number_flag": {Treatment: "on", Config: &numberConfig},
+ "bool_flag": {Treatment: "on", Config: &boolConfig},
+ "null_flag": {Treatment: "on", Config: &nullConfig},
+ })
+
+ flatCtx := openfeature.FlattenedContext{openfeature.TargetingKey: "key"}
+ result := provider.ObjectEvaluation(context.Background(), "json-types", nil, flatCtx)
+
+ flagSet, ok := result.Value.(FlagSetResult)
+ require.True(t, ok)
+ assert.Len(t, flagSet, 5)
+
+ assert.Equal(t, []any{float64(1), float64(2), float64(3)}, flagSet["array_flag"].Config)
+ assert.Equal(t, "hello", flagSet["string_flag"].Config)
+ assert.Equal(t, float64(42), flagSet["number_flag"].Config)
+ assert.Equal(t, true, flagSet["bool_flag"].Config)
+ assert.Nil(t, flagSet["null_flag"].Config)
+}
+
+// =============================================================================
+// Scalar Evaluation: Mock-Based (Table-Driven)
+// =============================================================================
+
+func TestScalarEvaluation_MockClient(t *testing.T) {
+ configJSON := `{"rollout":"gradual"}`
+ provider := createTestProvider(t, nil)
+
+ tests := []struct {
+ name string
+ mockSetup func(*MockClient)
+ evaluate func(*Provider) interface{}
+ wantValue interface{}
+ wantMeta map[string]any // expected FlagMetadata["value"], nil if none
+ }{
+ {
+ name: "BooleanEvaluation/on returns true with config",
+ mockSetup: func(m *MockClient) {
+ m.EXPECT().
+ TreatmentWithConfig("user-1", "feature_x", mock.Anything).
+ Once().
+ Return(client.TreatmentResult{Treatment: "on", Config: &configJSON})
+ },
+ evaluate: func(p *Provider) interface{} {
+ flatCtx := openfeature.FlattenedContext{openfeature.TargetingKey: "user-1"}
+ return p.BooleanEvaluation(context.Background(), "feature_x", false, flatCtx)
+ },
+ wantValue: true,
+ wantMeta: map[string]any{"rollout": "gradual"},
+ },
+ {
+ name: "StringEvaluation/returns treatment string",
+ mockSetup: func(m *MockClient) {
+ m.EXPECT().
+ TreatmentWithConfig("user-1", "color_flag", mock.Anything).
+ Once().
+ Return(client.TreatmentResult{Treatment: "blue", Config: nil})
+ },
+ evaluate: func(p *Provider) interface{} {
+ flatCtx := openfeature.FlattenedContext{openfeature.TargetingKey: "user-1"}
+ return p.StringEvaluation(context.Background(), "color_flag", "red", flatCtx)
+ },
+ wantValue: "blue",
+ },
+ {
+ name: "IntEvaluation/parses treatment as int",
+ mockSetup: func(m *MockClient) {
+ m.EXPECT().
+ TreatmentWithConfig("user-1", "retry_count", mock.Anything).
+ Once().
+ Return(client.TreatmentResult{Treatment: "5", Config: nil})
+ },
+ evaluate: func(p *Provider) interface{} {
+ flatCtx := openfeature.FlattenedContext{openfeature.TargetingKey: "user-1"}
+ return p.IntEvaluation(context.Background(), "retry_count", 3, flatCtx)
+ },
+ wantValue: int64(5),
+ },
+ {
+ name: "FloatEvaluation/parses treatment as float",
+ mockSetup: func(m *MockClient) {
+ m.EXPECT().
+ TreatmentWithConfig("user-1", "discount", mock.Anything).
+ Once().
+ Return(client.TreatmentResult{Treatment: "0.15", Config: nil})
+ },
+ evaluate: func(p *Provider) interface{} {
+ flatCtx := openfeature.FlattenedContext{openfeature.TargetingKey: "user-1"}
+ return p.FloatEvaluation(context.Background(), "discount", 0.0, flatCtx)
+ },
+ wantValue: 0.15,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ mockClient := swapMockClient(t, provider)
+ tt.mockSetup(mockClient)
+
+ result := tt.evaluate(provider)
+
+ // Assert value based on result type
+ switch r := result.(type) {
+ case openfeature.BoolResolutionDetail:
+ assert.Equal(t, tt.wantValue, r.Value)
+ assert.Equal(t, openfeature.TargetingMatchReason, r.Reason)
+ if tt.wantMeta != nil {
+ assert.Equal(t, tt.wantMeta, r.FlagMetadata["value"])
+ }
+ case openfeature.StringResolutionDetail:
+ assert.Equal(t, tt.wantValue, r.Value)
+ assert.Equal(t, openfeature.TargetingMatchReason, r.Reason)
+ case openfeature.IntResolutionDetail:
+ assert.Equal(t, tt.wantValue, r.Value)
+ assert.Equal(t, openfeature.TargetingMatchReason, r.Reason)
+ case openfeature.FloatResolutionDetail:
+ assert.Equal(t, tt.wantValue, r.Value)
+ assert.Equal(t, openfeature.TargetingMatchReason, r.Reason)
+ default:
+ t.Fatalf("unexpected result type: %T", result)
+ }
+ })
+ }
+}
+
+// =============================================================================
+// Attributes Passthrough: Mock-Based (Table-Driven)
+// =============================================================================
+
+func TestEvaluation_MockClient_AttributeFiltering(t *testing.T) {
+ provider := createTestProvider(t, nil)
+
+ tests := []struct {
+ name string
+ flatCtx openfeature.FlattenedContext
+ wantAttrs map[string]interface{}
+ }{
+ {
+ name: "passes user attributes, excludes targetingKey",
+ flatCtx: openfeature.FlattenedContext{
+ openfeature.TargetingKey: "user-1",
+ "email": "user@test.com",
+ "plan": "premium",
+ },
+ wantAttrs: map[string]interface{}{
+ "email": "user@test.com",
+ "plan": "premium",
+ },
+ },
+ {
+ name: "excludes trafficType from attributes",
+ flatCtx: openfeature.FlattenedContext{
+ openfeature.TargetingKey: "user-1",
+ TrafficTypeKey: "account",
+ "email": "user@test.com",
+ },
+ wantAttrs: map[string]interface{}{
+ "email": "user@test.com",
+ },
+ },
+ {
+ name: "no extra attributes passes empty map",
+ flatCtx: openfeature.FlattenedContext{
+ openfeature.TargetingKey: "user-1",
+ },
+ wantAttrs: map[string]interface{}{},
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ mockClient := swapMockClient(t, provider)
+
+ mockClient.EXPECT().
+ TreatmentWithConfig("user-1", "feature", tt.wantAttrs).
+ Once().
+ Return(client.TreatmentResult{Treatment: "on", Config: nil})
+
+ result := provider.BooleanEvaluation(context.Background(), "feature", false, tt.flatCtx)
+ assert.True(t, result.Value)
+ })
+ }
+}
diff --git a/options.go b/options.go
new file mode 100644
index 0000000..2948ae6
--- /dev/null
+++ b/options.go
@@ -0,0 +1,111 @@
+package split
+
+import "context"
+
+// Private key types - MUST be unexported to prevent external collision
+type evalOptionsKeyType struct{}
+type trackOptionsKeyType struct{}
+
+// Package-level keys (unexported)
+var (
+ evalOptionsKey = evalOptionsKeyType{}
+ trackOptionsKey = trackOptionsKeyType{}
+)
+
+// EvaluationMode controls how ObjectEvaluation interprets the flag parameter.
+type EvaluationMode string
+
+const (
+ // EvaluationModeDefault uses the provider's default behavior.
+ // The actual mode is determined at evaluation time based on provider mode:
+ // - Cloud mode: flag set evaluation (TreatmentsWithConfigByFlagSet)
+ // - Localhost mode: individual flag evaluation (TreatmentWithConfig)
+ EvaluationModeDefault EvaluationMode = ""
+
+ // EvaluationModeSet treats the flag parameter as a flag set name.
+ // Uses TreatmentsWithConfigByFlagSet.
+ EvaluationModeSet EvaluationMode = "set"
+
+ // EvaluationModeIndividual treats the flag parameter as a single flag name.
+ // Uses TreatmentWithConfig.
+ EvaluationModeIndividual EvaluationMode = "individual"
+)
+
+// EvalOptions contains per-request evaluation options.
+type EvalOptions struct {
+ // Mode controls set vs individual evaluation in ObjectEvaluation.
+ // Ignored in localhost mode (always individual).
+ Mode EvaluationMode
+
+ // ImpressionDisabled disables impression tracking for this evaluation.
+ // Useful for health checks, load tests, internal tools.
+ //
+ // This mirrors Split Evaluator's per-request `impressionsDisabled` parameter.
+ // Split SDK-level impression modes (OPTIMIZED/DEBUG/NONE) are configured at
+ // initialization time via cfg.ImpressionsMode - those are NOT per-request.
+ //
+ // NOTE: Forward-looking API - not yet supported by Split Go SDK.
+ // Will be logged but not enforced until SDK adds per-evaluation support.
+ ImpressionDisabled bool
+}
+
+// WithEvalOptions adds evaluation options to context.
+func WithEvalOptions(ctx context.Context, opts EvalOptions) context.Context {
+ return context.WithValue(ctx, evalOptionsKey, opts)
+}
+
+// GetEvalOptions extracts evaluation options from context.
+// Returns zero value EvalOptions if not set.
+func GetEvalOptions(ctx context.Context) EvalOptions {
+ if opts, ok := ctx.Value(evalOptionsKey).(EvalOptions); ok {
+ return opts
+ }
+ return EvalOptions{}
+}
+
+// WithEvaluationMode sets only the evaluation mode.
+// Uses Get-Modify-Set pattern to preserve other EvalOptions fields.
+func WithEvaluationMode(ctx context.Context, mode EvaluationMode) context.Context {
+ opts := GetEvalOptions(ctx)
+ opts.Mode = mode
+ return WithEvalOptions(ctx, opts)
+}
+
+// WithImpressionDisabled disables impression tracking for evaluations on this context.
+// Uses Get-Modify-Set pattern to preserve other EvalOptions fields.
+// NOTE: Forward-looking API - not yet enforced by Split Go SDK.
+func WithImpressionDisabled(ctx context.Context) context.Context {
+ opts := GetEvalOptions(ctx)
+ opts.ImpressionDisabled = true
+ return WithEvalOptions(ctx, opts)
+}
+
+// TrackOptions contains per-request tracking options.
+type TrackOptions struct {
+ // MetricValueAbsent indicates that no metric value was provided.
+ // When true, the provider passes nil to Split instead of 0.
+ // This prevents polluting sum/average metrics with zeros.
+ MetricValueAbsent bool
+}
+
+// WithTrackOptions adds tracking options to context.
+func WithTrackOptions(ctx context.Context, opts TrackOptions) context.Context {
+ return context.WithValue(ctx, trackOptionsKey, opts)
+}
+
+// GetTrackOptions extracts tracking options from context.
+// Returns zero value TrackOptions if not set.
+func GetTrackOptions(ctx context.Context) TrackOptions {
+ if opts, ok := ctx.Value(trackOptionsKey).(TrackOptions); ok {
+ return opts
+ }
+ return TrackOptions{}
+}
+
+// WithoutMetricValue marks that no metric value should be sent.
+// Uses Get-Modify-Set pattern to preserve other TrackOptions fields.
+func WithoutMetricValue(ctx context.Context) context.Context {
+ opts := GetTrackOptions(ctx)
+ opts.MetricValueAbsent = true
+ return WithTrackOptions(ctx, opts)
+}
diff --git a/options_test.go b/options_test.go
new file mode 100644
index 0000000..5a15779
--- /dev/null
+++ b/options_test.go
@@ -0,0 +1,88 @@
+package split
+
+import (
+ "context"
+ "testing"
+
+ "github.com/open-feature/go-sdk/openfeature"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestWithEvalOptions(t *testing.T) {
+ ctx := context.Background()
+
+ ctx = WithEvalOptions(ctx, EvalOptions{
+ Mode: EvaluationModeIndividual,
+ ImpressionDisabled: true,
+ })
+
+ opts := GetEvalOptions(ctx)
+ assert.Equal(t, EvaluationModeIndividual, opts.Mode)
+ assert.True(t, opts.ImpressionDisabled)
+}
+
+func TestWithEvalOptions_NotSet(t *testing.T) {
+ ctx := context.Background()
+ opts := GetEvalOptions(ctx)
+
+ assert.Equal(t, EvaluationModeDefault, opts.Mode)
+ assert.False(t, opts.ImpressionDisabled)
+}
+
+func TestWithTrackOptions_MetricValueAbsent(t *testing.T) {
+ ctx := context.Background()
+ ctx = WithoutMetricValue(ctx)
+
+ opts := GetTrackOptions(ctx)
+ assert.True(t, opts.MetricValueAbsent)
+}
+
+func TestWithEvalOptions_MultipleCalls_MergesCorrectly(t *testing.T) {
+ ctx := context.Background()
+
+ // Set mode first
+ ctx = WithEvaluationMode(ctx, EvaluationModeIndividual)
+
+ // Set ImpressionDisabled second - should preserve mode
+ ctx = WithImpressionDisabled(ctx)
+
+ opts := GetEvalOptions(ctx)
+ assert.Equal(t, EvaluationModeIndividual, opts.Mode)
+ assert.True(t, opts.ImpressionDisabled)
+}
+
+func TestWithTrackOptions_MultipleCalls_MergesCorrectly(t *testing.T) {
+ ctx := context.Background()
+
+ ctx = WithoutMetricValue(ctx)
+
+ opts := GetTrackOptions(ctx)
+ assert.True(t, opts.MetricValueAbsent)
+}
+
+func TestContext_PassedThroughMiddlewareChain(t *testing.T) {
+ ctx := context.Background()
+ ctx = WithEvaluationMode(ctx, EvaluationModeIndividual)
+ ctx = WithImpressionDisabled(ctx)
+ ctx = WithoutMetricValue(ctx)
+
+ evalOpts := GetEvalOptions(ctx)
+ trackOpts := GetTrackOptions(ctx)
+
+ assert.Equal(t, EvaluationModeIndividual, evalOpts.Mode)
+ assert.True(t, evalOpts.ImpressionDisabled)
+ assert.True(t, trackOpts.MetricValueAbsent)
+}
+
+func TestTrack_MetricValueAbsent_TakesPrecedence(t *testing.T) {
+ // Set MetricValueAbsent even though details has a non-zero value
+ ctx := WithoutMetricValue(context.Background())
+ details := openfeature.NewTrackingEventDetails(99.99) // Has value!
+
+ // Verify that MetricValueAbsent takes precedence in context
+ trackOpts := GetTrackOptions(ctx)
+ assert.True(t, trackOpts.MetricValueAbsent, "MetricValueAbsent should be true")
+
+ // The value in details doesn't affect the context option
+ assert.Equal(t, 99.99, details.Value(), "Details still has the value")
+}
diff --git a/provider.go b/provider.go
index 762f17a..0b1cd45 100644
--- a/provider.go
+++ b/provider.go
@@ -1,232 +1,246 @@
-package split_openfeature_provider_go
+package split
import (
- "context"
- "encoding/json"
- "github.com/splitio/go-client/splitio/conf"
- "strconv"
-
- "github.com/open-feature/go-sdk/pkg/openfeature"
- "github.com/splitio/go-client/splitio/client"
+ "fmt"
+ "log/slog"
+ "sync"
+ "time"
+
+ of "github.com/open-feature/go-sdk/openfeature"
+ "github.com/splitio/go-client/v6/splitio/client"
+ "github.com/splitio/go-client/v6/splitio/conf"
+ "golang.org/x/sync/singleflight"
)
-type SplitProvider struct {
- client client.SplitClient
+// Provider implements the OpenFeature FeatureProvider interface for Split.io.
+//
+// # Goroutine Management and Lifecycle
+//
+// This provider spawns and manages goroutines with the following guarantees:
+//
+// 1. **Background Monitoring Goroutine** (monitorSplitUpdates)
+// - Spawned: During InitWithContext after SDK is ready
+// - Purpose: Monitors Split SDK for configuration changes
+// - Shutdown: Gracefully terminated via close(stopMonitor)
+//   - Guarantee: Always terminates within the configured monitoring interval (default 30s) after stopMonitor is closed
+// - Tracking: monitorDone channel closed when goroutine exits
+// - Safety: Panic recovery ensures monitorDone always closed
+//
+// 2. **Initialization Goroutine** (BlockUntilReady wrapper)
+// - Spawned: During InitWithContext to monitor SDK initialization
+// - Purpose: Wraps SDK's BlockUntilReady to allow context cancellation
+// - Termination: Always terminates when BlockUntilReady completes (max: BlockUntilReady timeout)
+// - Tracking: Tracked via sync.WaitGroup (initWg) - Add(1) before spawn, Done() on completion
+// - Cleanup: ShutdownWithContext blocks on initWg.Wait() before destroying client
+// - Guarantee: GUARANTEED no leak - Shutdown cannot complete until all init goroutines terminate
+// - Lifecycle: Short-lived, terminates within BlockUntilReady timeout (default 10s)
+//
+// 3. **Shutdown Goroutine** (Destroy wrapper)
+// - Spawned: During ShutdownWithContext to destroy Split SDK client
+// - Purpose: Wraps SDK's Destroy() to allow context timeout
+// - Termination: Terminates when Destroy() completes
+// - Known Issue: In streaming mode, Destroy() can block up to 1 hour (Split SDK SSE issue)
+// - Guarantee: Eventually terminates, but may outlive ShutdownWithContext's context timeout
+// - Impact: Acceptable - goroutine performs cleanup and terminates, doesn't affect functionality
+//
+// All goroutines are properly tracked and either terminate gracefully or have documented
+// termination guarantees. No unbounded goroutine leaks exist in normal operation.
+type Provider struct {
+ client Client
+ initGroup singleflight.Group
+ monitorDone chan struct{}
+ logger *slog.Logger
+ eventStream chan of.Event
+ stopMonitor chan struct{}
+ splitConfig *conf.SplitSdkConfig
+ factory *client.SplitFactory
+ loggedOnce sync.Map // one-time log deduplication (small fixed set of keys)
+ initWg sync.WaitGroup
+ monitoringInterval time.Duration
+ mtx sync.RWMutex // protects client, factory, eventStream access
+ initMu sync.Mutex // serializes InitWithContext calls
+ shutdown uint32
+}
+
+// logOnce logs a message only once per key using sync.Map for thread-safety.
+func (p *Provider) logOnce(key string, logFn func()) {
+ if _, loaded := p.loggedOnce.LoadOrStore(key, struct{}{}); !loaded {
+ logFn()
+ }
+}
+
+// Config holds provider configuration.
+type Config struct {
+ // SplitConfig is the Split SDK configuration.
+ // If nil, conf.Default() is used.
+ SplitConfig *conf.SplitSdkConfig
+
+ // Logger is the slog.Logger used for provider and Split SDK logs.
+ // If nil, slog.Default() is used.
+ Logger *slog.Logger
+
+ // APIKey is the Split SDK key or "localhost" for local mode.
+ APIKey string
+
+ // MonitoringInterval is how often the provider checks for split definition changes.
+ // Default: 30 seconds. Minimum: 5 seconds.
+ // Lower values increase responsiveness but also CPU usage.
+ MonitoringInterval time.Duration
+}
+
+// Option configures a provider Config.
+type Option interface {
+ apply(*Config)
+}
+
+// WithSplitConfig sets the Split SDK configuration.
+func WithSplitConfig(cfg *conf.SplitSdkConfig) Option {
+ return withSplitConfig{cfg}
+}
+
+type withSplitConfig struct {
+ cfg *conf.SplitSdkConfig
+}
+
+func (o withSplitConfig) apply(c *Config) {
+ c.SplitConfig = o.cfg
+}
+
+// WithLogger sets the logger for provider and Split SDK logs.
+// The provider wraps this logger with SlogToSplitAdapter for Split SDK compatibility,
+// unless SplitConfig.Logger is already set.
+// This ensures unified logging across the provider, Split SDK, and OpenFeature SDK
+// when the same logger is also passed to hooks.NewLoggingHook().
+func WithLogger(logger *slog.Logger) Option {
+ return withLogger{logger}
+}
+
+type withLogger struct {
+ logger *slog.Logger
+}
+
+func (o withLogger) apply(c *Config) {
+ c.Logger = o.logger
+}
+
+// WithMonitoringInterval sets how often the provider checks for split definition changes.
+// Default: 30 seconds. Minimum: 5 seconds. Values below minimum are clamped.
+func WithMonitoringInterval(interval time.Duration) Option {
+ return withMonitoringInterval{interval}
}
-func NewProvider(splitClient client.SplitClient) (*SplitProvider, error) {
- return &SplitProvider{
- client: splitClient,
- }, nil
+type withMonitoringInterval struct {
+ interval time.Duration
}
-func NewProviderSimple(apiKey string) (*SplitProvider, error) {
- cfg := conf.Default()
- factory, err := client.NewSplitFactory(apiKey, cfg)
- if err != nil {
- return nil, err
- }
- splitClient := factory.Client()
- err = splitClient.BlockUntilReady(10)
- if err != nil {
- return nil, err
- }
- return NewProvider(*splitClient)
+func (o withMonitoringInterval) apply(c *Config) {
+ c.MonitoringInterval = o.interval
}
-func (provider *SplitProvider) Metadata() openfeature.Metadata {
- return openfeature.Metadata{
- Name: "Split",
- }
-}
-
-func (provider *SplitProvider) BooleanEvaluation(ctx context.Context, flag string, defaultValue bool, evalCtx openfeature.FlattenedContext) openfeature.BoolResolutionDetail {
- if noTargetingKey(evalCtx) {
- return openfeature.BoolResolutionDetail{
- Value: defaultValue,
- ProviderResolutionDetail: resolutionDetailTargetingKeyMissing(),
- }
- }
- evaluated := provider.evaluateTreatment(flag, evalCtx)
- if noTreatment(evaluated) {
- return openfeature.BoolResolutionDetail{
- Value: defaultValue,
- ProviderResolutionDetail: resolutionDetailNotFound(evaluated),
- }
- }
- var value bool
- if evaluated == "true" || evaluated == "on" {
- value = true
- } else if evaluated == "false" || evaluated == "off" {
- value = false
- } else {
- return openfeature.BoolResolutionDetail{
- Value: defaultValue,
- ProviderResolutionDetail: resolutionDetailParseError(evaluated),
- }
+// New creates a Split provider with the given configuration.
+//
+// The apiKey parameter is required. Additional configuration can be provided
+// via functional options.
+//
+// Example with defaults:
+//
+// provider, _ := split.New("YOUR_SDK_KEY")
+//
+// Example with custom logger:
+//
+// logger := slog.New(slog.NewJSONHandler(os.Stdout, nil))
+// provider, _ := split.New("YOUR_SDK_KEY", split.WithLogger(logger))
+//
+// Example with custom Split SDK config:
+//
+// cfg := conf.Default()
+// cfg.OperationMode = "localhost"
+// provider, _ := split.New("localhost", split.WithSplitConfig(cfg))
+//
+// Example with unified logging (provider, Split SDK, and OpenFeature SDK):
+//
+// logger := slog.New(slog.NewJSONHandler(os.Stdout, nil))
+// slog.SetDefault(logger)
+// provider, _ := split.New("YOUR_SDK_KEY", split.WithLogger(logger))
+// openfeature.AddHooks(hooks.NewLoggingHook(false, logger))
+//
+// The provider is created in NotReady state. Call Init() (or use OpenFeature's
+// SetProviderAndWait) to wait for the SDK to download splits. Always call Shutdown()
+// when done to clean up resources.
+func New(apiKey string, opts ...Option) (*Provider, error) {
+ if apiKey == "" {
+ return nil, fmt.Errorf("split provider: apiKey is required")
}
- return openfeature.BoolResolutionDetail{
- Value: value,
- ProviderResolutionDetail: resolutionDetailTargetingMatch(evaluated),
- }
-}
-func (provider *SplitProvider) StringEvaluation(ctx context.Context, flag string, defaultValue string, evalCtx openfeature.FlattenedContext) openfeature.StringResolutionDetail {
- if noTargetingKey(evalCtx) {
- return openfeature.StringResolutionDetail{
- Value: defaultValue,
- ProviderResolutionDetail: resolutionDetailTargetingKeyMissing(),
- }
- }
- evaluated := provider.evaluateTreatment(flag, evalCtx)
- if noTreatment(evaluated) {
- return openfeature.StringResolutionDetail{
- Value: defaultValue,
- ProviderResolutionDetail: resolutionDetailNotFound(evaluated),
- }
+ cfg := &Config{
+ APIKey: apiKey,
+ SplitConfig: nil,
+ Logger: nil,
}
- return openfeature.StringResolutionDetail{
- Value: evaluated,
- ProviderResolutionDetail: resolutionDetailTargetingMatch(evaluated),
- }
-}
-func (provider *SplitProvider) FloatEvaluation(ctx context.Context, flag string, defaultValue float64, evalCtx openfeature.FlattenedContext) openfeature.FloatResolutionDetail {
- if noTargetingKey(evalCtx) {
- return openfeature.FloatResolutionDetail{
- Value: defaultValue,
- ProviderResolutionDetail: resolutionDetailTargetingKeyMissing(),
- }
- }
- evaluated := provider.evaluateTreatment(flag, evalCtx)
- if noTreatment(evaluated) {
- return openfeature.FloatResolutionDetail{
- Value: defaultValue,
- ProviderResolutionDetail: resolutionDetailNotFound(evaluated),
- }
+ for _, opt := range opts {
+ opt.apply(cfg)
}
- floatEvaluated, parseErr := strconv.ParseFloat(evaluated, 64)
- if parseErr != nil {
- return openfeature.FloatResolutionDetail{
- Value: defaultValue,
- ProviderResolutionDetail: resolutionDetailParseError(evaluated),
- }
- }
- return openfeature.FloatResolutionDetail{
- Value: floatEvaluated,
- ProviderResolutionDetail: resolutionDetailTargetingMatch(evaluated),
- }
-}
-func (provider *SplitProvider) IntEvaluation(ctx context.Context, flag string, defaultValue int64, evalCtx openfeature.FlattenedContext) openfeature.IntResolutionDetail {
- if noTargetingKey(evalCtx) {
- return openfeature.IntResolutionDetail{
- Value: defaultValue,
- ProviderResolutionDetail: resolutionDetailTargetingKeyMissing(),
- }
- }
- evaluated := provider.evaluateTreatment(flag, evalCtx)
- if noTreatment(evaluated) {
- return openfeature.IntResolutionDetail{
- Value: defaultValue,
- ProviderResolutionDetail: resolutionDetailNotFound(evaluated),
- }
- }
- intEvaluated, parseErr := strconv.ParseInt(evaluated, 10, 64)
- if parseErr != nil {
- return openfeature.IntResolutionDetail{
- Value: defaultValue,
- ProviderResolutionDetail: resolutionDetailParseError(evaluated),
- }
+ if cfg.SplitConfig == nil {
+ cfg.SplitConfig = conf.Default()
}
- return openfeature.IntResolutionDetail{
- Value: intEvaluated,
- ProviderResolutionDetail: resolutionDetailTargetingMatch(evaluated),
+ if cfg.Logger == nil {
+ cfg.Logger = slog.Default()
}
-}
-func (provider *SplitProvider) ObjectEvaluation(ctx context.Context, flag string, defaultValue interface{}, evalCtx openfeature.FlattenedContext) openfeature.InterfaceResolutionDetail {
- if noTargetingKey(evalCtx) {
- return openfeature.InterfaceResolutionDetail{
- Value: defaultValue,
- ProviderResolutionDetail: resolutionDetailTargetingKeyMissing(),
- }
- }
- evaluated := provider.evaluateTreatment(flag, evalCtx)
- if noTreatment(evaluated) {
- return openfeature.InterfaceResolutionDetail{
- Value: defaultValue,
- ProviderResolutionDetail: resolutionDetailNotFound(evaluated),
- }
- }
- var data map[string]interface{}
- parseErr := json.Unmarshal([]byte(evaluated), &data)
- if parseErr != nil {
- return openfeature.InterfaceResolutionDetail{
- Value: defaultValue,
- ProviderResolutionDetail: resolutionDetailParseError(evaluated),
- }
- } else {
- return openfeature.InterfaceResolutionDetail{
- Value: data,
- ProviderResolutionDetail: resolutionDetailTargetingMatch(evaluated),
- }
+ if cfg.SplitConfig.BlockUntilReady <= 0 {
+ cfg.SplitConfig.BlockUntilReady = defaultSDKTimeout
}
-}
-
-func (provider *SplitProvider) Hooks() []openfeature.Hook {
- return []openfeature.Hook{}
-}
-
-// *** Helpers ***
-
-func (provider *SplitProvider) evaluateTreatment(flag string, evalContext openfeature.FlattenedContext) string {
- return provider.client.Treatment(evalContext[openfeature.TargetingKey], flag, nil)
-}
-
-func noTargetingKey(evalContext openfeature.FlattenedContext) bool {
- _, ok := evalContext[openfeature.TargetingKey]
- return !ok
-}
+ providerLogger := cfg.Logger.With("source", "split-provider")
-func noTreatment(treatment string) bool {
- return treatment == "" || treatment == "control"
-}
+ // Apply monitoring interval defaults and minimum
+ monitoringInterval := cfg.MonitoringInterval
+ if monitoringInterval == 0 {
+ monitoringInterval = defaultMonitoringInterval
+ } else if monitoringInterval < minMonitoringInterval {
+ providerLogger.Warn("monitoring interval below minimum, using minimum",
+ "requested", monitoringInterval,
+ "minimum", minMonitoringInterval)
+ monitoringInterval = minMonitoringInterval
+ }
-func resolutionDetailNotFound(variant string) openfeature.ProviderResolutionDetail {
- return providerResolutionDetailError(
- openfeature.NewFlagNotFoundResolutionError(
- "Flag not found."),
- openfeature.DefaultReason,
- variant)
-}
+ if cfg.SplitConfig.Logger == nil {
+ splitSDKLogger := cfg.Logger.With("source", "split-sdk")
+ cfg.SplitConfig.Logger = NewSplitLogger(splitSDKLogger)
+ }
-func resolutionDetailParseError(variant string) openfeature.ProviderResolutionDetail {
- return providerResolutionDetailError(
- openfeature.NewParseErrorResolutionError("Error parsing the treatment to the given type."),
- openfeature.ErrorReason,
- variant)
-}
+ factory, err := client.NewSplitFactory(cfg.APIKey, cfg.SplitConfig)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create Split factory: %w", err)
+ }
-func resolutionDetailTargetingKeyMissing() openfeature.ProviderResolutionDetail {
- return providerResolutionDetailError(
- openfeature.NewTargetingKeyMissingResolutionError("Targeting key is required and missing."),
- openfeature.ErrorReason,
- "")
-}
+ provider := &Provider{
+ client: factory.Client(),
+ factory: factory,
+ eventStream: make(chan of.Event, eventChannelBuffer),
+ stopMonitor: make(chan struct{}),
+ monitorDone: make(chan struct{}),
+ splitConfig: cfg.SplitConfig,
+ monitoringInterval: monitoringInterval,
+ logger: providerLogger,
+ }
-func providerResolutionDetailError(error openfeature.ResolutionError, reason openfeature.Reason, variant string) openfeature.ProviderResolutionDetail {
- return openfeature.ProviderResolutionDetail{
- ResolutionError: error,
- Reason: reason,
- Variant: variant,
+ mode := "cloud"
+ if provider.isLocalhostMode() {
+ mode = "localhost"
}
+ providerLogger.Info("Split provider created",
+ "mode", mode,
+ "block_until_ready", cfg.SplitConfig.BlockUntilReady)
+
+ return provider, nil
}
-func resolutionDetailTargetingMatch(variant string) openfeature.ProviderResolutionDetail {
- return openfeature.ProviderResolutionDetail{
- Reason: openfeature.TargetingMatchReason,
- Variant: variant,
+// Metadata returns provider metadata with name "Split".
+func (p *Provider) Metadata() of.Metadata {
+ return of.Metadata{
+ Name: "Split",
}
}
diff --git a/provider_test.go b/provider_test.go
index 8667a3a..d8104a3 100644
--- a/provider_test.go
+++ b/provider_test.go
@@ -1,451 +1,403 @@
-package split_openfeature_provider_go
+//nolint:dupl,gocognit // Test patterns: type-specific tests have similar structure, comprehensive tests have higher complexity
+package split
import (
- "github.com/open-feature/go-sdk/pkg/openfeature"
- "github.com/splitio/go-client/splitio/client"
- "github.com/splitio/go-client/splitio/conf"
- "github.com/splitio/go-toolkit/logging"
- "reflect"
+ "context"
+ "fmt"
+ "log/slog"
"strings"
+ "sync"
"testing"
+ "time"
+
+ "github.com/open-feature/go-sdk/openfeature"
+ "github.com/splitio/go-client/v6/splitio/conf"
+ "github.com/splitio/go-toolkit/v5/logging"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "go.uber.org/goleak"
)
+// =============================================================================
+// Test Constants
+// =============================================================================
+
+// Test flag names used across multiple tests
+const (
+ flagNonExistent = "random-non-existent-feature"
+ flagSomeOther = "some_other_feature"
+ flagMyFeature = "my_feature"
+ flagInt = "int_feature"
+ flagObj = "obj_feature"
+ flagUnparseable = "unparseable_feature"
+ flagMalformedJSON = "malformed_json_feature"
+ // treatmentOn and treatmentOff are defined in constants.go
+ treatmentUnparseable = "not-a-valid-type" // Treatment that cannot be parsed as bool/int/float
+ testClientName = "test_client"
+ testSplitFile = "testdata/split.yaml"
+ providerNameSplit = "Split"
+)
+
+// =============================================================================
+// Test Main & Shared Helpers
+// =============================================================================
+
+// TestMain adds goroutine leak detection to all tests.
+// Uses goleak to detect goroutine leaks from OUR code (external dependencies ignored).
+func TestMain(m *testing.M) {
+ goleak.VerifyTestMain(m,
+ // Ignore OpenFeature SDK event executor goroutines (created per test via SetProvider)
+ // Use IgnoreAnyFunction because these goroutines can be in various states
+ // Note: Function names differ between normal and race detector builds
+ goleak.IgnoreAnyFunction("github.com/open-feature/go-sdk/openfeature.(*eventExecutor).startEventListener.func1.1"),
+ goleak.IgnoreAnyFunction("github.com/open-feature/go-sdk/openfeature.newEventExecutor.(*eventExecutor).startEventListener.func1.1"), // -race variant
+ goleak.IgnoreAnyFunction("github.com/open-feature/go-sdk/openfeature.(*eventExecutor).startListeningAndShutdownOld.func1"),
+ goleak.IgnoreAnyFunction("github.com/open-feature/go-sdk/openfeature.newEventExecutor.(*eventExecutor).startListeningAndShutdownOld.func1"), // -race variant
+ goleak.IgnoreAnyFunction("github.com/open-feature/go-sdk/openfeature.(*eventExecutor).triggerEvent"),
+ // Ignore Split SDK background goroutines (created during individual tests)
+ goleak.IgnoreTopFunction("github.com/splitio/go-split-commons/v8/synchronizer.(*ManagerImpl).Start.func1"),
+ goleak.IgnoreTopFunction("github.com/splitio/go-split-commons/v8/synchronizer.(*ManagerImpl).StartBGSync.func1"),
+ // Ignore standard library goroutines
+ goleak.IgnoreTopFunction("internal/poll.runtime_pollWait"),
+ goleak.IgnoreTopFunction("time.Sleep"),
+ )
+}
+
+// create sets up a provider via the global OpenFeature SDK and returns a client.
+// Used for high-level OpenFeature API testing (BooleanValue, StringValue, etc.).
func create(t *testing.T) *openfeature.Client {
+ t.Helper()
cfg := conf.Default()
- cfg.SplitFile = "./split.yaml"
+ cfg.SplitFile = testSplitFile
cfg.LoggerConfig.LogLevel = logging.LevelNone
- factory, err := client.NewSplitFactory("localhost", cfg)
- if err != nil {
- // error
- t.Error("Error creating split factory")
- }
- splitClient := factory.Client()
- err = splitClient.BlockUntilReady(10)
- if err != nil {
- // error timeout
- t.Error("Split sdk timeout error")
- }
- provider, err := NewProvider(*splitClient)
- if err != nil {
- t.Error(err)
- }
- if provider == nil {
- t.Error("Error creating Split Provider")
- }
- openfeature.SetProvider(provider)
- return openfeature.NewClient("test_client")
+ cfg.BlockUntilReady = 10 // Must be positive
+
+ provider, err := New("localhost", WithSplitConfig(cfg))
+ require.NoError(t, err, "Failed to create provider")
+ require.NotNil(t, provider, "Provider should not be nil")
+
+ // Proper cleanup: Shutdown provider when test completes
+ t.Cleanup(func() {
+ ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+ defer cancel()
+ _ = openfeature.ShutdownWithContext(ctx)
+ })
+
+ // Use context-aware SetProviderWithContextAndWait (gold standard)
+ ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
+ defer cancel()
+ err = openfeature.SetProviderWithContextAndWait(ctx, provider)
+ require.NoError(t, err, "Failed to set provider")
+
+ return openfeature.NewClient(testClientName)
}
func evaluationContext() openfeature.EvaluationContext {
return openfeature.NewEvaluationContext("key", nil)
}
-func TestCreateSimple(t *testing.T) {
- provider, err := NewProviderSimple("localhost")
- if err != nil {
- t.Error(err)
- }
- if provider == nil {
- t.Error("Error creating Split Provider")
- }
-}
-
-func TestUseDefault(t *testing.T) {
- ofClient := create(t)
- flagName := "random-non-existent-feature"
- evalCtx := evaluationContext()
-
- result, err := ofClient.BooleanValue(nil, flagName, false, evalCtx)
- if err == nil {
- t.Error("Should have returned flag not found error")
- } else if !strings.Contains(err.Error(), string(openfeature.FlagNotFoundCode)) {
- t.Errorf("Unexpected error occurred %s", err.Error())
- } else if result == true {
- t.Error("Result was true, but should have been default value of false")
- }
- result, err = ofClient.BooleanValue(nil, flagName, true, evalCtx)
- if err == nil {
- t.Error("Should have returned flag not found error")
- } else if !strings.Contains(err.Error(), string(openfeature.FlagNotFoundCode)) {
- t.Errorf("Unexpected error occurred %s", err.Error())
- } else if result == false {
- t.Error("Result was false, but should have been default value of true")
- }
-}
-
-func TestMissingTargetingKey(t *testing.T) {
- ofClient := create(t)
- flagName := "random-non-existent-feature"
-
- result, err := ofClient.BooleanValue(nil, flagName, false, openfeature.EvaluationContext{})
- if err == nil {
- t.Error("Should have returned targeting key missing error")
- } else if !strings.Contains(err.Error(), string(openfeature.TargetingKeyMissingCode)) {
- t.Errorf("Unexpected error occurred %s", err.Error())
- } else if result == true {
- t.Error("Result was true, but should have been default value of false")
- }
-}
+// =============================================================================
+// Provider Creation Tests
+// =============================================================================
-func TestGetControlVariantNonExistentSplit(t *testing.T) {
- ofClient := create(t)
- flagName := "random-non-existent-feature"
- evalCtx := evaluationContext()
-
- result, err := ofClient.BooleanValueDetails(nil, flagName, false, evalCtx)
- if err == nil {
- t.Error("Should have returned flag not found error")
- } else if !strings.Contains(err.Error(), string(openfeature.FlagNotFoundCode)) {
- t.Errorf("Unexpected error occurred %s", err.Error())
- } else if result.Value == true {
- t.Error("Result was true, but should have been default value of false")
- } else if result.Variant != "control" {
- t.Error("Variant should be control due to Split Go SDK functionality")
- }
-}
-
-func TestGetBooleanSplit(t *testing.T) {
- ofClient := create(t)
- flagName := "some_other_feature"
- evalCtx := evaluationContext()
-
- result, err := ofClient.BooleanValue(nil, flagName, true, evalCtx)
- if err != nil {
- t.Errorf("Unexpected error occurred %s", err.Error())
- } else if result == true {
- t.Error("Result was true, but should have been false as set in split.yaml")
- }
-}
-
-func TestGetBooleanWithKeySplit(t *testing.T) {
- ofClient := create(t)
- flagName := "my_feature"
- evalCtx := evaluationContext()
-
- result, err := ofClient.BooleanValue(nil, flagName, false, evalCtx)
- if err != nil {
- t.Errorf("Unexpected error occurred %s", err.Error())
- } else if result == false {
- t.Error("Result was false, but should have been true as set in split.yaml")
- }
-
- evalCtx = openfeature.NewEvaluationContext("randomKey", nil)
- result, err = ofClient.BooleanValue(nil, flagName, true, evalCtx)
- if err != nil {
- t.Errorf("Unexpected error occurred %s", err.Error())
- } else if result == true {
- t.Error("Result was true, but should have been false as set in split.yaml")
- }
-}
-
-func TestGetStringSplit(t *testing.T) {
- ofClient := create(t)
- flagName := "some_other_feature"
- evalCtx := evaluationContext()
-
- result, err := ofClient.StringValue(nil, flagName, "on", evalCtx)
- if err != nil {
- t.Errorf("Unexpected error occurred %s", err.Error())
- } else if result != "off" {
- t.Errorf("Result was %s, not off as set in split.yaml", result)
- }
-}
+func TestCreateSimple(t *testing.T) {
+ // Test New() with configuration
+ cfg := conf.Default()
+ cfg.SplitFile = testSplitFile
+ cfg.LoggerConfig.LogLevel = logging.LevelNone
+ cfg.BlockUntilReady = 10 // Must be positive
-func TestGetIntegerSplit(t *testing.T) {
- ofClient := create(t)
- flagName := "int_feature"
- evalCtx := evaluationContext()
-
- result, err := ofClient.IntValue(nil, flagName, 0, evalCtx)
- if err != nil {
- t.Errorf("Unexpected error occurred %s", err.Error())
- } else if result != 32 {
- t.Errorf("Result was %d, not 32 as set in split.yaml", result)
- }
+ provider, err := New("localhost", WithSplitConfig(cfg))
+ require.NoError(t, err, "Provider creation should succeed")
+ assert.NotNil(t, provider, "Provider should not be nil")
+ defer func() { _ = provider.ShutdownWithContext(context.Background()) }()
}
-func TestGetObjectSplit(t *testing.T) {
- ofClient := create(t)
- flagName := "obj_feature"
- evalCtx := evaluationContext()
-
- result, err := ofClient.ObjectValue(nil, flagName, 0, evalCtx)
- expectedResult := map[string]interface{}{
- "key": "value",
- }
- if err != nil {
- t.Errorf("Unexpected error occurred %s", err.Error())
- } else if !reflect.DeepEqual(result, expectedResult) {
- t.Error("Result was not map from key to value as set in split.yaml")
- }
+// TestNewErrors tests error handling in New constructor.
+func TestNewErrors(t *testing.T) {
+ // Test with empty API key - should fail validation in New before any factory is created
+ provider, err := New("")
+ assert.Error(t, err, "Empty API key should cause error")
+ assert.Nil(t, provider, "Provider should be nil when creation fails")
+
+ // Test with invalid API key format - Split SDK should reject it
+ provider, err = New("invalid-key-format-!@#$%")
+ // Note: Split SDK might accept any string as API key and only fail on network calls
+ _ = provider
+ _ = err
}
-func TestGetFloatSplit(t *testing.T) {
- ofClient := create(t)
- flagName := "int_feature"
- evalCtx := evaluationContext()
-
- result, err := ofClient.FloatValue(nil, flagName, 0, evalCtx)
- if err != nil {
- t.Errorf("Unexpected error occurred %s", err.Error())
- } else if result != float64(32) {
- t.Errorf("Result was %f, not 32 as set in split.yaml", result)
- }
-}
+// =============================================================================
+// Metadata Tests
+// =============================================================================
-func TestMetadataName(t *testing.T) {
+func TestMetadataReturnsProviderName(t *testing.T) {
ofClient := create(t)
- if ofClient.Metadata().Name() != "test_client" {
- t.Error("Client name was not set properly")
- }
- if openfeature.ProviderMetadata().Name != "Split" {
- t.Errorf("Provider metadata name was %s, not Split", openfeature.ProviderMetadata().Name)
- }
+ assert.Equal(t, testClientName, ofClient.Metadata().Domain(), "Client name should match")
+ assert.Equal(t, providerNameSplit, openfeature.ProviderMetadata().Name, "Provider name should be 'Split'")
}
-func TestBooleanDetails(t *testing.T) {
- ofClient := create(t)
- flagName := "some_other_feature"
- evalCtx := evaluationContext()
-
- result, err := ofClient.BooleanValueDetails(nil, flagName, true, evalCtx)
- if err != nil {
- t.Errorf("Unexpected error occurred %s", err.Error())
- } else if result.FlagKey != flagName {
- t.Errorf("Flag name is %s, not %s", result.FlagKey, flagName)
- } else if !strings.Contains(string(result.Reason), string(openfeature.TargetingMatchReason)) {
- t.Errorf("reason is %s, not targeting match", result.Reason)
- } else if result.Value == true {
- t.Error("Result was true, but should have been false as in split.yaml")
- } else if result.Variant != "off" {
- t.Errorf("Variant should be off as in split.yaml, but was %s", result.Variant)
- } else if result.ErrorCode != "" {
- t.Errorf("Unexpected error in result %s", result.ErrorCode)
+// =============================================================================
+// Logger Configuration Tests
+// =============================================================================
+
+// TestLoggerConfiguration verifies all logger configuration scenarios work correctly.
+func TestLoggerConfiguration(t *testing.T) {
+ baseConfig := func() *conf.SplitSdkConfig {
+ cfg := conf.Default()
+ cfg.SplitFile = testSplitFile
+ cfg.LoggerConfig.LogLevel = logging.LevelNone
+ cfg.BlockUntilReady = 10
+ return cfg
+ }
+
+ tests := []struct {
+ setup func() (provider *Provider, customSlog *slog.Logger, customSplit *customTestLogger)
+ name string
+ expectSplitLoggerType string
+ expectProviderUsesDefault bool
+ }{
+ {
+ name: "no logger specified uses defaults",
+ setup: func() (*Provider, *slog.Logger, *customTestLogger) {
+ p, err := New("localhost")
+ require.NoError(t, err)
+ return p, nil, nil
+ },
+ expectProviderUsesDefault: true,
+ expectSplitLoggerType: "adapter",
+ },
+ {
+ name: "with logger option uses custom for both",
+ setup: func() (*Provider, *slog.Logger, *customTestLogger) {
+ var buf strings.Builder
+ customLogger := slog.New(slog.NewTextHandler(&buf, &slog.HandlerOptions{Level: slog.LevelDebug}))
+ p, err := New("localhost", WithLogger(customLogger))
+ require.NoError(t, err)
+ return p, customLogger, nil
+ },
+ expectProviderUsesDefault: false,
+ expectSplitLoggerType: "adapter",
+ },
+ {
+ name: "split config logger only preserves custom split logger",
+ setup: func() (*Provider, *slog.Logger, *customTestLogger) {
+ customSplitLogger := &customTestLogger{logs: make([]string, 0)}
+ cfg := baseConfig()
+ cfg.Logger = customSplitLogger
+ p, err := New("localhost", WithSplitConfig(cfg))
+ require.NoError(t, err)
+ return p, nil, customSplitLogger
+ },
+ expectProviderUsesDefault: true,
+ expectSplitLoggerType: "custom",
+ },
+ {
+ name: "both loggers uses each respectively",
+ setup: func() (*Provider, *slog.Logger, *customTestLogger) {
+ var buf strings.Builder
+ customSlogLogger := slog.New(slog.NewTextHandler(&buf, &slog.HandlerOptions{Level: slog.LevelDebug}))
+ customSplitLogger := &customTestLogger{logs: make([]string, 0)}
+ cfg := baseConfig()
+ cfg.Logger = customSplitLogger
+ p, err := New("localhost", WithLogger(customSlogLogger), WithSplitConfig(cfg))
+ require.NoError(t, err)
+ return p, customSlogLogger, customSplitLogger
+ },
+ expectProviderUsesDefault: false,
+ expectSplitLoggerType: "custom",
+ },
+ {
+ name: "with logger and empty split config uses custom for both",
+ setup: func() (*Provider, *slog.Logger, *customTestLogger) {
+ var buf strings.Builder
+ customLogger := slog.New(slog.NewTextHandler(&buf, &slog.HandlerOptions{Level: slog.LevelDebug}))
+ cfg := baseConfig()
+ p, err := New("localhost", WithLogger(customLogger), WithSplitConfig(cfg))
+ require.NoError(t, err)
+ return p, customLogger, nil
+ },
+ expectProviderUsesDefault: false,
+ expectSplitLoggerType: "adapter",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ provider, _, customSplitLogger := tt.setup()
+ defer func() { _ = provider.ShutdownWithContext(context.Background()) }()
+
+ assert.NotNil(t, provider.logger, "Provider logger should be set")
+ assert.NotNil(t, provider.splitConfig.Logger, "Split SDK logger should be set")
+
+ switch tt.expectSplitLoggerType {
+ case "adapter":
+ adapter, ok := provider.splitConfig.Logger.(*SlogToSplitAdapter)
+ require.True(t, ok, "Split SDK logger should be SlogToSplitAdapter")
+ assert.NotNil(t, adapter.logger, "Adapter should have a logger")
+
+ case "custom":
+ assert.Equal(t, customSplitLogger, provider.splitConfig.Logger,
+ "Split SDK should preserve custom logger (not overwritten)")
+ }
+ })
}
}
-func TestIntegerDetails(t *testing.T) {
- ofClient := create(t)
- flagName := "int_feature"
- evalCtx := evaluationContext()
-
- result, err := ofClient.IntValueDetails(nil, flagName, 0, evalCtx)
- if err != nil {
- t.Errorf("Unexpected error occurred %s", err.Error())
- } else if result.FlagKey != flagName {
- t.Errorf("Flag name is %s, not %s", result.FlagKey, flagName)
- } else if !strings.Contains(string(result.Reason), string(openfeature.TargetingMatchReason)) {
- t.Errorf("reason is %s, not targeting match", result.Reason)
- } else if result.Value != int64(32) {
- t.Errorf("Result was %d, but should have been 32 as in split.yaml", result.Value)
- } else if result.Variant != "32" {
- t.Errorf("Variant should be 32 as in split.yaml, but was %s", result.Variant)
- } else if result.ErrorCode != "" {
- t.Errorf("Unexpected error in result %s", result.ErrorCode)
- }
+// customTestLogger implements the Split SDK logging interface for testing.
+// Thread-safe to handle concurrent calls from Split SDK goroutines.
+type customTestLogger struct {
+ logs []string
+ mu sync.Mutex
}
-func TestStringDetails(t *testing.T) {
- ofClient := create(t)
- flagName := "some_other_feature"
- evalCtx := evaluationContext()
-
- result, err := ofClient.StringValueDetails(nil, flagName, "blah", evalCtx)
- if err != nil {
- t.Errorf("Unexpected error occurred %s", err.Error())
- } else if result.FlagKey != flagName {
- t.Errorf("Flag name is %s, not %s", result.FlagKey, flagName)
- } else if !strings.Contains(string(result.Reason), string(openfeature.TargetingMatchReason)) {
- t.Errorf("reason is %s, not targeting match", result.Reason)
- } else if result.Value != "off" {
- t.Errorf("Result was %s, but should have been off as in split.yaml", result.Value)
- } else if result.Variant != "off" {
- t.Errorf("Variant should be off as in split.yaml, but was %s", result.Variant)
- } else if result.ErrorCode != "" {
- t.Errorf("Unexpected error in result %s", result.ErrorCode)
- }
+func (l *customTestLogger) Error(msg ...any) {
+ l.mu.Lock()
+ defer l.mu.Unlock()
+ l.logs = append(l.logs, fmt.Sprint("ERROR: ", msg))
}
-func TestObjectDetails(t *testing.T) {
- ofClient := create(t)
- flagName := "obj_feature"
- evalCtx := evaluationContext()
-
- result, err := ofClient.ObjectValueDetails(nil, flagName, map[string]interface{}{}, evalCtx)
- expectedResult := map[string]interface{}{
- "key": "value",
- }
- if err != nil {
- t.Errorf("Unexpected error occurred %s", err.Error())
- } else if result.FlagKey != flagName {
- t.Errorf("Flag name is %s, not %s", result.FlagKey, flagName)
- } else if !strings.Contains(string(result.Reason), string(openfeature.TargetingMatchReason)) {
- t.Errorf("reason is %s, not targeting match", result.Reason)
- } else if !reflect.DeepEqual(result.Value, expectedResult) {
- t.Error("Result was not map of key->value as in split.yaml")
- } else if result.Variant != "{\"key\": \"value\"}" {
- t.Errorf("Variant should be {\"key\": \"value\"} as in split.yaml, but was %s", result.Variant)
- } else if result.ErrorCode != "" {
- t.Errorf("Unexpected error in result %s", result.ErrorCode)
- }
+func (l *customTestLogger) Warning(msg ...any) {
+ l.mu.Lock()
+ defer l.mu.Unlock()
+ l.logs = append(l.logs, fmt.Sprint("WARN: ", msg))
}
-func TestFloatDetails(t *testing.T) {
- ofClient := create(t)
- flagName := "int_feature"
- evalCtx := evaluationContext()
-
- result, err := ofClient.FloatValueDetails(nil, flagName, 0, evalCtx)
- if err != nil {
- t.Errorf("Unexpected error occurred %s", err.Error())
- } else if result.FlagKey != flagName {
- t.Errorf("Flag name is %s, not %s", result.FlagKey, flagName)
- } else if !strings.Contains(string(result.Reason), string(openfeature.TargetingMatchReason)) {
- t.Errorf("reason is %s, not targeting match", result.Reason)
- } else if result.Value != float64(32) {
- t.Errorf("Result was %f, but should have been 32 as in split.yaml", result.Value)
- } else if result.Variant != "32" {
- t.Errorf("Variant should be 32 as in split.yaml, but was %s", result.Variant)
- } else if result.ErrorCode != "" {
- t.Errorf("Unexpected error in result %s", result.ErrorCode)
- }
-
- flagName = "float_feature"
- result, err = ofClient.FloatValueDetails(nil, flagName, 0, evalCtx)
- if err != nil {
- t.Errorf("Unexpected error occurred %s", err.Error())
- } else if result.Value != 32.5 {
- t.Errorf("Result was %f, but should have been 32.5 as in split.yaml", result.Value)
- } else if result.Variant != "32.5" {
- t.Errorf("Variant should be 32 as in split.yaml, but was %s", result.Variant)
- } else if result.ErrorCode != "" {
- t.Errorf("Unexpected error in result %s", result.ErrorCode)
- }
+func (l *customTestLogger) Info(msg ...any) {
+ l.mu.Lock()
+ defer l.mu.Unlock()
+ l.logs = append(l.logs, fmt.Sprint("INFO: ", msg))
}
-func TestBooleanFail(t *testing.T) {
- // attempt to fetch an object treatment as a boolean. Should result in the default
- ofClient := create(t)
- flagName := "obj_feature"
- evalCtx := evaluationContext()
-
- result, err := ofClient.BooleanValue(nil, flagName, false, evalCtx)
- if err == nil {
- t.Error("Expected exception to occur")
- } else if !strings.Contains(err.Error(), string(openfeature.ParseErrorCode)) {
- t.Errorf("Expected parse error, got %s", err.Error())
- } else if result != false {
- t.Error("Result was true, but should have been default of false")
- }
-
- resultDetails, err := ofClient.BooleanValueDetails(nil, flagName, false, evalCtx)
- if err == nil {
- t.Error("Expected exception to occur")
- } else if !strings.Contains(err.Error(), string(openfeature.ParseErrorCode)) {
- t.Errorf("Expected parse error, got %s", err.Error())
- } else if resultDetails.Value != false {
- t.Error("Result was true, but should have been default of false")
- } else if resultDetails.ErrorCode != openfeature.ParseErrorCode {
- t.Errorf("Expected parse error code, got %s", resultDetails.ErrorCode)
- } else if resultDetails.Reason != openfeature.ErrorReason {
- t.Errorf("Expected error reason code, got %s", resultDetails.Reason)
- } else if resultDetails.Variant != "{\"key\": \"value\"}" {
- t.Errorf("Expected variant to be string of map, got %s", resultDetails.Variant)
- }
+func (l *customTestLogger) Debug(msg ...any) {
+ l.mu.Lock()
+ defer l.mu.Unlock()
+ l.logs = append(l.logs, fmt.Sprint("DEBUG: ", msg))
}
-func TestIntegerFail(t *testing.T) {
- // attempt to fetch an object treatment as an integer. Should result in the default
- ofClient := create(t)
- flagName := "obj_feature"
- evalCtx := evaluationContext()
-
- result, err := ofClient.IntValue(nil, flagName, 10, evalCtx)
- if err == nil {
- t.Error("Expected exception to occur")
- } else if !strings.Contains(err.Error(), string(openfeature.ParseErrorCode)) {
- t.Errorf("Expected parse error, got %s", err.Error())
- } else if result != int64(10) {
- t.Errorf("Result was %d, but should have been default of 10", result)
- }
-
- resultDetails, err := ofClient.IntValueDetails(nil, flagName, 10, evalCtx)
- if err == nil {
- t.Error("Expected exception to occur")
- } else if !strings.Contains(err.Error(), string(openfeature.ParseErrorCode)) {
- t.Errorf("Expected parse error, got %s", err.Error())
- } else if resultDetails.Value != int64(10) {
- t.Errorf("Result was %d, but should have been default of 10", resultDetails.Value)
- } else if resultDetails.ErrorCode != openfeature.ParseErrorCode {
- t.Errorf("Expected parse error code, got %s", resultDetails.ErrorCode)
- } else if resultDetails.Reason != openfeature.ErrorReason {
- t.Errorf("Expected error reason code, got %s", resultDetails.Reason)
- } else if resultDetails.Variant != "{\"key\": \"value\"}" {
- t.Errorf("Expected variant to be string of map, got %s", resultDetails.Variant)
- }
+func (l *customTestLogger) Verbose(msg ...any) {
+ l.mu.Lock()
+ defer l.mu.Unlock()
+ l.logs = append(l.logs, fmt.Sprint("VERBOSE: ", msg))
}
-func TestFloatFail(t *testing.T) {
- // attempt to fetch an object treatment as a float. Should result in the default
- ofClient := create(t)
- flagName := "obj_feature"
- evalCtx := evaluationContext()
-
- result, err := ofClient.FloatValue(nil, flagName, 10, evalCtx)
- if err == nil {
- t.Error("Expected exception to occur")
- } else if !strings.Contains(err.Error(), string(openfeature.ParseErrorCode)) {
- t.Errorf("Expected parse error, got %s", err.Error())
- } else if result != float64(10) {
- t.Errorf("Result was %f, but should have been default of 10", result)
- }
-
- resultDetails, err := ofClient.FloatValueDetails(nil, flagName, 10, evalCtx)
- if err == nil {
- t.Error("Expected exception to occur")
- } else if !strings.Contains(err.Error(), string(openfeature.ParseErrorCode)) {
- t.Errorf("Expected parse error, got %s", err.Error())
- } else if resultDetails.Value != float64(10) {
- t.Errorf("Result was %f, but should have been default of 10", resultDetails.Value)
- } else if resultDetails.ErrorCode != openfeature.ParseErrorCode {
- t.Errorf("Expected parse error code, got %s", resultDetails.ErrorCode)
- } else if resultDetails.Reason != openfeature.ErrorReason {
- t.Errorf("Expected error reason code, got %s", resultDetails.Reason)
- } else if resultDetails.Variant != "{\"key\": \"value\"}" {
- t.Errorf("Expected variant to be string of map, got %s", resultDetails.Variant)
+// =============================================================================
+// splitsChanged Pure Function Tests
+// =============================================================================
+
+func TestSplitsChanged(t *testing.T) {
+ tests := []struct {
+ name string
+ old map[string]int64
+ current map[string]int64
+ want bool
+ }{
+ {
+ name: "identical maps",
+ old: map[string]int64{"a": 1, "b": 2},
+ current: map[string]int64{"a": 1, "b": 2},
+ want: false,
+ },
+ {
+ name: "both empty",
+ old: map[string]int64{},
+ current: map[string]int64{},
+ want: false,
+ },
+ {
+ name: "split added",
+ old: map[string]int64{"a": 1},
+ current: map[string]int64{"a": 1, "b": 2},
+ want: true,
+ },
+ {
+ name: "split removed",
+ old: map[string]int64{"a": 1, "b": 2},
+ current: map[string]int64{"a": 1},
+ want: true,
+ },
+ {
+ name: "change number updated",
+ old: map[string]int64{"a": 1, "b": 2},
+ current: map[string]int64{"a": 1, "b": 3},
+ want: true,
+ },
+ {
+ name: "split replaced",
+ old: map[string]int64{"a": 1, "b": 2},
+ current: map[string]int64{"a": 1, "c": 2},
+ want: true,
+ },
+ {
+ name: "old nil current empty",
+ old: nil,
+ current: map[string]int64{},
+ want: false,
+ },
+ {
+ name: "old empty current has splits",
+ old: map[string]int64{},
+ current: map[string]int64{"a": 1},
+ want: true,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ got := splitsChanged(tt.old, tt.current)
+ assert.Equal(t, tt.want, got)
+ })
}
}
-func TestObjectFail(t *testing.T) {
- // attempt to fetch an int as an object. Should result in the default
- ofClient := create(t)
- flagName := "int_feature"
- evalCtx := evaluationContext()
- defaultTreatment := map[string]interface{}{
- "key": "value",
- }
-
- result, err := ofClient.ObjectValue(nil, flagName, defaultTreatment, evalCtx)
- if err == nil {
- t.Error("Expected exception to occur")
- } else if !strings.Contains(err.Error(), string(openfeature.ParseErrorCode)) {
- t.Errorf("Expected parse error, got %s", err.Error())
- } else if !reflect.DeepEqual(result, defaultTreatment) {
- t.Error("Result was not default treatment")
- }
+// =============================================================================
+// WithMonitoringInterval Clamping Tests
+// =============================================================================
- resultDetails, err := ofClient.ObjectValueDetails(nil, flagName, defaultTreatment, evalCtx)
- if err == nil {
- t.Error("Expected exception to occur")
- } else if !strings.Contains(err.Error(), string(openfeature.ParseErrorCode)) {
- t.Errorf("Expected parse error, got %s", err.Error())
- } else if !reflect.DeepEqual(resultDetails.Value, defaultTreatment) {
- t.Errorf("Result was %f, but should have been default of 10", resultDetails.Value)
- } else if resultDetails.ErrorCode != openfeature.ParseErrorCode {
- t.Errorf("Expected parse error code, got %s", resultDetails.ErrorCode)
- } else if resultDetails.Reason != openfeature.ErrorReason {
- t.Errorf("Expected error reason code, got %s", resultDetails.Reason)
- } else if resultDetails.Variant != "32" {
- t.Errorf("Expected variant to be string of integer, got %s", resultDetails.Variant)
+func TestWithMonitoringIntervalClamping(t *testing.T) {
+ cfg := conf.Default()
+ cfg.SplitFile = testSplitFile
+ cfg.LoggerConfig.LogLevel = logging.LevelNone
+ cfg.BlockUntilReady = 10
+
+ tests := []struct {
+ name string
+ interval time.Duration
+ expected time.Duration
+ }{
+ {
+ name: "zero uses default",
+ interval: 0,
+ expected: 30 * time.Second,
+ },
+ {
+ name: "below minimum clamped to minimum",
+ interval: 1 * time.Second,
+ expected: 5 * time.Second,
+ },
+ {
+ name: "at minimum accepted",
+ interval: 5 * time.Second,
+ expected: 5 * time.Second,
+ },
+ {
+ name: "above minimum accepted",
+ interval: 60 * time.Second,
+ expected: 60 * time.Second,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ var opts []Option
+ if tt.interval != 0 {
+ opts = append(opts, WithMonitoringInterval(tt.interval))
+ }
+ provider, err := New("localhost", append(opts, WithSplitConfig(cfg))...)
+ require.NoError(t, err)
+ defer func() { _ = provider.ShutdownWithContext(context.Background()) }()
+
+ assert.Equal(t, tt.expected, provider.monitoringInterval)
+ })
}
}
diff --git a/split.yaml b/split.yaml
deleted file mode 100644
index b5e3e1e..0000000
--- a/split.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-- my_feature:
- treatment: "on"
- keys: "key"
- config: "{\"desc\" : \"this applies only to ON treatment\"}"
-- my_feature:
- treatment: "off"
-- some_other_feature:
- treatment: "off"
-- int_feature:
- treatment: "32"
-- obj_feature:
- treatment: "{\"key\": \"value\"}"
-- float_feature:
- treatment: "32.5"
\ No newline at end of file
diff --git a/test/advanced/README.md b/test/advanced/README.md
new file mode 100644
index 0000000..63a65b7
--- /dev/null
+++ b/test/advanced/README.md
@@ -0,0 +1,65 @@
+# Advanced Integration Test
+
+Interactive test for event tracking and configuration change event detection.
+
+## What This Tests
+
+**Event Tracking & PROVIDER_CONFIGURATION_CHANGED Detection** - Sends tracking events (viewable in Split Data Hub) and
+validates that the provider correctly emits configuration change events when flags are modified in the Split dashboard.
+
+This test requires manual interaction: you must modify a flag in the Split dashboard while the test is running to
+trigger the event.
+
+All other cloud-only features (flag sets, targeting rules) are tested automatically in
+the [integration test](../integration/) when `SPLIT_API_KEY` is provided.
+
+## Prerequisites
+
+- A Split account with SDK API key (server-side key)
+- Any flag to modify during the test
+
+Set `SPLIT_API_KEY` to your Split SDK API key.
+
+## Provider Configuration
+
+The test uses a 5-second monitoring interval for faster configuration change detection:
+
+```go
+provider, _ := split.New(apiKey,
+ split.WithMonitoringInterval(5*time.Second),
+)
+```
+
+## Running
+
+```bash
+cd test/advanced
+
+# Run with API key
+SPLIT_API_KEY=your-key go run main.go
+
+# With debug logging
+LOG_LEVEL=debug SPLIT_API_KEY=your-key go run main.go
+```
+
+## Test Flow
+
+1. **Initialize Provider** - Connects to Split cloud with 5-second monitoring interval
+2. **Wait for Configuration Change** - Waits up to 2 minutes for `PROVIDER_CONFIGURATION_CHANGED` event
+
+ - Modify any flag in Split dashboard to trigger the event
+ - Test automatically detects the change and reports success
+
+3. **Event Summary** - Reports counts of all provider events received
+
+## Notes
+
+- **Monitoring Interval**: The provider polls every 5 seconds (configured via `WithMonitoringInterval`). Default is 30
+ seconds, minimum is 5 seconds.
+- **Configuration Change Detection**: While the Split SDK receives changes via SSE near-instantly, the provider polls
+ for changes to emit `PROVIDER_CONFIGURATION_CHANGED` events.
+
+## Learn More
+
+- [Integration Test](../integration/) - Automated tests including flag sets and targeting (cloud mode)
+- [Cloud Example](../../examples/cloud/) - Simple cloud mode example
diff --git a/test/advanced/main.go b/test/advanced/main.go
new file mode 100644
index 0000000..876afbb
--- /dev/null
+++ b/test/advanced/main.go
@@ -0,0 +1,354 @@
+// Package main provides advanced tests for cloud-only Split features.
+//
+// This test validates:
+// - Event tracking (view events in Split Data Hub)
+// - PROVIDER_CONFIGURATION_CHANGED event detection
+//
+// Prerequisites:
+// - A real Split account with SDK API key
+// - For config change test: any flag to modify in the Split dashboard
+//
+// Run: SPLIT_API_KEY=your-key go run main.go
+package main
+
+import (
+ "context"
+ "fmt"
+ "log/slog"
+ "os"
+ "os/signal"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "syscall"
+ "time"
+
+ "github.com/lmittmann/tint"
+ "github.com/open-feature/go-sdk/openfeature"
+ "github.com/open-feature/go-sdk/openfeature/hooks"
+
+ "github.com/splitio/split-openfeature-provider-go/v2"
+)
+
+// Event counters for validation
+var (
+ readyCount atomic.Int32
+ configChangedCount atomic.Int32
+ errorCount atomic.Int32
+ configChangedChan = make(chan struct{}, 10)
+)
+
+func main() {
+ fmt.Println("============================================================")
+ fmt.Println(" Split OpenFeature Provider - Advanced Cloud Tests")
+ fmt.Println(" Testing: Event Tracking & Configuration Change Detection")
+ fmt.Println("============================================================")
+ fmt.Println()
+
+ // ============================================================
+ // SETUP: CONTEXT WITH TIMEOUT AND SIGNAL HANDLING
+ // ============================================================
+
+ // 5-minute timeout for interactive test
+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
+ defer cancel()
+
+ // ============================================================
+ // 1. LOGGING CONFIGURATION
+ // ============================================================
+
+ logLevel := slog.LevelInfo
+ if level := os.Getenv("LOG_LEVEL"); level != "" {
+ switch level {
+ case "debug", "DEBUG", "trace", "TRACE":
+ logLevel = slog.LevelDebug
+ case "info", "INFO":
+ logLevel = slog.LevelInfo
+ case "warn", "WARN", "warning", "WARNING":
+ logLevel = slog.LevelWarn
+ case "error", "ERROR":
+ logLevel = slog.LevelError
+ default:
+ logLevel = slog.LevelInfo
+ }
+ }
+
+ baseLogger := slog.New(tint.NewHandler(os.Stderr, &tint.Options{
+ Level: logLevel,
+ TimeFormat: time.TimeOnly,
+ }))
+
+ appLogger := baseLogger.With("source", "app")
+ ofLogger := baseLogger.With("source", "openfeature-sdk")
+
+ slog.SetDefault(baseLogger)
+
+ appLogger.Info("logging configured", "level", logLevel.String())
+
+ // ============================================================
+ // 2. CHECK API KEY
+ // ============================================================
+
+ apiKey := os.Getenv("SPLIT_API_KEY")
+ if apiKey == "" {
+ appLogger.Error("SPLIT_API_KEY environment variable is required")
+ appLogger.Info("Usage: SPLIT_API_KEY=your-key go run main.go")
+ os.Exit(1)
+ }
+
+ // ============================================================
+ // 3. OPENFEATURE LOGGING HOOK
+ // ============================================================
+ openfeature.AddHooks(hooks.NewLoggingHook(false, ofLogger))
+
+ // ============================================================
+ // 4. EVENT HANDLERS
+ // ============================================================
+
+ readyHandler := func(details openfeature.EventDetails) {
+ readyCount.Add(1)
+ appLogger.Info("EVENT: PROVIDER_READY",
+ "count", readyCount.Load(),
+ "message", details.Message)
+ }
+ openfeature.AddHandler(openfeature.ProviderReady, &readyHandler)
+
+ configChangeHandler := func(details openfeature.EventDetails) {
+ configChangedCount.Add(1)
+ appLogger.Info("EVENT: PROVIDER_CONFIGURATION_CHANGED",
+ "count", configChangedCount.Load(),
+ "message", details.Message)
+ select {
+ case configChangedChan <- struct{}{}:
+ default:
+ }
+ }
+ openfeature.AddHandler(openfeature.ProviderConfigChange, &configChangeHandler)
+
+ errorHandler := func(details openfeature.EventDetails) {
+ errorCount.Add(1)
+ appLogger.Error("EVENT: PROVIDER_ERROR",
+ "count", errorCount.Load(),
+ "message", details.Message)
+ }
+ openfeature.AddHandler(openfeature.ProviderError, &errorHandler)
+
+ appLogger.Info("event handlers registered", "handlers", 3)
+
+ // ============================================================
+ // 5. CREATE PROVIDER WITH OPTIMIZED CONFIG
+ // ============================================================
+
+ // Use optimized test configuration for faster execution
+ cfg := split.TestConfig()
+
+ provider, err := split.New(apiKey,
+ split.WithLogger(baseLogger),
+ split.WithSplitConfig(cfg),
+ split.WithMonitoringInterval(5*time.Second), // Fast config change detection
+ )
+ if err != nil {
+ appLogger.Error("failed to create provider", "error", err)
+ os.Exit(1)
+ }
+
+ appLogger.Info("provider created",
+ "monitoring_interval", "5s",
+ "block_until_ready", cfg.BlockUntilReady)
+
+ // ============================================================
+ // 6. GRACEFUL SHUTDOWN SETUP
+ // ============================================================
+
+ var cleanupOnce sync.Once
+ cleanup := func() {
+ cleanupOnce.Do(func() {
+ defer func() {
+ if r := recover(); r != nil {
+ slog.Error("panic during shutdown", "panic", r)
+ }
+ }()
+
+ fmt.Println()
+ fmt.Println(strings.Repeat("-", 60))
+ slog.Info("initiating graceful shutdown")
+
+ shutdownCtx, shutdownCancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer shutdownCancel()
+ if err := openfeature.ShutdownWithContext(shutdownCtx); err != nil {
+ slog.Error("shutdown error", "error", err)
+ }
+
+ slog.Info("graceful shutdown complete")
+ })
+ }
+
+ defer cleanup()
+
+ // Setup interrupt handling
+ shutdownChan := make(chan os.Signal, 1)
+ done := make(chan struct{})
+ signal.Notify(shutdownChan, os.Interrupt, syscall.SIGTERM)
+
+ go func() {
+ select {
+ case sig := <-shutdownChan:
+ slog.Warn("interrupt signal received", "signal", sig)
+ signal.Stop(shutdownChan)
+ cancel()
+ case <-done:
+ signal.Stop(shutdownChan)
+ return
+ }
+ }()
+
+ defer close(done)
+
+ // ============================================================
+ // 7. PROVIDER INITIALIZATION
+ // ============================================================
+
+ initCtx, initCancel := context.WithTimeout(ctx, 30*time.Second)
+ defer initCancel()
+
+ appLogger.Info("initializing provider...")
+ if err := openfeature.SetProviderWithContextAndWait(initCtx, provider); err != nil {
+ appLogger.Error("failed to initialize provider", "error", err)
+ cleanup()
+ os.Exit(1)
+ }
+
+ appLogger.Info("provider initialized successfully")
+
+ // Create OpenFeature client for tracking
+ client := openfeature.NewDefaultClient()
+
+ // ============================================================
+ // TRACKING TEST
+ // ============================================================
+
+ fmt.Println()
+ fmt.Println("------------------------------------------------------------")
+ fmt.Println(">> TRACKING EVENTS (view in Split Data Hub)")
+ fmt.Println("------------------------------------------------------------")
+
+ testTracking(ctx, client, appLogger)
+
+ // ============================================================
+ // CONFIGURATION CHANGE TEST
+ // ============================================================
+
+ fmt.Println()
+ fmt.Println("------------------------------------------------------------")
+ fmt.Println(">> CONFIGURATION CHANGE EVENT DETECTION")
+ fmt.Println("------------------------------------------------------------")
+
+ testConfigurationChange(ctx, appLogger)
+
+ // ============================================================
+ // EVENT SUMMARY
+ // ============================================================
+
+ fmt.Println()
+ fmt.Println("------------------------------------------------------------")
+ fmt.Println(">> EVENT SUMMARY")
+ fmt.Println("------------------------------------------------------------")
+
+ appLogger.Info("provider event summary",
+ "PROVIDER_READY", readyCount.Load(),
+ "PROVIDER_CONFIGURATION_CHANGED", configChangedCount.Load(),
+ "PROVIDER_ERROR", errorCount.Load())
+
+ if readyCount.Load() >= 1 {
+ appLogger.Info("PASS: received PROVIDER_READY event")
+ } else {
+ appLogger.Error("FAIL: did not receive PROVIDER_READY event")
+ }
+
+ appLogger.Info("configuration change test completed")
+}
+
+func testConfigurationChange(ctx context.Context, logger *slog.Logger) {
+ if ctx.Err() != nil {
+ logger.Info("skipping - context cancelled")
+ return
+ }
+
+ // Drain any config change events that occurred before this test
+ for len(configChangedChan) > 0 {
+ <-configChangedChan
+ }
+
+ logger.Info("waiting for PROVIDER_CONFIGURATION_CHANGED event...")
+ logger.Info("modify any flag in Split dashboard to trigger the event", "timeout", "2m")
+
+ select {
+ case <-ctx.Done():
+ logger.Info("context canceled")
+ return
+ case <-configChangedChan:
+ logger.Info("PASS: PROVIDER_CONFIGURATION_CHANGED event detected")
+ case <-time.After(2 * time.Minute):
+ logger.Warn("no configuration change detected within timeout", "timeout", "2m")
+ return
+ }
+}
+
+// testTracking sends tracking events to Split for viewing in the console.
+// Events can be viewed in Split Data Hub.
+func testTracking(ctx context.Context, client *openfeature.Client, logger *slog.Logger) {
+ if ctx.Err() != nil {
+ logger.Info("skipping - context cancelled")
+ return
+ }
+
+ logger.Info("sending tracking events to Split...")
+
+ // Test 1: Basic event with default traffic type ("user")
+ evalCtx := openfeature.NewEvaluationContext("test-user-123", nil)
+ details := openfeature.NewTrackingEventDetails(1.0)
+ client.Track(ctx, "page_view", evalCtx, details)
+ logger.Info("sent tracking event",
+ "event", "page_view",
+ "key", "test-user-123",
+ "traffic_type", "user",
+ "value", 1.0)
+
+ // Test 2: Event with custom traffic type
+ evalCtxAccount := openfeature.NewEvaluationContext("account-456", map[string]any{
+ "trafficType": "account",
+ })
+ client.Track(ctx, "subscription_created", evalCtxAccount, openfeature.NewTrackingEventDetails(99.99))
+ logger.Info("sent tracking event",
+ "event", "subscription_created",
+ "key", "account-456",
+ "traffic_type", "account",
+ "value", 99.99)
+
+ // Test 3: Event with properties
+ evalCtxPurchase := openfeature.NewEvaluationContext("user-789", nil)
+ purchaseDetails := openfeature.NewTrackingEventDetails(149.99).
+ Add("currency", "USD").
+ Add("item_count", 3).
+ Add("category", "electronics").
+ Add("is_first_purchase", true)
+ client.Track(ctx, "purchase_completed", evalCtxPurchase, purchaseDetails)
+ logger.Info("sent tracking event",
+ "event", "purchase_completed",
+ "key", "user-789",
+ "traffic_type", "user",
+ "value", 149.99,
+ "properties", "currency=USD, item_count=3, category=electronics, is_first_purchase=true")
+
+ // Test 4: Event without value (count-only)
+ client.Track(ctx, "button_clicked", evalCtx, openfeature.NewTrackingEventDetails(0))
+ logger.Info("sent tracking event",
+ "event", "button_clicked",
+ "key", "test-user-123",
+ "traffic_type", "user",
+ "value", 0)
+
+ logger.Info("tracking events sent successfully",
+ "total_events", 4,
+ "note", "view events in Split Data Hub")
+}
diff --git a/test/cloud_flags.yaml b/test/cloud_flags.yaml
new file mode 100644
index 0000000..5b64a28
--- /dev/null
+++ b/test/cloud_flags.yaml
@@ -0,0 +1,87 @@
+# Split OpenFeature Provider - Cloud Test Flags
+#
+# This file documents the flags required for cloud mode integration testing.
+# Contributors can create these flags in their own Split.io account to run
+# cloud mode tests with SPLIT_API_KEY.
+#
+# Flag Set Setup (required for ObjectEvaluation tests):
+# 1. Create a flag set named "split_provider_test" in Split UI
+# 2. Add flags tagged with "split-provider-test-set" to the flag set:
+# - ui_theme
+# - api_version
+#
+# Expected Test Results:
+# - Localhost mode: 85 tests
+# - Cloud mode: 94 tests (includes flag set tests)
+
+flags:
+ # ============================================================
+ # Boolean Flags
+ # ============================================================
+
+ - name: feature_boolean_on
+ treatments: [ on, off ]
+ default: "on"
+
+ - name: feature_boolean_off
+ treatments: [ on, off ]
+ default: "off"
+
+ # ============================================================
+ # String Flags
+ # ============================================================
+
+ - name: ui_theme
+ treatments: [ dark, light ]
+ default: "dark"
+ flag_set: split_provider_test # Add to flag set
+ config:
+ dark: '{"primary_color": "#1a1a2e", "secondary_color": "#16213e", "accent": "#0f3460"}'
+ light: '{"primary_color": "#ffffff", "secondary_color": "#f5f5f5", "accent": "#e8e8e8"}'
+ targeting:
+ - when: variant = "two"
+ serve: "light"
+
+ - name: api_version
+ treatments: [ v1, v2 ]
+ default: "v2"
+ flag_set: split_provider_test # Add to flag set
+ targeting:
+ - when: variant = "two"
+ serve: "v1"
+
+ - name: homepage_variant
+ treatments: [ variant_a, variant_b ]
+ default: "variant_b"
+
+ # ============================================================
+ # Integer Flags
+ # ============================================================
+
+ - name: max_retries
+ treatments: [ "3", "5" ]
+ default: "5"
+
+ - name: page_size
+ treatments: [ "25", "50" ]
+ default: "50"
+
+ - name: timeout_seconds
+ treatments: [ "15", "30" ]
+ default: "30"
+
+ # ============================================================
+ # Float Flags
+ # ============================================================
+
+ - name: discount_rate
+ treatments: [ "0.10", "0.15" ]
+ default: "0.15"
+
+ - name: cache_hit_ratio
+ treatments: [ "0.75", "0.85" ]
+ default: "0.85"
+
+ - name: sampling_rate
+ treatments: [ "0.01", "0.05" ]
+ default: "0.01"
diff --git a/test/integration/README.md b/test/integration/README.md
new file mode 100644
index 0000000..2550150
--- /dev/null
+++ b/test/integration/README.md
@@ -0,0 +1,75 @@
+# Integration Test
+
+Comprehensive integration test validating all provider features with automated assertions.
+
+## What This Tests
+
+- Custom Split SDK configuration with flexible mode (localhost or cloud)
+- Structured logging with slog and colored output via tint
+- Event handling (PROVIDER_READY, PROVIDER_ERROR, PROVIDER_CONFIGURATION_CHANGED)
+- Graceful shutdown with context cancellation and interrupt handling
+- All evaluation types (boolean, string, int, float, object)
+- Evaluation details (variant, reason, flag key)
+- Flag metadata (configurations attached to flags)
+- Flag set evaluation (cloud mode only)
+- Targeting with user attributes
+- Context cancellation and timeout handling
+- Direct Split SDK access (Track, Treatments)
+- Concurrent evaluations (100 goroutines x 10 evaluations)
+- Provider lifecycle (init, shutdown, named providers)
+
+**Test Coverage:**
+
+- Localhost mode: 85 tests
+- Cloud mode: 94 tests (includes flag set tests)
+
+## File Structure
+
+| File | Purpose |
+|------------------|------------------------------------------------------------------------|
+| `main.go` | Entry point, setup, and test orchestration |
+| `results.go` | Test result tracking with atomic counters |
+| `evaluations.go` | Flag evaluation tests (boolean, string, int, float, object, targeting) |
+| `lifecycle.go` | Provider lifecycle tests (init, shutdown, named providers, timeouts) |
+| `sdk.go` | SDK access, concurrent evaluations, metrics, and health tests |
+
+## Running
+
+```bash
+cd test/integration
+
+# Localhost mode (recommended - no API key needed)
+go run .
+
+# With debug logging
+LOG_LEVEL=debug go run .
+
+# Cloud mode (requires flags created per test/cloud_flags.yaml)
+SPLIT_API_KEY=your-key go run .
+```
+
+## Test Modes
+
+### Localhost Mode (default)
+
+Uses `split.yaml` file with test flags. Runs all tests except flag set evaluation (85 tests).
+
+### Cloud Mode (with SPLIT_API_KEY)
+
+Connects to Split cloud. Runs flag set evaluation tests in addition to all other tests (94 tests total).
+Requires flags created per `test/cloud_flags.yaml`.
+
+## Exit Codes
+
+- `0`: All tests passed
+- `1`: One or more tests failed
+- `2`: Timeout or fatal error
+
+Timeout: 5 minutes.
+
+## Learn More
+
+- [Advanced Test](../advanced/) - Configuration change event detection (requires manual flag modification)
+- [Split OpenFeature Go Provider Documentation](../../README.md)
+- [OpenFeature Go SDK](https://openfeature.dev/docs/reference/sdks/server/go)
+- [Split Go SDK](https://github.com/splitio/go-client)
diff --git a/test/integration/evaluations.go b/test/integration/evaluations.go
new file mode 100644
index 0000000..4c07f57
--- /dev/null
+++ b/test/integration/evaluations.go
@@ -0,0 +1,611 @@
+// evaluations.go contains flag evaluation tests for all types.
+// Tests cover boolean, string, int, float, and object evaluations,
+// as well as evaluation details, flag metadata, flag sets, targeting,
+// context cancellation, and error handling.
+package main
+
+import (
+ "context"
+ "fmt"
+ "log/slog"
+ "time"
+
+ "github.com/open-feature/go-sdk/openfeature"
+
+ "github.com/splitio/split-openfeature-provider-go/v2"
+)
+
+// testBooleanEvaluations tests boolean flag evaluations (on/off)
+func testBooleanEvaluations(ctx context.Context, client *openfeature.Client) {
+ evalCtx := openfeature.NewEvaluationContext("test-user", nil)
+
+ tests := []struct {
+ flag string
+ expected bool
+ }{
+ {"feature_boolean_on", true},
+ {"feature_boolean_off", false},
+ }
+
+ for _, tt := range tests {
+ value, err := client.BooleanValue(ctx, tt.flag, !tt.expected, evalCtx)
+ if err != nil {
+ results.Fail(fmt.Sprintf("Boolean(%s)", tt.flag), err.Error())
+ continue
+ }
+
+ if value != tt.expected {
+ results.Fail(fmt.Sprintf("Boolean(%s)", tt.flag),
+ fmt.Sprintf("expected %v, got %v", tt.expected, value))
+ } else {
+ results.Pass(fmt.Sprintf("Boolean(%s)", tt.flag))
+ }
+ }
+}
+
+// testStringEvaluations tests string flag evaluations
+func testStringEvaluations(ctx context.Context, client *openfeature.Client) {
+ evalCtx := openfeature.NewEvaluationContext("test-user", nil)
+
+ tests := []struct {
+ flag string
+ expected string
+ }{
+ {"ui_theme", "dark"},
+ {"api_version", "v2"},
+ {"homepage_variant", "variant_b"},
+ }
+
+ for _, tt := range tests {
+ value, err := client.StringValue(ctx, tt.flag, "", evalCtx)
+ if err != nil {
+ results.Fail(fmt.Sprintf("String(%s)", tt.flag), err.Error())
+ continue
+ }
+
+ if value != tt.expected {
+ results.Fail(fmt.Sprintf("String(%s)", tt.flag),
+ fmt.Sprintf("expected %s, got %s", tt.expected, value))
+ } else {
+ results.Pass(fmt.Sprintf("String(%s)", tt.flag))
+ }
+ }
+}
+
+// testIntEvaluations tests integer flag evaluations
+func testIntEvaluations(ctx context.Context, client *openfeature.Client) {
+ evalCtx := openfeature.NewEvaluationContext("test-user", nil)
+
+ tests := []struct {
+ flag string
+ expected int64
+ }{
+ {"max_retries", 5},
+ {"page_size", 50},
+ {"timeout_seconds", 30},
+ }
+
+ for _, tt := range tests {
+ value, err := client.IntValue(ctx, tt.flag, 0, evalCtx)
+ if err != nil {
+ results.Fail(fmt.Sprintf("Int(%s)", tt.flag), err.Error())
+ continue
+ }
+
+ if value != tt.expected {
+ results.Fail(fmt.Sprintf("Int(%s)", tt.flag),
+ fmt.Sprintf("expected %d, got %d", tt.expected, value))
+ } else {
+ results.Pass(fmt.Sprintf("Int(%s)", tt.flag))
+ }
+ }
+}
+
+// testFloatEvaluations tests float flag evaluations
+func testFloatEvaluations(ctx context.Context, client *openfeature.Client) {
+ evalCtx := openfeature.NewEvaluationContext("test-user", nil)
+
+ tests := []struct {
+ flag string
+ expected float64
+ }{
+ {"discount_rate", 0.15},
+ {"cache_hit_ratio", 0.85},
+ {"sampling_rate", 0.01},
+ }
+
+ for _, tt := range tests {
+ value, err := client.FloatValue(ctx, tt.flag, 0.0, evalCtx)
+ if err != nil {
+ results.Fail(fmt.Sprintf("Float(%s)", tt.flag), err.Error())
+ continue
+ }
+
+ if value != tt.expected {
+ results.Fail(fmt.Sprintf("Float(%s)", tt.flag),
+ fmt.Sprintf("expected %.4f, got %.4f", tt.expected, value))
+ } else {
+ results.Pass(fmt.Sprintf("Float(%s)", tt.flag))
+ }
+ }
+}
+
+// testObjectEvaluations tests object flag evaluations (localhost mode only)
+func testObjectEvaluations(ctx context.Context, client *openfeature.Client) {
+ evalCtx := openfeature.NewEvaluationContext("test-user", nil)
+
+ // Test 1: Single flag evaluation (localhost mode)
+ // Returns: FlagSetResult{"premium_features": FlagResult{Treatment: "on", Config: {...}}}
+ value, err := client.ObjectValue(ctx, "premium_features", split.FlagSetResult{}, evalCtx)
+ if err != nil {
+ results.Fail("Object(premium_features)", err.Error())
+ } else {
+ // Type-assert to FlagSetResult
+ flags, ok := value.(split.FlagSetResult)
+ if !ok {
+ results.Fail("Object(premium_features)", fmt.Sprintf("expected FlagSetResult, got %T", value))
+ return
+ }
+
+ // Check structure: should have flag name as key
+ flagData, ok := flags["premium_features"]
+ if !ok {
+ results.Fail("Object(premium_features)", "flag data not found")
+ return
+ }
+
+ slog.Info("object evaluation result",
+ "flag", "premium_features",
+ "treatment", flagData.Treatment,
+ "has_config", flagData.Config != nil)
+
+ results.Pass("Object(premium_features)")
+ }
+
+ // Test 2: Object with configuration
+ // This demonstrates accessing JSON config data attached to treatments
+ value, err = client.ObjectValue(ctx, "feature_config", split.FlagSetResult{}, evalCtx)
+ if err != nil {
+ results.Fail("Object(feature_config)", err.Error())
+ } else {
+ flags, ok := value.(split.FlagSetResult)
+ if !ok {
+ results.Fail("Object(feature_config)", fmt.Sprintf("expected FlagSetResult, got %T", value))
+ return
+ }
+
+ flagData, ok := flags["feature_config"]
+ if !ok {
+ results.Fail("Object(feature_config)", "flag data not found")
+ return
+ }
+
+ // Check if config is present and valid
+ if config, ok := flagData.Config.(map[string]any); ok {
+ slog.Info("config data received",
+ "flag", "feature_config",
+ "config_keys", len(config))
+ results.Pass("Object(feature_config)")
+ } else {
+ results.Pass("Object(feature_config) - no config")
+ }
+ }
+}
+
+// testEvaluationDetails tests evaluation details (variant, reason, flagKey)
+func testEvaluationDetails(ctx context.Context, client *openfeature.Client) {
+	evalCtx := openfeature.NewEvaluationContext("test-user", nil)
+
+	details, err := client.BooleanValueDetails(ctx, "feature_boolean_on", false, evalCtx) // details carry variant/reason/flagKey alongside the value
+	if err != nil {
+		results.Fail("BooleanDetails(variant)", err.Error())
+		return // nothing to inspect without a successful evaluation
+	}
+
+	if details.Variant == "" { // variant is expected to be populated (presumably the Split treatment name — confirm against provider docs)
+		results.Fail("BooleanDetails(variant)", "variant is empty")
+	} else {
+		results.Pass("BooleanDetails(variant)")
+	}
+
+	if details.Reason == "" { // reason should describe how the value was resolved
+		results.Fail("BooleanDetails(reason)", "reason is empty")
+	} else {
+		results.Pass("BooleanDetails(reason)")
+	}
+
+	if details.FlagKey != "feature_boolean_on" { // flagKey must echo the key that was evaluated
+		results.Fail("BooleanDetails(flagKey)", fmt.Sprintf("expected feature_boolean_on, got %s", details.FlagKey))
+	} else {
+		results.Pass("BooleanDetails(flagKey)")
+	}
+}
+
+// testFlagMetadata tests flag metadata (configurations attached to flags)
+func testFlagMetadata(ctx context.Context, client *openfeature.Client) {
+	evalCtx := openfeature.NewEvaluationContext("test-user", nil)
+
+	stringDetails, err := client.StringValueDetails(ctx, "ui_theme", "light", evalCtx)
+	if err != nil {
+		results.Fail("FlagMetadata(string)", err.Error())
+		return
+	}
+
+	if len(stringDetails.FlagMetadata) > 0 { // len(nil map) == 0, so the former `!= nil &&` guard was redundant (staticcheck S1009)
+		slog.Info("flag metadata in StringDetails",
+			"flag", "ui_theme",
+			"treatment", stringDetails.Value,
+			"metadata_keys", len(stringDetails.FlagMetadata))
+
+		if configValue, ok := stringDetails.FlagMetadata["value"]; ok {
+			if configMap, ok := configValue.(map[string]any); ok {
+				if primaryColor, ok := configMap["primary_color"]; ok {
+					slog.Info("config field accessible", "primary_color", primaryColor)
+					results.Pass("FlagMetadata(string)")
+				} else {
+					results.Pass("FlagMetadata(string) - no primary_color")
+				}
+			} else {
+				results.Pass("FlagMetadata(string) - non-object config")
+			}
+		} else {
+			results.Pass("FlagMetadata(string) - no config")
+		}
+	} else {
+		results.Pass("FlagMetadata(string) - no metadata")
+	}
+
+	boolDetails, err := client.BooleanValueDetails(ctx, "feature_boolean_on", false, evalCtx)
+	if err != nil {
+		results.Fail("FlagMetadata(boolean)", err.Error())
+		return
+	}
+
+	if boolDetails.FlagMetadata != nil { // presence of metadata is optional; both branches pass
+		slog.Info("flag metadata in BooleanDetails",
+			"flag", "feature_boolean_on",
+			"treatment", boolDetails.Variant,
+			"has_metadata", len(boolDetails.FlagMetadata) > 0)
+		results.Pass("FlagMetadata(boolean)")
+	} else {
+		results.Pass("FlagMetadata(boolean) - no metadata")
+	}
+
+	intDetails, err := client.IntValueDetails(ctx, "max_retries", 3, evalCtx)
+	if err != nil {
+		results.Fail("FlagMetadata(int)", err.Error())
+		return
+	}
+
+	if intDetails.FlagMetadata != nil {
+		slog.Info("flag metadata in IntDetails",
+			"flag", "max_retries",
+			"value", intDetails.Value,
+			"has_metadata", len(intDetails.FlagMetadata) > 0)
+		results.Pass("FlagMetadata(int)")
+	} else {
+		results.Pass("FlagMetadata(int) - no metadata")
+	}
+
+	floatDetails, err := client.FloatValueDetails(ctx, "timeout_seconds", 5.0, evalCtx)
+	if err != nil {
+		results.Fail("FlagMetadata(float)", err.Error())
+		return
+	}
+
+	if floatDetails.FlagMetadata != nil {
+		slog.Info("flag metadata in FloatDetails",
+			"flag", "timeout_seconds",
+			"value", floatDetails.Value,
+			"has_metadata", len(floatDetails.FlagMetadata) > 0)
+		results.Pass("FlagMetadata(float)")
+	} else {
+		results.Pass("FlagMetadata(float) - no metadata")
+	}
+
+	details, err := client.StringValueDetails(ctx, "api_version", "v1", evalCtx)
+	if err != nil {
+		results.Fail("FlagMetadata(wrapped)", err.Error())
+		return
+	}
+
+	if details.FlagMetadata != nil {
+		if wrappedValue, ok := details.FlagMetadata["value"]; ok { // provider wraps the flag config under the "value" key
+			slog.Info("config accessible via 'value' key",
+				"flag", "api_version",
+				"value", wrappedValue)
+			results.Pass("FlagMetadata(wrapped)")
+		} else {
+			results.Pass("FlagMetadata(wrapped) - no value key")
+		}
+	} else {
+		results.Pass("FlagMetadata(wrapped) - no metadata")
+	}
+}
+
+// testFlagSetEvaluation tests flag set evaluation (cloud mode only)
+func testFlagSetEvaluation(ctx context.Context, client *openfeature.Client) {
+	evalCtx := openfeature.NewEvaluationContext("test-user", nil)
+
+	// ============================================================
+	// Test 1: Basic flag set evaluation
+	// ============================================================
+	flagSet := "split_provider_test"
+	slog.Info("evaluating flag set", "flag_set", flagSet)
+
+	result, err := client.ObjectValue(ctx, flagSet, split.FlagSetResult{}, evalCtx) // the flag-set name is passed where a flag key normally goes
+	if err != nil {
+		results.Fail("FlagSet(evaluation)", err.Error())
+		return
+	}
+
+	flags, ok := result.(split.FlagSetResult)
+	if !ok {
+		results.Fail("FlagSet(type)", fmt.Sprintf("expected FlagSetResult, got %T", result))
+		return
+	}
+
+	// Should have at least 2 flags (ui_theme and api_version)
+	if len(flags) < 2 {
+		results.Fail("FlagSet(count)", fmt.Sprintf("expected at least 2 flags, got %d", len(flags)))
+		return
+	}
+	results.Pass(fmt.Sprintf("FlagSet(count=%d)", len(flags)))
+
+	// ============================================================
+	// Test 2: Verify flag structure (Treatment and Config fields)
+	// ============================================================
+	if uiTheme, ok := flags["ui_theme"]; ok {
+		slog.Info("flag in set", "flag", "ui_theme", "treatment", uiTheme.Treatment)
+		results.Pass("FlagSet(ui_theme_treatment)")
+		// Config field always exists in FlagResult struct
+		results.Pass("FlagSet(ui_theme_config)")
+	} else {
+		results.Fail("FlagSet(ui_theme)", "flag not found in set")
+	}
+
+	// ============================================================
+	// Test 3: Verify second flag in set
+	// ============================================================
+	if apiVersion, ok := flags["api_version"]; ok {
+		slog.Info("flag in set", "flag", "api_version", "treatment", apiVersion.Treatment)
+		results.Pass("FlagSet(api_version)")
+	} else {
+		results.Fail("FlagSet(api_version)", "flag not found in set")
+	}
+
+	// ============================================================
+	// Test 4: Flag set with targeting attributes
+	// ============================================================
+	evalCtxWithAttr := openfeature.NewEvaluationContext("test-user-2", map[string]any{
+		"variant": "two",
+	})
+
+	result2, err := client.ObjectValue(ctx, flagSet, split.FlagSetResult{}, evalCtxWithAttr)
+	if err != nil {
+		results.Fail("FlagSet(targeting)", err.Error())
+		return
+	}
+
+	flags2, ok := result2.(split.FlagSetResult)
+	if !ok {
+		results.Fail("FlagSet(targeting_type)", fmt.Sprintf("expected FlagSetResult, got %T", result2))
+		return
+	}
+
+	// Verify ui_theme returns "light" when variant=two (targeting rule)
+	if uiTheme, ok := flags2["ui_theme"]; ok { // expected treatments depend on rules configured in the Split dashboard
+		if uiTheme.Treatment == "light" {
+			results.Pass("FlagSet(targeting_ui_theme)")
+		} else {
+			results.Fail("FlagSet(targeting_ui_theme)", fmt.Sprintf("expected light, got %s", uiTheme.Treatment))
+		}
+	} else {
+		results.Fail("FlagSet(targeting_ui_theme)", "flag not found")
+	}
+
+	// Verify api_version returns "v1" when variant=two (targeting rule)
+	if apiVersion, ok := flags2["api_version"]; ok {
+		if apiVersion.Treatment == "v1" {
+			results.Pass("FlagSet(targeting_api_version)")
+		} else {
+			results.Fail("FlagSet(targeting_api_version)", fmt.Sprintf("expected v1, got %s", apiVersion.Treatment))
+		}
+	} else {
+		results.Fail("FlagSet(targeting_api_version)", "flag not found")
+	}
+
+	// ============================================================
+	// Test 5: Non-existent flag set returns default
+	// ============================================================
+	result3, err := client.ObjectValue(ctx, "non_existent_flag_set", split.FlagSetResult{}, evalCtx)
+	if err != nil {
+		// Error is acceptable for non-existent flag set
+		results.Pass("FlagSet(non_existent_error)")
+	} else {
+		// Should return default value (empty FlagSetResult)
+		if resultFlags, ok := result3.(split.FlagSetResult); ok {
+			if len(resultFlags) == 0 {
+				results.Pass("FlagSet(non_existent_empty)")
+			} else {
+				results.Fail("FlagSet(non_existent)", "unexpected non-empty result")
+			}
+		} else {
+			results.Fail("FlagSet(non_existent)", fmt.Sprintf("expected FlagSetResult, got %T", result3))
+		}
+	}
+
+	// ============================================================
+	// Test 6: ObjectValueDetails for flag set
+	// ============================================================
+	details, err := client.ObjectValueDetails(ctx, flagSet, split.FlagSetResult{}, evalCtx)
+	if err != nil {
+		results.Fail("FlagSet(details)", err.Error())
+		return
+	}
+
+	// Verify reason is TARGETING_MATCH
+	if details.Reason == openfeature.TargetingMatchReason {
+		results.Pass("FlagSet(details_reason)")
+	} else {
+		results.Fail("FlagSet(details_reason)", fmt.Sprintf("expected TARGETING_MATCH, got %s", details.Reason))
+	}
+
+	// Verify variant is the flag set name
+	if details.Variant == flagSet {
+		results.Pass("FlagSet(details_variant)")
+	} else {
+		results.Fail("FlagSet(details_variant)", fmt.Sprintf("expected %s, got %s", flagSet, details.Variant))
+	}
+
+	// Verify value is a FlagSetResult with flags
+	if detailsValue, ok := details.Value.(split.FlagSetResult); ok {
+		if len(detailsValue) >= 2 { // same minimum as Test 1: ui_theme + api_version
+			results.Pass("FlagSet(details_value)")
+		} else {
+			results.Fail("FlagSet(details_value)", fmt.Sprintf("expected at least 2 flags, got %d", len(detailsValue)))
+		}
+	} else {
+		results.Fail("FlagSet(details_value)", fmt.Sprintf("expected FlagSetResult, got %T", details.Value))
+	}
+}
+
+// testAttributeTargeting tests targeting with evaluation context attributes
+func testAttributeTargeting(ctx context.Context, client *openfeature.Client) {
+	evalCtx1 := openfeature.NewEvaluationContext("test-user", map[string]any{
+		"email": "vip@example.com",
+		"plan":  "enterprise",
+		"age":   int64(30),
+	})
+
+	value, err := client.StringValue(ctx, "ui_theme", "light", evalCtx1)
+	if err != nil {
+		results.Fail("Attributes(with_attrs)", err.Error())
+	} else if value != "dark" { // expected value comes from the targeting rules configured for this flag — verify against the flag definition
+		results.Fail("Attributes(with_attrs)", fmt.Sprintf("expected dark, got %s", value))
+	} else {
+		results.Pass("Attributes(with_attrs)")
+	}
+
+	evalCtx2 := openfeature.NewEvaluationContext("another-user", map[string]any{
+		"email":   "user@example.com",
+		"plan":    "basic",
+		"premium": false,
+	})
+
+	value, err = client.StringValue(ctx, "api_version", "v1", evalCtx2)
+	if err != nil {
+		results.Fail("Attributes(different_user)", err.Error())
+	} else if value != "v2" { // likewise tied to the configured rollout for this targeting key/attributes
+		results.Fail("Attributes(different_user)", fmt.Sprintf("expected v2, got %s", value))
+	} else {
+		results.Pass("Attributes(different_user)")
+	}
+}
+
+// testContextCancellation tests behavior when context is cancelled
+func testContextCancellation(client *openfeature.Client) {
+	// A 1ns deadline expires effectively immediately; the sleep below guarantees it has.
+	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
+	defer cancel()
+
+	time.Sleep(10 * time.Millisecond)
+
+	evalCtx := openfeature.NewEvaluationContext("test-user", nil)
+	value, err := client.BooleanValue(ctx, "feature_boolean_on", false, evalCtx)
+
+	if err == nil {
+		results.Fail("Context(cancellation)", "expected error for cancelled context")
+	} else if value { // simplified from `value != false`; must fall back to the default (false)
+		results.Fail("Context(cancellation)", "should return default value on cancellation")
+	} else {
+		results.Pass("Context(cancellation)")
+	}
+}
+
+// testErrorHandling tests error handling for invalid inputs
+func testErrorHandling(ctx context.Context, client *openfeature.Client) {
+	evalCtx := openfeature.NewEvaluationContext("", nil) // empty targeting key is invalid for Split
+	value, err := client.BooleanValue(ctx, "feature_boolean_on", false, evalCtx)
+
+	if err == nil {
+		results.Fail("Error(missing_key)", "expected error for empty targeting key")
+	} else if value { // simplified from `value != false`; default must be returned on error
+		results.Fail("Error(missing_key)", "should return default on error")
+	} else {
+		results.Pass("Error(missing_key)")
+	}
+
+	evalCtx2 := openfeature.NewEvaluationContext("test-user", nil)
+	value, err = client.BooleanValue(ctx, "non_existent_flag", true, evalCtx2)
+
+	if err == nil {
+		results.Fail("Error(non_existent)", "expected error for non-existent flag")
+	} else if !value { // simplified from `value != true`; default (true) must be returned
+		results.Fail("Error(non_existent)", "should return default for non-existent flag")
+	} else {
+		results.Pass("Error(non_existent)")
+	}
+}
+
+// testObjectEvaluationMode tests EvaluationMode context options in ObjectEvaluation.
+// In localhost mode, EvaluationModeSet is ignored (always uses individual).
+// In cloud mode, EvaluationModeIndividual forces single flag evaluation.
+//
+// Key insight: flags in a flag set also exist as individual flags in Split,
+// so "ui_theme" works for both individual and flag set evaluation.
+func testObjectEvaluationMode(ctx context.Context, provider *split.Provider, isLocalhostMode bool) {
+	evalCtx := openfeature.FlattenedContext{
+		openfeature.TargetingKey: "test-user",
+	}
+
+	// Use "ui_theme" for individual evaluation (exists in both localhost split.yaml and cloud dashboard).
+	// Use "split_provider_test" for flag set evaluation in cloud (contains ui_theme and other flags).
+	individualFlag := "ui_theme"
+	flagSet := "split_provider_test"
+
+	// Test 1: EvaluationModeIndividual should evaluate a single flag in both modes
+	individualCtx := split.WithEvaluationMode(ctx, split.EvaluationModeIndividual)
+	result := provider.ObjectEvaluation(individualCtx, individualFlag, nil, evalCtx) // provider API called directly; the OpenFeature client is bypassed here
+
+	flags, ok := result.Value.(split.FlagSetResult)
+	if !ok {
+		results.Fail("ObjectMode(individual)", fmt.Sprintf("expected FlagSetResult, got %T", result.Value))
+	} else if len(flags) != 1 { // individual mode must yield exactly one entry
+		results.Fail("ObjectMode(individual)", fmt.Sprintf("expected 1 flag, got %d", len(flags)))
+	} else {
+		results.Pass("ObjectMode(individual)")
+	}
+
+	// Test 2: EvaluationModeSet in localhost should be ignored (still evaluates single flag)
+	if isLocalhostMode {
+		setCtx := split.WithEvaluationMode(ctx, split.EvaluationModeSet)
+		result = provider.ObjectEvaluation(setCtx, individualFlag, nil, evalCtx)
+
+		flags, ok = result.Value.(split.FlagSetResult)
+		if !ok {
+			results.Fail("ObjectMode(set_ignored_localhost)", fmt.Sprintf("expected FlagSetResult, got %T", result.Value))
+		} else if len(flags) != 1 {
+			results.Fail("ObjectMode(set_ignored_localhost)", fmt.Sprintf("expected 1 flag (set mode ignored), got %d", len(flags)))
+		} else {
+			results.Pass("ObjectMode(set_ignored_localhost)")
+		}
+	}
+
+	// Test 3: Default mode should work
+	// Localhost default: evaluates individual flag
+	// Cloud default: evaluates flag set
+	defaultKey := individualFlag
+	if !isLocalhostMode {
+		defaultKey = flagSet
+	}
+	result = provider.ObjectEvaluation(ctx, defaultKey, nil, evalCtx) // no mode option on ctx: exercises the provider's default
+	flags, ok = result.Value.(split.FlagSetResult)
+	if !ok {
+		results.Fail("ObjectMode(default)", fmt.Sprintf("expected FlagSetResult, got %T", result.Value))
+	} else if len(flags) == 0 {
+		results.Fail("ObjectMode(default)", "expected at least 1 flag")
+	} else {
+		results.Pass("ObjectMode(default)")
+	}
+}
diff --git a/test/integration/lifecycle.go b/test/integration/lifecycle.go
new file mode 100644
index 0000000..2f48a0a
--- /dev/null
+++ b/test/integration/lifecycle.go
@@ -0,0 +1,538 @@
+// lifecycle.go contains provider lifecycle tests.
+// Tests cover initialization, shutdown, named providers, concurrent init,
+// timeout handling, status atomicity, and idempotent operations.
+package main
+
+import (
+ "context"
+ "fmt"
+ "log/slog"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/open-feature/go-sdk/openfeature"
+ "github.com/splitio/go-client/v6/splitio/conf"
+
+ "github.com/splitio/split-openfeature-provider-go/v2"
+)
+
+// testInitAfterShutdown tests that init fails after shutdown
+func testInitAfterShutdown(apiKey string, logger *slog.Logger, cfg *conf.SplitSdkConfig) {
+
+	testProvider, err := split.New(apiKey, split.WithLogger(logger), split.WithSplitConfig(cfg))
+	if err != nil {
+		results.Fail("InitAfterShutdown(create)", fmt.Sprintf("failed to create: %v", err))
+		return
+	}
+
+	initCtx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
+	defer cancel()
+	evalCtx := openfeature.NewEvaluationContext("test-user", nil)
+
+	if err := testProvider.InitWithContext(initCtx, evalCtx); err != nil {
+		results.Fail("InitAfterShutdown(init)", fmt.Sprintf("init failed: %v", err))
+		testProvider.Shutdown() // best-effort cleanup before bailing out
+		return
+	}
+
+	shutdownCtx, cancel2 := context.WithTimeout(context.Background(), 15*time.Second)
+	defer cancel2()
+
+	if err := testProvider.ShutdownWithContext(shutdownCtx); err != nil {
+		// In cloud mode with SSE streaming, the Split SDK has a known hang bug
+		// that can cause shutdown to timeout. Accept this as valid for cloud mode.
+		if apiKey != "localhost" && strings.Contains(err.Error(), "context deadline exceeded") {
+			// Continue with test - shutdown timeout is acceptable in cloud mode
+		} else {
+			results.Fail("InitAfterShutdown(shutdown)", fmt.Sprintf("shutdown failed: %v", err))
+			return
+		}
+	}
+
+	initCtx2, cancel3 := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel3()
+
+	err = testProvider.InitWithContext(initCtx2, evalCtx)
+
+	if err == nil {
+		results.Fail("InitAfterShutdown", "expected error, got nil")
+	} else if !strings.Contains(err.Error(), "cannot initialize provider after shutdown") { // error text is treated as part of the provider contract here
+		results.Fail("InitAfterShutdown", fmt.Sprintf("wrong error message: %v", err))
+	} else {
+		results.Pass("InitAfterShutdown")
+	}
+}
+
+// testNamedProvider tests creating and using a named provider
+func testNamedProvider(ctx context.Context, apiKey string, logger *slog.Logger, cfg *conf.SplitSdkConfig) {
+	// Create a named provider
+	namedProvider, err := split.New(apiKey, split.WithLogger(logger), split.WithSplitConfig(cfg))
+	if err != nil {
+		results.Fail("NamedProvider(create)", fmt.Sprintf("failed to create: %v", err))
+		return
+	}
+	defer namedProvider.Shutdown()
+
+	initCtx, cancel := context.WithTimeout(ctx, 15*time.Second)
+	defer cancel()
+
+	if err := openfeature.SetNamedProviderWithContextAndWait(initCtx, "test-split", namedProvider); err != nil {
+		results.Fail("NamedProvider(init)", fmt.Sprintf("failed to initialize: %v", err))
+		return
+	}
+	results.Pass("NamedProvider(init)")
+
+	namedClient := openfeature.NewClient("test-split")
+	evalCtx := openfeature.NewEvaluationContext("test-user", nil)
+
+	// Test evaluation with named client
+	value, err := namedClient.BooleanValue(ctx, "feature_boolean_on", false, evalCtx)
+	if err != nil {
+		results.Fail("NamedProvider(evaluation)", fmt.Sprintf("evaluation failed: %v", err))
+		return
+	}
+
+	if !value { // simplified from `value != true`; this flag is expected to evaluate to true
+		results.Fail("NamedProvider(value)", fmt.Sprintf("expected true, got %v", value))
+	} else {
+		results.Pass("NamedProvider(evaluation)")
+	}
+
+	// Cleanup happens via defer namedProvider.Shutdown()
+	results.Pass("NamedProvider(cleanup)")
+}
+
+// testConcurrentInit tests concurrent InitWithContext calls use singleflight
+func testConcurrentInit(ctx context.Context, apiKey string, logger *slog.Logger, cfg *conf.SplitSdkConfig) {
+	// Create a provider but don't initialize
+	concurrentProvider, err := split.New(apiKey, split.WithLogger(logger), split.WithSplitConfig(cfg))
+	if err != nil {
+		results.Fail("ConcurrentInit(create)", fmt.Sprintf("failed to create: %v", err))
+		return
+	}
+	defer concurrentProvider.Shutdown()
+
+	// Launch 10 concurrent InitWithContext calls
+	var wg sync.WaitGroup
+	initErrs := make(chan error, 10) // renamed from "errors" to avoid shadowing the stdlib package name
+	evalCtx := openfeature.NewEvaluationContext("test-user", nil)
+
+	for i := 0; i < 10; i++ {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			initCtx, cancel := context.WithTimeout(ctx, 15*time.Second)
+			defer cancel()
+			initErrs <- concurrentProvider.InitWithContext(initCtx, evalCtx)
+		}()
+	}
+
+	wg.Wait()
+	close(initErrs)
+
+	// All should succeed (singleflight ensures only one actual init)
+	successCount := 0
+	for err := range initErrs {
+		if err == nil {
+			successCount++
+		}
+	}
+
+	if successCount == 10 {
+		results.Pass("ConcurrentInit(singleflight)")
+	} else {
+		results.Fail("ConcurrentInit(singleflight)", fmt.Sprintf("only %d/10 succeeded", successCount))
+	}
+}
+
+// testProviderNotReadyError tests PROVIDER_NOT_READY error code via OpenFeature SDK
+func testProviderNotReadyError() {
+	// Use invalid API key so the provider never becomes ready
+	uninitProvider, err := split.New("invalid-key-for-not-ready-test")
+	if err != nil {
+		results.Fail("ProviderNotReady(create)", fmt.Sprintf("failed to create: %v", err))
+		return
+	}
+	defer uninitProvider.Shutdown()
+
+	// Use a named provider to avoid interfering with the default provider
+	ctx := context.Background()
+	evalCtx := openfeature.NewEvaluationContext("test-user", nil)
+
+	// Set as named provider (non-blocking) and immediately try to evaluate
+	openfeature.SetNamedProvider("not-ready-test", uninitProvider)
+	client := openfeature.NewClient("not-ready-test")
+
+	details, err := client.BooleanValueDetails(ctx, "some-flag", false, evalCtx)
+
+	if err == nil {
+		results.Fail("ProviderNotReady(error)", "expected error, got nil")
+		return
+	}
+
+	// Check error code - OpenFeature should return PROVIDER_NOT_READY
+	if details.ErrorCode != openfeature.ProviderNotReadyCode {
+		results.Fail("ProviderNotReady(error_code)",
+			fmt.Sprintf("expected PROVIDER_NOT_READY, got %v", details.ErrorCode))
+	} else {
+		results.Pass("ProviderNotReady(error_code)")
+	}
+
+	// Should return default value
+	if details.Value { // simplified from `details.Value != false`; default here is false
+		results.Fail("ProviderNotReady(default)", fmt.Sprintf("expected default false, got %v", details.Value))
+	} else {
+		results.Pass("ProviderNotReady(default_value)")
+	}
+}
+
+// testTrivialGetters tests Metadata() and Hooks() methods
+func testTrivialGetters(provider *split.Provider) {
+	// Test Metadata()
+	metadata := provider.Metadata()
+	if metadata.Name != "Split" { // provider name is fixed by the implementation
+		results.Fail("Metadata(name)", fmt.Sprintf("expected Split, got %s", metadata.Name))
+	} else {
+		results.Pass("Metadata(name)")
+	}
+
+	// Test Hooks() - should return nil
+	hooks := provider.Hooks()
+	if hooks != nil { // this provider registers no hooks
+		results.Fail("Hooks()", fmt.Sprintf("expected nil, got %v", hooks))
+	} else {
+		results.Pass("Hooks()")
+	}
+}
+
+// testInitWithContextTimeout tests InitWithContext with timeout expiration
+func testInitWithContextTimeout() {
+	// Create provider with invalid API key that will never become ready
+	timeoutProvider, err := split.New("invalid-key-that-will-timeout")
+	if err != nil {
+		results.Fail("InitTimeout(create)", fmt.Sprintf("failed to create: %v", err))
+		return
+	}
+	defer timeoutProvider.Shutdown()
+
+	// Use very short timeout that will expire
+	initCtx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
+	defer cancel()
+
+	evalCtx := openfeature.NewEvaluationContext("test-user", nil)
+	err = timeoutProvider.InitWithContext(initCtx, evalCtx)
+
+	if err == nil {
+		results.Fail("InitTimeout(error)", "expected timeout error, got nil")
+	} else if strings.Contains(err.Error(), "context deadline exceeded") ||
+		strings.Contains(err.Error(), "initialization cancelled") { // either wording is accepted; both indicate ctx-driven abort
+		results.Pass("InitTimeout(context_cancelled)")
+	} else {
+		results.Fail("InitTimeout(error_message)", fmt.Sprintf("unexpected error: %v", err))
+	}
+}
+
+// testShutdownWithContextTimeout tests ShutdownWithContext with timeout
+func testShutdownWithContextTimeout(apiKey string, logger *slog.Logger, cfg *conf.SplitSdkConfig) {
+	isLocalhostMode := apiKey == "localhost"
+
+	shutdownProvider, err := split.New(apiKey, split.WithLogger(logger), split.WithSplitConfig(cfg))
+	if err != nil {
+		results.Fail("ShutdownTimeout(create)", fmt.Sprintf("failed to create: %v", err))
+		return
+	}
+
+	initCtx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
+	defer cancel()
+	evalCtx := openfeature.NewEvaluationContext("test-user", nil)
+
+	if err := shutdownProvider.InitWithContext(initCtx, evalCtx); err != nil {
+		results.Fail("ShutdownTimeout(init)", fmt.Sprintf("init failed: %v", err))
+		shutdownProvider.Shutdown() // best-effort cleanup before bailing out
+		return
+	}
+
+	// In localhost mode, use very short timeout to test best-effort behavior
+	// In cloud mode, use longer timeout due to SSE streaming cleanup
+	var shutdownTimeout time.Duration
+	if isLocalhostMode {
+		shutdownTimeout = 1 * time.Millisecond
+	} else {
+		shutdownTimeout = 100 * time.Millisecond
+	}
+
+	shutdownCtx, cancel2 := context.WithTimeout(context.Background(), shutdownTimeout)
+	defer cancel2()
+
+	err = shutdownProvider.ShutdownWithContext(shutdownCtx)
+
+	// In localhost mode, shutdown should succeed quickly (best-effort)
+	// In cloud mode with SSE streaming, context timeout is expected
+	if err != nil {
+		if strings.Contains(err.Error(), "context deadline exceeded") {
+			if isLocalhostMode {
+				// Localhost mode should succeed quickly
+				results.Fail("ShutdownTimeout(error)", "localhost mode should shutdown quickly")
+			} else {
+				// Cloud mode timeout is expected due to SSE streaming
+				results.Pass("ShutdownTimeout(context_timeout_cloud)")
+			}
+		} else {
+			results.Fail("ShutdownTimeout(error)", fmt.Sprintf("unexpected error: %v", err))
+		}
+	} else {
+		results.Pass("ShutdownTimeout(best_effort)")
+	}
+
+	// Provider should be shut down even if timeout expired
+	if shutdownProvider.Status() != openfeature.NotReadyState {
+		results.Fail("ShutdownTimeout(status)", "provider should be NotReady after shutdown")
+	} else {
+		results.Pass("ShutdownTimeout(status)")
+	}
+}
+
+// testShutdownDuringInit tests shutdown called during initialization
+func testShutdownDuringInit(apiKey string, logger *slog.Logger, cfg *conf.SplitSdkConfig) {
+	// Create provider with slow initialization
+	slowProvider, err := split.New(apiKey, split.WithLogger(logger), split.WithSplitConfig(cfg))
+	if err != nil {
+		results.Fail("ShutdownDuringInit(create)", fmt.Sprintf("failed to create: %v", err))
+		return
+	}
+
+	// Start init in background
+	initDone := make(chan error, 1)
+	go func() {
+		initCtx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
+		defer cancel()
+		evalCtx := openfeature.NewEvaluationContext("test-user", nil)
+		initDone <- slowProvider.InitWithContext(initCtx, evalCtx)
+	}()
+
+	// Give init a moment to start
+	time.Sleep(100 * time.Millisecond)
+
+	// Shutdown while init is running
+	shutdownCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+	defer cancel()
+
+	shutdownErr := slowProvider.ShutdownWithContext(shutdownCtx)
+	if shutdownErr != nil {
+		results.Fail("ShutdownDuringInit(shutdown)", fmt.Sprintf("shutdown failed: %v", shutdownErr))
+	} else {
+		results.Pass("ShutdownDuringInit(shutdown)")
+	}
+
+	initErr := <-initDone // wait for the background init goroutine to finish either way
+	if initErr != nil {
+		// Init should fail because shutdown happened
+		results.Pass("ShutdownDuringInit(init_fails)")
+	} else {
+		// Or init might succeed before shutdown - both acceptable
+		results.Pass("ShutdownDuringInit(init_race)")
+	}
+
+	// Provider should be shut down
+	if slowProvider.Status() != openfeature.NotReadyState {
+		results.Fail("ShutdownDuringInit(final_status)", "expected NotReady after shutdown")
+	} else {
+		results.Pass("ShutdownDuringInit(final_status)")
+	}
+}
+
+// testProviderWithNilConfig tests provider creation with nil config
+func testProviderWithNilConfig(apiKey string, logger *slog.Logger) {
+	// For localhost mode, we still need to configure the split file
+	// This test validates that WithSplitConfig is optional (uses defaults)
+	// but we configure the split file if in localhost mode
+	var opts []split.Option
+	opts = append(opts, split.WithLogger(logger))
+
+	if apiKey == "localhost" {
+		cfg := split.TestConfig()
+		cfg.SplitFile = "./split.yaml"
+		opts = append(opts, split.WithSplitConfig(cfg))
+	}
+
+	nilConfigProvider, err := split.New(apiKey, opts...) // in cloud mode no WithSplitConfig is passed at all
+	if err != nil {
+		results.Fail("NilConfig(create)", fmt.Sprintf("failed to create: %v", err))
+		return
+	}
+	defer nilConfigProvider.Shutdown()
+
+	results.Pass("NilConfig(uses_defaults)")
+
+	// Initialize and verify it works
+	initCtx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
+	defer cancel()
+	evalCtx := openfeature.NewEvaluationContext("test-user", nil)
+
+	if err := nilConfigProvider.InitWithContext(initCtx, evalCtx); err != nil {
+		results.Fail("NilConfig(init)", fmt.Sprintf("init failed: %v", err))
+	} else {
+		results.Pass("NilConfig(init)")
+	}
+}
+
+// testBlockUntilReadyZero tests BlockUntilReady=0 uses default timeout
+func testBlockUntilReadyZero(apiKey string, logger *slog.Logger) {
+	// Create optimized test config with BlockUntilReady=0 to test default behavior
+	cfg := split.TestConfig()
+	cfg.BlockUntilReady = 0 // Should use default 10s timeout
+
+	// Configure split file for localhost mode
+	if apiKey == "localhost" {
+		cfg.SplitFile = "./split.yaml"
+	}
+
+	zeroProvider, err := split.New(apiKey, split.WithLogger(logger), split.WithSplitConfig(cfg))
+	if err != nil {
+		results.Fail("BlockUntilReadyZero(create)", fmt.Sprintf("failed to create: %v", err))
+		return
+	}
+	defer zeroProvider.Shutdown()
+
+	results.Pass("BlockUntilReadyZero(create)")
+
+	// Should use default 10s timeout
+	initCtx, cancel := context.WithTimeout(context.Background(), 15*time.Second) // outer 15s budget exceeds the expected 10s default
+	defer cancel()
+	evalCtx := openfeature.NewEvaluationContext("test-user", nil)
+
+	if err := zeroProvider.InitWithContext(initCtx, evalCtx); err != nil {
+		results.Fail("BlockUntilReadyZero(init)", fmt.Sprintf("init failed: %v", err))
+	} else {
+		results.Pass("BlockUntilReadyZero(init_with_default)")
+	}
+}
+
+// testStatusAtomicity tests Status() method atomicity during lifecycle
+func testStatusAtomicity(apiKey string, logger *slog.Logger, cfg *conf.SplitSdkConfig) {
+	// Create provider
+	statusProvider, err := split.New(apiKey, split.WithLogger(logger), split.WithSplitConfig(cfg))
+	if err != nil {
+		results.Fail("StatusAtomicity(create)", fmt.Sprintf("failed to create: %v", err))
+		return
+	}
+	defer statusProvider.Shutdown()
+
+	// Status should be NotReady before init
+	if statusProvider.Status() != openfeature.NotReadyState {
+		results.Fail("StatusAtomicity(before_init)", "expected NotReady before init")
+	} else {
+		results.Pass("StatusAtomicity(before_init)")
+	}
+
+	// Initialize
+	initCtx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
+	defer cancel()
+	evalCtx := openfeature.NewEvaluationContext("test-user", nil)
+
+	if err := statusProvider.InitWithContext(initCtx, evalCtx); err != nil {
+		results.Fail("StatusAtomicity(init)", fmt.Sprintf("init failed: %v", err))
+		return
+	}
+
+	// Status should be Ready after init
+	if statusProvider.Status() != openfeature.ReadyState {
+		results.Fail("StatusAtomicity(after_init)", "expected Ready after init")
+	} else {
+		results.Pass("StatusAtomicity(after_init)")
+	}
+
+	// Call Status() concurrently during shutdown
+	var wg sync.WaitGroup
+	statusResults := make([]openfeature.State, 100)
+
+	for i := 0; i < 100; i++ {
+		wg.Add(1)
+		go func(idx int) {
+			defer wg.Done()
+			statusResults[idx] = statusProvider.Status()
+		}(i)
+	}
+
+	// Shutdown while Status() is being called
+	shutdownCtx, cancel2 := context.WithTimeout(context.Background(), 10*time.Second)
+	defer cancel2()
+	_ = statusProvider.ShutdownWithContext(shutdownCtx) // error intentionally discarded: this test asserts status transitions, not shutdown success
+
+	wg.Wait()
+
+	// All status calls should return either Ready or NotReady (atomic, no invalid states)
+	allValid := true
+	for _, state := range statusResults {
+		if state != openfeature.ReadyState && state != openfeature.NotReadyState {
+			allValid = false
+			break
+		}
+	}
+
+	if !allValid {
+		results.Fail("StatusAtomicity(during_shutdown)", "invalid state detected")
+	} else {
+		results.Pass("StatusAtomicity(during_shutdown)")
+	}
+
+	// Final status should be NotReady
+	if statusProvider.Status() != openfeature.NotReadyState {
+		results.Fail("StatusAtomicity(after_shutdown)", "expected NotReady after shutdown")
+	} else {
+		results.Pass("StatusAtomicity(after_shutdown)")
+	}
+}
+
+// testDoubleShutdown tests shutdown idempotency
+func testDoubleShutdown(apiKey string, logger *slog.Logger, cfg *conf.SplitSdkConfig) {
+
+	doubleProvider, err := split.New(apiKey, split.WithLogger(logger), split.WithSplitConfig(cfg))
+	if err != nil {
+		results.Fail("DoubleShutdown(create)", fmt.Sprintf("failed to create: %v", err))
+		return
+	}
+
+	initCtx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
+	defer cancel()
+	evalCtx := openfeature.NewEvaluationContext("test-user", nil)
+
+	if err := doubleProvider.InitWithContext(initCtx, evalCtx); err != nil {
+		results.Fail("DoubleShutdown(init)", fmt.Sprintf("init failed: %v", err))
+		doubleProvider.Shutdown() // best-effort cleanup before bailing out
+		return
+	}
+
+	// First shutdown - in cloud mode with SSE streaming, the Split SDK has a known
+	// hang bug, so we accept context deadline exceeded as a valid outcome
+	shutdownCtx1, cancel1 := context.WithTimeout(context.Background(), 15*time.Second)
+	defer cancel1()
+	err1 := doubleProvider.ShutdownWithContext(shutdownCtx1)
+	if err1 != nil {
+		if strings.Contains(err1.Error(), "context deadline exceeded") {
+			results.Pass("DoubleShutdown(first_timeout_sdk_bug)")
+		} else {
+			results.Fail("DoubleShutdown(first)", fmt.Sprintf("first shutdown failed: %v", err1))
+		}
+		return // idempotency check is skipped when the first shutdown did not complete cleanly
+	}
+	results.Pass("DoubleShutdown(first)")
+
+	// Second shutdown - should be idempotent
+	shutdownCtx2, cancel2 := context.WithTimeout(context.Background(), 10*time.Second)
+	defer cancel2()
+	err2 := doubleProvider.ShutdownWithContext(shutdownCtx2)
+	if err2 != nil {
+		results.Fail("DoubleShutdown(second)", fmt.Sprintf("second shutdown failed: %v", err2))
+	} else {
+		results.Pass("DoubleShutdown(idempotent)")
+	}
+
+	// Status should still be NotReady
+	if doubleProvider.Status() != openfeature.NotReadyState {
+		results.Fail("DoubleShutdown(status)", "expected NotReady after double shutdown")
+	} else {
+		results.Pass("DoubleShutdown(status)")
+	}
+}
diff --git a/test/integration/main.go b/test/integration/main.go
new file mode 100644
index 0000000..a7c20df
--- /dev/null
+++ b/test/integration/main.go
@@ -0,0 +1,428 @@
+// Package main is a comprehensive integration test suite for the Split OpenFeature Provider.
+//
+// This test suite validates ALL provider functionality and serves as both
+// integration testing and a reference implementation. It demonstrates:
+//
+// - Custom Split SDK configuration
+// - Structured logging with slog
+// - Event handling (PROVIDER_READY, PROVIDER_ERROR, PROVIDER_CONFIGURATION_CHANGED)
+// - Graceful shutdown with context cancellation
+// - All evaluation types (boolean, string, int, float, object)
+// - Evaluation details (variant, reason)
+// - Targeting with attributes
+// - Context cancellation and timeout handling
+// - Flag metadata (configurations attached to flags)
+// - Flag set evaluation (cloud mode only)
+// - Direct Split SDK access (Track, Treatments)
+// - Concurrent evaluations (100 goroutines x 10 evaluations)
+// - Comprehensive error handling
+//
+// This test suite supports both localhost mode and real Split API keys:
+//
+// Run with localhost mode: go run .
+// Run with Split API key: SPLIT_API_KEY=your-key-here go run .
+//
+// Exit codes:
+// - 0: All tests passed
+// - 1: One or more tests failed
+// - 2: Timeout or fatal error
+package main
+
+import (
+ "context"
+ "fmt"
+ "log/slog"
+ "os"
+ "os/signal"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "syscall"
+ "time"
+
+ "github.com/lmittmann/tint"
+ "github.com/open-feature/go-sdk/openfeature"
+ "github.com/open-feature/go-sdk/openfeature/hooks"
+ "github.com/splitio/go-client/v6/splitio/conf"
+
+ "github.com/splitio/split-openfeature-provider-go/v2"
+)
+
+// main drives the full integration suite: it wires logging, hooks, event
+// handlers, and signal handling; initializes the provider; runs every test;
+// prints a summary; and exits with 0 (all passed), 1 (failures), or 2
+// (fatal error, cleanup problem, or no tests run).
+func main() {
+	fmt.Println(strings.Repeat("=", 60))
+	fmt.Println(" Split OpenFeature Provider - Integration Test Suite")
+	fmt.Println(" Comprehensive Validation & Reference Implementation")
+	fmt.Println(strings.Repeat("=", 60))
+	fmt.Println()
+
+	// ============================================================
+	// SETUP: CONTEXT WITH TIMEOUT AND SIGNAL HANDLING
+	// ============================================================
+
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
+	defer cancel()
+
+	var (
+		cleanupSuccess = true
+		exitCode       = 0
+	)
+
+	// ============================================================
+	// 1. LOGGING CONFIGURATION (with colored output via tint)
+	// ============================================================
+
+	logLevel := slog.LevelInfo
+	if level := os.Getenv("LOG_LEVEL"); level != "" {
+		switch level {
+		case "debug", "DEBUG", "trace", "TRACE":
+			logLevel = slog.LevelDebug
+		case "info", "INFO":
+			logLevel = slog.LevelInfo
+		case "warn", "WARN", "warning", "WARNING":
+			logLevel = slog.LevelWarn
+		case "error", "ERROR":
+			logLevel = slog.LevelError
+		default:
+			logLevel = slog.LevelInfo
+		}
+	}
+
+	baseLogger := slog.New(tint.NewHandler(os.Stderr, &tint.Options{
+		Level:      logLevel,
+		TimeFormat: time.TimeOnly,
+	}))
+
+	appLogger := baseLogger.With("source", "app")
+	ofLogger := baseLogger.With("source", "openfeature-sdk")
+
+	slog.SetDefault(baseLogger)
+
+	section("LOGGING CONFIGURATION")
+	appLogger.Info("logging configured", "format", "tint (colored)", "level", logLevel.String())
+
+	// ============================================================
+	// 2. OPENFEATURE LOGGING HOOK (must be first to capture all evaluations)
+	// ============================================================
+	section("OPENFEATURE LOGGING HOOK")
+	openfeature.AddHooks(hooks.NewLoggingHook(false, ofLogger))
+	appLogger.Info("logging hook added (captures all flag evaluations)")
+
+	// ============================================================
+	// 3. EVENT HANDLERS (API-level handlers run before client handlers)
+	// ============================================================
+	section("EVENT HANDLERS")
+
+	var eventsReceived sync.Map
+
+	// handleEvent builds a callback that counts occurrences of one event type
+	// in eventsReceived (EventType -> *atomic.Int64) and logs each delivery.
+	handleEvent := func(eventType openfeature.EventType) openfeature.EventCallback {
+		callback := func(details openfeature.EventDetails) {
+			val, _ := eventsReceived.LoadOrStore(eventType, new(atomic.Int64))
+			counter := val.(*atomic.Int64)
+			count := counter.Add(1)
+
+			slog.Info("event received",
+				"type", eventType,
+				"provider", details.ProviderName,
+				"message", details.Message,
+				"count", count)
+		}
+		return &callback
+	}
+
+	openfeature.AddHandler(openfeature.ProviderReady, handleEvent(openfeature.ProviderReady))
+	openfeature.AddHandler(openfeature.ProviderError, handleEvent(openfeature.ProviderError))
+	openfeature.AddHandler(openfeature.ProviderConfigChange, handleEvent(openfeature.ProviderConfigChange))
+
+	appLogger.Info("event handlers registered", "handlers", 3)
+
+	// ============================================================
+	// 4. SPLIT SDK CONFIGURATION (optimized for fast test execution)
+	// ============================================================
+	section("SPLIT SDK CONFIGURATION")
+
+	apiKey := os.Getenv("SPLIT_API_KEY")
+	if apiKey == "" {
+		apiKey = "localhost"
+		appLogger.Info("no SPLIT_API_KEY provided, using localhost mode")
+	} else {
+		appLogger.Info("using Split API key from environment")
+	}
+
+	// Use optimized test configuration for faster execution
+	cfg := split.TestConfig()
+
+	if apiKey == "localhost" {
+		cfg.SplitFile = "./split.yaml"
+		appLogger.Info("split SDK configured",
+			"mode", "localhost",
+			"file", "./split.yaml",
+			"block_until_ready", cfg.BlockUntilReady)
+	} else {
+		appLogger.Info("split SDK configured",
+			"mode", "cloud",
+			"block_until_ready", cfg.BlockUntilReady,
+			"http_timeout", cfg.Advanced.HTTPTimeout)
+	}
+
+	// ============================================================
+	// 5. PROVIDER CREATION
+	// ============================================================
+	section("PROVIDER CREATION")
+
+	provider, err := split.New(apiKey,
+		split.WithLogger(baseLogger),
+		split.WithSplitConfig(cfg),
+	)
+	if err != nil {
+		slog.Error("failed to create provider", "error", err)
+		os.Exit(2)
+	}
+
+	appLogger.Info("provider created with unified logging")
+
+	// cleanup shuts the OpenFeature API down exactly once; it is invoked
+	// explicitly on the normal exit path (os.Exit skips defers) and via defer
+	// on panic paths.
+	var cleanupOnce sync.Once
+	cleanup := func() {
+		cleanupOnce.Do(func() {
+			defer func() {
+				if r := recover(); r != nil {
+					slog.Error("panic during shutdown", "panic", r)
+					cleanupSuccess = false
+				}
+			}()
+
+			fmt.Println()
+			fmt.Println(strings.Repeat("─", 60))
+			slog.Info("initiating graceful shutdown")
+
+			shutdownCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+			defer cancel()
+			if err := openfeature.ShutdownWithContext(shutdownCtx); err != nil {
+				slog.Error("shutdown error", "error", err)
+				cleanupSuccess = false
+			}
+
+			slog.Info("graceful shutdown complete")
+		})
+	}
+
+	defer cleanup()
+
+	shutdownChan := make(chan os.Signal, 1)
+	done := make(chan struct{})
+	signal.Notify(shutdownChan, os.Interrupt, syscall.SIGTERM)
+
+	// done is closed from both a defer (panic path) and the normal exit path
+	// below; guard the close with sync.Once so the two can never combine into
+	// a "close of closed channel" panic.
+	var doneOnce sync.Once
+	closeDone := func() { doneOnce.Do(func() { close(done) }) }
+
+	go func() {
+		select {
+		case sig := <-shutdownChan:
+			slog.Warn("interrupt signal received", "signal", sig)
+			signal.Stop(shutdownChan)
+			cancel()
+		case <-done:
+			signal.Stop(shutdownChan)
+			return
+		}
+	}()
+
+	defer closeDone()
+
+	// ============================================================
+	// 6. PROVIDER INITIALIZATION
+	// ============================================================
+	section("PROVIDER INITIALIZATION")
+
+	initCtx, initCancel := context.WithTimeout(context.Background(), 15*time.Second)
+	defer initCancel()
+
+	if err := openfeature.SetProviderWithContextAndWait(initCtx, provider); err != nil {
+		slog.Error("failed to initialize provider", "error", err)
+		cleanup()
+		os.Exit(2)
+	}
+
+	appLogger.Info("provider initialized and ready")
+
+	// ============================================================
+	// 7. OPENFEATURE CLIENT CREATION
+	// ============================================================
+	section("CLIENT CREATION")
+
+	ofClient := openfeature.NewDefaultClient()
+
+	appLogger.Info("OpenFeature client created")
+
+	// ============================================================
+	// RUN ALL TESTS
+	// ============================================================
+	section("RUNNING TESTS")
+	runTests(ctx, ofClient, provider, &eventsReceived, apiKey, baseLogger, cfg)
+
+	// ============================================================
+	// RESULTS SUMMARY
+	// ============================================================
+	results.Summary()
+
+	// Print event statistics
+	fmt.Println()
+	fmt.Println("Event Statistics:")
+	eventsReceived.Range(func(key, value any) bool {
+		eventType := key.(openfeature.EventType)
+		counter := value.(*atomic.Int64)
+		count := counter.Load()
+		fmt.Printf("  %s: %d events\n", eventType, count)
+		return true
+	})
+
+	// Explicit teardown: os.Exit below skips all deferred calls, so stop the
+	// signal goroutine and shut down here.
+	closeDone()
+	cleanup()
+
+	if !cleanupSuccess {
+		exitCode = 2
+	} else if results.total.Load() == 0 {
+		exitCode = 2
+	} else if results.failed.Load() > 0 {
+		exitCode = 1
+	}
+
+	os.Exit(exitCode)
+}
+
+// runTests executes every integration test in a fixed order: flag-evaluation
+// tests against the shared, already-initialized client first; then advanced
+// SDK/concurrency/metrics tests; and finally lifecycle tests that create and
+// destroy their own provider instances. The order matters — lifecycle tests
+// run last so they cannot disturb the shared provider used by earlier tests.
+func runTests(ctx context.Context, client *openfeature.Client, provider *split.Provider, eventsReceived *sync.Map, apiKey string, baseLogger *slog.Logger, cfg *conf.SplitSdkConfig) {
+	// Convert a panicking test into a recorded failure so main still reaches
+	// the summary and cleanup.
+	defer func() {
+		if r := recover(); r != nil {
+			slog.Error("panic during test execution", "panic", r)
+			results.Fail("panic", fmt.Sprintf("test execution panicked: %v", r))
+		}
+	}()
+
+	isLocalhostMode := apiKey == "localhost"
+
+	// ============================================================
+	// FLAG EVALUATION TESTS
+	// ============================================================
+
+	section("BOOLEAN FLAG EVALUATIONS")
+	testBooleanEvaluations(ctx, client)
+
+	section("STRING FLAG EVALUATIONS")
+	testStringEvaluations(ctx, client)
+
+	section("INTEGER FLAG EVALUATIONS")
+	testIntEvaluations(ctx, client)
+
+	section("FLOAT FLAG EVALUATIONS")
+	testFloatEvaluations(ctx, client)
+
+	// Object evaluations only work in localhost mode (cloud mode only evaluates flag sets)
+	if isLocalhostMode {
+		section("OBJECT FLAG EVALUATIONS")
+		testObjectEvaluations(ctx, client)
+	} else {
+		section("OBJECT FLAG EVALUATIONS (SKIPPED - cloud mode)")
+		slog.Info("skipping object evaluations - cloud mode only evaluates flag sets")
+	}
+
+	section("EVALUATION DETAILS")
+	testEvaluationDetails(ctx, client)
+
+	// Flag metadata tests run in both modes - tests that metadata field is properly populated
+	// In localhost mode, flags have JSON configs attached
+	// In cloud mode, flags may or may not have configs (test handles both cases)
+	section("FLAG METADATA")
+	testFlagMetadata(ctx, client)
+
+	// Flag set evaluation only works in cloud mode (localhost doesn't support flag sets)
+	if !isLocalhostMode {
+		section("FLAG SET EVALUATION")
+		testFlagSetEvaluation(ctx, client)
+	} else {
+		section("FLAG SET EVALUATION (SKIPPED - localhost mode)")
+		slog.Info("skipping flag set evaluation - localhost mode doesn't support flag sets")
+	}
+
+	section("TARGETING WITH ATTRIBUTES")
+	testAttributeTargeting(ctx, client)
+
+	// Deliberately takes no ctx: it constructs its own cancelled/expired contexts.
+	section("CONTEXT CANCELLATION")
+	testContextCancellation(client)
+
+	section("ERROR HANDLING")
+	testErrorHandling(ctx, client)
+
+	// ============================================================
+	// ADVANCED TESTS (SDK access, concurrency, metrics)
+	// ============================================================
+
+	section("EVALUATION MODE OPTIONS")
+	testObjectEvaluationMode(ctx, provider, isLocalhostMode)
+
+	section("DIRECT SPLIT SDK ACCESS")
+	testDirectSDKAccess(provider)
+
+	section("CLIENT TRACKING")
+	testClientTrack(ctx, client)
+
+	section("TRACK OPTIONS")
+	testTrackWithOptions(ctx, provider)
+
+	section("CONCURRENT EVALUATIONS")
+	testConcurrentEvaluations(ctx, client)
+
+	section("CLIENT STATE")
+	testClientState(client)
+
+	section("PROVIDER STATUS & HEALTH")
+	testProviderHealth(provider)
+
+	section("EVENT TRACKING VALIDATION")
+	testEventTracking(eventsReceived)
+
+	section("METRICS BEFORE INIT")
+	testMetricsBeforeInit()
+
+	section("METRICS ALL FIELDS")
+	testMetricsAllFields(provider)
+
+	// ============================================================
+	// LIFECYCLE TESTS (init, shutdown, named providers)
+	// Each of these creates and tears down its own provider instance.
+	// ============================================================
+
+	section("INIT AFTER SHUTDOWN")
+	testInitAfterShutdown(apiKey, baseLogger, cfg)
+
+	section("NAMED PROVIDER SUPPORT")
+	testNamedProvider(ctx, apiKey, baseLogger, cfg)
+
+	section("CONCURRENT INIT CALLS")
+	testConcurrentInit(ctx, apiKey, baseLogger, cfg)
+
+	section("PROVIDER_NOT_READY ERROR")
+	testProviderNotReadyError()
+
+	section("METADATA & HOOKS")
+	testTrivialGetters(provider)
+
+	section("INIT TIMEOUT")
+	testInitWithContextTimeout()
+
+	section("SHUTDOWN TIMEOUT")
+	testShutdownWithContextTimeout(apiKey, baseLogger, cfg)
+
+	section("SHUTDOWN DURING INIT")
+	testShutdownDuringInit(apiKey, baseLogger, cfg)
+
+	section("METRICS AFTER SHUTDOWN")
+	testMetricsAfterShutdown(apiKey, baseLogger, cfg)
+
+	section("NIL CONFIG DEFAULTS")
+	testProviderWithNilConfig(apiKey, baseLogger)
+
+	section("BLOCKUNTILREADY ZERO")
+	testBlockUntilReadyZero(apiKey, baseLogger)
+
+	section("STATUS ATOMICITY")
+	testStatusAtomicity(apiKey, baseLogger, cfg)
+
+	section("DOUBLE SHUTDOWN")
+	testDoubleShutdown(apiKey, baseLogger, cfg)
+}
diff --git a/test/integration/results.go b/test/integration/results.go
new file mode 100644
index 0000000..af51a51
--- /dev/null
+++ b/test/integration/results.go
@@ -0,0 +1,78 @@
+// results.go provides test result tracking infrastructure.
+// It includes atomic counters for pass/fail tracking and proper error aggregation.
+package main
+
+import (
+ "fmt"
+ "log/slog"
+ "strings"
+ "sync"
+ "sync/atomic"
+
+ "github.com/hashicorp/go-multierror"
+)
+
+// TestResults tracks test results with atomic counters and proper error aggregation.
+// Counters are atomic.Int64 so Pass/Fail are safe to call from concurrently
+// running tests without locking; failures are accumulated into a go-multierror
+// so Summary can print every failure in one place at the end of the run.
+type TestResults struct {
+	passed atomic.Int64 // number of passing checks recorded via Pass
+	failed atomic.Int64 // number of failing checks recorded via Fail
+	total  atomic.Int64 // passed + failed; zero means no tests ran
+	mu     sync.Mutex        // Protects result during concurrent Append operations
+	result *multierror.Error // Accumulated test failures using go-multierror
+}
+
+// Pass records one successful check and logs it at info level.
+func (tr *TestResults) Pass(testName string) {
+	tr.total.Add(1)
+	tr.passed.Add(1)
+	slog.Info("PASS", "test", testName)
+}
+
+// Fail records one failed check: it bumps the counters, appends the failure
+// to the aggregated multierror (under the mutex — Append is not safe for
+// concurrent use on its own), and logs at error level.
+func (tr *TestResults) Fail(testName string, reason string) {
+	tr.total.Add(1)
+	tr.failed.Add(1)
+
+	failure := fmt.Errorf("%s: %s", testName, reason)
+	tr.mu.Lock()
+	tr.result = multierror.Append(tr.result, failure)
+	tr.mu.Unlock()
+
+	slog.Error("FAIL", "test", testName, "reason", reason)
+}
+
+func (tr *TestResults) Summary() {
+ tr.mu.Lock()
+ defer tr.mu.Unlock()
+
+ passed := tr.passed.Load()
+ failed := tr.failed.Load()
+ total := tr.total.Load()
+
+ percentage := 0.0
+ if total > 0 {
+ percentage = float64(passed) / float64(total) * 100
+ }
+
+ fmt.Println()
+ fmt.Println(strings.Repeat("=", 60))
+ fmt.Printf("Test Results: %d/%d passed (%.1f%%)\n", passed, total, percentage)
+ if tr.result != nil {
+ fmt.Println()
+ fmt.Printf("Failed tests (%d):\n", failed)
+ fmt.Println(tr.result.Error())
+ } else if total > 0 {
+ fmt.Println("All tests passed!")
+ } else {
+ fmt.Println("No tests were run")
+ }
+ fmt.Println(strings.Repeat("=", 60))
+}
+
+// results is the process-wide result sink shared by every test function.
+var results = &TestResults{}
+
+// section logs a visually distinct section header for test phases.
+func section(name string) {
+	rule := strings.Repeat("-", 60)
+	slog.Info(rule)
+	slog.Info(">> " + name)
+	slog.Info(rule)
+}
diff --git a/test/integration/sdk.go b/test/integration/sdk.go
new file mode 100644
index 0000000..4beb4a0
--- /dev/null
+++ b/test/integration/sdk.go
@@ -0,0 +1,353 @@
+// sdk.go contains tests for direct SDK access, concurrency, health, and metrics.
+// Tests cover Split SDK client access, concurrent evaluations, event tracking,
+// provider health status, and metrics before/after initialization.
+package main
+
+import (
+ "context"
+ "fmt"
+ "log/slog"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/open-feature/go-sdk/openfeature"
+ "github.com/splitio/go-client/v6/splitio/conf"
+
+ "github.com/splitio/split-openfeature-provider-go/v2"
+)
+
+// testDirectSDKAccess exercises the underlying Split SDK client obtained via
+// the provider's Factory accessor: event tracking and bulk treatments.
+func testDirectSDKAccess(provider *split.Provider) {
+	splitClient := provider.Factory().Client()
+
+	props := map[string]any{
+		"test":      "integration_test",
+		"timestamp": time.Now().Unix(),
+	}
+	if err := splitClient.Track("test-user", "user", "test_event", 1.0, props); err != nil {
+		results.Fail("SDK(Track)", err.Error())
+	} else {
+		results.Pass("SDK(Track)")
+	}
+
+	flags := []string{"feature_boolean_on", "ui_theme", "max_retries"}
+	treatments := splitClient.Treatments("test-user", flags, nil)
+	if n := len(treatments); n == 3 {
+		results.Pass(fmt.Sprintf("SDK(Treatments) - %d flags evaluated", n))
+	} else {
+		results.Fail("SDK(Treatments)", fmt.Sprintf("expected 3 treatments, got %d", n))
+	}
+}
+
+// testClientTrack exercises the OpenFeature Client.Track() method, which is
+// routed through the provider's Tracker interface. Track returns nothing per
+// the OpenFeature spec, so each case simply must complete without panicking.
+func testClientTrack(ctx context.Context, client *openfeature.Client) {
+	userCtx := openfeature.NewEvaluationContext("test-user", nil)
+	basicDetails := openfeature.NewTrackingEventDetails(42.0)
+
+	// Case 1: default traffic type ("user").
+	client.Track(ctx, "test_event", userCtx, basicDetails)
+	results.Pass("Client(Track_basic)")
+
+	// Case 2: custom traffic type supplied via the evaluation context.
+	accountCtx := openfeature.NewEvaluationContext("test-user", map[string]any{
+		"trafficType": "account",
+	})
+	client.Track(ctx, "account_event", accountCtx, basicDetails)
+	results.Pass("Client(Track_custom_traffic_type)")
+
+	// Case 3: event properties attached to the tracking details.
+	purchaseDetails := openfeature.NewTrackingEventDetails(99.99).
+		Add("currency", "USD").
+		Add("item_count", 3).
+		Add("is_premium", true)
+	client.Track(ctx, "purchase", userCtx, purchaseDetails)
+	results.Pass("Client(Track_with_properties)")
+
+	// Case 4: an empty targeting key must be silently ignored.
+	client.Track(ctx, "ignored_event", openfeature.NewEvaluationContext("", nil), basicDetails)
+	results.Pass("Client(Track_empty_key_ignored)")
+}
+
+// testTrackWithOptions verifies Track behavior with context-scoped options,
+// in particular WithoutMetricValue (count-only events).
+func testTrackWithOptions(ctx context.Context, provider *split.Provider) {
+	evalCtx := openfeature.NewEvaluationContext("test-user", nil)
+	countOnlyCtx := split.WithoutMetricValue(ctx)
+
+	// Count-only event: no metric value is sent to Split.
+	provider.Track(countOnlyCtx, "page_view", evalCtx, openfeature.NewTrackingEventDetails(0))
+	results.Pass("Track(without_metric_value)")
+
+	// Standard event carrying a metric value.
+	provider.Track(ctx, "purchase", evalCtx, openfeature.NewTrackingEventDetails(99.99))
+	results.Pass("Track(with_metric_value)")
+
+	// The option must be readable back off the context.
+	if split.GetTrackOptions(countOnlyCtx).MetricValueAbsent {
+		results.Pass("Track(context_option)")
+	} else {
+		results.Fail("Track(context_option)", "MetricValueAbsent should be true")
+	}
+
+	// When WithoutMetricValue is combined with a non-zero value in the
+	// details, the absent flag wins (nil is sent to Split, not 42.0).
+	provider.Track(split.WithoutMetricValue(ctx), "count_event", evalCtx, openfeature.NewTrackingEventDetails(42.0))
+	results.Pass("Track(absent_takes_precedence)")
+}
+
+// testConcurrentEvaluations hammers BooleanValue from many goroutines at
+// once (100 workers x 10 evaluations each) and fails if any evaluation
+// returns an error.
+func testConcurrentEvaluations(ctx context.Context, client *openfeature.Client) {
+	const (
+		workers        = 100
+		evalsPerWorker = 10
+	)
+
+	var (
+		wg       sync.WaitGroup
+		mu       sync.Mutex
+		failures []error // guarded by mu
+	)
+
+	for i := 0; i < workers; i++ {
+		wg.Add(1)
+		go func(id int) {
+			defer wg.Done()
+
+			evalCtx := openfeature.NewEvaluationContext(fmt.Sprintf("user-%d", id), nil)
+
+			for j := 0; j < evalsPerWorker; j++ {
+				if _, err := client.BooleanValue(ctx, "feature_boolean_on", false, evalCtx); err != nil {
+					mu.Lock()
+					failures = append(failures, fmt.Errorf("goroutine %d iteration %d: %w", id, j, err))
+					mu.Unlock()
+				}
+			}
+		}(i)
+	}
+	wg.Wait()
+
+	for _, err := range failures {
+		slog.Error("concurrent evaluation error", "error", err)
+	}
+
+	if n := len(failures); n > 0 {
+		results.Fail("Concurrent(evaluations)", fmt.Sprintf("%d errors in %d evaluations",
+			n, workers*evalsPerWorker))
+	} else {
+		results.Pass(fmt.Sprintf("Concurrent(%d goroutines × %d evals)",
+			workers, evalsPerWorker))
+	}
+}
+
+// testClientState checks that Client.State() — which queries the provider's
+// Status() — reports Ready once initialization has completed.
+func testClientState(client *openfeature.Client) {
+	if state := client.State(); state == openfeature.ReadyState {
+		results.Pass("Client(State)")
+	} else {
+		results.Fail("Client(State)", fmt.Sprintf("expected Ready, got %s", state))
+	}
+}
+
+// testProviderHealth validates Status() and the core Metrics() fields of a
+// ready, initialized provider.
+func testProviderHealth(provider *split.Provider) {
+	if status := provider.Status(); status == openfeature.ReadyState {
+		results.Pass("Health(Status)")
+	} else {
+		results.Fail("Health(Status)", fmt.Sprintf("expected Ready, got %s", status))
+	}
+
+	m := provider.Metrics()
+
+	if m.Provider == "Split" {
+		results.Pass("Health(provider)")
+	} else {
+		results.Fail("Health(provider)", fmt.Sprintf("expected Split, got %v", m.Provider))
+	}
+
+	if m.Status == openfeature.ReadyState {
+		results.Pass("Health(status)")
+	} else {
+		results.Fail("Health(status)", fmt.Sprintf("expected Ready, got %v", m.Status))
+	}
+
+	if m.Initialized {
+		results.Pass("Health(initialized)")
+	} else {
+		results.Fail("Health(initialized)", "provider should be initialized")
+	}
+}
+
+// testEventTracking asserts that provider events actually reached the
+// handlers registered in main: at least one PROVIDER_READY, plus a non-zero
+// overall event count across all types.
+func testEventTracking(eventsReceived *sync.Map) {
+	var readyCount int64
+	val, found := eventsReceived.Load(openfeature.ProviderReady)
+	if found {
+		readyCount = val.(*atomic.Int64).Load()
+	}
+	switch {
+	case !found:
+		results.Fail("Events(PROVIDER_READY)", "event type not found in sync.Map")
+	case readyCount > 0:
+		results.Pass(fmt.Sprintf("Events(PROVIDER_READY) - %d events", readyCount))
+	default:
+		results.Fail("Events(PROVIDER_READY)", "no events received")
+	}
+
+	var totalEvents int64
+	eventsReceived.Range(func(_, value any) bool {
+		totalEvents += value.(*atomic.Int64).Load()
+		return true
+	})
+
+	if totalEvents > 0 {
+		results.Pass(fmt.Sprintf("Events(Total) - %d total events", totalEvents))
+	} else {
+		results.Fail("Events(Total)", "no events received at all")
+	}
+}
+
+// testMetricsBeforeInit checks the Metrics() snapshot of a provider that has
+// been created but never initialized: not initialized, NotReady status, not
+// ready, and a sentinel splits count of -1.
+func testMetricsBeforeInit() {
+	// Point the SDK at a local split file so construction doesn't go hunting
+	// for ~/.splits and log spurious errors.
+	cfg := split.TestConfig()
+	cfg.SplitFile = "./split.yaml"
+
+	p, err := split.New("localhost", split.WithSplitConfig(cfg))
+	if err != nil {
+		results.Fail("MetricsBeforeInit(create)", fmt.Sprintf("failed to create provider: %v", err))
+		return
+	}
+	defer p.Shutdown()
+
+	m := p.Metrics()
+
+	if !m.Initialized {
+		results.Pass("MetricsBeforeInit(initialized)")
+	} else {
+		results.Fail("MetricsBeforeInit(initialized)", "expected initialized=false")
+	}
+
+	if m.Status == openfeature.NotReadyState {
+		results.Pass("MetricsBeforeInit(status)")
+	} else {
+		results.Fail("MetricsBeforeInit(status)", fmt.Sprintf("expected NotReady, got %s", m.Status))
+	}
+
+	if !m.Ready {
+		results.Pass("MetricsBeforeInit(ready)")
+	} else {
+		results.Fail("MetricsBeforeInit(ready)", "expected ready=false")
+	}
+
+	if m.SplitsCount == -1 {
+		results.Pass("MetricsBeforeInit(splits_count)")
+	} else {
+		results.Fail("MetricsBeforeInit(splits_count)", fmt.Sprintf("expected -1 when not ready, got %d", m.SplitsCount))
+	}
+}
+
+// testMetricsAllFields validates every Metrics() field of an initialized,
+// ready provider, including a non-negative splits count.
+func testMetricsAllFields(provider *split.Provider) {
+	m := provider.Metrics()
+
+	if m.Provider == "Split" {
+		results.Pass("MetricsAllFields(provider)")
+	} else {
+		results.Fail("MetricsAllFields(provider)", fmt.Sprintf("expected Split, got %s", m.Provider))
+	}
+
+	if m.Status == openfeature.ReadyState {
+		results.Pass("MetricsAllFields(status)")
+	} else {
+		results.Fail("MetricsAllFields(status)", fmt.Sprintf("expected Ready, got %v", m.Status))
+	}
+
+	if m.Initialized {
+		results.Pass("MetricsAllFields(initialized)")
+	} else {
+		results.Fail("MetricsAllFields(initialized)", "expected true")
+	}
+
+	if m.Ready {
+		results.Pass("MetricsAllFields(ready)")
+	} else {
+		results.Fail("MetricsAllFields(ready)", "expected true")
+	}
+
+	if m.SplitsCount >= 0 {
+		results.Pass(fmt.Sprintf("MetricsAllFields(splits_count=%d)", m.SplitsCount))
+	} else {
+		results.Fail("MetricsAllFields(splits_count)", fmt.Sprintf("invalid count: %d", m.SplitsCount))
+	}
+}
+
+// testMetricsAfterShutdown verifies the Metrics() snapshot of a provider
+// after it has been shut down: NotReady status, not initialized, not ready.
+func testMetricsAfterShutdown(apiKey string, logger *slog.Logger, cfg *conf.SplitSdkConfig) {
+	p, err := split.New(apiKey, split.WithLogger(logger), split.WithSplitConfig(cfg))
+	if err != nil {
+		results.Fail("MetricsAfterShutdown(create)", fmt.Sprintf("failed to create: %v", err))
+		return
+	}
+
+	initCtx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
+	defer cancel()
+
+	if err := p.InitWithContext(initCtx, openfeature.NewEvaluationContext("test-user", nil)); err != nil {
+		results.Fail("MetricsAfterShutdown(init)", fmt.Sprintf("init failed: %v", err))
+		p.Shutdown()
+		return
+	}
+
+	// Shut the provider down. In cloud mode with SSE streaming the Split SDK
+	// has a known hang bug, so a deadline-exceeded timeout there is tolerated
+	// and the metrics are still inspected afterwards.
+	shutdownCtx, cancel2 := context.WithTimeout(context.Background(), 15*time.Second)
+	defer cancel2()
+	if err := p.ShutdownWithContext(shutdownCtx); err != nil {
+		tolerated := apiKey != "localhost" && strings.Contains(err.Error(), "context deadline exceeded")
+		if !tolerated {
+			results.Fail("MetricsAfterShutdown(shutdown)", fmt.Sprintf("shutdown failed: %v", err))
+			return
+		}
+	}
+
+	// Inspect the post-shutdown metrics snapshot.
+	m := p.Metrics()
+
+	if m.Status == openfeature.NotReadyState {
+		results.Pass("MetricsAfterShutdown(status)")
+	} else {
+		results.Fail("MetricsAfterShutdown(status)", fmt.Sprintf("expected NotReady, got %v", m.Status))
+	}
+
+	if m.Initialized {
+		results.Fail("MetricsAfterShutdown(initialized)", "expected false after shutdown")
+	} else {
+		results.Pass("MetricsAfterShutdown(initialized)")
+	}
+
+	if m.Ready {
+		results.Fail("MetricsAfterShutdown(ready)", "expected false after shutdown")
+	} else {
+		results.Pass("MetricsAfterShutdown(ready)")
+	}
+}
diff --git a/test/integration/split.yaml b/test/integration/split.yaml
new file mode 100644
index 0000000..54fafbc
--- /dev/null
+++ b/test/integration/split.yaml
@@ -0,0 +1,107 @@
+# Split Localhost Mode - Integration Test Flags
+#
+# Format:
+# - flag_name:
+# treatment: "value" # Required: Treatment (must be a string)
+# keys: "key1,key2" # Optional: Comma-separated targeting keys
+# config: '{"key": "value"}' # Optional: Dynamic Configuration (JSON)
+#
+# Documentation: https://developer.harness.io/docs/feature-management-experimentation/sdks-and-infrastructure/server-side-sdks/go-sdk#yaml
+
+# Boolean Flags - Simple on/off features
+- feature_boolean_on:
+ treatment: "on"
+ config: '{"enabled_at": "2024-01-01T00:00:00Z"}'
+
+- feature_boolean_off:
+ treatment: "off"
+
+# String Flags - Multi-variant features
+- ui_theme:
+ treatment: "dark"
+ config: '{"primary_color": "#1a1a1a", "accent_color": "#4a9eff"}'
+
+- api_version:
+ treatment: "v2"
+ config: '{"endpoint": "https://api.example.com/v2", "timeout_ms": 5000}'
+
+# Integer Flags - Numeric configuration
+- max_retries:
+ treatment: "5"
+ config: '{"backoff_ms": 1000, "exponential": true}'
+
+- page_size:
+ treatment: "50"
+
+- timeout_seconds:
+ treatment: "30"
+
+# Float Flags - Percentage and decimal values
+- discount_rate:
+ treatment: "0.15"
+ config: '{"min_order": 50, "max_discount": 100}'
+
+- cache_hit_ratio:
+ treatment: "0.85"
+
+- sampling_rate:
+ treatment: "0.01"
+
+# Object Flags - Complex JSON configuration via Dynamic Configuration
+- feature_config:
+ treatment: "enabled"
+ config: '{"enabled": true, "rollout_percentage": 100, "metadata": {"owner": "platform-team", "launched": "2024-01-15"}}'
+
+- premium_features:
+ treatment: "on"
+ config: '{"analytics": true, "ai_assistant": true, "priority_support": true, "custom_domains": false}'
+
+- ab_test_config:
+ treatment: "treatment_a"
+ config: '{"variant": "treatment_a", "cohort": "experimental", "tracking_id": "exp_001"}'
+
+# Targeting with Keys - User-specific overrides
+- beta_features:
+ treatment: "on"
+ keys: "user-vip,user-beta-tester"
+ config: '{"features": ["new_dashboard", "advanced_analytics"]}'
+
+- regional_feature:
+ treatment: "enabled"
+ keys: "user-us,user-uk"
+
+# Control Treatment - Default/fallback behavior
+- experimental_algorithm:
+ treatment: "control"
+ config: '{"reason": "not_in_experiment"}'
+
+# System Flags - Operational controls
+- maintenance_mode:
+ treatment: "off"
+
+- debug_logging:
+ treatment: "off"
+
+- rate_limit_enabled:
+ treatment: "on"
+ config: '{"requests_per_minute": 1000, "burst": 1500}'
+
+# Multi-variant Flags - A/B/C testing
+- homepage_variant:
+ treatment: "variant_b"
+ config: '{"layout": "grid", "hero_image": "v2.jpg", "cta_text": "Get Started"}'
+
+- pricing_page:
+ treatment: "pricing_v3"
+ config: '{"annual_discount": 0.20, "show_enterprise": true}'
+
+# Migration Flags - Gradual rollout
+- new_checkout_flow:
+ treatment: "on"
+ keys: "user-vip,user-beta-tester,user-123"
+ config: '{"version": "2.0", "analytics_tracking": true}'
+
+# Onboarding Flow - Complex workflow
+- onboarding_version:
+ treatment: "v2"
+ config: '{"steps": ["welcome", "profile", "preferences", "tutorial", "done"], "skip_allowed": true, "progress_tracking": true}'
diff --git a/testdata/split.yaml b/testdata/split.yaml
new file mode 100644
index 0000000..4383491
--- /dev/null
+++ b/testdata/split.yaml
@@ -0,0 +1,37 @@
+# Split Localhost Mode - Unit Test Data
+#
+# Format:
+# - flag_name:
+# treatment: "value" # Required: Treatment (must be a string)
+# keys: "key1,key2" # Optional: Comma-separated targeting keys
+# config: '{"key": "value"}' # Optional: Dynamic Configuration (JSON)
+#
+# Documentation: https://developer.harness.io/docs/feature-management-experimentation/sdks-and-infrastructure/server-side-sdks/go-sdk#yaml
+
+- my_feature:
+ treatment: "on"
+ keys: "key"
+ config: '{"desc": "this applies only to ON treatment"}'
+
+- my_feature:
+ treatment: "off"
+
+- some_other_feature:
+ treatment: "off"
+
+- int_feature:
+ treatment: "32"
+
+- obj_feature:
+ treatment: "on"
+ config: '{"key": "value"}'
+
+- float_feature:
+ treatment: "32.5"
+
+- unparseable_feature:
+ treatment: "not-a-valid-type"
+
+- malformed_json_feature:
+ treatment: "on"
+ config: '{invalid json: missing quotes}'
\ No newline at end of file
diff --git a/track_test.go b/track_test.go
new file mode 100644
index 0000000..559b956
--- /dev/null
+++ b/track_test.go
@@ -0,0 +1,126 @@
+package split
+
+import (
+ "context"
+ "testing"
+
+ "github.com/open-feature/go-sdk/openfeature"
+ "github.com/splitio/go-client/v6/splitio/conf"
+ "github.com/splitio/go-toolkit/v5/logging"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+// TestTrack tests the Track method (Tracker interface).
+//
+// Uses the Split SDK in localhost mode so no network is required; flag
+// definitions come from the YAML fixture referenced by testSplitFile.
+// Track returns no value, so each subtest asserts only "does not panic".
+func TestTrack(t *testing.T) {
+ cfg := conf.Default()
+ cfg.SplitFile = testSplitFile // localhost-mode flag fixture (YAML)
+ cfg.LoggerConfig.LogLevel = logging.LevelNone // silence SDK logging in tests
+ cfg.BlockUntilReady = 10 // readiness wait budget (presumably seconds — per Split conf)
+
+ provider, err := New("localhost", WithSplitConfig(cfg))
+ require.NoError(t, err, "Failed to create provider")
+ defer func() { _ = provider.ShutdownWithContext(context.Background()) }()
+
+ // Empty targeting key is fine for Init; Track supplies its own per call.
+ err = provider.InitWithContext(context.Background(), openfeature.NewEvaluationContext("", nil))
+ require.NoError(t, err, "Init should succeed")
+
+ t.Run("basic tracking", func(t *testing.T) {
+ evalCtx := openfeature.NewEvaluationContext("test-user", nil)
+ details := openfeature.NewTrackingEventDetails(42.0) // metric value only, no properties
+
+ // Should not panic - Track returns void
+ provider.Track(context.Background(), "test_event", evalCtx, details)
+ })
+
+ t.Run("tracking with custom traffic type", func(t *testing.T) {
+ evalCtx := openfeature.NewEvaluationContext("test-user", map[string]any{
+ "trafficType": "account", // overrides the provider's default traffic type — TODO confirm attribute name against provider docs
+ })
+ details := openfeature.NewTrackingEventDetails(99.99)
+
+ // Should not panic
+ provider.Track(context.Background(), "account_event", evalCtx, details)
+ })
+
+ t.Run("tracking with properties", func(t *testing.T) {
+ evalCtx := openfeature.NewEvaluationContext("test-user", nil)
+ details := openfeature.NewTrackingEventDetails(149.99). // builder-style: Add returns a new details value
+ Add("currency", "USD").
+ Add("item_count", 3).
+ Add("is_premium", true)
+
+ // Should not panic
+ provider.Track(context.Background(), "purchase", evalCtx, details)
+ })
+
+ t.Run("tracking with empty targeting key is ignored", func(t *testing.T) {
+ evalCtx := openfeature.NewEvaluationContext("", nil) // no targeting key → event cannot be attributed
+ details := openfeature.NewTrackingEventDetails(1.0)
+
+ // Should not panic - silently ignored
+ provider.Track(context.Background(), "ignored_event", evalCtx, details)
+ })
+
+ t.Run("tracking with canceled context is ignored", func(t *testing.T) {
+ ctx, cancel := context.WithCancel(context.Background())
+ cancel() // Cancel immediately
+
+ evalCtx := openfeature.NewEvaluationContext("test-user", nil)
+ details := openfeature.NewTrackingEventDetails(1.0)
+
+ // Should not panic - silently ignored due to canceled context
+ provider.Track(ctx, "canceled_event", evalCtx, details)
+ })
+}
+
+// TestTrackProviderNotReady tests that Track is ignored when provider is not ready.
+//
+// The provider is constructed but never initialized, so it stays in
+// NotReadyState; Track must be a silent no-op in that state.
+func TestTrackProviderNotReady(t *testing.T) {
+ cfg := conf.Default()
+ cfg.SplitFile = testSplitFile // localhost-mode flag fixture (YAML)
+ cfg.LoggerConfig.LogLevel = logging.LevelNone // silence SDK logging in tests
+ cfg.BlockUntilReady = 10 // readiness wait budget (presumably seconds — per Split conf)
+
+ provider, err := New("localhost", WithSplitConfig(cfg))
+ require.NoError(t, err, "Failed to create provider")
+ defer func() { _ = provider.ShutdownWithContext(context.Background()) }()
+
+ // Don't initialize - provider is NotReady
+ assert.Equal(t, openfeature.NotReadyState, provider.Status(), "Provider should be NotReady")
+
+ evalCtx := openfeature.NewEvaluationContext("test-user", nil)
+ details := openfeature.NewTrackingEventDetails(1.0)
+
+ // Should not panic - silently ignored because provider not ready
+ provider.Track(context.Background(), "ignored_event", evalCtx, details)
+}
+
+// =============================================================================
+// Track Options Tests (Localhost Mode)
+// =============================================================================
+
+// TestTrack_WithoutMetricValue_Localhost verifies that the WithoutMetricValue
+// context option is readable back via GetTrackOptions and that Track accepts
+// a context carrying it without panicking (localhost mode discards events).
+func TestTrack_WithoutMetricValue_Localhost(t *testing.T) {
+ provider := setupLocalhostProvider(t)
+
+ ctx := WithoutMetricValue(context.Background()) // mark the event as having no metric value
+ evalCtx := openfeature.NewEvaluationContext("key", nil)
+ details := openfeature.NewTrackingEventDetails(0) // value presumably ignored when metric is marked absent — TODO confirm
+
+ // In localhost mode, Track runs the full code path but the SDK discards the event
+ // (no server to send to). Should not panic.
+ trackOpts := GetTrackOptions(ctx)
+ assert.True(t, trackOpts.MetricValueAbsent)
+
+ // Track call should not panic
+ provider.Track(ctx, "page_view", evalCtx, details)
+}
+
+func TestTrack_WithMetricValue_Localhost(t *testing.T) {
+ provider := setupLocalhostProvider(t)
+
+ evalCtx := openfeature.NewEvaluationContext("key", nil)
+ details := openfeature.NewTrackingEventDetails(99.99)
+
+ // Track call should not panic
+ provider.Track(context.Background(), "purchase", evalCtx, details)
+}