Skip to content

Commit 9732da2

Browse files
committed
WIP evaluator
AI-Session-Id: e73b729a-7564-4bc6-9e12-c7dade8008e6 AI-Tool: claude-code AI-Model: unknown
1 parent 5f608ac commit 9732da2

File tree

72 files changed

+745
-773
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

72 files changed

+745
-773
lines changed

client/pom.xml

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -167,6 +167,11 @@
167167
</properties>
168168

169169
<dependencies>
170+
<dependency>
171+
<groupId>io.split.client</groupId>
172+
<artifactId>targeting-engine</artifactId>
173+
<version>${project.version}</version>
174+
</dependency>
170175
<dependency>
171176
<groupId>io.split.client</groupId>
172177
<artifactId>pluggable-storage</artifactId>

client/src/main/java/io/split/client/CacheUpdaterService.java

Lines changed: 13 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -1,15 +1,13 @@
11
package io.split.client;
22

3-
import com.google.common.collect.Lists;
43
import io.split.client.dtos.ConditionType;
5-
import io.split.client.dtos.MatcherCombiner;
64
import io.split.client.dtos.Partition;
75
import io.split.engine.experiments.ParsedCondition;
86
import io.split.engine.experiments.ParsedSplit;
9-
import io.split.engine.matchers.AllKeysMatcher;
10-
import io.split.engine.matchers.AttributeMatcher;
11-
import io.split.engine.matchers.CombiningMatcher;
12-
import io.split.engine.matchers.strings.WhitelistMatcher;
7+
import io.split.rules.matchers.AllKeysMatcher;
8+
import io.split.rules.matchers.AttributeMatcher;
9+
import io.split.rules.matchers.CombiningMatcher;
10+
import io.split.rules.matchers.WhitelistMatcher;
1311
import io.split.grammar.Treatments;
1412
import io.split.storages.SplitCacheProducer;
1513

@@ -22,15 +20,15 @@
2220
import java.util.HashMap;
2321
import java.util.stream.Collectors;
2422

25-
import static com.google.common.base.Preconditions.checkNotNull;
23+
import java.util.Objects;
2624

2725
public final class CacheUpdaterService {
2826

2927
private static String LOCALHOST = "localhost";
3028
private SplitCacheProducer _splitCacheProducer;
3129

3230
public CacheUpdaterService(SplitCacheProducer splitCacheProducer) {
33-
_splitCacheProducer = checkNotNull(splitCacheProducer);
31+
_splitCacheProducer = Objects.requireNonNull(splitCacheProducer);
3432
}
3533

3634
public void updateCache(Map<SplitAndKey, LocalhostSplit> map) {
@@ -78,9 +76,10 @@ private List<ParsedCondition> getConditions(String splitKey, ParsedSplit split,
7876

7977
private ParsedCondition createWhitelistCondition(String splitKey, Partition partition) {
8078
ParsedCondition parsedCondition = new ParsedCondition(ConditionType.WHITELIST,
81-
new CombiningMatcher(MatcherCombiner.AND,
82-
Lists.newArrayList(new AttributeMatcher(null, new WhitelistMatcher(Lists.newArrayList(splitKey)), false))),
83-
Lists.newArrayList(partition), splitKey);
79+
new CombiningMatcher(CombiningMatcher.Combiner.AND,
80+
new java.util.ArrayList<>(java.util.Arrays.asList(
81+
new AttributeMatcher(null, new WhitelistMatcher(java.util.Arrays.asList(splitKey)), false)))),
82+
new java.util.ArrayList<>(java.util.Arrays.asList(partition)), splitKey);
8483
return parsedCondition;
8584
}
8685

@@ -89,9 +88,9 @@ private ParsedCondition createRolloutCondition(Partition partition) {
8988
rolloutPartition.treatment = "-";
9089
rolloutPartition.size = 0;
9190
ParsedCondition parsedCondition = new ParsedCondition(ConditionType.ROLLOUT,
92-
new CombiningMatcher(MatcherCombiner.AND,
93-
Lists.newArrayList(new AttributeMatcher(null, new AllKeysMatcher(), false))),
94-
Lists.newArrayList(partition, rolloutPartition), "LOCAL");
91+
new CombiningMatcher(CombiningMatcher.Combiner.AND,
92+
new java.util.ArrayList<>(java.util.Arrays.asList(new AttributeMatcher(null, new AllKeysMatcher(), false)))),
93+
new java.util.ArrayList<>(java.util.Arrays.asList(partition, rolloutPartition)), "LOCAL");
9594

9695
return parsedCondition;
9796
}

client/src/main/java/io/split/client/api/SplitView.java

Lines changed: 8 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,7 @@
1111
import java.util.List;
1212
import java.util.Map;
1313
import java.util.Set;
14+
import java.util.stream.Collectors;
1415

1516

1617
/**
@@ -51,7 +52,13 @@ public static SplitView fromParsedSplit(ParsedSplit parsedSplit) {
5152
splitView.configs = parsedSplit.configurations() == null? Collections.<String, String>emptyMap() : parsedSplit.configurations() ;
5253
splitView.impressionsDisabled = parsedSplit.impressionsDisabled();
5354
splitView.prerequisites = parsedSplit.prerequisitesMatcher() != null ?
54-
parsedSplit.prerequisitesMatcher().getPrerequisites(): new ArrayList<>();
55+
parsedSplit.prerequisitesMatcher().getPrerequisites().stream()
56+
.map(p -> {
57+
Prerequisites prereq = new Prerequisites();
58+
prereq.featureFlagName = p.featureFlagName();
59+
prereq.treatments = p.treatments();
60+
return prereq;
61+
}).collect(Collectors.toList()) : new ArrayList<>();
5562

5663
return splitView;
5764
}
Lines changed: 56 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,56 @@
1+
# AGENTS.md — client/impressions/
2+
3+
## Purpose
4+
5+
Impression tracking subsystem. Every `getTreatment()` call produces an **impression** — an audit record of what feature flag was evaluated, for which user, and what treatment was returned. This package manages batching, deduplication, and flushing of impressions to the Split API.
6+
7+
**Warning:** Bugs here cause silent data loss — impressions won't be recorded, breaking Split's analytics and A/B testing.
8+
9+
## Three Impression Modes
10+
11+
Controlled by `SplitClientConfig.impressionsMode()`:
12+
13+
| Mode | Class | Behavior |
14+
|------|-------|----------|
15+
| `DEBUG` | `ProcessImpressionDebug` | Every impression sent (no dedup) |
16+
| `OPTIMIZED` (default) | `ProcessImpressionOptimized` | Bloom filter dedup — only first impression per key+flag+treatment sent per hour |
17+
| `NONE` | `ProcessImpressionNone` | No impressions sent; unique keys tracked via `UniqueKeysTracker` |
18+
19+
## Key Files
20+
21+
| File | Purpose |
22+
|------|---------|
23+
| `ImpressionsManagerImpl.java` | Scheduler — batches impressions and flushes periodically |
24+
| `strategy/ProcessImpressionDebug.java` | DEBUG mode strategy |
25+
| `strategy/ProcessImpressionOptimized.java` | OPTIMIZED mode strategy |
26+
| `strategy/ProcessImpressionNone.java` | NONE mode strategy |
27+
| `filters/` | Bloom filter-based deduplication for OPTIMIZED mode |
28+
| `UniqueKeysTracker.java` | Tracks unique (key, featureFlag) pairs for NONE mode |
29+
30+
## Data Flow
31+
32+
```
33+
SplitClientImpl.getTreatment()
34+
→ EvaluatorImp returns (treatment, label, changeNumber)
35+
→ ImpressionsManagerImpl.track(Impression)
36+
→ ActiveStrategy.process(Impression)
37+
→ [DEBUG] always add to queue
38+
→ [OPTIMIZED] check Bloom filter; add if not seen
39+
→ [NONE] track unique keys, don't queue impression
40+
→ Queue flushes to Split API periodically
41+
```
42+
43+
## Testing
44+
45+
- **Run tests**: `mvn -pl :client -Dtest="*Impression*" test`
46+
- Test file pattern: `src/test/java/io/split/client/impressions/`
47+
48+
## DOs
49+
50+
- When changing evaluation logic, verify impressions are still recorded with correct label and change number.
51+
- When adding a new impression field, update all three strategy classes.
52+
53+
## DON'Ts
54+
55+
- Don't add deduplication logic outside the `filters/` package.
56+
- Don't assume `ImpressionsMode` is always `OPTIMIZED` — all three modes must work.
Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
AGENTS.md

client/src/main/java/io/split/client/impressions/ImpressionHasher.java

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
package io.split.client.impressions;
22

3-
import io.split.client.utils.MurmurHash3;
3+
import io.split.rules.bucketing.MurmurHash3;
44

55
public class ImpressionHasher {
66

Lines changed: 64 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,64 @@
1+
# AGENTS.md — engine/
2+
3+
## Purpose
4+
5+
The evaluation engine and synchronization subsystem. This is the most algorithmically complex package in the SDK. It handles:
6+
1. **Evaluation** — resolving feature flag treatments from `ParsedSplit` domain objects
7+
2. **Matchers** — 29 matcher implementations for all condition types
8+
3. **SSE streaming** — push notifications from the Split API
9+
4. **Polling/streaming orchestration** — `SyncManager` decides between streaming and polling
10+
11+
## Package Structure
12+
13+
```
14+
engine/
15+
├── evaluator/ # EvaluatorImp — the hot path for getTreatment()
16+
├── experiments/ # ParsedSplit domain model, SplitFetcher, SplitParser
17+
├── matchers/ # All matcher types (BetweenMatcher, Semver*, UserDefinedSegment, etc.)
18+
├── segments/ # Segment synchronization tasks
19+
├── sse/ # SSE streaming subsystem (38 classes)
20+
│ ├── client/ # EventSourceClientImp — manages SSE connection
21+
│ ├── workers/ # SplitUpdateWorker, SegmentUpdateWorker, etc.
22+
│ ├── dtos/ # SSE event payload DTOs
23+
│ └── ... # PushStatusTrackerImp, NotificationProcessorImp
24+
├── common/ # SyncManager orchestration
25+
│ ├── SyncManagerImp.java # Top-level: picks streaming vs. polling
26+
│ ├── SynchronizerImp.java # Polling mode synchronizer
27+
│ ├── PushManagerImp.java # Streaming mode manager
28+
│ └── ConsumerSynchronizer.java # Pluggable storage sync path
29+
├── splitter/ # Splitter.java — bucket assignment via MurmurHash
30+
└── metrics/ # (deprecated/noop metrics)
31+
```
32+
33+
## Evaluation Flow (Hot Path)
34+
35+
```
36+
SplitClientImpl.getTreatment()
37+
→ InputValidation
38+
→ EvaluatorImp.evaluateFeature()
39+
→ SplitCacheConsumer.get(featureName) → ParsedSplit
40+
→ Evaluates conditions in order (ParsedCondition[])
41+
→ Matcher.match(key, attributes)
42+
→ Splitter.getBucket(key, seed) → treatment
43+
→ ImpressionsManager.track(impression)
44+
→ TelemetryRuntimeProducer.recordLatency()
45+
```
46+
47+
## Streaming vs. Polling Duality
48+
49+
`SyncManagerImp` controls this at startup:
50+
- **Streaming mode** (default): `PushManagerImp` manages SSE connection. When an SSE update arrives, workers update storage directly and notify `SDKReadinessGates`.
51+
- **Polling mode** (fallback): `SynchronizerImp` schedules periodic HTTP fetches.
52+
- **Recovery**: If streaming fails, `SyncManagerImp` falls back to polling automatically.
53+
54+
## Key Distinction: DTOs vs Domain Objects
55+
56+
- `Split` (in `client/dtos/`) = wire format from the API
57+
- `ParsedSplit` (in `engine/experiments/`) = domain object used for evaluation
58+
- `SplitParser` converts `Split` → `ParsedSplit`
59+
- Never pass raw `Split` DTOs to the evaluator
60+
61+
## Testing
62+
63+
- **Run tests**: `mvn -pl :client -Dtest="*Evaluator*,*Matcher*,*Sse*" test`
64+
- **Key test classes**: `EvaluatorImpTest`, matcher-specific tests in `src/test/java/io/split/engine/`
Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
AGENTS.md

client/src/main/java/io/split/engine/evaluator/EvaluationContext.java

Lines changed: 43 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,11 +1,17 @@
11
package io.split.engine.evaluator;
22

3+
import io.split.client.dtos.ExcludedSegments;
4+
import io.split.engine.experiments.ParsedCondition;
5+
import io.split.engine.experiments.ParsedRuleBasedSegment;
6+
import io.split.rules.engine.EvaluationResult;
37
import io.split.storages.RuleBasedSegmentCacheConsumer;
48
import io.split.storages.SegmentCacheConsumer;
59

10+
import java.util.List;
11+
import java.util.Map;
612
import java.util.Objects;
713

8-
public class EvaluationContext {
14+
public class EvaluationContext implements io.split.rules.engine.EvaluationContext {
915
private final Evaluator _evaluator;
1016
private final SegmentCacheConsumer _segmentCacheConsumer;
1117
private final RuleBasedSegmentCacheConsumer _ruleBasedSegmentCacheConsumer;
@@ -28,4 +34,40 @@ public SegmentCacheConsumer getSegmentCache() {
2834
public RuleBasedSegmentCacheConsumer getRuleBasedSegmentCache() {
2935
return _ruleBasedSegmentCacheConsumer;
3036
}
37+
38+
@Override
39+
public EvaluationResult evaluate(String matchingKey, String bucketingKey, String ruleName, Map<String, Object> attributes) {
40+
EvaluatorImp.TreatmentLabelAndChangeNumber r = _evaluator.evaluateFeature(matchingKey, bucketingKey, ruleName, attributes);
41+
return new EvaluationResult(r.treatment, r.label, r.changeNumber, r.configurations, r.track);
42+
}
43+
44+
@Override
45+
public boolean isInSegment(String segmentName, String key) {
46+
return _segmentCacheConsumer.isInSegment(segmentName, key);
47+
}
48+
49+
@Override
50+
public boolean isInRuleBasedSegment(String segmentName, String key, String bucketingKey, Map<String, Object> attributes) {
51+
ParsedRuleBasedSegment parsedRuleBasedSegment = _ruleBasedSegmentCacheConsumer.get(segmentName);
52+
if (parsedRuleBasedSegment == null) {
53+
return false;
54+
}
55+
if (parsedRuleBasedSegment.excludedKeys().contains(key)) {
56+
return false;
57+
}
58+
for (ExcludedSegments excludedSegment : parsedRuleBasedSegment.excludedSegments()) {
59+
if (excludedSegment.isStandard() && _segmentCacheConsumer.isInSegment(excludedSegment.name, key)) {
60+
return false;
61+
}
62+
if (excludedSegment.isRuleBased() && isInRuleBasedSegment(excludedSegment.name, key, bucketingKey, attributes)) {
63+
return false;
64+
}
65+
}
66+
for (ParsedCondition condition : parsedRuleBasedSegment.parsedConditions()) {
67+
if (condition.matcher().match(key, bucketingKey, attributes, this)) {
68+
return true;
69+
}
70+
}
71+
return false;
72+
}
3173
}

0 commit comments

Comments
 (0)