From dabda81a6ff1de750e684a597b631584e2d369c3 Mon Sep 17 00:00:00 2001 From: Branimir Lambov Date: Tue, 29 Nov 2022 09:41:23 +0200 Subject: [PATCH 01/27] Generalize prettyPrint, add parseHumanReadable and test Introduces prettyPrintBinary (with 2^10 multiplier, e.g. MiB) and prettyPrintDecimal (with 1000 multiplier, e.g. ms) that cover the full range of double, and a corresponding parsing method that can read both. --- .../cassandra/tools/SSTablePartitions.java | 2 +- .../apache/cassandra/utils/FBUtilities.java | 172 ++++++++++++++++-- .../nodetool/stats/TableStatsPrinterTest.java | 48 ++--- .../cassandra/utils/FBUtilitiesTest.java | 109 +++++++++++ .../stress/report/StressMetrics.java | 2 +- 5 files changed, 295 insertions(+), 38 deletions(-) diff --git a/src/java/org/apache/cassandra/tools/SSTablePartitions.java b/src/java/org/apache/cassandra/tools/SSTablePartitions.java index c513853e18bb..2181346271bb 100644 --- a/src/java/org/apache/cassandra/tools/SSTablePartitions.java +++ b/src/java/org/apache/cassandra/tools/SSTablePartitions.java @@ -427,7 +427,7 @@ private static void processSSTable(String[] keys, private static String prettyPrintMemory(long bytes) { - return FBUtilities.prettyPrintMemory(bytes, true); + return FBUtilities.prettyPrintMemory(bytes, " "); } private static ISSTableScanner buildScanner(SSTableReader sstable, diff --git a/src/java/org/apache/cassandra/utils/FBUtilities.java b/src/java/org/apache/cassandra/utils/FBUtilities.java index 83c49c5307a9..49b9dd822cea 100644 --- a/src/java/org/apache/cassandra/utils/FBUtilities.java +++ b/src/java/org/apache/cassandra/utils/FBUtilities.java @@ -50,6 +50,8 @@ import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; +import java.util.regex.Matcher; +import java.util.regex.Pattern; import java.util.zip.CRC32; import java.util.zip.Checksum; import javax.annotation.Nonnull; @@ -827,27 +829,97 @@ public static CloseableIterator 
closeableIterator(Iterator iterator) return new WrappedCloseableIterator(iterator); } + final static String UNIT_PREFIXES = "qryzafpnum kMGTPEZYRQ"; + final static int UNIT_PREFIXES_BASE = UNIT_PREFIXES.indexOf(' '); + final static Pattern BASE_NUMBER_PATTERN = Pattern.compile("NaN|[+-]?Infinity|[+-]?\\d+(\\.\\d+)?([eE]([+-]?)\\d+)?"); + final static Pattern BINARY_EXPONENT = Pattern.compile("\\*2\\^([+-]?\\d+)"); + + /** + * Convert the given size in bytes to a human-readable value using binary (i.e. 2^10-based) modifiers. + * For example, 1.000kiB, 2.100GiB etc., up to 8.000 EiB. + * @param size Number to convert. + */ public static String prettyPrintMemory(long size) { - return prettyPrintMemory(size, false); + return prettyPrintMemory(size, ""); } - public static String prettyPrintMemory(long size, boolean includeSpace) + /** + * Convert the given size in bytes to a human-readable value using binary (i.e. 2^10-based) modifiers. + * For example, 1.000kiB, 2.100GiB etc., up to 8.000 EiB. + * @param size Number to convert. + * @param separator Separator between the number and the (modified) unit. + */ + public static String prettyPrintMemory(long size, String separator) { - if (size >= 1 << 30) - return String.format("%.3f%sGiB", size / (double) (1 << 30), includeSpace ? " " : ""); - if (size >= 1 << 20) - return String.format("%.3f%sMiB", size / (double) (1 << 20), includeSpace ? " " : ""); - return String.format("%.3f%sKiB", size / (double) (1 << 10), includeSpace ? " " : ""); + int prefixIndex = (63 - Long.numberOfLeadingZeros(Math.abs(size))) / 10; + if (prefixIndex == 0) + return String.format("%d%sB", size, separator); + else + return String.format("%.3f%s%ciB", + Math.scalb(size, -prefixIndex * 10), + separator, + UNIT_PREFIXES.charAt(UNIT_PREFIXES_BASE + prefixIndex)); + } + + /** + * Convert the given value to a human-readable string using binary (i.e. 2^10-based) modifiers. + * If the number is outside the modifier range (i.e. 
< 1 qi or > 1 Qi), it will be printed as v*2^e where e is a + * multiple of 10 with sign. + * For example, 1.000kiB, 2.100 miB/s, 7.006*2^+150, -Infinity. + * @param value Number to convert. + * @param separator Separator between the number and the (modified) unit. + */ + public static String prettyPrintBinary(double value, String unit, String separator) + { + int prefixIndex = Math.floorDiv(Math.getExponent(value), 10); + if (prefixIndex == 0 || !Double.isFinite(value) || value == 0) + return String.format("%.3f%s%s", value, separator, unit); + else if (prefixIndex > UNIT_PREFIXES_BASE || prefixIndex < -UNIT_PREFIXES_BASE) + return String.format("%.3f*2^%+d%s%s", + Math.scalb(value, -prefixIndex * 10), + prefixIndex * 10, + separator, + unit); + else + return String.format("%.3f%s%ci%s", + Math.scalb(value, -prefixIndex * 10), + separator, + UNIT_PREFIXES.charAt(UNIT_PREFIXES_BASE + prefixIndex), + unit); + } + + /** + * Convert the given value to a human-readable string using decimal (i.e. 10^3-based) modifiers. + * If the number is outside the modifier range (i.e. < 1 qi or > 1 Qi), it will be printed as vEe where e is a + * multiple of 3 with sign. + * For example, 1.000km, 2.100 ms, 10E+45, NaN. + * @param value Number to convert. + * @param separator Separator between the number and the (modified) unit. 
+ */ + public static String prettyPrintDecimal(double value, String unit, String separator) + { + int prefixIndex = (int) Math.floor(Math.log10(Math.abs(value)) / 3); + double base = value * Math.pow(1000.0, -prefixIndex); + if (prefixIndex == 0 || !Double.isFinite(value) || !Double.isFinite(base) || value == 0) + return String.format("%.3f%s%s", value, separator, unit); + else if (prefixIndex > UNIT_PREFIXES_BASE || prefixIndex < -UNIT_PREFIXES_BASE) + return String.format("%.3fe%+d%s%s", + base, + prefixIndex * 3, + separator, + unit); + else + return String.format("%.3f%s%c%s", + base, + separator, + UNIT_PREFIXES.charAt(UNIT_PREFIXES_BASE + prefixIndex), + unit); } public static String prettyPrintMemoryPerSecond(long rate) { - if (rate >= 1 << 30) - return String.format("%.3fGiB/s", rate / (double) (1 << 30)); - if (rate >= 1 << 20) - return String.format("%.3fMiB/s", rate / (double) (1 << 20)); - return String.format("%.3fKiB/s", rate / (double) (1 << 10)); + return prettyPrintMemory(rate) + "/s"; } public static String prettyPrintMemoryPerSecond(long bytes, long timeInNano) @@ -861,6 +933,82 @@ public static String prettyPrintMemoryPerSecond(long bytes, long timeInNano) return prettyPrintMemoryPerSecond(rate); } + /** + * Parse a human-readable value printed using one of the methods above. Understands both binary and decimal + * modifiers, as well as decimal exponents using the E notation and binary exponents using *2^e. + * + * @param datum The human-readable number. + * @param separator Expected separator, null to accept any amount of whitespace. + * @param unit Expected unit. + * @return The parsed value. 
+ */ + public static double parseHumanReadable(String datum, String separator, String unit) + { + int end = datum.length(); + if (unit != null) + { + if (!datum.endsWith(unit)) + throw new NumberFormatException(datum + " does not end in unit " + unit); + end -= unit.length(); + } + + Matcher m = BASE_NUMBER_PATTERN.matcher(datum); + m.region(0, end); + if (!m.lookingAt()) + throw new NumberFormatException(); + double v = Double.parseDouble(m.group(0)); + + int pos = m.end(); + if (m.group(2) == null) // possible binary exponent, parse + { + m = BINARY_EXPONENT.matcher(datum); + m.region(pos, end); + if (m.lookingAt()) + { + int power = Integer.parseInt(m.group(1)); + v = Math.scalb(v, power); + pos = m.end(); + } + } + + if (separator != null) + { + if (!datum.startsWith(separator, pos)) + throw new NumberFormatException("Missing separator " + separator + " in " + datum); + pos += separator.length(); + } + else + { + while (pos < end && Character.isWhitespace(datum.charAt(pos))) + ++pos; + } + + if (pos < end) + { + char prefixChar = datum.charAt(pos); + int prefixIndex = UNIT_PREFIXES.indexOf(prefixChar); + if (prefixIndex >= 0) + { + prefixIndex -= UNIT_PREFIXES_BASE; + ++pos; + if (pos < end && datum.charAt(pos) == 'i') + { + ++pos; + v = Math.scalb(v, prefixIndex * 10); + } + else + { + v *= Math.exp(Math.log(1000.0) * prefixIndex); + } + } + } + + if (pos != end && unit != null) + throw new NumberFormatException("Unexpected characters between pos " + pos + " and " + end + " in " + datum); + + return v; + } + /** * Starts and waits for the given @param pb to finish. 
* @throws java.io.IOException on non-zero exit code diff --git a/test/unit/org/apache/cassandra/tools/nodetool/stats/TableStatsPrinterTest.java b/test/unit/org/apache/cassandra/tools/nodetool/stats/TableStatsPrinterTest.java index 0ddef9ee6a9c..b83d14f73cca 100644 --- a/test/unit/org/apache/cassandra/tools/nodetool/stats/TableStatsPrinterTest.java +++ b/test/unit/org/apache/cassandra/tools/nodetool/stats/TableStatsPrinterTest.java @@ -39,7 +39,7 @@ public class TableStatsPrinterTest extends TableStatsTestBase "\tTable: %s\n" + "\tSSTable count: 60000\n" + "\tOld SSTable count: 0\n" + - "\tMax SSTable size: 0.000KiB\n" + + "\tMax SSTable size: 0B\n" + "\tSpace used (live): 0\n" + "\tSpace used (total): 9001\n" + "\tSpace used by snapshots (total): 1111\n" + @@ -55,9 +55,9 @@ public class TableStatsPrinterTest extends TableStatsTestBase "\tLocal read/write ratio: 0.00000\n" + "\tPending flushes: 11111\n" + "\tPercent repaired: 100.0\n" + - "\tBytes repaired: 0.000KiB\n" + - "\tBytes unrepaired: 0.000KiB\n" + - "\tBytes pending repair: 0.000KiB\n" + + "\tBytes repaired: 0B\n" + + "\tBytes unrepaired: 0B\n" + + "\tBytes pending repair: 0B\n" + "\tBloom filter false positives: 30\n" + "\tBloom filter false ratio: 0.40000\n" + "\tBloom filter space used: 789\n" + @@ -76,7 +76,7 @@ public class TableStatsPrinterTest extends TableStatsTestBase "\tTable: %s\n" + "\tSSTable count: 3000\n" + "\tOld SSTable count: 0\n" + - "\tMax SSTable size: 0.000KiB\n" + + "\tMax SSTable size: 0B\n" + "\tSpace used (live): 22\n" + "\tSpace used (total): 1024\n" + "\tSpace used by snapshots (total): 222\n" + @@ -94,9 +94,9 @@ public class TableStatsPrinterTest extends TableStatsTestBase "\tLocal read/write ratio: 0.00000\n" + "\tPending flushes: 222222\n" + "\tPercent repaired: 99.9\n" + - "\tBytes repaired: 0.000KiB\n" + - "\tBytes unrepaired: 0.000KiB\n" + - "\tBytes pending repair: 0.000KiB\n" + + "\tBytes repaired: 0B\n" + + "\tBytes unrepaired: 0B\n" + + "\tBytes pending repair: 0B\n" + 
"\tBloom filter false positives: 600\n" + "\tBloom filter false ratio: 0.01000\n" + "\tBloom filter space used: 161718\n" + @@ -118,7 +118,7 @@ public class TableStatsPrinterTest extends TableStatsTestBase "\tTable: %s\n" + "\tSSTable count: 50000\n" + "\tOld SSTable count: 0\n" + - "\tMax SSTable size: 0.000KiB\n" + + "\tMax SSTable size: 0B\n" + "\tSpace used (live): 0\n" + "\tSpace used (total): 512\n" + "\tSpace used by snapshots (total): 0\n" + @@ -134,9 +134,9 @@ public class TableStatsPrinterTest extends TableStatsTestBase "\tLocal read/write ratio: 0.00000\n" + "\tPending flushes: 333\n" + "\tPercent repaired: 99.8\n" + - "\tBytes repaired: 0.000KiB\n" + - "\tBytes unrepaired: 0.000KiB\n" + - "\tBytes pending repair: 0.000KiB\n" + + "\tBytes repaired: 0B\n" + + "\tBytes unrepaired: 0B\n" + + "\tBytes pending repair: 0B\n" + "\tBloom filter false positives: 20\n" + "\tBloom filter false ratio: 0.50000\n" + "\tBloom filter space used: 456\n" + @@ -155,7 +155,7 @@ public class TableStatsPrinterTest extends TableStatsTestBase "\tTable: %s\n" + "\tSSTable count: 2000\n" + "\tOld SSTable count: 0\n" + - "\tMax SSTable size: 0.000KiB\n" + + "\tMax SSTable size: 0B\n" + "\tSpace used (live): 4444\n" + "\tSpace used (total): 256\n" + "\tSpace used by snapshots (total): 44\n" + @@ -173,9 +173,9 @@ public class TableStatsPrinterTest extends TableStatsTestBase "\tLocal read/write ratio: 0.00000\n" + "\tPending flushes: 4444\n" + "\tPercent repaired: 50.0\n" + - "\tBytes repaired: 0.000KiB\n" + - "\tBytes unrepaired: 0.000KiB\n" + - "\tBytes pending repair: 0.000KiB\n" + + "\tBytes repaired: 0B\n" + + "\tBytes unrepaired: 0B\n" + + "\tBytes pending repair: 0B\n" + "\tBloom filter false positives: 500\n" + "\tBloom filter false ratio: 0.02000\n" + "\tBloom filter space used: 131415\n" + @@ -197,7 +197,7 @@ public class TableStatsPrinterTest extends TableStatsTestBase "\tTable: %s\n" + "\tSSTable count: 40000\n" + "\tOld SSTable count: 0\n" + - "\tMax SSTable size: 
0.000KiB\n" + + "\tMax SSTable size: 0B\n" + "\tSpace used (live): 55555\n" + "\tSpace used (total): 64\n" + "\tSpace used by snapshots (total): 55555\n" + @@ -213,9 +213,9 @@ public class TableStatsPrinterTest extends TableStatsTestBase "\tLocal read/write ratio: 0.00000\n" + "\tPending flushes: 5\n" + "\tPercent repaired: 93.0\n" + - "\tBytes repaired: 0.000KiB\n" + - "\tBytes unrepaired: 0.000KiB\n" + - "\tBytes pending repair: 0.000KiB\n" + + "\tBytes repaired: 0B\n" + + "\tBytes unrepaired: 0B\n" + + "\tBytes pending repair: 0B\n" + "\tBloom filter false positives: 10\n" + "\tBloom filter false ratio: 0.60000\n" + "\tBloom filter space used: 123\n" + @@ -234,7 +234,7 @@ public class TableStatsPrinterTest extends TableStatsTestBase "\tTable: %s\n" + "\tSSTable count: 1000\n" + "\tOld SSTable count: 0\n" + - "\tMax SSTable size: 0.000KiB\n" + + "\tMax SSTable size: 0B\n" + "\tSpace used (live): 666666\n" + "\tSpace used (total): 0\n" + "\tSpace used by snapshots (total): 0\n" + @@ -252,9 +252,9 @@ public class TableStatsPrinterTest extends TableStatsTestBase "\tLocal read/write ratio: 0.00000\n" + "\tPending flushes: 66\n" + "\tPercent repaired: 0.0\n" + - "\tBytes repaired: 0.000KiB\n" + - "\tBytes unrepaired: 0.000KiB\n" + - "\tBytes pending repair: 0.000KiB\n" + + "\tBytes repaired: 0B\n" + + "\tBytes unrepaired: 0B\n" + + "\tBytes pending repair: 0B\n" + "\tBloom filter false positives: 400\n" + "\tBloom filter false ratio: 0.03000\n" + "\tBloom filter space used: 101112\n" + diff --git a/test/unit/org/apache/cassandra/utils/FBUtilitiesTest.java b/test/unit/org/apache/cassandra/utils/FBUtilitiesTest.java index 1c77fffe750c..ea1a9bf6f889 100644 --- a/test/unit/org/apache/cassandra/utils/FBUtilitiesTest.java +++ b/test/unit/org/apache/cassandra/utils/FBUtilitiesTest.java @@ -26,6 +26,7 @@ import java.util.Arrays; import java.util.Map; import java.util.Optional; +import java.util.Random; import java.util.TreeMap; import java.util.ArrayList; import 
java.util.List; @@ -41,6 +42,9 @@ import org.junit.Assert; import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import org.apache.cassandra.db.marshal.*; import org.apache.cassandra.dht.*; @@ -53,6 +57,9 @@ public class FBUtilitiesTest { + + public static final Logger LOGGER = LoggerFactory.getLogger(FBUtilitiesTest.class); + @Test public void testCompareByteSubArrays() { @@ -254,4 +261,106 @@ public void testCamelToSnake() if (error != null) throw error; } + + @Test + public void testPrettyPrintAndParse() + { + String[] tests = new String[]{ + "1", "", "", "1", + "1k", "", "", "1e3", + "1 kiB", " ", "B", "1024", + "10 B/s", " ", "B/s", "10", + "10.2 MiB/s", null, "B/s", "10695475.2", + "10e+5", "", "", "10e5", + "10*2^20", "", "", "10485760", + "1024*2^-10", "", "", "1", + "1024 miB", " ", "B", "1", + "1000000um", "", "m", "1", + "10e+25s", "", "s", "10e25", + "1.12345e-25", "", "", "1.12345e-25", + "10e+45", "", "", "10e45", + "1.12345e-45", "", "", "1.12345e-45", + "55.3 garbage", null, null, "55.3", + "0.00TiB", "", "B", "0", + "-23", null, null, "-23", + "-55 Gt", " ", "t", "-55e9", + "-123e+3", null, null, "-123000", + "-876ns", "", "s", "-876e-9", + Long.toString(Long.MAX_VALUE), null, null, Long.toString(Long.MAX_VALUE), + Long.toString(Long.MIN_VALUE), null, null, Long.toString(Long.MIN_VALUE), + "Infinity kg", " ", "kg", "+Infinity", + "NaN", "", "", "NaN", + "-Infinity", "", "", "-Infinity", + }; + + for (int i = 0; i < tests.length; i += 4) + { + String v = tests[i]; + String sep = tests[i + 1]; + String unit = tests[i + 2]; + double exp = Double.parseDouble(tests[i+3]); + String vBin = FBUtilities.prettyPrintBinary(exp, unit == null ? "" : unit, sep == null ? " " : sep); + String vDec = FBUtilities.prettyPrintDecimal(exp, unit == null ? "w" : unit, sep == null ? 
"\t" : sep); + LOGGER.info("{} binary {} decimal {} expected {}", v, vBin, vDec, exp); + Assert.assertEquals(exp, FBUtilities.parseHumanReadable(v, sep, unit), getDelta(exp)); + Assert.assertEquals(exp, FBUtilities.parseHumanReadable(vBin, sep, unit), getDelta(exp)); + Assert.assertEquals(exp, FBUtilities.parseHumanReadable(vDec, sep, unit), getDelta(exp)); + + if (((long) exp) == exp) + Assert.assertEquals(exp, + FBUtilities.parseHumanReadable(FBUtilities.prettyPrintMemory((long) exp), + null, + "B"), + getDelta(exp)); + } + } + + private static double getDelta(double exp) + { + return Math.max(0.001 * Math.abs(exp), 1e-305); + } + + @Test + public void testPrettyPrintAndParseRange() + { + String unit = ""; + String sep = ""; + for (int exp = -100; exp < 100; ++exp) + { + for (double base = -1.0; base <= 1.0; base += 0.12) // avoid hitting 0 exactly + { + for (boolean binary : new boolean[] {false, true}) + { + double value = binary + ? Math.scalb(base, exp * 10) + : base * Math.pow(10, exp); + String vBin = FBUtilities.prettyPrintBinary(value, unit, sep); + String vDec = FBUtilities.prettyPrintDecimal(value, unit, sep); + LOGGER.info("{} binary {} decimal {}", value, vBin, vDec); + Assert.assertEquals(value, FBUtilities.parseHumanReadable(vBin, sep, unit), getDelta(value)); + Assert.assertEquals(value, FBUtilities.parseHumanReadable(vDec, sep, unit), getDelta(value)); + } + } + } + } + + @Test + public void testPrettyPrintAndParseRandom() + { + Random rand = new Random(); + String unit = ""; + String sep = ""; + for (int i = 0; i < 1000; ++i) + { + long bits = rand.nextLong(); + double value = Double.longBitsToDouble(bits); + if (Double.isNaN(value)) + value = Double.NaN; // to avoid failures on non-bitwise-equal NaNs + String vBin = FBUtilities.prettyPrintBinary(value, unit, sep); + String vDec = FBUtilities.prettyPrintDecimal(value, unit, sep); + LOGGER.info("{} binary {} decimal {}", value, vBin, vDec); + Assert.assertEquals(value, 
FBUtilities.parseHumanReadable(vBin, sep, unit), getDelta(value)); + Assert.assertEquals(value, FBUtilities.parseHumanReadable(vDec, sep, unit), getDelta(value)); + } + } } diff --git a/tools/stress/src/org/apache/cassandra/stress/report/StressMetrics.java b/tools/stress/src/org/apache/cassandra/stress/report/StressMetrics.java index 8579bbf173a6..2c54368b2139 100644 --- a/tools/stress/src/org/apache/cassandra/stress/report/StressMetrics.java +++ b/tools/stress/src/org/apache/cassandra/stress/report/StressMetrics.java @@ -420,7 +420,7 @@ public void summarise() output.println(String.format("Total partitions : %,10d %s", history.partitionCount, opHistory.partitionCounts())); output.println(String.format("Total errors : %,10d %s", history.errorCount, opHistory.errorCounts())); output.println(String.format("Total GC count : %,1.0f", totalGcStats.count)); - output.println(String.format("Total GC memory : %s", FBUtilities.prettyPrintMemory((long)totalGcStats.bytes, true))); + output.println(String.format("Total GC memory : %s", FBUtilities.prettyPrintMemory((long)totalGcStats.bytes, " "))); output.println(String.format("Total GC time : %,6.1f seconds", totalGcStats.summs / 1000)); output.println(String.format("Avg GC time : %,6.1f ms", totalGcStats.summs / totalGcStats.count)); output.println(String.format("StdDev GC time : %,6.1f ms", totalGcStats.sdvms)); From 5b91a0165929f539fe1e74c58ef8db104582f2fc Mon Sep 17 00:00:00 2001 From: Branimir Lambov Date: Wed, 21 Dec 2022 17:43:46 +0200 Subject: [PATCH 02/27] Rename increaseSlightly to nextValidToken --- .../cassandra/dht/ByteOrderedPartitioner.java | 2 +- .../cassandra/dht/ComparableObjectToken.java | 2 +- .../apache/cassandra/dht/Murmur3Partitioner.java | 2 +- .../apache/cassandra/dht/RandomPartitioner.java | 2 +- src/java/org/apache/cassandra/dht/Token.java | 15 +++++++++++---- .../dht/tokenallocator/TokenAllocation.java | 2 +- .../cassandra/db/DiskBoundaryManagerTest.java | 4 ++-- 
.../service/paxos/PaxosRepairHistoryTest.java | 8 ++++---- .../cassandra/service/reads/DataResolverTest.java | 2 +- 9 files changed, 23 insertions(+), 16 deletions(-) diff --git a/src/java/org/apache/cassandra/dht/ByteOrderedPartitioner.java b/src/java/org/apache/cassandra/dht/ByteOrderedPartitioner.java index ae929c8c0171..43d4d317287f 100644 --- a/src/java/org/apache/cassandra/dht/ByteOrderedPartitioner.java +++ b/src/java/org/apache/cassandra/dht/ByteOrderedPartitioner.java @@ -136,7 +136,7 @@ public double size(Token next) } @Override - public Token increaseSlightly() + public Token nextValidToken() { throw new UnsupportedOperationException(String.format("Token type %s does not support token allocation.", getClass().getSimpleName())); diff --git a/src/java/org/apache/cassandra/dht/ComparableObjectToken.java b/src/java/org/apache/cassandra/dht/ComparableObjectToken.java index 97c0c52d0de8..98e4017342df 100644 --- a/src/java/org/apache/cassandra/dht/ComparableObjectToken.java +++ b/src/java/org/apache/cassandra/dht/ComparableObjectToken.java @@ -75,7 +75,7 @@ public double size(Token next) } @Override - public Token increaseSlightly() + public Token nextValidToken() { throw new UnsupportedOperationException(String.format("Token type %s does not support token allocation.", getClass().getSimpleName())); diff --git a/src/java/org/apache/cassandra/dht/Murmur3Partitioner.java b/src/java/org/apache/cassandra/dht/Murmur3Partitioner.java index 73d7b4f3ed55..17ba92f754cd 100644 --- a/src/java/org/apache/cassandra/dht/Murmur3Partitioner.java +++ b/src/java/org/apache/cassandra/dht/Murmur3Partitioner.java @@ -213,7 +213,7 @@ public double size(Token next) } @Override - public LongToken increaseSlightly() + public LongToken nextValidToken() { return new LongToken(token + 1); } diff --git a/src/java/org/apache/cassandra/dht/RandomPartitioner.java b/src/java/org/apache/cassandra/dht/RandomPartitioner.java index 930ebb1a4771..a8fbe764d47d 100644 --- 
a/src/java/org/apache/cassandra/dht/RandomPartitioner.java +++ b/src/java/org/apache/cassandra/dht/RandomPartitioner.java @@ -271,7 +271,7 @@ public long getHeapSize() return HEAP_SIZE; } - public Token increaseSlightly() + public Token nextValidToken() { return new BigIntegerToken(token.add(BigInteger.ONE)); } diff --git a/src/java/org/apache/cassandra/dht/Token.java b/src/java/org/apache/cassandra/dht/Token.java index 3543dabc0e3a..c8c8d0aaa506 100644 --- a/src/java/org/apache/cassandra/dht/Token.java +++ b/src/java/org/apache/cassandra/dht/Token.java @@ -144,11 +144,18 @@ public long serializedSize(Token object, int version) */ abstract public double size(Token next); /** - * Returns a token that is slightly greater than this. Used to avoid clashes - * between nodes in separate datacentres trying to use the same token via - * the token allocation algorithm. + * Returns the next possible token in the token space, one that compares + * greater than this and such that there is no other token that sits + * between this token and it in the token order. + * + * This is not possible for all token types, esp. for comparison-based + * tokens such as the LocalPartioner used for classic secondary indexes. + * + * Used to avoid clashes between nodes in separate datacentres trying to + * use the same token via the token allocation algorithm, as well as in + * constructing token ranges for sstables. 
*/ - abstract public Token increaseSlightly(); + abstract public Token nextValidToken(); public Token getToken() { diff --git a/src/java/org/apache/cassandra/dht/tokenallocator/TokenAllocation.java b/src/java/org/apache/cassandra/dht/tokenallocator/TokenAllocation.java index e3e81dc1163f..7e46b87855ce 100644 --- a/src/java/org/apache/cassandra/dht/tokenallocator/TokenAllocation.java +++ b/src/java/org/apache/cassandra/dht/tokenallocator/TokenAllocation.java @@ -161,7 +161,7 @@ final Collection adjustForCrossDatacenterClashes(Collection tokens InetAddressAndPort other = tokenMetadata.getEndpoint(t); if (inAllocationRing(other)) throw new ConfigurationException(String.format("Allocated token %s already assigned to node %s. Is another node also allocating tokens?", t, other)); - t = t.increaseSlightly(); + t = t.nextValidToken(); } filtered.add(t); } diff --git a/test/unit/org/apache/cassandra/db/DiskBoundaryManagerTest.java b/test/unit/org/apache/cassandra/db/DiskBoundaryManagerTest.java index f043c0bcb069..adca700b62c7 100644 --- a/test/unit/org/apache/cassandra/db/DiskBoundaryManagerTest.java +++ b/test/unit/org/apache/cassandra/db/DiskBoundaryManagerTest.java @@ -160,8 +160,8 @@ public void testGetDataDirectoriesForFiles() SSTableReader containedDisk2 = MockSchema.sstable(gen++, (long)sstableFirstDisk2.getTokenValue(), (long)sstableEndDisk2.getTokenValue(), 0, mock); SSTableReader disk1Boundary = MockSchema.sstable(gen++, (long)sstableFirstDisk1.getTokenValue(), (long)tokens.get(0).getTokenValue(), 0, mock); - SSTableReader disk2Full = MockSchema.sstable(gen++, (long)tokens.get(0).increaseSlightly().getTokenValue(), (long)tokens.get(1).getTokenValue(), 0, mock); - SSTableReader disk3Full = MockSchema.sstable(gen++, (long)tokens.get(1).increaseSlightly().getTokenValue(), (long)partitioner.getMaximumToken().getTokenValue(), 0, mock); + SSTableReader disk2Full = MockSchema.sstable(gen++, (long)tokens.get(0).nextValidToken().getTokenValue(), 
(long)tokens.get(1).getTokenValue(), 0, mock); + SSTableReader disk3Full = MockSchema.sstable(gen++, (long)tokens.get(1).nextValidToken().getTokenValue(), (long)partitioner.getMaximumToken().getTokenValue(), 0, mock); Assert.assertEquals(tableDirs, mock.getDirectoriesForFiles(ImmutableSet.of())); Assert.assertEquals(Lists.newArrayList(tableDirs.get(0)), mock.getDirectoriesForFiles(ImmutableSet.of(containedDisk1))); diff --git a/test/unit/org/apache/cassandra/service/paxos/PaxosRepairHistoryTest.java b/test/unit/org/apache/cassandra/service/paxos/PaxosRepairHistoryTest.java index bcd97143d126..cd13616ccd97 100644 --- a/test/unit/org/apache/cassandra/service/paxos/PaxosRepairHistoryTest.java +++ b/test/unit/org/apache/cassandra/service/paxos/PaxosRepairHistoryTest.java @@ -328,10 +328,10 @@ private void testRandomTrims(long seed, int numberOfAdditions, int maxNumberOfRa if (!range.left.isMinimum()) Assert.assertEquals(none(), trimmed.ballotForToken(range.left)); if (!prev.right.isMinimum()) - Assert.assertEquals(none(), trimmed.ballotForToken(prev.right.increaseSlightly())); + Assert.assertEquals(none(), trimmed.ballotForToken(prev.right.nextValidToken())); } - Assert.assertEquals(history.ballotForToken(range.left.increaseSlightly()), trimmed.ballotForToken(range.left.increaseSlightly())); - if (!range.left.increaseSlightly().equals(range.right)) + Assert.assertEquals(history.ballotForToken(range.left.nextValidToken()), trimmed.ballotForToken(range.left.nextValidToken())); + if (!range.left.nextValidToken().equals(range.right)) Assert.assertEquals(history.ballotForToken(((LongToken)range.right).decreaseSlightly()), trimmed.ballotForToken(((LongToken)range.right).decreaseSlightly())); if (range.right.isMinimum()) @@ -401,7 +401,7 @@ private void testRandomAdds(long seed, int numberOfMerges, int numberOfAdditions LongToken tk = (LongToken) token; Assert.assertEquals(id, check.ballotForToken(tk.decreaseSlightly()), check.test.ballotForToken(tk.decreaseSlightly())); 
Assert.assertEquals(id, check.ballotForToken(tk), check.test.ballotForToken(token)); - Assert.assertEquals(id, check.ballotForToken(tk.increaseSlightly()), check.test.ballotForToken(token.increaseSlightly())); + Assert.assertEquals(id, check.ballotForToken(tk.nextValidToken()), check.test.ballotForToken(token.nextValidToken())); } // check some random diff --git a/test/unit/org/apache/cassandra/service/reads/DataResolverTest.java b/test/unit/org/apache/cassandra/service/reads/DataResolverTest.java index 07863c823212..24504296ad9e 100644 --- a/test/unit/org/apache/cassandra/service/reads/DataResolverTest.java +++ b/test/unit/org/apache/cassandra/service/reads/DataResolverTest.java @@ -118,7 +118,7 @@ private EndpointsForRange makeReplicas(int num) { InetAddressAndPort endpoint = InetAddressAndPort.getByAddress(new byte[]{ 127, 0, 0, (byte) (i + 1) }); replicas.add(ReplicaUtils.full(endpoint)); - StorageService.instance.getTokenMetadata().updateNormalToken(token = token.increaseSlightly(), endpoint); + StorageService.instance.getTokenMetadata().updateNormalToken(token = token.nextValidToken(), endpoint); Gossiper.instance.initializeNodeUnsafe(endpoint, UUID.randomUUID(), 1); } catch (UnknownHostException e) From 712527af6de95ba4ec6e864070a6f6790614ff4f Mon Sep 17 00:00:00 2001 From: Branimir Lambov Date: Mon, 6 Mar 2023 11:59:01 +0200 Subject: [PATCH 03/27] Rename sstableComparator to firstKeyComparator --- src/java/org/apache/cassandra/db/ColumnFamilyStore.java | 2 +- .../cassandra/db/compaction/AbstractCompactionStrategy.java | 2 +- .../cassandra/db/compaction/LeveledCompactionStrategy.java | 2 +- .../apache/cassandra/db/compaction/LeveledGenerations.java | 2 +- .../org/apache/cassandra/db/compaction/LeveledManifest.java | 2 +- .../org/apache/cassandra/io/sstable/format/SSTableReader.java | 4 ++-- .../db/compaction/LeveledCompactionStrategyTest.java | 4 ++-- 7 files changed, 9 insertions(+), 9 deletions(-) diff --git 
a/src/java/org/apache/cassandra/db/ColumnFamilyStore.java b/src/java/org/apache/cassandra/db/ColumnFamilyStore.java index ba68ca2b4e02..4a2ebc7ee75a 100644 --- a/src/java/org/apache/cassandra/db/ColumnFamilyStore.java +++ b/src/java/org/apache/cassandra/db/ColumnFamilyStore.java @@ -1521,7 +1521,7 @@ public Collection getOverlappingLiveSSTables(Iterable sortedByFirst = Lists.newArrayList(sstables); - sortedByFirst.sort(SSTableReader.sstableComparator); + sortedByFirst.sort(SSTableReader.firstKeyComparator); List> bounds = new ArrayList<>(); DecoratedKey first = null, last = null; diff --git a/src/java/org/apache/cassandra/db/compaction/AbstractCompactionStrategy.java b/src/java/org/apache/cassandra/db/compaction/AbstractCompactionStrategy.java index 7e24909cb5e6..e8e46668b66d 100644 --- a/src/java/org/apache/cassandra/db/compaction/AbstractCompactionStrategy.java +++ b/src/java/org/apache/cassandra/db/compaction/AbstractCompactionStrategy.java @@ -530,7 +530,7 @@ public Collection> groupSSTablesForAntiCompaction(Coll { int groupSize = 2; List sortedSSTablesToGroup = new ArrayList<>(sstablesToGroup); - Collections.sort(sortedSSTablesToGroup, SSTableReader.sstableComparator); + Collections.sort(sortedSSTablesToGroup, SSTableReader.firstKeyComparator); Collection> groupedSSTables = new ArrayList<>(); Collection currGroup = new ArrayList<>(groupSize); diff --git a/src/java/org/apache/cassandra/db/compaction/LeveledCompactionStrategy.java b/src/java/org/apache/cassandra/db/compaction/LeveledCompactionStrategy.java index a604039f01db..820f30413361 100644 --- a/src/java/org/apache/cassandra/db/compaction/LeveledCompactionStrategy.java +++ b/src/java/org/apache/cassandra/db/compaction/LeveledCompactionStrategy.java @@ -428,7 +428,7 @@ public LeveledScanner(TableMetadata metadata, Collection sstables totalLength = length; compressedLength = cLength; - Collections.sort(this.sstables, SSTableReader.sstableComparator); + Collections.sort(this.sstables, 
SSTableReader.firstKeyComparator); sstableIterator = this.sstables.iterator(); assert sstableIterator.hasNext(); // caller should check intersecting first SSTableReader currentSSTable = sstableIterator.next(); diff --git a/src/java/org/apache/cassandra/db/compaction/LeveledGenerations.java b/src/java/org/apache/cassandra/db/compaction/LeveledGenerations.java index 08cda89c856d..fac582a645e7 100644 --- a/src/java/org/apache/cassandra/db/compaction/LeveledGenerations.java +++ b/src/java/org/apache/cassandra/db/compaction/LeveledGenerations.java @@ -75,7 +75,7 @@ class LeveledGenerations private final TreeSet [] levels = new TreeSet[MAX_LEVEL_COUNT - 1]; private static final Comparator nonL0Comparator = (o1, o2) -> { - int cmp = SSTableReader.sstableComparator.compare(o1, o2); + int cmp = SSTableReader.firstKeyComparator.compare(o1, o2); if (cmp == 0) cmp = SSTableIdFactory.COMPARATOR.compare(o1.descriptor.id, o2.descriptor.id); return cmp; diff --git a/src/java/org/apache/cassandra/db/compaction/LeveledManifest.java b/src/java/org/apache/cassandra/db/compaction/LeveledManifest.java index 433918141108..c2adcf01575f 100644 --- a/src/java/org/apache/cassandra/db/compaction/LeveledManifest.java +++ b/src/java/org/apache/cassandra/db/compaction/LeveledManifest.java @@ -159,7 +159,7 @@ public synchronized void replace(Collection removed, Collection sstables) diff --git a/src/java/org/apache/cassandra/io/sstable/format/SSTableReader.java b/src/java/org/apache/cassandra/io/sstable/format/SSTableReader.java index 59bea8afb682..b12238996c31 100644 --- a/src/java/org/apache/cassandra/io/sstable/format/SSTableReader.java +++ b/src/java/org/apache/cassandra/io/sstable/format/SSTableReader.java @@ -183,8 +183,8 @@ public static final class UniqueIdentifier } public final UniqueIdentifier instanceId = new UniqueIdentifier(); - public static final Comparator sstableComparator = Comparator.comparing(o -> o.first); - public static final Ordering sstableOrdering = 
Ordering.from(sstableComparator); + public static final Comparator firstKeyComparator = (o1, o2) -> o1.first.compareTo(o2.first); + public static final Ordering firstKeyOrdering = Ordering.from(firstKeyComparator); public static final Comparator idComparator = Comparator.comparing(t -> t.descriptor.id, SSTableIdFactory.COMPARATOR); public static final Comparator idReverseComparator = idComparator.reversed(); diff --git a/test/unit/org/apache/cassandra/db/compaction/LeveledCompactionStrategyTest.java b/test/unit/org/apache/cassandra/db/compaction/LeveledCompactionStrategyTest.java index 22259192de82..235c2735e27d 100644 --- a/test/unit/org/apache/cassandra/db/compaction/LeveledCompactionStrategyTest.java +++ b/test/unit/org/apache/cassandra/db/compaction/LeveledCompactionStrategyTest.java @@ -733,7 +733,7 @@ public void randomMultiLevelAddTest() assertTrue(level.stream().allMatch(s -> s.getSSTableLevel() == lvl)); if (i > 0) { - level.sort(SSTableReader.sstableComparator); + level.sort(SSTableReader.firstKeyComparator); SSTableReader prev = null; for (SSTableReader sstable : level) { @@ -843,7 +843,7 @@ private static int[] canAdd(LeveledManifest lm, List newSSTables, for (SSTableReader sstable : lvlGroup.getValue()) { newLevel.add(sstable); - newLevel.sort(SSTableReader.sstableComparator); + newLevel.sort(SSTableReader.firstKeyComparator); SSTableReader prev = null; boolean kept = true; From 1318f1db01a1cb35ac22011d76525f2c73862c5c Mon Sep 17 00:00:00 2001 From: Branimir Lambov Date: Wed, 15 Mar 2023 16:55:20 +0200 Subject: [PATCH 04/27] Encapsulate SSTableReader first and last --- .../apache/cassandra/db/DiskBoundaries.java | 4 +-- .../compaction/LeveledCompactionStrategy.java | 6 ++-- .../db/compaction/LeveledGenerations.java | 8 ++--- .../db/compaction/LeveledManifest.java | 30 +++++++++---------- .../db/lifecycle/SSTableIntervalTree.java | 2 +- .../repair/CassandraTableRepairManager.java | 2 +- .../repair/CassandraValidationIterator.java | 2 +- 
.../db/streaming/CassandraOutgoingFile.java | 2 +- .../db/streaming/CassandraStreamReceiver.java | 2 +- .../index/sasi/plan/QueryController.java | 2 +- .../cassandra/io/sstable/SSTableRewriter.java | 8 ++--- .../io/sstable/format/SSTableReader.java | 6 ++-- .../io/sstable/format/SSTableScanner.java | 20 ++++++------- .../sstable/format/SortedTableVerifier.java | 2 +- .../io/sstable/format/big/BigTableReader.java | 20 ++++++------- .../io/sstable/format/bti/BtiTableReader.java | 22 +++++++------- .../tools/SSTableOfflineRelevel.java | 6 ++-- .../utils/DiagnosticSnapshotService.java | 4 +-- .../test/PreviewRepairSnapshotTest.java | 2 +- .../LongLeveledCompactionStrategyTest.java | 2 +- .../ZeroCopyStreamingBenchmark.java | 4 +-- .../org/apache/cassandra/db/CleanupTest.java | 8 ++--- .../db/compaction/CancelCompactionsTest.java | 2 +- .../CompactionStrategyManagerTest.java | 2 +- .../LeveledCompactionStrategyTest.java | 8 ++--- .../db/compaction/LeveledGenerationsTest.java | 6 ++-- .../PendingAntiCompactionBytemanTest.java | 2 +- .../db/repair/PendingAntiCompactionTest.java | 6 ++-- ...assandraEntireSSTableStreamWriterTest.java | 2 +- .../streaming/CassandraOutgoingFileTest.java | 4 +-- .../streaming/CassandraStreamHeaderTest.java | 4 +-- .../streaming/CassandraStreamManagerTest.java | 2 +- .../io/sstable/SSTableReaderTest.java | 10 +++---- .../io/sstable/SSTableRewriterTest.java | 8 ++--- .../io/sstable/SSTableWriterTest.java | 2 +- .../cassandra/repair/ValidatorTest.java | 10 +++---- .../streaming/StreamTransferTaskTest.java | 4 +-- 37 files changed, 118 insertions(+), 118 deletions(-) diff --git a/src/java/org/apache/cassandra/db/DiskBoundaries.java b/src/java/org/apache/cassandra/db/DiskBoundaries.java index 32edcac4330c..7fe10f4c1336 100644 --- a/src/java/org/apache/cassandra/db/DiskBoundaries.java +++ b/src/java/org/apache/cassandra/db/DiskBoundaries.java @@ -109,7 +109,7 @@ public int getDiskIndex(SSTableReader sstable) return 
getBoundariesFromSSTableDirectory(sstable.descriptor); } - int pos = Collections.binarySearch(positions, sstable.first); + int pos = Collections.binarySearch(positions, sstable.getFirst()); assert pos < 0; // boundaries are .minkeybound and .maxkeybound so they should never be equal return -pos - 1; } @@ -146,7 +146,7 @@ public boolean isInCorrectLocation(SSTableReader sstable, Directories.DataDirect { int diskIndex = getDiskIndex(sstable); PartitionPosition diskLast = positions.get(diskIndex); - return directories.get(diskIndex).equals(currentLocation) && sstable.last.compareTo(diskLast) <= 0; + return directories.get(diskIndex).equals(currentLocation) && sstable.getLast().compareTo(diskLast) <= 0; } private int getDiskIndex(DecoratedKey key) diff --git a/src/java/org/apache/cassandra/db/compaction/LeveledCompactionStrategy.java b/src/java/org/apache/cassandra/db/compaction/LeveledCompactionStrategy.java index 820f30413361..3493674efc10 100644 --- a/src/java/org/apache/cassandra/db/compaction/LeveledCompactionStrategy.java +++ b/src/java/org/apache/cassandra/db/compaction/LeveledCompactionStrategy.java @@ -446,7 +446,7 @@ public static Collection intersecting(Collection s { for (SSTableReader sstable : sstables) { - Range sstableRange = new Range<>(sstable.first.getToken(), sstable.last.getToken()); + Range sstableRange = new Range<>(sstable.getFirst().getToken(), sstable.getLast().getToken()); if (range == null || sstableRange.intersects(range)) filtered.add(sstable); } @@ -556,8 +556,8 @@ public JsonNode sstable(SSTableReader sstable) { ObjectNode node = JsonNodeFactory.instance.objectNode(); node.put("level", sstable.getSSTableLevel()); - node.put("min_token", sstable.first.getToken().toString()); - node.put("max_token", sstable.last.getToken().toString()); + node.put("min_token", sstable.getFirst().getToken().toString()); + node.put("max_token", sstable.getLast().getToken().toString()); return node; } diff --git 
a/src/java/org/apache/cassandra/db/compaction/LeveledGenerations.java b/src/java/org/apache/cassandra/db/compaction/LeveledGenerations.java index fac582a645e7..513e02aad99e 100644 --- a/src/java/org/apache/cassandra/db/compaction/LeveledGenerations.java +++ b/src/java/org/apache/cassandra/db/compaction/LeveledGenerations.java @@ -154,8 +154,8 @@ void addAll(Iterable readers) SSTableReader after = level.ceiling(sstable); SSTableReader before = level.floor(sstable); - if (before != null && before.last.compareTo(sstable.first) >= 0 || - after != null && after.first.compareTo(sstable.last) <= 0) + if (before != null && before.getLast().compareTo(sstable.getFirst()) >= 0 || + after != null && after.getFirst().compareTo(sstable.getLast()) <= 0) { sendToL0(sstable); } @@ -264,7 +264,7 @@ Iterator wrappingIterator(int lvl, SSTableReader lastCompactedSST while (tail.hasNext()) { SSTableReader potentialPivot = tail.peek(); - if (potentialPivot.first.compareTo(lastCompactedSSTable.last) > 0) + if (potentialPivot.getFirst().compareTo(lastCompactedSSTable.getLast()) > 0) { pivot = potentialPivot; break; @@ -322,7 +322,7 @@ private void maybeVerifyLevels() for (SSTableReader sstable : get(i)) { // no overlap: - assert prev == null || prev.last.compareTo(sstable.first) < 0; + assert prev == null || prev.getLast().compareTo(sstable.getFirst()) < 0; prev = sstable; // make sure it does not exist in any other level: for (int j = 0; j < levelCount(); j++) diff --git a/src/java/org/apache/cassandra/db/compaction/LeveledManifest.java b/src/java/org/apache/cassandra/db/compaction/LeveledManifest.java index c2adcf01575f..f5ebed711a0a 100644 --- a/src/java/org/apache/cassandra/db/compaction/LeveledManifest.java +++ b/src/java/org/apache/cassandra/db/compaction/LeveledManifest.java @@ -366,10 +366,10 @@ private Collection getOverlappingStarvedSSTables(int targetLevel, PartitionPosition min = null; for (SSTableReader candidate : candidates) { - if (min == null || 
candidate.first.compareTo(min) < 0) - min = candidate.first; - if (max == null || candidate.last.compareTo(max) > 0) - max = candidate.last; + if (min == null || candidate.getFirst().compareTo(min) < 0) + min = candidate.getFirst(); + if (max == null || candidate.getLast().compareTo(max) > 0) + max = candidate.getLast(); } if (min == null || max == null || min.equals(max)) // single partition sstables - we cannot include a high level sstable. return candidates; @@ -377,7 +377,7 @@ private Collection getOverlappingStarvedSSTables(int targetLevel, Range boundaries = new Range<>(min, max); for (SSTableReader sstable : generations.get(i)) { - Range r = new Range<>(sstable.first, sstable.last); + Range r = new Range<>(sstable.getFirst(), sstable.getLast()); if (boundaries.contains(r) && !compacting.contains(sstable)) { logger.info("Adding high-level (L{}) {} to candidates", sstable.getSSTableLevel(), sstable); @@ -438,20 +438,20 @@ private static Set overlapping(Collection candidat */ Iterator iter = candidates.iterator(); SSTableReader sstable = iter.next(); - Token first = sstable.first.getToken(); - Token last = sstable.last.getToken(); + Token first = sstable.getFirst().getToken(); + Token last = sstable.getLast().getToken(); while (iter.hasNext()) { sstable = iter.next(); - first = first.compareTo(sstable.first.getToken()) <= 0 ? first : sstable.first.getToken(); - last = last.compareTo(sstable.last.getToken()) >= 0 ? last : sstable.last.getToken(); + first = first.compareTo(sstable.getFirst().getToken()) <= 0 ? first : sstable.getFirst().getToken(); + last = last.compareTo(sstable.getLast().getToken()) >= 0 ? 
last : sstable.getLast().getToken(); } return overlapping(first, last, others); } private static Set overlappingWithBounds(SSTableReader sstable, Map> others) { - return overlappingWithBounds(sstable.first.getToken(), sstable.last.getToken(), others); + return overlappingWithBounds(sstable.getFirst().getToken(), sstable.getLast().getToken(), others); } /** @@ -482,7 +482,7 @@ private static Map> genBounds(Iterable> boundsMap = new HashMap<>(); for (SSTableReader sstable : ssTableReaders) { - boundsMap.put(sstable, new Bounds<>(sstable.first.getToken(), sstable.last.getToken())); + boundsMap.put(sstable, new Bounds<>(sstable.getFirst().getToken(), sstable.getLast().getToken())); } return boundsMap; } @@ -507,10 +507,10 @@ private Collection getCandidatesFor(int level) PartitionPosition firstCompactingKey = null; for (SSTableReader candidate : compactingL0) { - if (firstCompactingKey == null || candidate.first.compareTo(firstCompactingKey) < 0) - firstCompactingKey = candidate.first; - if (lastCompactingKey == null || candidate.last.compareTo(lastCompactingKey) > 0) - lastCompactingKey = candidate.last; + if (firstCompactingKey == null || candidate.getFirst().compareTo(firstCompactingKey) < 0) + firstCompactingKey = candidate.getFirst(); + if (lastCompactingKey == null || candidate.getLast().compareTo(lastCompactingKey) > 0) + lastCompactingKey = candidate.getLast(); } // L0 is the dumping ground for new sstables which thus may overlap each other. 
diff --git a/src/java/org/apache/cassandra/db/lifecycle/SSTableIntervalTree.java b/src/java/org/apache/cassandra/db/lifecycle/SSTableIntervalTree.java index 61fab98a0082..91005d39dd91 100644 --- a/src/java/org/apache/cassandra/db/lifecycle/SSTableIntervalTree.java +++ b/src/java/org/apache/cassandra/db/lifecycle/SSTableIntervalTree.java @@ -54,7 +54,7 @@ public static List> buildIntervals(It { List> intervals = new ArrayList<>(Iterables.size(sstables)); for (SSTableReader sstable : sstables) - intervals.add(Interval.create(sstable.first, sstable.last, sstable)); + intervals.add(Interval.create(sstable.getFirst(), sstable.getLast(), sstable)); return intervals; } } diff --git a/src/java/org/apache/cassandra/db/repair/CassandraTableRepairManager.java b/src/java/org/apache/cassandra/db/repair/CassandraTableRepairManager.java index 84f699b0e8be..053342e12b8b 100644 --- a/src/java/org/apache/cassandra/db/repair/CassandraTableRepairManager.java +++ b/src/java/org/apache/cassandra/db/repair/CassandraTableRepairManager.java @@ -79,7 +79,7 @@ public boolean apply(SSTableReader sstable) { return sstable != null && !sstable.metadata().isIndex() && // exclude SSTables from 2i - new Bounds<>(sstable.first.getToken(), sstable.last.getToken()).intersects(ranges); + new Bounds<>(sstable.getFirst().getToken(), sstable.getLast().getToken()).intersects(ranges); } }, true, false); //ephemeral snapshot, if repair fails, it will be cleaned next startup } diff --git a/src/java/org/apache/cassandra/db/repair/CassandraValidationIterator.java b/src/java/org/apache/cassandra/db/repair/CassandraValidationIterator.java index 836bb57042d9..d13fff1e07fa 100644 --- a/src/java/org/apache/cassandra/db/repair/CassandraValidationIterator.java +++ b/src/java/org/apache/cassandra/db/repair/CassandraValidationIterator.java @@ -140,7 +140,7 @@ else if (isIncremental) { for (SSTableReader sstable : sstableCandidates.sstables) { - if (new Bounds<>(sstable.first.getToken(), 
sstable.last.getToken()).intersects(ranges) && predicate.apply(sstable)) + if (new Bounds<>(sstable.getFirst().getToken(), sstable.getLast().getToken()).intersects(ranges) && predicate.apply(sstable)) { sstablesToValidate.add(sstable); } diff --git a/src/java/org/apache/cassandra/db/streaming/CassandraOutgoingFile.java b/src/java/org/apache/cassandra/db/streaming/CassandraOutgoingFile.java index 88ecff8a4464..73abbe88464a 100644 --- a/src/java/org/apache/cassandra/db/streaming/CassandraOutgoingFile.java +++ b/src/java/org/apache/cassandra/db/streaming/CassandraOutgoingFile.java @@ -89,7 +89,7 @@ private static CassandraStreamHeader makeHeader(SSTableReader sstable, .withSerializationHeader(sstable.header.toComponent()) .isEntireSSTable(shouldStreamEntireSSTable) .withComponentManifest(manifest) - .withFirstKey(sstable.first) + .withFirstKey(sstable.getFirst()) .withTableId(sstable.metadata().id) .build(); } diff --git a/src/java/org/apache/cassandra/db/streaming/CassandraStreamReceiver.java b/src/java/org/apache/cassandra/db/streaming/CassandraStreamReceiver.java index 518d53709525..0014f7bf0e82 100644 --- a/src/java/org/apache/cassandra/db/streaming/CassandraStreamReceiver.java +++ b/src/java/org/apache/cassandra/db/streaming/CassandraStreamReceiver.java @@ -252,7 +252,7 @@ public void finished() if (cfs.isRowCacheEnabled() || cfs.metadata().isCounter()) { List> boundsToInvalidate = new ArrayList<>(readers.size()); - readers.forEach(sstable -> boundsToInvalidate.add(new Bounds(sstable.first.getToken(), sstable.last.getToken()))); + readers.forEach(sstable -> boundsToInvalidate.add(new Bounds(sstable.getFirst().getToken(), sstable.getLast().getToken()))); Set> nonOverlappingBounds = Bounds.getNonOverlappingBounds(boundsToInvalidate); if (cfs.isRowCacheEnabled()) diff --git a/src/java/org/apache/cassandra/index/sasi/plan/QueryController.java b/src/java/org/apache/cassandra/index/sasi/plan/QueryController.java index 60538e107e16..37d254f59c39 100644 --- 
a/src/java/org/apache/cassandra/index/sasi/plan/QueryController.java +++ b/src/java/org/apache/cassandra/index/sasi/plan/QueryController.java @@ -254,7 +254,7 @@ private Set applyScope(Set indexes) { return Sets.filter(indexes, index -> { SSTableReader sstable = index.getSSTable(); - return range.startKey().compareTo(sstable.last) <= 0 && (range.stopKey().isMinimum() || sstable.first.compareTo(range.stopKey()) <= 0); + return range.startKey().compareTo(sstable.getLast()) <= 0 && (range.stopKey().isMinimum() || sstable.getFirst().compareTo(range.stopKey()) <= 0); }); } } diff --git a/src/java/org/apache/cassandra/io/sstable/SSTableRewriter.java b/src/java/org/apache/cassandra/io/sstable/SSTableRewriter.java index 82ae4dc73cde..1aa352694a70 100644 --- a/src/java/org/apache/cassandra/io/sstable/SSTableRewriter.java +++ b/src/java/org/apache/cassandra/io/sstable/SSTableRewriter.java @@ -152,7 +152,7 @@ private void maybeReopenEarly(DecoratedKey key) writer.openEarly(reader -> { transaction.update(reader, false); currentlyOpenedEarlyAt = writer.getFilePointer(); - moveStarts(reader.last); + moveStarts(reader.getLast()); transaction.checkpoint(); }); } @@ -202,10 +202,10 @@ private void moveStarts(DecoratedKey lowerbound) final SSTableReader latest = transaction.current(sstable); // skip any sstables that we know to already be shadowed - if (latest.first.compareTo(lowerbound) > 0) + if (latest.getFirst().compareTo(lowerbound) > 0) continue; - if (lowerbound.compareTo(latest.last) >= 0) + if (lowerbound.compareTo(latest.getLast()) >= 0) { if (!transaction.isObsolete(latest)) transaction.obsolete(latest); @@ -255,7 +255,7 @@ public void switchWriter(SSTableWriter newWriter) writer.setMaxDataAge(maxAge); SSTableReader reader = writer.openFinalEarly(); transaction.update(reader, false); - moveStarts(reader.last); + moveStarts(reader.getLast()); transaction.checkpoint(); } diff --git a/src/java/org/apache/cassandra/io/sstable/format/SSTableReader.java 
b/src/java/org/apache/cassandra/io/sstable/format/SSTableReader.java index b12238996c31..6b2f4460a68d 100644 --- a/src/java/org/apache/cassandra/io/sstable/format/SSTableReader.java +++ b/src/java/org/apache/cassandra/io/sstable/format/SSTableReader.java @@ -183,7 +183,7 @@ public static final class UniqueIdentifier } public final UniqueIdentifier instanceId = new UniqueIdentifier(); - public static final Comparator firstKeyComparator = (o1, o2) -> o1.first.compareTo(o2.first); + public static final Comparator firstKeyComparator = (o1, o2) -> o1.getFirst().compareTo(o2.getFirst()); public static final Ordering firstKeyOrdering = Ordering.from(firstKeyComparator); public static final Comparator idComparator = Comparator.comparing(t -> t.descriptor.id, SSTableIdFactory.COMPARATOR); @@ -267,8 +267,8 @@ public enum OpenReason private volatile double crcCheckChance; - public final DecoratedKey first; - public final DecoratedKey last; + protected final DecoratedKey first; + protected final DecoratedKey last; public final AbstractBounds bounds; /** diff --git a/src/java/org/apache/cassandra/io/sstable/format/SSTableScanner.java b/src/java/org/apache/cassandra/io/sstable/format/SSTableScanner.java index b15b2a555323..217c17720639 100644 --- a/src/java/org/apache/cassandra/io/sstable/format/SSTableScanner.java +++ b/src/java/org/apache/cassandra/io/sstable/format/SSTableScanner.java @@ -103,30 +103,30 @@ protected static List> makeBounds(SSTableReade protected static AbstractBounds fullRange(SSTableReader sstable) { - return new Bounds<>(sstable.first, sstable.last); + return new Bounds<>(sstable.getFirst(), sstable.getLast()); } private static void addRange(SSTableReader sstable, AbstractBounds requested, List> boundsList) { if (requested instanceof Range && ((Range) requested).isWrapAround()) { - if (requested.right.compareTo(sstable.first) >= 0) + if (requested.right.compareTo(sstable.getFirst()) >= 0) { // since we wrap, we must contain the whole sstable prior to 
stopKey() - Boundary left = new Boundary<>(sstable.first, true); + Boundary left = new Boundary<>(sstable.getFirst(), true); Boundary right; right = requested.rightBoundary(); - right = minRight(right, sstable.last, true); + right = minRight(right, sstable.getLast(), true); if (!isEmpty(left, right)) boundsList.add(AbstractBounds.bounds(left, right)); } - if (requested.left.compareTo(sstable.last) <= 0) + if (requested.left.compareTo(sstable.getLast()) <= 0) { // since we wrap, we must contain the whole sstable after dataRange.startKey() - Boundary right = new Boundary<>(sstable.last, true); + Boundary right = new Boundary<>(sstable.getLast(), true); Boundary left; left = requested.leftBoundary(); - left = maxLeft(left, sstable.first, true); + left = maxLeft(left, sstable.getFirst(), true); if (!isEmpty(left, right)) boundsList.add(AbstractBounds.bounds(left, right)); } @@ -137,10 +137,10 @@ private static void addRange(SSTableReader sstable, AbstractBounds left, right; left = requested.leftBoundary(); right = requested.rightBoundary(); - left = maxLeft(left, sstable.first, true); + left = maxLeft(left, sstable.getFirst(), true); // apparently isWrapAround() doesn't count Bounds that extend to the limit (min) as wrapping - right = requested.right.isMinimum() ? new Boundary<>(sstable.last, true) - : minRight(right, sstable.last, true); + right = requested.right.isMinimum() ? 
new Boundary<>(sstable.getLast(), true) + : minRight(right, sstable.getLast(), true); if (!isEmpty(left, right)) boundsList.add(AbstractBounds.bounds(left, right)); } diff --git a/src/java/org/apache/cassandra/io/sstable/format/SortedTableVerifier.java b/src/java/org/apache/cassandra/io/sstable/format/SortedTableVerifier.java index fb3cb4e20a9b..f68e1c968455 100644 --- a/src/java/org/apache/cassandra/io/sstable/format/SortedTableVerifier.java +++ b/src/java/org/apache/cassandra/io/sstable/format/SortedTableVerifier.java @@ -388,7 +388,7 @@ private void deserializeIndex(SSTableReader sstable) throws IOException { ByteBuffer last = it.key(); while (it.advance()) last = it.key(); // no-op, just check if index is readable - if (!Objects.equals(last, sstable.last.getKey())) + if (!Objects.equals(last, sstable.getLast().getKey())) throw new CorruptSSTableException(new IOException("Failed to read partition index"), it.toString()); } } diff --git a/src/java/org/apache/cassandra/io/sstable/format/big/BigTableReader.java b/src/java/org/apache/cassandra/io/sstable/format/big/BigTableReader.java index 0984cc924d03..667e2cd90493 100644 --- a/src/java/org/apache/cassandra/io/sstable/format/big/BigTableReader.java +++ b/src/java/org/apache/cassandra/io/sstable/format/big/BigTableReader.java @@ -198,8 +198,8 @@ public ISSTableScanner getScanner(Collection> ranges) @Override public DecoratedKey firstKeyBeyond(PartitionPosition token) { - if (token.compareTo(first) < 0) - return first; + if (token.compareTo(getFirst()) < 0) + return getFirst(); long sampledPosition = getIndexScanPosition(token); @@ -263,7 +263,7 @@ public RowIndexEntry getRowIndexEntry(PartitionPosition key, // check the smallest and greatest keys in the sstable to see if it can't be present boolean skip = false; - if (key.compareTo(first) < 0) + if (key.compareTo(getFirst()) < 0) { if (searchOp == Operator.EQ) { @@ -271,13 +271,13 @@ public RowIndexEntry getRowIndexEntry(PartitionPosition key, } else { - key = 
first; + key = getFirst(); searchOp = Operator.GE; // since op != EQ, bloom filter will be skipped; first key is included so no reason to check bloom filter } } else { - int l = last.compareTo(key); + int l = getLast().compareTo(key); skip = l < 0 // out of range, skip || l == 0 && searchOp == Operator.GT; // search entry > key, but key is the last in range, so skip if (l == 0) @@ -529,8 +529,8 @@ public IVerifier getVerifier(ColumnFamilyStore cfs, OutputHandler outputHandler, */ long getIndexScanPosition(PartitionPosition key) { - if (openReason == OpenReason.MOVED_START && key.compareTo(first) < 0) - key = first; + if (openReason == OpenReason.MOVED_START && key.compareTo(getFirst()) < 0) + key = getFirst(); return indexSummary.getScanPosition(key); } @@ -599,7 +599,7 @@ public SSTableReader cloneWithNewStart(DecoratedKey newStart) return runWithLock(ignored -> { assert openReason != OpenReason.EARLY; // TODO: merge with caller's firstKeyBeyond() work,to save time - if (newStart.compareTo(first) > 0) + if (newStart.compareTo(getFirst()) > 0) { Map handleAndPositions = new LinkedHashMap<>(2); if (dfile != null) @@ -654,8 +654,8 @@ else if (samplingLevel < indexSummary.getSamplingLevel()) // Always save the resampled index with lock to avoid racing with entire-sstable streaming return runWithLock(ignored -> { - new IndexSummaryComponent(newSummary, first, last).save(descriptor.fileFor(Components.SUMMARY), true); - return cloneAndReplace(first, OpenReason.METADATA_CHANGE, newSummary); + new IndexSummaryComponent(newSummary, getFirst(), getLast()).save(descriptor.fileFor(Components.SUMMARY), true); + return cloneAndReplace(getFirst(), OpenReason.METADATA_CHANGE, newSummary); }); } diff --git a/src/java/org/apache/cassandra/io/sstable/format/bti/BtiTableReader.java b/src/java/org/apache/cassandra/io/sstable/format/bti/BtiTableReader.java index 9afd48294fc7..c5571e7fbbe3 100644 --- a/src/java/org/apache/cassandra/io/sstable/format/bti/BtiTableReader.java +++ 
b/src/java/org/apache/cassandra/io/sstable/format/bti/BtiTableReader.java @@ -140,13 +140,13 @@ protected TrieIndexEntry getRowIndexEntry(PartitionPosition key, if (operator == GT || operator == GE) { - if (filterLast() && last.compareTo(key) < 0) + if (filterLast() && getLast().compareTo(key) < 0) { notifySkipped(SkippingReason.MIN_MAX_KEYS, listener, operator, updateStats); return null; } - boolean filteredLeft = (filterFirst() && first.compareTo(key) > 0); - searchKey = filteredLeft ? first : key; + boolean filteredLeft = (filterFirst() && getFirst().compareTo(key) > 0); + searchKey = filteredLeft ? getFirst() : key; searchOp = filteredLeft ? GE : operator; try (PartitionIndex.Reader reader = partitionIndex.openReader()) @@ -226,7 +226,7 @@ TrieIndexEntry getExactPosition(DecoratedKey dk, SSTableReadsListener listener, boolean updateStats) { - if ((filterFirst() && first.compareTo(dk) > 0) || (filterLast() && last.compareTo(dk) < 0)) + if ((filterFirst() && getFirst().compareTo(dk) > 0) || (filterLast() && getLast().compareTo(dk) < 0)) { notifySkipped(SkippingReason.MIN_MAX_KEYS, listener, EQ, updateStats); return null; @@ -329,24 +329,24 @@ public long estimatedKeysForRanges(Collection> ranges) for (Range range : Range.normalize(ranges)) { PartitionPosition left = range.left.minKeyBound(); - if (left.compareTo(first) <= 0) + if (left.compareTo(getFirst()) <= 0) left = null; - else if (left.compareTo(last) > 0) + else if (left.compareTo(getLast()) > 0) continue; // no intersection PartitionPosition right = range.right.minKeyBound(); - if (range.right.isMinimum() || right.compareTo(last) >= 0) + if (range.right.isMinimum() || right.compareTo(getLast()) >= 0) right = null; - else if (right.compareTo(first) < 0) + else if (right.compareTo(getFirst()) < 0) continue; // no intersection if (left == null && right == null) return partitionIndex.size(); // sstable is fully covered, return full partition count to avoid rounding errors if (left == null && filterFirst()) - 
left = first; + left = getFirst(); if (right == null && filterLast()) - right = last; + right = getLast(); long startPos = left != null ? getPosition(left, GE) : 0; long endPos = right != null ? getPosition(right, GE) : uncompressedLength(); @@ -421,7 +421,7 @@ public BtiTableReader cloneWithNewStart(DecoratedKey newStart) { return runWithLock(d -> { assert openReason != OpenReason.EARLY : "Cannot open early an early-open SSTable"; - if (newStart.compareTo(first) > 0) + if (newStart.compareTo(getFirst()) > 0) { final long dataStart = getPosition(newStart, Operator.EQ); runOnClose(() -> dfile.dropPageCache(dataStart)); diff --git a/src/java/org/apache/cassandra/tools/SSTableOfflineRelevel.java b/src/java/org/apache/cassandra/tools/SSTableOfflineRelevel.java index 16faa9283bff..0c3d3e75ea99 100644 --- a/src/java/org/apache/cassandra/tools/SSTableOfflineRelevel.java +++ b/src/java/org/apache/cassandra/tools/SSTableOfflineRelevel.java @@ -179,7 +179,7 @@ public void relevel(boolean dryRun) throws IOException @Override public int compare(SSTableReader o1, SSTableReader o2) { - return o1.last.compareTo(o2.last); + return o1.getLast().compareTo(o2.getLast()); } }); @@ -193,10 +193,10 @@ public int compare(SSTableReader o1, SSTableReader o2) while (it.hasNext()) { SSTableReader sstable = it.next(); - if (lastLast == null || lastLast.compareTo(sstable.first) < 0) + if (lastLast == null || lastLast.compareTo(sstable.getFirst()) < 0) { level.add(sstable); - lastLast = sstable.last; + lastLast = sstable.getLast(); it.remove(); } } diff --git a/src/java/org/apache/cassandra/utils/DiagnosticSnapshotService.java b/src/java/org/apache/cassandra/utils/DiagnosticSnapshotService.java index 168285da5ff2..5329ceef0dfe 100644 --- a/src/java/org/apache/cassandra/utils/DiagnosticSnapshotService.java +++ b/src/java/org/apache/cassandra/utils/DiagnosticSnapshotService.java @@ -210,8 +210,8 @@ public void run() { cfs.snapshot(command.snapshot_name, (sstable) -> checkIntersection(ranges, - 
sstable.first.getToken(), - sstable.last.getToken()), + sstable.getFirst().getToken(), + sstable.getLast().getToken()), false, false); } } diff --git a/test/distributed/org/apache/cassandra/distributed/test/PreviewRepairSnapshotTest.java b/test/distributed/org/apache/cassandra/distributed/test/PreviewRepairSnapshotTest.java index e3679e9af7dc..0b2ff533a3b1 100644 --- a/test/distributed/org/apache/cassandra/distributed/test/PreviewRepairSnapshotTest.java +++ b/test/distributed/org/apache/cassandra/distributed/test/PreviewRepairSnapshotTest.java @@ -137,7 +137,7 @@ private IIsolatedExecutor.SerializableRunnable checkSnapshot(Set mismatch for (SSTableReader sstable : cfs.getLiveSSTables()) { - Bounds sstableBounds = new Bounds<>(sstable.first.getToken(), sstable.last.getToken()); + Bounds sstableBounds = new Bounds<>(sstable.getFirst().getToken(), sstable.getLast().getToken()); boolean shouldBeInSnapshot = false; for (Token mismatchingToken : mismatchingTokens) { diff --git a/test/long/org/apache/cassandra/db/compaction/LongLeveledCompactionStrategyTest.java b/test/long/org/apache/cassandra/db/compaction/LongLeveledCompactionStrategyTest.java index a780cf1e263b..bbc1b577de37 100644 --- a/test/long/org/apache/cassandra/db/compaction/LongLeveledCompactionStrategyTest.java +++ b/test/long/org/apache/cassandra/db/compaction/LongLeveledCompactionStrategyTest.java @@ -126,7 +126,7 @@ public void run() if (level > 0) {// overlap check for levels greater than 0 - Set overlaps = LeveledManifest.overlapping(sstable.first.getToken(), sstable.last.getToken(), sstables); + Set overlaps = LeveledManifest.overlapping(sstable.getFirst().getToken(), sstable.getLast().getToken(), sstables); assert overlaps.size() == 1 && overlaps.contains(sstable); } } diff --git a/test/microbench/org/apache/cassandra/test/microbench/ZeroCopyStreamingBenchmark.java b/test/microbench/org/apache/cassandra/test/microbench/ZeroCopyStreamingBenchmark.java index 59acfc6ae696..b3083eff3298 100644 --- 
a/test/microbench/org/apache/cassandra/test/microbench/ZeroCopyStreamingBenchmark.java +++ b/test/microbench/org/apache/cassandra/test/microbench/ZeroCopyStreamingBenchmark.java @@ -141,7 +141,7 @@ public void setupBenchmark() throws IOException .withSerializationHeader(sstable.header.toComponent()) .withComponentManifest(context.manifest()) .isEntireSSTable(true) - .withFirstKey(sstable.first) + .withFirstKey(sstable.getFirst()) .withTableId(sstable.metadata().id) .build(); @@ -150,7 +150,7 @@ public void setupBenchmark() throws IOException 0, 0, 0, null), entireSSTableStreamHeader, session); - List> requestedRanges = Arrays.asList(new Range<>(sstable.first.minValue().getToken(), sstable.last.getToken())); + List> requestedRanges = Arrays.asList(new Range<>(sstable.getFirst().minValue().getToken(), sstable.getLast().getToken())); CassandraStreamHeader partialSSTableStreamHeader = CassandraStreamHeader.builder() .withSSTableVersion(sstable.descriptor.version) diff --git a/test/unit/org/apache/cassandra/db/CleanupTest.java b/test/unit/org/apache/cassandra/db/CleanupTest.java index d82eef8850c0..5d176859c281 100644 --- a/test/unit/org/apache/cassandra/db/CleanupTest.java +++ b/test/unit/org/apache/cassandra/db/CleanupTest.java @@ -312,8 +312,8 @@ public void testCleanupSkippingSSTablesHelper(boolean repaired) throws UnknownHo cfs.forceCleanup(2); for (SSTableReader sstable : cfs.getLiveSSTables()) { - assertEquals(sstable.first, sstable.last); // single-token sstables - assertTrue(sstable.first.getToken().compareTo(token(new byte[]{ 50 })) <= 0); + assertEquals(sstable.getFirst(), sstable.getLast()); // single-token sstables + assertTrue(sstable.getFirst().getToken().compareTo(token(new byte[]{ 50 })) <= 0); // with single-token sstables they should all either be skipped or dropped: assertTrue(beforeFirstCleanup.contains(sstable)); } @@ -358,8 +358,8 @@ public void testNeedsCleanup() throws Exception // prepare SSTable and some useful tokens SSTableReader ssTable = 
cfs.getLiveSSTables().iterator().next(); - final Token ssTableMin = ssTable.first.getToken(); - final Token ssTableMax = ssTable.last.getToken(); + final Token ssTableMin = ssTable.getFirst().getToken(); + final Token ssTableMax = ssTable.getLast().getToken(); final Token min = token((byte) 0); final Token before1 = token((byte) 2); diff --git a/test/unit/org/apache/cassandra/db/compaction/CancelCompactionsTest.java b/test/unit/org/apache/cassandra/db/compaction/CancelCompactionsTest.java index 51da0c443124..4a50d2de5d2d 100644 --- a/test/unit/org/apache/cassandra/db/compaction/CancelCompactionsTest.java +++ b/test/unit/org/apache/cassandra/db/compaction/CancelCompactionsTest.java @@ -361,7 +361,7 @@ public boolean hasNext() long first(SSTableReader sstable) { - return (long)sstable.first.getToken().getTokenValue(); + return (long) sstable.getFirst().getToken().getTokenValue(); } Token token(long t) diff --git a/test/unit/org/apache/cassandra/db/compaction/CompactionStrategyManagerTest.java b/test/unit/org/apache/cassandra/db/compaction/CompactionStrategyManagerTest.java index 8a9f2fb22594..98d3966affee 100644 --- a/test/unit/org/apache/cassandra/db/compaction/CompactionStrategyManagerTest.java +++ b/test/unit/org/apache/cassandra/db/compaction/CompactionStrategyManagerTest.java @@ -495,7 +495,7 @@ private Integer[] computeBoundaries(int numSSTables, int numDisks) private int getSSTableIndex(Integer[] boundaries, SSTableReader reader) { int index = 0; - int firstKey = Integer.parseInt(new String(ByteBufferUtil.getArray(reader.first.getKey()))); + int firstKey = Integer.parseInt(new String(ByteBufferUtil.getArray(reader.getFirst().getKey()))); while (boundaries[index] <= firstKey) index++; logger.debug("Index for SSTable {} on boundary {} is {}", reader.descriptor.id, Arrays.toString(boundaries), index); diff --git a/test/unit/org/apache/cassandra/db/compaction/LeveledCompactionStrategyTest.java 
b/test/unit/org/apache/cassandra/db/compaction/LeveledCompactionStrategyTest.java index 235c2735e27d..e9e2d42ab4a7 100644 --- a/test/unit/org/apache/cassandra/db/compaction/LeveledCompactionStrategyTest.java +++ b/test/unit/org/apache/cassandra/db/compaction/LeveledCompactionStrategyTest.java @@ -737,10 +737,10 @@ public void randomMultiLevelAddTest() SSTableReader prev = null; for (SSTableReader sstable : level) { - if (prev != null && sstable.first.compareTo(prev.last) <= 0) + if (prev != null && sstable.getFirst().compareTo(prev.getLast()) <= 0) { - String levelStr = level.stream().map(s -> String.format("[%s, %s]", s.first, s.last)).collect(Collectors.joining(", ")); - String overlap = String.format("sstable [%s, %s] overlaps with [%s, %s] in level %d (%s) ", sstable.first, sstable.last, prev.first, prev.last, i, levelStr); + String levelStr = level.stream().map(s -> String.format("[%s, %s]", s.getFirst(), s.getLast())).collect(Collectors.joining(", ")); + String overlap = String.format("sstable [%s, %s] overlaps with [%s, %s] in level %d (%s) ", sstable.getFirst(), sstable.getLast(), prev.getFirst(), prev.getLast(), i, levelStr); Assert.fail("[seed = "+seed+"] overlap in level "+lvl+": " + overlap); } prev = sstable; @@ -849,7 +849,7 @@ private static int[] canAdd(LeveledManifest lm, List newSSTables, boolean kept = true; for (SSTableReader sst : newLevel) { - if (prev != null && prev.last.compareTo(sst.first) >= 0) + if (prev != null && prev.getLast().compareTo(sst.getFirst()) >= 0) { newLevel.remove(sstable); kept = false; diff --git a/test/unit/org/apache/cassandra/db/compaction/LeveledGenerationsTest.java b/test/unit/org/apache/cassandra/db/compaction/LeveledGenerationsTest.java index d47ffd2ca8d1..11592f01d55a 100644 --- a/test/unit/org/apache/cassandra/db/compaction/LeveledGenerationsTest.java +++ b/test/unit/org/apache/cassandra/db/compaction/LeveledGenerationsTest.java @@ -167,8 +167,8 @@ private void assertIter(Iterator iter, long first, long last, 
int { List drained = Lists.newArrayList(iter); assertEquals(expectedCount, drained.size()); - assertEquals(dk(first).getToken(), first(drained).first.getToken()); - assertEquals(dk(last).getToken(), last(drained).first.getToken()); // we sort by first token, so this is the first token of the last sstable in iter + assertEquals(dk(first).getToken(), first(drained).getFirst().getToken()); + assertEquals(dk(last).getToken(), last(drained).getFirst().getToken()); // we sort by first token, so this is the first token of the last sstable in iter } private SSTableReader last(Iterable iter) @@ -194,6 +194,6 @@ private SSTableReader sst(int gen, ColumnFamilyStore cfs, long first, long last) private void print(SSTableReader sstable) { - System.out.println(String.format("%d %s %s %d", sstable.descriptor.id, sstable.first, sstable.last, sstable.getSSTableLevel())); + System.out.println(String.format("%d %s %s %d", sstable.descriptor.id, sstable.getFirst(), sstable.getLast(), sstable.getSSTableLevel())); } } diff --git a/test/unit/org/apache/cassandra/db/repair/PendingAntiCompactionBytemanTest.java b/test/unit/org/apache/cassandra/db/repair/PendingAntiCompactionBytemanTest.java index e43027deffb3..795c7fe82c47 100644 --- a/test/unit/org/apache/cassandra/db/repair/PendingAntiCompactionBytemanTest.java +++ b/test/unit/org/apache/cassandra/db/repair/PendingAntiCompactionBytemanTest.java @@ -63,7 +63,7 @@ public void testExceptionAnticompaction() throws InterruptedException for (SSTableReader sstable : cfs.getLiveSSTables()) { - ranges.add(new Range<>(sstable.first.getToken(), sstable.last.getToken())); + ranges.add(new Range<>(sstable.getFirst().getToken(), sstable.getLast().getToken())); } TimeUUID prsid = prepareSession(); try diff --git a/test/unit/org/apache/cassandra/db/repair/PendingAntiCompactionTest.java b/test/unit/org/apache/cassandra/db/repair/PendingAntiCompactionTest.java index c95b2dbb0e25..d4a2b991d404 100644 --- 
a/test/unit/org/apache/cassandra/db/repair/PendingAntiCompactionTest.java +++ b/test/unit/org/apache/cassandra/db/repair/PendingAntiCompactionTest.java @@ -176,7 +176,7 @@ public void acquisitionSuccess() throws Exception Collection> ranges = new HashSet<>(); for (SSTableReader sstable : expected) { - ranges.add(new Range<>(sstable.first.getToken(), sstable.last.getToken())); + ranges.add(new Range<>(sstable.getFirst().getToken(), sstable.getLast().getToken())); } PendingAntiCompaction.AcquisitionCallable acquisitionCallable = new PendingAntiCompaction.AcquisitionCallable(cfs, ranges, nextTimeUUID(), 0, 0); @@ -400,8 +400,8 @@ public void cancelledAntiCompaction() throws Exception // attempt to anti-compact the sstable in half SSTableReader sstable = Iterables.getOnlyElement(cfs.getLiveSSTables()); - Token left = cfs.getPartitioner().midpoint(sstable.first.getToken(), sstable.last.getToken()); - Token right = sstable.last.getToken(); + Token left = cfs.getPartitioner().midpoint(sstable.getFirst().getToken(), sstable.getLast().getToken()); + Token right = sstable.getLast().getToken(); CompactionManager.instance.performAnticompaction(result.cfs, atEndpoint(Collections.singleton(new Range<>(left, right)), NO_RANGES), result.refs, result.txn, sessionID, () -> true); diff --git a/test/unit/org/apache/cassandra/db/streaming/CassandraEntireSSTableStreamWriterTest.java b/test/unit/org/apache/cassandra/db/streaming/CassandraEntireSSTableStreamWriterTest.java index 898da7c78290..e3305f24b697 100644 --- a/test/unit/org/apache/cassandra/db/streaming/CassandraEntireSSTableStreamWriterTest.java +++ b/test/unit/org/apache/cassandra/db/streaming/CassandraEntireSSTableStreamWriterTest.java @@ -156,7 +156,7 @@ public void testBlockReadingAndWritingOverWire() throws Throwable .withSerializationHeader(sstable.header.toComponent()) .withComponentManifest(context.manifest()) .isEntireSSTable(true) - .withFirstKey(sstable.first) + .withFirstKey(sstable.getFirst()) 
.withTableId(sstable.metadata().id) .build(); diff --git a/test/unit/org/apache/cassandra/db/streaming/CassandraOutgoingFileTest.java b/test/unit/org/apache/cassandra/db/streaming/CassandraOutgoingFileTest.java index 6adfdedd21ab..2921b5ae1749 100644 --- a/test/unit/org/apache/cassandra/db/streaming/CassandraOutgoingFileTest.java +++ b/test/unit/org/apache/cassandra/db/streaming/CassandraOutgoingFileTest.java @@ -89,7 +89,7 @@ public static void defineSchemaAndPrepareSSTable() @Test public void validateFullyContainedIn_SingleContiguousRange_Succeeds() { - List> requestedRanges = Arrays.asList(new Range<>(store.getPartitioner().getMinimumToken(), sstable.last.getToken())); + List> requestedRanges = Arrays.asList(new Range<>(store.getPartitioner().getMinimumToken(), sstable.getLast().getToken())); List sections = sstable.getPositionsForRanges(requestedRanges); CassandraOutgoingFile cof = new CassandraOutgoingFile(StreamOperation.BOOTSTRAP, sstable.ref(), @@ -117,7 +117,7 @@ public void validateFullyContainedIn_SplitRange_Succeeds() { List> requestedRanges = Arrays.asList(new Range<>(store.getPartitioner().getMinimumToken(), getTokenAtIndex(4)), new Range<>(getTokenAtIndex(2), getTokenAtIndex(6)), - new Range<>(getTokenAtIndex(5), sstable.last.getToken())); + new Range<>(getTokenAtIndex(5), sstable.getLast().getToken())); requestedRanges = Range.normalize(requestedRanges); List sections = sstable.getPositionsForRanges(requestedRanges); diff --git a/test/unit/org/apache/cassandra/db/streaming/CassandraStreamHeaderTest.java b/test/unit/org/apache/cassandra/db/streaming/CassandraStreamHeaderTest.java index a0e4e510681a..52e680f15843 100644 --- a/test/unit/org/apache/cassandra/db/streaming/CassandraStreamHeaderTest.java +++ b/test/unit/org/apache/cassandra/db/streaming/CassandraStreamHeaderTest.java @@ -133,7 +133,7 @@ public void transferedSizeWithoutCompressionTest() private CassandraStreamHeader header(boolean entireSSTable, boolean compressed) { - List> 
requestedRanges = Collections.singletonList(new Range<>(store.getPartitioner().getMinimumToken(), sstable.last.getToken())); + List> requestedRanges = Collections.singletonList(new Range<>(store.getPartitioner().getMinimumToken(), sstable.getLast().getToken())); requestedRanges = Range.normalize(requestedRanges); List sections = sstable.getPositionsForRanges(requestedRanges); @@ -143,7 +143,7 @@ private CassandraStreamHeader header(boolean entireSSTable, boolean compressed) TableMetadata metadata = store.metadata(); SerializationHeader.Component serializationHeader = SerializationHeader.makeWithoutStats(metadata).toComponent(); ComponentManifest componentManifest = entireSSTable ? ComponentManifest.create(sstable.descriptor) : null; - DecoratedKey firstKey = entireSSTable ? sstable.first : null; + DecoratedKey firstKey = entireSSTable ? sstable.getFirst() : null; return CassandraStreamHeader.builder() .withSSTableVersion(sstable.descriptor.version) diff --git a/test/unit/org/apache/cassandra/db/streaming/CassandraStreamManagerTest.java b/test/unit/org/apache/cassandra/db/streaming/CassandraStreamManagerTest.java index fac570647e47..eba574ebaadf 100644 --- a/test/unit/org/apache/cassandra/db/streaming/CassandraStreamManagerTest.java +++ b/test/unit/org/apache/cassandra/db/streaming/CassandraStreamManagerTest.java @@ -210,7 +210,7 @@ public void testSSTableSectionsForRanges() throws Exception Collection allSSTables = cfs.getLiveSSTables(); Assert.assertEquals(1, allSSTables.size()); - final Token firstToken = allSSTables.iterator().next().first.getToken(); + final Token firstToken = allSSTables.iterator().next().getFirst().getToken(); DatabaseDescriptor.setSSTablePreemptiveOpenIntervalInMiB(1); Set sstablesBeforeRewrite = getReadersForRange(new Range<>(firstToken, firstToken)); diff --git a/test/unit/org/apache/cassandra/io/sstable/SSTableReaderTest.java b/test/unit/org/apache/cassandra/io/sstable/SSTableReaderTest.java index 5a82bc37dbb7..98d9656cec9f 100644 --- 
a/test/unit/org/apache/cassandra/io/sstable/SSTableReaderTest.java +++ b/test/unit/org/apache/cassandra/io/sstable/SSTableReaderTest.java @@ -622,8 +622,8 @@ public void testOpeningSSTable() throws Exception SSTableReader target = SSTableReader.open(store, desc); try { - assert target.first.equals(firstKey); - assert target.last.equals(lastKey); + assert target.getFirst().equals(firstKey); + assert target.getLast().equals(lastKey); } finally { @@ -851,13 +851,13 @@ public void testLoadingSummaryUsesCorrectPartitioner() throws Exception { assert indexCfs.isIndex(); SSTableReader sstable = indexCfs.getLiveSSTables().iterator().next(); - assert sstable.first.getToken() instanceof LocalToken; + assert sstable.getFirst().getToken() instanceof LocalToken; if (sstable instanceof IndexSummarySupport) { - new IndexSummaryComponent(((IndexSummarySupport) sstable).getIndexSummary(), sstable.first, sstable.last).save(sstable.descriptor.fileFor(Components.SUMMARY), true); + new IndexSummaryComponent(((IndexSummarySupport) sstable).getIndexSummary(), sstable.getFirst(), sstable.getLast()).save(sstable.descriptor.fileFor(Components.SUMMARY), true); SSTableReader reopened = SSTableReader.open(store, sstable.descriptor); - assert reopened.first.getToken() instanceof LocalToken; + assert reopened.getFirst().getToken() instanceof LocalToken; reopened.selfRef().release(); } } diff --git a/test/unit/org/apache/cassandra/io/sstable/SSTableRewriterTest.java b/test/unit/org/apache/cassandra/io/sstable/SSTableRewriterTest.java index da17208f11cc..5cb5bc32c18f 100644 --- a/test/unit/org/apache/cassandra/io/sstable/SSTableRewriterTest.java +++ b/test/unit/org/apache/cassandra/io/sstable/SSTableRewriterTest.java @@ -426,8 +426,8 @@ private void testNumberOfFiles_abort(RewriterTest test) SSTableReader s = writeFile(cfs, 1000); cfs.addSSTable(s); - DecoratedKey origFirst = s.first; - DecoratedKey origLast = s.last; + DecoratedKey origFirst = s.getFirst(); + DecoratedKey origLast = s.getLast(); 
long startSize = cfs.metric.liveDiskSpaceUsed.getCount(); Set compacting = Sets.newHashSet(s); try (ISSTableScanner scanner = s.getScanner(); @@ -444,8 +444,8 @@ private void testNumberOfFiles_abort(RewriterTest test) assertEquals(startSize, cfs.metric.liveDiskSpaceUsed.getCount()); assertEquals(1, cfs.getLiveSSTables().size()); assertFileCounts(s.descriptor.directory.tryListNames()); - assertEquals(cfs.getLiveSSTables().iterator().next().first, origFirst); - assertEquals(cfs.getLiveSSTables().iterator().next().last, origLast); + assertEquals(cfs.getLiveSSTables().iterator().next().getFirst(), origFirst); + assertEquals(cfs.getLiveSSTables().iterator().next().getLast(), origLast); validateCFS(cfs); } diff --git a/test/unit/org/apache/cassandra/io/sstable/SSTableWriterTest.java b/test/unit/org/apache/cassandra/io/sstable/SSTableWriterTest.java index ad6bd61f57dc..556f55f71262 100644 --- a/test/unit/org/apache/cassandra/io/sstable/SSTableWriterTest.java +++ b/test/unit/org/apache/cassandra/io/sstable/SSTableWriterTest.java @@ -78,7 +78,7 @@ public void testAbortTxnWithOpenEarlyShouldRemoveSSTable() } writer.setMaxDataAge(1000); writer.openEarly(s2 -> { - assertTrue(s.last.compareTo(s2.last) < 0); + assertTrue(s.getLast().compareTo(s2.getLast()) < 0); assertFileCounts(dir.tryListNames()); s.selfRef().release(); s2.selfRef().release(); diff --git a/test/unit/org/apache/cassandra/repair/ValidatorTest.java b/test/unit/org/apache/cassandra/repair/ValidatorTest.java index 86704d3d4065..4a3d4fdbfa46 100644 --- a/test/unit/org/apache/cassandra/repair/ValidatorTest.java +++ b/test/unit/org/apache/cassandra/repair/ValidatorTest.java @@ -197,8 +197,8 @@ public void simpleValidationTest(int n) throws Exception SSTableReader sstable = cfs.getLiveSSTables().iterator().next(); TimeUUID repairSessionId = nextTimeUUID(); final RepairJobDesc desc = new RepairJobDesc(repairSessionId, nextTimeUUID(), cfs.keyspace.getName(), - cfs.getTableName(), singletonList(new 
Range<>(sstable.first.getToken(), - sstable.last.getToken()))); + cfs.getTableName(), singletonList(new Range<>(sstable.getFirst().getToken(), + sstable.getLast().getToken()))); InetAddressAndPort host = InetAddressAndPort.getByName("127.0.0.2"); @@ -254,8 +254,8 @@ public void testSizeLimiting() throws Exception SSTableReader sstable = cfs.getLiveSSTables().iterator().next(); TimeUUID repairSessionId = nextTimeUUID(); final RepairJobDesc desc = new RepairJobDesc(repairSessionId, nextTimeUUID(), cfs.keyspace.getName(), - cfs.getTableName(), singletonList(new Range<>(sstable.first.getToken(), - sstable.last.getToken()))); + cfs.getTableName(), singletonList(new Range<>(sstable.getFirst().getToken(), + sstable.getLast().getToken()))); InetAddressAndPort host = InetAddressAndPort.getByName("127.0.0.2"); @@ -313,7 +313,7 @@ public void testRangeSplittingTreeSizeLimit() throws Exception SSTableReader sstable = cfs.getLiveSSTables().iterator().next(); TimeUUID repairSessionId = nextTimeUUID(); - List> ranges = splitHelper(new Range<>(sstable.first.getToken(), sstable.last.getToken()), 2); + List> ranges = splitHelper(new Range<>(sstable.getFirst().getToken(), sstable.getLast().getToken()), 2); final RepairJobDesc desc = new RepairJobDesc(repairSessionId, nextTimeUUID(), cfs.keyspace.getName(), diff --git a/test/unit/org/apache/cassandra/streaming/StreamTransferTaskTest.java b/test/unit/org/apache/cassandra/streaming/StreamTransferTaskTest.java index dad1e7924f97..462bbe849bab 100644 --- a/test/unit/org/apache/cassandra/streaming/StreamTransferTaskTest.java +++ b/test/unit/org/apache/cassandra/streaming/StreamTransferTaskTest.java @@ -108,7 +108,7 @@ public void testScheduleTimeout() throws Exception for (SSTableReader sstable : cfs.getLiveSSTables()) { List> ranges = new ArrayList<>(); - ranges.add(new Range<>(sstable.first.getToken(), sstable.last.getToken())); + ranges.add(new Range<>(sstable.getFirst().getToken(), sstable.getLast().getToken())); 
task.addTransferStream(new CassandraOutgoingFile(StreamOperation.BOOTSTRAP, sstable.selfRef(), sstable.getPositionsForRanges(ranges), ranges, 1)); } assertEquals(14, task.getTotalNumberOfFiles()); @@ -159,7 +159,7 @@ public void testFailSessionDuringTransferShouldNotReleaseReferences() throws Exc for (SSTableReader sstable : cfs.getLiveSSTables()) { List> ranges = new ArrayList<>(); - ranges.add(new Range<>(sstable.first.getToken(), sstable.last.getToken())); + ranges.add(new Range<>(sstable.getFirst().getToken(), sstable.getLast().getToken())); Ref ref = sstable.selfRef(); refs.add(ref); task.addTransferStream(new CassandraOutgoingFile(StreamOperation.BOOTSTRAP, ref, sstable.getPositionsForRanges(ranges), ranges, 1)); From e814929cc1636896ca04b4e717f050c49e3bc920 Mon Sep 17 00:00:00 2001 From: Branimir Lambov Date: Thu, 16 Mar 2023 17:44:58 +0200 Subject: [PATCH 05/27] Introduce CFS.getKeyspaceName() --- .../db/AbstractCompactionController.java | 2 +- .../cassandra/db/ColumnFamilyStore.java | 47 ++++++++++--------- .../cassandra/db/DiskBoundaryManager.java | 6 +-- .../apache/cassandra/db/SSTableImporter.java | 8 ++-- .../db/compaction/CompactionController.java | 2 +- .../db/compaction/CompactionIterator.java | 2 +- .../db/compaction/CompactionLogger.java | 2 +- .../db/compaction/CompactionManager.java | 22 ++++----- .../compaction/CompactionStrategyManager.java | 8 ++-- .../db/compaction/CompactionTask.java | 4 +- .../db/compaction/LeveledManifest.java | 2 +- .../cassandra/db/lifecycle/Tracker.java | 4 +- .../repair/CassandraValidationIterator.java | 2 +- .../CassandraCompressedStreamReader.java | 4 +- .../db/streaming/CassandraStreamReader.java | 4 +- .../db/streaming/CassandraStreamReceiver.java | 4 +- .../org/apache/cassandra/db/view/View.java | 2 +- .../cassandra/db/view/ViewBuilderTask.java | 4 +- .../db/virtual/TableMetricTables.java | 2 +- .../index/SecondaryIndexManager.java | 10 ++-- .../index/internal/CassandraIndex.java | 2 +- 
.../io/sstable/RangeAwareSSTableWriter.java | 2 +- .../sstable/format/SortedTableScrubber.java | 2 +- .../cassandra/metrics/TableMetrics.java | 2 +- .../cassandra/repair/RepairRunnable.java | 2 +- .../repair/consistent/LocalSessions.java | 2 +- .../consistent/admin/CleanupSummary.java | 2 +- .../service/ActiveRepairService.java | 6 +-- .../cassandra/service/CassandraDaemon.java | 2 +- .../cassandra/service/StorageService.java | 2 +- .../uncommitted/UncommittedTableData.java | 2 +- .../cassandra/streaming/StreamSession.java | 6 +-- .../apache/cassandra/utils/StatusLogger.java | 2 +- .../CompactionStrategyManagerTest.java | 10 ++-- .../db/compaction/CompactionsBytemanTest.java | 2 +- .../db/compaction/CompactionsCQLTest.java | 4 +- .../db/lifecycle/LogTransactionTest.java | 2 +- .../db/lifecycle/RealTransactionsTest.java | 4 +- .../index/internal/CustomCassandraIndex.java | 2 +- .../io/sstable/SSTableReaderTest.java | 6 +-- .../metrics/TrieMemtableMetricsTest.java | 2 +- .../cassandra/repair/ValidatorTest.java | 6 +-- .../apache/cassandra/schema/MockSchema.java | 2 +- .../streaming/StreamingTransferTest.java | 4 +- 44 files changed, 112 insertions(+), 107 deletions(-) diff --git a/src/java/org/apache/cassandra/db/AbstractCompactionController.java b/src/java/org/apache/cassandra/db/AbstractCompactionController.java index 14c0dfa814b2..db533ee870f3 100644 --- a/src/java/org/apache/cassandra/db/AbstractCompactionController.java +++ b/src/java/org/apache/cassandra/db/AbstractCompactionController.java @@ -44,7 +44,7 @@ public AbstractCompactionController(final ColumnFamilyStore cfs, final long gcBe public String getKeyspace() { - return cfs.keyspace.getName(); + return cfs.getKeyspaceName(); } public String getColumnFamily() diff --git a/src/java/org/apache/cassandra/db/ColumnFamilyStore.java b/src/java/org/apache/cassandra/db/ColumnFamilyStore.java index 4a2ebc7ee75a..0c5b1bbe18c6 100644 --- a/src/java/org/apache/cassandra/db/ColumnFamilyStore.java +++ 
b/src/java/org/apache/cassandra/db/ColumnFamilyStore.java @@ -347,7 +347,7 @@ TablePaxosRepairHistory get() if (history != null) return history; - history = TablePaxosRepairHistory.load(keyspace.getName(), name); + history = TablePaxosRepairHistory.load(getKeyspaceName(), name); return history; } } @@ -488,7 +488,7 @@ public ColumnFamilyStore(Keyspace keyspace, additionalWriteLatencyMicros = DatabaseDescriptor.getWriteRpcTimeout(TimeUnit.MICROSECONDS) / 2; memtableFactory = metadata.get().params.memtable.factory(); - logger.info("Initializing {}.{}", keyspace.getName(), name); + logger.info("Initializing {}.{}", getKeyspaceName(), name); // Create Memtable and its metrics object only on online Memtable initialMemtable = null; @@ -539,8 +539,8 @@ public ColumnFamilyStore(Keyspace keyspace, if (registerBookeeping) { // register the mbean - mbeanName = getTableMBeanName(keyspace.getName(), name, isIndex()); - oldMBeanName = getColumnFamilieMBeanName(keyspace.getName(), name, isIndex()); + mbeanName = getTableMBeanName(getKeyspaceName(), name, isIndex()); + oldMBeanName = getColumnFamilieMBeanName(getKeyspaceName(), name, isIndex()); String[] objectNames = {mbeanName, oldMBeanName}; for (String objectName : objectNames) @@ -556,7 +556,7 @@ public ColumnFamilyStore(Keyspace keyspace, repairManager = new CassandraTableRepairManager(this); sstableImporter = new SSTableImporter(this); - if (SchemaConstants.isSystemKeyspace(keyspace.getName())) + if (SchemaConstants.isSystemKeyspace(getKeyspaceName())) topPartitions = null; else topPartitions = new TopPartitionTracker(metadata()); @@ -887,7 +887,7 @@ Descriptor getUniqueDescriptorFor(Descriptor descriptor, File targetDirectory) public void rebuildSecondaryIndex(String idxName) { - rebuildSecondaryIndex(keyspace.getName(), metadata.name, idxName); + rebuildSecondaryIndex(getKeyspaceName(), metadata.name, idxName); } public static void rebuildSecondaryIndex(String ksName, String cfName, String... 
idxNames) @@ -923,6 +923,11 @@ public String getTableName() return name; } + public String getKeyspaceName() + { + return keyspace.getName(); + } + public Descriptor newSSTableDescriptor(File directory) { return newSSTableDescriptor(directory, DatabaseDescriptor.getSelectedSSTableFormat().getLatestVersion()); @@ -937,7 +942,7 @@ public Descriptor newSSTableDescriptor(File directory, Version version) { Descriptor newDescriptor = new Descriptor(version, directory, - keyspace.getName(), + getKeyspaceName(), name, sstableIdGenerator.get()); assert !newDescriptor.fileFor(Components.DATA).exists(); @@ -1003,7 +1008,7 @@ private void logFlush(FlushReason reason) for (ColumnFamilyStore indexCfs : indexManager.getAllIndexColumnFamilyStores()) indexCfs.getTracker().getView().getCurrentMemtable().addMemoryUsageTo(usage); - logger.info("Enqueuing flush of {}.{}, Reason: {}, Usage: {}", keyspace.getName(), name, reason, usage); + logger.info("Enqueuing flush of {}.{}, Reason: {}, Usage: {}", getKeyspaceName(), name, reason, usage); } @@ -1250,7 +1255,7 @@ public Collection flushMemtable(ColumnFamilyStore cfs, Memtable m { // flush the memtable flushRunnables = Flushing.flushRunnables(cfs, memtable, txn); - ExecutorPlus[] executors = perDiskflushExecutors.getExecutorsFor(keyspace.getName(), name); + ExecutorPlus[] executors = perDiskflushExecutors.getExecutorsFor(getKeyspaceName(), name); for (int i = 0; i < flushRunnables.size(); i++) futures.add(executors[i].submit(flushRunnables.get(i))); @@ -1440,7 +1445,7 @@ public void apply(PartitionUpdate update, UpdateTransaction indexer, OpOrder.Gro { throw new RuntimeException(e.getMessage() + " for ks: " - + keyspace.getName() + ", table: " + name, e); + + getKeyspaceName() + ", table: " + name, e); } } @@ -1458,7 +1463,7 @@ public ShardBoundaries localRangeSplits(int shardCount) { List weightedRanges; long ringVersion; - if (!SchemaConstants.isLocalSystemKeyspace(keyspace.getName()) + if 
(!SchemaConstants.isLocalSystemKeyspace(getKeyspaceName()) && getPartitioner() == StorageService.instance.getTokenMetadata().partitioner) { DiskBoundaryManager.VersionedRangesAtEndpoint versionedLocalRanges = DiskBoundaryManager.getVersionedLocalRanges(this); @@ -1494,7 +1499,7 @@ && getPartitioner() == StorageService.instance.getTokenMetadata().partitioner) shardBoundaries = new ShardBoundaries(boundaries.subList(0, boundaries.size() - 1), ringVersion); cachedShardBoundaries = shardBoundaries; - logger.debug("Memtable shard boundaries for {}.{}: {}", keyspace.getName(), getTableName(), boundaries); + logger.debug("Memtable shard boundaries for {}.{}: {}", getKeyspaceName(), getTableName(), boundaries); } return shardBoundaries; } @@ -1622,7 +1627,7 @@ public long getExpectedCompactedFileSize(Iterable sstables, Opera // cleanup size estimation only counts bytes for keys local to this node long expectedFileSize = 0; - Collection> ranges = StorageService.instance.getLocalReplicas(keyspace.getName()).ranges(); + Collection> ranges = StorageService.instance.getLocalReplicas(getKeyspaceName()).ranges(); for (SSTableReader sstable : sstables) { List positions = sstable.getPositionsForRanges(ranges); @@ -1989,7 +1994,7 @@ public List finishLocalSampling(String sampler, int count) throws //Not duplicating the buffer for safety because AbstractSerializer and ByteBufferUtil.bytesToHex //don't modify position or limit result.add(new CompositeDataSupport(COUNTER_COMPOSITE_TYPE, COUNTER_NAMES, new Object[] { - keyspace.getName() + "." + name, + getKeyspaceName() + "." 
+ name, counter.count, counter.error, samplerImpl.toString(counter.value) })); // string @@ -2011,7 +2016,7 @@ public void compactionDiskSpaceCheck(boolean enable) public void cleanupCache() { - Collection> ranges = StorageService.instance.getLocalReplicas(keyspace.getName()).ranges(); + Collection> ranges = StorageService.instance.getLocalReplicas(getKeyspaceName()).ranges(); for (Iterator keyIter = CacheService.instance.rowCache.keyIterator(); keyIter.hasNext(); ) @@ -2643,7 +2648,7 @@ private void truncateBlocking(boolean noSnapshot) // beginning if we restart before they [the CL segments] are discarded for // normal reasons post-truncate. To prevent this, we store truncation // position in the System keyspace. - logger.info("Truncating {}.{}", keyspace.getName(), name); + logger.info("Truncating {}.{}", getKeyspaceName(), name); viewManager.stopBuild(); @@ -2677,7 +2682,7 @@ private void truncateBlocking(boolean noSnapshot) { public void run() { - logger.info("Truncating {}.{} with truncatedAt={}", keyspace.getName(), getTableName(), truncatedAt); + logger.info("Truncating {}.{} with truncatedAt={}", getKeyspaceName(), getTableName(), truncatedAt); // since truncation can happen at different times on different nodes, we need to make sure // that any repairs are aborted, otherwise we might clear the data on one node and then // stream in data that is actually supposed to have been deleted @@ -2704,7 +2709,7 @@ public void run() viewManager.build(); - logger.info("Truncate of {}.{} is complete", keyspace.getName(), name); + logger.info("Truncate of {}.{} is complete", getKeyspaceName(), name); } /** @@ -2866,7 +2871,7 @@ public T withAllSSTables(final OperationType operationType, Function importNewSSTables(Options options) { UUID importID = UUID.randomUUID(); - logger.info("[{}] Loading new SSTables for {}/{}: {}", importID, cfs.keyspace.getName(), cfs.getTableName(), options); + logger.info("[{}] Loading new SSTables for {}/{}: {}", importID, 
cfs.getKeyspaceName(), cfs.getTableName(), options); List> listers = getSSTableListers(options.srcPaths); @@ -175,11 +175,11 @@ synchronized List importNewSSTables(Options options) if (newSSTables.isEmpty()) { - logger.info("[{}] No new SSTables were found for {}/{}", importID, cfs.keyspace.getName(), cfs.getTableName()); + logger.info("[{}] No new SSTables were found for {}/{}", importID, cfs.getKeyspaceName(), cfs.getTableName()); return failedDirectories; } - logger.info("[{}] Loading new SSTables and building secondary indexes for {}/{}: {}", importID, cfs.keyspace.getName(), cfs.getTableName(), newSSTables); + logger.info("[{}] Loading new SSTables and building secondary indexes for {}/{}: {}", importID, cfs.getKeyspaceName(), cfs.getTableName(), newSSTables); if (logger.isTraceEnabled()) logLeveling(importID, newSSTables); @@ -199,7 +199,7 @@ synchronized List importNewSSTables(Options options) throw new RuntimeException("Failed adding SSTables", t); } - logger.info("[{}] Done loading load new SSTables for {}/{}", importID, cfs.keyspace.getName(), cfs.getTableName()); + logger.info("[{}] Done loading load new SSTables for {}/{}", importID, cfs.getKeyspaceName(), cfs.getTableName()); return failedDirectories; } diff --git a/src/java/org/apache/cassandra/db/compaction/CompactionController.java b/src/java/org/apache/cassandra/db/compaction/CompactionController.java index 35a41d11d6c8..69f31996c341 100644 --- a/src/java/org/apache/cassandra/db/compaction/CompactionController.java +++ b/src/java/org/apache/cassandra/db/compaction/CompactionController.java @@ -109,7 +109,7 @@ public void maybeRefreshOverlaps() if (cfs.getNeverPurgeTombstones()) { - logger.debug("not refreshing overlaps for {}.{} - neverPurgeTombstones is enabled", cfs.keyspace.getName(), cfs.getTableName()); + logger.debug("not refreshing overlaps for {}.{} - neverPurgeTombstones is enabled", cfs.getKeyspaceName(), cfs.getTableName()); return; } diff --git 
a/src/java/org/apache/cassandra/db/compaction/CompactionIterator.java b/src/java/org/apache/cassandra/db/compaction/CompactionIterator.java index e3fedfeb980b..ce3e7d0363e5 100644 --- a/src/java/org/apache/cassandra/db/compaction/CompactionIterator.java +++ b/src/java/org/apache/cassandra/db/compaction/CompactionIterator.java @@ -743,6 +743,6 @@ public Row applyToRow(Row row) private static boolean isPaxos(ColumnFamilyStore cfs) { - return cfs.name.equals(SystemKeyspace.PAXOS) && cfs.keyspace.getName().equals(SchemaConstants.SYSTEM_KEYSPACE_NAME); + return cfs.name.equals(SystemKeyspace.PAXOS) && cfs.getKeyspaceName().equals(SchemaConstants.SYSTEM_KEYSPACE_NAME); } } \ No newline at end of file diff --git a/src/java/org/apache/cassandra/db/compaction/CompactionLogger.java b/src/java/org/apache/cassandra/db/compaction/CompactionLogger.java index 88a770599775..dd4983ddda6b 100644 --- a/src/java/org/apache/cassandra/db/compaction/CompactionLogger.java +++ b/src/java/org/apache/cassandra/db/compaction/CompactionLogger.java @@ -228,7 +228,7 @@ private void describeStrategy(ObjectNode node) ColumnFamilyStore cfs = cfsRef.get(); if (cfs == null) return; - node.put("keyspace", cfs.keyspace.getName()); + node.put("keyspace", cfs.getKeyspaceName()); node.put("table", cfs.getTableName()); node.put("time", currentTimeMillis()); } diff --git a/src/java/org/apache/cassandra/db/compaction/CompactionManager.java b/src/java/org/apache/cassandra/db/compaction/CompactionManager.java index 7376d197cf01..c927db0d8044 100644 --- a/src/java/org/apache/cassandra/db/compaction/CompactionManager.java +++ b/src/java/org/apache/cassandra/db/compaction/CompactionManager.java @@ -243,12 +243,12 @@ public List> submitBackground(final ColumnFamilyStore cfs) if (count > 0 && executor.getActiveTaskCount() >= executor.getMaximumPoolSize()) { logger.trace("Background compaction is still running for {}.{} ({} remaining). 
Skipping", - cfs.keyspace.getName(), cfs.name, count); + cfs.getKeyspaceName(), cfs.name, count); return Collections.emptyList(); } logger.trace("Scheduling a background task check for {}.{} with {}", - cfs.keyspace.getName(), + cfs.getKeyspaceName(), cfs.name, cfs.getCompactionStrategyManager().getName()); @@ -353,7 +353,7 @@ public void run() boolean ranCompaction = false; try { - logger.trace("Checking {}.{}", cfs.keyspace.getName(), cfs.name); + logger.trace("Checking {}.{}", cfs.getKeyspaceName(), cfs.name); if (!cfs.isValid()) { logger.trace("Aborting compaction for dropped CF"); @@ -383,7 +383,7 @@ public void run() boolean maybeRunUpgradeTask(CompactionStrategyManager strategy) { - logger.debug("Checking for upgrade tasks {}.{}", cfs.keyspace.getName(), cfs.getTableName()); + logger.debug("Checking for upgrade tasks {}.{}", cfs.getKeyspaceName(), cfs.getTableName()); try { if (currentlyBackgroundUpgrading.incrementAndGet() <= DatabaseDescriptor.maxConcurrentAutoUpgradeTasks()) @@ -426,10 +426,10 @@ private AllSSTableOpStatus parallelAllSSTableOperation(final ColumnFamilyStore c OperationType operationType) { String operationName = operationType.name(); - String keyspace = cfs.keyspace.getName(); + String keyspace = cfs.getKeyspaceName(); String table = cfs.getTableName(); return cfs.withAllSSTables(operationType, (compacting) -> { - logger.info("Starting {} for {}.{}", operationType, cfs.keyspace.getName(), cfs.getTableName()); + logger.info("Starting {} for {}.{}", operationType, cfs.getKeyspaceName(), cfs.getTableName()); List transactions = new ArrayList<>(); List> futures = new ArrayList<>(); try @@ -657,7 +657,7 @@ public Iterable filterSSTables(LifecycleTransaction transaction) } } logger.info("Skipping cleanup for {}/{} sstables for {}.{} since they are fully contained in owned ranges (full ranges: {}, transient ranges: {})", - skippedSStables, totalSSTables, cfStore.keyspace.getName(), cfStore.getTableName(), fullRanges, transientRanges); + 
skippedSStables, totalSSTables, cfStore.getKeyspaceName(), cfStore.getTableName(), fullRanges, transientRanges); sortedSSTables.sort(SSTableReader.sizeComparator); return sortedSSTables; } @@ -744,7 +744,7 @@ public AllSSTableOpStatus relocateSSTables(final ColumnFamilyStore cfs, int jobs return AllSSTableOpStatus.ABORTED; } - if (StorageService.instance.getLocalReplicas(cfs.keyspace.getName()).isEmpty()) + if (StorageService.instance.getLocalReplicas(cfs.getKeyspaceName()).isEmpty()) { logger.info("Relocate cannot run before a node has joined the ring"); return AllSSTableOpStatus.ABORTED; @@ -903,7 +903,7 @@ public void performAnticompaction(ColumnFamilyStore cfs, Preconditions.checkArgument(!replicas.isEmpty(), "No ranges to anti-compact"); if (logger.isInfoEnabled()) - logger.info("{} Starting anticompaction for {}.{} on {}/{} sstables", PreviewKind.NONE.logPrefix(sessionID), cfs.keyspace.getName(), cfs.getTableName(), validatedForRepair.size(), cfs.getLiveSSTables().size()); + logger.info("{} Starting anticompaction for {}.{} on {}/{} sstables", PreviewKind.NONE.logPrefix(sessionID), cfs.getKeyspaceName(), cfs.getTableName(), validatedForRepair.size(), cfs.getLiveSSTables().size()); if (logger.isTraceEnabled()) logger.trace("{} Starting anticompaction for ranges {}", PreviewKind.NONE.logPrefix(sessionID), replicas); @@ -1736,7 +1736,7 @@ int antiCompactGroup(ColumnFamilyStore cfs, return 0; } - logger.info("Anticompacting {} in {}.{} for {}", txn.originals(), cfs.keyspace.getName(), cfs.getTableName(), pendingRepair); + logger.info("Anticompacting {} in {}.{} for {}", txn.originals(), cfs.getKeyspaceName(), cfs.getTableName(), pendingRepair); Set sstableAsSet = txn.originals(); File destination = cfs.getDirectories().getWriteableLocationAsFile(cfs.getExpectedCompactedFileSize(sstableAsSet, OperationType.ANTICOMPACTION)); @@ -1846,7 +1846,7 @@ else if (transChecker.test(token)) txn.commit(); logger.info("Anticompacted {} in {}.{} to full = {}, transient = {}, 
unrepaired = {} for {}", sstableAsSet, - cfs.keyspace.getName(), + cfs.getKeyspaceName(), cfs.getTableName(), fullSSTables, transSSTables, diff --git a/src/java/org/apache/cassandra/db/compaction/CompactionStrategyManager.java b/src/java/org/apache/cassandra/db/compaction/CompactionStrategyManager.java index 53527b83e9e7..d3347fc58767 100644 --- a/src/java/org/apache/cassandra/db/compaction/CompactionStrategyManager.java +++ b/src/java/org/apache/cassandra/db/compaction/CompactionStrategyManager.java @@ -485,7 +485,7 @@ public void maybeReloadParamsFromSchema(CompactionParams params) private void reloadParamsFromSchema(CompactionParams newParams) { logger.debug("Recreating compaction strategy for {}.{} - compaction parameters changed via CQL", - cfs.keyspace.getName(), cfs.getTableName()); + cfs.getKeyspaceName(), cfs.getTableName()); /* * It's possible for compaction to be explicitly enabled/disabled @@ -532,7 +532,7 @@ private void maybeReloadParamsFromJMX(CompactionParams params) private void reloadParamsFromJMX(CompactionParams newParams) { logger.debug("Recreating compaction strategy for {}.{} - compaction parameters changed via JMX", - cfs.keyspace.getName(), cfs.getTableName()); + cfs.getKeyspaceName(), cfs.getTableName()); setStrategy(newParams); @@ -587,12 +587,12 @@ private void reloadDiskBoundaries(DiskBoundaries newBoundaries) if (newBoundaries.isEquivalentTo(oldBoundaries)) { logger.debug("Not recreating compaction strategy for {}.{} - disk boundaries are equivalent", - cfs.keyspace.getName(), cfs.getTableName()); + cfs.getKeyspaceName(), cfs.getTableName()); return; } logger.debug("Recreating compaction strategy for {}.{} - disk boundaries are out of date", - cfs.keyspace.getName(), cfs.getTableName()); + cfs.getKeyspaceName(), cfs.getTableName()); setStrategy(params); startup(); } diff --git a/src/java/org/apache/cassandra/db/compaction/CompactionTask.java b/src/java/org/apache/cassandra/db/compaction/CompactionTask.java index 
1f28ee07f237..4ca0e0f53ff5 100644 --- a/src/java/org/apache/cassandra/db/compaction/CompactionTask.java +++ b/src/java/org/apache/cassandra/db/compaction/CompactionTask.java @@ -252,7 +252,7 @@ public boolean apply(SSTableReader sstable) for (int i = 0; i < mergedRowCounts.length; i++) totalSourceRows += mergedRowCounts[i] * (i + 1); - String mergeSummary = updateCompactionHistory(taskId, cfs.keyspace.getName(), cfs.getTableName(), mergedRowCounts, startsize, endsize, + String mergeSummary = updateCompactionHistory(taskId, cfs.getKeyspaceName(), cfs.getTableName(), mergedRowCounts, startsize, endsize, ImmutableMap.of(COMPACTION_TYPE_PROPERTY, compactionType.type)); logger.info(String.format("Compacted (%s) %d sstables to [%s] to level=%d. %s to %s (~%d%% of original) in %,dms. Read Throughput = %s, Write Throughput = %s, Row Throughput = ~%,d/s. %,d total partitions merged to %,d. Partition merge counts were {%s}. Time spent writing keys = %,dms", @@ -417,7 +417,7 @@ protected boolean buildCompactionCandidatesForAvailableDiskSpace(final Set fnTotalSizeByt } logger.trace("Estimating {} compactions to do for {}.{}", - Arrays.toString(estimated), cfs.keyspace.getName(), cfs.name); + Arrays.toString(estimated), cfs.getKeyspaceName(), cfs.name); return Ints.checkedCast(tasks); } diff --git a/src/java/org/apache/cassandra/db/lifecycle/Tracker.java b/src/java/org/apache/cassandra/db/lifecycle/Tracker.java index c54ee83bd752..6f2b8c6d855d 100644 --- a/src/java/org/apache/cassandra/db/lifecycle/Tracker.java +++ b/src/java/org/apache/cassandra/db/lifecycle/Tracker.java @@ -176,7 +176,7 @@ Throwable updateSizeTracking(Iterable oldSSTables, Iterable oldSSTables, Iterable 0) logger.debug("[Stream #{}] Invalidated {} row cache entries on table {}.{} after stream " + "receive task completed.", session.planId(), invalidatedKeys, - cfs.keyspace.getName(), cfs.getTableName()); + cfs.getKeyspaceName(), cfs.getTableName()); } if (cfs.metadata().isCounter()) @@ -270,7 +270,7 @@ public 
void finished() if (invalidatedKeys > 0) logger.debug("[Stream #{}] Invalidated {} counter cache entries on table {}.{} after stream " + "receive task completed.", session.planId(), invalidatedKeys, - cfs.keyspace.getName(), cfs.getTableName()); + cfs.getKeyspaceName(), cfs.getTableName()); } } } diff --git a/src/java/org/apache/cassandra/db/view/View.java b/src/java/org/apache/cassandra/db/view/View.java index a42c9e2e8960..5828008388b0 100644 --- a/src/java/org/apache/cassandra/db/view/View.java +++ b/src/java/org/apache/cassandra/db/view/View.java @@ -169,7 +169,7 @@ SelectStatement getSelectStatement() false); SelectStatement.RawStatement rawSelect = - new SelectStatement.RawStatement(new QualifiedName(baseCfs.keyspace.getName(), baseCfs.name), + new SelectStatement.RawStatement(new QualifiedName(baseCfs.getKeyspaceName(), baseCfs.name), parameters, selectClause(), definition.whereClause, diff --git a/src/java/org/apache/cassandra/db/view/ViewBuilderTask.java b/src/java/org/apache/cassandra/db/view/ViewBuilderTask.java index a705a516a910..59c7b3c55112 100644 --- a/src/java/org/apache/cassandra/db/view/ViewBuilderTask.java +++ b/src/java/org/apache/cassandra/db/view/ViewBuilderTask.java @@ -135,7 +135,7 @@ public Long call() */ boolean schemaConverged = Gossiper.instance.waitForSchemaAgreement(10, TimeUnit.SECONDS, () -> this.isStopped); if (!schemaConverged) - logger.warn("Failed to get schema to converge before building view {}.{}", baseCfs.keyspace.getName(), view.name); + logger.warn("Failed to get schema to converge before building view {}.{}", baseCfs.getKeyspaceName(), view.name); Function> function; function = org.apache.cassandra.db.lifecycle.View.select(SSTableSet.CANONICAL, s -> range.intersects(s.getBounds())); @@ -175,7 +175,7 @@ public Long call() private void finish() { - String ksName = baseCfs.keyspace.getName(); + String ksName = baseCfs.getKeyspaceName(); if (!isStopped) { // Save the completed status using the end of the range as last token. 
This way it will be possible for diff --git a/src/java/org/apache/cassandra/db/virtual/TableMetricTables.java b/src/java/org/apache/cassandra/db/virtual/TableMetricTables.java index 8368fd9ae51b..5528c92011cc 100644 --- a/src/java/org/apache/cassandra/db/virtual/TableMetricTables.java +++ b/src/java/org/apache/cassandra/db/virtual/TableMetricTables.java @@ -195,7 +195,7 @@ public DataSet data() Metric metric = func.apply(cfs.metric); // set new partition for this table - result.row(cfs.keyspace.getName(), cfs.name); + result.row(cfs.getKeyspaceName(), cfs.name); // extract information by metric type and put it in row based on implementation of `add` if (metric instanceof Counting) diff --git a/src/java/org/apache/cassandra/index/SecondaryIndexManager.java b/src/java/org/apache/cassandra/index/SecondaryIndexManager.java index 3694478ead34..31d3e84561d4 100644 --- a/src/java/org/apache/cassandra/index/SecondaryIndexManager.java +++ b/src/java/org/apache/cassandra/index/SecondaryIndexManager.java @@ -622,7 +622,7 @@ private String getIndexNames(Set indexes) */ private synchronized void markIndexesBuilding(Set indexes, boolean isFullRebuild, boolean isNewCF) { - String keyspaceName = baseCfs.keyspace.getName(); + String keyspaceName = baseCfs.getKeyspaceName(); // First step is to validate against concurrent rebuilds; it would be more optimized to do everything on a single // step, but we're not really expecting a very high number of indexes, and this isn't on any hot path, so @@ -677,7 +677,7 @@ private synchronized void markIndexBuilt(Index index, boolean isFullRebuild) { inProgressBuilds.remove(indexName); if (!needsFullRebuild.contains(indexName) && DatabaseDescriptor.isDaemonInitialized() && Keyspace.isInitialized()) - SystemKeyspace.setIndexBuilt(baseCfs.keyspace.getName(), indexName); + SystemKeyspace.setIndexBuilt(baseCfs.getKeyspaceName(), indexName); } } } @@ -701,7 +701,7 @@ private synchronized void markIndexFailed(Index index, boolean isInitialBuild) 
counter.decrementAndGet(); if (DatabaseDescriptor.isDaemonInitialized()) - SystemKeyspace.setIndexRemoved(baseCfs.keyspace.getName(), indexName); + SystemKeyspace.setIndexRemoved(baseCfs.getKeyspaceName(), indexName); needsFullRebuild.add(indexName); @@ -730,7 +730,7 @@ private void logAndMarkIndexesFailed(Set indexes, Throwable indexBuildFai */ private synchronized void markIndexRemoved(String indexName) { - SystemKeyspace.setIndexRemoved(baseCfs.keyspace.getName(), indexName); + SystemKeyspace.setIndexRemoved(baseCfs.getKeyspaceName(), indexName); queryableIndexes.remove(indexName); writableIndexes.remove(indexName); needsFullRebuild.remove(indexName); @@ -863,7 +863,7 @@ public List getBuiltIndexNames() indexes.values().stream() .map(i -> i.getIndexMetadata().name) .forEach(allIndexNames::add); - return SystemKeyspace.getBuiltIndexes(baseCfs.keyspace.getName(), allIndexNames); + return SystemKeyspace.getBuiltIndexes(baseCfs.getKeyspaceName(), allIndexNames); } /** diff --git a/src/java/org/apache/cassandra/index/internal/CassandraIndex.java b/src/java/org/apache/cassandra/index/internal/CassandraIndex.java index 09ccf1aa3a2b..4f274cf70735 100644 --- a/src/java/org/apache/cassandra/index/internal/CassandraIndex.java +++ b/src/java/org/apache/cassandra/index/internal/CassandraIndex.java @@ -667,7 +667,7 @@ private void invalidate() private boolean isBuilt() { - return SystemKeyspace.isIndexBuilt(baseCfs.keyspace.getName(), metadata.name); + return SystemKeyspace.isIndexBuilt(baseCfs.getKeyspaceName(), metadata.name); } private boolean isPrimaryKeyIndex() diff --git a/src/java/org/apache/cassandra/io/sstable/RangeAwareSSTableWriter.java b/src/java/org/apache/cassandra/io/sstable/RangeAwareSSTableWriter.java index 9caef0bf9d40..a3e76166b4eb 100644 --- a/src/java/org/apache/cassandra/io/sstable/RangeAwareSSTableWriter.java +++ b/src/java/org/apache/cassandra/io/sstable/RangeAwareSSTableWriter.java @@ -155,7 +155,7 @@ public SSTableMultiWriter setOpenResult(boolean 
openResult) public String getFilename() { - return String.join("/", cfs.keyspace.getName(), cfs.getTableName()); + return String.join("/", cfs.getKeyspaceName(), cfs.getTableName()); } @Override diff --git a/src/java/org/apache/cassandra/io/sstable/format/SortedTableScrubber.java b/src/java/org/apache/cassandra/io/sstable/format/SortedTableScrubber.java index 387919fa65e8..e8fbea22d279 100644 --- a/src/java/org/apache/cassandra/io/sstable/format/SortedTableScrubber.java +++ b/src/java/org/apache/cassandra/io/sstable/format/SortedTableScrubber.java @@ -114,7 +114,7 @@ protected SortedTableScrubber(ColumnFamilyStore cfs, { this.sstable = (R) transaction.onlyOne(); Preconditions.checkNotNull(sstable.metadata()); - assert sstable.metadata().keyspace.equals(cfs.keyspace.getName()); + assert sstable.metadata().keyspace.equals(cfs.getKeyspaceName()); if (!sstable.descriptor.cfname.equals(cfs.metadata().name)) { logger.warn("Descriptor points to a different table {} than metadata {}", sstable.descriptor.cfname, cfs.metadata().name); diff --git a/src/java/org/apache/cassandra/metrics/TableMetrics.java b/src/java/org/apache/cassandra/metrics/TableMetrics.java index f4b9221aa58a..024c9a50223c 100644 --- a/src/java/org/apache/cassandra/metrics/TableMetrics.java +++ b/src/java/org/apache/cassandra/metrics/TableMetrics.java @@ -1260,7 +1260,7 @@ static class TableMetricNameFactory implements MetricNameFactory TableMetricNameFactory(ColumnFamilyStore cfs, String type) { - this.keyspaceName = cfs.keyspace.getName(); + this.keyspaceName = cfs.getKeyspaceName(); this.tableName = cfs.name; this.isIndex = cfs.isIndex(); this.type = type; diff --git a/src/java/org/apache/cassandra/repair/RepairRunnable.java b/src/java/org/apache/cassandra/repair/RepairRunnable.java index 7607b045fa90..c56601010f01 100644 --- a/src/java/org/apache/cassandra/repair/RepairRunnable.java +++ b/src/java/org/apache/cassandra/repair/RepairRunnable.java @@ -298,7 +298,7 @@ private TraceState 
maybeCreateTraceState(Iterable columnFamil StringBuilder cfsb = new StringBuilder(); for (ColumnFamilyStore cfs : columnFamilyStores) - cfsb.append(", ").append(cfs.keyspace.getName()).append(".").append(cfs.name); + cfsb.append(", ").append(cfs.getKeyspaceName()).append(".").append(cfs.name); TimeUUID sessionId = Tracing.instance.newSession(Tracing.TraceType.REPAIR); TraceState traceState = Tracing.instance.begin("repair", ImmutableMap.of("keyspace", state.keyspace, "columnFamilies", diff --git a/src/java/org/apache/cassandra/repair/consistent/LocalSessions.java b/src/java/org/apache/cassandra/repair/consistent/LocalSessions.java index a281c0c796ad..31df2e69bbe1 100644 --- a/src/java/org/apache/cassandra/repair/consistent/LocalSessions.java +++ b/src/java/org/apache/cassandra/repair/consistent/LocalSessions.java @@ -301,7 +301,7 @@ public PendingStats getPendingStats(TableId tid, Collection> ranges } } - return new PendingStats(cfs.keyspace.getName(), cfs.name, pending.build(), finalized.build(), failed.build()); + return new PendingStats(cfs.getKeyspaceName(), cfs.name, pending.build(), finalized.build(), failed.build()); } public CleanupSummary cleanup(TableId tid, Collection> ranges, boolean force) diff --git a/src/java/org/apache/cassandra/repair/consistent/admin/CleanupSummary.java b/src/java/org/apache/cassandra/repair/consistent/admin/CleanupSummary.java index 89b1eec573f6..f715cc98f5b6 100644 --- a/src/java/org/apache/cassandra/repair/consistent/admin/CleanupSummary.java +++ b/src/java/org/apache/cassandra/repair/consistent/admin/CleanupSummary.java @@ -76,7 +76,7 @@ public CleanupSummary(String keyspace, String table, Set successful, S public CleanupSummary(ColumnFamilyStore cfs, Set successful, Set unsuccessful) { - this(cfs.keyspace.getName(), cfs.name, successful, unsuccessful); + this(cfs.getKeyspaceName(), cfs.name, successful, unsuccessful); } public static CleanupSummary add(CleanupSummary l, CleanupSummary r) diff --git 
a/src/java/org/apache/cassandra/service/ActiveRepairService.java b/src/java/org/apache/cassandra/service/ActiveRepairService.java index 5d69507656dd..738551d7c368 100644 --- a/src/java/org/apache/cassandra/service/ActiveRepairService.java +++ b/src/java/org/apache/cassandra/service/ActiveRepairService.java @@ -345,7 +345,7 @@ public List getRepairStats(List schemaArgs, String rangeS for (ColumnFamilyStore cfs : SchemaArgsParser.parse(schemaArgs)) { - String keyspace = cfs.keyspace.getName(); + String keyspace = cfs.getKeyspaceName(); Collection> ranges = userRanges != null ? userRanges : StorageService.instance.getLocalReplicas(keyspace).ranges(); @@ -365,7 +365,7 @@ public List getPendingStats(List schemaArgs, String range : null; for (ColumnFamilyStore cfs : SchemaArgsParser.parse(schemaArgs)) { - String keyspace = cfs.keyspace.getName(); + String keyspace = cfs.getKeyspaceName(); Collection> ranges = userRanges != null ? userRanges : StorageService.instance.getLocalReplicas(keyspace).ranges(); @@ -385,7 +385,7 @@ public List cleanupPending(List schemaArgs, String rangeS : null; for (ColumnFamilyStore cfs : SchemaArgsParser.parse(schemaArgs)) { - String keyspace = cfs.keyspace.getName(); + String keyspace = cfs.getKeyspaceName(); Collection> ranges = userRanges != null ? 
userRanges : StorageService.instance.getLocalReplicas(keyspace).ranges(); diff --git a/src/java/org/apache/cassandra/service/CassandraDaemon.java b/src/java/org/apache/cassandra/service/CassandraDaemon.java index fe91e81d81b3..61320f823b53 100644 --- a/src/java/org/apache/cassandra/service/CassandraDaemon.java +++ b/src/java/org/apache/cassandra/service/CassandraDaemon.java @@ -455,7 +455,7 @@ protected void setup() } else { - logger.info("Not enabling compaction for {}.{}; autocompaction_on_startup_enabled is set to false", store.keyspace.getName(), store.name); + logger.info("Not enabling compaction for {}.{}; autocompaction_on_startup_enabled is set to false", store.getKeyspaceName(), store.name); } } } diff --git a/src/java/org/apache/cassandra/service/StorageService.java b/src/java/org/apache/cassandra/service/StorageService.java index 72f8758fb8e7..fc740a1fab9b 100644 --- a/src/java/org/apache/cassandra/service/StorageService.java +++ b/src/java/org/apache/cassandra/service/StorageService.java @@ -1342,7 +1342,7 @@ else if (isBootstrapMode()) private void executePreJoinTasks(boolean bootstrap) { StreamSupport.stream(ColumnFamilyStore.all().spliterator(), false) - .filter(cfs -> Schema.instance.getUserKeyspaces().names().contains(cfs.keyspace.getName())) + .filter(cfs -> Schema.instance.getUserKeyspaces().names().contains(cfs.getKeyspaceName())) .forEach(cfs -> cfs.indexManager.executePreJoinTasksBlocking(bootstrap)); } diff --git a/src/java/org/apache/cassandra/service/paxos/uncommitted/UncommittedTableData.java b/src/java/org/apache/cassandra/service/paxos/uncommitted/UncommittedTableData.java index dd47e40ce66e..9e2ae9eeb19e 100644 --- a/src/java/org/apache/cassandra/service/paxos/uncommitted/UncommittedTableData.java +++ b/src/java/org/apache/cassandra/service/paxos/uncommitted/UncommittedTableData.java @@ -184,7 +184,7 @@ List> getReplicatedRanges() if (table == null) return Range.normalize(FULL_RANGE); - String ksName = table.keyspace.getName(); + String 
ksName = table.getKeyspaceName(); List> ranges = StorageService.instance.getLocalAndPendingRanges(ksName); // don't filter anything if we're not aware of any locally replicated ranges diff --git a/src/java/org/apache/cassandra/streaming/StreamSession.java b/src/java/org/apache/cassandra/streaming/StreamSession.java index e170ca60ef6c..74228280c3a1 100644 --- a/src/java/org/apache/cassandra/streaming/StreamSession.java +++ b/src/java/org/apache/cassandra/streaming/StreamSession.java @@ -881,7 +881,7 @@ static boolean checkDiskSpace(Map perTableIdIncomingBytes, Set allWriteableFileStores = cfs.getDirectories().allFileStores(fileStoreMapper); if (allWriteableFileStores.isEmpty()) { - logger.error("[Stream #{}] Could not get any writeable FileStores for {}.{}", planId, cfs.keyspace.getName(), cfs.getTableName()); + logger.error("[Stream #{}] Could not get any writeable FileStores for {}.{}", planId, cfs.getKeyspaceName(), cfs.getTableName()); continue; } allFileStores.addAll(allWriteableFileStores); @@ -906,7 +906,7 @@ static boolean checkDiskSpace(Map perTableIdIncomingBytes, newStreamBytesToWritePerFileStore, perTableIdIncomingBytes.keySet().stream() .map(ColumnFamilyStore::getIfExists).filter(Objects::nonNull) - .map(cfs -> cfs.keyspace.getName() + '.' + cfs.name) + .map(cfs -> cfs.getKeyspaceName() + '.' 
+ cfs.name) .collect(Collectors.joining(",")), totalStreamRemaining, totalCompactionWriteRemaining, @@ -943,7 +943,7 @@ static boolean checkPendingCompactions(Map perTableIdIncomingByte tasksStreamed = csm.getEstimatedRemainingTasks(perTableIdIncomingFiles.get(tableId), perTableIdIncomingBytes.get(tableId), isForIncremental); - tables.add(String.format("%s.%s", cfs.keyspace.getName(), cfs.name)); + tables.add(String.format("%s.%s", cfs.getKeyspaceName(), cfs.name)); } pendingCompactionsBeforeStreaming += tasksOther; pendingCompactionsAfterStreaming += tasksStreamed; diff --git a/src/java/org/apache/cassandra/utils/StatusLogger.java b/src/java/org/apache/cassandra/utils/StatusLogger.java index 0850224f2f15..9dbc935f1c3b 100644 --- a/src/java/org/apache/cassandra/utils/StatusLogger.java +++ b/src/java/org/apache/cassandra/utils/StatusLogger.java @@ -119,7 +119,7 @@ private static void logStatus() for (ColumnFamilyStore cfs : ColumnFamilyStore.all()) { logger.info(String.format("%-25s%20s", - cfs.keyspace.getName() + "." + cfs.name, + cfs.getKeyspaceName() + "." 
+ cfs.name, cfs.metric.memtableColumnsCount.getValue() + "," + cfs.metric.memtableLiveDataSize.getValue())); } } diff --git a/test/unit/org/apache/cassandra/db/compaction/CompactionStrategyManagerTest.java b/test/unit/org/apache/cassandra/db/compaction/CompactionStrategyManagerTest.java index 98d3966affee..d66c357c8c15 100644 --- a/test/unit/org/apache/cassandra/db/compaction/CompactionStrategyManagerTest.java +++ b/test/unit/org/apache/cassandra/db/compaction/CompactionStrategyManagerTest.java @@ -340,7 +340,7 @@ public void groupSSTables() throws Exception { final int numDir = 4; ColumnFamilyStore cfs = createJBODMockCFS(numDir); - Keyspace.open(cfs.keyspace.getName()).getColumnFamilyStore(cfs.name).disableAutoCompaction(); + Keyspace.open(cfs.getKeyspaceName()).getColumnFamilyStore(cfs.name).disableAutoCompaction(); assertTrue(cfs.getLiveSSTables().isEmpty()); List transientRepairs = new ArrayList<>(); List pendingRepair = new ArrayList<>(); @@ -350,10 +350,10 @@ public void groupSSTables() throws Exception for (int i = 0; i < numDir; i++) { int key = 100 * i; - transientRepairs.add(createSSTableWithKey(cfs.keyspace.getName(), cfs.name, key++)); - pendingRepair.add(createSSTableWithKey(cfs.keyspace.getName(), cfs.name, key++)); - unrepaired.add(createSSTableWithKey(cfs.keyspace.getName(), cfs.name, key++)); - repaired.add(createSSTableWithKey(cfs.keyspace.getName(), cfs.name, key++)); + transientRepairs.add(createSSTableWithKey(cfs.getKeyspaceName(), cfs.name, key++)); + pendingRepair.add(createSSTableWithKey(cfs.getKeyspaceName(), cfs.name, key++)); + unrepaired.add(createSSTableWithKey(cfs.getKeyspaceName(), cfs.name, key++)); + repaired.add(createSSTableWithKey(cfs.getKeyspaceName(), cfs.name, key++)); } cfs.getCompactionStrategyManager().mutateRepaired(transientRepairs, 0, nextTimeUUID(), true); diff --git a/test/unit/org/apache/cassandra/db/compaction/CompactionsBytemanTest.java b/test/unit/org/apache/cassandra/db/compaction/CompactionsBytemanTest.java 
index 987ce18d0111..d338c8b1690e 100644 --- a/test/unit/org/apache/cassandra/db/compaction/CompactionsBytemanTest.java +++ b/test/unit/org/apache/cassandra/db/compaction/CompactionsBytemanTest.java @@ -120,7 +120,7 @@ public void testRuntimeExceptionWhenNoDiskSpaceForCompaction() throws Throwable targetClass = "CompactionManager", targetMethod = "submitBackground", targetLocation = "AT INVOKE java.util.concurrent.Future.isCancelled", - condition = "!$cfs.keyspace.getName().contains(\"system\")", + condition = "!$cfs.getKeyspaceName().contains(\"system\")", action = "Thread.sleep(5000)") public void testCompactingCFCounting() throws Throwable { diff --git a/test/unit/org/apache/cassandra/db/compaction/CompactionsCQLTest.java b/test/unit/org/apache/cassandra/db/compaction/CompactionsCQLTest.java index 95bbd48ecfb3..52ff8a82548c 100644 --- a/test/unit/org/apache/cassandra/db/compaction/CompactionsCQLTest.java +++ b/test/unit/org/apache/cassandra/db/compaction/CompactionsCQLTest.java @@ -677,7 +677,7 @@ private void readAndValidate(boolean asc) throws Throwable private void readAndValidate(boolean asc, ColumnFamilyStore cfs) throws Throwable { - String kscf = cfs.keyspace.getName() + "." + cfs.name; + String kscf = cfs.getKeyspaceName() + "." + cfs.name; executeFormattedQuery("select * from " + kscf + " where id = 0 order by id2 "+(asc ? 
"ASC" : "DESC")); boolean gotException = false; @@ -912,7 +912,7 @@ public boolean isGlobal() private void loadTestSStables(ColumnFamilyStore cfs, File ksDir) throws IOException { - Keyspace.open(cfs.keyspace.getName()).getColumnFamilyStore(cfs.name).truncateBlocking(); + Keyspace.open(cfs.getKeyspaceName()).getColumnFamilyStore(cfs.name).truncateBlocking(); for (File cfDir : cfs.getDirectories().getCFDirectories()) { File tableDir = new File(ksDir, cfs.name); diff --git a/test/unit/org/apache/cassandra/db/lifecycle/LogTransactionTest.java b/test/unit/org/apache/cassandra/db/lifecycle/LogTransactionTest.java index 8afe77e77d3b..710c869fab84 100644 --- a/test/unit/org/apache/cassandra/db/lifecycle/LogTransactionTest.java +++ b/test/unit/org/apache/cassandra/db/lifecycle/LogTransactionTest.java @@ -1263,7 +1263,7 @@ public void testGetTemporaryFilesThrowsIfCompletingAfterObsoletion() throws Thro private static SSTableReader sstable(File dataFolder, ColumnFamilyStore cfs, int generation, int size) throws IOException { - Descriptor descriptor = new Descriptor(dataFolder, cfs.keyspace.getName(), cfs.getTableName(), new SequenceBasedSSTableId(generation), DatabaseDescriptor.getSelectedSSTableFormat()); + Descriptor descriptor = new Descriptor(dataFolder, cfs.getKeyspaceName(), cfs.getTableName(), new SequenceBasedSSTableId(generation), DatabaseDescriptor.getSelectedSSTableFormat()); if (BigFormat.isSelected()) { Set components = ImmutableSet.of(Components.DATA, Components.PRIMARY_INDEX, Components.FILTER, Components.TOC); diff --git a/test/unit/org/apache/cassandra/db/lifecycle/RealTransactionsTest.java b/test/unit/org/apache/cassandra/db/lifecycle/RealTransactionsTest.java index 501c491b35da..bf03f9cd95e0 100644 --- a/test/unit/org/apache/cassandra/db/lifecycle/RealTransactionsTest.java +++ b/test/unit/org/apache/cassandra/db/lifecycle/RealTransactionsTest.java @@ -134,8 +134,8 @@ private void createSSTable(ColumnFamilyStore cfs, int numPartitions) throws IOEx try 
(CQLSSTableWriter writer = CQLSSTableWriter.builder() .inDirectory(cfs.getDirectories().getDirectoryForNewSSTables()) - .forTable(String.format(schema, cfs.keyspace.getName(), cfs.name)) - .using(String.format(query, cfs.keyspace.getName(), cfs.name)) + .forTable(String.format(schema, cfs.getKeyspaceName(), cfs.name)) + .using(String.format(query, cfs.getKeyspaceName(), cfs.name)) .build()) { for (int j = 0; j < numPartitions; j ++) diff --git a/test/unit/org/apache/cassandra/index/internal/CustomCassandraIndex.java b/test/unit/org/apache/cassandra/index/internal/CustomCassandraIndex.java index c1dd00d42afd..9640af135714 100644 --- a/test/unit/org/apache/cassandra/index/internal/CustomCassandraIndex.java +++ b/test/unit/org/apache/cassandra/index/internal/CustomCassandraIndex.java @@ -605,7 +605,7 @@ private void invalidate() private boolean isBuilt() { - return SystemKeyspace.isIndexBuilt(baseCfs.keyspace.getName(), metadata.name); + return SystemKeyspace.isIndexBuilt(baseCfs.getKeyspaceName(), metadata.name); } private boolean isPrimaryKeyIndex() diff --git a/test/unit/org/apache/cassandra/io/sstable/SSTableReaderTest.java b/test/unit/org/apache/cassandra/io/sstable/SSTableReaderTest.java index 98d9656cec9f..f5fe75eb9cf3 100644 --- a/test/unit/org/apache/cassandra/io/sstable/SSTableReaderTest.java +++ b/test/unit/org/apache/cassandra/io/sstable/SSTableReaderTest.java @@ -286,17 +286,17 @@ public void testReadRateTracking() // With persistence enabled, we should be able to retrieve the state of the meter. 
sstable.maybePersistSSTableReadMeter(); - UntypedResultSet meter = SystemKeyspace.readSSTableActivity(store.keyspace.getName(), store.name, sstable.descriptor.id); + UntypedResultSet meter = SystemKeyspace.readSSTableActivity(store.getKeyspaceName(), store.name, sstable.descriptor.id); assertFalse(meter.isEmpty()); Util.getAll(Util.cmd(store, key).includeRow("0").build()); assertEquals(3, sstable.getReadMeter().count()); // After cleaning existing state and disabling persistence, there should be no meter state to read. - SystemKeyspace.clearSSTableReadMeter(store.keyspace.getName(), store.name, sstable.descriptor.id); + SystemKeyspace.clearSSTableReadMeter(store.getKeyspaceName(), store.name, sstable.descriptor.id); DatabaseDescriptor.setSStableReadRatePersistenceEnabled(false); sstable.maybePersistSSTableReadMeter(); - meter = SystemKeyspace.readSSTableActivity(store.keyspace.getName(), store.name, sstable.descriptor.id); + meter = SystemKeyspace.readSSTableActivity(store.getKeyspaceName(), store.name, sstable.descriptor.id); assertTrue(meter.isEmpty()); } finally diff --git a/test/unit/org/apache/cassandra/metrics/TrieMemtableMetricsTest.java b/test/unit/org/apache/cassandra/metrics/TrieMemtableMetricsTest.java index 62d8f7c754bd..37cf8b1156ea 100644 --- a/test/unit/org/apache/cassandra/metrics/TrieMemtableMetricsTest.java +++ b/test/unit/org/apache/cassandra/metrics/TrieMemtableMetricsTest.java @@ -180,7 +180,7 @@ public void testMetricsCleanupOnDrop() private TrieMemtableMetricsView getMemtableMetrics(ColumnFamilyStore cfs) { - return new TrieMemtableMetricsView(cfs.keyspace.getName(), cfs.name); + return new TrieMemtableMetricsView(cfs.getKeyspaceName(), cfs.name); } private void writeAndFlush(int rows) throws IOException, ExecutionException, InterruptedException diff --git a/test/unit/org/apache/cassandra/repair/ValidatorTest.java b/test/unit/org/apache/cassandra/repair/ValidatorTest.java index 4a3d4fdbfa46..437b360da68c 100644 --- 
a/test/unit/org/apache/cassandra/repair/ValidatorTest.java +++ b/test/unit/org/apache/cassandra/repair/ValidatorTest.java @@ -196,7 +196,7 @@ public void simpleValidationTest(int n) throws Exception SSTableReader sstable = cfs.getLiveSSTables().iterator().next(); TimeUUID repairSessionId = nextTimeUUID(); - final RepairJobDesc desc = new RepairJobDesc(repairSessionId, nextTimeUUID(), cfs.keyspace.getName(), + final RepairJobDesc desc = new RepairJobDesc(repairSessionId, nextTimeUUID(), cfs.getKeyspaceName(), cfs.getTableName(), singletonList(new Range<>(sstable.getFirst().getToken(), sstable.getLast().getToken()))); @@ -253,7 +253,7 @@ public void testSizeLimiting() throws Exception SSTableReader sstable = cfs.getLiveSSTables().iterator().next(); TimeUUID repairSessionId = nextTimeUUID(); - final RepairJobDesc desc = new RepairJobDesc(repairSessionId, nextTimeUUID(), cfs.keyspace.getName(), + final RepairJobDesc desc = new RepairJobDesc(repairSessionId, nextTimeUUID(), cfs.getKeyspaceName(), cfs.getTableName(), singletonList(new Range<>(sstable.getFirst().getToken(), sstable.getLast().getToken()))); @@ -316,7 +316,7 @@ public void testRangeSplittingTreeSizeLimit() throws Exception List> ranges = splitHelper(new Range<>(sstable.getFirst().getToken(), sstable.getLast().getToken()), 2); - final RepairJobDesc desc = new RepairJobDesc(repairSessionId, nextTimeUUID(), cfs.keyspace.getName(), + final RepairJobDesc desc = new RepairJobDesc(repairSessionId, nextTimeUUID(), cfs.getKeyspaceName(), cfs.getTableName(), ranges); InetAddressAndPort host = InetAddressAndPort.getByName("127.0.0.2"); diff --git a/test/unit/org/apache/cassandra/schema/MockSchema.java b/test/unit/org/apache/cassandra/schema/MockSchema.java index c806049ef772..5d8b7c1dc7c1 100644 --- a/test/unit/org/apache/cassandra/schema/MockSchema.java +++ b/test/unit/org/apache/cassandra/schema/MockSchema.java @@ -179,7 +179,7 @@ public static SSTableReader sstable(int generation, int size, boolean keepRef, l { 
SSTableFormat format = DatabaseDescriptor.getSelectedSSTableFormat(); Descriptor descriptor = new Descriptor(cfs.getDirectories().getDirectoryForNewSSTables(), - cfs.keyspace.getName(), + cfs.getKeyspaceName(), cfs.getTableName(), sstableId(generation), format); diff --git a/test/unit/org/apache/cassandra/streaming/StreamingTransferTest.java b/test/unit/org/apache/cassandra/streaming/StreamingTransferTest.java index 0527648968d1..ebd392ced023 100644 --- a/test/unit/org/apache/cassandra/streaming/StreamingTransferTest.java +++ b/test/unit/org/apache/cassandra/streaming/StreamingTransferTest.java @@ -241,13 +241,13 @@ private void transferRanges(ColumnFamilyStore cfs) throws Exception List> ranges = new ArrayList<>(); // wrapped range ranges.add(new Range(p.getToken(ByteBufferUtil.bytes("key1")), p.getToken(ByteBufferUtil.bytes("key0")))); - StreamPlan streamPlan = new StreamPlan(StreamOperation.OTHER).transferRanges(LOCAL, cfs.keyspace.getName(), RangesAtEndpoint.toDummyList(ranges), cfs.getTableName()); + StreamPlan streamPlan = new StreamPlan(StreamOperation.OTHER).transferRanges(LOCAL, cfs.getKeyspaceName(), RangesAtEndpoint.toDummyList(ranges), cfs.getTableName()); streamPlan.execute().get(); //cannot add ranges after stream session is finished try { - streamPlan.transferRanges(LOCAL, cfs.keyspace.getName(), RangesAtEndpoint.toDummyList(ranges), cfs.getTableName()); + streamPlan.transferRanges(LOCAL, cfs.getKeyspaceName(), RangesAtEndpoint.toDummyList(ranges), cfs.getTableName()); fail("Should have thrown exception"); } catch (RuntimeException e) From 3be1fd4f0808f604b46818c0adbdd483eb9e50b8 Mon Sep 17 00:00:00 2001 From: Branimir Lambov Date: Fri, 17 Mar 2023 14:56:34 +0200 Subject: [PATCH 06/27] Use SSTableReader.getMaxLocalDeletionTime --- .../db/compaction/CompactionController.java | 10 ++++----- .../tools/SSTableExpiredBlockers.java | 6 ++--- .../io/sstable/SSTableMetadataTest.java | 22 +++++++++---------- 3 files changed, 19 insertions(+), 19 deletions(-) 
diff --git a/src/java/org/apache/cassandra/db/compaction/CompactionController.java b/src/java/org/apache/cassandra/db/compaction/CompactionController.java index 69f31996c341..eadd85c3fdb0 100644 --- a/src/java/org/apache/cassandra/db/compaction/CompactionController.java +++ b/src/java/org/apache/cassandra/db/compaction/CompactionController.java @@ -173,11 +173,11 @@ public static Set getFullyExpiredSSTables(ColumnFamilyStore cfSto Set fullyExpired = new HashSet<>(); for (SSTableReader candidate : compacting) { - if (candidate.getSSTableMetadata().maxLocalDeletionTime < gcBefore) + if (candidate.getMaxLocalDeletionTime() < gcBefore) { fullyExpired.add(candidate); logger.trace("Dropping overlap ignored expired SSTable {} (maxLocalDeletionTime={}, gcBefore={})", - candidate, candidate.getSSTableMetadata().maxLocalDeletionTime, gcBefore); + candidate, candidate.getMaxLocalDeletionTime(), gcBefore); } } return fullyExpired; @@ -190,13 +190,13 @@ public static Set getFullyExpiredSSTables(ColumnFamilyStore cfSto { // Overlapping might include fully expired sstables. What we care about here is // the min timestamp of the overlapping sstables that actually contain live data. 
- if (sstable.getSSTableMetadata().maxLocalDeletionTime >= gcBefore) + if (sstable.getMaxLocalDeletionTime() >= gcBefore) minTimestamp = Math.min(minTimestamp, sstable.getMinTimestamp()); } for (SSTableReader candidate : compacting) { - if (candidate.getSSTableMetadata().maxLocalDeletionTime < gcBefore) + if (candidate.getMaxLocalDeletionTime() < gcBefore) candidates.add(candidate); else minTimestamp = Math.min(minTimestamp, candidate.getMinTimestamp()); @@ -224,7 +224,7 @@ public static Set getFullyExpiredSSTables(ColumnFamilyStore cfSto else { logger.trace("Dropping expired SSTable {} (maxLocalDeletionTime={}, gcBefore={})", - candidate, candidate.getSSTableMetadata().maxLocalDeletionTime, gcBefore); + candidate, candidate.getMaxLocalDeletionTime(), gcBefore); } } return new HashSet<>(candidates); diff --git a/src/java/org/apache/cassandra/tools/SSTableExpiredBlockers.java b/src/java/org/apache/cassandra/tools/SSTableExpiredBlockers.java index abf839cf755b..74204fcdeef8 100644 --- a/src/java/org/apache/cassandra/tools/SSTableExpiredBlockers.java +++ b/src/java/org/apache/cassandra/tools/SSTableExpiredBlockers.java @@ -108,13 +108,13 @@ public static Multimap checkForExpiredSSTableBlock Multimap blockers = ArrayListMultimap.create(); for (SSTableReader sstable : sstables) { - if (sstable.getSSTableMetadata().maxLocalDeletionTime < gcBefore) + if (sstable.getMaxLocalDeletionTime() < gcBefore) { for (SSTableReader potentialBlocker : sstables) { if (!potentialBlocker.equals(sstable) && potentialBlocker.getMinTimestamp() <= sstable.getMaxTimestamp() && - potentialBlocker.getSSTableMetadata().maxLocalDeletionTime > gcBefore) + potentialBlocker.getMaxLocalDeletionTime() > gcBefore) blockers.put(potentialBlocker, sstable); } } @@ -127,7 +127,7 @@ private static String formatForExpiryTracing(Iterable sstables) StringBuilder sb = new StringBuilder(); for (SSTableReader sstable : sstables) - sb.append(String.format("[%s (minTS = %d, maxTS = %d, maxLDT = %d)]", sstable, 
sstable.getMinTimestamp(), sstable.getMaxTimestamp(), sstable.getSSTableMetadata().maxLocalDeletionTime)).append(", "); + sb.append(String.format("[%s (minTS = %d, maxTS = %d, maxLDT = %d)]", sstable, sstable.getMinTimestamp(), sstable.getMaxTimestamp(), sstable.getMaxLocalDeletionTime())).append(", "); return sb.toString(); } diff --git a/test/unit/org/apache/cassandra/io/sstable/SSTableMetadataTest.java b/test/unit/org/apache/cassandra/io/sstable/SSTableMetadataTest.java index f66e3dd82812..706de6bf74c4 100644 --- a/test/unit/org/apache/cassandra/io/sstable/SSTableMetadataTest.java +++ b/test/unit/org/apache/cassandra/io/sstable/SSTableMetadataTest.java @@ -104,7 +104,7 @@ public void testTrackMaxDeletionTime() long firstDelTime = 0; for (SSTableReader sstable : store.getLiveSSTables()) { - firstDelTime = sstable.getSSTableMetadata().maxLocalDeletionTime; + firstDelTime = sstable.getMaxLocalDeletionTime(); assertEquals(ttltimestamp + 10000, firstDelTime, DELTA); } @@ -119,22 +119,22 @@ public void testTrackMaxDeletionTime() Util.flush(store); assertEquals(2, store.getLiveSSTables().size()); List sstables = new ArrayList<>(store.getLiveSSTables()); - if (sstables.get(0).getSSTableMetadata().maxLocalDeletionTime < sstables.get(1).getSSTableMetadata().maxLocalDeletionTime) + if (sstables.get(0).getMaxLocalDeletionTime() < sstables.get(1).getMaxLocalDeletionTime()) { - assertEquals(sstables.get(0).getSSTableMetadata().maxLocalDeletionTime, firstDelTime); - assertEquals(sstables.get(1).getSSTableMetadata().maxLocalDeletionTime, ttltimestamp + 20000, DELTA); + assertEquals(sstables.get(0).getMaxLocalDeletionTime(), firstDelTime); + assertEquals(sstables.get(1).getMaxLocalDeletionTime(), ttltimestamp + 20000, DELTA); } else { - assertEquals(sstables.get(1).getSSTableMetadata().maxLocalDeletionTime, firstDelTime); - assertEquals(sstables.get(0).getSSTableMetadata().maxLocalDeletionTime, ttltimestamp + 20000, DELTA); + 
assertEquals(sstables.get(1).getMaxLocalDeletionTime(), firstDelTime); + assertEquals(sstables.get(0).getMaxLocalDeletionTime(), ttltimestamp + 20000, DELTA); } Util.compact(store, store.getLiveSSTables()); assertEquals(1, store.getLiveSSTables().size()); for (SSTableReader sstable : store.getLiveSSTables()) { - assertEquals(sstable.getSSTableMetadata().maxLocalDeletionTime, ttltimestamp + 20000, DELTA); + assertEquals(sstable.getMaxLocalDeletionTime(), ttltimestamp + 20000, DELTA); } } @@ -172,7 +172,7 @@ public void testWithDeletes() long firstMaxDelTime = 0; for (SSTableReader sstable : store.getLiveSSTables()) { - firstMaxDelTime = sstable.getSSTableMetadata().maxLocalDeletionTime; + firstMaxDelTime = sstable.getMaxLocalDeletionTime(); assertEquals(ttltimestamp + 1000, firstMaxDelTime, DELTA); } @@ -183,9 +183,9 @@ public void testWithDeletes() boolean foundDelete = false; for (SSTableReader sstable : store.getLiveSSTables()) { - if (sstable.getSSTableMetadata().maxLocalDeletionTime != firstMaxDelTime) + if (sstable.getMaxLocalDeletionTime() != firstMaxDelTime) { - assertEquals(sstable.getSSTableMetadata().maxLocalDeletionTime, ttltimestamp, DELTA); + assertEquals(sstable.getMaxLocalDeletionTime(), ttltimestamp, DELTA); foundDelete = true; } } @@ -194,7 +194,7 @@ public void testWithDeletes() assertEquals(1, store.getLiveSSTables().size()); for (SSTableReader sstable : store.getLiveSSTables()) { - assertEquals(ttltimestamp + 100, sstable.getSSTableMetadata().maxLocalDeletionTime, DELTA); + assertEquals(ttltimestamp + 100, sstable.getMaxLocalDeletionTime(), DELTA); } } From f9a2d0e2a7e87bf3567e773dd8ec36678e317d83 Mon Sep 17 00:00:00 2001 From: Branimir Lambov Date: Fri, 17 Mar 2023 14:58:19 +0200 Subject: [PATCH 07/27] Remove unused version of SSTableMultiWriter.finish --- .../io/sstable/RangeAwareSSTableWriter.java | 16 ---------------- .../cassandra/io/sstable/SSTableMultiWriter.java | 1 - .../io/sstable/SSTableZeroCopyWriter.java | 6 ------ 
.../io/sstable/SimpleSSTableMultiWriter.java | 7 ------- 4 files changed, 30 deletions(-) diff --git a/src/java/org/apache/cassandra/io/sstable/RangeAwareSSTableWriter.java b/src/java/org/apache/cassandra/io/sstable/RangeAwareSSTableWriter.java index a3e76166b4eb..61367a739206 100644 --- a/src/java/org/apache/cassandra/io/sstable/RangeAwareSSTableWriter.java +++ b/src/java/org/apache/cassandra/io/sstable/RangeAwareSSTableWriter.java @@ -107,22 +107,6 @@ public boolean append(UnfilteredRowIterator partition) return currentWriter.append(partition); } - @Override - public Collection finish(long repairedAt, long maxDataAge, boolean openResult) - { - if (currentWriter != null) - finishedWriters.add(currentWriter); - currentWriter = null; - for (SSTableMultiWriter writer : finishedWriters) - { - if (writer.getFilePointer() > 0) - finishedReaders.addAll(writer.finish(repairedAt, maxDataAge, openResult)); - else - SSTableMultiWriter.abortOrDie(writer); - } - return finishedReaders; - } - @Override public Collection finish(boolean openResult) { diff --git a/src/java/org/apache/cassandra/io/sstable/SSTableMultiWriter.java b/src/java/org/apache/cassandra/io/sstable/SSTableMultiWriter.java index 1be79abf8ba8..6948efc894ab 100644 --- a/src/java/org/apache/cassandra/io/sstable/SSTableMultiWriter.java +++ b/src/java/org/apache/cassandra/io/sstable/SSTableMultiWriter.java @@ -36,7 +36,6 @@ public interface SSTableMultiWriter extends Transactional */ boolean append(UnfilteredRowIterator partition); - Collection finish(long repairedAt, long maxDataAge, boolean openResult); Collection finish(boolean openResult); Collection finished(); diff --git a/src/java/org/apache/cassandra/io/sstable/SSTableZeroCopyWriter.java b/src/java/org/apache/cassandra/io/sstable/SSTableZeroCopyWriter.java index 91e6490971cb..034d46b0496c 100644 --- a/src/java/org/apache/cassandra/io/sstable/SSTableZeroCopyWriter.java +++ b/src/java/org/apache/cassandra/io/sstable/SSTableZeroCopyWriter.java @@ -121,12 
+121,6 @@ public boolean append(UnfilteredRowIterator partition) throw new UnsupportedOperationException(); } - @Override - public Collection finish(long repairedAt, long maxDataAge, boolean openResult) - { - return finish(openResult); - } - @Override public Collection finish(boolean openResult) { diff --git a/src/java/org/apache/cassandra/io/sstable/SimpleSSTableMultiWriter.java b/src/java/org/apache/cassandra/io/sstable/SimpleSSTableMultiWriter.java index 381c5eb33f96..baa38338973b 100644 --- a/src/java/org/apache/cassandra/io/sstable/SimpleSSTableMultiWriter.java +++ b/src/java/org/apache/cassandra/io/sstable/SimpleSSTableMultiWriter.java @@ -48,13 +48,6 @@ public boolean append(UnfilteredRowIterator partition) return indexEntry != null; } - public Collection finish(long repairedAt, long maxDataAge, boolean openResult) - { - writer.setRepairedAt(repairedAt); - writer.setMaxDataAge(maxDataAge); - return Collections.singleton(writer.finish(openResult)); - } - public Collection finish(boolean openResult) { return Collections.singleton(writer.finish(openResult)); From a0d68b6ecbe484adc81ccfad06853518506977ab Mon Sep 17 00:00:00 2001 From: Branimir Lambov Date: Mon, 27 Feb 2023 17:33:43 +0200 Subject: [PATCH 08/27] Fix code repetition in compaction writers --- .../db/compaction/CompactionManager.java | 2 +- .../writers/CompactionAwareWriter.java | 83 ++++++++++++++++--- .../writers/DefaultCompactionWriter.java | 29 ++----- .../writers/MajorLeveledCompactionWriter.java | 44 +++++----- .../writers/MaxSSTableSizeWriter.java | 38 +++------ .../SplittingSizeTieredCompactionWriter.java | 35 +++----- .../sstable/metadata/MetadataCollector.java | 3 +- .../db/compaction/CompactionsCQLTest.java | 10 ++- 8 files changed, 127 insertions(+), 117 deletions(-) diff --git a/src/java/org/apache/cassandra/db/compaction/CompactionManager.java b/src/java/org/apache/cassandra/db/compaction/CompactionManager.java index c927db0d8044..a32242d7c429 100644 --- 
a/src/java/org/apache/cassandra/db/compaction/CompactionManager.java +++ b/src/java/org/apache/cassandra/db/compaction/CompactionManager.java @@ -1661,7 +1661,7 @@ public static SSTableWriter createWriterForAntiCompaction(ColumnFamilyStore cfs, .setPendingRepair(pendingRepair) .setTransientSSTable(isTransient) .setTableMetadataRef(cfs.metadata) - .setMetadataCollector(new MetadataCollector(sstables, cfs.metadata().comparator, minLevel)) + .setMetadataCollector(new MetadataCollector(sstables, cfs.metadata().comparator).sstableLevel(minLevel)) .setSerializationHeader(SerializationHeader.make(cfs.metadata(), sstables)) .addDefaultComponents() .addFlushObserversForSecondaryIndexes(cfs.indexManager.listIndexes(), txn.opType()) diff --git a/src/java/org/apache/cassandra/db/compaction/writers/CompactionAwareWriter.java b/src/java/org/apache/cassandra/db/compaction/writers/CompactionAwareWriter.java index 451a78d3946e..82c56eeb2b9e 100644 --- a/src/java/org/apache/cassandra/db/compaction/writers/CompactionAwareWriter.java +++ b/src/java/org/apache/cassandra/db/compaction/writers/CompactionAwareWriter.java @@ -68,9 +68,7 @@ public abstract class CompactionAwareWriter extends Transactional.AbstractTransa private final List locations; private final List diskBoundaries; private int locationIndex; - - // Keep targetDirectory for compactions, needed for `nodetool compactionstats` - protected Directories.DataDirectory sstableDirectory; + protected Directories.DataDirectory currentDirectory; public CompactionAwareWriter(ColumnFamilyStore cfs, Directories directories, @@ -145,7 +143,7 @@ public final boolean append(UnfilteredRowIterator partition) public final File getSStableDirectory() throws IOException { - return getDirectories().getLocationForDisk(sstableDirectory); + return getDirectories().getLocationForDisk(currentDirectory); } @Override @@ -155,43 +153,102 @@ protected Throwable doPostCleanup(Throwable accumulate) return super.doPostCleanup(accumulate); } - protected 
abstract boolean realAppend(UnfilteredRowIterator partition); + protected boolean realAppend(UnfilteredRowIterator partition) + { + return sstableWriter.append(partition) != null; + } /** + * Switches the writer if necessary, i.e. if the new key should be placed in a different data directory, or if the + * specific strategy has decided a new sstable is needed. * Guaranteed to be called before the first call to realAppend. - * @param key */ protected void maybeSwitchWriter(DecoratedKey key) + { + if (maybeSwitchLocation(key)) + return; + + if (shouldSwitchWriterInCurrentLocation(key)) + switchCompactionWriter(currentDirectory, key); + } + + /** + * Switches the file location and writer and returns true if the new key should be placed in a different data + * directory. + */ + protected boolean maybeSwitchLocation(DecoratedKey key) { if (diskBoundaries == null) { if (locationIndex < 0) { Directories.DataDirectory defaultLocation = getWriteDirectory(nonExpiredSSTables, getExpectedWriteSize()); - switchCompactionLocation(defaultLocation); + switchCompactionWriter(defaultLocation, key); locationIndex = 0; + return true; } - return; + return false; } if (locationIndex > -1 && key.compareTo(diskBoundaries.get(locationIndex)) < 0) - return; + return false; int prevIdx = locationIndex; while (locationIndex == -1 || key.compareTo(diskBoundaries.get(locationIndex)) > 0) locationIndex++; + Directories.DataDirectory newLocation = locations.get(locationIndex); if (prevIdx >= 0) - logger.debug("Switching write location from {} to {}", locations.get(prevIdx), locations.get(locationIndex)); - switchCompactionLocation(locations.get(locationIndex)); + logger.debug("Switching write location from {} to {}", locations.get(prevIdx), newLocation); + switchCompactionWriter(newLocation, key); + return true; } + /** + * Returns true if the writer should be switched for reasons other than switching to a new data directory + * (e.g. because an sstable size limit has been reached). 
+ */ + protected abstract boolean shouldSwitchWriterInCurrentLocation(DecoratedKey key); + /** * Implementations of this method should finish the current sstable writer and start writing to this directory. - * + *

* Called once before starting to append and then whenever we see a need to start writing to another directory. + * * @param directory + * @param nextKey + */ + protected void switchCompactionWriter(Directories.DataDirectory directory, DecoratedKey nextKey) + { + currentDirectory = directory; + sstableWriter.switchWriter(sstableWriter(directory, nextKey)); + } + + @SuppressWarnings("resource") + protected SSTableWriter sstableWriter(Directories.DataDirectory directory, DecoratedKey nextKey) + { + Descriptor descriptor = cfs.newSSTableDescriptor(getDirectories().getLocationForDisk(directory)); + MetadataCollector collector = new MetadataCollector(txn.originals(), cfs.metadata().comparator) + .sstableLevel(sstableLevel()); + SerializationHeader header = SerializationHeader.make(cfs.metadata(), nonExpiredSSTables); + + return newWriterBuilder(descriptor).setMetadataCollector(collector) + .setSerializationHeader(header) + .setKeyCount(sstableKeyCount()) + .build(txn, cfs); + } + + /** + * Returns the level that should be used when creating sstables. + */ + protected int sstableLevel() + { + return 0; + } + + /** + * Returns the key count with which created sstables should be set up. 
*/ - protected abstract void switchCompactionLocation(Directories.DataDirectory directory); + abstract protected long sstableKeyCount(); /** * The directories we can write to diff --git a/src/java/org/apache/cassandra/db/compaction/writers/DefaultCompactionWriter.java b/src/java/org/apache/cassandra/db/compaction/writers/DefaultCompactionWriter.java index f9858473e249..6c049b2343df 100644 --- a/src/java/org/apache/cassandra/db/compaction/writers/DefaultCompactionWriter.java +++ b/src/java/org/apache/cassandra/db/compaction/writers/DefaultCompactionWriter.java @@ -24,14 +24,10 @@ import org.slf4j.LoggerFactory; import org.apache.cassandra.db.ColumnFamilyStore; +import org.apache.cassandra.db.DecoratedKey; import org.apache.cassandra.db.Directories; -import org.apache.cassandra.db.SerializationHeader; import org.apache.cassandra.db.lifecycle.LifecycleTransaction; -import org.apache.cassandra.db.rows.UnfilteredRowIterator; -import org.apache.cassandra.io.sstable.Descriptor; import org.apache.cassandra.io.sstable.format.SSTableReader; -import org.apache.cassandra.io.sstable.format.SSTableWriter; -import org.apache.cassandra.io.sstable.metadata.MetadataCollector; /** * The default compaction writer - creates one output file in L0 @@ -60,30 +56,17 @@ public DefaultCompactionWriter(ColumnFamilyStore cfs, Directories directories, L } @Override - public boolean realAppend(UnfilteredRowIterator partition) + protected boolean shouldSwitchWriterInCurrentLocation(DecoratedKey key) { - return sstableWriter.append(partition) != null; + return false; } - @Override - public void switchCompactionLocation(Directories.DataDirectory directory) + protected int sstableLevel() { - sstableDirectory = directory; - - Descriptor descriptor = cfs.newSSTableDescriptor(getDirectories().getLocationForDisk(directory)); - MetadataCollector collector = new MetadataCollector(txn.originals(), cfs.metadata().comparator, sstableLevel); - SerializationHeader header = 
SerializationHeader.make(cfs.metadata(), nonExpiredSSTables); - - @SuppressWarnings("resource") - SSTableWriter writer = newWriterBuilder(descriptor).setMetadataCollector(collector) - .setSerializationHeader(header) - .setKeyCount(estimatedTotalKeys) - .build(txn, cfs); - sstableWriter.switchWriter(writer); + return sstableLevel; } - @Override - public long estimatedKeys() + protected long sstableKeyCount() { return estimatedTotalKeys; } diff --git a/src/java/org/apache/cassandra/db/compaction/writers/MajorLeveledCompactionWriter.java b/src/java/org/apache/cassandra/db/compaction/writers/MajorLeveledCompactionWriter.java index 38e2d2d2c995..a9f2627b2b20 100644 --- a/src/java/org/apache/cassandra/db/compaction/writers/MajorLeveledCompactionWriter.java +++ b/src/java/org/apache/cassandra/db/compaction/writers/MajorLeveledCompactionWriter.java @@ -20,16 +20,12 @@ import java.util.Set; import org.apache.cassandra.db.ColumnFamilyStore; +import org.apache.cassandra.db.DecoratedKey; import org.apache.cassandra.db.Directories; -import org.apache.cassandra.db.SerializationHeader; import org.apache.cassandra.db.compaction.LeveledManifest; import org.apache.cassandra.db.lifecycle.LifecycleTransaction; import org.apache.cassandra.db.rows.UnfilteredRowIterator; -import org.apache.cassandra.io.sstable.AbstractRowIndexEntry; -import org.apache.cassandra.io.sstable.Descriptor; import org.apache.cassandra.io.sstable.format.SSTableReader; -import org.apache.cassandra.io.sstable.format.SSTableWriter; -import org.apache.cassandra.io.sstable.metadata.MetadataCollector; public class MajorLeveledCompactionWriter extends CompactionAwareWriter { @@ -67,11 +63,15 @@ public MajorLeveledCompactionWriter(ColumnFamilyStore cfs, } @Override - @SuppressWarnings("resource") public boolean realAppend(UnfilteredRowIterator partition) { - AbstractRowIndexEntry rie = sstableWriter.append(partition); partitionsWritten++; + return super.realAppend(partition); + } + + @Override + protected boolean 
shouldSwitchWriterInCurrentLocation(DecoratedKey key) + { long totalWrittenInCurrentWriter = sstableWriter.currentWriter().getEstimatedOnDiskBytesWritten(); if (totalWrittenInCurrentWriter > maxSSTableSize) { @@ -81,31 +81,29 @@ public boolean realAppend(UnfilteredRowIterator partition) totalWrittenInLevel = 0; currentLevel++; } - switchCompactionLocation(sstableDirectory); + return true; } - return rie != null; + return false; } @Override - public void switchCompactionLocation(Directories.DataDirectory location) + public void switchCompactionWriter(Directories.DataDirectory location, DecoratedKey nextKey) { - sstableDirectory = location; averageEstimatedKeysPerSSTable = Math.round(((double) averageEstimatedKeysPerSSTable * sstablesWritten + partitionsWritten) / (sstablesWritten + 1)); - - Descriptor descriptor = cfs.newSSTableDescriptor(getDirectories().getLocationForDisk(sstableDirectory)); - MetadataCollector collector = new MetadataCollector(txn.originals(), cfs.metadata().comparator, currentLevel); - SerializationHeader serializationHeader = SerializationHeader.make(cfs.metadata(), txn.originals()); - - @SuppressWarnings("resource") - SSTableWriter writer = newWriterBuilder(descriptor).setKeyCount(keysPerSSTable) - .setSerializationHeader(serializationHeader) - .setMetadataCollector(collector) - .build(txn, cfs); - - sstableWriter.switchWriter(writer); partitionsWritten = 0; sstablesWritten = 0; + super.switchCompactionWriter(location, nextKey); + } + + protected int sstableLevel() + { + return currentLevel; + } + + protected long sstableKeyCount() + { + return keysPerSSTable; } @Override diff --git a/src/java/org/apache/cassandra/db/compaction/writers/MaxSSTableSizeWriter.java b/src/java/org/apache/cassandra/db/compaction/writers/MaxSSTableSizeWriter.java index 834bda9e2308..1ded2128e77d 100644 --- a/src/java/org/apache/cassandra/db/compaction/writers/MaxSSTableSizeWriter.java +++ b/src/java/org/apache/cassandra/db/compaction/writers/MaxSSTableSizeWriter.java 
@@ -20,23 +20,17 @@ import java.util.Set; import org.apache.cassandra.db.ColumnFamilyStore; +import org.apache.cassandra.db.DecoratedKey; import org.apache.cassandra.db.Directories; -import org.apache.cassandra.db.SerializationHeader; import org.apache.cassandra.db.compaction.OperationType; import org.apache.cassandra.db.lifecycle.LifecycleTransaction; -import org.apache.cassandra.db.rows.UnfilteredRowIterator; -import org.apache.cassandra.io.sstable.AbstractRowIndexEntry; -import org.apache.cassandra.io.sstable.Descriptor; import org.apache.cassandra.io.sstable.format.SSTableReader; -import org.apache.cassandra.io.sstable.format.SSTableWriter; -import org.apache.cassandra.io.sstable.metadata.MetadataCollector; public class MaxSSTableSizeWriter extends CompactionAwareWriter { private final long maxSSTableSize; private final int level; private final long estimatedSSTables; - private final Set allSSTables; public MaxSSTableSizeWriter(ColumnFamilyStore cfs, Directories directories, @@ -57,7 +51,6 @@ public MaxSSTableSizeWriter(ColumnFamilyStore cfs, boolean keepOriginals) { super(cfs, directories, txn, nonExpiredSSTables, keepOriginals); - this.allSSTables = txn.originals(); this.level = level; this.maxSSTableSize = maxSSTableSize; @@ -79,31 +72,20 @@ private static long getTotalWriteSize(Iterable nonExpiredSSTables return Math.round(estimatedCompactionRatio * cfs.getExpectedCompactedFileSize(nonExpiredSSTables, compactionType)); } - protected boolean realAppend(UnfilteredRowIterator partition) + @Override + protected boolean shouldSwitchWriterInCurrentLocation(DecoratedKey key) { - AbstractRowIndexEntry rie = sstableWriter.append(partition); - if (sstableWriter.currentWriter().getEstimatedOnDiskBytesWritten() > maxSSTableSize) - { - switchCompactionLocation(sstableDirectory); - } - return rie != null; + return sstableWriter.currentWriter().getEstimatedOnDiskBytesWritten() > maxSSTableSize; } - @Override - public void switchCompactionLocation(Directories.DataDirectory 
location) + protected int sstableLevel() { - sstableDirectory = location; - - Descriptor descriptor = cfs.newSSTableDescriptor(getDirectories().getLocationForDisk(sstableDirectory)); - MetadataCollector collector = new MetadataCollector(allSSTables, cfs.metadata().comparator, level); - SerializationHeader header = SerializationHeader.make(cfs.metadata(), nonExpiredSSTables); + return level; + } - @SuppressWarnings("resource") - SSTableWriter writer = newWriterBuilder(descriptor).setKeyCount(estimatedTotalKeys / estimatedSSTables) - .setMetadataCollector(collector) - .setSerializationHeader(header) - .build(txn, cfs); - sstableWriter.switchWriter(writer); + protected long sstableKeyCount() + { + return estimatedTotalKeys / estimatedSSTables; } @Override diff --git a/src/java/org/apache/cassandra/db/compaction/writers/SplittingSizeTieredCompactionWriter.java b/src/java/org/apache/cassandra/db/compaction/writers/SplittingSizeTieredCompactionWriter.java index 458d80a71a04..4cd0858e18a4 100644 --- a/src/java/org/apache/cassandra/db/compaction/writers/SplittingSizeTieredCompactionWriter.java +++ b/src/java/org/apache/cassandra/db/compaction/writers/SplittingSizeTieredCompactionWriter.java @@ -24,15 +24,10 @@ import org.slf4j.LoggerFactory; import org.apache.cassandra.db.ColumnFamilyStore; +import org.apache.cassandra.db.DecoratedKey; import org.apache.cassandra.db.Directories; -import org.apache.cassandra.db.SerializationHeader; import org.apache.cassandra.db.lifecycle.LifecycleTransaction; -import org.apache.cassandra.db.rows.UnfilteredRowIterator; -import org.apache.cassandra.io.sstable.AbstractRowIndexEntry; -import org.apache.cassandra.io.sstable.Descriptor; import org.apache.cassandra.io.sstable.format.SSTableReader; -import org.apache.cassandra.io.sstable.format.SSTableWriter; -import org.apache.cassandra.io.sstable.metadata.MetadataCollector; /** * CompactionAwareWriter that splits input in differently sized sstables @@ -84,36 +79,28 @@ public 
SplittingSizeTieredCompactionWriter(ColumnFamilyStore cfs, Directories di } @Override - public boolean realAppend(UnfilteredRowIterator partition) + protected boolean shouldSwitchWriterInCurrentLocation(DecoratedKey key) { - AbstractRowIndexEntry rie = sstableWriter.append(partition); if (sstableWriter.currentWriter().getEstimatedOnDiskBytesWritten() > currentBytesToWrite && currentRatioIndex < ratios.length - 1) // if we underestimate how many keys we have, the last sstable might get more than we expect { currentRatioIndex++; - currentBytesToWrite = getExpectedWriteSize(); - switchCompactionLocation(sstableDirectory); logger.debug("Switching writer, currentBytesToWrite = {}", currentBytesToWrite); + return true; } - return rie != null; + return false; } - @Override - public void switchCompactionLocation(Directories.DataDirectory location) + protected int sstableLevel() { - sstableDirectory = location; - long currentPartitionsToWrite = Math.round(ratios[currentRatioIndex] * estimatedTotalKeys); - Descriptor descriptor = cfs.newSSTableDescriptor(getDirectories().getLocationForDisk(location)); - MetadataCollector collector = new MetadataCollector(allSSTables, cfs.metadata().comparator, 0); - SerializationHeader header = SerializationHeader.make(cfs.metadata(), nonExpiredSSTables); + return 0; + } - @SuppressWarnings("resource") - SSTableWriter writer = newWriterBuilder(descriptor).setKeyCount(currentPartitionsToWrite) - .setMetadataCollector(collector) - .setSerializationHeader(header) - .build(txn, cfs); + protected long sstableKeyCount() + { + long currentPartitionsToWrite = Math.round(ratios[currentRatioIndex] * estimatedTotalKeys); logger.trace("Switching writer, currentPartitionsToWrite = {}", currentPartitionsToWrite); - sstableWriter.switchWriter(writer); + return currentPartitionsToWrite; } @Override diff --git a/src/java/org/apache/cassandra/io/sstable/metadata/MetadataCollector.java b/src/java/org/apache/cassandra/io/sstable/metadata/MetadataCollector.java 
index 20b9b262f7c2..409b4e317b75 100644 --- a/src/java/org/apache/cassandra/io/sstable/metadata/MetadataCollector.java +++ b/src/java/org/apache/cassandra/io/sstable/metadata/MetadataCollector.java @@ -159,7 +159,7 @@ public MetadataCollector(ClusteringComparator comparator, UUID originatingHostId this.originatingHostId = originatingHostId; } - public MetadataCollector(Iterable sstables, ClusteringComparator comparator, int level) + public MetadataCollector(Iterable sstables, ClusteringComparator comparator) { this(comparator); @@ -173,7 +173,6 @@ public MetadataCollector(Iterable sstables, ClusteringComparator } } commitLogIntervals(intervals.build()); - sstableLevel(level); } public MetadataCollector addKey(ByteBuffer key) diff --git a/test/unit/org/apache/cassandra/db/compaction/CompactionsCQLTest.java b/test/unit/org/apache/cassandra/db/compaction/CompactionsCQLTest.java index 52ff8a82548c..3ad042b22d38 100644 --- a/test/unit/org/apache/cassandra/db/compaction/CompactionsCQLTest.java +++ b/test/unit/org/apache/cassandra/db/compaction/CompactionsCQLTest.java @@ -40,6 +40,7 @@ import org.apache.cassandra.cql3.CQLTester; import org.apache.cassandra.cql3.UntypedResultSet; import org.apache.cassandra.db.ColumnFamilyStore; +import org.apache.cassandra.db.DecoratedKey; import org.apache.cassandra.db.Directories; import org.apache.cassandra.db.Keyspace; import org.apache.cassandra.db.RowUpdateBuilder; @@ -55,6 +56,7 @@ import org.apache.cassandra.io.sstable.ISSTableScanner; import org.apache.cassandra.io.sstable.LegacySSTableTest; import org.apache.cassandra.io.sstable.format.SSTableReader; +import org.apache.cassandra.io.sstable.format.SSTableWriter; import org.apache.cassandra.io.util.File; import org.apache.cassandra.io.util.PathUtils; import org.apache.cassandra.schema.CompactionParams; @@ -346,7 +348,7 @@ public void testCompactionInvalidPartitionDeletion() throws Throwable // PartitionUpdate pu = PartitionUpdate.simpleBuilder(cfs.metadata(), 
22).nowInSec(-1).delete().build(); // new Mutation(pu).apply(); // flush(); -// +// // // Store sstables for later use // StorageService.instance.forceKeyspaceFlush(cfs.keyspace.getName(), ColumnFamilyStore.FlushReason.UNIT_TESTS); // File ksDir = new File("test/data/negative-ldts-invalid-deletions-test/"); @@ -618,12 +620,14 @@ public CompactionAwareWriter getCompactionAwareWriter(ColumnFamilyStore cfs, return new MaxSSTableSizeWriter(cfs, directories, txn, nonExpiredSSTables, 1 << 20, 1) { int switchCount = 0; - public void switchCompactionLocation(Directories.DataDirectory directory) + + @Override + public SSTableWriter sstableWriter(Directories.DataDirectory directory, DecoratedKey nextKey) { switchCount++; if (switchCount > 5) throw new RuntimeException("Throw after a few sstables have had their starts moved"); - super.switchCompactionLocation(directory); + return super.sstableWriter(directory, nextKey); } }; } From 1b0639686772bba0336f4894ec61244694876925 Mon Sep 17 00:00:00 2001 From: Branimir Lambov Date: Thu, 5 Jan 2023 13:21:23 +0000 Subject: [PATCH 09/27] CASSANDRA-18123: Fix invalid reuse of metadata collector during flushing If a compaction strategy splits sstables during flush, this can cause the cardinality information of an sstable to include more keys than it actually has, resulting in grossly overestimated key counts when picking a bloom filter size; the latter can result in several times larger bloom filters on L1 than they should be. 
--- .../cassandra/db/ColumnFamilyStore.java | 20 +++++++---- .../AbstractCompactionStrategy.java | 17 ++++++++-- .../db/compaction/AbstractStrategyHolder.java | 6 ++-- .../compaction/CompactionStrategyHolder.java | 9 +++-- .../compaction/CompactionStrategyManager.java | 11 ++++--- .../db/compaction/PendingRepairHolder.java | 9 +++-- .../cassandra/db/memtable/Flushing.java | 8 ++--- .../sstable/AbstractSSTableSimpleWriter.java | 3 +- .../io/sstable/RangeAwareSSTableWriter.java | 4 +-- .../io/sstable/SSTableTxnWriter.java | 17 +++------- .../io/sstable/SimpleSSTableMultiWriter.java | 8 ++++- .../db/memtable/MemtableQuickTest.java | 33 +++++++++++++++++++ .../cassandra/io/sstable/SSTableUtils.java | 2 +- 13 files changed, 101 insertions(+), 46 deletions(-) diff --git a/src/java/org/apache/cassandra/db/ColumnFamilyStore.java b/src/java/org/apache/cassandra/db/ColumnFamilyStore.java index 0c5b1bbe18c6..e3dc036e8a11 100644 --- a/src/java/org/apache/cassandra/db/ColumnFamilyStore.java +++ b/src/java/org/apache/cassandra/db/ColumnFamilyStore.java @@ -83,6 +83,7 @@ import org.apache.cassandra.config.DurationSpec; import org.apache.cassandra.db.commitlog.CommitLog; import org.apache.cassandra.db.commitlog.CommitLogPosition; +import org.apache.cassandra.db.commitlog.IntervalSet; import org.apache.cassandra.db.compaction.AbstractCompactionStrategy; import org.apache.cassandra.db.compaction.CompactionInfo; import org.apache.cassandra.db.compaction.CompactionManager; @@ -129,7 +130,6 @@ import org.apache.cassandra.io.sstable.format.SSTableFormat.Components; import org.apache.cassandra.io.sstable.format.SSTableReader; import org.apache.cassandra.io.sstable.format.Version; -import org.apache.cassandra.io.sstable.metadata.MetadataCollector; import org.apache.cassandra.io.util.File; import org.apache.cassandra.io.util.FileOutputStreamPlus; import org.apache.cassandra.metrics.Sampler; @@ -646,15 +646,19 @@ public boolean streamFromMemtable() return 
memtableFactory.streamFromMemtable(); } - public SSTableMultiWriter createSSTableMultiWriter(Descriptor descriptor, long keyCount, long repairedAt, TimeUUID pendingRepair, boolean isTransient, int sstableLevel, SerializationHeader header, LifecycleNewTracker lifecycleNewTracker) + public SSTableMultiWriter createSSTableMultiWriter(Descriptor descriptor, long keyCount, long repairedAt, TimeUUID pendingRepair, boolean isTransient, SerializationHeader header, LifecycleNewTracker lifecycleNewTracker) { - MetadataCollector collector = new MetadataCollector(metadata().comparator).sstableLevel(sstableLevel); - return createSSTableMultiWriter(descriptor, keyCount, repairedAt, pendingRepair, isTransient, collector, header, lifecycleNewTracker); + return createSSTableMultiWriter(descriptor, keyCount, repairedAt, pendingRepair, isTransient, null, 0, header, lifecycleNewTracker); } - public SSTableMultiWriter createSSTableMultiWriter(Descriptor descriptor, long keyCount, long repairedAt, TimeUUID pendingRepair, boolean isTransient, MetadataCollector metadataCollector, SerializationHeader header, LifecycleNewTracker lifecycleNewTracker) + public SSTableMultiWriter createSSTableMultiWriter(Descriptor descriptor, long keyCount, long repairedAt, TimeUUID pendingRepair, boolean isTransient, IntervalSet commitLogPositions, SerializationHeader header, LifecycleNewTracker lifecycleNewTracker) { - return getCompactionStrategyManager().createSSTableMultiWriter(descriptor, keyCount, repairedAt, pendingRepair, isTransient, metadataCollector, header, indexManager.listIndexes(), lifecycleNewTracker); + return createSSTableMultiWriter(descriptor, keyCount, repairedAt, pendingRepair, isTransient, commitLogPositions, 0, header, lifecycleNewTracker); + } + + public SSTableMultiWriter createSSTableMultiWriter(Descriptor descriptor, long keyCount, long repairedAt, TimeUUID pendingRepair, boolean isTransient, IntervalSet commitLogPositions, int sstableLevel, SerializationHeader header, 
LifecycleNewTracker lifecycleNewTracker) + { + return getCompactionStrategyManager().createSSTableMultiWriter(descriptor, keyCount, repairedAt, pendingRepair, isTransient, commitLogPositions, sstableLevel, header, indexManager.listIndexes(), lifecycleNewTracker); } public boolean supportsEarlyOpen() @@ -2548,11 +2552,13 @@ private SSTableMultiWriter writeMemtableRanges(Supplier> dataSets = new ArrayList<>(ranges.size()); + IntervalSet.Builder commitLogIntervals = new IntervalSet.Builder(); long keys = 0; for (Range range : ranges) { Memtable.FlushablePartitionSet dataSet = current.getFlushSet(range.left, range.right); dataSets.add(dataSet); + commitLogIntervals.add(dataSet.commitLogLowerBound(), dataSet.commitLogUpperBound()); keys += dataSet.partitionCount(); } if (keys == 0) @@ -2565,7 +2571,7 @@ private SSTableMultiWriter writeMemtableRanges(Supplier commitLogPositions, + int sstableLevel, SerializationHeader header, Collection indexes, LifecycleNewTracker lifecycleNewTracker) { - return SimpleSSTableMultiWriter.create(descriptor, keyCount, repairedAt, pendingRepair, isTransient, cfs.metadata, meta, header, indexes, lifecycleNewTracker, cfs); + return SimpleSSTableMultiWriter.create(descriptor, + keyCount, + repairedAt, + pendingRepair, + isTransient, + cfs.metadata, + commitLogPositions, + sstableLevel, + header, + indexes, + lifecycleNewTracker, cfs); } public boolean supportsEarlyOpen() diff --git a/src/java/org/apache/cassandra/db/compaction/AbstractStrategyHolder.java b/src/java/org/apache/cassandra/db/compaction/AbstractStrategyHolder.java index 8337730699f3..38123d32d524 100644 --- a/src/java/org/apache/cassandra/db/compaction/AbstractStrategyHolder.java +++ b/src/java/org/apache/cassandra/db/compaction/AbstractStrategyHolder.java @@ -29,6 +29,8 @@ import org.apache.cassandra.db.ColumnFamilyStore; import org.apache.cassandra.db.SerializationHeader; +import org.apache.cassandra.db.commitlog.CommitLogPosition; +import 
org.apache.cassandra.db.commitlog.IntervalSet; import org.apache.cassandra.db.lifecycle.LifecycleNewTracker; import org.apache.cassandra.dht.Range; import org.apache.cassandra.dht.Token; @@ -37,7 +39,6 @@ import org.apache.cassandra.io.sstable.ISSTableScanner; import org.apache.cassandra.io.sstable.SSTableMultiWriter; import org.apache.cassandra.io.sstable.format.SSTableReader; -import org.apache.cassandra.io.sstable.metadata.MetadataCollector; import org.apache.cassandra.schema.CompactionParams; import org.apache.cassandra.utils.TimeUUID; @@ -194,7 +195,8 @@ public abstract SSTableMultiWriter createSSTableMultiWriter(Descriptor descripto long repairedAt, TimeUUID pendingRepair, boolean isTransient, - MetadataCollector collector, + IntervalSet commitLogPositions, + int sstableLevel, SerializationHeader header, Collection indexes, LifecycleNewTracker lifecycleNewTracker); diff --git a/src/java/org/apache/cassandra/db/compaction/CompactionStrategyHolder.java b/src/java/org/apache/cassandra/db/compaction/CompactionStrategyHolder.java index d3daec04a843..9a31752d6993 100644 --- a/src/java/org/apache/cassandra/db/compaction/CompactionStrategyHolder.java +++ b/src/java/org/apache/cassandra/db/compaction/CompactionStrategyHolder.java @@ -27,6 +27,8 @@ import org.apache.cassandra.db.ColumnFamilyStore; import org.apache.cassandra.db.SerializationHeader; +import org.apache.cassandra.db.commitlog.CommitLogPosition; +import org.apache.cassandra.db.commitlog.IntervalSet; import org.apache.cassandra.db.lifecycle.LifecycleNewTracker; import org.apache.cassandra.dht.Range; import org.apache.cassandra.dht.Token; @@ -35,7 +37,6 @@ import org.apache.cassandra.io.sstable.ISSTableScanner; import org.apache.cassandra.io.sstable.SSTableMultiWriter; import org.apache.cassandra.io.sstable.format.SSTableReader; -import org.apache.cassandra.io.sstable.metadata.MetadataCollector; import org.apache.cassandra.schema.CompactionParams; import org.apache.cassandra.service.ActiveRepairService; 
import org.apache.cassandra.utils.TimeUUID; @@ -220,7 +221,8 @@ public SSTableMultiWriter createSSTableMultiWriter(Descriptor descriptor, long repairedAt, TimeUUID pendingRepair, boolean isTransient, - MetadataCollector collector, + IntervalSet commitLogPositions, + int sstableLevel, SerializationHeader header, Collection indexes, LifecycleNewTracker lifecycleNewTracker) @@ -244,7 +246,8 @@ public SSTableMultiWriter createSSTableMultiWriter(Descriptor descriptor, repairedAt, pendingRepair, isTransient, - collector, + commitLogPositions, + sstableLevel, header, indexes, lifecycleNewTracker); diff --git a/src/java/org/apache/cassandra/db/compaction/CompactionStrategyManager.java b/src/java/org/apache/cassandra/db/compaction/CompactionStrategyManager.java index d3347fc58767..05add93d7f21 100644 --- a/src/java/org/apache/cassandra/db/compaction/CompactionStrategyManager.java +++ b/src/java/org/apache/cassandra/db/compaction/CompactionStrategyManager.java @@ -50,6 +50,8 @@ import org.apache.cassandra.db.Directories; import org.apache.cassandra.db.DiskBoundaries; import org.apache.cassandra.db.SerializationHeader; +import org.apache.cassandra.db.commitlog.CommitLogPosition; +import org.apache.cassandra.db.commitlog.IntervalSet; import org.apache.cassandra.db.compaction.AbstractStrategyHolder.TaskSupplier; import org.apache.cassandra.db.compaction.PendingRepairManager.CleanupTask; import org.apache.cassandra.db.lifecycle.LifecycleNewTracker; @@ -64,7 +66,6 @@ import org.apache.cassandra.io.sstable.SSTableMultiWriter; import org.apache.cassandra.io.sstable.format.SSTableFormat.Components; import org.apache.cassandra.io.sstable.format.SSTableReader; -import org.apache.cassandra.io.sstable.metadata.MetadataCollector; import org.apache.cassandra.io.sstable.metadata.StatsMetadata; import org.apache.cassandra.io.util.File; import org.apache.cassandra.notifications.INotification; @@ -1073,7 +1074,7 @@ public CompactionTasks getMaximalTasks(final long gcBefore, final boolean 
splitO { for (AbstractStrategyHolder holder : holders) { - for (AbstractCompactionTask task: holder.getMaximalTasks(gcBefore, splitOutput)) + for (AbstractCompactionTask task: holder.getMaximalTasks(gcBefore, splitOutput)) { tasks.add(task.setCompactionType(operationType)); } @@ -1233,7 +1234,8 @@ public SSTableMultiWriter createSSTableMultiWriter(Descriptor descriptor, long repairedAt, TimeUUID pendingRepair, boolean isTransient, - MetadataCollector collector, + IntervalSet commitLogPositions, + int sstableLevel, SerializationHeader header, Collection indexes, LifecycleNewTracker lifecycleNewTracker) @@ -1248,7 +1250,8 @@ public SSTableMultiWriter createSSTableMultiWriter(Descriptor descriptor, repairedAt, pendingRepair, isTransient, - collector, + commitLogPositions, + sstableLevel, header, indexes, lifecycleNewTracker); diff --git a/src/java/org/apache/cassandra/db/compaction/PendingRepairHolder.java b/src/java/org/apache/cassandra/db/compaction/PendingRepairHolder.java index d9a41ff26989..639481322a0a 100644 --- a/src/java/org/apache/cassandra/db/compaction/PendingRepairHolder.java +++ b/src/java/org/apache/cassandra/db/compaction/PendingRepairHolder.java @@ -28,6 +28,8 @@ import org.apache.cassandra.db.ColumnFamilyStore; import org.apache.cassandra.db.SerializationHeader; +import org.apache.cassandra.db.commitlog.CommitLogPosition; +import org.apache.cassandra.db.commitlog.IntervalSet; import org.apache.cassandra.db.lifecycle.LifecycleNewTracker; import org.apache.cassandra.dht.Range; import org.apache.cassandra.dht.Token; @@ -36,7 +38,6 @@ import org.apache.cassandra.io.sstable.ISSTableScanner; import org.apache.cassandra.io.sstable.SSTableMultiWriter; import org.apache.cassandra.io.sstable.format.SSTableReader; -import org.apache.cassandra.io.sstable.metadata.MetadataCollector; import org.apache.cassandra.schema.CompactionParams; import org.apache.cassandra.service.ActiveRepairService; import org.apache.cassandra.utils.TimeUUID; @@ -238,7 +239,8 @@ public 
SSTableMultiWriter createSSTableMultiWriter(Descriptor descriptor, long repairedAt, TimeUUID pendingRepair, boolean isTransient, - MetadataCollector collector, + IntervalSet commitLogPositions, + int sstableLevel, SerializationHeader header, Collection indexes, LifecycleNewTracker lifecycleNewTracker) @@ -254,7 +256,8 @@ public SSTableMultiWriter createSSTableMultiWriter(Descriptor descriptor, repairedAt, pendingRepair, isTransient, - collector, + commitLogPositions, + sstableLevel, header, indexes, lifecycleNewTracker); diff --git a/src/java/org/apache/cassandra/db/memtable/Flushing.java b/src/java/org/apache/cassandra/db/memtable/Flushing.java index afe7a120008a..c7d0c90682be 100644 --- a/src/java/org/apache/cassandra/db/memtable/Flushing.java +++ b/src/java/org/apache/cassandra/db/memtable/Flushing.java @@ -41,7 +41,6 @@ import org.apache.cassandra.io.sstable.Descriptor; import org.apache.cassandra.io.sstable.SSTableMultiWriter; import org.apache.cassandra.io.sstable.format.SSTableFormat; -import org.apache.cassandra.io.sstable.metadata.MetadataCollector; import org.apache.cassandra.metrics.TableMetrics; import org.apache.cassandra.service.ActiveRepairService; import org.apache.cassandra.utils.FBUtilities; @@ -201,16 +200,13 @@ public static SSTableMultiWriter createFlushWriter(ColumnFamilyStore cfs, Descriptor descriptor, long partitionCount) { - MetadataCollector sstableMetadataCollector = new MetadataCollector(flushSet.metadata().comparator) - .commitLogIntervals(new IntervalSet<>(flushSet.commitLogLowerBound(), - flushSet.commitLogUpperBound())); - return cfs.createSSTableMultiWriter(descriptor, partitionCount, ActiveRepairService.UNREPAIRED_SSTABLE, ActiveRepairService.NO_PENDING_REPAIR, false, - sstableMetadataCollector, + new IntervalSet<>(flushSet.commitLogLowerBound(), + flushSet.commitLogUpperBound()), new SerializationHeader(true, flushSet.metadata(), flushSet.columns(), diff --git 
a/src/java/org/apache/cassandra/io/sstable/AbstractSSTableSimpleWriter.java b/src/java/org/apache/cassandra/io/sstable/AbstractSSTableSimpleWriter.java index 429995cc488c..b12806c15716 100644 --- a/src/java/org/apache/cassandra/io/sstable/AbstractSSTableSimpleWriter.java +++ b/src/java/org/apache/cassandra/io/sstable/AbstractSSTableSimpleWriter.java @@ -72,7 +72,7 @@ protected SSTableTxnWriter createWriter(SSTable.Owner owner) throws IOException SerializationHeader header = new SerializationHeader(true, metadata.get(), columns, EncodingStats.NO_STATS); if (makeRangeAware) - return SSTableTxnWriter.createRangeAware(metadata, 0, ActiveRepairService.UNREPAIRED_SSTABLE, ActiveRepairService.NO_PENDING_REPAIR, false, format, 0, header); + return SSTableTxnWriter.createRangeAware(metadata, 0, ActiveRepairService.UNREPAIRED_SSTABLE, ActiveRepairService.NO_PENDING_REPAIR, false, format, header); return SSTableTxnWriter.create(metadata, createDescriptor(directory, metadata.keyspace, metadata.name, format), @@ -80,7 +80,6 @@ protected SSTableTxnWriter createWriter(SSTable.Owner owner) throws IOException ActiveRepairService.UNREPAIRED_SSTABLE, ActiveRepairService.NO_PENDING_REPAIR, false, - 0, header, Collections.emptySet(), owner); diff --git a/src/java/org/apache/cassandra/io/sstable/RangeAwareSSTableWriter.java b/src/java/org/apache/cassandra/io/sstable/RangeAwareSSTableWriter.java index 61367a739206..dbf131689f7b 100644 --- a/src/java/org/apache/cassandra/io/sstable/RangeAwareSSTableWriter.java +++ b/src/java/org/apache/cassandra/io/sstable/RangeAwareSSTableWriter.java @@ -75,7 +75,7 @@ public RangeAwareSSTableWriter(ColumnFamilyStore cfs, long estimatedKeys, long r throw new IOException(String.format("Insufficient disk space to store %s", FBUtilities.prettyPrintMemory(totalSize))); Descriptor desc = cfs.newSSTableDescriptor(cfs.getDirectories().getLocationForDisk(localDir), format); - currentWriter = cfs.createSSTableMultiWriter(desc, estimatedKeys, repairedAt, 
pendingRepair, isTransient, sstableLevel, header, lifecycleNewTracker); + currentWriter = cfs.createSSTableMultiWriter(desc, estimatedKeys, repairedAt, pendingRepair, isTransient, null, sstableLevel, header, lifecycleNewTracker); } } @@ -97,7 +97,7 @@ private void maybeSwitchWriter(DecoratedKey key) finishedWriters.add(currentWriter); Descriptor desc = cfs.newSSTableDescriptor(cfs.getDirectories().getLocationForDisk(directories.get(currentIndex)), format); - currentWriter = cfs.createSSTableMultiWriter(desc, estimatedKeys, repairedAt, pendingRepair, isTransient, sstableLevel, header, lifecycleNewTracker); + currentWriter = cfs.createSSTableMultiWriter(desc, estimatedKeys, repairedAt, pendingRepair, isTransient, null, sstableLevel, header, lifecycleNewTracker); } } diff --git a/src/java/org/apache/cassandra/io/sstable/SSTableTxnWriter.java b/src/java/org/apache/cassandra/io/sstable/SSTableTxnWriter.java index fad7d544243d..b817f3aa1088 100644 --- a/src/java/org/apache/cassandra/io/sstable/SSTableTxnWriter.java +++ b/src/java/org/apache/cassandra/io/sstable/SSTableTxnWriter.java @@ -30,7 +30,6 @@ import org.apache.cassandra.index.Index; import org.apache.cassandra.io.sstable.format.SSTableFormat; import org.apache.cassandra.io.sstable.format.SSTableReader; -import org.apache.cassandra.io.sstable.metadata.MetadataCollector; import org.apache.cassandra.schema.TableMetadataRef; import org.apache.cassandra.utils.TimeUUID; import org.apache.cassandra.utils.concurrent.Transactional; @@ -98,10 +97,10 @@ public Collection finish(boolean openResult) } @SuppressWarnings("resource") // log and writer closed during doPostCleanup - public static SSTableTxnWriter create(ColumnFamilyStore cfs, Descriptor descriptor, long keyCount, long repairedAt, TimeUUID pendingRepair, boolean isTransient, int sstableLevel, SerializationHeader header) + public static SSTableTxnWriter create(ColumnFamilyStore cfs, Descriptor descriptor, long keyCount, long repairedAt, TimeUUID pendingRepair, 
boolean isTransient, SerializationHeader header) { LifecycleTransaction txn = LifecycleTransaction.offline(OperationType.WRITE); - SSTableMultiWriter writer = cfs.createSSTableMultiWriter(descriptor, keyCount, repairedAt, pendingRepair, isTransient, sstableLevel, header, txn); + SSTableMultiWriter writer = cfs.createSSTableMultiWriter(descriptor, keyCount, repairedAt, pendingRepair, isTransient, header, txn); return new SSTableTxnWriter(txn, writer); } @@ -113,7 +112,6 @@ public static SSTableTxnWriter createRangeAware(TableMetadataRef metadata, TimeUUID pendingRepair, boolean isTransient, SSTableFormat type, - int sstableLevel, SerializationHeader header) { @@ -122,7 +120,7 @@ public static SSTableTxnWriter createRangeAware(TableMetadataRef metadata, SSTableMultiWriter writer; try { - writer = new RangeAwareSSTableWriter(cfs, keyCount, repairedAt, pendingRepair, isTransient, type, sstableLevel, 0, txn, header); + writer = new RangeAwareSSTableWriter(cfs, keyCount, repairedAt, pendingRepair, isTransient, type, 0, 0, txn, header); } catch (IOException e) { @@ -141,20 +139,13 @@ public static SSTableTxnWriter create(TableMetadataRef metadata, long repairedAt, TimeUUID pendingRepair, boolean isTransient, - int sstableLevel, SerializationHeader header, Collection indexes, SSTable.Owner owner) { // if the column family store does not exist, we create a new default SSTableMultiWriter to use: LifecycleTransaction txn = LifecycleTransaction.offline(OperationType.WRITE); - MetadataCollector collector = new MetadataCollector(metadata.get().comparator).sstableLevel(sstableLevel); - SSTableMultiWriter writer = SimpleSSTableMultiWriter.create(descriptor, keyCount, repairedAt, pendingRepair, isTransient, metadata, collector, header, indexes, txn, owner); + SSTableMultiWriter writer = SimpleSSTableMultiWriter.create(descriptor, keyCount, repairedAt, pendingRepair, isTransient, metadata, null, 0, header, indexes, txn, owner); return new SSTableTxnWriter(txn, writer); } - - public 
static SSTableTxnWriter create(ColumnFamilyStore cfs, Descriptor desc, long keyCount, long repairedAt, TimeUUID pendingRepair, boolean isTransient, SerializationHeader header) - { - return create(cfs, desc, keyCount, repairedAt, pendingRepair, isTransient, 0, header); - } } diff --git a/src/java/org/apache/cassandra/io/sstable/SimpleSSTableMultiWriter.java b/src/java/org/apache/cassandra/io/sstable/SimpleSSTableMultiWriter.java index baa38338973b..2eec6157c413 100644 --- a/src/java/org/apache/cassandra/io/sstable/SimpleSSTableMultiWriter.java +++ b/src/java/org/apache/cassandra/io/sstable/SimpleSSTableMultiWriter.java @@ -21,6 +21,8 @@ import java.util.Collections; import org.apache.cassandra.db.SerializationHeader; +import org.apache.cassandra.db.commitlog.CommitLogPosition; +import org.apache.cassandra.db.commitlog.IntervalSet; import org.apache.cassandra.db.lifecycle.LifecycleNewTracker; import org.apache.cassandra.db.rows.UnfilteredRowIterator; import org.apache.cassandra.index.Index; @@ -107,12 +109,16 @@ public static SSTableMultiWriter create(Descriptor descriptor, TimeUUID pendingRepair, boolean isTransient, TableMetadataRef metadata, - MetadataCollector metadataCollector, + IntervalSet commitLogPositions, + int sstableLevel, SerializationHeader header, Collection indexes, LifecycleNewTracker lifecycleNewTracker, SSTable.Owner owner) { + MetadataCollector metadataCollector = new MetadataCollector(metadata.get().comparator) + .commitLogIntervals(commitLogPositions != null ? 
commitLogPositions : IntervalSet.empty()) + .sstableLevel(sstableLevel); SSTableWriter writer = descriptor.getFormat().getWriterFactory().builder(descriptor) .setKeyCount(keyCount) .setRepairedAt(repairedAt) diff --git a/test/unit/org/apache/cassandra/db/memtable/MemtableQuickTest.java b/test/unit/org/apache/cassandra/db/memtable/MemtableQuickTest.java index b2cfa3e5865b..a3dbcd77e9e4 100644 --- a/test/unit/org/apache/cassandra/db/memtable/MemtableQuickTest.java +++ b/test/unit/org/apache/cassandra/db/memtable/MemtableQuickTest.java @@ -18,9 +18,11 @@ package org.apache.cassandra.db.memtable; +import java.util.Collection; import java.util.List; import com.google.common.collect.ImmutableList; +import org.junit.Assert; import org.junit.BeforeClass; import org.junit.Test; import org.junit.runner.RunWith; @@ -34,6 +36,9 @@ import org.apache.cassandra.cql3.UntypedResultSet; import org.apache.cassandra.db.ColumnFamilyStore; import org.apache.cassandra.db.Keyspace; +import org.apache.cassandra.dht.Range; +import org.apache.cassandra.io.sstable.format.SSTableReader; +import org.apache.cassandra.utils.concurrent.Refs; @RunWith(Parameterized.class) public class MemtableQuickTest extends CQLTester @@ -138,5 +143,33 @@ public void testMemtable() throws Throwable logger.info("Selecting *"); result = execute("SELECT * FROM " + table); assertRowCount(result, rowsPerPartition * (partitions - deletedPartitions) - deletedRows); + + try (Refs refs = new Refs()) + { + Collection sstables = cfs.getLiveSSTables(); + if (sstables.isEmpty()) // persistent memtables won't flush + { + assert cfs.streamFromMemtable(); + cfs.writeAndAddMemtableRanges(null, + () -> ImmutableList.of(new Range(Util.testPartitioner().getMinimumToken().minKeyBound(), + Util.testPartitioner().getMinimumToken().minKeyBound())), + refs); + sstables = refs; + Assert.assertTrue(cfs.getLiveSSTables().isEmpty()); + } + + // make sure the row counts are correct in both the metadata as well as the cardinality estimator + 
// (see STAR-1826) + long totalPartitions = 0; + for (SSTableReader sstable : sstables) + { + long sstableKeys = sstable.estimatedKeys(); + long cardinality = SSTableReader.getApproximateKeyCount(ImmutableList.of(sstable)); + // should be within 10% of each other + Assert.assertEquals((double) sstableKeys, (double) cardinality, sstableKeys * 0.1); + totalPartitions += sstableKeys; + } + Assert.assertEquals((double) partitions, (double) totalPartitions, partitions * 0.1); + } } } \ No newline at end of file diff --git a/test/unit/org/apache/cassandra/io/sstable/SSTableUtils.java b/test/unit/org/apache/cassandra/io/sstable/SSTableUtils.java index 05964a8000f4..dd82a327d63a 100644 --- a/test/unit/org/apache/cassandra/io/sstable/SSTableUtils.java +++ b/test/unit/org/apache/cassandra/io/sstable/SSTableUtils.java @@ -233,7 +233,7 @@ public Collection write(int expectedSize, Appender appender) thro TableMetadata metadata = Schema.instance.getTableMetadata(ksname, cfname); ColumnFamilyStore cfs = Schema.instance.getColumnFamilyStoreInstance(metadata.id); SerializationHeader header = appender.header(); - SSTableTxnWriter writer = SSTableTxnWriter.create(cfs, Descriptor.fromFileWithComponent(datafile, false).left, expectedSize, UNREPAIRED_SSTABLE, NO_PENDING_REPAIR, false, 0, header); + SSTableTxnWriter writer = SSTableTxnWriter.create(cfs, Descriptor.fromFileWithComponent(datafile, false).left, expectedSize, UNREPAIRED_SSTABLE, NO_PENDING_REPAIR, false, header); while (appender.append(writer)) { /* pass */ } Collection readers = writer.finish(true); From d4cf1f369e9215f9df0ed2556bc8fbace5fe4b9d Mon Sep 17 00:00:00 2001 From: Branimir Lambov Date: Mon, 20 Mar 2023 17:53:57 +0200 Subject: [PATCH 10/27] Fix weighted splitting --- src/java/org/apache/cassandra/dht/Splitter.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/java/org/apache/cassandra/dht/Splitter.java b/src/java/org/apache/cassandra/dht/Splitter.java index e410a9cb2900..159385932105 
100644 --- a/src/java/org/apache/cassandra/dht/Splitter.java +++ b/src/java/org/apache/cassandra/dht/Splitter.java @@ -144,10 +144,11 @@ public List splitOwnedRanges(int parts, List weightedRange { BigInteger currentRangeWidth = weightedRange.totalTokens(this); BigInteger left = valueForToken(weightedRange.left()); + BigInteger currentRangeFactor = BigInteger.valueOf(Math.max(1, (long) (1 / weightedRange.weight))); while (sum.add(currentRangeWidth).compareTo(perPart) >= 0) { BigInteger withinRangeBoundary = perPart.subtract(sum); - left = left.add(withinRangeBoundary); + left = left.add(withinRangeBoundary.multiply(currentRangeFactor)); boundaries.add(tokenForValue(left)); tokensLeft = tokensLeft.subtract(perPart); currentRangeWidth = currentRangeWidth.subtract(withinRangeBoundary); From e5582c33c7fd75a5cf634dadbf505605c2a3e3a8 Mon Sep 17 00:00:00 2001 From: Branimir Lambov Date: Tue, 28 Feb 2023 14:16:42 +0200 Subject: [PATCH 11/27] Add flushSizeOnDisk metric --- .../cassandra/db/ColumnFamilyStore.java | 5 +- .../cassandra/db/memtable/Flushing.java | 2 +- .../io/sstable/RangeAwareSSTableWriter.java | 12 +- .../io/sstable/SSTableMultiWriter.java | 3 +- .../io/sstable/SSTableTxnWriter.java | 2 +- .../io/sstable/SSTableZeroCopyWriter.java | 8 +- .../io/sstable/SimpleSSTableMultiWriter.java | 7 +- .../cassandra/metrics/TableMetrics.java | 5 + .../cassandra/utils/ExpMovingAverage.java | 107 ++++++++++++++++++ .../apache/cassandra/utils/MovingAverage.java | 26 +++++ .../cassandra/io/DiskSpaceMetricsTest.java | 24 ++++ .../sstable/RangeAwareSSTableWriterTest.java | 4 +- 12 files changed, 194 insertions(+), 11 deletions(-) create mode 100644 src/java/org/apache/cassandra/utils/ExpMovingAverage.java create mode 100644 src/java/org/apache/cassandra/utils/MovingAverage.java diff --git a/src/java/org/apache/cassandra/db/ColumnFamilyStore.java b/src/java/org/apache/cassandra/db/ColumnFamilyStore.java index e3dc036e8a11..4e3e8945bd5a 100644 --- 
a/src/java/org/apache/cassandra/db/ColumnFamilyStore.java +++ b/src/java/org/apache/cassandra/db/ColumnFamilyStore.java @@ -1290,7 +1290,7 @@ public Collection flushMemtable(ColumnFamilyStore cfs, Memtable m { @SuppressWarnings("resource") SSTableMultiWriter writer = writerIterator.next(); - if (writer.getFilePointer() > 0) + if (writer.getBytesWritten() > 0) { writer.setOpenResult(true).prepareToCommit(); } @@ -1314,7 +1314,10 @@ public Collection flushMemtable(ColumnFamilyStore cfs, Memtable m Throwable accumulate = null; for (SSTableMultiWriter writer : flushResults) + { accumulate = writer.commit(accumulate); + metric.flushSizeOnDisk.update(writer.getOnDiskBytesWritten()); + } maybeFail(txn.commit(accumulate)); diff --git a/src/java/org/apache/cassandra/db/memtable/Flushing.java b/src/java/org/apache/cassandra/db/memtable/Flushing.java index c7d0c90682be..f97f59518d38 100644 --- a/src/java/org/apache/cassandra/db/memtable/Flushing.java +++ b/src/java/org/apache/cassandra/db/memtable/Flushing.java @@ -169,7 +169,7 @@ private void writeSortedContents() if (logCompletion) { - long bytesFlushed = writer.getFilePointer(); + long bytesFlushed = writer.getBytesWritten(); logger.info("Completed flushing {} ({}) for commitlog position {}", writer.getFilename(), FBUtilities.prettyPrintMemory(bytesFlushed), diff --git a/src/java/org/apache/cassandra/io/sstable/RangeAwareSSTableWriter.java b/src/java/org/apache/cassandra/io/sstable/RangeAwareSSTableWriter.java index dbf131689f7b..b52b2b3137b3 100644 --- a/src/java/org/apache/cassandra/io/sstable/RangeAwareSSTableWriter.java +++ b/src/java/org/apache/cassandra/io/sstable/RangeAwareSSTableWriter.java @@ -115,7 +115,7 @@ public Collection finish(boolean openResult) currentWriter = null; for (SSTableMultiWriter writer : finishedWriters) { - if (writer.getFilePointer() > 0) + if (writer.getBytesWritten() > 0) finishedReaders.addAll(writer.finish(openResult)); else SSTableMultiWriter.abortOrDie(writer); @@ -143,9 +143,15 @@ 
public String getFilename() } @Override - public long getFilePointer() + public long getBytesWritten() { - return currentWriter != null ? currentWriter.getFilePointer() : 0L; + return currentWriter != null ? currentWriter.getBytesWritten() : 0L; + } + + @Override + public long getOnDiskBytesWritten() + { + return currentWriter != null ? currentWriter.getOnDiskBytesWritten() : 0L; } @Override diff --git a/src/java/org/apache/cassandra/io/sstable/SSTableMultiWriter.java b/src/java/org/apache/cassandra/io/sstable/SSTableMultiWriter.java index 6948efc894ab..0a1495c43abc 100644 --- a/src/java/org/apache/cassandra/io/sstable/SSTableMultiWriter.java +++ b/src/java/org/apache/cassandra/io/sstable/SSTableMultiWriter.java @@ -42,7 +42,8 @@ public interface SSTableMultiWriter extends Transactional SSTableMultiWriter setOpenResult(boolean openResult); String getFilename(); - long getFilePointer(); + long getBytesWritten(); + long getOnDiskBytesWritten(); TableId getTableId(); static void abortOrDie(SSTableMultiWriter writer) diff --git a/src/java/org/apache/cassandra/io/sstable/SSTableTxnWriter.java b/src/java/org/apache/cassandra/io/sstable/SSTableTxnWriter.java index b817f3aa1088..e917107cc4f0 100644 --- a/src/java/org/apache/cassandra/io/sstable/SSTableTxnWriter.java +++ b/src/java/org/apache/cassandra/io/sstable/SSTableTxnWriter.java @@ -62,7 +62,7 @@ public String getFilename() public long getFilePointer() { - return writer.getFilePointer(); + return writer.getBytesWritten(); } protected Throwable doCommit(Throwable accumulate) diff --git a/src/java/org/apache/cassandra/io/sstable/SSTableZeroCopyWriter.java b/src/java/org/apache/cassandra/io/sstable/SSTableZeroCopyWriter.java index 034d46b0496c..f6febf834126 100644 --- a/src/java/org/apache/cassandra/io/sstable/SSTableZeroCopyWriter.java +++ b/src/java/org/apache/cassandra/io/sstable/SSTableZeroCopyWriter.java @@ -148,7 +148,13 @@ public SSTableMultiWriter setOpenResult(boolean openResult) } @Override - public long 
getFilePointer() + public long getBytesWritten() + { + return 0; + } + + @Override + public long getOnDiskBytesWritten() { return 0; } diff --git a/src/java/org/apache/cassandra/io/sstable/SimpleSSTableMultiWriter.java b/src/java/org/apache/cassandra/io/sstable/SimpleSSTableMultiWriter.java index 2eec6157c413..8e613180f77d 100644 --- a/src/java/org/apache/cassandra/io/sstable/SimpleSSTableMultiWriter.java +++ b/src/java/org/apache/cassandra/io/sstable/SimpleSSTableMultiWriter.java @@ -71,11 +71,16 @@ public String getFilename() return writer.getFilename(); } - public long getFilePointer() + public long getBytesWritten() { return writer.getFilePointer(); } + public long getOnDiskBytesWritten() + { + return writer.getEstimatedOnDiskBytesWritten(); + } + public TableId getTableId() { return writer.metadata().id; diff --git a/src/java/org/apache/cassandra/metrics/TableMetrics.java b/src/java/org/apache/cassandra/metrics/TableMetrics.java index 024c9a50223c..ba916ccb48c6 100644 --- a/src/java/org/apache/cassandra/metrics/TableMetrics.java +++ b/src/java/org/apache/cassandra/metrics/TableMetrics.java @@ -58,6 +58,8 @@ import org.apache.cassandra.schema.Schema; import org.apache.cassandra.schema.SchemaConstants; import org.apache.cassandra.utils.EstimatedHistogram; +import org.apache.cassandra.utils.ExpMovingAverage; +import org.apache.cassandra.utils.MovingAverage; import org.apache.cassandra.utils.Pair; import static java.util.concurrent.TimeUnit.MICROSECONDS; @@ -119,6 +121,8 @@ public class TableMetrics public final Counter pendingFlushes; /** Total number of bytes flushed since server [re]start */ public final Counter bytesFlushed; + /** The average on-disk flushed size for sstables. 
*/ + public final MovingAverage flushSizeOnDisk; /** Total number of bytes written by compaction since server [re]start */ public final Counter compactionBytesWritten; /** Estimate of number of pending compactios for this table */ @@ -623,6 +627,7 @@ public Long getValue() rangeLatency = createLatencyMetrics("Range", cfs.keyspace.metric.rangeLatency, GLOBAL_RANGE_LATENCY); pendingFlushes = createTableCounter("PendingFlushes"); bytesFlushed = createTableCounter("BytesFlushed"); + flushSizeOnDisk = ExpMovingAverage.decayBy1000(); compactionBytesWritten = createTableCounter("CompactionBytesWritten"); pendingCompactions = createTableGauge("PendingCompactions", () -> cfs.getCompactionStrategyManager().getEstimatedRemainingTasks()); diff --git a/src/java/org/apache/cassandra/utils/ExpMovingAverage.java b/src/java/org/apache/cassandra/utils/ExpMovingAverage.java new file mode 100644 index 000000000000..2dc726a2e42d --- /dev/null +++ b/src/java/org/apache/cassandra/utils/ExpMovingAverage.java @@ -0,0 +1,107 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.cassandra.utils; + +import com.google.common.util.concurrent.AtomicDouble; + +/** + * Sample-based exponential moving average. 
On every update a fraction of the current average is replaced by the new + * sample. New values have greater representation in the average, and older samples' effect exponentially decays with + * new data. + */ +public class ExpMovingAverage implements MovingAverage +{ + /** The ratio of decay, between 0 and 1, where smaller alpha means values are averaged over more samples */ + private final double alpha; + + /** The long term average with exponential decay */ + private final AtomicDouble average = new AtomicDouble(Double.NaN); + + /** + * Create a {@link ExpMovingAverage} where older values have less than 1% effect after 1000 samples. + */ + public static MovingAverage decayBy1000() + { + return new ExpMovingAverage(0.0046); + } + + /** + * Create a {@link ExpMovingAverage} where older values have less than 1% effect after 100 samples. + */ + public static ExpMovingAverage decayBy100() + { + return new ExpMovingAverage(0.045); + } + + /** + * Create a {@link ExpMovingAverage} where older values have less than 1% effect after 10 samples. + */ + public static ExpMovingAverage decayBy10() + { + return new ExpMovingAverage(0.37); + } + + /** + * Create a {@link ExpMovingAverage} where older values have less effect than the given ratio after the given + * number of samples. + */ + public static ExpMovingAverage withDecay(double ratio, int samples) + { + assert ratio > 0.0 && ratio < 1.0; + assert samples > 0; + return new ExpMovingAverage(1 - Math.pow(ratio, 1.0 / samples)); + } + + ExpMovingAverage(double alpha) + { + assert alpha > 0.0 && alpha <= 1.0; + this.alpha = alpha; + } + + @Override + public MovingAverage update(double val) + { + double current, update; + do + { + current = average.get(); + + if (!Double.isNaN(current)) + update = current + alpha * (val - current); + else + update = val; // Not initialized yet. Incidentally, passing NaN will cause reinitialization on the + // next update. 
+ } + while (!average.compareAndSet(current, update)); + + return this; + } + + @Override + public double get() + { + return average.get(); + } + + @Override + public String toString() + { + return String.format("%.2f", get()); + } +} diff --git a/src/java/org/apache/cassandra/utils/MovingAverage.java b/src/java/org/apache/cassandra/utils/MovingAverage.java new file mode 100644 index 000000000000..11f97934eeb8 --- /dev/null +++ b/src/java/org/apache/cassandra/utils/MovingAverage.java @@ -0,0 +1,26 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.cassandra.utils; + +public interface MovingAverage +{ + MovingAverage update(double val); + + double get(); +} diff --git a/test/unit/org/apache/cassandra/io/DiskSpaceMetricsTest.java b/test/unit/org/apache/cassandra/io/DiskSpaceMetricsTest.java index 5ecba47eb34b..95335746060e 100644 --- a/test/unit/org/apache/cassandra/io/DiskSpaceMetricsTest.java +++ b/test/unit/org/apache/cassandra/io/DiskSpaceMetricsTest.java @@ -48,6 +48,7 @@ import static org.assertj.core.api.Assertions.assertThat; import static org.junit.Assert.assertEquals; +import static org.psjava.util.AssertStatus.assertTrue; public class DiskSpaceMetricsTest extends CQLTester { @@ -95,6 +96,29 @@ public void summaryRedistribution() throws Throwable } } + @Test + public void testFlushSize() throws Throwable + { + createTable("CREATE TABLE %s (pk bigint, PRIMARY KEY (pk))"); + ColumnFamilyStore cfs = getCurrentColumnFamilyStore(); + assertTrue(Double.isNaN(cfs.metric.flushSizeOnDisk.get())); + + // disable compaction so nothing changes between calculations + cfs.disableAutoCompaction(); + + for (int i = 0; i < 3; i++) + insertN(cfs, 1000, 55); + + int totalSize = 0; + final Set liveSSTables = cfs.getLiveSSTables(); + for (SSTableReader rdr : liveSSTables) + { + totalSize += rdr.onDiskLength(); + } + final int avgSize = totalSize / liveSSTables.size(); + assertEquals(avgSize, cfs.metric.flushSizeOnDisk.get(), 0.05 * avgSize); + } + private void insert(ColumnFamilyStore cfs, long value) throws Throwable { insertN(cfs, 1, value); diff --git a/test/unit/org/apache/cassandra/io/sstable/RangeAwareSSTableWriterTest.java b/test/unit/org/apache/cassandra/io/sstable/RangeAwareSSTableWriterTest.java index 2b258bad79b5..0c79fef7fb28 100644 --- a/test/unit/org/apache/cassandra/io/sstable/RangeAwareSSTableWriterTest.java +++ b/test/unit/org/apache/cassandra/io/sstable/RangeAwareSSTableWriterTest.java @@ -85,7 +85,7 @@ public void testAccessWriterBeforeAppend() throws IOException 
SerializationHeader.make(cfs.metadata(), cfs.getLiveSSTables())); assertEquals(cfs.metadata.id, writer.getTableId()); - assertEquals(0L, writer.getFilePointer()); - + assertEquals(0L, writer.getBytesWritten()); + assertEquals(0L, writer.getOnDiskBytesWritten()); } } From fec6e8bca0b8bb7e04f6ef90a62034713643a9ae Mon Sep 17 00:00:00 2001 From: Branimir Lambov Date: Mon, 27 Feb 2023 16:10:35 +0200 Subject: [PATCH 12/27] UnifiedCompactionStrategy, CEP-26 --- .../config/CassandraRelevantProperties.java | 82 ++ .../cassandra/db/ColumnFamilyStore.java | 96 +- .../cassandra/db/compaction/ShardManager.java | 154 +++ .../db/compaction/ShardManagerDiskAware.java | 220 +++++ .../db/compaction/ShardManagerNoDisks.java | 201 ++++ .../db/compaction/ShardManagerTrivial.java | 135 +++ .../cassandra/db/compaction/ShardTracker.java | 63 ++ .../compaction/UnifiedCompactionStrategy.java | 865 +++++++++++++++++ .../compaction/UnifiedCompactionStrategy.md | 371 +++++++ .../db/compaction/unified/Controller.java | 566 +++++++++++ .../unified/ShardedCompactionWriter.java | 102 ++ .../unified/ShardedMultiWriter.java | 248 +++++ .../unified/UnifiedCompactionTask.java | 59 ++ src/java/org/apache/cassandra/dht/Range.java | 56 ++ .../org/apache/cassandra/dht/Splitter.java | 13 + .../cassandra/io/sstable/SSTableRewriter.java | 16 + .../io/sstable/format/SSTableReader.java | 1 + .../cassandra/schema/CompactionParams.java | 6 + .../apache/cassandra/utils/FBUtilities.java | 5 + .../org/apache/cassandra/utils/Overlaps.java | 212 ++++ .../db/compaction/CompactionsCQLTest.java | 12 + .../CorruptedSSTablesCompactionsTest.java | 12 +- .../db/compaction/ShardManagerTest.java | 408 ++++++++ .../UnifiedCompactionStrategyTest.java | 913 ++++++++++++++++++ .../db/compaction/unified/ControllerTest.java | 336 +++++++ .../unified/ShardedCompactionWriterTest.java | 293 ++++++ .../unified/ShardedMultiWriterTest.java | 139 +++ .../db/memtable/MemtableQuickTest.java | 5 +- 
.../apache/cassandra/utils/OverlapsTest.java | 372 +++++++ 29 files changed, 5919 insertions(+), 42 deletions(-) create mode 100644 src/java/org/apache/cassandra/db/compaction/ShardManager.java create mode 100644 src/java/org/apache/cassandra/db/compaction/ShardManagerDiskAware.java create mode 100644 src/java/org/apache/cassandra/db/compaction/ShardManagerNoDisks.java create mode 100644 src/java/org/apache/cassandra/db/compaction/ShardManagerTrivial.java create mode 100644 src/java/org/apache/cassandra/db/compaction/ShardTracker.java create mode 100644 src/java/org/apache/cassandra/db/compaction/UnifiedCompactionStrategy.java create mode 100644 src/java/org/apache/cassandra/db/compaction/UnifiedCompactionStrategy.md create mode 100644 src/java/org/apache/cassandra/db/compaction/unified/Controller.java create mode 100644 src/java/org/apache/cassandra/db/compaction/unified/ShardedCompactionWriter.java create mode 100644 src/java/org/apache/cassandra/db/compaction/unified/ShardedMultiWriter.java create mode 100644 src/java/org/apache/cassandra/db/compaction/unified/UnifiedCompactionTask.java create mode 100644 src/java/org/apache/cassandra/utils/Overlaps.java create mode 100644 test/unit/org/apache/cassandra/db/compaction/ShardManagerTest.java create mode 100644 test/unit/org/apache/cassandra/db/compaction/UnifiedCompactionStrategyTest.java create mode 100644 test/unit/org/apache/cassandra/db/compaction/unified/ControllerTest.java create mode 100644 test/unit/org/apache/cassandra/db/compaction/unified/ShardedCompactionWriterTest.java create mode 100644 test/unit/org/apache/cassandra/db/compaction/unified/ShardedMultiWriterTest.java create mode 100644 test/unit/org/apache/cassandra/utils/OverlapsTest.java diff --git a/src/java/org/apache/cassandra/config/CassandraRelevantProperties.java b/src/java/org/apache/cassandra/config/CassandraRelevantProperties.java index fbb20766fa2c..0ea4e2cc01cc 100644 --- 
a/src/java/org/apache/cassandra/config/CassandraRelevantProperties.java +++ b/src/java/org/apache/cassandra/config/CassandraRelevantProperties.java @@ -29,6 +29,7 @@ import org.apache.cassandra.db.virtual.LogMessagesTable; import org.apache.cassandra.exceptions.ConfigurationException; import org.apache.cassandra.service.FileSystemOwnershipCheck; +import org.apache.cassandra.utils.FBUtilities; import org.apache.cassandra.utils.StorageCompatibilityMode; // checkstyle: suppress below 'blockSystemPropertyUsage' @@ -516,6 +517,11 @@ public enum CassandraRelevantProperties TRIGGERS_DIR("cassandra.triggers_dir"), TRUNCATE_BALLOT_METADATA("cassandra.truncate_ballot_metadata"), TYPE_UDT_CONFLICT_BEHAVIOR("cassandra.type.udt.conflict_behavior"), + UCS_BASE_SHARD_COUNT("unified_compaction.base_shard_count", "4"), + UCS_OVERLAP_INCLUSION_METHOD("unified_compaction.overlap_inclusion_method"), + UCS_SCALING_PARAMETER("unified_compaction.scaling_parameters", "T4"), + UCS_SURVIVAL_FACTOR("unified_compaction.survival_factor", "1"), + UCS_TARGET_SSTABLE_SIZE("unified_compaction.target_sstable_size", "1GiB"), UDF_EXECUTOR_THREAD_KEEPALIVE_MS("cassandra.udf_executor_thread_keepalive_ms", "30000"), UNSAFE_SYSTEM("cassandra.unsafesystem"), /** User's home directory. */ @@ -725,6 +731,56 @@ public long getLong(long overrideDefaultValue) return LONG_CONVERTER.convert(value); } + /** + * Gets the value of a system property as a double. + * @return System property value if it exists, defaultValue otherwise. Throws an exception if no default value is set. + */ + public double getDouble() + { + String value = System.getProperty(key); + if (value == null && defaultVal == null) + throw new ConfigurationException("Missing property value or default value is not set: " + key); + return DOUBLE_CONVERTER.convert(value == null ? defaultVal : value); + } + + /** + * Gets the value of a system property as a double. + * @return system property long value if it exists, defaultValue otherwise. 
+ */ + public double getLong(double overrideDefaultValue) + { + String value = System.getProperty(key); + if (value == null) + return overrideDefaultValue; + + return DOUBLE_CONVERTER.convert(value); + } + + /** + * Gets the value of a system property, given as a human-readable size in bytes (e.g. 100MiB, 10GB, 500B). + * @return System property value if it exists, defaultValue otherwise. Throws an exception if no default value is set. + */ + public long getSizeInBytes() + { + String value = System.getProperty(key); + if (value == null && defaultVal == null) + throw new ConfigurationException("Missing property value or default value is not set: " + key); + return SIZE_IN_BYTES_CONVERTER.convert(value == null ? defaultVal : value); + } + + /** + * Gets the value of a system property, given as a human-readable size in bytes (e.g. 100MiB, 10GB, 500B). + * @return System property value if it exists, defaultValue otherwise. + */ + public long getSizeInBytes(long overrideDefaultValue) + { + String value = System.getProperty(key); + if (value == null) + return overrideDefaultValue; + + return SIZE_IN_BYTES_CONVERTER.convert(value); + } + /** * Gets the value of a system property as an int. * @return system property int value if it exists, overrideDefaultValue otherwise. 
@@ -847,6 +903,32 @@ public interface PropertyConverter } }; + private static final PropertyConverter SIZE_IN_BYTES_CONVERTER = value -> + { + try + { + return FBUtilities.parseHumanReadableBytes(value); + } + catch (ConfigurationException e) + { + throw new ConfigurationException(String.format("Invalid value for system property: " + + "expected size in bytes with unit but got '%s'\n%s", value, e)); + } + }; + + private static final PropertyConverter DOUBLE_CONVERTER = value -> + { + try + { + return Double.parseDouble(value); + } + catch (NumberFormatException e) + { + throw new ConfigurationException(String.format("Invalid value for system property: " + + "expected floating point value but got '%s'", value)); + } + }; + /** * @return whether a system property is present or not. */ diff --git a/src/java/org/apache/cassandra/db/ColumnFamilyStore.java b/src/java/org/apache/cassandra/db/ColumnFamilyStore.java index 4e3e8945bd5a..ef18097a0a22 100644 --- a/src/java/org/apache/cassandra/db/ColumnFamilyStore.java +++ b/src/java/org/apache/cassandra/db/ColumnFamilyStore.java @@ -64,7 +64,6 @@ import com.google.common.base.Predicates; import com.google.common.base.Strings; import com.google.common.base.Throwables; -import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableSet; import com.google.common.collect.Iterables; import com.google.common.collect.Lists; @@ -260,6 +259,9 @@ public enum FlushReason public static final String SNAPSHOT_DROP_PREFIX = "dropped"; static final String TOKEN_DELIMITER = ":"; + /** Special values used when the local ranges are not changed with ring changes (e.g. local tables). 
*/ + public static final int RING_VERSION_IRRELEVANT = -1; + static { try @@ -1456,64 +1458,82 @@ public void apply(PartitionUpdate update, UpdateTransaction indexer, OpOrder.Gro } } - @Override - public ShardBoundaries localRangeSplits(int shardCount) + public static class VersionedLocalRanges extends ArrayList { - if (shardCount == 1 || !getPartitioner().splitter().isPresent()) - return ShardBoundaries.NONE; + public final long ringVersion; - ShardBoundaries shardBoundaries = cachedShardBoundaries; + public VersionedLocalRanges(long ringVersion, int initialSize) + { + super(initialSize); + this.ringVersion = ringVersion; + } + } - if (shardBoundaries == null || - shardBoundaries.shardCount() != shardCount || - shardBoundaries.ringVersion != -1 && shardBoundaries.ringVersion != StorageService.instance.getTokenMetadata().getRingVersion()) + public VersionedLocalRanges localRangesWeighted() + { + if (!SchemaConstants.isLocalSystemKeyspace(getKeyspaceName()) + && getPartitioner() == StorageService.instance.getTokenMetadata().partitioner) { - List weightedRanges; - long ringVersion; - if (!SchemaConstants.isLocalSystemKeyspace(getKeyspaceName()) - && getPartitioner() == StorageService.instance.getTokenMetadata().partitioner) - { - DiskBoundaryManager.VersionedRangesAtEndpoint versionedLocalRanges = DiskBoundaryManager.getVersionedLocalRanges(this); - Set> localRanges = versionedLocalRanges.rangesAtEndpoint.ranges(); - ringVersion = versionedLocalRanges.ringVersion; + DiskBoundaryManager.VersionedRangesAtEndpoint versionedLocalRanges = DiskBoundaryManager.getVersionedLocalRanges(this); + Set> localRanges = versionedLocalRanges.rangesAtEndpoint.ranges(); + long ringVersion = versionedLocalRanges.ringVersion; - if (!localRanges.isEmpty()) - { - weightedRanges = new ArrayList<>(localRanges.size()); - for (Range r : localRanges) - { - // WeightedRange supports only unwrapped ranges as it relies - // on right - left == num tokens equality - for (Range u: r.unwrap()) - 
weightedRanges.add(new Splitter.WeightedRange(1.0, u)); - } - weightedRanges.sort(Comparator.comparing(Splitter.WeightedRange::left)); - } - else + if (!localRanges.isEmpty()) + { + VersionedLocalRanges weightedRanges = new VersionedLocalRanges(ringVersion, localRanges.size()); + for (Range r : localRanges) { - weightedRanges = fullWeightedRange(); + // WeightedRange supports only unwrapped ranges as it relies + // on right - left == num tokens equality + for (Range u: r.unwrap()) + weightedRanges.add(new Splitter.WeightedRange(1.0, u)); } + weightedRanges.sort(Comparator.comparing(Splitter.WeightedRange::left)); + return weightedRanges; } else { - // Local tables need to cover the full token range and don't care about ring changes. - // We also end up here if the table's partitioner is not the database's, which can happen in tests. - weightedRanges = fullWeightedRange(); - ringVersion = -1; + return fullWeightedRange(ringVersion, getPartitioner()); } + } + else + { + // Local tables need to cover the full token range and don't care about ring changes. + // We also end up here if the table's partitioner is not the database's, which can happen in tests. 
+ return fullWeightedRange(RING_VERSION_IRRELEVANT, getPartitioner()); + } + } + + @Override + public ShardBoundaries localRangeSplits(int shardCount) + { + if (shardCount == 1 || !getPartitioner().splitter().isPresent()) + return ShardBoundaries.NONE; + + ShardBoundaries shardBoundaries = cachedShardBoundaries; + + if (shardBoundaries == null || + shardBoundaries.shardCount() != shardCount || + (shardBoundaries.ringVersion != RING_VERSION_IRRELEVANT && + shardBoundaries.ringVersion != StorageService.instance.getTokenMetadata().getRingVersion())) + { + VersionedLocalRanges weightedRanges = localRangesWeighted(); List boundaries = getPartitioner().splitter().get().splitOwnedRanges(shardCount, weightedRanges, false); shardBoundaries = new ShardBoundaries(boundaries.subList(0, boundaries.size() - 1), - ringVersion); + weightedRanges.ringVersion); cachedShardBoundaries = shardBoundaries; logger.debug("Memtable shard boundaries for {}.{}: {}", getKeyspaceName(), getTableName(), boundaries); } return shardBoundaries; } - private ImmutableList fullWeightedRange() + @VisibleForTesting + public static VersionedLocalRanges fullWeightedRange(long ringVersion, IPartitioner partitioner) { - return ImmutableList.of(new Splitter.WeightedRange(1.0, new Range<>(getPartitioner().getMinimumToken(), getPartitioner().getMaximumToken()))); + VersionedLocalRanges ranges = new VersionedLocalRanges(ringVersion, 1); + ranges.add(new Splitter.WeightedRange(1.0, new Range<>(partitioner.getMinimumToken(), partitioner.getMinimumToken()))); + return ranges; } /** diff --git a/src/java/org/apache/cassandra/db/compaction/ShardManager.java b/src/java/org/apache/cassandra/db/compaction/ShardManager.java new file mode 100644 index 000000000000..a58be340a97a --- /dev/null +++ b/src/java/org/apache/cassandra/db/compaction/ShardManager.java @@ -0,0 +1,154 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.cassandra.db.compaction; + +import java.util.Set; +import java.util.stream.Collectors; + +import com.google.common.collect.ImmutableList; + +import org.apache.cassandra.db.ColumnFamilyStore; +import org.apache.cassandra.db.PartitionPosition; +import org.apache.cassandra.dht.IPartitioner; +import org.apache.cassandra.dht.Range; +import org.apache.cassandra.dht.Token; +import org.apache.cassandra.io.sstable.format.SSTableReader; + +public interface ShardManager +{ + /** + * Single-partition, and generally sstables with very few partitions, can cover very small sections of the token + * space, resulting in very high densities. + * Additionally, sstables that have completely fallen outside of the local token ranges will end up with a zero + * coverage. + * To avoid problems with both we check if coverage is below the minimum, and replace it with 1. 
+ */ + static final double MINIMUM_TOKEN_COVERAGE = Math.scalb(1.0, -48); + + static ShardManager create(ColumnFamilyStore cfs) + { + final ImmutableList diskPositions = cfs.getDiskBoundaries().positions; + ColumnFamilyStore.VersionedLocalRanges localRanges = cfs.localRangesWeighted(); + IPartitioner partitioner = cfs.getPartitioner(); + + if (diskPositions != null && diskPositions.size() > 1) + return new ShardManagerDiskAware(localRanges, diskPositions.stream() + .map(PartitionPosition::getToken) + .collect(Collectors.toList())); + else if (partitioner.splitter().isPresent()) + return new ShardManagerNoDisks(localRanges); + else + return new ShardManagerTrivial(partitioner); + } + + boolean isOutOfDate(long ringVersion); + + /** + * The token range fraction spanned by the given range, adjusted for the local range ownership. + */ + double rangeSpanned(Range tableRange); + + /** + * The total fraction of the local space covered by the local ranges. + */ + double localSpaceCoverage(); + + /** + * Construct a boundary/shard iterator for the given number of shards. + * + * Note: This does not offer a method of listing the shard boundaries it generates, just to advance to the + * corresponding one for a given token. The only usage for listing is currently in tests. Should a need for this + * arise, see {@link CompactionSimulationTest} for a possible implementation. + */ + ShardTracker boundaries(int shardCount); + + static Range coveringRange(SSTableReader sstable) + { + return coveringRange(sstable.getFirst(), sstable.getLast()); + } + + static Range coveringRange(PartitionPosition first, PartitionPosition last) + { + // To include the token of last, the range's upper bound must be increased. + return new Range<>(first.getToken(), last.getToken().nextValidToken()); + } + + + /** + * Return the token space share that the given SSTable spans, excluding any non-locally owned space. + * Returns a positive floating-point number between 0 and 1. 
+ */ + default double rangeSpanned(SSTableReader rdr) + { + double span = rangeSpanned(rdr.getFirst(), rdr.getLast()); + + if (span >= MINIMUM_TOKEN_COVERAGE) + return span; + + // Too small ranges are expected to be the result of either a single-partition sstable or falling outside + // of the local token ranges. In these cases we substitute it with 1 because for them sharding and density + // tiering does not make sense. + return 1.0; // This will be chosen if span is NaN too. + } + + default double rangeSpanned(PartitionPosition first, PartitionPosition last) + { + return rangeSpanned(ShardManager.coveringRange(first, last)); + } + + /** + * Return the density of an SSTable, i.e. its size divided by the covered token space share. + * This is an improved measure of the compaction age of an SSTable that grows both with STCS-like full-SSTable + * compactions (where size grows, share is constant), LCS-like size-threshold splitting (where size is constant + * but share shrinks), UCS-like compactions (where size may grow and covered shards i.e. share may decrease) + * and can reproduce levelling structure that corresponds to all, including their mixtures. + */ + default double density(SSTableReader rdr) + { + return rdr.onDiskLength() / rangeSpanned(rdr); + } + + default int compareByDensity(SSTableReader a, SSTableReader b) + { + return Double.compare(density(a), density(b)); + } + + /** + * Estimate the density of the sstable that will be the result of compacting the given sources. + */ + default double calculateCombinedDensity(Set sstables) + { + if (sstables.isEmpty()) + return 0; + long onDiskLength = 0; + PartitionPosition min = null; + PartitionPosition max = null; + for (SSTableReader sstable : sstables) + { + onDiskLength += sstable.onDiskLength(); + min = min == null || min.compareTo(sstable.getFirst()) > 0 ? sstable.getFirst() : min; + max = max == null || max.compareTo(sstable.getLast()) < 0 ? 
sstable.getLast() : max; + } + double span = rangeSpanned(min, max); + if (span >= MINIMUM_TOKEN_COVERAGE) + return onDiskLength / span; + else + return onDiskLength; + } +} diff --git a/src/java/org/apache/cassandra/db/compaction/ShardManagerDiskAware.java b/src/java/org/apache/cassandra/db/compaction/ShardManagerDiskAware.java new file mode 100644 index 000000000000..3fff8beaf39c --- /dev/null +++ b/src/java/org/apache/cassandra/db/compaction/ShardManagerDiskAware.java @@ -0,0 +1,220 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.cassandra.db.compaction; + +import java.util.Collections; +import java.util.List; + +import javax.annotation.Nullable; + +import org.apache.cassandra.db.ColumnFamilyStore; +import org.apache.cassandra.dht.Range; +import org.apache.cassandra.dht.Splitter; +import org.apache.cassandra.dht.Token; + +public class ShardManagerDiskAware extends ShardManagerNoDisks +{ + /** + * Positions for the disk boundaries, in covered token range. The last number defines the total token + * share owned by the node. 
+ */ + private final double[] diskBoundaryPositions; + private final int[] diskStartRangeIndex; + private final List diskBoundaries; + + public ShardManagerDiskAware(ColumnFamilyStore.VersionedLocalRanges localRanges, List diskBoundaries) + { + super(localRanges); + assert diskBoundaries != null && !diskBoundaries.isEmpty(); + this.diskBoundaries = diskBoundaries; + + double position = 0; + final List ranges = localRanges; + int diskIndex = 0; + diskBoundaryPositions = new double[diskBoundaries.size()]; + diskStartRangeIndex = new int[diskBoundaryPositions.length]; + diskStartRangeIndex[0] = 0; + + for (int i = 0; i < localRangePositions.length; ++i) + { + Range range = ranges.get(i).range(); + double weight = ranges.get(i).weight(); + double span = localRangePositions[i] - position; + + Token diskBoundary = diskBoundaries.get(diskIndex); + while (diskIndex < diskBoundaryPositions.length - 1 && (range.right.isMinimum() || diskBoundary.compareTo(range.right) < 0)) + { + double leftPart = range.left.size(diskBoundary) * weight; + if (leftPart > span) // if the boundary falls on left or before it + leftPart = 0; + diskBoundaryPositions[diskIndex] = position + leftPart; + diskStartRangeIndex[diskIndex + 1] = i; + ++diskIndex; + diskBoundary = diskBoundaries.get(diskIndex); + } + + position += span; + } + diskBoundaryPositions[diskIndex] = position; + assert diskIndex + 1 == diskBoundaryPositions.length : "Disk boundaries are not within local ranges"; + } + + /** + * Construct a boundary/shard iterator for the given number of shards. 
+ */ + public ShardTracker boundaries(int shardCount) + { + return new BoundaryTrackerDiskAware(shardCount); + } + + public class BoundaryTrackerDiskAware implements ShardTracker + { + private final int countPerDisk; + private double shardStep; + private double diskStart; + private int diskIndex; + private int nextShardIndex; + private int currentRange; + private Token currentStart; + @Nullable + private Token currentEnd; // null for the last shard + + public BoundaryTrackerDiskAware(int countPerDisk) + { + this.countPerDisk = countPerDisk; + currentStart = localRanges.get(0).left(); + diskIndex = -1; + } + + void enterDisk(int diskIndex) + { + this.diskIndex = diskIndex; + currentRange = 0; + diskStart = diskIndex > 0 ? diskBoundaryPositions[diskIndex - 1] : 0; + shardStep = (diskBoundaryPositions[diskIndex] - diskStart) / countPerDisk; + nextShardIndex = 1; + } + + private Token getEndToken(double toPos) + { + double left = currentRange > 0 ? localRangePositions[currentRange - 1] : 0; + double right = localRangePositions[currentRange]; + while (toPos > right) + { + left = right; + right = localRangePositions[++currentRange]; + } + + final Range range = localRanges.get(currentRange).range(); + return currentStart.getPartitioner().split(range.left, range.right, (toPos - left) / (right - left)); + } + + public Token shardStart() + { + return currentStart; + } + + public Token shardEnd() + { + return currentEnd; + } + + public Range shardSpan() + { + return new Range<>(currentStart, currentEnd != null ? currentEnd : currentStart.minValue()); + } + + public double shardSpanSize() + { + return shardStep; + } + + /** + * Advance to the given token (e.g. before writing a key). Returns true if this resulted in advancing to a new + * shard, and false otherwise. 
+ */ + public boolean advanceTo(Token nextToken) + { + if (diskIndex < 0) + { + int search = Collections.binarySearch(diskBoundaries, nextToken); + if (search < 0) + search = -1 - search; + // otherwise (on equal) we are good as ranges are end-inclusive + enterDisk(search); + setEndToken(); + } + + if (currentEnd == null || nextToken.compareTo(currentEnd) <= 0) + return false; + do + { + currentStart = currentEnd; + if (nextShardIndex == countPerDisk) + enterDisk(diskIndex + 1); + else + ++nextShardIndex; + + setEndToken(); + } + while (!(currentEnd == null || nextToken.compareTo(currentEnd) <= 0)); + return true; + } + + private void setEndToken() + { + if (nextShardIndex == countPerDisk) + { + if (diskIndex + 1 == diskBoundaryPositions.length) + currentEnd = null; + else + currentEnd = diskBoundaries.get(diskIndex); + } + else + currentEnd = getEndToken(diskStart + shardStep * nextShardIndex); + } + + public int count() + { + return countPerDisk; + } + + /** + * Returns the fraction of the given token range's coverage that falls within this shard. + * E.g. if the span covers two shards exactly and the current shard is one of them, it will return 0.5. + */ + public double fractionInShard(Range targetSpan) + { + Range shardSpan = shardSpan(); + Range covered = targetSpan.intersectionNonWrapping(shardSpan); + if (covered == null) + return 0; + if (covered == targetSpan) + return 1; + double inShardSize = covered == shardSpan ? 
shardSpanSize() : ShardManagerDiskAware.this.rangeSpanned(covered); + double totalSize = ShardManagerDiskAware.this.rangeSpanned(targetSpan); + return inShardSize / totalSize; + } + + public int shardIndex() + { + return nextShardIndex - 1; + } + } +} diff --git a/src/java/org/apache/cassandra/db/compaction/ShardManagerNoDisks.java b/src/java/org/apache/cassandra/db/compaction/ShardManagerNoDisks.java new file mode 100644 index 000000000000..618c27d1d7fc --- /dev/null +++ b/src/java/org/apache/cassandra/db/compaction/ShardManagerNoDisks.java @@ -0,0 +1,201 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.cassandra.db.compaction; + +import java.util.List; + +import javax.annotation.Nullable; + +import org.apache.cassandra.db.ColumnFamilyStore; +import org.apache.cassandra.dht.Range; +import org.apache.cassandra.dht.Splitter; +import org.apache.cassandra.dht.Token; + +public class ShardManagerNoDisks implements ShardManager +{ + final ColumnFamilyStore.VersionedLocalRanges localRanges; + + /** + * Ending positions for the local token ranges, in covered token range; in other words, the accumulated share of + * the local ranges up and including the given index. 
+ * The last number defines the total token share owned by the node. + */ + final double[] localRangePositions; + + public ShardManagerNoDisks(ColumnFamilyStore.VersionedLocalRanges localRanges) + { + this.localRanges = localRanges; + double position = 0; + final List ranges = localRanges; + localRangePositions = new double[ranges.size()]; + for (int i = 0; i < localRangePositions.length; ++i) + { + double span = ranges.get(i).size(); + position += span; + localRangePositions[i] = position; + } + } + + public boolean isOutOfDate(long ringVersion) + { + return ringVersion != localRanges.ringVersion && + localRanges.ringVersion != ColumnFamilyStore.RING_VERSION_IRRELEVANT; + } + + @Override + public double rangeSpanned(Range tableRange) + { + assert !tableRange.isTrulyWrapAround(); + return rangeSizeNonWrapping(tableRange); + } + + private double rangeSizeNonWrapping(Range tableRange) + { + double size = 0; + for (Splitter.WeightedRange range : localRanges) + { + Range ix = range.range().intersectionNonWrapping(tableRange); // local and table ranges are non-wrapping + if (ix == null) + continue; + size += ix.left.size(ix.right) * range.weight(); + } + return size; + } + + @Override + public double localSpaceCoverage() + { + return localRangePositions[localRangePositions.length - 1]; + } + + @Override + public ShardTracker boundaries(int shardCount) + { + return new BoundaryTracker(shardCount); + } + + public class BoundaryTracker implements ShardTracker + { + private final double rangeStep; + private final int count; + private int nextShardIndex; + private int currentRange; + private Token currentStart; + @Nullable + private Token currentEnd; // null for the last shard + + public BoundaryTracker(int count) + { + this.count = count; + rangeStep = localSpaceCoverage() / count; + currentStart = localRanges.get(0).left(); + currentRange = 0; + nextShardIndex = 1; + if (nextShardIndex == count) + currentEnd = null; + else + currentEnd = getEndToken(rangeStep * 
nextShardIndex); + } + + private Token getEndToken(double toPos) + { + double left = currentRange > 0 ? localRangePositions[currentRange - 1] : 0; + double right = localRangePositions[currentRange]; + while (toPos > right) + { + left = right; + right = localRangePositions[++currentRange]; + } + + final Range range = localRanges.get(currentRange).range(); + return currentStart.getPartitioner().split(range.left, range.right, (toPos - left) / (right - left)); + } + + @Override + public Token shardStart() + { + return currentStart; + } + + @Override + public Token shardEnd() + { + return currentEnd; + } + + @Override + public Range shardSpan() + { + return new Range<>(currentStart, currentEnd != null ? currentEnd + : currentStart.getPartitioner().getMinimumToken()); + } + + @Override + public double shardSpanSize() + { + return rangeStep; + } + + @Override + public boolean advanceTo(Token nextToken) + { + if (currentEnd == null || nextToken.compareTo(currentEnd) <= 0) + return false; + do + { + currentStart = currentEnd; + if (++nextShardIndex == count) + currentEnd = null; + else + currentEnd = getEndToken(rangeStep * nextShardIndex); + } + while (!(currentEnd == null || nextToken.compareTo(currentEnd) <= 0)); + return true; + } + + @Override + public int count() + { + return count; + } + + @Override + public double fractionInShard(Range targetSpan) + { + Range shardSpan = shardSpan(); + Range covered = targetSpan.intersectionNonWrapping(shardSpan); + if (covered == null) + return 0; + // If one of the ranges is completely subsumed in the other, intersectionNonWrapping returns that range. + // We take advantage of this in the shortcuts below (note that if they are equal but not the same, the + // path below will still return the expected result). + if (covered == targetSpan) + return 1; + double inShardSize = covered == shardSpan ? 
shardSpanSize() : ShardManagerNoDisks.this.rangeSpanned(covered); + double totalSize = ShardManagerNoDisks.this.rangeSpanned(targetSpan); + return inShardSize / totalSize; + } + + @Override + public int shardIndex() + { + return nextShardIndex - 1; + } + } +} diff --git a/src/java/org/apache/cassandra/db/compaction/ShardManagerTrivial.java b/src/java/org/apache/cassandra/db/compaction/ShardManagerTrivial.java new file mode 100644 index 000000000000..7678ca1a230c --- /dev/null +++ b/src/java/org/apache/cassandra/db/compaction/ShardManagerTrivial.java @@ -0,0 +1,135 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.cassandra.db.compaction; + +import java.util.Set; + +import org.apache.cassandra.dht.IPartitioner; +import org.apache.cassandra.dht.Range; +import org.apache.cassandra.dht.Token; +import org.apache.cassandra.io.sstable.format.SSTableReader; + +public class ShardManagerTrivial implements ShardManager +{ + private final IPartitioner partitioner; + + public ShardManagerTrivial(IPartitioner partitioner) + { + this.partitioner = partitioner; + } + + public boolean isOutOfDate(long ringVersion) + { + // We don't do any routing, always up to date + return false; + } + + @Override + public double rangeSpanned(Range tableRange) + { + return 1; + } + + @Override + public double rangeSpanned(SSTableReader rdr) + { + return 1; + } + + @Override + public double calculateCombinedDensity(Set sstables) + { + double totalSize = 0; + for (SSTableReader sstable : sstables) + totalSize += sstable.onDiskLength(); + return totalSize; + } + + @Override + public double localSpaceCoverage() + { + return 1; + } + + ShardTracker iterator = new ShardTracker() + { + @Override + public Token shardStart() + { + return partitioner.getMinimumToken(); + } + + @Override + public Token shardEnd() + { + return partitioner.getMinimumToken(); + } + + @Override + public Range shardSpan() + { + return new Range<>(partitioner.getMinimumToken(), partitioner.getMinimumToken()); + } + + @Override + public double shardSpanSize() + { + return 1; + } + + @Override + public boolean advanceTo(Token nextToken) + { + return false; + } + + @Override + public int count() + { + return 1; + } + + @Override + public double fractionInShard(Range targetSpan) + { + return 1; + } + + @Override + public int shardIndex() + { + return 0; + } + + @Override + public long shardAdjustedKeyCount(Set sstables) + { + long shardAdjustedKeyCount = 0; + for (SSTableReader sstable : sstables) + shardAdjustedKeyCount += sstable.estimatedKeys(); + return shardAdjustedKeyCount; + } + }; + + @Override + public 
ShardTracker boundaries(int shardCount) + { + return iterator; + } +} diff --git a/src/java/org/apache/cassandra/db/compaction/ShardTracker.java b/src/java/org/apache/cassandra/db/compaction/ShardTracker.java new file mode 100644 index 000000000000..6f8be3237e86 --- /dev/null +++ b/src/java/org/apache/cassandra/db/compaction/ShardTracker.java @@ -0,0 +1,63 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.cassandra.db.compaction; + +import java.util.Set; +import javax.annotation.Nullable; + +import org.apache.cassandra.dht.Range; +import org.apache.cassandra.dht.Token; +import org.apache.cassandra.io.sstable.format.SSTableReader; + +public interface ShardTracker +{ + Token shardStart(); + + @Nullable + Token shardEnd(); + + Range shardSpan(); + + double shardSpanSize(); + + /** + * Advance to the given token (e.g. before writing a key). Returns true if this resulted in advancing to a new + * shard, and false otherwise. + */ + boolean advanceTo(Token nextToken); + + int count(); + + /** + * Returns the fraction of the given token range's coverage that falls within this shard. + * E.g. if the span covers two shards exactly and the current shard is one of them, it will return 0.5. 
+ */ + double fractionInShard(Range targetSpan); + + int shardIndex(); + + default long shardAdjustedKeyCount(Set sstables) + { + // Note: computationally non-trivial; can be optimized if we save start/stop shards and size per table. + long shardAdjustedKeyCount = 0; + for (SSTableReader sstable : sstables) + shardAdjustedKeyCount += sstable.estimatedKeys() * fractionInShard(ShardManager.coveringRange(sstable)); + return shardAdjustedKeyCount; + } +} diff --git a/src/java/org/apache/cassandra/db/compaction/UnifiedCompactionStrategy.java b/src/java/org/apache/cassandra/db/compaction/UnifiedCompactionStrategy.java new file mode 100644 index 000000000000..a8427a84f4ad --- /dev/null +++ b/src/java/org/apache/cassandra/db/compaction/UnifiedCompactionStrategy.java @@ -0,0 +1,865 @@ +/* + * Copyright DataStax, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.cassandra.db.compaction; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; +import com.google.common.base.Predicate; +import com.google.common.collect.ImmutableSet; +import com.google.common.collect.Sets; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.cassandra.db.ColumnFamilyStore; +import org.apache.cassandra.db.SerializationHeader; +import org.apache.cassandra.db.commitlog.CommitLogPosition; +import org.apache.cassandra.db.commitlog.IntervalSet; +import org.apache.cassandra.db.compaction.unified.Controller; +import org.apache.cassandra.db.compaction.unified.ShardedMultiWriter; +import org.apache.cassandra.db.compaction.unified.UnifiedCompactionTask; +import org.apache.cassandra.db.lifecycle.LifecycleNewTracker; +import org.apache.cassandra.db.lifecycle.LifecycleTransaction; +import org.apache.cassandra.exceptions.ConfigurationException; +import org.apache.cassandra.index.Index; +import org.apache.cassandra.io.sstable.Descriptor; +import org.apache.cassandra.io.sstable.SSTableMultiWriter; +import org.apache.cassandra.io.sstable.format.SSTableReader; +import org.apache.cassandra.schema.TableMetadata; +import org.apache.cassandra.service.StorageService; +import org.apache.cassandra.utils.Clock; +import org.apache.cassandra.utils.FBUtilities; +import org.apache.cassandra.utils.Overlaps; +import org.apache.cassandra.utils.TimeUUID; + +/** + * The design of the unified compaction strategy is described in the accompanying UnifiedCompactionStrategy.md. 
+ * + * See CEP-26: https://cwiki.apache.org/confluence/display/CASSANDRA/CEP-26%3A+Unified+Compaction+Strategy + */ +public class UnifiedCompactionStrategy extends AbstractCompactionStrategy +{ + private static final Logger logger = LoggerFactory.getLogger(UnifiedCompactionStrategy.class); + + static final int MAX_LEVELS = 32; // This is enough for a few petabytes of data (with the worst case fan factor + // at W=0 this leaves room for 2^32 sstables, presumably of at least 1MB each). + + private static final Pattern SCALING_PARAMETER_PATTERN = Pattern.compile("(N)|L(\\d+)|T(\\d+)|([+-]?\\d+)"); + private static final String SCALING_PARAMETER_PATTERN_SIMPLIFIED = SCALING_PARAMETER_PATTERN.pattern() + .replaceAll("[()]", "") + + .replace("\\d", "[0-9]"); + + private final Controller controller; + + private volatile ShardManager shardManager; + + private long lastExpiredCheck; + + protected volatile int estimatedRemainingTasks; + @VisibleForTesting + protected final Set sstables = new HashSet<>(); + + public UnifiedCompactionStrategy(ColumnFamilyStore cfs, Map options) + { + this(cfs, options, Controller.fromOptions(cfs, options)); + } + + public UnifiedCompactionStrategy(ColumnFamilyStore cfs, Map options, Controller controller) + { + super(cfs, options); + this.controller = controller; + estimatedRemainingTasks = 0; + } + + public static Map validateOptions(Map options) throws ConfigurationException + { + return Controller.validateOptions(AbstractCompactionStrategy.validateOptions(options)); + } + + public static int fanoutFromScalingParameter(int w) + { + return w < 0 ? 2 - w : 2 + w; // see formula in design doc + } + + public static int thresholdFromScalingParameter(int w) + { + return w <= 0 ? 
2 : 2 + w; // see formula in design doc + } + + public static int parseScalingParameter(String value) + { + Matcher m = SCALING_PARAMETER_PATTERN.matcher(value); + if (!m.matches()) + throw new ConfigurationException("Scaling parameter " + value + " must match " + SCALING_PARAMETER_PATTERN_SIMPLIFIED); + + if (m.group(1) != null) + return 0; + else if (m.group(2) != null) + return 2 - atLeast2(Integer.parseInt(m.group(2)), value); + else if (m.group(3) != null) + return atLeast2(Integer.parseInt(m.group(3)), value) - 2; + else + return Integer.parseInt(m.group(4)); + } + + private static int atLeast2(int value, String str) + { + if (value < 2) + throw new ConfigurationException("Fan factor cannot be lower than 2 in " + str); + return value; + } + + public static String printScalingParameter(int w) + { + if (w < 0) + return "L" + Integer.toString(2 - w); + else if (w > 0) + return "T" + Integer.toString(w + 2); + else + return "N"; + } + + @Override + public synchronized Collection getMaximalTask(long gcBefore, boolean splitOutput) + { + maybeUpdateShardManager(); + // The tasks are split by repair status and disk, as well as in non-overlapping sections to enable some + // parallelism (to the amount that L0 sstables are split, i.e. at least base_shard_count). The result will be + // split across shards according to its density. Depending on the parallelism, the operation may require up to + // 100% extra space to complete. 
+ List tasks = new ArrayList<>(); + List> nonOverlapping = splitInNonOverlappingSets(filterSuspectSSTables(getSSTables())); + for (Set set : nonOverlapping) + { + @SuppressWarnings("resource") // closed by the returned task + LifecycleTransaction txn = cfs.getTracker().tryModify(set, OperationType.COMPACTION); + if (txn != null) + tasks.add(createCompactionTask(txn, gcBefore)); + } + return tasks; + } + + private static List> splitInNonOverlappingSets(Collection sstables) + { + List> overlapSets = Overlaps.constructOverlapSets(new ArrayList<>(sstables), + UnifiedCompactionStrategy::startsAfter, + SSTableReader.firstKeyComparator, + SSTableReader.lastKeyComparator); + if (overlapSets.isEmpty()) + return overlapSets; + + Set group = overlapSets.get(0); + List> groups = new ArrayList<>(); + for (int i = 1; i < overlapSets.size(); ++i) + { + Set current = overlapSets.get(i); + if (Sets.intersection(current, group).isEmpty()) + { + groups.add(group); + group = current; + } + else + { + group.addAll(current); + } + } + groups.add(group); + return groups; + } + + @Override + @SuppressWarnings("resource") // transaction closed by the returned task + public AbstractCompactionTask getUserDefinedTask(Collection sstables, final long gcBefore) + { + assert !sstables.isEmpty(); // checked for by CM.submitUserDefined + + LifecycleTransaction transaction = cfs.getTracker().tryModify(sstables, OperationType.COMPACTION); + if (transaction == null) + { + logger.trace("Unable to mark {} for compaction; probably a background compaction got to it first. You can disable background compactions temporarily if this is a problem", sstables); + return null; + } + + return createCompactionTask(transaction, gcBefore).setUserDefined(true); + } + + /** + * Returns a compaction task to run next. + * + * This method is synchronized because task creation is significantly more expensive in UCS; the strategy is + * stateless, therefore it has to compute the shard/bucket structure on each call. 
+ * + * @param gcBefore throw away tombstones older than this + */ + @Override + public synchronized UnifiedCompactionTask getNextBackgroundTask(long gcBefore) + { + controller.onStrategyBackgroundTaskRequest(); + + while (true) + { + CompactionPick pick = getNextCompactionPick(gcBefore); + if (pick == null) + return null; + UnifiedCompactionTask task = createCompactionTask(pick, gcBefore); + if (task != null) + return task; + } + } + + @SuppressWarnings("resource") // transaction closed by the returned task + private UnifiedCompactionTask createCompactionTask(CompactionPick pick, long gcBefore) + { + Preconditions.checkNotNull(pick); + Preconditions.checkArgument(!pick.isEmpty()); + + LifecycleTransaction transaction = cfs.getTracker().tryModify(pick, + OperationType.COMPACTION); + if (transaction != null) + { + return createCompactionTask(transaction, gcBefore); + } + else + { + // This can happen e.g. due to a race with upgrade tasks + logger.error("Failed to submit compaction {} because a transaction could not be created. If this happens frequently, it should be reported", pick); + // FIXME: Needs the sstable removal race fix + return null; + } + } + + /** + * Create the sstable writer used for flushing. + * + * @return an sstable writer that will split sstables into a number of shards as calculated by the controller for + * the expected flush density. 
+ */ + @Override + public SSTableMultiWriter createSSTableMultiWriter(Descriptor descriptor, + long keyCount, + long repairedAt, + TimeUUID pendingRepair, + boolean isTransient, + IntervalSet commitLogPositions, + int sstableLevel, + SerializationHeader header, + Collection indexes, + LifecycleNewTracker lifecycleNewTracker) + { + // FIXME: needs the metadata collector fix + ShardManager shardManager = getShardManager(); + double flushDensity = cfs.metric.flushSizeOnDisk.get() / shardManager.localSpaceCoverage(); + ShardTracker boundaries = shardManager.boundaries(controller.getNumShards(flushDensity)); + return new ShardedMultiWriter(cfs, + descriptor, + keyCount, + repairedAt, + pendingRepair, + isTransient, + commitLogPositions, + header, + indexes, + lifecycleNewTracker, + boundaries); + } + + /** + * Create the task that in turns creates the sstable writer used for compaction. + * + * @return a sharded compaction task that in turn will create a sharded compaction writer. + */ + private UnifiedCompactionTask createCompactionTask(LifecycleTransaction transaction, long gcBefore) + { + return new UnifiedCompactionTask(cfs, this, transaction, gcBefore, getShardManager()); + } + + private void maybeUpdateShardManager() + { + if (shardManager != null && !shardManager.isOutOfDate(StorageService.instance.getTokenMetadata().getRingVersion())) + return; // the disk boundaries (and thus the local ranges too) have not changed since the last time we calculated + + synchronized (this) + { + // Recheck after entering critical section, another thread may have beaten us to it. + while (shardManager == null || shardManager.isOutOfDate(StorageService.instance.getTokenMetadata().getRingVersion())) + shardManager = ShardManager.create(cfs); + // Note: this can just as well be done without the synchronization (races would be benign, just doing some + // redundant work). For the current usages of this blocking is fine and expected to perform no worse. 
+ } + } + + @VisibleForTesting + ShardManager getShardManager() + { + maybeUpdateShardManager(); + return shardManager; + } + + /** + * Selects a compaction to run next. + */ + @VisibleForTesting + CompactionPick getNextCompactionPick(long gcBefore) + { + SelectionContext context = new SelectionContext(controller); + List suitable = getCompactableSSTables(getSSTables(), UnifiedCompactionStrategy::isSuitableForCompaction); + Set expired = maybeGetExpiredSSTables(gcBefore, suitable); + suitable.removeAll(expired); + + CompactionPick selected = chooseCompactionPick(suitable, context); + estimatedRemainingTasks = context.estimatedRemainingTasks; + if (selected == null) + { + if (expired.isEmpty()) + return null; + else + return new CompactionPick(-1, -1, expired); + } + + selected.addAll(expired); + return selected; + } + + private Set maybeGetExpiredSSTables(long gcBefore, List suitable) + { + Set expired; + long ts = Clock.Global.currentTimeMillis(); + if (ts - lastExpiredCheck > controller.getExpiredSSTableCheckFrequency()) + { + lastExpiredCheck = ts; + expired = CompactionController.getFullyExpiredSSTables(cfs, + suitable, + cfs.getOverlappingLiveSSTables(suitable), + gcBefore, + controller.getIgnoreOverlapsInExpirationCheck()); + if (logger.isTraceEnabled() && !expired.isEmpty()) + logger.trace("Expiration check for {}.{} found {} fully expired SSTables", + cfs.getKeyspaceName(), + cfs.getTableName(), + expired.size()); + } + else + expired = Collections.emptySet(); + return expired; + } + + private CompactionPick chooseCompactionPick(List suitable, SelectionContext context) + { + // Select the level with the highest overlap; when multiple levels have the same overlap, prefer the lower one + // (i.e. reduction of RA for bigger token coverage). 
+ int maxOverlap = -1; + CompactionPick selected = null; + for (Level level : formLevels(suitable)) + { + CompactionPick pick = level.getCompactionPick(context); + int levelOverlap = level.maxOverlap; + if (levelOverlap > maxOverlap) + { + maxOverlap = levelOverlap; + selected = pick; + } + } + if (logger.isDebugEnabled() && selected != null) + logger.debug("Selected compaction on level {} overlap {} sstables {}", + selected.level, selected.overlap, selected.size()); + + return selected; + } + + @Override + public int getEstimatedRemainingTasks() + { + return estimatedRemainingTasks; + } + + @Override + public long getMaxSSTableBytes() + { + return Long.MAX_VALUE; + } + + @VisibleForTesting + public Controller getController() + { + return controller; + } + + public static boolean isSuitableForCompaction(SSTableReader rdr) + { + return !rdr.isMarkedSuspect() && rdr.openReason != SSTableReader.OpenReason.EARLY; + } + + @Override + public synchronized void addSSTable(SSTableReader added) + { + sstables.add(added); + } + + @Override + public synchronized void removeSSTable(SSTableReader sstable) + { + sstables.remove(sstable); + } + + @Override + protected synchronized Set getSSTables() + { + return ImmutableSet.copyOf(sstables); + } + + /** + * @return a LinkedHashMap of arenas with buckets where order of arenas are preserved + */ + @VisibleForTesting + List getLevels() + { + return getLevels(getSSTables(), UnifiedCompactionStrategy::isSuitableForCompaction); + } + + /** + * Groups the sstables passed in into arenas and buckets. This is used by the strategy to determine + * new compactions, and by external tools in CNDB to analyze the strategy decisions. 
+ * + * @param sstables a collection of the sstables to be assigned to arenas + * @param compactionFilter a filter to exclude CompactionSSTables, + * e.g., {@link #isSuitableForCompaction} + * + * @return a map of arenas to their buckets + */ + public List getLevels(Collection sstables, + Predicate compactionFilter) + { + List suitable = getCompactableSSTables(sstables, compactionFilter); + return formLevels(suitable); + } + + private List formLevels(List suitable) + { + maybeUpdateShardManager(); + List levels = new ArrayList<>(MAX_LEVELS); + suitable.sort(shardManager::compareByDensity); + + double maxSize = controller.getMaxLevelDensity(0, controller.getBaseSstableSize(controller.getFanout(0)) / shardManager.localSpaceCoverage()); + int index = 0; + Level level = new Level(controller, index, 0, maxSize); + for (SSTableReader candidate : suitable) + { + final double size = shardManager.density(candidate); + if (size < level.max) + { + level.add(candidate); + continue; + } + + level.complete(); + levels.add(level); // add even if empty + + while (true) + { + ++index; + double minSize = maxSize; + maxSize = controller.getMaxLevelDensity(index, minSize); + level = new Level(controller, index, minSize, maxSize); + if (size < level.max) + { + level.add(candidate); + break; + } + else + { + levels.add(level); // add the empty level + } + } + } + + if (!level.sstables.isEmpty()) + { + level.complete(); + levels.add(level); + } + + return levels; + } + + private List getCompactableSSTables(Collection sstables, + Predicate compactionFilter) + { + Set compacting = cfs.getTracker().getCompacting(); + List suitable = new ArrayList<>(sstables.size()); + for (SSTableReader rdr : sstables) + { + if (compactionFilter.test(rdr) && !compacting.contains(rdr)) + suitable.add(rdr); + } + return suitable; + } + + public TableMetadata getMetadata() + { + return cfs.metadata(); + } + + private static boolean startsAfter(SSTableReader a, SSTableReader b) + { + // Strict comparison 
because the span is end-inclusive. + return a.getFirst().compareTo(b.getLast()) > 0; + } + + @Override + public String toString() + { + return String.format("Unified strategy %s", getMetadata()); + } + + /** + * A level: index, sstables and some properties. + */ + public static class Level + { + final List sstables; + final int index; + final double survivalFactor; + final int scalingParameter; // scaling parameter used to calculate fanout and threshold + final int fanout; // fanout factor between levels + final int threshold; // number of SSTables that trigger a compaction + final double min; // min density of sstables for this level + final double max; // max density of sstables for this level + int maxOverlap = -1; // maximum number of overlapping sstables + + Level(Controller controller, int index, double minSize, double maxSize) + { + this.index = index; + this.survivalFactor = controller.getSurvivalFactor(index); + this.scalingParameter = controller.getScalingParameter(index); + this.fanout = controller.getFanout(index); + this.threshold = controller.getThreshold(index); + this.sstables = new ArrayList<>(threshold); + this.min = minSize; + this.max = maxSize; + } + + public Collection getSSTables() + { + return sstables; + } + + public int getIndex() + { + return index; + } + + void add(SSTableReader sstable) + { + this.sstables.add(sstable); + } + + void complete() + { + if (logger.isTraceEnabled()) + logger.trace("Level: {}", this); + } + + /** + * Return the compaction pick + */ + CompactionPick getCompactionPick(SelectionContext context) + { + List buckets = getBuckets(context); + if (buckets == null) + { + if (logger.isDebugEnabled()) + logger.debug("Level {} sstables {} max overlap {} buckets with compactions {} tasks {}", + index, sstables.size(), maxOverlap, 0, 0); + return null; // nothing crosses the threshold in this level, nothing to do + } + + int estimatedRemainingTasks = 0; + int overlapMatchingCount = 0; + Bucket selectedBucket = null; + 
Controller controller = context.controller; + for (Bucket bucket : buckets) + { + // We can have just one pick in each level. Pick one bucket randomly out of the ones with + // the highest overlap. + // The random() part below implements reservoir sampling with size 1, giving us a uniform selection. + if (bucket.maxOverlap == maxOverlap && controller.random().nextInt(++overlapMatchingCount) == 0) + selectedBucket = bucket; + // The estimated remaining tasks is a measure of the remaining amount of work, thus we prefer to + // calculate the number of tasks we would do in normal operation, even though we may compact in bigger + // chunks when we are late. + estimatedRemainingTasks += bucket.maxOverlap / threshold; + } + context.estimatedRemainingTasks += estimatedRemainingTasks; + assert selectedBucket != null; + + if (logger.isDebugEnabled()) + logger.debug("Level {} sstables {} max overlap {} buckets with compactions {} tasks {}", + index, sstables.size(), maxOverlap, buckets.size(), estimatedRemainingTasks); + + CompactionPick selected = selectedBucket.constructPick(controller); + + if (logger.isTraceEnabled()) + logger.trace("Returning compaction pick with selected compaction {}", + selected); + return selected; + } + + @VisibleForTesting + List getBuckets(SelectionContext context) + { + List liveSet = sstables; + + if (logger.isTraceEnabled()) + logger.trace("Creating compaction pick with live set {}", liveSet); + + List> overlaps = Overlaps.constructOverlapSets(liveSet, + UnifiedCompactionStrategy::startsAfter, + SSTableReader.firstKeyComparator, + SSTableReader.lastKeyComparator); + for (Set overlap : overlaps) + maxOverlap = Math.max(maxOverlap, overlap.size()); + if (maxOverlap < threshold) + return null; + + List buckets = Overlaps.assignOverlapsIntoBuckets(threshold, + context.controller.overlapInclusionMethod(), + overlaps, + this::makeBucket); + return buckets; + } + + private Bucket makeBucket(List> overlaps, int startIndex, int endIndex) + { + return 
endIndex == startIndex + 1 + ? new SimpleBucket(this, overlaps.get(startIndex)) + : new MultiSetBucket(this, overlaps.subList(startIndex, endIndex)); + } + + @Override + public String toString() + { + return String.format("W: %d, T: %d, F: %d, index: %d, min: %s, max %s, %d sstables, overlap %s", + scalingParameter, + threshold, + fanout, + index, + densityAsString(min), + densityAsString(max), + sstables.size(), + maxOverlap); + } + + private String densityAsString(double density) + { + return FBUtilities.prettyPrintBinary(density, "B", " "); + } + } + + + /** + * A compaction bucket, i.e. a selection of overlapping sstables from which a compaction should be selected. + */ + static abstract class Bucket + { + final Level level; + final List allSSTablesSorted; + final int maxOverlap; + + Bucket(Level level, Collection allSSTablesSorted, int maxOverlap) + { + // single section + this.level = level; + this.allSSTablesSorted = new ArrayList<>(allSSTablesSorted); + this.allSSTablesSorted.sort(SSTableReader.maxTimestampDescending); // we remove entries from the back + this.maxOverlap = maxOverlap; + } + + Bucket(Level level, List> overlapSections) + { + // multiple sections + this.level = level; + int maxOverlap = 0; + Set all = new HashSet<>(); + for (Set section : overlapSections) + { + maxOverlap = Math.max(maxOverlap, section.size()); + all.addAll(section); + } + this.allSSTablesSorted = new ArrayList<>(all); + this.allSSTablesSorted.sort(SSTableReader.maxTimestampDescending); // we remove entries from the back + this.maxOverlap = maxOverlap; + } + + /** + * Select compactions from this bucket. 
Normally this would form a compaction out of all sstables in the + * bucket, but if compaction is very late we may prefer to act more carefully: + * - we should not use more inputs than the permitted maximum + * - we should select SSTables in a way that preserves the structure of the compaction hierarchy + * These impose a limit on the size of a compaction; to make sure we always reduce the read amplification by + * this much, we treat this number as a limit on overlapping sstables, i.e. if A and B don't overlap with each + * other but both overlap with C and D, all four will be selected to form a limit-three compaction. A limit-two + * one may choose CD, ABC or ABD. + * Also, the subset is selected by max timestamp order, oldest first, to avoid violating sstable time order. In + * the example above, if B is oldest and C is older than D, the limit-two choice would be ABC (if A is older + * than D) or BC (if A is younger, avoiding combining C with A skipping D). + * + * @param controller The compaction controller. + * @return A compaction pick to execute next. + */ + CompactionPick constructPick(Controller controller) + { + int count = maxOverlap; + int threshold = level.threshold; + int fanout = level.fanout; + int index = level.index; + int maxSSTablesToCompact = Math.max(fanout, controller.maxSSTablesToCompact()); + + assert count >= threshold; + if (count <= fanout) + { + /** + * Happy path. We are not late or (for levelled) we are only so late that a compaction now will + * have the same effect as doing levelled compactions one by one. Compact all. We do not cap + * this pick at maxSSTablesToCompact due to an assumption that maxSSTablesToCompact is much + * greater than F. See {@link Controller#MAX_SSTABLES_TO_COMPACT_OPTION} for more details. 
+ */ + return new CompactionPick(index, count, allSSTablesSorted); + } + else if (count <= fanout * controller.getFanout(index + 1) || maxSSTablesToCompact == fanout) + { + // Compaction is a bit late, but not enough to jump levels via layout compactions. We need a special + // case to cap compaction pick at maxSSTablesToCompact. + if (count <= maxSSTablesToCompact) + return new CompactionPick(index, count, allSSTablesSorted); + + return new CompactionPick(index, maxSSTablesToCompact, pullOldestSSTables(maxSSTablesToCompact)); + } + else + { + // We may, however, have accumulated a lot more than T if compaction is very late. + // In this case we pick a compaction in such a way that the result of doing it spreads the data in + // a similar way to how compaction would lay them if it was able to keep up. This means: + // - for tiered compaction (w >= 0), compact in sets of as many as required to get to a level. + // for example, for w=2 and 55 sstables, pick a compaction of 16 sstables (on the next calls, given no + // new files, 2 more of 16, 1 of 4, and leaving the other 3 sstables alone). + // - for levelled compaction (w < 0), compact all that would reach a level. + // for w=-2 and 55, this means pick a compaction of 48 (on the next calls, given no new files, one of + // 4, and one of 3 sstables). 
+ int pickSize = selectPickSize(controller, maxSSTablesToCompact); + return new CompactionPick(index, pickSize, pullOldestSSTables(pickSize)); + } + } + + private int selectPickSize(Controller controller, int maxSSTablesToCompact) + { + int pickSize; + int fanout = level.fanout; + int nextStep = fanout; + int index = level.index; + int limit = Math.min(maxSSTablesToCompact, maxOverlap); + do + { + pickSize = nextStep; + fanout = controller.getFanout(++index); + nextStep *= fanout; + } + while (nextStep <= limit); + + if (level.scalingParameter < 0) + { + // For levelled compaction all the sstables that would reach this level need to be compacted to one, + // so select the highest multiple of step that fits. + pickSize *= limit / pickSize; + assert pickSize > 0; + } + return pickSize; + } + + /** + * Pull the oldest sstables to get at most limit-many overlapping sstables to compact in each overlap section. + */ + abstract Collection pullOldestSSTables(int overlapLimit); + } + + public static class SimpleBucket extends Bucket + { + public SimpleBucket(Level level, Collection sstables) + { + super(level, sstables, sstables.size()); + } + + Collection pullOldestSSTables(int overlapLimit) + { + if (allSSTablesSorted.size() <= overlapLimit) + return allSSTablesSorted; + return Overlaps.pullLast(allSSTablesSorted, overlapLimit); + } + } + + public static class MultiSetBucket extends Bucket + { + final List> overlapSets; + + public MultiSetBucket(Level level, List> overlapSets) + { + super(level, overlapSets); + this.overlapSets = overlapSets; + } + + Collection pullOldestSSTables(int overlapLimit) + { + return Overlaps.pullLastWithOverlapLimit(allSSTablesSorted, overlapSets, overlapLimit); + } + } + + /** + * Utility class holding a collection of sstables for compaction. 
+ */ + static class CompactionPick extends ArrayList + { + final int level; + final int overlap; + + CompactionPick(int level, int overlap, Collection sstables) + { + super(sstables); + this.level = level; + this.overlap = overlap; + } + } + + static class SelectionContext + { + final Controller controller; + int estimatedRemainingTasks = 0; + + SelectionContext(Controller controller) + { + this.controller = controller; + } + } +} diff --git a/src/java/org/apache/cassandra/db/compaction/UnifiedCompactionStrategy.md b/src/java/org/apache/cassandra/db/compaction/UnifiedCompactionStrategy.md new file mode 100644 index 000000000000..7fd53010e117 --- /dev/null +++ b/src/java/org/apache/cassandra/db/compaction/UnifiedCompactionStrategy.md @@ -0,0 +1,371 @@ + + +# Unified compaction strategy (UCS) + +This is a new compaction strategy that unifies tiered and leveled compaction strategies, adds sharding, lends itself to +be reconfigured at any time and forms the basis for future compaction improvements including automatic adaptation to the +workload. + +The strategy is based on two observations: + +- that tiered and levelled compaction can be generalized as the same thing if one observes that both form + exponentially-growing levels based on the size of sstables (or non-overlapping sstable runs) and trigger a + compaction when more than a given number of sstables are present on one level; +- that instead of "size" in the description above we can use "density", i.e. the size of an sstable divided by + the width of the token range it covers, which permits sstables to be split at arbitrary points when the output + of a compaction is written and still produce a levelled hierarchy. + +UCS groups sstables in levels based on the logarithm of the sstable density, with +the fanout factor $f$ as the base of the logarithm, and with each level triggering a compaction as soon as it has +$t$ overlapping sstables. 
The choice of the parameters $f$ and $t$, and of a minimum sstable size, determines the +behaviour of the strategy. This allows users to choose a levelled strategy by setting $t=2$, or a tiered strategy by +choosing $t=f$. Because the two options are mutually exclusive, meet at $f=2$ and form a space of options for choosing +different ratios of read amplification (RA) vs write amplification (WA) (where levelled compaction improves reads at +the expense of writes and approaches a sorted array as $f$ increases, and tiered compaction favors writes at the +expense of reads and approaches an unsorted log as $f$ increases), we combine the two parameters into one integer +value, $w$, and set them to be: + +* If $w < 0$ then $f = 2 - w$ and $t = 2$. This means leveled compactions, high WA but low RA. + We write this as L*f* (e.g. L10 for $w = -8$). +* If $w > 0$ then $f = 2 + w$ and $t = f$. This means tiered compactions, low WA but high RA. + We write this as T*f* (e.g. T4 for $w = 2$). +* If $w = 0$ then $f = t = 2$. This is the middle ground, leveled and tiered compactions behave identically. + We write this as N. + +Further, UCS permits the value of $w$ to be defined separately for each level, and thus levels can have different +behaviours. For example level zero could use tiered compaction (STCS-like) but higher levels could switch to levelled +(LCS-like) with increasing levels of read optimization. + +The strategy splits sstables at specific shard boundaries whose number grows with the density of an sstable, and +uses the non-overlap between sstables created by this splitting to be able to perform compactions concurrently. + +## Size-based levels + +Let's ignore density and splitting for a while and explore more closely how sstables are grouped into levels if +they are never split. 
+ +For a fixed fanout factor $f$ and a memtable flush size $m$, calculated as the average size of the runs of sstables +written when a memtable is flushed and intended to form a base of the hierarchy where all newly-flushed sstables end +up, the level $L$ for an sstable of size $s$ is calculated as follows: + +$$ +L = +\begin{cases} +\left \lfloor \log_f {\frac s m} \right \rfloor & \text{if } s \ge m \\ +0 & \text{otherwise} +\end{cases} +$$ + +This means that sstables are assigned to levels as follows: + + +| Level | Min sstable size | Max sstable size | +| ----- | ---------------- | ----------------- | +| 0 | 0 | $m \cdot f$ | +| 1 | $m \cdot f$ | $m \cdot f^2$ | +| 2 | $m \cdot f^2$ | $m \cdot f^3$ | +| 3 | $m \cdot f^3$ | $m \cdot f^4$ | +| ... | ... | ... | +| n | $m \cdot f^n$ | $m \cdot f^{n+1}$ | + +If we define $t$ as the number of sstables in a level that triggers a compaction, then: + +* $t = 2$ means the strategy is using a leveled merged policy. An sstable enters level $n$ with size $\ge mf^n$. + When another sstable enters (also with size $\ge mf^n$) they compact and form a new table with size + $\sim 2mf^n$, which keeps the result in the same level for $f > 2$. After this repeats at least $f-2$ + more times (i.e. $f$ tables enter the level altogether), the compaction result grows to $\ge mf^{n+1}$ + and enters the next level. +* $t = f$ means the strategy is using a tiered merge policy. After $f$ sstables enter level $n$, each of size + $\ge mf^n$, they are compacted together, resulting in an sstable of size $\ge mf^{n+1}$ which belongs to the next + level. + +Note that the above ignores overwrites and deletions. Given knowledge of the expected proportion of overwrites/deletion, +they can also be accounted for (this is implemented but not exposed at this time). + +For leveled strategies, the write amplification will be proportional to $f-1$ times the number of levels whilst +for tiered strategies it will be proportional only to the number of levels. 
On the other hand, the read +amplification will be proportional to the number of levels for leveled strategies and to $f-1$ times the number +of levels for tiered strategies. + +The number of levels for our size based scheme can be calculated by substituting the maximal dataset size $D$ in our +equation above, giving a maximal number of levels inversely proportional to the logarithm of $f$. + +Therefore when we try to control the overheads of compaction on the database, we have a space of choices for the strategy +that range from: + +* leveled compaction ( $t=2$ ) with high $f$ — low number of levels, high read efficiency, high write cost, + moving closer to the behaviour of a sorted array as $f$ increases; +* compaction with $t = f = 2$ where leveled is the same as tiered and we have a middle ground with logarithmically + increasing read and write costs; +* tiered compaction ( $t=f$ ) with high $f$ — very high number of sstables, low read efficiency and low write cost, + moving closer to an unsorted log as $f$ increases. + +This can be easily generalised to varying fan factors, by replacing the exponentiation with the product of the fan +factors for all lower levels: + + +| Level | Min sstable size | Max sstable size | +| ----- | --------------------------- | --------------------------------- | +| 0 | 0 | $m \cdot f_0$ | +| 1 | $m \cdot f_0$ | $m \cdot f_0 \cdot f_1$ | +| 2 | $m \cdot f_0 \cdot f_1$ | $m \cdot f_0 \cdot f_1 \cdot f_2$ | +| ... | ... | ... | +| n | $m \cdot \prod_{i < n} f_i$ | $m \cdot \prod_{i\le n} f_i$ | + +## Density levelling + +If we replace the size $s$ in the previous paragraph with the density measure $d = s / v$ where $v$ is the fraction of +the token space that the sstable covers, all formulae and conclusions remain valid. However, we can now split the +output at arbitrary points and still make use of the results. 
For example, if we start with four sstables each spanning +a shard that covers 1/10 of the token space on a T4 level and compact them, splitting the output equally into four +sstables, the resulting sstables (provided no overwrite/deletion) will be of the same size as the input sstables, but +will now cover 1/40 of the token share each. As a result, they will now be interpreted to be four times as dense and +thus fall on the next level of the hierarchy (recall that the upper density limit for a level is $f$ times the lower). +If we can ensure that the split points are fixed (see below), when this repeats enough times for the next level to +receive sufficiently many sstables, we can start 4 independent compactions concurrently. + +It is important to account for locally-owned token share when calculating $v$. Because vnodes mean that the local +token ownership of a node is not contiguous, the difference between the first and last token is not sufficient to +calculate token share — any non-locally-owned ranges must be excluded. + +Using the density measure allows us to control the size of sstables through sharding, as well as to execute +compactions in parallel. With size levelling we could achieve parallelisation by pre-splitting the data in a fixed +number of compaction arenas (e.g. by using the data directories mechanism), but this requires the number of shards to be +predetermined and equal for all levels of the hierarchy, which still permits sstables to become too small or too large. +Large sstables complicate streaming and repair and increase the duration of compaction operations, pinning resources to +long-running operations and making it more likely that too many sstables will accumulate on lower levels of the +hierarchy. 
+ +Density levelling permits a much wider variety of splitting options including ones where the size of sstables can +be kept close to a selected target, and also allows UCS to understand the levelling structure of STCS (where size grows +with each level) as well as LCS (where token share shrinks with each level). + +## Sharding + +Once density levelling is in place, we have a range of choices for splitting sstables. One is to simply split +when a certain output size is reached (like LCS), forming non-overlapping sstable runs instead of individual +sstables. Another is to split the token space into shards at predefined boundary points. A third hybrid option is +to split at predefined boundaries, but only if a certain minimum size has been reached. + +Splitting only by size has the problem that individual sstables start at positions that vary, and if we need to +compact sstables split in this way we must either always start from the beginning and proceed to process the whole +level sequentially, or have some part of the data compacted/copied more times than necessary as any smaller selection +of sstables has to exclude some overlapping sstable. The other side of the latter problem is that some section of the +compacted token range will include fewer inputs, and will thus be sparser than the rest of the compaction output; +this will skew the density of the result, or need to be controlled by further splitting of the output. In the hybrid +option the same problem occurs less frequently but is still present. + +To avoid these and permit concurrent compactions of all levels of the compaction hierarchy, we choose to predefine +boundary points for every compaction and always split sstables on these points. The number of the boundaries is +determined based on the density of the inputs and the estimated density of the result — as it grows higher +the number of boundaries is increased to keep the size of individual sstables close to a predefined target. 
By +only using power-of-two multiples of a specified base count (in other words, by only splitting shards in the +middle), we also ensure that any boundary that applies to a given output density also applies to all higher +densities. + +More precisely, the user specifies two sharding parameters: + +- base shard count $b$ +- target sstable size $t$ + +At the start of every compaction, we estimate the density of the output $d$ and calculate a number of shards +$S$ to split the local token space into to be + +$$ +S = +\begin{cases} +2^{\mathrm{round}\left( \log_2 \left( {\frac d t \cdot \frac 1 b}\right)\right)} \cdot b + & \text{if } d \ge tb\\ +b & \text{otherwise} +\end{cases} +$$ + +That is, we divide the density by the target size and round this to a power-of-two multiple of $b$. +We then generate $S - 1$ boundaries that split the local token space equally into $S$ shards, and split the result +of the compaction on these boundaries to form a separate sstable for each shard. This aims to produce sstable sizes that +fall between $t/\sqrt 2$ and $t\cdot \sqrt 2$. + +For example, for a target sstable size of 100MiB and 4 base shards, a 200 MiB memtable will be split in four L0 shards +of roughly 50 MiB each, because ${\frac{200}{100} \cdot \frac 1 4} < 1$ and thus we get +the minimum of 4 shards, each spanning 1/4 of the token space. If in one of these shards we compact 6 of these 50 MiB +sstables, the estimated density of the output would be 1200 MiB $({6 \cdot 50 \mathrm{MiB}} / (1/4))$, which results in +a target ratio of $\frac{1200}{100} \cdot \frac 1 4 = 2^{\log_2 3}$, rounded to $2^2 \cdot 4$ shards for the whole +local token space, thus 4 for the 1/4 span that the compaction covers. Assuming no overwrites and +deletions, the resulting sstables will be of size 75 MiB, token share 1/16 and density 1200 MiB. + +This sharding mechanism is independent of the compaction specification. 
+ +## Choosing sstables to compact + +The density levelling lets us separate sstables in levels defined by the compaction configuration's fan factors. +However, unlike in the size levelling case where sstables are expected to cover the full token space, we cannot use the +number of sstables on a level as a trigger as many of these sstables may be non-overlapping, i.e. not making read +queries less efficient. To deal with this, take advantage of sharding to perform multiple compactions on a level +concurrently, and reduce the size of individual compaction operations, we also need to separate non-overlapping +sections in different buckets, and decide what to do based on the number of overlapping sstables in a bucket. + +To do this, we first form a minimal list of overlap sets that satisfy the following requirements: + +- two sstables that do not overlap are never put in the same set; +- if two sstables overlap, there is a set in the list that contains both; +- sstables are placed in consecutive positions in the list. + +The second condition can also be rephrased to say that for any point in the token range, there is a set in the list +that contains all sstables whose range covers that point. In other words, the overlap sets give us the maximum number +of sstables that need to be consulted to read any key, i.e. the read amplification that our trigger $t$ aims to +control. We don't calculate or store the exact spans the overlapping sets cover, only the participating sstables. +The sets can be obtained in $O(n\log n)$ time. + +For example, if sstables A, B, C and D cover, respectively, tokens 0-3, 2-7, 6-9 and 1-8, the overlap sets we compute +are ABD and BCD. A and C don't overlap, so they must be in separate sets. A, B and D overlap at token 2 and must thus +be present in at least one set, and similarly for B, C and D at 7. Only A and D overlap at 1, but the set ABD already +includes this combination. 
+ +These overlap sets are sufficient to decide whether or not a compaction should be carried out — if and only if the +number of elements in a set is at least as large as $t$. However, we may need to include more sstables in the compaction +than this set alone. + +It is possible for our sharding scheme to end up constructing sstables spanning differently-sized shards for the same +level. One clear example is the case of levelled compaction, where, for example, sstables enter at some density, and +after the first compaction the result — being 2x bigger than that density — is split in the middle because +it has double the density. As another sstable enters the same level, we will have separate overlap sets for the first +and second half of that older sstable; to be efficient, the compaction that is triggered next needs to select both. + +To deal with this and any other cases of partial overlap, the compaction strategy will transitively extend +the overlap sets with all neighboring ones that share some sstable, constructing the set of all sstables that have some +chain of overlapping ones that connects it to the initial set[^1]. This extended set forms the compaction bucket. + +In normal operation we compact all sstables in the compaction bucket. If compaction is very late we may apply a limit +on the number of overlapping sources we compact; in that case we use the collection of oldest sstables that would +select at most limit-many in any included overlap set, making sure that if an sstable is included in this compaction, +all older ones are also included to maintain time order. + +## Selecting the compaction to run + +Compaction strategies aim to minimize the read amplification of queries, which is defined by the number of sstables +that overlap on any given key. In order to do this most efficiently in situations where compaction is late, we select +a compaction bucket whose overlap is the highest among the possible choices. 
If there are multiple such choices, we +choose one uniformly randomly within each level, and between the levels we prefer the lowest level (as this is expected +to cover a larger fraction of the token space for the same amount of work). + +Under sustained load, this mechanism prevents the accumulation of sstables on some level that could sometimes happen +with legacy strategies (e.g. all resources consumed by L0 and sstables accumulating on L1) and can lead to a +steady state where compactions always use more sstables than the assigned threshold and fan factor and maintain a tiered +hierarchy based on the lowest overlap they are able to maintain for the load. + +## Major compaction + +Under the working principles of UCS, a major compaction is an operation which compacts together all sstables that have +(transitive) overlap, and where the output is split on shard boundaries appropriate for the expected result density. + +In other words, it is expected that a major compaction will result in $b$ concurrent compactions, each containing all +sstables covered in each of the base shards, and that the result will be split on shard boundaries whose number +depends on the total size of data contained in the shard. + +## Differences with STCS and LCS + +Note that there are some differences between the tiered flavors of UCS (UCS-tiered) and STCS, and between the leveled +flavors of UCS (UCS-leveled) and LCS. + +#### UCS-tiered vs STCS + +SizeTieredCompactionStrategy is pretty close to UCS. However, it defines buckets/levels by looking for sstables of +similar size rather than a predefined banding of sizes. This can result in some odd selections of buckets, possibly +spanning sstables of wildly different sizes, while UCS's selection is more stable and predictable. + +STCS triggers a compaction when it finds at least `min_threshold` sstables on some bucket, and it compacts between +`min_threshold` and `max_threshold` sstables from that bucket at a time. 
`min_threshold` is equivalent to UCS's
+$t = f = w + 2$. UCS drops the upper limit as we have seen that compaction is still efficient with very large numbers of
+sstables.
+
+UCS makes use of the density measure to split results in order to keep the size of sstables and the length of
+compactions low. Within a level it will only consider overlapping sstables when deciding whether or not the threshold
+is hit, and will independently compact sets of sstables that do not overlap.
+
+If there are multiple choices to pick SSTables within a bucket, STCS groups them by size while UCS groups them by
+timestamp. Because of that, STCS easily loses time order and makes whole table expiration less efficient.
+
+#### UCS-leveled vs LCS
+
+At first glance, LeveledCompactionStrategy looks very different in behaviour compared to UCS.
+
+LCS keeps multiple sstables per level which form a sorted run of non-overlapping sstables of small fixed size. So
+physical sstables on increasing levels increase in number (by a factor of `fanout_size`) instead of size. LCS does that
+to reduce space amplification and to ensure shorter compaction operations. When it finds that the combined size of a
+run on a level is higher than expected, it selects some sstables to compact with overlapping ones from the next level
+of the hierarchy. This eventually pushes the size of the next level over its size limit and triggers higher-level
+operations.
+
+In UCS sstables on increasing levels increase in density (by a factor of $f$, see the **Size based levels** section
+above). UCS-leveled triggers a compaction when it finds a second overlapping sstable on some sharded level. It compacts
+the overlapping bucket on that level, and the result most often ends up on that level too, but eventually it reaches
+sufficient size for the next level.
Given an even data spread, this is the same time as a run in LCS would outgrow its +size, thus compactions are in effect triggered at the same time as LCS would trigger them. + +The two approaches end up with a very similar effect, with the added benefits for UCS that compactions cannot affect +other levels like e.g. L0-to-L1 compactions in LCS can prevent any concurrent L1-to-L2 compactions, and that sstables +are structured in a way that can be easily switched to UCS-tiered or a different set of values for the UCS parameters. + +Because the split positions of LCS sstables are based on size only and thus vary, when LCS selects sstables on the next +level to compact with, it must include some that only partially overlap, which tends to cause these sstables to be +compacted more often than strictly necessary. This is not acceptable if we need tight write amplification control (i.e. +this solution suits UCS-leveled, but not UCS-tiered and is thus not general enough for UCS). UCS deals with this by +splitting the run on specific boundaries selected before the compaction starts based on a file's density. As the +boundaries for a specific density are also boundaries for the next ones, whenever we select sstables to compact some +shard boundaries are shared, which guarantees that we can efficiently select higher-density sstables that exactly match +the span of the lower-density ones. + +## Configuration + +UCS accepts these compaction strategy parameters: + +* **scaling_parameters**. A list of per-level scaling parameters, specified as L*f*, T*f*, N, or an integer value + specifying $w$ directly. If more levels are present than the length of this list, the last value is used for all + higher levels. Often this will be a single parameter, specifying the behaviour for all levels of the + hierarchy. +
Levelled compaction, specified as L*f*, is preferable for read-heavy workloads, especially if bloom filters are + not effective (e.g. with wide partitions); higher levelled fan factors improve read amplification (and hence latency, + as well as throughput for read-dominated workloads) at the expense of increased write costs. +
Tiered compaction, specified as T*f*, is preferable for write-heavy workloads, or ones where bloom filters or + time order can be exploited; higher tiered fan factors improve the cost of writes (and hence throughput) at the + expense of making reads more difficult. +
N is the middle ground that has the features of levelled (one sstable run per level) as well as tiered (one + compaction to be promoted to the next level) and a fan factor of 2. This can also be specified as T2 or L2. +
The default value is T4, matching the default STCS behaviour with threshold 4. To select an equivalent of LCS
+  with its default fan factor 10, use L10.
+* **target_sstable_size**. The target sstable size $t$, specified as a human-friendly size in bytes (e.g. 100 MiB =
+  $100\cdot 2^{20}$ B or 10 MB = 10,000,000 B). The strategy will split data in shards that aim to produce sstables
+  of size between $t / \sqrt 2$ and $t \cdot \sqrt 2$.
Smaller sstables improve streaming and repair, and make compactions shorter. On the other hand, each sstable + on disk has a non-trivial in-memory footprint that also affects garbage collection times. +
Increase this if the memory pressure from the number of sstables in the system becomes too high. +
The default value is 1 GiB. +* **base_shard_count**. The minimum number of shards $b$, used for levels with the smallest density. This gives the + minimum compaction concurrency for the lowest levels. A low number would result in larger L0 sstables but may limit + the overall maximum write throughput (as every piece of data has to go through L0). +
The default value is 4 (1 for system tables, or when multiple data locations are defined). +* **expired_sstable_check_frequency_seconds**. Determines how often to check for expired SSTables. +
The default value is 10 minutes. + +In **cassandra.yaml**: + +* **concurrent_compactors**. The number of compaction threads available. Set this to a large number, at minimum the number of expected levels of the compaction hierarchy to make sure that each level is given a dedicated compaction thread. This will avoid latency spikes caused by lower levels of the compaction hierarchy not getting a chance to run. + +[^1]: Note: in addition to TRANSITIVE, "overlap inclusion methods" of NONE and SINGLE are also implemented for + experimentation, but they are not recommended for the UCS sharding scheme. diff --git a/src/java/org/apache/cassandra/db/compaction/unified/Controller.java b/src/java/org/apache/cassandra/db/compaction/unified/Controller.java new file mode 100644 index 000000000000..7d39e58babca --- /dev/null +++ b/src/java/org/apache/cassandra/db/compaction/unified/Controller.java @@ -0,0 +1,566 @@ +/* + * Copyright DataStax, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.cassandra.db.compaction.unified; + +import java.util.Arrays; +import java.util.HashMap; +import java.util.Map; +import java.util.Random; +import java.util.concurrent.ThreadLocalRandom; +import java.util.concurrent.TimeUnit; + +import org.apache.cassandra.config.CassandraRelevantProperties; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.cassandra.config.DatabaseDescriptor; +import org.apache.cassandra.db.ColumnFamilyStore; +import org.apache.cassandra.db.compaction.UnifiedCompactionStrategy; +import org.apache.cassandra.exceptions.ConfigurationException; +import org.apache.cassandra.schema.SchemaConstants; +import org.apache.cassandra.utils.Overlaps; +import org.apache.cassandra.utils.FBUtilities; +import org.apache.cassandra.utils.MonotonicClock; + +/** +* The controller provides compaction parameters to the unified compaction strategy +*/ +public class Controller +{ + protected static final Logger logger = LoggerFactory.getLogger(Controller.class); + + /** + * The scaling parameters W, one per bucket index and separated by a comma. + * Higher indexes will use the value of the last index with a W specified. + */ + final static String SCALING_PARAMETERS_OPTION = "scaling_parameters"; + private final static String DEFAULT_SCALING_PARAMETERS = CassandraRelevantProperties.UCS_SCALING_PARAMETER.getString(); + + /** + * Override for the flush size in MB. The database should be able to calculate this from executing flushes, this + * should only be necessary in rare cases. + */ + static final String FLUSH_SIZE_OVERRIDE_OPTION = "flush_size_override"; + + static final String BASE_SHARD_COUNT_OPTION = "base_shard_count"; + /** + * Default base shard count, used when a base count is not explicitly supplied. This value applies as long as the + * table is not a system one, and directories are not defined. 
+ * + * For others a base count of 1 is used as system tables are usually small and do not need as much compaction + * parallelism, while having directories defined provides for parallelism in a different way. + */ + public static final int DEFAULT_BASE_SHARD_COUNT = CassandraRelevantProperties.UCS_BASE_SHARD_COUNT.getInt(); + + static final String TARGET_SSTABLE_SIZE_OPTION = "target_sstable_size"; + public static final double DEFAULT_TARGET_SSTABLE_SIZE = CassandraRelevantProperties.UCS_TARGET_SSTABLE_SIZE.getSizeInBytes(); + static final double MIN_TARGET_SSTABLE_SIZE = 1L << 20; + + /** + * This parameter is intended to modify the shape of the LSM by taking into account the survival ratio of data, for now it is fixed to one. + */ + static final double DEFAULT_SURVIVAL_FACTOR = CassandraRelevantProperties.UCS_SURVIVAL_FACTOR.getDouble(); + static final double[] DEFAULT_SURVIVAL_FACTORS = new double[] { DEFAULT_SURVIVAL_FACTOR }; + + /** + * The maximum number of sstables to compact in one operation. + * + * This is expected to be large and never be reached, but compaction going very very late may cause the accumulation + * of thousands and even tens of thousands of sstables which may cause problems if compacted in one long operation. + * The default is chosen to be half of the maximum permitted space overhead when the source sstables are of the + * minimum sstable size. + * + * If the fanout factor is larger than the maximum number of sstables, the strategy will ignore the latter. 
+ */ + static final String MAX_SSTABLES_TO_COMPACT_OPTION = "max_sstables_to_compact"; + + static final String ALLOW_UNSAFE_AGGRESSIVE_SSTABLE_EXPIRATION_OPTION = "unsafe_aggressive_sstable_expiration"; + static final boolean ALLOW_UNSAFE_AGGRESSIVE_SSTABLE_EXPIRATION = CassandraRelevantProperties.ALLOW_UNSAFE_AGGRESSIVE_SSTABLE_EXPIRATION.getBoolean(); + static final boolean DEFAULT_ALLOW_UNSAFE_AGGRESSIVE_SSTABLE_EXPIRATION = false; + + static final int DEFAULT_EXPIRED_SSTABLE_CHECK_FREQUENCY_SECONDS = 60 * 10; + static final String EXPIRED_SSTABLE_CHECK_FREQUENCY_SECONDS_OPTION = "expired_sstable_check_frequency_seconds"; + + /** The maximum splitting factor for shards. The maximum number of shards is this number multiplied by the base count. */ + static final double MAX_SHARD_SPLIT = 1048576; + + /** + * Overlap inclusion method. NONE for participating sstables only (not recommended), SINGLE to only include sstables + * that overlap with participating (LCS-like, higher concurrency during upgrades but some double compaction), + * TRANSITIVE to include overlaps of overlaps (likely to trigger whole level compactions, safest). 
+ */ + static final String OVERLAP_INCLUSION_METHOD_OPTION = "overlap_inclusion_method"; + static final Overlaps.InclusionMethod DEFAULT_OVERLAP_INCLUSION_METHOD = + CassandraRelevantProperties.UCS_OVERLAP_INCLUSION_METHOD.getEnum(Overlaps.InclusionMethod.TRANSITIVE); + + protected final ColumnFamilyStore cfs; + protected final MonotonicClock clock; + private final int[] scalingParameters; + protected final double[] survivalFactors; + protected final long flushSizeOverride; + protected volatile long currentFlushSize; + protected final int maxSSTablesToCompact; + protected final long expiredSSTableCheckFrequency; + protected final boolean ignoreOverlapsInExpirationCheck; + + protected final int baseShardCount; + + protected final double targetSSTableSizeMin; + + protected final Overlaps.InclusionMethod overlapInclusionMethod; + + Controller(ColumnFamilyStore cfs, + MonotonicClock clock, + int[] scalingParameters, + double[] survivalFactors, + long flushSizeOverride, + int maxSSTablesToCompact, + long expiredSSTableCheckFrequency, + boolean ignoreOverlapsInExpirationCheck, + int baseShardCount, + double targetSStableSize, + Overlaps.InclusionMethod overlapInclusionMethod) + { + this.cfs = cfs; + this.clock = clock; + this.scalingParameters = scalingParameters; + this.survivalFactors = survivalFactors; + this.flushSizeOverride = flushSizeOverride; + this.currentFlushSize = flushSizeOverride; + this.expiredSSTableCheckFrequency = TimeUnit.MILLISECONDS.convert(expiredSSTableCheckFrequency, TimeUnit.SECONDS); + this.baseShardCount = baseShardCount; + this.targetSSTableSizeMin = targetSStableSize * Math.sqrt(0.5); + this.overlapInclusionMethod = overlapInclusionMethod; + + if (maxSSTablesToCompact <= 0) + maxSSTablesToCompact = Integer.MAX_VALUE; + + this.maxSSTablesToCompact = maxSSTablesToCompact; + + if (ignoreOverlapsInExpirationCheck && !ALLOW_UNSAFE_AGGRESSIVE_SSTABLE_EXPIRATION) + { + logger.warn("Not enabling aggressive SSTable expiration, as the system property 
'" + CassandraRelevantProperties.ALLOW_UNSAFE_AGGRESSIVE_SSTABLE_EXPIRATION.name() + "' is set to 'false'. " + + "Set it to 'true' to enable aggressive SSTable expiration."); + } + this.ignoreOverlapsInExpirationCheck = ALLOW_UNSAFE_AGGRESSIVE_SSTABLE_EXPIRATION && ignoreOverlapsInExpirationCheck; + } + + /** + * @return the scaling parameter W + * @param index + */ + public int getScalingParameter(int index) + { + if (index < 0) + throw new IllegalArgumentException("Index should be >= 0: " + index); + + return index < scalingParameters.length ? scalingParameters[index] : scalingParameters[scalingParameters.length - 1]; + } + + @Override + public String toString() + { + return String.format("Controller, m: %s, o: %s, Ws: %s", + FBUtilities.prettyPrintBinary(targetSSTableSizeMin, "B", ""), + Arrays.toString(survivalFactors), + printScalingParameters(scalingParameters)); + } + + public int getFanout(int index) { + int W = getScalingParameter(index); + return UnifiedCompactionStrategy.fanoutFromScalingParameter(W); + } + + public int getThreshold(int index) { + int W = getScalingParameter(index); + return UnifiedCompactionStrategy.thresholdFromScalingParameter(W); + } + + /** + * Calculate the number of shards to split the local token space in for the given sstable density. + * This is calculated as a power-of-two multiple of baseShardCount, so that the expected size of resulting sstables + * is between targetSSTableSizeMin and 2*targetSSTableSizeMin (in other words, sqrt(0.5) * targetSSTableSize and + * sqrt(2) * targetSSTableSize), with a minimum of baseShardCount shards for smaller sstables. + */ + public int getNumShards(double density) + { + // How many we would have to aim for the target size. Divided by the base shard count, so that we can ensure + // the result is a multiple of it by multiplying back below. 
+ double count = density / (targetSSTableSizeMin * baseShardCount); + if (count > MAX_SHARD_SPLIT) + count = MAX_SHARD_SPLIT; + assert !(count < 0); // Must be positive, 0 or NaN, which should translate to baseShardCount + + // Make it a power of two multiple of the base count so that split points for lower levels remain split points for higher. + // The conversion to int and highestOneBit round down, for which we compensate by using the sqrt(0.5) multiplier + // already applied in targetSSTableSizeMin. + // Setting the bottom bit to 1 ensures the result is at least baseShardCount. + int shards = baseShardCount * Integer.highestOneBit((int) count | 1); + logger.debug("Shard count {} for density {}, {} times target {}", + shards, + FBUtilities.prettyPrintBinary(density, "B", " "), + density / targetSSTableSizeMin, + FBUtilities.prettyPrintBinary(targetSSTableSizeMin, "B", " ")); + return shards; + } + + /** + * @return the survival factor o + * @param index + */ + public double getSurvivalFactor(int index) + { + if (index < 0) + throw new IllegalArgumentException("Index should be >= 0: " + index); + + return index < survivalFactors.length ? survivalFactors[index] : survivalFactors[survivalFactors.length - 1]; + } + + /** + * Return the flush sstable size in bytes. + * + * This is usually obtained from the observed sstable flush sizes, refreshed when it differs significantly + * from the current values. + * It can also be set by the user in the options. + * + * @return the flush size in bytes. + */ + public long getFlushSizeBytes() + { + if (flushSizeOverride > 0) + return flushSizeOverride; + + double envFlushSize = cfs.metric.flushSizeOnDisk.get(); + if (currentFlushSize == 0 || Math.abs(1 - (currentFlushSize / envFlushSize)) > 0.5) + { + // The current size is not initialized, or it differs by over 50% from the observed. + // Use the observed size rounded up to a whole megabyte. 
+ currentFlushSize = ((long) (Math.ceil(Math.scalb(envFlushSize, -20)))) << 20; + } + return currentFlushSize; + } + + /** + * @return whether is allowed to drop expired SSTables without checking if partition keys appear in other SSTables. + * Same behavior as in TWCS. + */ + public boolean getIgnoreOverlapsInExpirationCheck() + { + return ignoreOverlapsInExpirationCheck; + } + + public long getExpiredSSTableCheckFrequency() + { + return expiredSSTableCheckFrequency; + } + + /** + * The strategy will call this method each time {@link CompactionStrategy#getNextBackgroundTask} is called. + */ + public void onStrategyBackgroundTaskRequest() + { + } + + /** + * Returns a maximum bucket index for the given data size and fanout. + */ + private int maxBucketIndex(long totalLength, int fanout) + { + double o = getSurvivalFactor(0); + long m = getFlushSizeBytes(); + return Math.max(0, (int) Math.floor((Math.log(totalLength) - Math.log(m)) / (Math.log(fanout) - Math.log(o)))); + } + + public static Controller fromOptions(ColumnFamilyStore cfs, Map options) + { + int[] Ws = parseScalingParameters(options.getOrDefault(SCALING_PARAMETERS_OPTION, DEFAULT_SCALING_PARAMETERS)); + + long flushSizeOverride = (long) FBUtilities.parseHumanReadable(options.getOrDefault(FLUSH_SIZE_OVERRIDE_OPTION, "0MiB"), null, "B"); + int maxSSTablesToCompact = Integer.parseInt(options.getOrDefault(MAX_SSTABLES_TO_COMPACT_OPTION, "0")); + long expiredSSTableCheckFrequency = options.containsKey(EXPIRED_SSTABLE_CHECK_FREQUENCY_SECONDS_OPTION) + ? Long.parseLong(options.get(EXPIRED_SSTABLE_CHECK_FREQUENCY_SECONDS_OPTION)) + : DEFAULT_EXPIRED_SSTABLE_CHECK_FREQUENCY_SECONDS; + boolean ignoreOverlapsInExpirationCheck = options.containsKey(ALLOW_UNSAFE_AGGRESSIVE_SSTABLE_EXPIRATION_OPTION) + ? 
Boolean.parseBoolean(options.get(ALLOW_UNSAFE_AGGRESSIVE_SSTABLE_EXPIRATION_OPTION)) + : DEFAULT_ALLOW_UNSAFE_AGGRESSIVE_SSTABLE_EXPIRATION; + + int baseShardCount; + if (options.containsKey(BASE_SHARD_COUNT_OPTION)) + { + baseShardCount = Integer.parseInt(options.get(BASE_SHARD_COUNT_OPTION)); + } + else + { + if (SchemaConstants.isSystemKeyspace(cfs.getKeyspaceName()) || (cfs.getDiskBoundaries().positions != null && cfs.getDiskBoundaries().positions.size() > 1)) + baseShardCount = 1; + else + baseShardCount = DEFAULT_BASE_SHARD_COUNT; + } + + double targetSStableSize = options.containsKey(TARGET_SSTABLE_SIZE_OPTION) + ? FBUtilities.parseHumanReadable(options.get(TARGET_SSTABLE_SIZE_OPTION), null, "B") + : DEFAULT_TARGET_SSTABLE_SIZE; + + Overlaps.InclusionMethod inclusionMethod = options.containsKey(OVERLAP_INCLUSION_METHOD_OPTION) + ? Overlaps.InclusionMethod.valueOf(options.get(OVERLAP_INCLUSION_METHOD_OPTION).toUpperCase()) + : DEFAULT_OVERLAP_INCLUSION_METHOD; + + return new Controller(cfs, + MonotonicClock.Global.preciseTime, + Ws, + DEFAULT_SURVIVAL_FACTORS, + flushSizeOverride, + maxSSTablesToCompact, + expiredSSTableCheckFrequency, + ignoreOverlapsInExpirationCheck, + baseShardCount, + targetSStableSize, + inclusionMethod); + } + + public static Map validateOptions(Map options) throws ConfigurationException + { + String nonPositiveErr = "Invalid configuration, %s should be positive: %d"; + String booleanParseErr = "%s should either be 'true' or 'false', not %s"; + String intParseErr = "%s is not a parsable int (base10) for %s"; + String longParseErr = "%s is not a parsable long (base10) for %s"; + String sizeUnacceptableErr = "%s %s is not acceptable, size must be at least %s"; + String invalidSizeErr = "%s %s is not a valid size in bytes: %s"; + options = new HashMap<>(options); + String s; + + s = options.remove(SCALING_PARAMETERS_OPTION); + if (s != null) + parseScalingParameters(s); + + s = options.remove(BASE_SHARD_COUNT_OPTION); + if (s != null) + { 
+ try + { + int numShards = Integer.parseInt(s); + if (numShards <= 0) + throw new ConfigurationException(String.format(nonPositiveErr, + BASE_SHARD_COUNT_OPTION, + numShards)); + } + catch (NumberFormatException e) + { + throw new ConfigurationException(String.format(intParseErr, s, BASE_SHARD_COUNT_OPTION), e); + } + } + + s = options.remove(TARGET_SSTABLE_SIZE_OPTION); + if (s != null) + { + try + { + long targetSSTableSize = (long) FBUtilities.parseHumanReadable(s, null, "B"); + if (targetSSTableSize < MIN_TARGET_SSTABLE_SIZE) + { + throw new ConfigurationException(String.format(sizeUnacceptableErr, + TARGET_SSTABLE_SIZE_OPTION, + s, + FBUtilities.prettyPrintBinary(MIN_TARGET_SSTABLE_SIZE, "B", ""))); + } + } + catch (NumberFormatException e) + { + throw new ConfigurationException(String.format(invalidSizeErr, + TARGET_SSTABLE_SIZE_OPTION, + s, + e.getMessage()), + e); + } + } + + s = options.remove(FLUSH_SIZE_OVERRIDE_OPTION); + if (s != null) + { + try + { + long flushSize = (long) FBUtilities.parseHumanReadable(s, null, "B"); + if (flushSize < MIN_TARGET_SSTABLE_SIZE) + throw new ConfigurationException(String.format(sizeUnacceptableErr, + FLUSH_SIZE_OVERRIDE_OPTION, + s, + FBUtilities.prettyPrintBinary(MIN_TARGET_SSTABLE_SIZE, "B", ""))); + } + catch (NumberFormatException e) + { + throw new ConfigurationException(String.format(invalidSizeErr, + FLUSH_SIZE_OVERRIDE_OPTION, + s, + e.getMessage()), + e); + } + } + + s = options.remove(MAX_SSTABLES_TO_COMPACT_OPTION); + if (s != null) + { + try + { + Integer.parseInt(s); // values less than or equal to 0 enable the default + } + catch (NumberFormatException e) + { + throw new ConfigurationException(String.format(intParseErr, + s, + MAX_SSTABLES_TO_COMPACT_OPTION), + e); + } + } + s = options.remove(EXPIRED_SSTABLE_CHECK_FREQUENCY_SECONDS_OPTION); + if (s != null) + { + try + { + long expiredSSTableCheckFrequency = Long.parseLong(s); + if (expiredSSTableCheckFrequency <= 0) + throw new 
ConfigurationException(String.format(nonPositiveErr, + EXPIRED_SSTABLE_CHECK_FREQUENCY_SECONDS_OPTION, + expiredSSTableCheckFrequency)); + } + catch (NumberFormatException e) + { + throw new ConfigurationException(String.format(longParseErr, + s, + EXPIRED_SSTABLE_CHECK_FREQUENCY_SECONDS_OPTION), + e); + } + } + + s = options.remove(ALLOW_UNSAFE_AGGRESSIVE_SSTABLE_EXPIRATION_OPTION); + if (s != null && !s.equalsIgnoreCase("true") && !s.equalsIgnoreCase("false")) + { + throw new ConfigurationException(String.format(booleanParseErr, + ALLOW_UNSAFE_AGGRESSIVE_SSTABLE_EXPIRATION_OPTION, s)); + } + + s = options.remove(OVERLAP_INCLUSION_METHOD_OPTION); + if (s != null) + { + try + { + Overlaps.InclusionMethod.valueOf(s.toUpperCase()); + } + catch (IllegalArgumentException e) + { + throw new ConfigurationException(String.format("Invalid overlap inclusion method %s. The valid options are %s.", + s, + Arrays.toString(Overlaps.InclusionMethod.values()))); + } + } + + return options; + } + + // The methods below are implemented here (rather than directly in UCS) to aid testability. + + public double getBaseSstableSize(int F) + { + // The compaction hierarchy should start at a minimum size which is close to the typical flush size, with + // some leeway to make sure we don't overcompact when flushes end up a little smaller. + // The leeway should be less than 1/F, though, to make sure we don't overshoot the boundary combining F-1 + // sources instead of F. + // Note that while we have not had flushes, the size will be 0 and we will use 1MB as the flush size. With + // fixed and positive W this should not hurt us, as the hierarchy will be in multiples of F and will still + // result in the same buckets, but for negative W or hybrid strategies this may cause temporary overcompaction. + // If this is a concern, the flush size override should be used to avoid it until DB-4401. 
+ return Math.max(1 << 20, getFlushSizeBytes()) * (1.0 - 0.9 / F); + } + + public double getMaxLevelDensity(int index, double minSize) + { + return Math.floor(minSize * getFanout(index) * getSurvivalFactor(index)); + } + + public double maxThroughput() + { + double compactionThroughputMbPerSec = DatabaseDescriptor.getCompactionThroughputMebibytesPerSec(); + if (compactionThroughputMbPerSec <= 0) + return Double.MAX_VALUE; + return Math.scalb(compactionThroughputMbPerSec, 20); + } + + public int maxConcurrentCompactions() + { + return DatabaseDescriptor.getConcurrentCompactors(); + } + + public int maxSSTablesToCompact() + { + return maxSSTablesToCompact; + } + + /** + * Random number generator to be used for the selection of tasks. + * Replaced by some tests. + */ + public Random random() + { + return ThreadLocalRandom.current(); + } + + /** + * Return the overlap inclusion method to use when combining overlap sections into a bucket. For example, with + * SSTables A(0, 5), B(2, 9), C(6, 12), D(10, 12) whose overlap sections calculation returns [AB, BC, CD], + * - NONE means no sections are to be merged. AB, BC and CD will be separate buckets, compactions AB, BC and CD + * will be added separately, thus some SSTables will be partially used / single-source compacted, likely + * to be recompacted again with the next selected bucket. + * - SINGLE means only overlaps of the sstables in the selected bucket will be added. AB+BC will be one bucket, + * and CD will be another (as BC is already used). A middle ground of sorts, should reduce overcompaction but + * still has some. + * - TRANSITIVE means a transitive closure of overlapping sstables will be selected. AB+BC+CD will be in the same + * bucket, selected compactions will apply to all overlapping sstables and no overcompaction will be done, at + * the cost of reduced compaction parallelism and increased length of the operation. + * TRANSITIVE is the default and makes most sense. 
NONE is a closer approximation to operation of legacy UCS. + * The option is exposed for experimentation. + */ + public Overlaps.InclusionMethod overlapInclusionMethod() + { + return overlapInclusionMethod; + } + + public static int[] parseScalingParameters(String str) + { + String[] vals = str.split(","); + int[] ret = new int[vals.length]; + for (int i = 0; i < vals.length; i++) + { + String value = vals[i].trim(); + int W = UnifiedCompactionStrategy.parseScalingParameter(value); + ret[i] = W; + } + + return ret; + } + + public static String printScalingParameters(int[] parameters) + { + StringBuilder builder = new StringBuilder(); + int i; + for (i = 0; i < parameters.length - 1; ++i) + { + builder.append(UnifiedCompactionStrategy.printScalingParameter(parameters[i])); + builder.append(", "); + } + builder.append(UnifiedCompactionStrategy.printScalingParameter(parameters[i])); + return builder.toString(); + } +} diff --git a/src/java/org/apache/cassandra/db/compaction/unified/ShardedCompactionWriter.java b/src/java/org/apache/cassandra/db/compaction/unified/ShardedCompactionWriter.java new file mode 100644 index 000000000000..c7c1eb92339c --- /dev/null +++ b/src/java/org/apache/cassandra/db/compaction/unified/ShardedCompactionWriter.java @@ -0,0 +1,102 @@ +/* + * Copyright DataStax, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.cassandra.db.compaction.unified; + +import java.util.Set; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.cassandra.db.ColumnFamilyStore; +import org.apache.cassandra.db.DecoratedKey; +import org.apache.cassandra.db.Directories; +import org.apache.cassandra.db.compaction.ShardTracker; +import org.apache.cassandra.db.compaction.writers.CompactionAwareWriter; +import org.apache.cassandra.db.lifecycle.LifecycleTransaction; +import org.apache.cassandra.io.sstable.format.SSTableReader; +import org.apache.cassandra.io.sstable.format.SSTableWriter; +import org.apache.cassandra.utils.FBUtilities; + +/** + * A {@link CompactionAwareWriter} that splits the output sstable at the partition boundaries of the compaction + * shards used by {@link org.apache.cassandra.db.compaction.UnifiedCompactionStrategy} as long as the size of + * the sstable so far is sufficiently large. + */ +public class ShardedCompactionWriter extends CompactionAwareWriter +{ + protected final static Logger logger = LoggerFactory.getLogger(ShardedCompactionWriter.class); + + private final double uniqueKeyRatio; + + private final ShardTracker boundaries; + + public ShardedCompactionWriter(ColumnFamilyStore cfs, + Directories directories, + LifecycleTransaction txn, + Set nonExpiredSSTables, + boolean keepOriginals, + ShardTracker boundaries) + { + super(cfs, directories, txn, nonExpiredSSTables, keepOriginals); + + this.boundaries = boundaries; + long totalKeyCount = nonExpiredSSTables.stream() + .mapToLong(SSTableReader::estimatedKeys) + .sum(); + this.uniqueKeyRatio = 1.0 * SSTableReader.getApproximateKeyCount(nonExpiredSSTables) / totalKeyCount; + } + + @Override + protected boolean shouldSwitchWriterInCurrentLocation(DecoratedKey key) + { + // If we have written anything and cross a shard boundary, switch to a new writer. 
+ final long uncompressedBytesWritten = sstableWriter.currentWriter().getFilePointer(); + if (boundaries.advanceTo(key.getToken()) && uncompressedBytesWritten > 0) + { + logger.debug("Switching writer at boundary {}/{} index {}, with uncompressed size {} for {}.{}", + key.getToken(), boundaries.shardStart(), + boundaries.shardIndex(), + FBUtilities.prettyPrintMemory(uncompressedBytesWritten), + cfs.getKeyspaceName(), cfs.getTableName()); + return true; + } + + return false; + } + + @Override + @SuppressWarnings("resource") + protected SSTableWriter sstableWriter(Directories.DataDirectory directory, DecoratedKey nextKey) + { + if (nextKey != null) + boundaries.advanceTo(nextKey.getToken()); + return super.sstableWriter(directory, nextKey); + } + + protected long sstableKeyCount() + { + return shardAdjustedKeyCount(boundaries, nonExpiredSSTables, uniqueKeyRatio); + } + + private static long shardAdjustedKeyCount(ShardTracker boundaries, + Set sstables, + double survivalRatio) + { + // Note: computationally non-trivial; can be optimized if we save start/stop shards and size per table. + return Math.round(boundaries.shardAdjustedKeyCount(sstables) * survivalRatio); + } +} \ No newline at end of file diff --git a/src/java/org/apache/cassandra/db/compaction/unified/ShardedMultiWriter.java b/src/java/org/apache/cassandra/db/compaction/unified/ShardedMultiWriter.java new file mode 100644 index 000000000000..aae606691c45 --- /dev/null +++ b/src/java/org/apache/cassandra/db/compaction/unified/ShardedMultiWriter.java @@ -0,0 +1,248 @@ +/* + * Copyright DataStax, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.cassandra.db.compaction.unified; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.cassandra.db.ColumnFamilyStore; +import org.apache.cassandra.db.DecoratedKey; +import org.apache.cassandra.db.SerializationHeader; +import org.apache.cassandra.db.commitlog.CommitLogPosition; +import org.apache.cassandra.db.commitlog.IntervalSet; +import org.apache.cassandra.db.compaction.ShardTracker; +import org.apache.cassandra.db.lifecycle.LifecycleNewTracker; +import org.apache.cassandra.db.rows.UnfilteredRowIterator; +import org.apache.cassandra.index.Index; +import org.apache.cassandra.io.sstable.Descriptor; +import org.apache.cassandra.io.sstable.SSTableMultiWriter; +import org.apache.cassandra.io.sstable.format.SSTableReader; +import org.apache.cassandra.io.sstable.format.SSTableWriter; +import org.apache.cassandra.io.sstable.metadata.MetadataCollector; +import org.apache.cassandra.schema.TableId; +import org.apache.cassandra.utils.FBUtilities; +import org.apache.cassandra.utils.TimeUUID; + +/** + * A {@link SSTableMultiWriter} that splits the output sstable at the partition boundaries of the compaction + * shards used by {@link org.apache.cassandra.db.compaction.UnifiedCompactionStrategy} as long as the size of + * the sstable so far is sufficiently large. + *

+ * This class is similar to {@link ShardedCompactionWriter} but for flushing. Unfortunately
+ * we currently have two separate writer hierarchies that are not compatible, and so we must
+ * duplicate the functionality of splitting sstables over compaction shards if they have
+ * reached a minimum size.
+ */
+public class ShardedMultiWriter implements SSTableMultiWriter
+{
+    protected final static Logger logger = LoggerFactory.getLogger(ShardedMultiWriter.class);
+
+    private final ColumnFamilyStore cfs;
+    private final Descriptor descriptor;
+    private final long keyCount;
+    private final long repairedAt;
+    private final TimeUUID pendingRepair;
+    private final boolean isTransient;
+    private final IntervalSet<CommitLogPosition> commitLogPositions;
+    private final SerializationHeader header;
+    private final Collection<Index> indexes;
+    private final LifecycleNewTracker lifecycleNewTracker;
+    private final ShardTracker boundaries;
+    private final SSTableWriter[] writers;
+    private int currentWriter;
+
+    public ShardedMultiWriter(ColumnFamilyStore cfs,
+                              Descriptor descriptor,
+                              long keyCount,
+                              long repairedAt,
+                              TimeUUID pendingRepair,
+                              boolean isTransient,
+                              IntervalSet<CommitLogPosition> commitLogPositions,
+                              SerializationHeader header,
+                              Collection<Index> indexes,
+                              LifecycleNewTracker lifecycleNewTracker,
+                              ShardTracker boundaries)
+    {
+        this.cfs = cfs;
+        this.descriptor = descriptor;
+        this.keyCount = keyCount;
+        this.repairedAt = repairedAt;
+        this.pendingRepair = pendingRepair;
+        this.isTransient = isTransient;
+        this.commitLogPositions = commitLogPositions;
+        this.header = header;
+        this.indexes = indexes;
+        this.lifecycleNewTracker = lifecycleNewTracker;
+        this.boundaries = boundaries;
+        this.writers = new SSTableWriter[this.boundaries.count()]; // at least one
+
+        this.currentWriter = 0;
+        this.writers[currentWriter] = createWriter(descriptor);
+    }
+
+    private SSTableWriter createWriter()
+    {
+        Descriptor newDesc = cfs.newSSTableDescriptor(descriptor.directory);
+        return createWriter(newDesc);
+    }
+
+    private 
SSTableWriter createWriter(Descriptor descriptor) + { + MetadataCollector metadataCollector = new MetadataCollector(cfs.metadata().comparator) + .commitLogIntervals(commitLogPositions != null ? commitLogPositions : IntervalSet.empty()); + return descriptor.getFormat().getWriterFactory().builder(descriptor) + .setKeyCount(forSplittingKeysBy(boundaries.count())) + .setRepairedAt(repairedAt) + .setPendingRepair(pendingRepair) + .setTransientSSTable(isTransient) + .setTableMetadataRef(cfs.metadata) + .setMetadataCollector(metadataCollector) + .setSerializationHeader(header) + .addDefaultComponents() + .addFlushObserversForSecondaryIndexes(indexes, lifecycleNewTracker.opType()) + .build(lifecycleNewTracker, cfs); + } + + private long forSplittingKeysBy(long splits) { + return splits <= 1 ? keyCount : keyCount / splits; + } + + @Override + public boolean append(UnfilteredRowIterator partition) + { + DecoratedKey key = partition.partitionKey(); + + // If we have written anything and cross a shard boundary, switch to a new writer. 
+ final long currentUncompressedSize = writers[currentWriter].getFilePointer(); + if (boundaries.advanceTo(key.getToken()) && currentUncompressedSize > 0) + { + logger.debug("Switching writer at boundary {}/{} index {}, with uncompressed size {} for {}.{}", + key.getToken(), boundaries.shardStart(), currentWriter, + FBUtilities.prettyPrintMemory(currentUncompressedSize), + cfs.getKeyspaceName(), cfs.getTableName()); + + writers[++currentWriter] = createWriter(); + } + + return writers[currentWriter].append(partition) != null; + } + + @Override + public Collection finish(boolean openResult) + { + List sstables = new ArrayList<>(writers.length); + for (SSTableWriter writer : writers) + if (writer != null) + sstables.add(writer.finish(openResult)); + return sstables; + } + + @Override + public Collection finished() + { + List sstables = new ArrayList<>(writers.length); + for (SSTableWriter writer : writers) + if (writer != null) + sstables.add(writer.finished()); + return sstables; + } + + @Override + public SSTableMultiWriter setOpenResult(boolean openResult) + { + for (SSTableWriter writer : writers) + if (writer != null) + writer.setOpenResult(openResult); + return this; + } + + @Override + public String getFilename() + { + for (SSTableWriter writer : writers) + if (writer != null) + return writer.getFilename(); + return ""; + } + + @Override + public long getBytesWritten() + { + long bytesWritten = 0; + for (int i = 0; i <= currentWriter; ++i) + bytesWritten += writers[i].getFilePointer(); + return bytesWritten; + } + + @Override + public long getOnDiskBytesWritten() + { + long bytesWritten = 0; + for (int i = 0; i <= currentWriter; ++i) + bytesWritten += writers[i].getEstimatedOnDiskBytesWritten(); + return bytesWritten; + } + + @Override + public TableId getTableId() + { + return cfs.metadata().id; + } + + @Override + public Throwable commit(Throwable accumulate) + { + Throwable t = accumulate; + for (SSTableWriter writer : writers) + if (writer != null) + t = 
writer.commit(t); + return t; + } + + @Override + public Throwable abort(Throwable accumulate) + { + Throwable t = accumulate; + for (SSTableWriter writer : writers) + if (writer != null) + { + lifecycleNewTracker.untrackNew(writer); + t = writer.abort(t); + } + return t; + } + + @Override + public void prepareToCommit() + { + for (SSTableWriter writer : writers) + if (writer != null) + writer.prepareToCommit(); + } + + @Override + public void close() + { + for (SSTableWriter writer : writers) + if (writer != null) + writer.close(); + } +} \ No newline at end of file diff --git a/src/java/org/apache/cassandra/db/compaction/unified/UnifiedCompactionTask.java b/src/java/org/apache/cassandra/db/compaction/unified/UnifiedCompactionTask.java new file mode 100644 index 000000000000..720ce2dbdc1d --- /dev/null +++ b/src/java/org/apache/cassandra/db/compaction/unified/UnifiedCompactionTask.java @@ -0,0 +1,59 @@ +/* + * Copyright DataStax, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.cassandra.db.compaction.unified; + +import java.util.Set; + +import org.apache.cassandra.db.ColumnFamilyStore; +import org.apache.cassandra.db.Directories; +import org.apache.cassandra.db.compaction.CompactionTask; +import org.apache.cassandra.db.compaction.ShardManager; +import org.apache.cassandra.db.compaction.UnifiedCompactionStrategy; +import org.apache.cassandra.db.compaction.writers.CompactionAwareWriter; +import org.apache.cassandra.db.lifecycle.LifecycleTransaction; +import org.apache.cassandra.io.sstable.format.SSTableReader; + +/** + * The sole purpose of this class is to currently create a {@link ShardedCompactionWriter}. + */ +public class UnifiedCompactionTask extends CompactionTask +{ + private final ShardManager shardManager; + private final Controller controller; + + public UnifiedCompactionTask(ColumnFamilyStore cfs, + UnifiedCompactionStrategy strategy, + LifecycleTransaction txn, + long gcBefore, + ShardManager shardManager) + { + super(cfs, txn, gcBefore, strategy.getController().getIgnoreOverlapsInExpirationCheck()); + this.controller = strategy.getController(); + this.shardManager = shardManager; + } + + @Override + public CompactionAwareWriter getCompactionAwareWriter(ColumnFamilyStore cfs, + Directories directories, + LifecycleTransaction txn, + Set nonExpiredSSTables) + { + double density = shardManager.calculateCombinedDensity(nonExpiredSSTables); + int numShards = controller.getNumShards(density); + return new ShardedCompactionWriter(cfs, directories, txn, nonExpiredSSTables, keepOriginals, shardManager.boundaries(numShards)); + } +} \ No newline at end of file diff --git a/src/java/org/apache/cassandra/dht/Range.java b/src/java/org/apache/cassandra/dht/Range.java index 2c468990d21e..0ba6d2087092 100644 --- a/src/java/org/apache/cassandra/dht/Range.java +++ b/src/java/org/apache/cassandra/dht/Range.java @@ -221,6 +221,39 @@ private static > Set> intersectionOneWrapping return 
Collections.unmodifiableSet(intersection);
     }
 
+    /**
+     * Returns the intersection of this range with the provided one, assuming neither is wrapping.
+     *
+     * @param that the other range to return the intersection with. It must not be wrapping.
+     * @return the intersection of {@code this} and {@code that}, or {@code null} if the ranges don't intersect.
+     */
+    public Range<T> intersectionNonWrapping(Range<T> that)
+    {
+        assert !isTrulyWrapAround() : "wraparound " + this;
+        assert !that.isTrulyWrapAround() : "wraparound " + that;
+
+        if (left.compareTo(that.left) < 0)
+        {
+            if (right.isMinimum() || (!that.right.isMinimum() && right.compareTo(that.right) >= 0))
+                return that; // this contains that.
+
+            if (right.compareTo(that.left) <= 0)
+                return null; // this is fully before that.
+
+            return new Range<>(that.left, right);
+        }
+        else
+        {
+            if (that.right.isMinimum() || (!right.isMinimum() && that.right.compareTo(right) >= 0))
+                return this; // that contains this.
+
+            if (that.right.compareTo(left) <= 0)
+                return null; // that is fully before this.
+
+            return new Range<>(left, that.right);
+        }
+    }
+
     public Pair<AbstractBounds<T>, AbstractBounds<T>> split(T position)
     {
         assert contains(position) || left.equals(position);
@@ -262,6 +295,29 @@ public static <T extends RingPosition<T>> boolean isWrapAround(T left, T right)
         return left.compareTo(right) >= 0;
     }
 
+    /**
+     * Checks if the range truly wraps around.
+     *
+     * This exists only because {@link #isWrapAround()} is a tad dumb and returns true if right is the minimum token,
+     * no matter what left is, but for most intents and purposes, such a range doesn't truly wrap around (unwrap produces
+     * the identity in this case).
+     *

+ * Also note that it could be that the remaining uses of {@link #isWrapAround()} could be replaced by this method, + * but that is to be checked carefully at some other time (Sylvain). + *

+ * The one thing this method guarantees is that if it's true, then {@link #unwrap()} will return a list with + * exactly 2 ranges, never one. + */ + public boolean isTrulyWrapAround() + { + return isTrulyWrapAround(left, right); + } + + public static > boolean isTrulyWrapAround(T left, T right) + { + return isWrapAround(left, right) && !right.isMinimum(); + } + /** * Tells if the given range covers the entire ring */ diff --git a/src/java/org/apache/cassandra/dht/Splitter.java b/src/java/org/apache/cassandra/dht/Splitter.java index 159385932105..53b4462221cd 100644 --- a/src/java/org/apache/cassandra/dht/Splitter.java +++ b/src/java/org/apache/cassandra/dht/Splitter.java @@ -283,6 +283,14 @@ public BigInteger totalTokens(Splitter splitter) return size.abs().divide(factor); } + /** + * A less precise version of the above, returning the size of the span as a double approximation. + */ + public double size() + { + return left().size(right()) * weight; + } + public Token left() { return range.left; @@ -298,6 +306,11 @@ public Range range() return range; } + public double weight() + { + return weight; + } + public String toString() { return "WeightedRange{" + diff --git a/src/java/org/apache/cassandra/io/sstable/SSTableRewriter.java b/src/java/org/apache/cassandra/io/sstable/SSTableRewriter.java index 1aa352694a70..101e76ff5a2e 100644 --- a/src/java/org/apache/cassandra/io/sstable/SSTableRewriter.java +++ b/src/java/org/apache/cassandra/io/sstable/SSTableRewriter.java @@ -19,6 +19,7 @@ import java.util.ArrayList; import java.util.List; +import java.util.function.Consumer; import com.google.common.annotations.VisibleForTesting; @@ -59,6 +60,7 @@ public class SSTableRewriter extends Transactional.AbstractTransactional impleme private final List preparedForCommit = new ArrayList<>(); private long currentlyOpenedEarlyAt; // the position (in MiB) in the target file we last (re)opened at + private long bytesWritten; // the bytes written by previous writers, or zero if the 
current writer is the first writer private final List writers = new ArrayList<>(); private final boolean keepOriginals; // true if we do not want to obsolete the originals @@ -112,6 +114,19 @@ public SSTableWriter currentWriter() return writer; } + public long bytesWritten() + { + return bytesWritten + (writer == null ? 0 : writer.getFilePointer()); + } + + public void forEachWriter(Consumer op) + { + for (SSTableWriter writer : writers) + op.accept(writer); + if (writer != null) + op.accept(writer); + } + public AbstractRowIndexEntry append(UnfilteredRowIterator partition) { // we do this before appending to ensure we can resetAndTruncate() safely if appending fails @@ -260,6 +275,7 @@ public void switchWriter(SSTableWriter newWriter) } currentlyOpenedEarlyAt = 0; + bytesWritten += writer.getFilePointer(); writer = newWriter; } diff --git a/src/java/org/apache/cassandra/io/sstable/format/SSTableReader.java b/src/java/org/apache/cassandra/io/sstable/format/SSTableReader.java index 6b2f4460a68d..4c8b69991402 100644 --- a/src/java/org/apache/cassandra/io/sstable/format/SSTableReader.java +++ b/src/java/org/apache/cassandra/io/sstable/format/SSTableReader.java @@ -185,6 +185,7 @@ public static final class UniqueIdentifier public static final Comparator firstKeyComparator = (o1, o2) -> o1.getFirst().compareTo(o2.getFirst()); public static final Ordering firstKeyOrdering = Ordering.from(firstKeyComparator); + public static final Comparator lastKeyComparator = (o1, o2) -> o1.getLast().compareTo(o2.getLast()); public static final Comparator idComparator = Comparator.comparing(t -> t.descriptor.id, SSTableIdFactory.COMPARATOR); public static final Comparator idReverseComparator = idComparator.reversed(); diff --git a/src/java/org/apache/cassandra/schema/CompactionParams.java b/src/java/org/apache/cassandra/schema/CompactionParams.java index eff634f429a5..06446276c2b6 100644 --- a/src/java/org/apache/cassandra/schema/CompactionParams.java +++ 
b/src/java/org/apache/cassandra/schema/CompactionParams.java @@ -34,6 +34,7 @@ import org.apache.cassandra.db.compaction.LeveledCompactionStrategy; import org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy; import org.apache.cassandra.db.compaction.TimeWindowCompactionStrategy; +import org.apache.cassandra.db.compaction.UnifiedCompactionStrategy; import org.apache.cassandra.exceptions.ConfigurationException; import org.apache.cassandra.utils.FBUtilities; @@ -136,6 +137,11 @@ public static CompactionParams lcs(Map options) return create(LeveledCompactionStrategy.class, options); } + public static CompactionParams ucs(Map options) + { + return create(UnifiedCompactionStrategy.class, options); + } + public static CompactionParams twcs(Map options) { return create(TimeWindowCompactionStrategy.class, options); diff --git a/src/java/org/apache/cassandra/utils/FBUtilities.java b/src/java/org/apache/cassandra/utils/FBUtilities.java index 49b9dd822cea..1740833d9663 100644 --- a/src/java/org/apache/cassandra/utils/FBUtilities.java +++ b/src/java/org/apache/cassandra/utils/FBUtilities.java @@ -1009,6 +1009,11 @@ public static double parseHumanReadable(String datum, String separator, String u return v; } + public static long parseHumanReadableBytes(String value) + { + return (long) parseHumanReadable(value, null, "B"); + } + /** * Starts and waits for the given @param pb to finish. * @throws java.io.IOException on non-zero exit code diff --git a/src/java/org/apache/cassandra/utils/Overlaps.java b/src/java/org/apache/cassandra/utils/Overlaps.java new file mode 100644 index 000000000000..6e7c2ef41636 --- /dev/null +++ b/src/java/org/apache/cassandra/utils/Overlaps.java @@ -0,0 +1,212 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.cassandra.utils; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.Comparator; +import java.util.HashSet; +import java.util.List; +import java.util.PriorityQueue; +import java.util.Set; +import java.util.function.BiPredicate; + +public class Overlaps +{ + /** + * Construct a minimal list of overlap sets, i.e. the sections of the range span when we have overlapping items, + * where we ensure: + * - non-overlapping items are never put in the same set + * - no item is present in non-consecutive sets + * - for any point where items overlap, the result includes a set listing all overlapping items + *

+ * For example, for inputs A[0, 4), B[2, 8), C[6, 10), D[1, 9) the result would be the sets ABD and BCD. We are not + * interested in the spans where A, B, or C are present on their own or in combination with D, only that there + * exists a set in the list that is a superset of any such combination, and that the non-overlapping A and C are + * never together in a set. + *

+ * Note that the full list of overlap sets A, AD, ABD, BD, BCD, CD, C is also an answer that satisfies the three + * conditions above, but it contains redundant sets (e.g. AD is already contained in ABD). + * + * @param items A list of items to distribute in overlap sets. This is assumed to be a transient list and the method + * may modify or consume it. It is assumed that the start and end positions of an item are ordered, + * and the items are non-empty. + * @param startsAfter Predicate determining if its left argument's start if fully after the right argument's end. + * This will only be used with arguments where left's start is known to be after right's start. + * It is up to the caller if this is a strict comparison -- strict (>) for end-inclusive spans + * and non-strict (>=) for end-exclusive. + * @param startsComparator Comparator of items' starting positions. + * @param endsComparator Comparator of items' ending positions. + * @return List of overlap sets. + */ + public static List> constructOverlapSets(List items, + BiPredicate startsAfter, + Comparator startsComparator, + Comparator endsComparator) + { + List> overlaps = new ArrayList<>(); + if (items.isEmpty()) + return overlaps; + + PriorityQueue active = new PriorityQueue<>(endsComparator); + items.sort(startsComparator); + for (E item : items) + { + if (!active.isEmpty() && startsAfter.test(item, active.peek())) + { + // New item starts after some active ends. It does not overlap with it, so: + // -- output the previous active set + overlaps.add(new HashSet<>(active)); + // -- remove all items that also end before the current start + do + { + active.poll(); + } + while (!active.isEmpty() && startsAfter.test(item, active.peek())); + } + + // Add the new item to the active state. We don't care if it starts later than others in the active set, + // the important point is that it overlaps with all of them. 
+ active.add(item); + } + + assert !active.isEmpty(); + overlaps.add(new HashSet<>(active)); + + return overlaps; + } + public enum InclusionMethod + { + NONE, SINGLE, TRANSITIVE; + } + + public interface BucketMaker + { + B makeBucket(List> sets, int startIndexInclusive, int endIndexExclusive); + } + + /** + * Assign overlap sections into buckets. Identify sections that have at least threshold-many overlapping + * items and apply the overlap inclusion method to combine with any neighbouring sections that contain + * selected sstables to make sure we make full use of any sstables selected for compaction (i.e. avoid + * recompacting, see {@link org.apache.cassandra.db.compaction.unified.Controller#overlapInclusionMethod()}). + * + * @param threshold Threshold for selecting a bucket. Sets below this size will be ignored, unless they need + * to be grouped with a neighboring set due to overlap. + * @param inclusionMethod NONE to only form buckets of the overlapping sets, SINGLE to include all + * sets that share an sstable with a selected bucket, or TRANSITIVE to include + * all sets that have an overlap chain to a selected bucket. + * @param overlaps An ordered list of overlap sets as returned by {@link #constructOverlapSets}. + * @param bucketer Method used to create a bucket out of the supplied set indexes. + */ + public static List assignOverlapsIntoBuckets(int threshold, + InclusionMethod inclusionMethod, + List> overlaps, + BucketMaker bucketer) + { + List buckets = new ArrayList<>(); + int regionCount = overlaps.size(); + int lastEnd = -1; + for (int i = 0; i < regionCount; ++i) + { + Set bucket = overlaps.get(i); + int maxOverlap = bucket.size(); + if (maxOverlap < threshold) + continue; + int startIndex = i; + int endIndex = i + 1; + + if (inclusionMethod != InclusionMethod.NONE) + { + Set allOverlapping = new HashSet<>(bucket); + Set overlapTarget = inclusionMethod == InclusionMethod.TRANSITIVE + ? 
allOverlapping + : bucket; + int j; + for (j = i - 1; j > lastEnd; --j) + { + Set next = overlaps.get(j); + if (!setsIntersect(next, overlapTarget)) + break; + allOverlapping.addAll(next); + } + startIndex = j + 1; + for (j = i + 1; j < regionCount; ++j) + { + Set next = overlaps.get(j); + if (!setsIntersect(next, overlapTarget)) + break; + allOverlapping.addAll(next); + } + i = j - 1; + endIndex = j; + } + buckets.add(bucketer.makeBucket(overlaps, startIndex, endIndex)); + lastEnd = i; + } + return buckets; + } + + private static boolean setsIntersect(Set s1, Set s2) + { + // Note: optimized for small sets and O(1) lookup. + for (E s : s1) + if (s2.contains(s)) + return true; + + return false; + } + + /** + * Pull the last elements from the given list, up to the given limit. + */ + public static List pullLast(List source, int limit) + { + List result = new ArrayList<>(limit); + while (--limit >= 0) + result.add(source.remove(source.size() - 1)); + return result; + } + + /** + * Select up to `limit` sstables from each overlapping set (more than `limit` in total) by taking the last entries + * from `allObjectsSorted`. To achieve this, keep selecting the last sstable until the next one we would add would + * bring the number selected in some overlap section over `limit`. 
+ */ + public static Collection pullLastWithOverlapLimit(List allObjectsSorted, List> overlapSets, int limit) + { + int setsCount = overlapSets.size(); + int[] selectedInBucket = new int[setsCount]; + int allCount = allObjectsSorted.size(); + for (int selectedCount = 0; selectedCount < allCount; ++selectedCount) + { + T candidate = allObjectsSorted.get(allCount - 1 - selectedCount); + for (int i = 0; i < setsCount; ++i) + { + if (overlapSets.get(i).contains(candidate)) + { + ++selectedInBucket[i]; + if (selectedInBucket[i] > limit) + return pullLast(allObjectsSorted, selectedCount); + } + } + } + return allObjectsSorted; + } +} diff --git a/test/unit/org/apache/cassandra/db/compaction/CompactionsCQLTest.java b/test/unit/org/apache/cassandra/db/compaction/CompactionsCQLTest.java index 3ad042b22d38..ca9f5bbcd16a 100644 --- a/test/unit/org/apache/cassandra/db/compaction/CompactionsCQLTest.java +++ b/test/unit/org/apache/cassandra/db/compaction/CompactionsCQLTest.java @@ -208,6 +208,18 @@ public void testTriggerMinorCompactionSTCSAlterTable() throws Throwable public void testSetLocalCompactionStrategy() throws Throwable { createTable("CREATE TABLE %s (id text PRIMARY KEY)"); + testSetLocalCompactionStrategy(SizeTieredCompactionStrategy.class); + } + + @Test + public void testSetLocalCompactionStrategyUCS() throws Throwable + { + testSetLocalCompactionStrategy(UnifiedCompactionStrategy.class); + } + + private void testSetLocalCompactionStrategy(Class strategy) throws Throwable + { + createTable(String.format("CREATE TABLE %%s (id text PRIMARY KEY) with compaction = {'class': '%s'}", strategy.getSimpleName())); Map localOptions = new HashMap<>(); localOptions.put("class", "SizeTieredCompactionStrategy"); getCurrentColumnFamilyStore().setCompactionParameters(localOptions); diff --git a/test/unit/org/apache/cassandra/db/compaction/CorruptedSSTablesCompactionsTest.java b/test/unit/org/apache/cassandra/db/compaction/CorruptedSSTablesCompactionsTest.java index 
33dfc2559941..774055bdd402 100644 --- a/test/unit/org/apache/cassandra/db/compaction/CorruptedSSTablesCompactionsTest.java +++ b/test/unit/org/apache/cassandra/db/compaction/CorruptedSSTablesCompactionsTest.java @@ -60,6 +60,7 @@ public class CorruptedSSTablesCompactionsTest private static final String KEYSPACE1 = "CorruptedSSTablesCompactionsTest"; private static final String STANDARD_STCS = "Standard_STCS"; private static final String STANDARD_LCS = "Standard_LCS"; + private static final String STANDARD_UCS = "Standard_UCS"; private static int maxValueSize; @After @@ -86,8 +87,9 @@ public static void defineSchema() throws ConfigurationException SchemaLoader.prepareServer(); SchemaLoader.createKeyspace(KEYSPACE1, KeyspaceParams.simple(1), - makeTable(STANDARD_STCS).compaction(CompactionParams.DEFAULT), - makeTable(STANDARD_LCS).compaction(CompactionParams.lcs(Collections.emptyMap()))); + makeTable(STANDARD_STCS).compaction(CompactionParams.stcs(Collections.emptyMap())), + makeTable(STANDARD_LCS).compaction(CompactionParams.lcs(Collections.emptyMap())), + makeTable(STANDARD_UCS).compaction(CompactionParams.ucs(Collections.emptyMap()))); maxValueSize = DatabaseDescriptor.getMaxValueSize(); DatabaseDescriptor.setMaxValueSize(1024 * 1024); @@ -130,6 +132,12 @@ public void testCorruptedSSTablesWithLeveledCompactionStrategy() throws Exceptio testCorruptedSSTables(STANDARD_LCS); } + @Test + public void testCorruptedSSTablesWithUnifiedCompactionStrategy() throws Exception + { + testCorruptedSSTables(STANDARD_UCS); + } + public void testCorruptedSSTables(String tableName) throws Exception { diff --git a/test/unit/org/apache/cassandra/db/compaction/ShardManagerTest.java b/test/unit/org/apache/cassandra/db/compaction/ShardManagerTest.java new file mode 100644 index 000000000000..bb1f8dadff1a --- /dev/null +++ b/test/unit/org/apache/cassandra/db/compaction/ShardManagerTest.java @@ -0,0 +1,408 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more 
contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.cassandra.db.compaction; + +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.stream.Collectors; + +import com.google.common.collect.ImmutableList; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +import org.agrona.collections.IntArrayList; +import org.apache.cassandra.config.DatabaseDescriptor; +import org.apache.cassandra.db.BufferDecoratedKey; +import org.apache.cassandra.db.ColumnFamilyStore; +import org.apache.cassandra.db.DecoratedKey; +import org.apache.cassandra.db.DiskBoundaries; +import org.apache.cassandra.db.PartitionPosition; +import org.apache.cassandra.dht.IPartitioner; +import org.apache.cassandra.dht.Murmur3Partitioner; +import org.apache.cassandra.dht.Range; +import org.apache.cassandra.dht.Splitter; +import org.apache.cassandra.dht.Token; +import org.apache.cassandra.io.sstable.format.SSTableReader; +import org.mockito.Mockito; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.when; + +public class ShardManagerTest +{ + final IPartitioner partitioner = 
Murmur3Partitioner.instance; + final Token minimumToken = partitioner.getMinimumToken(); + + ColumnFamilyStore.VersionedLocalRanges weightedRanges; + + static final double delta = 1e-15; + + @Before + public void setUp() + { + DatabaseDescriptor.daemonInitialization(); // because of all the static initialization in CFS + DatabaseDescriptor.setPartitionerUnsafe(Murmur3Partitioner.instance); + weightedRanges = new ColumnFamilyStore.VersionedLocalRanges(-1, 16); + } + + @Test + public void testRangeSpannedFullOwnership() + { + weightedRanges.add(new Splitter.WeightedRange(1.0, new Range<>(minimumToken, minimumToken))); + ShardManager shardManager = new ShardManagerNoDisks(weightedRanges); + + // sanity check + assertEquals(0.4, tokenAt(0.1).size(tokenAt(0.5)), delta); + + assertEquals(0.5, shardManager.rangeSpanned(range(0.2, 0.7)), delta); + assertEquals(0.2, shardManager.rangeSpanned(range(0.3, 0.5)), delta); + + assertEquals(0.2, shardManager.rangeSpanned(mockedTable(0.5, 0.7, Double.NaN)), delta); + // single-partition correction + assertEquals(1.0, shardManager.rangeSpanned(mockedTable(0.3, 0.3, Double.NaN)), delta); + + // reported coverage + assertEquals(0.1, shardManager.rangeSpanned(mockedTable(0.5, 0.7, 0.1)), delta); + // bad coverage + assertEquals(0.2, shardManager.rangeSpanned(mockedTable(0.5, 0.7, 0.0)), delta); + assertEquals(0.2, shardManager.rangeSpanned(mockedTable(0.5, 0.7, -1)), delta); + + // correction over coverage + assertEquals(1.0, shardManager.rangeSpanned(mockedTable(0.3, 0.5, 1e-50)), delta); + } + + @Test + public void testRangeSpannedPartialOwnership() + { + weightedRanges.add(new Splitter.WeightedRange(1.0, new Range<>(tokenAt(0.05), tokenAt(0.15)))); + weightedRanges.add(new Splitter.WeightedRange(1.0, new Range<>(tokenAt(0.3), tokenAt(0.4)))); + weightedRanges.add(new Splitter.WeightedRange(1.0, new Range<>(tokenAt(0.45), tokenAt(0.5)))); + weightedRanges.add(new Splitter.WeightedRange(1.0, new Range<>(tokenAt(0.7), tokenAt(0.75)))); 
+ weightedRanges.add(new Splitter.WeightedRange(1.0, new Range<>(tokenAt(0.75), tokenAt(0.85)))); + weightedRanges.add(new Splitter.WeightedRange(1.0, new Range<>(tokenAt(0.90), tokenAt(0.91)))); + weightedRanges.add(new Splitter.WeightedRange(1.0, new Range<>(tokenAt(0.92), tokenAt(0.94)))); + weightedRanges.add(new Splitter.WeightedRange(1.0, new Range<>(tokenAt(0.98), tokenAt(1.0)))); + double total = weightedRanges.stream().mapToDouble(wr -> wr.range().left.size(wr.range().right)).sum(); + + ShardManager shardManager = new ShardManagerNoDisks(weightedRanges); + + // sanity check + assertEquals(0.4, tokenAt(0.1).size(tokenAt(0.5)), delta); + + assertEquals(0.15, shardManager.rangeSpanned(range(0.2, 0.7)), delta); + assertEquals(0.15, shardManager.rangeSpanned(range(0.3, 0.5)), delta); + assertEquals(0.0, shardManager.rangeSpanned(range(0.5, 0.7)), delta); + assertEquals(total, shardManager.rangeSpanned(range(0.0, 1.0)), delta); + + + assertEquals(0.1, shardManager.rangeSpanned(mockedTable(0.5, 0.8, Double.NaN)), delta); + + // single-partition correction + assertEquals(1.0, shardManager.rangeSpanned(mockedTable(0.3, 0.3, Double.NaN)), delta); + // out-of-local-range correction + assertEquals(1.0, shardManager.rangeSpanned(mockedTable(0.6, 0.7, Double.NaN)), delta); + assertEquals(0.001, shardManager.rangeSpanned(mockedTable(0.6, 0.701, Double.NaN)), delta); + + // reported coverage + assertEquals(0.1, shardManager.rangeSpanned(mockedTable(0.5, 0.7, 0.1)), delta); + // bad coverage + assertEquals(0.1, shardManager.rangeSpanned(mockedTable(0.5, 0.8, 0.0)), delta); + assertEquals(0.1, shardManager.rangeSpanned(mockedTable(0.5, 0.8, -1)), delta); + + // correction over coverage, no recalculation + assertEquals(1.0, shardManager.rangeSpanned(mockedTable(0.5, 0.8, 1e-50)), delta); + } + + @Test + public void testRangeSpannedWeighted() + { + weightedRanges.add(new Splitter.WeightedRange(1.0, new Range<>(tokenAt(0.05), tokenAt(0.15)))); + weightedRanges.add(new 
Splitter.WeightedRange(0.5, new Range<>(tokenAt(0.3), tokenAt(0.4)))); + weightedRanges.add(new Splitter.WeightedRange(1.0, new Range<>(tokenAt(0.45), tokenAt(0.5)))); + weightedRanges.add(new Splitter.WeightedRange(1.0, new Range<>(tokenAt(0.7), tokenAt(0.75)))); + weightedRanges.add(new Splitter.WeightedRange(0.2, new Range<>(tokenAt(0.75), tokenAt(0.85)))); + weightedRanges.add(new Splitter.WeightedRange(1.0, new Range<>(tokenAt(0.90), tokenAt(0.91)))); + weightedRanges.add(new Splitter.WeightedRange(1.0, new Range<>(tokenAt(0.92), tokenAt(0.94)))); + weightedRanges.add(new Splitter.WeightedRange(1.0, new Range<>(tokenAt(0.98), tokenAt(1.0)))); + double total = weightedRanges.stream().mapToDouble(wr -> wr.size()).sum(); + + ShardManager shardManager = new ShardManagerNoDisks(weightedRanges); + + // sanity check + assertEquals(0.4, tokenAt(0.1).size(tokenAt(0.5)), delta); + + assertEquals(0.10, shardManager.rangeSpanned(range(0.2, 0.7)), delta); + assertEquals(0.10, shardManager.rangeSpanned(range(0.3, 0.5)), delta); + assertEquals(0.0, shardManager.rangeSpanned(range(0.5, 0.7)), delta); + assertEquals(total, shardManager.rangeSpanned(range(0.0, 1.0)), delta); + + + assertEquals(0.06, shardManager.rangeSpanned(mockedTable(0.5, 0.8, Double.NaN)), delta); + + // single-partition correction + assertEquals(1.0, shardManager.rangeSpanned(mockedTable(0.3, 0.3, Double.NaN)), delta); + // out-of-local-range correction + assertEquals(1.0, shardManager.rangeSpanned(mockedTable(0.6, 0.7, Double.NaN)), delta); + assertEquals(0.001, shardManager.rangeSpanned(mockedTable(0.6, 0.701, Double.NaN)), delta); + + // reported coverage + assertEquals(0.1, shardManager.rangeSpanned(mockedTable(0.5, 0.7, 0.1)), delta); + // bad coverage + assertEquals(0.06, shardManager.rangeSpanned(mockedTable(0.5, 0.8, 0.0)), delta); + assertEquals(0.06, shardManager.rangeSpanned(mockedTable(0.5, 0.8, -1)), delta); + + // correction over coverage, no recalculation + assertEquals(1.0, 
shardManager.rangeSpanned(mockedTable(0.5, 0.8, 1e-50)), delta); + } + + Token tokenAt(double pos) + { + return partitioner.split(minimumToken, minimumToken, pos); + } + + DecoratedKey keyAt(double pos) + { + Token token = tokenAt(pos); + return new BufferDecoratedKey(token, ByteBuffer.allocate(0)); + } + + Range range(double start, double end) + { + return new Range<>(tokenAt(start), tokenAt(end)); + } + + SSTableReader mockedTable(double start, double end, double reportedCoverage) + { + SSTableReader mock = Mockito.mock(SSTableReader.class); + Mockito.when(mock.getFirst()).thenReturn(keyAt(start)); + Mockito.when(mock.getLast()).thenReturn(keyAt(end)); + return mock; + } + + @Test + public void testShardBoundaries() + { + // no shards + testShardBoundaries(ints(), 1, 1, ints(10, 50)); + // split on disks at minimum + testShardBoundaries(ints(30), 1, 2, ints(10, 50)); + testShardBoundaries(ints(20, 30, 40, 50), 1, 5, ints(10, 51, 61, 70)); + + // no disks + testShardBoundaries(ints(30), 2, 1, ints(10, 50)); + testShardBoundaries(ints(20, 30, 40, 50), 5, 1, ints(10, 51, 61, 70)); + + // split + testShardBoundaries(ints(10, 20, 30, 40, 50, 60, 70, 80), 3, 3, ints(0, 90)); + testShardBoundaries(ints(10, 20, 30, 40, 50, 70, 80, 90), 3, 3, ints(0, 51, 61, 100)); + testShardBoundaries(ints(10, 20, 30, 40, 60, 70, 80, 90), 3, 3, ints(0, 49, 59, 100)); + testShardBoundaries(ints(12, 23, 33, 45, 56, 70, 80, 90), 3, 3, ints(0, 9, 11, 20, 21, 39, 41, 50, 51, 60, 64, 68, 68, 100)); + + // uneven + testShardBoundaries(ints(8, 16, 24, 32, 42, 52, 62, 72, 79, 86, 93), 4, ints(32, 72, 100), ints(0, 100)); + testShardBoundaries(ints(1, 2, 3, 4, 6, 8, 10, 12, 34, 56, 78), 4, ints(4, 12, 100), ints(0, 100)); + } + + @Test + public void testShardBoundariesWraparound() + { + // no shards + testShardBoundaries(ints(), 1, 1, ints(50, 10)); + // split on disks at minimum + testShardBoundaries(ints(70), 1, 2, ints(50, 10)); + testShardBoundaries(ints(10, 20, 30, 70), 1, 5, ints(91, 31, 
61, 71)); + // no disks + testShardBoundaries(ints(70), 2, 1, ints(50, 10)); + testShardBoundaries(ints(10, 20, 30, 70), 5, 1, ints(91, 31, 61, 71)); + // split + testShardBoundaries(ints(10, 20, 30, 40, 50, 60, 70, 90), 3, 3, ints(81, 71)); + testShardBoundaries(ints(10, 20, 30, 40, 60, 70, 80, 90), 3, 3, ints(51, 41)); + testShardBoundaries(ints(10, 30, 40, 50, 60, 70, 80, 90), 3, 3, ints(21, 11)); + testShardBoundaries(ints(10, 20, 30, 40, 50, 60, 70, 90), 3, 3, ints(89, 79)); + testShardBoundaries(ints(10, 20, 30, 40, 60, 70, 80, 90), 3, 3, ints(59, 49)); + testShardBoundaries(ints(10, 30, 40, 50, 60, 70, 80, 90), 3, 3, ints(29, 19)); + + testShardBoundaries(ints(10, 20, 30, 40, 50, 70, 80, 90), 3, 3, ints(91, 51, 61, 91)); + testShardBoundaries(ints(10, 20, 30, 40, 50, 70, 80, 90), 3, 3, ints(21, 51, 61, 21)); + testShardBoundaries(ints(10, 20, 30, 40, 50, 70, 80, 90), 3, 3, ints(71, 51, 61, 71)); + } + + @Test + public void testShardBoundariesWeighted() + { + // no shards + testShardBoundariesWeighted(ints(), 1, 1, ints(10, 50)); + // split on disks at minimum + testShardBoundariesWeighted(ints(30), 1, 2, ints(10, 50)); + testShardBoundariesWeighted(ints(22, 34, 45, 64), 1, 5, ints(10, 51, 61, 70)); + + // no disks + testShardBoundariesWeighted(ints(30), 2, 1, ints(10, 50)); + testShardBoundariesWeighted(ints(22, 34, 45, 64), 5, 1, ints(10, 51, 61, 70)); + + // split + testShardBoundariesWeighted(ints(10, 20, 30, 40, 50, 60, 70, 80), 3, 3, ints(0, 90)); + testShardBoundariesWeighted(ints(14, 29, 43, 64, 71, 79, 86, 93), 3, 3, ints(0, 51, 61, 100)); + testShardBoundariesWeighted(ints(18, 36, 50, 63, 74, 83, 91, 96), 3, 3, ints(0, 40, 40, 70, 70, 90, 90, 100)); + } + + private int[] ints(int... 
values) + { + return values; + } + + private void testShardBoundaries(int[] expected, int numShards, int numDisks, int[] rangeBounds) + { + ColumnFamilyStore cfs = Mockito.mock(ColumnFamilyStore.class); + when(cfs.getPartitioner()).thenReturn(partitioner); + + List> ranges = new ArrayList<>(); + for (int i = 0; i < rangeBounds.length; i += 2) + ranges.add(new Range<>(getToken(rangeBounds[i + 0]), getToken(rangeBounds[i + 1]))); + ranges = Range.sort(ranges); + ColumnFamilyStore.VersionedLocalRanges sortedRanges = localRanges(ranges.stream().map(x -> new Splitter.WeightedRange(1.0, x)).collect(Collectors.toList())); + + List diskBoundaries = splitRanges(sortedRanges, numDisks); + int[] result = getShardBoundaries(cfs, numShards, diskBoundaries, sortedRanges); + Assert.assertArrayEquals("Disks " + numDisks + " shards " + numShards + " expected " + Arrays.toString(expected) + " was " + Arrays.toString(result), expected, result); + } + + private void testShardBoundariesWeighted(int[] expected, int numShards, int numDisks, int[] rangeBounds) + { + ColumnFamilyStore cfs = Mockito.mock(ColumnFamilyStore.class); + when(cfs.getPartitioner()).thenReturn(partitioner); + + List ranges = new ArrayList<>(); + for (int i = 0; i < rangeBounds.length; i += 2) + ranges.add(new Splitter.WeightedRange(2.0 / (rangeBounds.length - i), new Range<>(getToken(rangeBounds[i + 0]), getToken(rangeBounds[i + 1])))); + ColumnFamilyStore.VersionedLocalRanges sortedRanges = localRanges(ranges); + + List diskBoundaries = splitRanges(sortedRanges, numDisks); + int[] result = getShardBoundaries(cfs, numShards, diskBoundaries, sortedRanges); + Assert.assertArrayEquals("Disks " + numDisks + " shards " + numShards + " expected " + Arrays.toString(expected) + " was " + Arrays.toString(result), expected, result); + } + + private void testShardBoundaries(int[] expected, int numShards, int[] diskPositions, int[] rangeBounds) + { + ColumnFamilyStore cfs = Mockito.mock(ColumnFamilyStore.class); + 
when(cfs.getPartitioner()).thenReturn(partitioner); + + List ranges = new ArrayList<>(); + for (int i = 0; i < rangeBounds.length; i += 2) + ranges.add(new Splitter.WeightedRange(1.0, new Range<>(getToken(rangeBounds[i + 0]), getToken(rangeBounds[i + 1])))); + ColumnFamilyStore.VersionedLocalRanges sortedRanges = localRanges(ranges); + + List diskBoundaries = Arrays.stream(diskPositions).mapToObj(this::getToken).collect(Collectors.toList()); + int[] result = getShardBoundaries(cfs, numShards, diskBoundaries, sortedRanges); + Assert.assertArrayEquals("Disks " + Arrays.toString(diskPositions) + " shards " + numShards + " expected " + Arrays.toString(expected) + " was " + Arrays.toString(result), expected, result); + } + + private int[] getShardBoundaries(ColumnFamilyStore cfs, int numShards, List diskBoundaries, ColumnFamilyStore.VersionedLocalRanges sortedRanges) + { + DiskBoundaries db = makeDiskBoundaries(cfs, diskBoundaries); + when(cfs.localRangesWeighted()).thenReturn(sortedRanges); + when(cfs.getDiskBoundaries()).thenReturn(db); + + final ShardTracker shardTracker = ShardManager.create(cfs) + .boundaries(numShards); + IntArrayList list = new IntArrayList(); + for (int i = 0; i < 100; ++i) + { + if (shardTracker.advanceTo(getToken(i))) + list.addInt(fromToken(shardTracker.shardStart())); + } + return list.toIntArray(); + } + + ColumnFamilyStore.VersionedLocalRanges localRanges(List ranges) + { + ColumnFamilyStore.VersionedLocalRanges versionedLocalRanges = new ColumnFamilyStore.VersionedLocalRanges(-1, ranges.size()); + versionedLocalRanges.addAll(ranges); + return versionedLocalRanges; + } + + ColumnFamilyStore.VersionedLocalRanges localRangesFull() + { + List ranges = ImmutableList.of(new Splitter.WeightedRange(1.0, + new Range<>(partitioner.getMinimumToken(), + partitioner.getMinimumToken()))); + ColumnFamilyStore.VersionedLocalRanges versionedLocalRanges = new ColumnFamilyStore.VersionedLocalRanges(-1, ranges.size()); + versionedLocalRanges.addAll(ranges); 
+ return versionedLocalRanges; + } + + List splitRanges(ColumnFamilyStore.VersionedLocalRanges ranges, int numDisks) + { + return ranges.get(0).left().getPartitioner().splitter().get().splitOwnedRanges(numDisks, ranges, false); + } + + private static DiskBoundaries makeDiskBoundaries(ColumnFamilyStore cfs, List diskBoundaries) + { + List diskPositions = diskBoundaries.stream().map(Token::maxKeyBound).collect(Collectors.toList()); + DiskBoundaries db = new DiskBoundaries(cfs, null, diskPositions, -1, -1); + return db; + } + + private Token getToken(int x) + { + return tokenAt(x / 100.0); + } + + private int fromToken(Token t) + { + return (int) Math.round(partitioner.getMinimumToken().size(t) * 100.0); + } + + @Test + public void testRangeEnds() + { + ColumnFamilyStore cfs = Mockito.mock(ColumnFamilyStore.class); + when(cfs.getPartitioner()).thenReturn(partitioner); + ColumnFamilyStore.VersionedLocalRanges sortedRanges = localRangesFull(); + + for (int numDisks = 1; numDisks <= 3; ++numDisks) + { + List diskBoundaries = splitRanges(sortedRanges, numDisks); + DiskBoundaries db = makeDiskBoundaries(cfs, diskBoundaries); + when(cfs.localRangesWeighted()).thenReturn(sortedRanges); + when(cfs.getDiskBoundaries()).thenReturn(db); + + ShardManager shardManager = ShardManager.create(cfs); + for (int numShards = 1; numShards <= 3; ++numShards) + { + ShardTracker iterator = shardManager.boundaries(numShards); + iterator.advanceTo(partitioner.getMinimumToken()); + + int count = 1; + for (Token end = iterator.shardEnd(); end != null; end = iterator.shardEnd()) + { + assertFalse(iterator.advanceTo(end)); + assertTrue(iterator.advanceTo(end.nextValidToken())); + ++count; + } + assertEquals(numDisks * numShards, count); + } + } + } +} diff --git a/test/unit/org/apache/cassandra/db/compaction/UnifiedCompactionStrategyTest.java b/test/unit/org/apache/cassandra/db/compaction/UnifiedCompactionStrategyTest.java new file mode 100644 index 000000000000..e0e3e4a25c15 --- /dev/null +++ 
b/test/unit/org/apache/cassandra/db/compaction/UnifiedCompactionStrategyTest.java @@ -0,0 +1,913 @@ +/* + * Copyright DataStax, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.cassandra.db.compaction; + +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Random; +import java.util.Set; +import java.util.TreeMap; +import java.util.concurrent.TimeUnit; + +import com.google.common.collect.Iterables; +import org.apache.commons.math3.random.JDKRandomGenerator; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; + +import org.apache.cassandra.config.DatabaseDescriptor; +import org.apache.cassandra.db.BufferDecoratedKey; +import org.apache.cassandra.db.ColumnFamilyStore; +import org.apache.cassandra.db.DecoratedKey; +import org.apache.cassandra.db.Directories; +import org.apache.cassandra.db.DiskBoundaries; +import org.apache.cassandra.db.compaction.unified.Controller; +import org.apache.cassandra.db.compaction.unified.UnifiedCompactionTask; +import org.apache.cassandra.db.lifecycle.SSTableSet; +import org.apache.cassandra.db.lifecycle.Tracker; +import org.apache.cassandra.db.marshal.AsciiType; +import org.apache.cassandra.dht.IPartitioner; +import org.apache.cassandra.dht.Murmur3Partitioner; +import org.apache.cassandra.dht.Splitter; +import 
org.apache.cassandra.dht.Token; +import org.apache.cassandra.io.sstable.format.SSTableReader; +import org.apache.cassandra.schema.TableMetadata; +import org.apache.cassandra.utils.FBUtilities; +import org.apache.cassandra.utils.Overlaps; +import org.apache.cassandra.utils.Pair; +import org.mockito.Answers; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.MockitoAnnotations; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyDouble; +import static org.mockito.ArgumentMatchers.anyInt; +import static org.mockito.Mockito.RETURNS_SMART_NULLS; +import static org.mockito.Mockito.when; +import static org.mockito.Mockito.withSettings; + +/** + * The unified compaction strategy is described in this design document: + * + * See CEP-26: https://cwiki.apache.org/confluence/display/CASSANDRA/CEP-26%3A+Unified+Compaction+Strategy + */ +public class UnifiedCompactionStrategyTest +{ + private final static long ONE_MB = 1 << 20; + + // Multiple disks can be used both with and without disk boundaries. We want to test both cases. 
+ + final String keyspace = "ks"; + final String table = "tbl"; + + @Mock(answer = Answers.RETURNS_SMART_NULLS) + ColumnFamilyStore cfs; + + @Mock(answer = Answers.RETURNS_SMART_NULLS) + CompactionStrategyManager csm; + + ColumnFamilyStore.VersionedLocalRanges localRanges; + + Tracker dataTracker; + + long repairedAt; + + IPartitioner partitioner; + + Splitter splitter; + + @BeforeClass + public static void setUpClass() + { + long seed = System.currentTimeMillis(); + random.setSeed(seed); + System.out.println("Random seed: " + seed); + + DatabaseDescriptor.daemonInitialization(); // because of all the static initialization in CFS + DatabaseDescriptor.setPartitionerUnsafe(Murmur3Partitioner.instance); + } + + + static final JDKRandomGenerator random = new JDKRandomGenerator(); + + @Before + public void setUp() + { + setUp(1); + } + + protected void setUp(int numShards) + { + MockitoAnnotations.initMocks(this); + + TableMetadata metadata = TableMetadata.builder(keyspace, table) + .addPartitionKeyColumn("pk", AsciiType.instance) + .build(); + + dataTracker = Tracker.newDummyTracker(); + repairedAt = System.currentTimeMillis(); + partitioner = DatabaseDescriptor.getPartitioner(); + splitter = partitioner.splitter().orElse(null); + if (numShards > 1) + assertNotNull("Splitter is required with multiple compaction shards", splitter); + + when(cfs.getPartitioner()).thenReturn(partitioner); + localRanges = cfs.fullWeightedRange(0, partitioner); + + when(cfs.metadata()).thenReturn(metadata); + when(cfs.getTableName()).thenReturn(table); + when(cfs.localRangesWeighted()).thenReturn(localRanges); + when(cfs.getTracker()).thenReturn(dataTracker); + when(cfs.getLiveSSTables()).thenAnswer(request -> dataTracker.getView().select(SSTableSet.LIVE)); + when(cfs.getSSTables(any())).thenAnswer(request -> dataTracker.getView().select(request.getArgument(0))); + when(cfs.getCompactionStrategyManager()).thenReturn(csm); + + DiskBoundaries db = new DiskBoundaries(cfs, new 
Directories.DataDirectory[0], 0); + when(cfs.getDiskBoundaries()).thenReturn(db); + + when(csm.onlyPurgeRepairedTombstones()).thenReturn(false); + } + + @Test + public void testNoSSTables() + { + Controller controller = Mockito.mock(Controller.class); + long minimalSizeBytes = 2 << 20; + when(controller.getScalingParameter(anyInt())).thenReturn(4); + when(controller.getSurvivalFactor(anyInt())).thenReturn(1.0); + when(controller.getMaxLevelDensity(anyInt(), anyDouble())).thenCallRealMethod(); + when(controller.getSurvivalFactor(anyInt())).thenReturn(1.0); + when(controller.getNumShards(anyDouble())).thenReturn(1); + when(controller.getBaseSstableSize(anyInt())).thenReturn((double) minimalSizeBytes); + when(controller.maxConcurrentCompactions()).thenReturn(1000); // let it generate as many candidates as it can + when(controller.maxThroughput()).thenReturn(Double.MAX_VALUE); + when(controller.maxSSTablesToCompact()).thenReturn(1000); + when(controller.random()).thenCallRealMethod(); + + UnifiedCompactionStrategy strategy = new UnifiedCompactionStrategy(cfs, new HashMap<>(), controller); + + assertNull(strategy.getNextBackgroundTask(FBUtilities.nowInSeconds())); + assertEquals(0, strategy.getEstimatedRemainingTasks()); + } + + @Test + public void testGetBucketsSameWUniqueArena() + { + final int m = 2; // minimal sorted run size in MB m + final Map sstables = new TreeMap<>(); + + for (int i = 0; i < 20; i++) + { + int numSSTables = 2 + random.nextInt(18); + sstables.put(m * i, numSSTables); + } + + // W = 3, o = 1 => F = 5, T = 5 => expected T sstables and 2 buckets: 0-10m, 10-50m + testGetBucketsOneArena(sstables, new int[] { 3 }, m, new int[] { 5, 5}); + + // W = 2, o = 1 => F = 4, T = 4 => expected T sstables and 3 buckets: 0-8m, 8-32m, 32-128m + testGetBucketsOneArena(sstables, new int[] { 2 }, m, new int[] { 4, 4, 4}); + + // W = 0, o = 1 => F = 2, T = 2 => expected 2 sstables and 5 buckets: 0-4m, 4-8m, 8-16m, 16-32m, 32-64m + testGetBucketsOneArena(sstables, new 
int[] { 0 }, m, new int[] { 2, 2, 2, 2, 2}); + + // W = -2, o = 1 => F = 4, T = 2 => expected 2 sstables and 3 buckets: 0-8mb, 8-32m, 32-128m + testGetBucketsOneArena(sstables, new int[] { -2 }, m, new int[] { 2, 2, 2}); + + // W = -3, o = 1 => F = 5, T = 2 => expected 2 sstables and 2 buckets: 0-10m, 10-50m + testGetBucketsOneArena(sstables, new int[] { -3 }, m, new int[] { 2, 2}); + + // remove sstables from 4m to 8m to create an empty bucket in the next call + sstables.remove(4); // 4m + sstables.remove(6); // 6m + sstables.remove(8); // 8m + + // W = 0, o = 1 => F = 2, T = 2 => expected 2 sstables and 5 buckets: 0-4m, 4-8m, 8-16m, 16-32m, 32-64m + testGetBucketsOneArena(sstables, new int[] { 0 }, m, new int[] { 2, 2, 2, 2, 2}); + } + + @Test + public void testGetBucketsDifferentWsUniqueArena() + { + final int m = 2; // minimal sorted run size in MB m + final Map sstables = new TreeMap<>(); + + for (int i : new int[] { 50, 100, 200, 400, 600, 800, 1000}) + { + int numSSTables = 2 + random.nextInt(18); + sstables.put(i, numSSTables); + } + + // W = [30, 2, -6], o = 1 => F = [32, 4, 8] , T = [32, 4, 2] => expected 3 buckets: 0-64m, 64-256m 256-2048m + testGetBucketsOneArena(sstables, new int[]{ 30, 2, -6 }, m, new int[] { 32, 4, 2}); + + // W = [30, 6, -8], o = 1 => F = [32, 8, 10] , T = [32, 8, 2] => expected 3 buckets: 0-64m, 64-544m 544-5440m + testGetBucketsOneArena(sstables, new int[]{ 30, 6, -8 }, m, new int[] { 32, 8, 2}); + + // W = [0, 0, 0, -2, -2], o = 1 => F = [2, 2, 2, 4, 4] , T = [2, 2, 2, 2, 2] => expected 6 buckets: 0-4m, 4-8m, 8-16m, 16-64m, 64-256m, 256-1024m + testGetBucketsOneArena(sstables, new int[]{ 0, 0, 0, -2, -2 }, m, new int[] { 2, 2, 2, 2, 2, 2}); + } + + private void testGetBucketsOneArena(Map sstableMap, int[] Ws, int m, int[] expectedTs) + { + long minimalSizeBytes = m << 20; + + Controller controller = Mockito.mock(Controller.class); + when(controller.getNumShards(anyDouble())).thenReturn(1); + 
when(controller.getBaseSstableSize(anyInt())).thenReturn((double) minimalSizeBytes); + when(controller.maxConcurrentCompactions()).thenReturn(1000); // let it generate as many candidates as it can + when(controller.maxThroughput()).thenReturn(Double.MAX_VALUE); + when(controller.maxSSTablesToCompact()).thenReturn(1000); + + when(controller.getScalingParameter(anyInt())).thenAnswer(answer -> { + int index = answer.getArgument(0); + return Ws[index < Ws.length ? index : Ws.length - 1]; + }); + when(controller.getFanout(anyInt())).thenCallRealMethod(); + when(controller.getThreshold(anyInt())).thenCallRealMethod(); + when(controller.getMaxLevelDensity(anyInt(), anyDouble())).thenCallRealMethod(); + + when(controller.getSurvivalFactor(anyInt())).thenReturn(1.0); + when(controller.random()).thenCallRealMethod(); + + UnifiedCompactionStrategy strategy = new UnifiedCompactionStrategy(cfs, new HashMap<>(), controller); + + IPartitioner partitioner = cfs.getPartitioner(); + DecoratedKey first = new BufferDecoratedKey(partitioner.getMinimumToken(), ByteBuffer.allocate(0)); + DecoratedKey last = new BufferDecoratedKey(partitioner.getMaximumToken(), ByteBuffer.allocate(0)); + + List sstables = new ArrayList<>(); + long dataSetSizeBytes = 0; + for (Map.Entry entry : sstableMap.entrySet()) + { + for (int i = 0; i < entry.getValue(); i++) + { + // we want a number > 0 and < 1 so that the sstable has always some size and never crosses the boundary to the next bucket + // so we leave a 1% margin, picking a number from 0.01 to 0.99 + double rand = 0.01 + 0.98 * random.nextDouble(); + long sizeOnDiskBytes = (entry.getKey() << 20) + (long) (minimalSizeBytes * rand); + dataSetSizeBytes += sizeOnDiskBytes; + sstables.add(mockSSTable(sizeOnDiskBytes, System.currentTimeMillis(), first, last)); + } + } + strategy.addSSTables(sstables); + dataTracker.addInitialSSTables(sstables); + + List levels = strategy.getLevels(); + assertEquals(expectedTs.length, levels.size()); + + for (int i = 0; i 
< expectedTs.length; i++) + { + UnifiedCompactionStrategy.Level level = levels.get(i); + assertEquals(i, level.getIndex()); + UnifiedCompactionStrategy.SelectionContext context = new UnifiedCompactionStrategy.SelectionContext(strategy.getController()); + UnifiedCompactionStrategy.CompactionPick pick = level.getCompactionPick(context); + + assertEquals(level.getSSTables().size() >= expectedTs[i], pick != null); + } + } + + @Test + public void testPreserveLayout_W2_947() + { + testPreserveLayout(2, 947); + } + + @Test + public void testPreserveLayout_WM2_947() + { + testPreserveLayout(-2, 947); + } + + @Test + public void testPreserveLayout_W2_251() + { + testPreserveLayout(2, 251); + } + + @Test + public void testPreserveLayout_WM2_251() + { + testPreserveLayout(-2, 251); + } + + @Test + public void testPreserveLayout_W2_320() + { + testPreserveLayout(2, 320); + } + + @Test + public void testPreserveLayout_WM2_320() + { + testPreserveLayout(-2, 320); + } + + @Test + public void testPreserveLayout_WM2_947_128() + { + testLayout(-2, 947, 128); + } + + @Test + public void testPreserveLayout_WM2_947_64() + { + testLayout(-2, 947, 64); + } + + public void testPreserveLayout(int W, int numSSTables) + { + testLayout(W, numSSTables, 10000); + } + + @Test + public void testMaxSSTablesToCompact() + { + testLayout(2, 944, 60); + testLayout(2, 944, 1000); + testLayout(2, 944, 100); + testLayout(2, 803, 200); + } + + public void testLayout(int W, int numSSTables, int maxSSTablesToCompact) + { + int F = 2 + Math.abs(W); + int T = W < 0 ? 
2 : F; + final long minSstableSizeBytes = 2L << 20; // 2 MB + final int numShards = 1; + final int levels = (int) Math.floor(Math.log(numSSTables) / Math.log(F)) + 1; + + Controller controller = Mockito.mock(Controller.class); + when(controller.getScalingParameter(anyInt())).thenReturn(W); + when(controller.getFanout(anyInt())).thenCallRealMethod(); + when(controller.getThreshold(anyInt())).thenCallRealMethod(); + when(controller.getMaxLevelDensity(anyInt(), anyDouble())).thenCallRealMethod(); + when(controller.getSurvivalFactor(anyInt())).thenReturn(1.0); + when(controller.getNumShards(anyDouble())).thenReturn(numShards); + when(controller.getBaseSstableSize(anyInt())).thenReturn((double) minSstableSizeBytes); + + if (maxSSTablesToCompact >= numSSTables) + when(controller.maxConcurrentCompactions()).thenReturn(levels * (W < 0 ? 1 : F)); // make sure the work is assigned to different levels + else + when(controller.maxConcurrentCompactions()).thenReturn(1000); // make sure the work is assigned to different levels + + when(controller.maxThroughput()).thenReturn(Double.MAX_VALUE); + when(controller.maxSSTablesToCompact()).thenReturn(maxSSTablesToCompact); + Random random = Mockito.mock(Random.class); + when(random.nextInt(anyInt())).thenReturn(0); + when(controller.random()).thenReturn(random); + + UnifiedCompactionStrategy strategy = new UnifiedCompactionStrategy(cfs, new HashMap<>(), controller); + List allSstables = new ArrayList<>(); + + List sstables = mockSSTables(numSSTables, +// minSstableSizeBytes, + 0, + System.currentTimeMillis(), + 0); + allSstables.addAll(sstables); + strategy.addSSTables(allSstables); + dataTracker.addInitialSSTables(allSstables); + + int num = numSSTables; + UnifiedCompactionStrategy.CompactionPick task; + while (true) + { + task = strategy.getNextCompactionPick(0); // do not check expiration + if (task == null) + break; + + boolean layout = Math.min(num, maxSSTablesToCompact) > F * F; + int limit; + if (layout) + { + int forLimitLevel 
= (int) (Math.pow(F, Math.floor(Math.log(maxSSTablesToCompact) / Math.log(F)))); + // for clarification see W < 0 case in layoutCompactions method + limit = W < 0 ? maxSSTablesToCompact / forLimitLevel * forLimitLevel : forLimitLevel; + } + else + limit = maxSSTablesToCompact; + + int expected = num; + if (layout) + { + int forTopLevel = (int) (Math.pow(F, Math.floor(Math.log(num) / Math.log(F)))); + expected = W > 0 + ? forTopLevel + : num / forTopLevel * forTopLevel; + + } + expected = Math.min(expected, limit); + + int count = task.size(); + assertEquals(expected, count); + for (SSTableReader rdr : task) + strategy.removeSSTable(rdr); + num -= count; + } + // Check that we issue all the compactions + assertTrue(num < T); + } + + private static Map mapFromPair(Pair ... pairs) + { + Map ret = new HashMap<>(); + for (Pair pair : pairs) + { + ret.put(pair.left, pair.right); + } + + return ret; + } + + @Test + public void testGetNextBackgroundTasks() + { + Controller controller = Mockito.mock(Controller.class); + long minimalSizeBytes = 2 << 20; + when(controller.getScalingParameter(anyInt())).thenReturn(0); + when(controller.getFanout(anyInt())).thenCallRealMethod(); + when(controller.getThreshold(anyInt())).thenCallRealMethod(); + when(controller.getMaxLevelDensity(anyInt(), anyDouble())).thenCallRealMethod(); + when(controller.getSurvivalFactor(anyInt())).thenReturn(1.0); + when(controller.getNumShards(anyDouble())).thenReturn(1); + when(controller.getBaseSstableSize(anyInt())).thenReturn((double) minimalSizeBytes); + when(controller.maxConcurrentCompactions()).thenReturn(1000); // let it generate as many candidates as it can + when(controller.maxThroughput()).thenReturn(Double.MAX_VALUE); + when(controller.maxSSTablesToCompact()).thenReturn(1000); + when(controller.random()).thenCallRealMethod(); + + UnifiedCompactionStrategy strategy = new UnifiedCompactionStrategy(cfs, new HashMap<>(), controller); + + IPartitioner partitioner = cfs.getPartitioner(); + + List 
sstables = createSStables(partitioner); + + strategy.addSSTables(sstables); + dataTracker.addInitialSSTables(sstables); + + AbstractCompactionTask task = strategy.getNextBackgroundTask(0); + assertSame(UnifiedCompactionTask.class, task.getClass()); + task.transaction.abort(); + } + + private List createSStables(IPartitioner partitioner) + { + return createSStables(partitioner, mapFromPair(Pair.create(4 * ONE_MB, 4)), 10000); + } + + private List createSStables(IPartitioner partitioner, int ttl) + { + return createSStables(partitioner, mapFromPair(Pair.create(4 * ONE_MB, 4)), ttl); + } + + private List createSStables(IPartitioner partitioner, Map sstablesMap) + { + return createSStables(partitioner, sstablesMap, 10000); + } + + // Used to make sure timestamps are not exactly the same, which disables expiration + int millisAdjustment = 0; + + private List createSStables(IPartitioner partitioner, + Map sstablesMap, + int ttl) + { + List mockSSTables = new ArrayList<>(); + Token min = partitioner.getMinimumToken(); + Token max = partitioner.getMaximumToken(); + ByteBuffer bb = ByteBuffer.allocate(0); + sstablesMap.forEach((size, num) -> { + Token first = min.getPartitioner().split(min, max, 0.01); + + for (int i = 0; i < num; i++) + { + // pending repair + mockSSTables.add(mockSSTable(0, + size, + System.currentTimeMillis() + millisAdjustment++, + 0.0, + new BufferDecoratedKey(first, bb), + new BufferDecoratedKey(max, bb), + ttl)); + first = first.nextValidToken(); + } + }); + return mockSSTables; + } + + @Test + public void testDropExpiredSSTables() + { + testDropExpiredFromBucket(1); + testDropExpiredAndCompactNonExpired(); + } + + private void testDropExpiredFromBucket(int numShards) + { + Controller controller = Mockito.mock(Controller.class); + long minimalSizeBytes = 2 << 20; + when(controller.getMaxLevelDensity(anyInt(), anyDouble())).thenCallRealMethod(); + when(controller.getScalingParameter(anyInt())).thenReturn(3); // T=5 + 
when(controller.getFanout(anyInt())).thenCallRealMethod(); + when(controller.getThreshold(anyInt())).thenCallRealMethod(); + when(controller.getSurvivalFactor(anyInt())).thenReturn(1.0); + when(controller.getNumShards(anyDouble())).thenReturn(numShards); + when(controller.getBaseSstableSize(anyInt())).thenReturn((double) minimalSizeBytes); + when(controller.maxConcurrentCompactions()).thenReturn(1000); // let it generate as many candidates as it can + when(controller.maxThroughput()).thenReturn(Double.MAX_VALUE); + when(controller.maxSSTablesToCompact()).thenReturn(1000); + when(controller.getIgnoreOverlapsInExpirationCheck()).thenReturn(false); + when(controller.random()).thenCallRealMethod(); + UnifiedCompactionStrategy strategy = new UnifiedCompactionStrategy(cfs, new HashMap<>(), controller); + strategy.startup(); + + List sstables = createSStables(cfs.getPartitioner()); + // Tracker#addSSTables also tries to backup SSTables, so we use addInitialSSTables and notify explicitly + strategy.addSSTables(sstables); + dataTracker.addInitialSSTables(sstables); + + try + { + // nothing to compact yet + assertNull(strategy.getNextCompactionPick(0)); + + long timestamp = sstables.get(sstables.size() - 1).getMaxLocalDeletionTime(); + long expirationPoint = timestamp + 1; + + UnifiedCompactionStrategy.CompactionPick pick = strategy.getNextCompactionPick(expirationPoint); + assertNotNull(pick); + assertEquals(sstables.size(), pick.size()); + assertEquals(-1, pick.level); + } + finally + { + strategy.shutdown(); + } + } + + private void testDropExpiredAndCompactNonExpired() + { + Controller controller = Mockito.mock(Controller.class); + long minimalSizeBytes = 2 << 20; + when(controller.getMaxLevelDensity(anyInt(), anyDouble())).thenCallRealMethod(); + when(controller.getScalingParameter(anyInt())).thenReturn(2); + when(controller.getFanout(anyInt())).thenCallRealMethod(); + when(controller.getThreshold(anyInt())).thenCallRealMethod(); + 
when(controller.getSurvivalFactor(anyInt())).thenReturn(1.0); + when(controller.getNumShards(anyDouble())).thenReturn(1); + when(controller.getBaseSstableSize(anyInt())).thenReturn((double) minimalSizeBytes); + when(controller.maxConcurrentCompactions()).thenReturn(1000); // let it generate as many candidates as it can + when(controller.maxThroughput()).thenReturn(Double.MAX_VALUE); + when(controller.getIgnoreOverlapsInExpirationCheck()).thenReturn(false); + when(controller.maxSSTablesToCompact()).thenReturn(1000); + + when(controller.random()).thenCallRealMethod(); + UnifiedCompactionStrategy strategy = new UnifiedCompactionStrategy(cfs, new HashMap<>(), controller); + strategy.startup(); + + List expiredSSTables = createSStables(cfs.getPartitioner(), 1000); + List nonExpiredSSTables = createSStables(cfs.getPartitioner(), 0); + strategy.addSSTables(expiredSSTables); + strategy.addSSTables(nonExpiredSSTables.subList(0, 3)); + dataTracker.addInitialSSTables(Iterables.concat(expiredSSTables, nonExpiredSSTables)); + + long timestamp = expiredSSTables.get(expiredSSTables.size() - 1).getMaxLocalDeletionTime(); + long expirationPoint = timestamp + 1; + + try + { + UnifiedCompactionStrategy.CompactionPick pick = strategy.getNextCompactionPick(expirationPoint); + + assertEquals(expiredSSTables.size(), pick.size()); + assertEquals(-1, pick.level); + + strategy.addSSTables(nonExpiredSSTables); // duplicates should be skipped + pick = strategy.getNextCompactionPick(expirationPoint); + + assertEquals(expiredSSTables.size() + nonExpiredSSTables.size(), pick.size()); + assertEquals(0, pick.level); + } + finally + { + strategy.shutdown(); + } + } + + @Test + public void testPending() + { + Controller controller = Mockito.mock(Controller.class); + when(controller.getScalingParameter(anyInt())).thenReturn(8); // F=10, T=10 + when(controller.getFanout(anyInt())).thenCallRealMethod(); + when(controller.getThreshold(anyInt())).thenCallRealMethod(); + 
when(controller.maxSSTablesToCompact()).thenReturn(10); // same as fanout + + long minimalSizeBytes = 2 << 20; + when(controller.getMaxLevelDensity(anyInt(), anyDouble())).thenCallRealMethod(); + when(controller.getSurvivalFactor(anyInt())).thenReturn(1.0); + when(controller.getNumShards(anyDouble())).thenReturn(1); + when(controller.getBaseSstableSize(anyInt())).thenReturn((double) minimalSizeBytes); + when(controller.maxConcurrentCompactions()).thenReturn(1000); // let it generate as many candidates as it can + when(controller.maxThroughput()).thenReturn(Double.MAX_VALUE); + when(controller.getIgnoreOverlapsInExpirationCheck()).thenReturn(false); + when(controller.random()).thenCallRealMethod(); + + UnifiedCompactionStrategy strategy = new UnifiedCompactionStrategy(cfs, new HashMap<>(), controller); + strategy.startup(); + + int count = 91; + List sstables = createSStables(cfs.getPartitioner(), + mapFromPair(Pair.create(4 * ONE_MB, count))); + strategy.addSSTables(sstables); + dataTracker.addInitialSSTables(sstables); + + UnifiedCompactionStrategy.CompactionPick pick = strategy.getNextCompactionPick(0); + assertNotNull(pick); + assertEquals(9, strategy.getEstimatedRemainingTasks()); + } + + @Test + public void testMaximalSelection() + { + Set allSSTables = new HashSet<>(); + allSSTables.addAll(mockNonOverlappingSSTables(10, 0, 100 << 20)); + allSSTables.addAll(mockNonOverlappingSSTables(15, 1, 200 << 20)); + allSSTables.addAll(mockNonOverlappingSSTables(25, 2, 400 << 20)); + + Controller controller = Mockito.mock(Controller.class); + UnifiedCompactionStrategy strategy = new UnifiedCompactionStrategy(cfs, new HashMap<>(), controller); + strategy.addSSTables(allSSTables); + dataTracker.addInitialSSTables(allSSTables); + + Collection tasks = strategy.getMaximalTask(0, false); + assertEquals(5, tasks.size()); // 5 (gcd of 10,15,25) common boundaries + for (AbstractCompactionTask task : tasks) + { + Set compacting = task.transaction.originals(); + assertEquals(2 + 3 + 
5, compacting.size()); // count / gcd sstables of each level + assertEquals((2 * 100L + 3 * 200 + 5 * 400) << 20, compacting.stream().mapToLong(SSTableReader::onDiskLength).sum()); + + // None of the selected sstables may intersect any in any other set. + for (AbstractCompactionTask task2 : tasks) + { + if (task == task2) + continue; + + Set compacting2 = task2.transaction.originals(); + for (SSTableReader r1 : compacting) + for (SSTableReader r2 : compacting2) + assertTrue(r1 + " intersects " + r2, r1.getFirst().compareTo(r2.getLast()) > 0 || r1.getLast().compareTo(r2.getFirst()) < 0); + } + } + } + + @Test + public void testBucketSelectionSimple() + { + testBucketSelection(repeats(4, 10), repeats(10, 4), Overlaps.InclusionMethod.TRANSITIVE); + } + + @Test + public void testBucketSelectionHalved() + { + testBucketSelection(repeats(4, arr(10, 5)), repeats(5, 6), Overlaps.InclusionMethod.TRANSITIVE); + testBucketSelection(repeats(4, arr(10, 5)), repeats(5, 6), Overlaps.InclusionMethod.SINGLE); + // When we take large sstables for one compaction, remaining overlaps don't have enough to trigger next + testBucketSelection(repeats(4, arr(10, 5)), repeats(5, 4), Overlaps.InclusionMethod.NONE, 10); + } + + @Test + public void testBucketSelectionFives() + { + testBucketSelection(arr(25, 15, 10), repeats(5, arr(10)), Overlaps.InclusionMethod.TRANSITIVE); + testBucketSelection(arr(25, 15, 10), repeats(10, arr(6, 4)), Overlaps.InclusionMethod.SINGLE); + // When we take large sstables for one compaction, remaining overlaps don't have enough to trigger next + testBucketSelection(arr(25, 15, 10), repeats(10, arr(3)), Overlaps.InclusionMethod.NONE, 20); + } + + @Test + public void testBucketSelectionMissing() + { + testBucketSelection(repeats(4,5), repeats(4, 4), Overlaps.InclusionMethod.TRANSITIVE, 3, 1); + } + + @Test + public void testBucketSelectionHalvesMissing() + { + // Drop one half: still compact because of overlap + // Note: picks are returned right-to-left because the 
random mock always returns 0, picking the last bucket. + testBucketSelection(repeats(4, arr(6, 3)), arr(6, 6, 5), Overlaps.InclusionMethod.TRANSITIVE, 0, 1); + // Drop one full: don't compact + testBucketSelection(repeats(4, arr(3, 6)), arr(6, 6), Overlaps.InclusionMethod.TRANSITIVE, 5, 1); + // Drop two adjacent halves: don't compact + testBucketSelection(repeats(4, arr(6, 3)), arr(6, 6), Overlaps.InclusionMethod.TRANSITIVE, 4, 2, 3); + } + + + private int[] arr(int... values) + { + return values; + } + + private int[] repeats(int count, int... values) + { + int[] rep = new int[count]; + for (int i = 0; i < count; ++i) + rep[i] = values[i % values.length]; + return rep; + } + + public void testBucketSelection(int[] counts, int[] expecteds, Overlaps.InclusionMethod overlapInclusionMethod) + { + testBucketSelection(counts, expecteds, overlapInclusionMethod, 0); + } + + public void testBucketSelection(int[] counts, int[] expecteds, Overlaps.InclusionMethod overlapInclusionMethod, int expectedRemaining, int... 
dropFromFirst) + { + Set allSSTables = new HashSet<>(); + int fanout = counts.length; + for (int i = 0; i < fanout; ++i) + { + final int count = counts[i]; + final List list = mockNonOverlappingSSTables(count, 0, (100 << 20) / count); + if (i == 0) + { + for (int k = dropFromFirst.length - 1; k >= 0; --k) + list.remove(dropFromFirst[k]); + } + allSSTables.addAll(list); + } + Controller controller = Mockito.mock(Controller.class); + when(controller.getScalingParameter(anyInt())).thenReturn(fanout - 2); // F=T=fanout + when(controller.getFanout(anyInt())).thenCallRealMethod(); + when(controller.getThreshold(anyInt())).thenCallRealMethod(); + when(controller.getMaxLevelDensity(anyInt(), anyDouble())).thenCallRealMethod(); + when(controller.getSurvivalFactor(anyInt())).thenReturn(1.0); + when(controller.getNumShards(anyDouble())).thenReturn(1); + when(controller.getBaseSstableSize(anyInt())).thenReturn((double) (90 << 20)); + when(controller.maxConcurrentCompactions()).thenReturn(1000); // let it generate as many candidates as it can + when(controller.maxThroughput()).thenReturn(Double.MAX_VALUE); + when(controller.getIgnoreOverlapsInExpirationCheck()).thenReturn(false); + when(controller.overlapInclusionMethod()).thenReturn(overlapInclusionMethod); + Random randomMock = Mockito.mock(Random.class); + when(randomMock.nextInt(anyInt())).thenReturn(0); + when(controller.random()).thenReturn(randomMock); + UnifiedCompactionStrategy strategy = new UnifiedCompactionStrategy(cfs, new HashMap<>(), controller); + strategy.addSSTables(allSSTables); + dataTracker.addInitialSSTables(allSSTables); + + List picks = new ArrayList<>(); + while (true) + { + UnifiedCompactionStrategy.CompactionPick pick = strategy.getNextCompactionPick(0); + if (pick == null) + break; + strategy.removeSSTables(pick); + picks.add(pick); + } + assertEquals(expectedRemaining, strategy.getSSTables().size()); + + assertEquals(expecteds.length, picks.size()); + int buckIdx = 0; + for 
(UnifiedCompactionStrategy.CompactionPick pick : picks) + { + int expectedCount = expecteds[buckIdx++]; + assertEquals(expectedCount, pick.size()); // count / gcd sstables of each level + + if (overlapInclusionMethod == Overlaps.InclusionMethod.TRANSITIVE) + { + // None of the selected sstables may intersect any in any other set. + for (UnifiedCompactionStrategy.CompactionPick pick2 : picks) + { + if (pick == pick2) + continue; + + for (SSTableReader r1 : pick) + for (SSTableReader r2 : pick2) + assertTrue(r1 + " intersects " + r2, r1.getFirst().compareTo(r2.getLast()) > 0 || r1.getLast().compareTo(r2.getFirst()) < 0); + } + } + } + } + + SSTableReader mockSSTable(int level, long bytesOnDisk, long timestamp, double hotness, DecoratedKey first, DecoratedKey last) + { + return mockSSTable(level, bytesOnDisk, timestamp, hotness, first, last, 0); + } + + SSTableReader mockSSTable(long bytesOnDisk, long timestamp, DecoratedKey first, DecoratedKey last) + { + return mockSSTable(0, bytesOnDisk, timestamp, 0, first, last, 0); + } + + SSTableReader mockSSTable(int level, + long bytesOnDisk, + long timestamp, + double hotness, + DecoratedKey first, + DecoratedKey last, + int ttl) + { + // We create a ton of mock SSTables that mockito is going to keep until the end of the test suite without stubOnly. + // Mockito keeps them alive to preserve the history of invocations which is not available for stubs. If we ever + // need history of invocations and remove stubOnly, we should also manually reset mocked SSTables in tearDown. 
+ SSTableReader ret = Mockito.mock(SSTableReader.class, withSettings().stubOnly() + .defaultAnswer(RETURNS_SMART_NULLS)); + + when(ret.getSSTableLevel()).thenReturn(level); + when(ret.onDiskLength()).thenReturn(bytesOnDisk); + when(ret.uncompressedLength()).thenReturn(bytesOnDisk); // let's assume no compression + when(ret.getMaxTimestamp()).thenReturn(timestamp); + when(ret.getMinTimestamp()).thenReturn(timestamp); + when(ret.getFirst()).thenReturn(first); + when(ret.getLast()).thenReturn(last); + when(ret.isMarkedSuspect()).thenReturn(false); + when(ret.isRepaired()).thenReturn(false); + when(ret.getRepairedAt()).thenReturn(repairedAt); + when(ret.getPendingRepair()).thenReturn(null); + when(ret.isPendingRepair()).thenReturn(false); + when(ret.getColumnFamilyName()).thenReturn(table); + when(ret.toString()).thenReturn(String.format("Bytes on disk: %s, level %d, hotness %f, timestamp %d, first %s, last %s", + FBUtilities.prettyPrintMemory(bytesOnDisk), level, hotness, timestamp, first, last)); + long deletionTime; + if (ttl > 0) + deletionTime = TimeUnit.MILLISECONDS.toSeconds(timestamp) + ttl; + else + deletionTime = Long.MAX_VALUE; + + when(ret.getMinLocalDeletionTime()).thenReturn(deletionTime); + when(ret.getMaxLocalDeletionTime()).thenReturn(deletionTime); + when(ret.getMinTTL()).thenReturn(ttl); + when(ret.getMaxTTL()).thenReturn(ttl); + + return ret; + } + + List mockSSTables(int numSSTables, long bytesOnDisk, double hotness, long timestamp) + { + DecoratedKey first = new BufferDecoratedKey(partitioner.getMinimumToken(), ByteBuffer.allocate(0)); + DecoratedKey last = new BufferDecoratedKey(partitioner.getMinimumToken(), ByteBuffer.allocate(0)); + + List sstables = new ArrayList<>(); + for (int i = 0; i < numSSTables; i++) + { + long b = (long)(bytesOnDisk * 0.95 + bytesOnDisk * 0.05 * random.nextDouble()); // leave 5% variability + double h = hotness * 0.95 + hotness * 0.05 * random.nextDouble(); // leave 5% variability + sstables.add(mockSSTable(0, b, 
timestamp, h, first, last, 0)); + } + + return sstables; + } + + List mockNonOverlappingSSTables(int numSSTables, int level, long bytesOnDisk) + { + if (!partitioner.splitter().isPresent()) + throw new IllegalStateException(String.format("Cannot split ranges with current partitioner %s", partitioner)); + + ByteBuffer emptyBuffer = ByteBuffer.allocate(0); + + long timestamp = System.currentTimeMillis(); + List sstables = new ArrayList<>(numSSTables); + for (int i = 0; i < numSSTables; i++) + { + DecoratedKey first = new BufferDecoratedKey(boundary(numSSTables, i).nextValidToken(), emptyBuffer); + DecoratedKey last = new BufferDecoratedKey(boundary(numSSTables, i+1), emptyBuffer); + sstables.add(mockSSTable(level, bytesOnDisk, timestamp, 0., first, last)); + + timestamp+=10; + } + + return sstables; + } + + private Token boundary(int numSSTables, int i) + { + return partitioner.split(partitioner.getMinimumToken(), partitioner.getMaximumToken(), i * 1.0 / numSSTables); + } +} diff --git a/test/unit/org/apache/cassandra/db/compaction/unified/ControllerTest.java b/test/unit/org/apache/cassandra/db/compaction/unified/ControllerTest.java new file mode 100644 index 000000000000..a0ccd10c7fa1 --- /dev/null +++ b/test/unit/org/apache/cassandra/db/compaction/unified/ControllerTest.java @@ -0,0 +1,336 @@ +/* + * Copyright DataStax, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.cassandra.db.compaction.unified; + +import java.util.Arrays; +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; + +import com.google.common.collect.ImmutableList; +import org.junit.Assert; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; + +import org.apache.cassandra.Util; +import org.apache.cassandra.config.DatabaseDescriptor; +import org.apache.cassandra.db.ColumnFamilyStore; +import org.apache.cassandra.db.DiskBoundaries; +import org.apache.cassandra.db.PartitionPosition; +import org.apache.cassandra.db.compaction.UnifiedCompactionStrategy; +import org.apache.cassandra.exceptions.ConfigurationException; +import org.apache.cassandra.schema.SchemaConstants; +import org.apache.cassandra.schema.TableMetadata; +import org.apache.cassandra.utils.FBUtilities; +import org.apache.cassandra.utils.Overlaps; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; + +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; +import static org.mockito.Mockito.when; + +public class ControllerTest +{ + static final double epsilon = 0.00000001; + static final boolean allowOverlaps = false; + static final long checkFrequency= 600L; + + @Mock + ColumnFamilyStore cfs; + + @Mock + TableMetadata metadata; + + @Mock + UnifiedCompactionStrategy strategy; + + protected String keyspaceName = "TestKeyspace"; + protected DiskBoundaries diskBoundaries = new DiskBoundaries(cfs, null, null, 0, 0); + + @BeforeClass + public static void setUpClass() + { + DatabaseDescriptor.daemonInitialization(); + } + + @Before + public void setUp() + { + MockitoAnnotations.initMocks(this); + + when(strategy.getMetadata()).thenReturn(metadata); + 
when(strategy.getEstimatedRemainingTasks()).thenReturn(0); + + when(metadata.toString()).thenReturn(""); + when(cfs.getKeyspaceName()).thenAnswer(invocation -> keyspaceName); + when(cfs.getDiskBoundaries()).thenAnswer(invocation -> diskBoundaries); + } + + Controller testFromOptions(Map options) + { + addOptions(false, options); + Controller.validateOptions(options); + + Controller controller = Controller.fromOptions(cfs, options); + assertNotNull(controller); + assertNotNull(controller.toString()); + + for (int i = 0; i < 5; i++) // simulate 5 levels + assertEquals(Controller.DEFAULT_SURVIVAL_FACTOR, controller.getSurvivalFactor(i), epsilon); + assertEquals(2, controller.getNumShards(0)); + assertEquals(16, controller.getNumShards(16 * 100 << 20)); + assertEquals(Overlaps.InclusionMethod.SINGLE, controller.overlapInclusionMethod()); + + return controller; + } + + @Test + public void testValidateOptions() + { + testValidateOptions(false); + } + + @Test + public void testValidateOptionsIntegers() + { + testValidateOptions(true); + } + + void testValidateOptions(boolean useIntegers) + { + Map options = new HashMap<>(); + addOptions(useIntegers, options); + options = Controller.validateOptions(options); + assertTrue(options.toString(), options.isEmpty()); + } + + private static void addOptions(boolean useIntegers, Map options) + { + String wStr = Arrays.stream(Ws) + .mapToObj(useIntegers ? 
Integer::toString : UnifiedCompactionStrategy::printScalingParameter) + .collect(Collectors.joining(",")); + options.putIfAbsent(Controller.SCALING_PARAMETERS_OPTION, wStr); + + options.putIfAbsent(Controller.ALLOW_UNSAFE_AGGRESSIVE_SSTABLE_EXPIRATION_OPTION, Boolean.toString(allowOverlaps)); + options.putIfAbsent(Controller.EXPIRED_SSTABLE_CHECK_FREQUENCY_SECONDS_OPTION, Long.toString(checkFrequency)); + + options.putIfAbsent(Controller.BASE_SHARD_COUNT_OPTION, Integer.toString(2)); + options.putIfAbsent(Controller.TARGET_SSTABLE_SIZE_OPTION, FBUtilities.prettyPrintMemory(100 << 20)); + options.putIfAbsent(Controller.OVERLAP_INCLUSION_METHOD_OPTION, Overlaps.InclusionMethod.SINGLE.toString().toLowerCase()); + } + + @Test + public void testScalingParameterConversion() + { + testScalingParameterConversion("T4", 2); + testScalingParameterConversion("L4", -2); + testScalingParameterConversion("N", 0); + testScalingParameterConversion("L2, T2, N", 0, 0, 0); + testScalingParameterConversion("T10, T8, T4, N, L4, L6", 8, 6, 2, 0, -2, -4); + testScalingParameterConversion("T10000, T1000, T100, T10, T2, L10, L100, L1000, L10000", 9998, 998, 98, 8, 0, -8, -98, -998, -9998); + + testScalingParameterParsing("-50 , T5 , 3 , N , L7 , +5 , -12 ,T9,L4,6,-7,+0,-0", -50, 3, 3, 0, -5, 5, -12, 7, -2, 6, -7, 0, 0); + + testScalingParameterError("Q6"); + testScalingParameterError("L4,,T5"); + testScalingParameterError("L1"); + testScalingParameterError("T1"); + testScalingParameterError("L0"); + testScalingParameterError("T0"); + testScalingParameterError("T-5"); + testScalingParameterError("T+5"); + testScalingParameterError("L-5"); + testScalingParameterError("L+5"); + testScalingParameterError("N3"); + testScalingParameterError("7T"); + testScalingParameterError("T,5"); + testScalingParameterError("L,5"); + } + + void testScalingParameterConversion(String definition, int... 
parameters) + { + testScalingParameterParsing(definition, parameters); + + String normalized = definition.replaceAll("T2|L2", "N"); + assertEquals(normalized, Controller.printScalingParameters(parameters)); + + testScalingParameterParsing(Arrays.toString(parameters).replaceAll("[\\[\\]]", ""), parameters); + } + + void testScalingParameterParsing(String definition, int... parameters) + { + assertArrayEquals(parameters, Controller.parseScalingParameters(definition)); + } + + void testScalingParameterError(String definition) + { + try + { + Controller.parseScalingParameters(definition); + Assert.fail("Expected error on " + definition); + } + catch (ConfigurationException e) + { + // expected + } + } + + @Test + public void testGetNumShards() + { + Map options = new HashMap<>(); + options.putIfAbsent(Controller.BASE_SHARD_COUNT_OPTION, Integer.toString(3)); + options.putIfAbsent(Controller.TARGET_SSTABLE_SIZE_OPTION, FBUtilities.prettyPrintMemory(100 << 20)); + Controller controller = Controller.fromOptions(cfs, options); + + // Easy ones + // x00 MiB = x * 100 + assertEquals(6, controller.getNumShards(Math.scalb(600, 20))); + assertEquals(24, controller.getNumShards(Math.scalb(2400, 20))); + assertEquals(6 * 1024, controller.getNumShards(Math.scalb(600, 30))); + // Check rounding + assertEquals(6, controller.getNumShards(Math.scalb(800, 20))); + assertEquals(12, controller.getNumShards(Math.scalb(900, 20))); + assertEquals(6 * 1024, controller.getNumShards(Math.scalb(800, 30))); + assertEquals(12 * 1024, controller.getNumShards(Math.scalb(900, 30))); + // Check lower limit + assertEquals(3, controller.getNumShards(Math.scalb(200, 20))); + assertEquals(3, controller.getNumShards(Math.scalb(100, 20))); + assertEquals(3, controller.getNumShards(Math.scalb(10, 20))); + assertEquals(3, controller.getNumShards(5)); + assertEquals(3, controller.getNumShards(0)); + // Check upper limit + assertEquals(3 * (int) Controller.MAX_SHARD_SPLIT, 
controller.getNumShards(Math.scalb(600, 40))); + assertEquals(3 * (int) Controller.MAX_SHARD_SPLIT, controller.getNumShards(Math.scalb(10, 60))); + assertEquals(3 * (int) Controller.MAX_SHARD_SPLIT, controller.getNumShards(Double.POSITIVE_INFINITY)); + } + + static final int[] Ws = new int[] { 30, 2, 0, -6}; + + @Test + public void testFromOptions() + { + Map options = new HashMap<>(); + addOptions(false, options); + + Controller controller = testFromOptions(options); + + for (int i = 0; i < Ws.length; i++) + assertEquals(Ws[i], controller.getScalingParameter(i)); + + assertEquals(Ws[Ws.length-1], controller.getScalingParameter(Ws.length)); + } + + @Test + public void testFromOptionsIntegers() + { + Map options = new HashMap<>(); + addOptions(true, options); + + Controller controller = testFromOptions(options); + + for (int i = 0; i < Ws.length; i++) + assertEquals(Ws[i], controller.getScalingParameter(i)); + + assertEquals(Ws[Ws.length-1], controller.getScalingParameter(Ws.length)); + } + + @Test + public void testMaxSSTablesToCompact() + { + Map options = new HashMap<>(); + Controller controller = testFromOptions(options); + assertTrue(controller.maxSSTablesToCompact == Integer.MAX_VALUE); + + options.put(Controller.MAX_SSTABLES_TO_COMPACT_OPTION, "100"); + controller = testFromOptions(options); + assertEquals(100, controller.maxSSTablesToCompact); + } + + @Test + public void testExpiredSSTableCheckFrequency() + { + Map options = new HashMap<>(); + + Controller controller = testFromOptions(options); + assertEquals(TimeUnit.MILLISECONDS.convert(Controller.DEFAULT_EXPIRED_SSTABLE_CHECK_FREQUENCY_SECONDS, TimeUnit.SECONDS), + controller.getExpiredSSTableCheckFrequency()); + + options.put(Controller.EXPIRED_SSTABLE_CHECK_FREQUENCY_SECONDS_OPTION, "5"); + controller = testFromOptions(options); + assertEquals(5000L, controller.getExpiredSSTableCheckFrequency()); + + try + { + options.put(Controller.EXPIRED_SSTABLE_CHECK_FREQUENCY_SECONDS_OPTION, "0"); + 
testFromOptions(options); + fail("Exception should be thrown"); + } + catch (ConfigurationException e) + { + // valid path + } + } + + @Test + public void testAllowOverlaps() + { + Map options = new HashMap<>(); + + Controller controller = testFromOptions(options); + assertEquals(Controller.DEFAULT_ALLOW_UNSAFE_AGGRESSIVE_SSTABLE_EXPIRATION, controller.getIgnoreOverlapsInExpirationCheck()); + + options.put(Controller.ALLOW_UNSAFE_AGGRESSIVE_SSTABLE_EXPIRATION_OPTION, "true"); + controller = testFromOptions(options); + assertEquals(Controller.ALLOW_UNSAFE_AGGRESSIVE_SSTABLE_EXPIRATION, controller.getIgnoreOverlapsInExpirationCheck()); + } + + @Test + public void testBaseShardCountDefault() + { + Map options = new HashMap<>(); + Controller controller = Controller.fromOptions(cfs, options); + assertEquals(Controller.DEFAULT_BASE_SHARD_COUNT, controller.baseShardCount); + + String prevKS = keyspaceName; + try + { + keyspaceName = SchemaConstants.SYSTEM_KEYSPACE_NAME; + controller = controller.fromOptions(cfs, options); + assertEquals(1, controller.baseShardCount); + } + finally + { + keyspaceName = prevKS; + } + + PartitionPosition min = Util.testPartitioner().getMinimumToken().minKeyBound(); + diskBoundaries = new DiskBoundaries(cfs, null, ImmutableList.of(min, min, min), 0, 0); + controller = controller.fromOptions(cfs, options); + assertEquals(1, controller.baseShardCount); + + diskBoundaries = new DiskBoundaries(cfs, null, ImmutableList.of(min), 0, 0); + controller = controller.fromOptions(cfs, options); + assertEquals(Controller.DEFAULT_BASE_SHARD_COUNT, controller.baseShardCount); + } +} \ No newline at end of file diff --git a/test/unit/org/apache/cassandra/db/compaction/unified/ShardedCompactionWriterTest.java b/test/unit/org/apache/cassandra/db/compaction/unified/ShardedCompactionWriterTest.java new file mode 100644 index 000000000000..87fb8fcf5968 --- /dev/null +++ b/test/unit/org/apache/cassandra/db/compaction/unified/ShardedCompactionWriterTest.java @@ -0,0 
+1,293 @@ +/* + * Copyright DataStax, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.cassandra.db.compaction.unified; + +import java.nio.ByteBuffer; +import java.util.Collection; +import java.util.List; +import java.util.Random; +import java.util.Set; +import java.util.stream.Collectors; + +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; + +import org.apache.cassandra.cql3.CQLTester; +import org.apache.cassandra.cql3.QueryProcessor; +import org.apache.cassandra.db.ColumnFamilyStore; +import org.apache.cassandra.db.Keyspace; +import org.apache.cassandra.db.compaction.AbstractCompactionStrategy; +import org.apache.cassandra.db.compaction.CompactionController; +import org.apache.cassandra.db.compaction.CompactionIterator; +import org.apache.cassandra.db.compaction.OperationType; +import org.apache.cassandra.db.compaction.ShardManager; +import org.apache.cassandra.db.compaction.ShardManagerDiskAware; +import org.apache.cassandra.db.compaction.ShardManagerNoDisks; +import org.apache.cassandra.db.compaction.writers.CompactionAwareWriter; +import org.apache.cassandra.db.lifecycle.LifecycleTransaction; +import org.apache.cassandra.dht.Token; +import org.apache.cassandra.io.sstable.format.SSTableReader; +import org.apache.cassandra.io.sstable.format.SSTableReaderWithFilter; +import org.apache.cassandra.service.StorageService; +import org.apache.cassandra.utils.FBUtilities; +import 
org.apache.cassandra.utils.TimeUUID; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +public class ShardedCompactionWriterTest extends CQLTester +{ + private static final String KEYSPACE = "cawt_keyspace"; + private static final String TABLE = "cawt_table"; + + private static final int ROW_PER_PARTITION = 10; + + @BeforeClass + public static void beforeClass() + { + CQLTester.setUpClass(); + CQLTester.prepareServer(); + StorageService.instance.initServer(); + + // Disabling durable write since we don't care + schemaChange("CREATE KEYSPACE IF NOT EXISTS " + KEYSPACE + " WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'} AND durable_writes=false"); + schemaChange(String.format("CREATE TABLE %s.%s (k int, t int, v blob, PRIMARY KEY (k, t))", KEYSPACE, TABLE)); + } + + @AfterClass + public static void tearDownClass() + { + QueryProcessor.executeInternal("DROP KEYSPACE IF EXISTS " + KEYSPACE); + } + + private ColumnFamilyStore getColumnFamilyStore() + { + return Keyspace.open(KEYSPACE).getColumnFamilyStore(TABLE); + } + + @Test + public void testOneSSTablePerShard() throws Throwable + { + // If we set the minSSTableSize ratio to 0.5, because this gets multiplied by the shard size to give the min sstable size, + // assuming evenly distributed data, it should split at each boundary and so we should end up with numShards sstables + int numShards = 5; + int rowCount = 5000; + testShardedCompactionWriter(numShards, rowCount, numShards, true); + } + + + @Test + public void testMultipleInputSSTables() throws Throwable + { + int numShards = 3; + int rowCount = 5000; + testShardedCompactionWriter(numShards, rowCount, numShards, false); + } + + private void testShardedCompactionWriter(int numShards, int rowCount, int numOutputSSTables, boolean majorCompaction) throws Throwable + { + ColumnFamilyStore cfs = getColumnFamilyStore(); + cfs.disableAutoCompaction(); + + populate(rowCount, majorCompaction); + + 
LifecycleTransaction txn = cfs.getTracker().tryModify(cfs.getLiveSSTables(), OperationType.COMPACTION); + + ShardManager boundaries = new ShardManagerNoDisks(ColumnFamilyStore.fullWeightedRange(-1, cfs.getPartitioner())); + ShardedCompactionWriter writer = new ShardedCompactionWriter(cfs, cfs.getDirectories(), txn, txn.originals(), false, boundaries.boundaries(numShards)); + + int rows = compact(cfs, txn, writer); + assertEquals(numOutputSSTables, cfs.getLiveSSTables().size()); + assertEquals(rowCount, rows); + + long totalOnDiskLength = cfs.getLiveSSTables().stream().mapToLong(SSTableReader::onDiskLength).sum(); + long totalBFSize = cfs.getLiveSSTables().stream().mapToLong(ShardedCompactionWriterTest::getFilterSize).sum(); + assert totalBFSize > 16 * numOutputSSTables : "Bloom Filter is empty"; // 16 is the size of empty bloom filter + for (SSTableReader rdr : cfs.getLiveSSTables()) + { + assertEquals((double) rdr.onDiskLength() / totalOnDiskLength, + (double) getFilterSize(rdr) / totalBFSize, 0.1); + } + + validateData(cfs, rowCount); + cfs.truncateBlocking(); + } + + static long getFilterSize(SSTableReader rdr) + { + if (!(rdr instanceof SSTableReaderWithFilter)) + return 0; + return ((SSTableReaderWithFilter) rdr).getFilterSerializedSize(); + } + + @Test + public void testDiskAdvance() throws Throwable + { + int rowCount = 5000; + int numDisks = 4; + int numShards = 3; + ColumnFamilyStore cfs = getColumnFamilyStore(); + cfs.disableAutoCompaction(); + + populate(rowCount, false); + + final ColumnFamilyStore.VersionedLocalRanges localRanges = cfs.localRangesWeighted(); + final List diskBoundaries = cfs.getPartitioner().splitter().get().splitOwnedRanges(numDisks, localRanges, false); + ShardManager shardManager = new ShardManagerDiskAware(localRanges, diskBoundaries); + int rows = compact(1, cfs, shardManager, cfs.getLiveSSTables()); + + // We must now have one sstable per disk + assertEquals(numDisks, cfs.getLiveSSTables().size()); + assertEquals(rowCount, rows); 
+ + for (SSTableReader rdr : cfs.getLiveSSTables()) + verifyNoSpannedBoundaries(diskBoundaries, rdr); + + Token selectionStart = diskBoundaries.get(0); + Token selectionEnd = diskBoundaries.get(2); + + // Now compact only a section to trigger disk advance; shard needs to advance with disk, a potential problem + // is to create on-partition sstables at the start because shard wasn't advanced at the right time. + Set liveSSTables = cfs.getLiveSSTables(); + List selection = liveSSTables.stream() + .filter(rdr -> rdr.getFirst().getToken().compareTo(selectionStart) > 0 && + rdr.getLast().getToken().compareTo(selectionEnd) <= 0) + .collect(Collectors.toList()); + List remainder = liveSSTables.stream() + .filter(rdr -> !selection.contains(rdr)) + .collect(Collectors.toList()); + + rows = compact(numShards, cfs, shardManager, selection); + + List compactedSelection = cfs.getLiveSSTables() + .stream() + .filter(rdr -> !remainder.contains(rdr)) + .collect(Collectors.toList()); + // We must now have numShards sstables per each of the two disk sections + assertEquals(numShards * 2, compactedSelection.size()); + assertEquals(rowCount * 2.0 / numDisks, rows * 1.0, rowCount / 20.0); // should end up with roughly this many rows + + + long totalOnDiskLength = compactedSelection.stream().mapToLong(SSTableReader::onDiskLength).sum(); + long totalBFSize = compactedSelection.stream().mapToLong(ShardedCompactionWriterTest::getFilterSize).sum(); + double expectedSize = totalOnDiskLength / (numShards * 2.0); + double expectedTokenShare = 1.0 / (numDisks * numShards); + + for (SSTableReader rdr : compactedSelection) + { + verifyNoSpannedBoundaries(diskBoundaries, rdr); + + assertEquals((double) rdr.onDiskLength() / totalOnDiskLength, + (double) getFilterSize(rdr) / totalBFSize, 0.1); + assertEquals(expectedSize, rdr.onDiskLength(), expectedSize * 0.1); + } + + validateData(cfs, rowCount); + cfs.truncateBlocking(); + } + + private int compact(int numShards, ColumnFamilyStore cfs, 
ShardManager shardManager, Collection selection) + { + int rows; + LifecycleTransaction txn = cfs.getTracker().tryModify(selection, OperationType.COMPACTION); + ShardedCompactionWriter writer = new ShardedCompactionWriter(cfs, + cfs.getDirectories(), + txn, + txn.originals(), + false, + shardManager.boundaries(numShards)); + + rows = compact(cfs, txn, writer); + return rows; + } + + private static void verifyNoSpannedBoundaries(List diskBoundaries, SSTableReader rdr) + { + for (int i = 0; i < diskBoundaries.size(); ++i) + { + Token boundary = diskBoundaries.get(i); + // rdr cannot span a boundary. I.e. it must be either fully before (last <= boundary) or fully after + // (first > boundary). + assertTrue(rdr.getFirst().getToken().compareTo(boundary) > 0 || + rdr.getLast().getToken().compareTo(boundary) <= 0); + } + } + + private int compact(ColumnFamilyStore cfs, LifecycleTransaction txn, CompactionAwareWriter writer) + { + //assert txn.originals().size() == 1; + int rowsWritten = 0; + long nowInSec = FBUtilities.nowInSeconds(); + try (AbstractCompactionStrategy.ScannerList scanners = cfs.getCompactionStrategyManager().getScanners(txn.originals()); + CompactionController controller = new CompactionController(cfs, txn.originals(), cfs.gcBefore(nowInSec)); + CompactionIterator ci = new CompactionIterator(OperationType.COMPACTION, scanners.scanners, controller, nowInSec, TimeUUID.minAtUnixMillis(System.currentTimeMillis()))) + { + while (ci.hasNext()) + { + if (writer.append(ci.next())) + rowsWritten++; + } + } + writer.finish(); + return rowsWritten; + } + + private void populate(int count, boolean compact) throws Throwable + { + byte [] payload = new byte[5000]; + new Random(42).nextBytes(payload); + ByteBuffer b = ByteBuffer.wrap(payload); + + ColumnFamilyStore cfs = getColumnFamilyStore(); + for (int i = 0; i < count; i++) + { + for (int j = 0; j < ROW_PER_PARTITION; j++) + execute(String.format("INSERT INTO %s.%s(k, t, v) VALUES (?, ?, ?)", KEYSPACE, TABLE), i, j, 
b); + + if (i % (count / 4) == 0) + cfs.forceBlockingFlush(ColumnFamilyStore.FlushReason.UNIT_TESTS); + } + + cfs.forceBlockingFlush(ColumnFamilyStore.FlushReason.UNIT_TESTS); + if (compact && cfs.getLiveSSTables().size() > 1) + { + // we want just one big sstable to avoid doing actual compaction in compact() above + try + { + cfs.forceMajorCompaction(); + } + catch (Throwable t) + { + throw new RuntimeException(t); + } + assert cfs.getLiveSSTables().size() == 1 : cfs.getLiveSSTables(); + } + } + + private void validateData(ColumnFamilyStore cfs, int rowCount) throws Throwable + { + for (int i = 0; i < rowCount; i++) + { + Object[][] expected = new Object[ROW_PER_PARTITION][]; + for (int j = 0; j < ROW_PER_PARTITION; j++) + expected[j] = row(i, j); + + assertRows(execute(String.format("SELECT k, t FROM %s.%s WHERE k = :i", KEYSPACE, TABLE), i), expected); + } + } +} \ No newline at end of file diff --git a/test/unit/org/apache/cassandra/db/compaction/unified/ShardedMultiWriterTest.java b/test/unit/org/apache/cassandra/db/compaction/unified/ShardedMultiWriterTest.java new file mode 100644 index 000000000000..b48b4a7f0a8d --- /dev/null +++ b/test/unit/org/apache/cassandra/db/compaction/unified/ShardedMultiWriterTest.java @@ -0,0 +1,139 @@ +/* + * Copyright DataStax, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.cassandra.db.compaction.unified; + +import java.nio.ByteBuffer; +import java.util.Random; + +import org.junit.BeforeClass; +import org.junit.Test; +import org.apache.cassandra.cql3.CQLTester; +import org.apache.cassandra.cql3.UntypedResultSet; +import org.apache.cassandra.db.ColumnFamilyStore; +import org.apache.cassandra.service.StorageService; + +import static org.junit.Assert.assertEquals; + +public class ShardedMultiWriterTest extends CQLTester +{ + private static final int ROW_PER_PARTITION = 10; + + @BeforeClass + public static void beforeClass() + { + CQLTester.setUpClass(); + StorageService.instance.initServer(); + } + + @Test + public void testShardedCompactionWriter_fiveShards() throws Throwable + { + int numShards = 5; + int minSSTableSizeMB = 2; + long totSizeBytes = ((minSSTableSizeMB << 20) * numShards) * 2; + + // We have double the data required for 5 shards so we should get 5 shards + testShardedCompactionWriter(numShards, totSizeBytes, numShards); + } + + @Test + public void testShardedCompactionWriter_oneShard() throws Throwable + { + int numShards = 1; + int minSSTableSizeMB = 2; + long totSizeBytes = (minSSTableSizeMB << 20); + + // there should be only 1 shard if there is <= minSSTableSize + testShardedCompactionWriter(numShards, totSizeBytes, 1); + } + + @Test + public void testShardedCompactionWriter_threeShard() throws Throwable + { + int numShards = 3; + int minSSTableSizeMB = 2; + long totSizeBytes = (minSSTableSizeMB << 20) * 3; + + // there should be only 3 shards if there is minSSTableSize * 3 data + testShardedCompactionWriter(numShards, totSizeBytes, 3); + } + + private void testShardedCompactionWriter(int numShards, long totSizeBytes, int numOutputSSTables) throws Throwable + { + createTable(String.format("CREATE TABLE %%s (k int, t int, v blob, PRIMARY KEY (k, t)) with compaction = " + + "{'class':'UnifiedCompactionStrategy', 'base_shard_count' : '%d'} ", numShards)); + + ColumnFamilyStore cfs = 
getCurrentColumnFamilyStore(); + cfs.disableAutoCompaction(); + + int rowCount = insertData(totSizeBytes); + cfs.forceBlockingFlush(ColumnFamilyStore.FlushReason.UNIT_TESTS); + + assertEquals(numOutputSSTables, cfs.getLiveSSTables().size()); + + validateData(rowCount); + cfs.truncateBlocking(); + } + + private int insertData(long totSizeBytes) throws Throwable + { + byte [] payload = new byte[5000]; + ByteBuffer b = ByteBuffer.wrap(payload); + int rowCount = (int) Math.ceil((double) totSizeBytes / (8 + ROW_PER_PARTITION * payload.length)); + + for (int i = 0; i < rowCount; i++) + { + for (int j = 0; j < ROW_PER_PARTITION; j++) + { + new Random(42 + i * ROW_PER_PARTITION + j).nextBytes(payload); // write different data each time to make non-compressible + execute("INSERT INTO %s(k, t, v) VALUES (?, ?, ?)", i, j, b); + } + } + + return rowCount; + } + + private void validateData(int rowCount) throws Throwable + { + for (int i = 0; i < rowCount; i++) + { + Object[][] expected = new Object[ROW_PER_PARTITION][]; + for (int j = 0; j < ROW_PER_PARTITION; j++) + expected[j] = row(i, j); + + assertRows(execute("SELECT k, t FROM %s WHERE k = :i", i), expected); + } + } + + @Override + public UntypedResultSet execute(String query, Object... 
values) + { + return super.executeFormattedQuery(formatQuery(KEYSPACE_PER_TEST, query), values); + } + + @Override + public String createTable(String query) + { + return super.createTable(KEYSPACE_PER_TEST, query); + } + + @Override + public ColumnFamilyStore getCurrentColumnFamilyStore() + { + return super.getCurrentColumnFamilyStore(KEYSPACE_PER_TEST); + } +} \ No newline at end of file diff --git a/test/unit/org/apache/cassandra/db/memtable/MemtableQuickTest.java b/test/unit/org/apache/cassandra/db/memtable/MemtableQuickTest.java index a3dbcd77e9e4..c442a4c036e8 100644 --- a/test/unit/org/apache/cassandra/db/memtable/MemtableQuickTest.java +++ b/test/unit/org/apache/cassandra/db/memtable/MemtableQuickTest.java @@ -82,7 +82,8 @@ public void testMemtable() throws Throwable String keyspace = createKeyspace("CREATE KEYSPACE %s with replication = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 } and durable_writes = false"); String table = createTable(keyspace, "CREATE TABLE %s ( userid bigint, picid bigint, commentid bigint, PRIMARY KEY(userid, picid))" + " with compression = {'enabled': false}" + - " and memtable = '" + memtableClass + "'"); + " and memtable = '" + memtableClass + "'" + + " and compaction = { 'class': 'UnifiedCompactionStrategy', 'base_shard_count': '4' }"); // to trigger splitting of sstables, CASSANDRA-18123 execute("use " + keyspace + ';'); String writeStatement = "INSERT INTO "+table+"(userid,picid,commentid)VALUES(?,?,?)"; @@ -159,7 +160,7 @@ public void testMemtable() throws Throwable } // make sure the row counts are correct in both the metadata as well as the cardinality estimator - // (see STAR-1826) + // (see CASSANDRA-18123) long totalPartitions = 0; for (SSTableReader sstable : sstables) { diff --git a/test/unit/org/apache/cassandra/utils/OverlapsTest.java b/test/unit/org/apache/cassandra/utils/OverlapsTest.java new file mode 100644 index 000000000000..3312578b629c --- /dev/null +++ 
b/test/unit/org/apache/cassandra/utils/OverlapsTest.java @@ -0,0 +1,372 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.cassandra.utils; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Comparator; +import java.util.HashSet; +import java.util.List; +import java.util.Random; +import java.util.Set; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +import com.google.common.collect.Ordering; +import org.junit.Assert; +import org.junit.Test; + +import static org.junit.Assert.assertEquals; + +public class OverlapsTest +{ + Random random = new Random(1); + + @Test + public void testConstructOverlapsMap() + { + Interval[] input = new Interval[]{ + Interval.create(1, 5, "A"), + Interval.create(1, 3, "B"), + Interval.create(1, 3, "C"), + // expected 1 - 2, ABC + Interval.create(2, 6, "D"), + // expected 2 - 3, ABCD + Interval.create(3, 7, "E"), + // expected 3 - 5 ADE + Interval.create(5, 7, "F"), + // expected 5 - 6 DEF + // expected 6 - 7 EF + Interval.create(7, 9, "G"), + // hole + // expected 7 - 9 G + Interval.create(10, 13, "H"), + // expected 10 - 11 H + Interval.create(11, 12, "I"), + // expected 11 - 12 HI + 
// expected 12 - 13 H + Interval.create(1399, 1799, "J"), + Interval.create(1402, 1798, "K"), + + Interval.create(2102, 2402, "L"), + Interval.create(2099, 2398, "M"), + + Interval.create(2502, 2998, "N"), + Interval.create(2499, 2601, "O"), + Interval.create(2602, 3001, "P"), + Interval.create(2799, 3401, "Q"), + + Interval.create(3502, 3998, "R"), + Interval.create(3499, 3602, "S"), + Interval.create(3601, 4001, "T"), + }; + String[] allOverlapsManual = new String[]{ + "ABC", + "ABCD", + "ADE", + "DEF", + "EF", + "G", + "", + "H", + "HI", + "H", + "", + "J", + "JK", + "J", + "", + "M", + "LM", + "L", + "", + "O", + "NO", + "N", + "NP", + "NPQ", + "PQ", + "Q", + "", + "S", + "RS", + "RST", + "RT", + "T" + }; + String[] expectedSubsumed = new String[]{ + "ABCD", + "ADE", + "DEF", + "G", + "HI", + "JK", + "LM", + "NO", + "NPQ", + "RST", + }; + List allOverlaps = getAllOverlaps(input, false); + assertEquals(Arrays.asList(allOverlapsManual), allOverlaps); + + List subsumed = subsumeContainedNeighbours(allOverlaps); + assertEquals(Arrays.asList(expectedSubsumed), subsumed); + + List>> overlaps = Overlaps.constructOverlapSets(Arrays.asList(input), + (x, y) -> x.min >= y.max, + Comparator.comparingInt(x -> x.min), + Comparator.comparingInt(x -> x.max)); + + List result = mapOverlapSetsToStrings(overlaps); + assertEquals(subsumed, result); + } + + private static List mapOverlapSetsToStrings(List>> overlaps) + { + List result = overlaps.stream() + .map(set -> set.stream() + .map(x -> x.data) + .sorted() + .collect(Collectors.joining())) + .collect(Collectors.toList()); + return result; + } + + @Test + public void testConstructOverlapsMapRandom() + { + int size; + int range = 100; + Random rand = new Random(); + for (int i = 0; i < 1000; ++i) + { + size = rand.nextInt(range) + 2; + Interval[] input = new Interval[size]; + char c = 'A'; + for (int j = 0; j < size; ++j) + { + int start = rand.nextInt(range); + input[j] = (new Interval<>(start, start + 1 + random.nextInt(range 
- start), Character.toString(c++))); + } + + boolean endInclusive = rand.nextBoolean(); + List expected = subsumeContainedNeighbours(getAllOverlaps(input, endInclusive)); + + List>> overlaps = + Overlaps.constructOverlapSets(Arrays.asList(input), + endInclusive ? (x, y) -> x.min > y.max + : (x, y) -> x.min >= y.max, + Comparator.comparingInt(x -> x.min), + Comparator.comparingInt(x -> x.max)); + List result = mapOverlapSetsToStrings(overlaps); + assertEquals("Input " + Arrays.asList(input), expected, result); + } + } + + private static List getAllOverlaps(Interval[] input, boolean endInclusive) + { + int min = Arrays.stream(input).mapToInt(x -> x.min).min().getAsInt(); + int max = Arrays.stream(input).mapToInt(x -> x.max).max().getAsInt(); + List allOverlaps = new ArrayList<>(); + IntStream.range(min, max) + .mapToObj(i -> Arrays.stream(input) + .filter(iv -> i >= iv.min && (i < iv.max || endInclusive && i == iv.max)) + .map(iv -> iv.data) + .collect(Collectors.joining())) + .reduce(null, (prev, curr) -> { + if (curr.equals(prev)) + return prev; + allOverlaps.add(curr); + return curr; + }); + return allOverlaps; + } + + private List subsumeContainedNeighbours(List allOverlaps) + { + List subsumed = new ArrayList<>(); + String last = ""; + for (String overlap : allOverlaps) + { + if (containsAll(last, overlap)) + continue; + if (containsAll(overlap, last)) + { + last = overlap; + continue; + } + subsumed.add(last); + last = overlap; + } + assert !last.isEmpty(); + subsumed.add(last); + return subsumed; + } + + boolean containsAll(String a, String b) + { + if (a.contains(b)) + return true; + return asSet(a).containsAll(asSet(b)); + } + + private static Set asSet(String a) + { + Set as = new HashSet<>(); + for (int i = 0; i < a.length(); ++i) + as.add(a.charAt(i)); + return as; + } + + + @Test + public void testAssignOverlapsIntoBuckets() + { + String[] sets = new String[]{ + "ABCD", + "ADE", + "EF", + "HI", + "LN", + "NO", + "NPQ", + "RST", + }; + String[] none3 = 
new String[]{ + "ABCD", + "ADE", + "NPQ", + "RST", + }; + String[] single3 = new String[]{ + "ABCDE", + "LNOPQ", + "RST", + }; + String[] transitive3 = new String[]{ + "ABCDEF", + "LNOPQ", + "RST", + }; + + List> input = Arrays.stream(sets).map(OverlapsTest::asSet).collect(Collectors.toList()); + List actual; + + actual = Overlaps.assignOverlapsIntoBuckets(3, Overlaps.InclusionMethod.NONE, input, this::makeBucket); + assertEquals(Arrays.asList(none3), actual); + + actual = Overlaps.assignOverlapsIntoBuckets(3, Overlaps.InclusionMethod.SINGLE, input, this::makeBucket); + assertEquals(Arrays.asList(single3), actual); + + actual = Overlaps.assignOverlapsIntoBuckets(3, Overlaps.InclusionMethod.TRANSITIVE, input, this::makeBucket); + assertEquals(Arrays.asList(transitive3), actual); + + } + + private String makeBucket(List> sets, int startIndex, int endIndex) + { + Set bucket = new HashSet<>(); + for (int i = startIndex; i < endIndex; ++i) + bucket.addAll(sets.get(i)); + return bucket.stream() + .sorted() + .map(x -> x.toString()) + .collect(Collectors.joining()); + } + + @Test + public void testMultiSetPullOldest() + { + // In this test each letter stands for an sstable, ordered alphabetically (i.e. 
A is oldest) + Assert.assertEquals("ABCD", pullLast(3, "ACD", "BCD")); + Assert.assertEquals("ABC", pullLast(2, "ACD", "BCD")); + Assert.assertEquals("BC", pullLast(2, "CDE", "BCD")); + } + + + @Test + public void testMultiSetPullOldestRandom() + { + int size; + int range = 100; + Random rand = new Random(); + for (int i = 0; i < 100; ++i) + { + size = rand.nextInt(range) + 2; + Interval[] input = new Interval[size]; + char c = 'A'; + for (int j = 0; j < size; ++j) + { + int start = rand.nextInt(range); + input[j] = (new Interval<>(start, start + 1 + random.nextInt(range - start), Character.toString(c++))); + } + + List>> overlaps = Overlaps.constructOverlapSets(Arrays.asList(input), + (x, y) -> x.min >= y.max, + Comparator.comparingInt(x -> x.min), + Comparator.comparingInt(x -> x.max)); + String[] overlapSets = mapOverlapSetsToStrings(overlaps).toArray(new String[0]); + int maxOverlap = Arrays.stream(overlapSets).mapToInt(String::length).max().getAsInt(); + for (int limit = 1; limit <= maxOverlap + 1; ++limit) + { + String pulled = pullLast(limit, overlapSets); + String message = pulled + " from " + overlapSets + " limit " + limit; + Assert.assertTrue(message + ", size " + pulled.length(), pulled.length() >= Math.min(size, limit)); + String e = ""; + for (char j = 'A'; j < pulled.length() + 'A'; ++j) + e += Character.toString(j); + Assert.assertEquals("Must select oldest " + message, e, pulled); + int countAtLimit = 0; + for (String set : overlapSets) + { + int count = 0; + for (int j = 0; j < set.length(); ++j) + if (pulled.indexOf(set.charAt(j)) >= 0) + ++count; + Assert.assertTrue(message + " set " + set + " elements " + count, count <= limit); + if (count == limit) + ++countAtLimit; + } + if (pulled.length() < size) + Assert.assertTrue(message + " must have at least one set of size " + limit, countAtLimit > 0); + else + Assert.assertTrue(message,limit >= maxOverlap); + } + } + } + + String pullLast(int limit, String... 
inputOverlapSets) + { + List> overlapSets = Arrays.stream(inputOverlapSets) + .map(s -> IntStream.range(0, s.length()) + .mapToObj(i -> Character.toString(s.charAt(i))) + .collect(Collectors.toSet())) + .collect(Collectors.toList()); + + List allObjectsSorted = overlapSets.stream() + .flatMap(x -> x.stream()) + .sorted(Ordering.natural().reversed()) + .distinct() + .collect(Collectors.toList()); + + Collection pulled = Overlaps.pullLastWithOverlapLimit(allObjectsSorted, overlapSets, limit); + return pulled.stream().sorted().collect(Collectors.joining()); + } +} From 955b73e32c782f1d450acae62788200cc1fb3b40 Mon Sep 17 00:00:00 2001 From: Branimir Lambov Date: Thu, 10 Nov 2022 15:12:20 +0200 Subject: [PATCH 13/27] Put tokenSpaceCoverage in sstable metadata This is needed to avoid situations where changed local token space ownership causes SSTables to have their density calculated incorrectly. This is only calculated and written by UCS writers; when the value is not available in the file, it is calculated from the covered range. 
--- .../cassandra/db/compaction/ShardManager.java | 7 ++++- .../db/compaction/ShardManagerDiskAware.java | 6 +++++ .../db/compaction/ShardManagerNoDisks.java | 7 +++++ .../db/compaction/ShardManagerTrivial.java | 7 +++++ .../cassandra/db/compaction/ShardTracker.java | 10 ++++++++ .../unified/ShardedCompactionWriter.java | 7 +++++ .../unified/ShardedMultiWriter.java | 10 ++++++-- .../io/sstable/RangeAwareSSTableWriter.java | 4 +-- .../io/sstable/SSTableMultiWriter.java | 2 +- .../io/sstable/SSTableTxnWriter.java | 4 +-- .../io/sstable/SSTableZeroCopyWriter.java | 2 +- .../io/sstable/SimpleSSTableMultiWriter.java | 5 ++-- .../io/sstable/format/SSTableReader.java | 13 ++++++++++ .../io/sstable/format/SSTableWriter.java | 6 +++++ .../cassandra/io/sstable/format/Version.java | 8 ++++++ .../io/sstable/format/big/BigFormat.java | 9 +++++++ .../io/sstable/format/bti/BtiFormat.java | 6 +++++ .../sstable/metadata/MetadataCollector.java | 11 +++++++- .../io/sstable/metadata/StatsMetadata.java | 24 ++++++++++++++++++ .../tools/SSTableMetadataViewer.java | 1 + .../da-1-bti-CompressionInfo.db | Bin 207 -> 207 bytes .../legacy_da_clust/da-1-bti-Data.db | Bin 8746 -> 8660 bytes .../legacy_da_clust/da-1-bti-Digest.crc32 | 2 +- .../legacy_da_clust/da-1-bti-Rows.db | Bin 563 -> 563 bytes .../legacy_da_clust/da-1-bti-Statistics.db | Bin 7304 -> 7312 bytes .../da-1-bti-CompressionInfo.db | Bin 199 -> 199 bytes .../legacy_da_clust_counter/da-1-bti-Data.db | Bin 7798 -> 7767 bytes .../da-1-bti-Digest.crc32 | 2 +- .../legacy_da_clust_counter/da-1-bti-Rows.db | Bin 563 -> 563 bytes .../da-1-bti-Statistics.db | Bin 7313 -> 7321 bytes .../legacy_da_simple/da-1-bti-Data.db | Bin 89 -> 88 bytes .../legacy_da_simple/da-1-bti-Digest.crc32 | 2 +- .../legacy_da_simple/da-1-bti-Statistics.db | Bin 4814 -> 4822 bytes .../legacy_da_simple_counter/da-1-bti-Data.db | Bin 141 -> 140 bytes .../da-1-bti-Digest.crc32 | 2 +- .../da-1-bti-Statistics.db | Bin 4823 -> 4831 bytes .../oa-1-big-CompressionInfo.db 
| Bin 207 -> 207 bytes .../legacy_oa_clust/oa-1-big-Data.db | Bin 8685 -> 8630 bytes .../legacy_oa_clust/oa-1-big-Digest.crc32 | 2 +- .../legacy_oa_clust/oa-1-big-Index.db | Bin 157553 -> 157553 bytes .../legacy_oa_clust/oa-1-big-Statistics.db | Bin 7304 -> 7312 bytes .../oa-1-big-CompressionInfo.db | Bin 199 -> 199 bytes .../legacy_oa_clust_counter/oa-1-big-Data.db | Bin 7346 -> 7718 bytes .../oa-1-big-Digest.crc32 | 2 +- .../legacy_oa_clust_counter/oa-1-big-Index.db | Bin 157553 -> 157553 bytes .../oa-1-big-Statistics.db | Bin 7313 -> 7321 bytes .../legacy_oa_simple/oa-1-big-Data.db | Bin 88 -> 88 bytes .../legacy_oa_simple/oa-1-big-Digest.crc32 | 2 +- .../legacy_oa_simple/oa-1-big-Statistics.db | Bin 4814 -> 4822 bytes .../legacy_oa_simple_counter/oa-1-big-Data.db | Bin 137 -> 141 bytes .../oa-1-big-Digest.crc32 | 2 +- .../oa-1-big-Statistics.db | Bin 4823 -> 4831 bytes .../format/ForwardingSSTableReader.java | 6 +++++ .../db/compaction/ShardManagerTest.java | 1 + .../unified/ShardedCompactionWriterTest.java | 2 ++ .../unified/ShardedMultiWriterTest.java | 5 ++++ .../metadata/MetadataSerializerTest.java | 2 ++ 57 files changed, 160 insertions(+), 21 deletions(-) diff --git a/src/java/org/apache/cassandra/db/compaction/ShardManager.java b/src/java/org/apache/cassandra/db/compaction/ShardManager.java index a58be340a97a..a975da05a747 100644 --- a/src/java/org/apache/cassandra/db/compaction/ShardManager.java +++ b/src/java/org/apache/cassandra/db/compaction/ShardManager.java @@ -96,7 +96,12 @@ static Range coveringRange(PartitionPosition first, PartitionPosition las */ default double rangeSpanned(SSTableReader rdr) { - double span = rangeSpanned(rdr.getFirst(), rdr.getLast()); + double reported = rdr.tokenSpaceCoverage(); + double span; + if (reported > 0) // also false for NaN + span = reported; + else + span = rangeSpanned(rdr.getFirst(), rdr.getLast()); if (span >= MINIMUM_TOKEN_COVERAGE) return span; diff --git 
a/src/java/org/apache/cassandra/db/compaction/ShardManagerDiskAware.java b/src/java/org/apache/cassandra/db/compaction/ShardManagerDiskAware.java index 3fff8beaf39c..f6c74314c729 100644 --- a/src/java/org/apache/cassandra/db/compaction/ShardManagerDiskAware.java +++ b/src/java/org/apache/cassandra/db/compaction/ShardManagerDiskAware.java @@ -24,6 +24,7 @@ import javax.annotation.Nullable; import org.apache.cassandra.db.ColumnFamilyStore; +import org.apache.cassandra.db.PartitionPosition; import org.apache.cassandra.dht.Range; import org.apache.cassandra.dht.Splitter; import org.apache.cassandra.dht.Token; @@ -212,6 +213,11 @@ public double fractionInShard(Range targetSpan) return inShardSize / totalSize; } + public double rangeSpanned(PartitionPosition first, PartitionPosition last) + { + return ShardManagerDiskAware.this.rangeSpanned(first, last); + } + public int shardIndex() { return nextShardIndex - 1; diff --git a/src/java/org/apache/cassandra/db/compaction/ShardManagerNoDisks.java b/src/java/org/apache/cassandra/db/compaction/ShardManagerNoDisks.java index 618c27d1d7fc..0d2c10ef5288 100644 --- a/src/java/org/apache/cassandra/db/compaction/ShardManagerNoDisks.java +++ b/src/java/org/apache/cassandra/db/compaction/ShardManagerNoDisks.java @@ -23,6 +23,7 @@ import javax.annotation.Nullable; import org.apache.cassandra.db.ColumnFamilyStore; +import org.apache.cassandra.db.PartitionPosition; import org.apache.cassandra.dht.Range; import org.apache.cassandra.dht.Splitter; import org.apache.cassandra.dht.Token; @@ -192,6 +193,12 @@ public double fractionInShard(Range targetSpan) return inShardSize / totalSize; } + @Override + public double rangeSpanned(PartitionPosition first, PartitionPosition last) + { + return ShardManagerNoDisks.this.rangeSpanned(first, last); + } + @Override public int shardIndex() { diff --git a/src/java/org/apache/cassandra/db/compaction/ShardManagerTrivial.java b/src/java/org/apache/cassandra/db/compaction/ShardManagerTrivial.java index 
7678ca1a230c..d6192cb52fa0 100644 --- a/src/java/org/apache/cassandra/db/compaction/ShardManagerTrivial.java +++ b/src/java/org/apache/cassandra/db/compaction/ShardManagerTrivial.java @@ -20,6 +20,7 @@ import java.util.Set; +import org.apache.cassandra.db.PartitionPosition; import org.apache.cassandra.dht.IPartitioner; import org.apache.cassandra.dht.Range; import org.apache.cassandra.dht.Token; @@ -111,6 +112,12 @@ public double fractionInShard(Range targetSpan) return 1; } + @Override + public double rangeSpanned(PartitionPosition first, PartitionPosition last) + { + return 1; + } + @Override public int shardIndex() { diff --git a/src/java/org/apache/cassandra/db/compaction/ShardTracker.java b/src/java/org/apache/cassandra/db/compaction/ShardTracker.java index 6f8be3237e86..46b20638dbd4 100644 --- a/src/java/org/apache/cassandra/db/compaction/ShardTracker.java +++ b/src/java/org/apache/cassandra/db/compaction/ShardTracker.java @@ -21,9 +21,11 @@ import java.util.Set; import javax.annotation.Nullable; +import org.apache.cassandra.db.PartitionPosition; import org.apache.cassandra.dht.Range; import org.apache.cassandra.dht.Token; import org.apache.cassandra.io.sstable.format.SSTableReader; +import org.apache.cassandra.io.sstable.format.SSTableWriter; public interface ShardTracker { @@ -50,6 +52,8 @@ public interface ShardTracker */ double fractionInShard(Range targetSpan); + double rangeSpanned(PartitionPosition first, PartitionPosition last); + int shardIndex(); default long shardAdjustedKeyCount(Set sstables) @@ -60,4 +64,10 @@ default long shardAdjustedKeyCount(Set sstables) shardAdjustedKeyCount += sstable.estimatedKeys() * fractionInShard(ShardManager.coveringRange(sstable)); return shardAdjustedKeyCount; } + + default void applyTokenSpaceCoverage(SSTableWriter writer) + { + if (writer.getFirst() != null) + writer.setTokenSpaceCoverage(rangeSpanned(writer.getFirst(), writer.getLast())); + } } diff --git 
a/src/java/org/apache/cassandra/db/compaction/unified/ShardedCompactionWriter.java b/src/java/org/apache/cassandra/db/compaction/unified/ShardedCompactionWriter.java index c7c1eb92339c..28076929aa8c 100644 --- a/src/java/org/apache/cassandra/db/compaction/unified/ShardedCompactionWriter.java +++ b/src/java/org/apache/cassandra/db/compaction/unified/ShardedCompactionWriter.java @@ -99,4 +99,11 @@ private static long shardAdjustedKeyCount(ShardTracker boundaries, // Note: computationally non-trivial; can be optimized if we save start/stop shards and size per table. return Math.round(boundaries.shardAdjustedKeyCount(sstables) * survivalRatio); } + + @Override + protected void doPrepare() + { + sstableWriter.forEachWriter(boundaries::applyTokenSpaceCoverage); + super.doPrepare(); + } } \ No newline at end of file diff --git a/src/java/org/apache/cassandra/db/compaction/unified/ShardedMultiWriter.java b/src/java/org/apache/cassandra/db/compaction/unified/ShardedMultiWriter.java index aae606691c45..c2fe1babe3b5 100644 --- a/src/java/org/apache/cassandra/db/compaction/unified/ShardedMultiWriter.java +++ b/src/java/org/apache/cassandra/db/compaction/unified/ShardedMultiWriter.java @@ -126,7 +126,7 @@ private long forSplittingKeysBy(long splits) { } @Override - public boolean append(UnfilteredRowIterator partition) + public void append(UnfilteredRowIterator partition) { DecoratedKey key = partition.partitionKey(); @@ -142,7 +142,7 @@ public boolean append(UnfilteredRowIterator partition) writers[++currentWriter] = createWriter(); } - return writers[currentWriter].append(partition) != null; + writers[currentWriter].append(partition); } @Override @@ -151,7 +151,10 @@ public Collection finish(boolean openResult) List sstables = new ArrayList<>(writers.length); for (SSTableWriter writer : writers) if (writer != null) + { + boundaries.applyTokenSpaceCoverage(writer); sstables.add(writer.finish(openResult)); + } return sstables; } @@ -235,8 +238,11 @@ public void 
prepareToCommit() { for (SSTableWriter writer : writers) if (writer != null) + { + boundaries.applyTokenSpaceCoverage(writer); writer.prepareToCommit(); } + } @Override public void close() diff --git a/src/java/org/apache/cassandra/io/sstable/RangeAwareSSTableWriter.java b/src/java/org/apache/cassandra/io/sstable/RangeAwareSSTableWriter.java index b52b2b3137b3..422c6eaa6eb7 100644 --- a/src/java/org/apache/cassandra/io/sstable/RangeAwareSSTableWriter.java +++ b/src/java/org/apache/cassandra/io/sstable/RangeAwareSSTableWriter.java @@ -101,10 +101,10 @@ private void maybeSwitchWriter(DecoratedKey key) } } - public boolean append(UnfilteredRowIterator partition) + public void append(UnfilteredRowIterator partition) { maybeSwitchWriter(partition.partitionKey()); - return currentWriter.append(partition); + currentWriter.append(partition); } @Override diff --git a/src/java/org/apache/cassandra/io/sstable/SSTableMultiWriter.java b/src/java/org/apache/cassandra/io/sstable/SSTableMultiWriter.java index 0a1495c43abc..9a7968071b25 100644 --- a/src/java/org/apache/cassandra/io/sstable/SSTableMultiWriter.java +++ b/src/java/org/apache/cassandra/io/sstable/SSTableMultiWriter.java @@ -34,7 +34,7 @@ public interface SSTableMultiWriter extends Transactional * @param partition the partition to append * @return true if the partition was written, false otherwise */ - boolean append(UnfilteredRowIterator partition); + void append(UnfilteredRowIterator partition); Collection finish(boolean openResult); Collection finished(); diff --git a/src/java/org/apache/cassandra/io/sstable/SSTableTxnWriter.java b/src/java/org/apache/cassandra/io/sstable/SSTableTxnWriter.java index e917107cc4f0..8bdc1a19172c 100644 --- a/src/java/org/apache/cassandra/io/sstable/SSTableTxnWriter.java +++ b/src/java/org/apache/cassandra/io/sstable/SSTableTxnWriter.java @@ -50,9 +50,9 @@ public SSTableTxnWriter(LifecycleTransaction txn, SSTableMultiWriter writer) this.writer = writer; } - public boolean 
append(UnfilteredRowIterator iterator) + public void append(UnfilteredRowIterator iterator) { - return writer.append(iterator); + writer.append(iterator); } public String getFilename() diff --git a/src/java/org/apache/cassandra/io/sstable/SSTableZeroCopyWriter.java b/src/java/org/apache/cassandra/io/sstable/SSTableZeroCopyWriter.java index f6febf834126..6d088dfd2dab 100644 --- a/src/java/org/apache/cassandra/io/sstable/SSTableZeroCopyWriter.java +++ b/src/java/org/apache/cassandra/io/sstable/SSTableZeroCopyWriter.java @@ -116,7 +116,7 @@ private void write(DataInputPlus in, long size, SequentialWriter out) throws FSW } @Override - public boolean append(UnfilteredRowIterator partition) + public void append(UnfilteredRowIterator partition) { throw new UnsupportedOperationException(); } diff --git a/src/java/org/apache/cassandra/io/sstable/SimpleSSTableMultiWriter.java b/src/java/org/apache/cassandra/io/sstable/SimpleSSTableMultiWriter.java index 8e613180f77d..f5f6a443e39c 100644 --- a/src/java/org/apache/cassandra/io/sstable/SimpleSSTableMultiWriter.java +++ b/src/java/org/apache/cassandra/io/sstable/SimpleSSTableMultiWriter.java @@ -44,10 +44,9 @@ protected SimpleSSTableMultiWriter(SSTableWriter writer, LifecycleNewTracker lif this.writer = writer; } - public boolean append(UnfilteredRowIterator partition) + public void append(UnfilteredRowIterator partition) { - AbstractRowIndexEntry indexEntry = writer.append(partition); - return indexEntry != null; + writer.append(partition); } public Collection finish(boolean openResult) diff --git a/src/java/org/apache/cassandra/io/sstable/format/SSTableReader.java b/src/java/org/apache/cassandra/io/sstable/format/SSTableReader.java index 4c8b69991402..45496ff4e8d5 100644 --- a/src/java/org/apache/cassandra/io/sstable/format/SSTableReader.java +++ b/src/java/org/apache/cassandra/io/sstable/format/SSTableReader.java @@ -843,6 +843,19 @@ public long uncompressedLength() return dfile.dataLength(); } + /** + * @return the fraction 
of the token space for which this sstable has content. In the simplest case this is just the + * size of the interval returned by {@link #getBounds()}, but the sstable may contain "holes" when the locally-owned + * range is not contiguous (e.g. with vnodes). + * As this is affected by the local ranges which can change, the token space fraction is calculated at the time of + * writing the sstable and stored with its metadata. + * For older sstables that do not contain this metadata field, this method returns NaN. + */ + public double tokenSpaceCoverage() + { + return sstableMetadata.tokenSpaceCoverage; + } + /** * The length in bytes of the on disk size for this SSTable. For compressed files, this is not the same thing * as the data length (see {@link #uncompressedLength()}). diff --git a/src/java/org/apache/cassandra/io/sstable/format/SSTableWriter.java b/src/java/org/apache/cassandra/io/sstable/format/SSTableWriter.java index 255e1cd04ae9..8eb92cbf2ec8 100644 --- a/src/java/org/apache/cassandra/io/sstable/format/SSTableWriter.java +++ b/src/java/org/apache/cassandra/io/sstable/format/SSTableWriter.java @@ -170,6 +170,12 @@ public void setMaxDataAge(long maxDataAge) this.maxDataAge = maxDataAge; } + public SSTableWriter setTokenSpaceCoverage(double rangeSpanned) + { + metadataCollector.tokenSpaceCoverage(rangeSpanned); + return this; + } + public void setOpenResult(boolean openResult) { txnProxy.openResult = openResult; diff --git a/src/java/org/apache/cassandra/io/sstable/format/Version.java b/src/java/org/apache/cassandra/io/sstable/format/Version.java index d2b73778e5d0..3852888d114c 100644 --- a/src/java/org/apache/cassandra/io/sstable/format/Version.java +++ b/src/java/org/apache/cassandra/io/sstable/format/Version.java @@ -89,6 +89,14 @@ protected Version(SSTableFormat format, String version) public abstract boolean hasImprovedMinMax(); + /** + * If the sstable has token space coverage data. 
+ */ + public abstract boolean hasTokenSpaceCoverage(); + + /** + * Records in th stats if the sstable has any partition deletions. + */ public abstract boolean hasPartitionLevelDeletionsPresenceMarker(); public abstract boolean hasKeyRange(); diff --git a/src/java/org/apache/cassandra/io/sstable/format/big/BigFormat.java b/src/java/org/apache/cassandra/io/sstable/format/big/BigFormat.java index 13622d86902f..e1cb14b90553 100644 --- a/src/java/org/apache/cassandra/io/sstable/format/big/BigFormat.java +++ b/src/java/org/apache/cassandra/io/sstable/format/big/BigFormat.java @@ -367,6 +367,7 @@ static class BigVersion extends Version // nb (4.0.0): originating host id // nc (4.1): improved min/max, partition level deletion presence marker, key range (CASSANDRA-18134) // oa (5.0): Long deletionTime to prevent TTL overflow + // token space coverage // // NOTE: When adding a new version: // - Please add it to LegacySSTableTest @@ -387,6 +388,7 @@ static class BigVersion extends Version private final boolean hasPartitionLevelDeletionPresenceMarker; private final boolean hasKeyRange; private final boolean hasUintDeletionTime; + private final boolean hasTokenSpaceCoverage; /** * CASSANDRA-9067: 4.0 bloom filter representation changed (two longs just swapped) @@ -416,6 +418,7 @@ static class BigVersion extends Version hasPartitionLevelDeletionPresenceMarker = version.compareTo("nc") >= 0; hasKeyRange = version.compareTo("nc") >= 0; hasUintDeletionTime = version.compareTo("oa") >= 0; + hasTokenSpaceCoverage = version.compareTo("oa") >= 0; } @Override @@ -496,6 +499,12 @@ public boolean hasImprovedMinMax() return hasImprovedMinMax; } + @Override + public boolean hasTokenSpaceCoverage() + { + return hasTokenSpaceCoverage; + } + @Override public boolean hasPartitionLevelDeletionsPresenceMarker() { diff --git a/src/java/org/apache/cassandra/io/sstable/format/bti/BtiFormat.java b/src/java/org/apache/cassandra/io/sstable/format/bti/BtiFormat.java index 8a240d3e3b2d..0e9c447dc5f7 
100644 --- a/src/java/org/apache/cassandra/io/sstable/format/bti/BtiFormat.java +++ b/src/java/org/apache/cassandra/io/sstable/format/bti/BtiFormat.java @@ -396,6 +396,12 @@ public boolean hasImprovedMinMax() { return true; } + @Override + public boolean hasTokenSpaceCoverage() + { + return true; + } + @Override public boolean hasPartitionLevelDeletionsPresenceMarker() { diff --git a/src/java/org/apache/cassandra/io/sstable/metadata/MetadataCollector.java b/src/java/org/apache/cassandra/io/sstable/metadata/MetadataCollector.java index 409b4e317b75..7b841c7cd8de 100644 --- a/src/java/org/apache/cassandra/io/sstable/metadata/MetadataCollector.java +++ b/src/java/org/apache/cassandra/io/sstable/metadata/MetadataCollector.java @@ -95,6 +95,7 @@ public static StatsMetadata defaultStatsMetadata() ActiveRepairService.UNREPAIRED_SSTABLE, -1, -1, + Double.NaN, null, null, false, @@ -128,7 +129,6 @@ public static StatsMetadata defaultStatsMetadata() * be a corresponding end bound that is bigger). */ private ClusteringPrefix maxClustering = ClusteringBound.MIN_END; - private boolean clusteringInitialized = false; protected boolean hasLegacyCounterShards = false; private boolean hasPartitionLevelDeletions = false; @@ -136,6 +136,8 @@ public static StatsMetadata defaultStatsMetadata() protected long totalRows; public int totalTombstones; + protected double tokenSpaceCoverage = Double.NaN; + /** * Default cardinality estimation method is to use HyperLogLog++. 
* Parameter here(p=13, sp=25) should give reasonable estimation @@ -291,6 +293,12 @@ public MetadataCollector sstableLevel(int sstableLevel) return this; } + public MetadataCollector tokenSpaceCoverage(double coverage) + { + tokenSpaceCoverage = coverage; + return this; + } + public void updateClusteringValues(Clustering clustering) { if (clustering == Clustering.STATIC_CLUSTERING) @@ -371,6 +379,7 @@ public Map finalizeMetadata(String partitioner, repairedAt, totalColumnsSet, totalRows, + tokenSpaceCoverage, originatingHostId, pendingRepair, isTransient, diff --git a/src/java/org/apache/cassandra/io/sstable/metadata/StatsMetadata.java b/src/java/org/apache/cassandra/io/sstable/metadata/StatsMetadata.java index ab43533ffe0e..b509e3f3ae6d 100644 --- a/src/java/org/apache/cassandra/io/sstable/metadata/StatsMetadata.java +++ b/src/java/org/apache/cassandra/io/sstable/metadata/StatsMetadata.java @@ -73,6 +73,7 @@ public class StatsMetadata extends MetadataComponent public final int sstableLevel; public final Slice coveredClustering; public final boolean hasLegacyCounterShards; + public final double tokenSpaceCoverage; public final long repairedAt; public final long totalColumnsSet; public final long totalRows; @@ -117,6 +118,7 @@ public StatsMetadata(EstimatedHistogram estimatedPartitionSize, long repairedAt, long totalColumnsSet, long totalRows, + double tokenSpaceCoverage, UUID originatingHostId, TimeUUID pendingRepair, boolean isTransient, @@ -142,6 +144,7 @@ public StatsMetadata(EstimatedHistogram estimatedPartitionSize, this.repairedAt = repairedAt; this.totalColumnsSet = totalColumnsSet; this.totalRows = totalRows; + this.tokenSpaceCoverage = tokenSpaceCoverage; this.originatingHostId = originatingHostId; this.pendingRepair = pendingRepair; this.isTransient = isTransient; @@ -200,6 +203,7 @@ public StatsMetadata mutateLevel(int newLevel) repairedAt, totalColumnsSet, totalRows, + tokenSpaceCoverage, originatingHostId, pendingRepair, isTransient, @@ -228,6 +232,7 
@@ public StatsMetadata mutateRepairedMetadata(long newRepairedAt, TimeUUID newPend newRepairedAt, totalColumnsSet, totalRows, + tokenSpaceCoverage, originatingHostId, newPendingRepair, newIsTransient, @@ -261,6 +266,7 @@ public boolean equals(Object o) .append(hasLegacyCounterShards, that.hasLegacyCounterShards) .append(totalColumnsSet, that.totalColumnsSet) .append(totalRows, that.totalRows) + .append(tokenSpaceCoverage, that.tokenSpaceCoverage) .append(originatingHostId, that.originatingHostId) .append(pendingRepair, that.pendingRepair) .append(hasPartitionLevelDeletions, that.hasPartitionLevelDeletions) @@ -290,6 +296,7 @@ public int hashCode() .append(hasLegacyCounterShards) .append(totalColumnsSet) .append(totalRows) + .append(tokenSpaceCoverage) .append(originatingHostId) .append(pendingRepair) .append(hasPartitionLevelDeletions) @@ -375,6 +382,11 @@ else if (version.hasImprovedMinMax()) size += ByteBufferUtil.serializedSizeWithVIntLength(component.lastKey); } + if (version.hasTokenSpaceCoverage()) + { + size += Double.BYTES; + } + return size; } @@ -493,6 +505,11 @@ else if (version.hasImprovedMinMax()) ByteBufferUtil.writeWithVIntLength(component.firstKey, out); ByteBufferUtil.writeWithVIntLength(component.lastKey, out); } + + if (version.hasTokenSpaceCoverage()) + { + out.writeDouble(component.tokenSpaceCoverage); + } } private void serializeImprovedMinMax(Version version, StatsMetadata component, DataOutputPlus out) throws IOException @@ -632,6 +649,12 @@ else if (version.hasImprovedMinMax()) lastKey = ByteBufferUtil.readWithVIntLength(in); } + double tokenSpaceCoverage = Double.NaN; + if (version.hasTokenSpaceCoverage()) + { + tokenSpaceCoverage = in.readDouble(); + } + return new StatsMetadata(partitionSizes, columnCounts, commitLogIntervals, @@ -650,6 +673,7 @@ else if (version.hasImprovedMinMax()) repairedAt, totalColumnsSet, totalRows, + tokenSpaceCoverage, originatingHostId, pendingRepair, isTransient, diff --git 
a/src/java/org/apache/cassandra/tools/SSTableMetadataViewer.java b/src/java/org/apache/cassandra/tools/SSTableMetadataViewer.java index aa5cfbff25cd..256c80d26903 100644 --- a/src/java/org/apache/cassandra/tools/SSTableMetadataViewer.java +++ b/src/java/org/apache/cassandra/tools/SSTableMetadataViewer.java @@ -392,6 +392,7 @@ private void printSStableMetadata(File file, boolean scan) throws IOException String::valueOf, String::valueOf); cellCount.printHistogram(out, color, unicode); + field("Local token space coverage", stats.tokenSpaceCoverage); } if (compaction != null) { diff --git a/test/data/legacy-sstables/da/legacy_tables/legacy_da_clust/da-1-bti-CompressionInfo.db b/test/data/legacy-sstables/da/legacy_tables/legacy_da_clust/da-1-bti-CompressionInfo.db index db075a863fdc91d3e538bd407031c46389d46942..4f8f029e9a0d7095f01b6230c1ca3c8d32f83ab4 100644 GIT binary patch literal 207 zcmZSJ^@%cZ&d)6yA+)eQgcf-Pp~a>{XbF1=E%_TlOK*VCvY`-K K?jM9!SOx%bKo!OS literal 207 zcmZSJ^@%cZ&d)6nI{gVr7D<GN3ATjs|Q2v%YD67YYCPeF3wy}FR=Km zO5JpDwWp*w7hub1Fr6l*+~yn8G|FWruI>osH_C1kQQ0sHuk@Ii9pedP-KALkhU^dW zFiqEx&)OiXdrFV_A%X3x7dq~gNT?SRs@V7jw(23iffxJJLzr&$#w^-KC;K+%Bp2b) z4idrJquT#uImy1kN$Nz%%X(ja3A=ufOSVPY!<2u75Lw5HJU)I`=1I$ik;>lr0JW8g z!3a*o577B*F%y^szBLY@%glWDG(fkRdH4uGkC}OV8-c9NL6di}?{fGnKW!!v6o<{s z{CqB*Ho03b){pynew=yGox!y+r_5%OhEC!7V0tLiXxNbo-!0-?5L z=qK1&3;iaN;Ts9C<5tdojebOU&kq9Bl3XHr!LUNPrHWLeH zAw@hUqVf}-4O4%q>(8AAaU4*>-0?w73^g8PzI*D z^C%CwcT6JWT?J*<1~w%Dl>K!8z^?=_4)rj~Ab+hTl&@^>2k4k& ziI0XDFYpthtxGI`KzZk|k_K;&-D#%2?nkD%OjKnb7v61V9)R$&$ILv`>$4X5xre`! 
znG=C_`b|Hko12o8HoigEU~Azc+kVJt@L4^IY7?RDR0gP;Q@G`eEX8NDzNqbAZePavY`i{M&n}oAGv3_XmMVv)HAzZ)jnt>k@o*z%Z zZpSJd!c**l+|81gd|~lu9$SZfnq@Ql8?xvWvdG(4@)Dw;t`_v~GB-=1d~cr`B9!$q z9vW}oBtJ-6TXuXbQsDY=0G-K^NP(MA19T;40eIUHfNnE$<~D#HGjk3diAENN>3_tQ z=I+4gs_DXOZnx}Ud$4lYpL|~kMU1Sb!_T|o8BN1aaAi=>L>WUr_8;ymzVMQC>60~k zve*j+8U_&vgWYfi?&pU$#ce_zplW7kXb%E(CSQx7t)2xi#clqKBcAFuPY(f1ahqob z2xRT#`GYK!Ev)YFT7%Vedc#W(ddXMHxUX#WdczNuDWnLD=Q%$Y;OrHPJGms>V>m>T zyw6j`8(z_oCaL0i812@QEZ%eNw0Nz`njm)8mqTapmdu^3U!o&N@Z+;$hNB`aU^vp6 zeA)IyqFaOC^c^q1nBPPS3xwaS%tgzGCXRO%eV^J={3bG*3!+AI;!PmVL*yzblG+1~ z>j!A)0U>?SO8`?^!0{UZnp;4{HXmT32RtU4f$kZTR&d#j&A=xjk0KsVdZ^Z8Pq{8k zMm1vlvj-V3=sT1fPNQt7Hp5keiq=g>gy$qLKPZoVpNsOS+LLUaoROQ3;kCkXI=zuw z!ibn-e|G%pB=s_HWX5)&T5>F8xbrc9PV-ge_bmWjX6D`H0NrM0r467b!TeBk&)4Q; zy$^BpRi*IbNTr%4dZ>xA>p|V6jsZcaz7irhXfJ@s;t3?ncL>WTPd+mk^CiI`&yUm} z#Lm??;EOcvCLm!(kycRzW11*S+d+?+ewVxwPchH$R356sW5V)R%*LZ9zM7R&>5(NZ z-?FzL<@34r+gFfG4Yp*O-kT0(Kba&~Ve6h>}!@ms1LC?gq)3%X_k7Mq|+o zY~2Xdl`?euGN5k5Jb2*?yzF>VxI0OfMwX*tni3}ERe|~nWi?&x#r3As+jqVr<~ei3 zSD2Y{;BBr>%2$|cg~>!L?P(&8J3`k+y@-9Ct=GUytaY>z*>g#TUc>IEJEmR)G;cPx zH}z6_{#k-Ld}_>g&=m>Wd7H_$vB{zEok;KLi~7!>Bzz|W*OwxuW$HEDcp}lGsmAlg zXHZMMH7B_yQue<*9UW1zevF4OUyCFhmRtm(^0zBBe%DDM>p_0miILwE-NCwk??^74 zsXNdo#EeF-PY>mK?CwJuy6#}p=F0G_Iy)hcizPll zqr;{uUm~=MUbiewv{F3>E&4>}Lqg5P21^zoWCc08BHDr~Wujj=l7!kfmjyHRiKt&H zRUa7Bzmwy+8V9z={K>!Pc`v67}$nyZ`uy@72Tc!l|NnG^dG7jssE&o6J*zSNWN9 zp^0Y8;-|BUHDncI=d-7CO|-Y9;y9aYa&T?T*9?52$s8M^q8=~|fF^UOnV?KD(DC~6 zD+x{bS`;fGO@?JBCd0XyKM2&7GPL#}P`67HN9MxL++j&(D%Ujt zJNtk>=#a_9*-NuzpaXQ{X<++DQby=O=Cu|nA72@BIBx#@Vl}K#w!M>l8yqfrfBnTA zF1jVekBR+OUI+t>rjgz(@;V(u^ZswvHFH5ws`dDfFdv2QKoZU4N6|aG{W$I(Fl~+$=EAH1U z`WQ9=1+xL@o+Cg#DcrtZLfN6qiPeZgDl2bn6Q*VyPO*IBd9H;&BD{Vlz+8gcE??)ma&!gGUeTJpqgPUR(g0kP-n_YkMRoNig9C?rdgM+d5?=^ zzM}+V8XZfUJCu72by+QJCG`}%bvTX^C3|+^nO#SJ^UeX^zZ%(E>NJAvDLS?MynZq$ ze65#QGPBX&0YvFDs#NebMlTMq5Pn1st}ck#nt?6^`H~kgwZOn9UB7%4Qwmq>QqF9~ zvNVZ4WyGf<&bd0EPUA>g!1>ugU50ri>Y?^orUG`y$!q+)fuDWH!mqhd{GV7kL?v*M 
z&}pBwI8AcN;Te@d=W_+JhUZe=2-*$%U5>~Oii$GqoUpW4DUOt2C7G076r+LxE{9f_ z91ly2J~|Y$Gy|XOv2SEQ0;L)zM4Pd1o108CQ?#4ReSG9dtZ3M|l>q;z^)-Y}v zP*=*(qWM7GhI#P9@w_&A65LPx{PXJV3!0;kj-!A4j{S!OGOK*4R@l_O3F-2QNW07u zWDjvW;wfUI;!U~We4Sr=Wi~M@z=Qw90Bp)N!V-wHMJlZC=cZ<$XLzmhe(q@oGVlW8r5cxL%|@v(am<_2Yj>SSI+lulql|M&qXd literal 8746 zcmd5?ZE#f88NQojlMsaylmNz#7nz78RrcfF-MvA~F4hkyD2w$2Yc2U8BC0oDbOnN3zOT?VGOw#?EzQ85x9+aAhmV&PIOz9aknF_`8_ zzsDR9?FULvcp-tcc~Si>0txlUgeq3Pfn92dZ=hpQdI%G?-4=x~$QVBUmCRG73ll6m^8w23iNOfY z?sEVgw`((jN#M)-06LA#S33Z@jLd_z02L!M<|UAAlxXtZ?Asjv$hQ>|!6I3W%pyoPIIOqouShECzLX`)QGWSf_%s(aaavpTFv@b?h}!oD#>9cEvc z=sPVLj=u|7n$Ov9(3c4B$vpt&Bp2ZD7Jv>T^CO-iP9yV7IhZaZ^V2y16(jR=D}ikG zjjH+ryI@v7!8~7H1tDbX<_z@{c7ayf3H1DFF2uni(oEg4m}33>Y@YG*0m`LeFRxzv zVmI8##VJjwLy7a8x*mrxY8pt7wCR@Zb22DvdT4K_xC&8o4}v4LkqW2SIr` zPI3W`_XBhonO7mbM5mE?Z9YJkky&&Opkid+&`%({Fim}qp@5yrK9_;1towM#-Gw~l z3n=8fFH8uC;hQypG${MS(3-peyD7ma>;4*yQUGVXsAaZ{S&!YpH5qjnt1VA>N?k|k zQucaBdMFZBOdaec)T=L1$I1?@FEjgJFbO7GD`D`-#oQ)$q6mG-va_}kjYXBvU1mSN zG1yMDakA!6!R&4EhiW2wyl!q$9&S9l7PzCU1gK#@jw;9>YYOFA)(rrZW|~GvLyHak zfXKf|EC66TKOhbx^NB?m?ldx=nGVopWOiz2!N}Y-eNi=wU-AsUu;6^N$x_P=uZaILziyZmb3<*0lim`M-z!YyPQwU`1s6#l(nzPk?5aO08I%Dc`NAHig%>h=% zT?l3M!9Dm4o7&~&*P8e4Iql_=z>qW_qQ2k+&lGpGDwrOMgq2^QIa`&*JGA{smYQuM!Q*WqKv#0i4G>B;V zxIOhQq$uJBm9VGYHY9;)@5>o&8If*}Q5Ia(Fh0rsC8c5P$Oki}VSM%+z?6pZd49wa z4dZ)WuG92}SN_3Oi(gGix;)`SEsc72d^f-N-pX6IE#+>p8%e+pWuIm|kb5nADFFgx zPP%&uh4JNDu7W*zQ-ZMfz8E*BBw0(%SG{=Zs5$)!Ng+yyvPW-OWKNcYlogbc$&pBv zSS~<^@lx~F2tcQid1w%z%gFp&1fY^&4r`a7^S<+TD5JK+yL*psNsk2TwkOhC;TLAL z12H%egaA@XcW|OOOwi z{#47MtdSQcU6eyqGt_BXn-DX#r-yc7`E3}#TFlhkYQasuTtT&u8a3ILYpP~J{#vS- zCqvBKfEd~P;{%keY8X{)%maWW9UbbAosBRF1$cOuJ650-rCi?T6*C%(qV|I!40Waq zozf4~rJEZI76ANs@VOMxp>!?feTU*lQY$N?o}~)VTxD99;9@pGci9W zxhB$e@m!!%pSFIC$C0SA0+laME`m_8rdS98viepV=~e2Xy5Cq3oWqsw%U|bUDR< z0({ZjqghvvP{5}13~T|snko;;VxPn>vMsDFs{jW;DUF+#j;)gwHo9axpBYFt9wJ$Qms$Awb20CaKceO}(@ot?Dkajp}8b;lv{ 
zh2pqOn~T##Cc83VQfDv^QmGufegZ{`<>#mtt%<0%Jatawdx0tm?uS|#+<8ai!i&n_^>KUJucV1a(qW;5&Eh#wjV-=51%k-d zEPgVpSs<$z@SC|@5cw}caf0nOiykVCureK6(<3rModtbkKdc)-iu4U4R+5g7r}sw) zjeFT{%i%rXI4;jlPDLXJwg7dej64eA0b&l@ZUs-$q=N6}=D&MaZMrqM_9%?(k% zLswmQ;Uq7=MQuIOn8B6fzgloeYrE>=>Ur|gL%pzbESJg09vvfUf&bBG@Gw_Y5Sg8; z>O%>;<6Z7-qwIBABG7peINPBKln<((xy?nE1$4vYF9_vU!nmznULabJCZEP_?X++y zTkFfDq6S-=&nuvhVVZY}EN9794HvM!9A4+_8QF){n9j7&fcjnh*bItUZD%wMXRuuc#`1Fvh+vc2rS{Gm&Gu1-u?+Pk`bcEk9 zsfFwYl*$11_ccHzg*tJFPt5q zM)J=d1nSU@#d2?p0CnnS0y>vh+r)k!_wug&rdZorE)@T*gF{pTm&bLQ`%;?dBqxgc^uay%F0Kh)Us+nJ89 z336{G!e`e_$OU--t|Cf0I->fJ73&zio%K~r2FXF|N-UPvP*DifnKBeMO!YuF4<15((?llu3X`90;C94I`m&=u_>MRE zO;=g1-Tqr<+wY2&7*V*^$FFtt0chM|$->UH8uw|SWt M|8rrg3$K6c|49Vg0RR91 diff --git a/test/data/legacy-sstables/da/legacy_tables/legacy_da_clust/da-1-bti-Digest.crc32 b/test/data/legacy-sstables/da/legacy_tables/legacy_da_clust/da-1-bti-Digest.crc32 index 9b7b9f89fa5e..4a59ffcab283 100644 --- a/test/data/legacy-sstables/da/legacy_tables/legacy_da_clust/da-1-bti-Digest.crc32 +++ b/test/data/legacy-sstables/da/legacy_tables/legacy_da_clust/da-1-bti-Digest.crc32 @@ -1 +1 @@ -3501696673 \ No newline at end of file +1178851237 \ No newline at end of file diff --git a/test/data/legacy-sstables/da/legacy_tables/legacy_da_clust/da-1-bti-Rows.db b/test/data/legacy-sstables/da/legacy_tables/legacy_da_clust/da-1-bti-Rows.db index 46defad6a760ff2aeed9393e22f02427facd8f5d..a8301fedae295b786dc166ef92e1586334fc5450 100644 GIT binary patch delta 89 zcmdnYvYBOqmsC{%o1u}hiG`J`rHTLti=qP`KLeuy126CY$=eyT*bNV|Rm0duhj`)Q U#)pMlA>z6GTtEg8G%!E_073s5-2eap delta 89 zcmdnYvYBOqmsDi{o1u}hiG`J`rHTLti=qP`KLeuy121p=-YDHpoZ@#-oOLy{o>utXaI|c)GLyjJphs28<^4 cKOkm`ny)zY=bq#DW0?(;WLj8WS?${i0Ltz`6#xJL delta 160 zcmbPW*}1)F~e9^mwXXk(mt1Src0L`Zxn8)PH{W6hU?r45;$vQvf31+{o@-C|%g iU^MAJnsVpQ&CWG~KMfcdCi4nuO*WB9(h literal 199 zcmZSJ^@%cZ&d)6_75aCljHP;qI zcx#L8fSyg4(!iHY-4NlP=ney{{?Z%ft$S^KFFYRv8Gns~jBHFpjigxj~p(bKi2q 
zc=vEhRYgcISRNe+=_SuFiZy`k2$tU=#_`_aoN5FtSSk^q*yy6o22$4OY5m4Q0On^4 zvA%sDL+X}bE4%tIY}$^nrIvHkmin3MgLhqjoP#xc5{;0~XOeDDRgYlBcLI!D%dD{5 zmN0cB@oK&V(m~f(Ax)~irpD06C$mtv5^+5~o#>Q1-roXG3w0WQMlwd=YmlK*%SwQD#}Uror@0uXncL^EE#k&ufJ zQ1T4tWEN0n)h4w8@(w3WG81WZ6}CKeGM4{?_+%=JZ!&Zi%xPtq*eMU^(Z=pGevQgfeB9& zHsrNC0~pIN_VG#kT$1yN4CQ`5b0F~X#UC<5qV)=_g zA?advva*Kso3RWF8ZF(bBkm5JvPP^TOVSDTEuau>P+#(Bi4AGi{A_|;6{Kzg>!tyM zW z9p-R$4>Z$gjsc^0#zzls$Eac2*=bVCQZ`z*1a)Apw~rSxJ*lRxI!pl5! z&{WXehoWWQ&pJw@_2WW3II(Bjbjcnq@o#wqJB|lp zcGw*^Lg+E#mH7WL3VC26Rx|b$N6Xe5G%h|v&k*|c8K9O=prGVRASw`4uzU&yCAWsf zq1tsM*AVsvB$PY}v@xXwecJ}etD(_i8P`xf*}+#q*yklhxlCM+)fKrG7-xkValLrQvHYKp1Wq9SoX()-T8Jfi6Ii!FJHav+T8VQr zm9-*ZRuNgXEP{A~c_BnWe@K|~x6nv1{R*O3Tbv8xRhEOa?GNF9==^IOA?gwus;cu! zs0dr{fP9i&6A>5E=XI3zP)N(OfjKlx^0d5~pcMk5t;mRauz|J22e5n91LV-A0(xv! z+tAP^HD8v!Fu(nK`SN+xm1oQ&xU#M)=8Dtf(|seRQP0>yAqB=_E#t;Q)fDf0q{J4n zHu)O@UkXzom5@>7$9yZ^n4~gUmK}Q;m?=0&0=hcmImQHBeFg%0Ym0Gpklgbr_0;rK ztL2&=)ChBFcHdC@s9S;>`gk>st`snOSF|Hw^vR7h`p^JI)t+{7wDCb@8BA6YaSg_Z8c&c!U2OfiRFMQHXB@@Sj^kSl>` z4gks+4<4???gXA(3~VWNuC6%lU*eyw_;Xr0&xaTwqUVT;82XdYPC357Y(+8 zqVeegntxJ?Kkf2cbeJl{K(dlT*tO??nQR64v&B9~r=*AmI))W=Vmi*^>YwV_@gYk& zyG=u7XC`=kFQr_a%ZFvDhfw_r-hdPlEV<^PL_zM{)I>uwQ7fY9w8hWNSYN&RO|s_v zWuq|6?APpl;E?lu53FDl2M4*4iM0hQv5JZsVSaKKCj#iXm_ zXPi#Ff}Nd8rt7(Wrt2wWBDD++Rn>LH87w!o?pB0jpRM7dk_GQ$m8{Sk(#Kjx@WIz9 z$X{pUd1o{8L5%~~oYgf16$>%x8?9?9Xy~2+&CvQ%8Y+REFvy|i&4-(ewcpDd@_2qU zcOEYo!K3@zV!7k+ZeiLi5DWR4q}!7|tY&WC1TkI?u9hHUlv~T!n|6@_KHj z=x3@epbOiUbYP~nFc{6@>G+IH(JxWGa8gg>s!Gj4Lsb=hc95Oa#|lYOO*qsXg4@Hi z`zP{&IW$ZP^$%SH{a%JJL(L&r(@bMe!Fm%F* z>Kf=KP(qp0;u1;1qvFZr!%V>ouR}rmj(-VZK8YGcvhJ{WjMP7>DyXZVn*SMUzT}Bh zb+}TFDkbE6FA#5>MTj2P3c>tU8~Vh?37D!=Je_)kfXjUVa{h}GK&`?7h*|~rfI*Tc z>5hmW(YhgO6v4IuY7}*^6rAF@m4<{*Kf!(unwkC%<~*i_r=D540C7C%kkHQb-*(8N87`KOv*P^kw5uXId;k za_#GykX|r+a@d7MYNji>e)g{Kc8^XP!xNAVA|;V-X-2aWPVsXoHKXu?r)j>Gfw zseR^4voLBNtisH8Su+`_F~$<%Gtm?(;}Ot733kCw!E(SH)JrxEg`JX+IF^?AwhYS* 
zHH6T(LCU>)t`KoKO35U3KU_mvUTA!n2z)*;r)D^uh4V>(^^b88L^HKnmIsY(+cpo^NSR15ZCk) zV)X%W#V4X2eL#T2jKAFR6;tT@jyd2>t#BRv*pQZ_>4>qK+p?ozJ zwoclU^*>U<8y8771+!p6n0>nG$ zAtvbCFKYyK+B9rc(O=GCiq6JKlI-*%Iz%Q(Z&a|6H}8YZo~)IiXUl(qRgV7GWP zYUWSb4S2D2WepgnA~-(uMF_XB?x(SRZ{1%8QG1JVbMNu9?~oTC{`eN=#?V)L*@nXO z3Yme@_}&O+)OB{ndWYw8ozg7gJxV!2)x38pDuVF{J}FV>@Jcw@C&bT^lx70MGC=d+ z0*GV(7^OwrpR7`|=vh0koSyPNegoQo6O}T(FBHRn0SLXcofN3*hdnK}=cvN06gMO*S#UbSg*N!Gis@c7n_-P0v|}A9{Y*D> zv-?tF>yc}XNa6{S;{cUDbt-&5B%q03*MXg#e$sWy^gXc!bnBBfu#@M`%=oBwr1u*eq zit*5K%GTRclw5Xjlv}JF?O5N~j8cwo^ymP1DZsPhUCD}`d(gjn<=OH1y{%bZ{VuSY-GRdUQ3(?bOUd@g7@B zgu`8s+m*1D_mRAUCD=@wCa^w`33@XC$dkZ2X&~sYl)6J7!A>G`qZRD$ekOOtj79G- z@nb)1$)mZQQSBqT5ww4&IHtWr9^iI~hv*s;p{;yE`EGpIA1zjnDRjGti06}cQHoSp z;#xXvai%Ke8CDSf;l+mRIdLBR`Lvbx^IDZLy-YI>^wFq76Kl=P)f)@ zdx5w#(;&F-3J5vd2E-j-oWrf5Q|BY-Gn@m-Gn_Qa3?%FP)keu1=g?O5=|uUe4DTmF z+ivxw^-JUM&Lufj)f@6UFS~$;O#dx}wb5S!ZdZRbk$25m%G8gYeD`S17*&7M*b&rU zRTfjLH!8Vncjy5jTiIPdy~C^bX1DOmAr*oXy+AO)C?MuaCRhzhD;kW zG}DGs41k;(9zj literal 7798 zcmaJ`4OEoZnf`_uU`CHzkX6w}zhR`A0j=}@BQ*hSx{<7DAgKu+HwBVom9+`Pm=s-C z(0W|7iHko;$UkTVjZI}pXf&QG!%P!NvzE9XORUjh;<3aE3}_OKoAd4S-fw=snXGtD zh~DQu_rCXe|L*+|iUnbppt96UlGGrm1WC{fTf7#F)n<1%U2f0(2?7x5rLim%6NEfL z)gWxR?t7-8x@6`HligVblg^zJQg5DFc6HO=H2)pwRuZT;l#k4{ndn z)oHcOOl1*+uy{!5Ue=EZxo5*cjV7QI?fsnm64q8>uc`yYzR-P9lN!)Xbu18~Tmq}> zh{1QwyA3&)sFSR&d!wHcwLnS)qILvXCa-HZ;9_fSe}U-Vk$5Rfa0^?ox!jt)TX zB-XJ_1bHw)^dzxPR1>tIA*5k7b4X}C+o7!?D?ai7G2K&&zuGNHtUu=hGcM6%K`%2B zZLjd2)BI|AP?PSy+6*6+RdlKP-*^9sTO;@=e$zNQ^p@$bCG)mxf5{=kJuN+ zrt*kA+kbnn3_TQ%dBtwOpvLyEvEQjj$?9`HSY~*nU-pQ@r5a@#6-^-U!a+Lh@@|;z zOr(W&5>kOo5M7C!%j*bPSqH?O$hlHQ$m&WUoknMLu0CjcoRYCuhOLZ^2Zgt9 zqNe5?+M6MISIENx1@1|VPCwj|!5)tfe$b3T)B7D5t%{ExZ@_5Y3H;om5>(cDvPP)T zQ)jTwh#j?}fC)ZNSfh0ebv1`I!Us+i2)TQLYWwrLDNYeA{4g6FgY6b+~uLUS&xSkWKTlWycCetic zKxbZ$n9}CoqL8vHsN}NEJftLql;}{h0BvuiNF1Js|B22d+OBe1k<|inCDC3n5j2el zK@#hYEP{T338E*7)q0a40|h}aUy^pPCsT0*vhd(ZAf`JJ1ZJBhiS>>bm~mbQ7DRub 
z0BJYh;~7BB8uFD6!1cB+-cQ=iT@Cy&wq|S)KquZ<%6;(TiM5Y&k3?&h+e02niFoAb zI35`;Tov<(&HgiJJ(1mkpT8laeI6KQxEnT5A8~w0<7^MjNx{Dh>8y(|MRX?ddUXWd zrv>CvkZ8%ieuC}~19B&^&U6!0h6$o4iPe9Qprx3=nnWCoQ70ioc}qA?RnoNV0G9t= zO9)b(G(OsdMI$Yt-MmE0we8gppmy1<3;C5M+CLG7m#VnRRdpaKo2P1jhIz1rELl+r zOWHDIS6#Kz&=VF6aX+QWY4WSN!A;*!ll2jwTp#B)HvCjh%qMndQl0sRvS9KvxP-tu zm$E+L__saX}99mHQg%CciH#Rv-fm17yIPH)Q)8v&lPcMLh)_ zL~2L*q-&d;Nf@5jOtTMl$g}zI^Kv$Q-R|o8UN()%e(SL{+`hUEA8wBM&EYOq-C`Nk zRCbYZu2tC1@NR>gN}e+$&MvlBeT%@tGJyHVN)bWdP8ehNq{z(Y30tHGW<24+U%k%1 zAon5_lWY3WXVBC$bO!ed(e)F6k#Z@Q)8z*SkQe>k1JVBSRt2KFVkeCTn=txI-ZS{< z%1Ro&REJTy>G09jB^b>c1V)|2Ru7{K4RR;hoE0;r2A!m*BplYHJBB~fqLtj5lh{gn z8Yu9L-yNYtSo6|jb_#AoQBN5R)aIOW8}NPz@!Kfv-LnPZnC>1?wNNZrZJl88qyJke zznFd5x}lSMiQlIA&;(&^#7obW-`-0jy4skRtoGx=I`$mD8&4s%n--FrCY47j-oxCS zDJ)O3j?g{%}(`M4pH!$Oqhah@-f9yBWqSmX@ni#1k5-WGt6FFiAqXiITVGBDs+;i zh=_Fe@M$Ps)M>LSm~slfTTPX3#L|FWslQMsdY=cU1MwS4o_6!G`DB}i;#JP4-SV5} z=WqZ1x8##CJ@*Iq3|*UhPUMc`j|Cq1<5~GJ<4k;W`E}UHP^_2T;YxU{nLivQEA9=4 zG*jJ)v{T)L{7XN{n#ehQkdTKC;FyV&b9Derk2FCwyCsoxp%RdBIVRlcwf{K&427P{ zkW1+W!{?};#YwW3p~XCYD=PhX``T@a?NA;sTgZ;bF(uYUuH?|rN&uqc1Fk)P31jlG zK*(y~F%OImVko<2K$Ftt6z#GT+Rb0}VRZJI!7ntbmkMdvA?L~$i`JRHmn-DmC6Rpj z>hw7NSu+^R7rSQ{&xkTK;JYZ;ZXU&shG+Y@gVS!DjM6^Lp55lnJeHVb#L zQl4dHRLK3RMu5hREfk}Uo$)0F#AQ!aB@faIs73U&P;j}h{?u9?q?@sZXj9n3SJstd z<@~LfBH9&9vUO51LEABbMVo_vN+yl%GGV$?wq8CORL|jQ`ywWYuB4Uj7$DZmHvqX6 z2Ee?K;Mbod@bzHKTWv-DJTHy40)YpQ$;nc=^%MDi5YH9Bzp`i?#p1?reU!^vCq{O9VD84rpc#6w4+O0rWd7rDl`ed1D8;((Yp#eD!YV# zc_k^^53*3YH1Pewi>6@ z8U~>?U9H_)Rnj8%bE?xz=xUAswAoi>%#*}?@=KhHIVNN7{m3-wUh|qyNxG52Kj~C> zquMNr#Yl)}DMyXh;2|`G=M`o^OUHvzS*af=h;pi}Zv{2~n&}ng3XhH)1P2?&z&M4?8wJn24<3r*Ckvqx=GOgF~4vF zfGHcPk7j`P=P4G1Yfg}uF9!h`r(%NHYgfgWgO;B8RJTUD5UKR6i1TDEUk>546X-Hy zw<+wvny>fLnP&tcL-bA2$_vIU6b#XEJxyuA{$`qN=m6r3FGCu?LcpDXMOS=pI_JSn zq`o^on+D&hrs)L@3hljRG&FBjq;i$WDPYO1FZ_Q}z#S34YyLRm+e%~NJ1h;jpWemj z(N{(W9L>dU2A5}mf9>@;b`!uJqe-YM;sOMwY2>Kf0~AzQ%s=Y z&+bt=$K6^eVOKC|)4*p0Eezv`at~-XpY+ktPx~?3`7B~UvCt<62`cF(Ns|`p+euJa 
z6Ck&8`_Vb({z`(D)&cS)vCc0hXc;E3hwPGcH-($@usLYKu<=oV(Jw^p=3*_P$u#oE z(K(`b74GS$SRLQ52#T)wD}FSkjz$`7Tb^R00H%K!nTwHv;TqWoxC9VKYbykg5wu)3u$zs zi3p9w_(1Pe5Gbh%Xud0HKFgx{&zX=r`wFrC1MzLaNopwg$rfMDkn;SW#8U6 z;ik-3qF6l}RFVf5ek}YjSvUD6Y+%1gljieqk(QgM4q$)7?i+p`ey4zfkE=iCvR|qt z57np{CjlE@LZw2%|M&P}bUpC@V-SL7SK|>+msSOnCsts_$KcL>_2d_tl>HfzqTf~& znB^rM>@Xr~nb4L|6fnF@LcG}l;T(#k$m%;A2--*G$f-CGX}Xzfnl!(F*Q0jSBhj5S ze>JUiXjP;v(VJvm8A-ygy=P0g+WXG_)5l!<=x@F()(OJFi1aqkIMN@E*C@LsEF5CT z>9Q0eL-B=c3d;O!xw@vc!?>6q51OKr;#o$-}O-i1`ia*rctH2__4D+v6l zkC;9E{M&};PGpuGAm)eN{CkM#i8JZMZ+j@9`@8x6owU-&2XI(yq%GS{u zr8}>UjpX(OAIVjvXzZG{1rEtsX)g+<@j%HqiJ<5Jvw~K(a5Fg o_mf%+dNlZLV?t?yfE&WIc;}GD`CUqQL!%)7QK`=V@AcmQ1CUmrV*mgE diff --git a/test/data/legacy-sstables/da/legacy_tables/legacy_da_clust_counter/da-1-bti-Digest.crc32 b/test/data/legacy-sstables/da/legacy_tables/legacy_da_clust_counter/da-1-bti-Digest.crc32 index a661246d4452..79735e8a6684 100644 --- a/test/data/legacy-sstables/da/legacy_tables/legacy_da_clust_counter/da-1-bti-Digest.crc32 +++ b/test/data/legacy-sstables/da/legacy_tables/legacy_da_clust_counter/da-1-bti-Digest.crc32 @@ -1 +1 @@ -1748826086 \ No newline at end of file +1260299575 \ No newline at end of file diff --git a/test/data/legacy-sstables/da/legacy_tables/legacy_da_clust_counter/da-1-bti-Rows.db b/test/data/legacy-sstables/da/legacy_tables/legacy_da_clust_counter/da-1-bti-Rows.db index 31347f9e071ae9d24920447063ee791a033ab777..d96484550443dc74cf52e7265b5b4e5bef5fb87b 100644 GIT binary patch delta 89 zcmdnYvYBOqmy}!po1u}hiG`J`rHTLti=qP`KLeuy126CY$=eyT*bNVSk%zI34!-b! 
Vv5gPiDTJ_d`DXzcK+wPd0RV(s9C!c# delta 89 zcmdnYvYBOqmy~P(o1u}hiG`J`rHTLti=qP`KLeuy121p=B7nO>G{%Vd23_QE}s delta 160 zcmbPfIni>08k_XbiOajyHtNh4G;Ck={QhYM?pKQ#SbtuLd{Zj{rawGms{jB0KL|5G zf&EM`&V|0(J)3_B9^mwXXk(l|87RvLL`Zxn8)PH{W6hU?r45;$vQvf31+{o@-C|%g iU^H2>-Cy(1&CWG~eB}hn+tFS;j!t5w%8T1{o7&2Gyr8lodY22Y+v;J{%Hp8SBn@}e_n}v^lOKWCS84K9mhIk%6)1%fZryOi$UVLgs>6yti&KFd8tL gtc(>)|8ui*jo`5i1_q6ZdSa6&2syFbHI0!304s|HLWzFIx$J3qtXWV6A{nxW} z!POrc44Dj!j0}v1Rt))C{0s-DIodF=n=f+JuwjrfvSF}4Eb#**u=w}`8wOcp8wUOF ii$MG(s|tV$On?fGZ07_D99WwAfdK*-8Cfi@lmGy+BP&Dz literal 141 zcmdO5WMG)Yz-Una|Ns9621bTY!U`Jv3=Ap^j13GmUk;WwWO~X@6*3pp;=Of?f%WH= z$Tyi2G#D}&7#SHD4XqgRwfGqh#!s|iU^joe_Jj?CjFAn4{h>l8kifgc3P1s4pupki h1dzb{l{0_>CP0BB{6av110P~N7$AU=(aAq52mqbUCw2e; diff --git a/test/data/legacy-sstables/da/legacy_tables/legacy_da_simple_counter/da-1-bti-Digest.crc32 b/test/data/legacy-sstables/da/legacy_tables/legacy_da_simple_counter/da-1-bti-Digest.crc32 index d17ace79e5d5..a4b6f172c8cf 100644 --- a/test/data/legacy-sstables/da/legacy_tables/legacy_da_simple_counter/da-1-bti-Digest.crc32 +++ b/test/data/legacy-sstables/da/legacy_tables/legacy_da_simple_counter/da-1-bti-Digest.crc32 @@ -1 +1 @@ -1102192488 \ No newline at end of file +1213045656 \ No newline at end of file diff --git a/test/data/legacy-sstables/da/legacy_tables/legacy_da_simple_counter/da-1-bti-Statistics.db b/test/data/legacy-sstables/da/legacy_tables/legacy_da_simple_counter/da-1-bti-Statistics.db index b282579cf94aefd0d65ee74885111dbad9ca3b3d..be65831af8e7adc9fd10f34fc597a9d5fe6600b2 100644 GIT binary patch delta 160 zcmcbvdS7*dI%~ldM()=eb!H11cHFSq+s(kku!@28U(eD7>t2BArKunPLjeO6*xxmE zX54=5$mAb_`T-DK3_lM8B^iMTi4SFiOk`j@YM9r%>Z{9|#Y>K-JNwVL%fM*BXj15M=39KRpadoej*D1~K`cxwazud_la delta 158 zcmcbwdR=vbI%~%9+{t@4>dY22Y+v;J{%Hp8SBn@}e_n}v<7WY;Kg4>~|Ns9Vgc+c~ z{%(%#E#q$O$v*`3y&<|7r&t3e8G#6i4`qW)WMHiMa*81_q6ZdSa6&2zj$87EKHT0D|;D%m4rY diff --git a/test/data/legacy-sstables/oa/legacy_tables/legacy_oa_clust/oa-1-big-CompressionInfo.db 
b/test/data/legacy-sstables/oa/legacy_tables/legacy_oa_clust/oa-1-big-CompressionInfo.db index 4902f5b8f06a9579ac7878dac88da5d585039bda..f0da520ba8b243baedf7b1501dece2a8a41d62c0 100644 GIT binary patch literal 207 zcmZSJ^@%cZ&d)6&HighUCm=LmErb>jhtPtp5L#FsLW|sl&|*yxT0#{JL)njeclQP{yJ`J^3bItGq>MuXZb(94LkLNh zDl8N*f`gH&1RX39NI=?3qA0csyPLEk(i%JH0Cg;l3TPR{>L`M8`#$IF$Gt26^v60Q zGxyo&+;h(RocDd+b3yV-(gw+7)9AmDWRf(gL|WBvv)LW8;&i#yihKz_ZPXqJt!7eg zDCBRR-`>$QXF+p&V_RcjejvDT@!WQQdtGZ|-IDf}#ev|QwmFLf9rNea);0KR+ZF}_ z3+rlwb!~MWwe@p@fuO&oBM_XwbaAc!zP8%>_JyrWTH5P_iyJzcS{5woSkSbz>AuE> z1%ZYI_4V`SE($i(*V6y33+J`ZYw*vXQ{P0(8?fJO#-g?cI#*8&|3dYe7hC)7E%|(* z*}YkT(1WYJ4-QunJYe@EsFmSg=6cObMmvt0S!`F!L!GO=;p!sGWJe6dPavb}hu$-_@BOy7W74&7y7evhz77xdxAHAq_kO z4*NrZCoWGdYRvI{qAn;8n&$?UTkm`Yv2L+&Z!A)iJe zf3~uy)zW`+0;v4cL;yD>ffpwLr~vS>55>&B{OS4GT$8xN*zEbzaoDV{U%NB$@2kJB)HR_sQ)jLtnD&tc_IgSmYMqa1*cKZ)$=7!v)eRpdLY%Q|$ z!1p-}UbAX-*hFM|t^IVOsY!5%ZlKpXC66KxXA*>M6#6|OiUGS1INRH{Y_>MPWGwEh(wqnE`8QBV?a?>WyeQ?@)hL{1zI7mPjVow%&u0Zsf0z^V*HBu z_Bg;4rzzhJFvV%ccLQ`8{ll>WpgXw$#CL)ZV2aaJssyr5GW=64lr3!TXT>_3>Gecr zcX`aMtde^QYUJDwJi~Oh`M3b*U)n5jNw~joh$Q(J&lFE&UQK!^(y#s&y;ge`Z@Bg# zSpqifB$`8q@Q%zq?0`gPj^M>(VrJt&UchXm_4yXt?-H#Vye2yN%*DJWx`llVgy8bW2YqtCoSW5eM|H}YV+P_si z*;3lS&So$Z?cXuc1{^-rdJ`8--v)drvMBm+y7bNVl$%1Am-fOAW}jj_j=QXnQGIA7 zqtV?mD2m+$!fulH9_Ttf95bCHS!Cu@$iam5{GKPDd@wtHXEKGEb?X5HQAv(OO7-mq z=rmqYc8QEMGM@{9={7Q-uLr0mnEw$?^B*Q1=1sG=RJZ)?@8khC_Y>jt z(UD2%5&nMVHF&pH#q$3)HQjh2Hn=7NDw*iwgD(fh_5&hAWfu+2wj+EYaF@F&6hAT@$qu_Eoml z2x0t_$W|=sB^g>HyPxiqS_#kwZZ=hy(en)n>Vmk{pcN8U^LJ*kU6ULNuNlm%zL?hx zj$0|bCIi=%5~gHojofSoM^&Ym&7iFTvN)#XB-cboLYWR!9un)vcpM3pGMe)5(&Qq5 zPP`7NGllEr-z!`(?%SdvI9GB1xbHRu+Bo4ZgFpW)Ju+&*?mnEMX)?Qo{+xw9-hLE& zRKLM9Yr=0p?}!k}?jEIS3L8Ur2n;C157TxE^VusEvQnOaLBMx)3HQ00-#3D+Gpi5> z;PSGV{pjdCUkC5$zy$HD&#W3OLY~QUP>d!6nl%Tgq8p2;W={s{#Hut+a0#7DT?XZA z3U?mQe>dmO@bOwc#AbkpHAOHtDj_Oz@M z(N(-iy5v+VshE)xx{+9D3qY4{8i=;08mL=G1-iBhsG8uuFRI|RGcK$8P8BQ_-ZZ$r zG))we4@u>0B2NK7HnX7sdMLImelY#4HLDEp<7c@Z${SKyf=zmi`8wugI=awf({TjF 
zaY)A}JvQ@ZOxCgS@}AwqO*q;1Zj5wZnw^}9M(zv&b?HVzrIi5P`bp?@e{2A%>Zq8c z9~G0Wn^4YjP4PGLt?f~1YnFab(P$sPm(~~LpWwRj&(s_eO;ck`yK9f9hj#t0QCy?| zTbE}MQ7=9K19L|t3v+Ny1IU_^-Q3pV>}6RNprzfoLmj+|l0j=`9&?FxTqt&Z;&#uk zPnQ;2Y;Pss#y#!T+>3hJyHwF|f11L}Uw~nn*GsIg=BGN^abDyc8QIB2lE5E~fok$) zu`OXVsdh>Nszj4Z0Q%okK%K-LC3?Z|xiz!6X8Jo3?W-kL5I-GG56K4XuB%Y!TG^^h ztq3QW-w@ren6x4`4pp)h`}_Q5>`&EEfqvCPC_8*5v6i5GTCC?E5;mqEPNCGOqT!p} zmC30fc=)rJjp^w8*bHUP#G7MnyJsl#*O*?u8+B5wR0=cCP)-$KMK>C&x}*%KGi9xe z$_S-vj2pWjbv)QNldELBjx-CAhORi9CL%f9MbuXX*;?u&c)Ku~7c=!mJhL0=&mg25 zp)Im5YA%}DlXPnNc`bTf*jboJ7G|SA0|?TOslt&WGNbnfSO`B#9%`hAj>h~<#}|@} zdn~=T&T8C~8<+oX69UIG- z+C`V!n4ytMQ#WuuXjzH1($MKau1Cty8MQ#&x@qvj>d8RW1ouN9|A@Np&>L?*QcQop z9s4H-WLEi7ld!2Ff4Ve*$`z(&wvyWs&yo6%c{Njjn4}3*dN*nn{r)l3dGsM x)Q0K0Os1xzqnaJYrPHG7*g%iez|^K*14ARhwHqvRJmw}J{{4fgX4H=T{|AF;&#wRg literal 8685 zcmd5?Yj9I#8a`>8-atQDEJ_(Ss0C?tNpj9fa$2<~xULHyvFt&2MS_XHYh{Ot?d z8aq2Wn`@hci|X2%8X5!syBF2gcC@txL;kMzU`Jyh*b!)GZRx0O3N!|57cQs?1zVdM zy8Ql@w%XbSbbvWq<=4E{t6D?$h;wl<-)H`oyg>M& zG7ECn9gPO;jm=gFo*=Im zl5C6DYQJeRxGsYW@YW(Q-RaD^!(e*SnRmv(RMMF>TM1+@RATeJ`G<-yW&IkjwZ$gB zQF+LxRFV=`s9)iBsRtCF>J*GP{310wz6J#K*s!d&F|~Lc4zfKzU_qFoLsq7C_ex z`buCD*y;f2PG@#r4A5gRbG%ktDKI6ScppvC%9csAc0c=H0iQ8^lr*qQj&$a-{a$Ox z7SYPJ5g+A-!#UW!?Fg49nJx||qP20r&Yt@eF3M@N-K!~ez zv}5e7g+9~o{fbusOV@FBls-gsS8W6+8(acyYlMb_|Jt~P*e-vFotaLkADB^}&*`Fbvf?kIqYg|*reN|Ea4N3uhT zh+^qw+fbrT3Ds76ba%BixY;6DoH#j*@hf*kuz^kH81Xt6Zxmk^kQmLmj`~Y>2PiqMm+edo!HRXlV0! 
zCqVh})CK^4F(07IU_$N%z})G?FL~AU7(|Rd!Krd=au~*Z*j~ z&94K$1>C^2Ud_WD?9{{`cniE%mtAzE`|W9gRG1nOpD;{Zwc6jUA`Re41{vb0vD0+= zRu<--)NlXpKlIlG>7!J;AjFs&w^02j5saaT*a0wI>9%ft6-2i|M7)bOg6T1s7`+)p zC7pQQ!vq!>JPaPU7)^mRSh-Pe3T7W{oyU_Zqbc~8-W0@3ZvTNH{gFhENK0{LM_H(F zKv1n}Z11I90LgiiWq|EO#*K0T;w5(=6k@kdKao2igp1Yq#2JLL7$csxMU7uVhCm9e z&QFd-!dyazKxtr%M8GZ`2heS-0bv=M-c!GtcOgt0h!f`7A?!T}7vYPW7i z1`()(N-p&TFvxY)}!iZeXr-;x!Eb-3Ae(y*_{*gNf0%0#nk7*H;qAmQYXd zDQnKxwqx+j$tF|UqQ>4UR>^RxxdWl59ypG_VNv}){)9RE*>{JyAaI`4t7e!diW>W1 zI6L%+C=cXNn)vgyVfKY(Fh{l8KEgqE6lfo^U*!(61A;bcp$hgQu7TmjH!JYW#)mB8HT#MR_1l#B)uqkC=# zsHB*O@!koqxcrsf&qEiD9?9{ZJF-Lv@tnRwztoSVeQzF<9B2=xV0dS7tcaKe@h zE7s$`9_BJZG`TTp_!pQ;WzF-fC z8GXTDBLrsj1(9C#1@d6!@Ab@CrEY!goXkO?C`&X3k@eZ3T10*qPOp_St*{;$a2u67 z$^h9}XsKr%XbiM+U4oaEA{gSX1YgOkM}S{?0H|bQOLXzPMb%v^V_)ZMjnL(;HFOZ0GA2iBWPc)m(aPXWJviNr z^XbniDQ0a)!kN%E>CW^IW^+t3MsjP~_tp1W)1kGxHRa$jRL107tx-?oeJ3)#X`~aX zh;s^3Tf&7H`6E#2ZGHO~4_OY+2P%&>HUU`|0CZ(gkIn?@PEd#S?xFv9)mpAwTK7O- z0pYs@Z~W-&?2s+$^z6w&HdWET{5~FY;514;g`=Le;WHduB#_NM5BD~Yo}u8EBTQdi ztb5fyp`2aAldm3ha%Xr|E`PDsvzEMaT#5a9u@s#qww|gd1RS_t4VfNm2LTRrnWP!Ffx?&B@pl#-`j=OXcMi9Y^dy8r0{ zCl`+Y9>F2~3!J+XO~t{X><};FK93iW0DGj!rdOncZ^OPkalyt!Ue^MsV8hRHQ%ka! 
z0L8e;E?1vsrI2yNl{?-~>VspH2Oa@0An!4}%9Bs(m%d%>06)a|QI8ICS0Q>`*M~bYF;C*TRtnZH#VWroqUQSzmD-#PP;LxC0eYo~okLMAuVw)(K3Ubg@|{vAFbW z)JT1scy!(PAyC;g7W@1dpetjax5R+D6IA|`ayb8fQU8-%pyYQF`cFsxgg&=qNk&CQ z&jr*$1X!3l2!8*5vlXXNV)&am=-&VizKuY(hWd&iTSNEN2JAWA%S=qDHg^qILt>_B zNsZ~fTn))-qU&nR?ZJYgc^f3^+{dt@Ze;!TJ9$Sa3-q-{Vp9!Uv2rf>b`$QdqD+6%KOp;7(JEXg5BnUF(Q^FgEw&!O3f4YkKjj6Y{f z^DA-@`UYwVTc*km<1veqQ%guap6udf2ETzLIi{~CIo$LbGq}1M4Ke~H8qZ_|+CP!N z36jCZypynr_q09=yi9`Qw6oPDb|4iK~i0 z)LsjqJo{#SCm@e?`vpg!au!yQGQA5xWz3uB-2_T7Zx(zERK~nHr?7yNiMff^YylUp4^% delta 159 zcmex(j`QO=&JE3+jQrald}YkzWMtp2$jMj;q7QN~6>_q$AH2rA`Qw6oPDa+viK~i0 z)LsjqJnLqCCm@e``vpg!au#NgGQA5xWsIBW-2_T7ZWeqDRK~bDr?7yN<@dqYtdk#9 jSus{@Pk6&9Sk1=3Xmp6@FPvk1Shxw!F*zc|4&eX*RiQTG diff --git a/test/data/legacy-sstables/oa/legacy_tables/legacy_oa_clust/oa-1-big-Statistics.db b/test/data/legacy-sstables/oa/legacy_tables/legacy_oa_clust/oa-1-big-Statistics.db index 9fe4a4bf4571f4ed97ee18a42a4d869a04948c39..089352715327bb7fc0dee0f50b9774f63aed0226 100644 GIT binary patch delta 159 zcmeCMoM1UYjZKCzw;}!fMx8l=1~)8kMKka)Y+_*j*Rx3DqXC%KjgtHi1q@JNKa1P+ zZ1Jb#n|}x%{2ARmf*bw>F?vj+_vUZIR?p9S>WEmI@7)|Pb aK#YuuN;>i9p5qt4LW4;%Ei48+7XJV+G&>Ri delta 158 zcmbPW*|_u!@28FR$P(`43=v&x}L=p@0Dj>}Q$j z?v^{eZ}ShqgPfjVT?|k60wo!Nr~|==vOy*?FxGsuW(uu$5}rCweBG@(71<1o28w<`A0y9E28(g3v;LA+$&}gcf6j(Bf?nTG9eSOYMQsGL{fp G_AUT9ITYLg literal 199 zcmZSJ^@%cZ&d)6k diff --git a/test/data/legacy-sstables/oa/legacy_tables/legacy_oa_clust_counter/oa-1-big-Data.db b/test/data/legacy-sstables/oa/legacy_tables/legacy_oa_clust_counter/oa-1-big-Data.db index 85ae88d6d5a08fa324671de04a991af51ab45389..665e2d7e8cd40ef3ae8461c76c65a555f5e60b0a 100644 GIT binary patch literal 7718 zcmaJ`dwf*Y)xDEkk{Q69Kn#qra>I~1Gf8mXGZzX@ph{4A3|ItTBtC$s)uf6T)IwB# zO?&{6hem28A|~-s$wW}vs(H**K&?cy60PDST1ov{7{EvRNxfbB+{w(%wS+(N3um2m z_C9-^z4tvQLWv-35)4iS|7rw-pa}WGy0Fvfa(lc!e?XoxQXuH@lVfd65DElCjZkyt zzb-tKH>0W67B8K8_wMtA5kEY#!}g8&6LGCz0K3ignMj-+Ym3$$5jy5cePU+79t`z~ zjo2xg;iwPZzj)OPFUYg%0j&q+kf+ZZmqJ2Wg 
zwe5YP(Y~;!Pn0}8ob@=CG{D5NdP1&h0n0m07-SK0YdIlHflI!joI4$aEC)W$Ka{iD zOvr6cZ~{X)HAjKu-5xI*Cl8@W>$0uJLi_4$D{J=AUK=vjn8^Gi+oL9LJ%I_5PsiG|kD$f@Kz<$T)yD~H*$*h7WBq9@K`*ufl69>13W8c2`b1V)L=nzs z8;q6YMI;2owz~$udYn4eJ{g$xHO!F0%u2K!!fv1Aw@YH8@to{pWDX?7gUq?sLZ zm&wtF!LLF*6<+%0$^%>yM?9LG=Oq2t3y63tIa;_)0whw&%bi|HrG=MQ^e z&GL3!)x+yIoM57XCJ=a}8KC_jrbxb_w7z;m-tPe7AIkZ%nvlN%mjXjM$4UwL9LHRe zhjP9hPsq{P2x;L&82=r6D3g2lpQ8Y5$IbA?>r8QkJ~7y7=muu}wjaxdCtJW$cJQ2j z#)gnu&qiQ_ZC=+KM%oPRmY=^)({?iT|KLjQ)PjbcS&lK4l<EsLww#71EywUObyz^v{Nv+|S(!F_ z(Ty0sfe-JTk2IfoUyjjk_uQAmu2%=kdWDWzp)QlLAW(C?d7VKpxX9udp^O)NPhN7X zm4!^`yhx6VnchNQMO+kK^GSoHS$H`s*tzB){LZQB<@Gk~avPOXA)Xa)9bP_z>oV9p zoG#Mk;e;%6GKZ06x}227{50Uy^a`E#?}bUr9sP-_brB(KEX^`RO^%0Z zDf;(dj^xo$NwZCh33{LokXOqB^cX){wK!UmPsiG3CDy}B0Qq&Sr?Ls!SOO@ZV?8}U z&}JJTS;u-dM$k4)U@^O*Y+$#Izz)d4-|qrqdkVP2?bNZt4Zy5B=v1LMkX+4<`Nw-P zHY2|m4b#1c_mO7Do*G*Ic}!~CXKT#G>6awndHkKF-P+0i0{M+dLti@U&re1A*jm;D4=6~ z&_U1vOps(9>ywQH{RI=)Hc3%NvzyVe$kS!yN$f75w)a}_tH=4gu#8pnMrSANi!xx= zzt&^XL_27fF2Biavd%^&4z_t*m+=o3JCns_>JjWOjhKw~kE+3NYZrSq%kfVKx$$W^ z+-S;DWjdwM(BGXH;v$J^h6M8$zx?A2xi_K6cm2aC@(*)T5syz-W0Y56%@R+fhhI~i z1XSv-^hR>#>uw6Jj+t3U&}mGNWDSi%G^&c~agl%-?9yCiEd$QWAup9Vlsz^PbI=)d zi;lMevl^EG3wsx&O00m;c3$1wi%-77IXx6-uS-Maaqc=do0}b7LV?z}Bt%WYt_cI= z?%9|kc{EIN_xybXjmHF5mqQ{n1-lAcY3$tCpb9kRUQ1(>fT+qmtR~B{8%|gMC|SN% zh3WFIII}QC)`p}o-av`rdzM8VV-xu_H3VChq8c?*F>Oylt!-s#+D}=LT9CuM$5}Fs@lM1;!ZwRZ|BfJpgk_z`y z>ez3ES;>0^_am1Th^q;z*-p@}x&irgtaV5Vqy5gkfc!dET@^vA8vzA$tVfm*^lNY> zS;wj`A?QAE*~kfZvr7#LC+o$snvDyHCt@gx!}IlniF3 zlUHXzZ&-lCVYJVTM-X`pLrt;Y4v?|+Se3U6YAf4gH1sGIQ?mj?&+Mn6Ets1adNjlX zf8(OY43gobCHW>rQSbI-igh;F3I)^QQnIFIJ$GL~}A`4c-ISQ^eXOV7Ar}e)Txt<&v*wPl|>#EHe?1 zb+-&C?73Jlu(SC=Sx*sN)PjgwXYsP-acOvJOe026O%W4%hkKTWiO-Wap{kpaO zy%*@Dulhe14JV2m-hkE%nEFp6t$VCHsZCQbVyLCDlORHiKd33vy>ZrcT#HnO@C(YD zsuYnCRl`W}Z6lDu36tRNF1{T4G`v)&6utJ3B-ne^-s)btvRU z+d_m`3Z$5Hc$dv4&QI6NQy#-7`+axdJ&DD4y7W@P%D*x(%xPK@W(GvBHKm zZ3)b^xl&yq;^m3`(;7MmUyc~uBpoZ6C$_zU&OvrEICKoAQCiz4hk$s+q=ST%WD??% zq0rM)6*$^XDSPl-bgA7`0v00j}Gr 
zT@1mvyk4#S>op!Al(`I1wP%!2j%gVDj6@b!la<*mutHY_s{L1&)6g6&CV8qzpu}cy zuPrMjC6)?(b)O{_HmjLC7*6P3d@#qoLs6@U*P+pgP}1*Gd|>Z zt=oC---z7r6VLZJQ_X~&eh<`1E0Fp4&_wk=%35ESxm@XVCvr{ z9|{t35187wN!rQ`0@g0k#D9a0h8`$M43(-m5Sichv;UC;QxYm{%06?IDz3F9Rq#4% z@DVy4U$Ba@fMnP@0Be?qvlA}_-pvHC>uC~I7W2OmOmmj{{jZ^_!F(bHGH;|_H(e?R ziHOqg_5-qR7<8ca2Pa8Jy%1}HgF`rZlT|= zAm}M@rGVA}G=wm(OFO`oWDSdAzJN$g6|jCeYUPELyUG=gJu)(M@O7tI`pJnRlj(h@ z6t2KGcz1~2*PTyqy;^-VNj|wdkqWy=gI*z$zke7F#tbAoMc|Y{X=S()DW6h^3tACk zOKwbU6taXY`IH??Q(6hMH2}1ovmYgY&nP9#_N1%SJOYbr02Z7FK9zkhRR}8kFK0>4 zTMbGcE2dJmlGpv$ja2tvl2P~Dvh;O7M&-f!UOAKryB?#evXZS!SMnfopAhNbuNZCl zFgT>}$Eb90@m=ncjHEeAkuS^?;lIv~nk3IFRPfaGNcw((ppTr8!Mil&BtiezPtd2} zNpGlPwFxu@qnW&% z3}8{o1LK392~AnS=qM}AKMj`T)htHm-cGmm$wCPQShpdTjp8TyH3Zt`*a2Egv_Z#! zjxa7pkP);*R(7c%GkP%Yrw;w_)FCbssz!z180_C@+e9g^<~TB zj0-QYH|Pthk6fG33R@NyOVlmTB}2{20sC^!&B7i}6@L(G0M&LSARi1xxZC8ju5|2wHF#ZFOb~1yjdb zR7TLOdWxowb!90*msSG`Xjr^@MX#mWW}jCMP}cD(juLd08G&76Eo9Fpx5!@|e|ZpS z&QhGOZ0lfGz!l=1=WwuW#hsHaYG+WCo%2!qyR)puQ7F=>XPn3f)p)@-|M@JiQ_nb& zo^oWn`M_w5E;Fa8MQY*27w1Mr{0VVLk-~Xi6Q964%O|D7==7IrLH%bioZhnc7nEMnDPF#p0Sku{IOI4pdtz2vk7d4aKP_tYix)h=9>GSL}nK?7ohlMJp(%M1MO*uaI}}MOl}vTg zW0$98-LrN^;qyC2zI^^-&4bdAe_c21=!C9|@>sHSPGHrHOORaE_A4TVNAPxA}gQk|!E_WM!Y9OKr?qW8pN54?y7fSsk2t zTZLpdhnF>A*uDX4vPLdXUK$48ZqJ$CPb?DB^4ozFxIH=qWBZFHVV%D`GDsR+4M^fz zkp{14p#aFL+#pv0p>C;t*(S_q+fWggvl+=DsY+QV@@v%*tgL1H&x9dV7p06D>O+et zdM*T!&tR8@|CdnmJRr9EGmNTd^~fWw0fRnWt@6-do4M0s4Q3eB7E|qii^@yc zml;?6YsADCFkMz;69pW6dKUt)wbXQ2vJtO8-J)RwqdSBISOe*1TUJnVWF{oRlnv2( zn1VJp{tBg=G%mpOO38T4yjiKsy~f#Jyan2h_To;9%jYgLT~qQI*d~g0Iv^_SjLW7u zW37?vZ5Y>fFJu{RtthsPnpRQl@_7!KsjlqKzNVMi-h!CLJmL43IxMDqyUXV_xZIoX z+B=-Fy#>etHvKibn!jXFPZesb!UF8~50;(mhA%XqS1oj8xgJA#mk>nQe`R5Y* z0;MEdTf^o2_9W)Eo!zN#fI!Totx+1d*aAvlX<=mV))P*nVN<^Al~!gglwt6H+h}lM zMW;nMKLTEcDG%r96CHa368i`9;VM#D43VJ29_d#LP=1c-?rvH6L2|fT+T*}*;q4#D zrkQ|wnJ$q~qs_x0m@xZNT$UU*Yh-Ck@^UvM#dANJY>g~Vjkcb^Xwke&lQ)|rlZW%x zowh6#obDN&Tx``OMQVSevJbBRQucu?wd5xWc(}ep2=xPa#hLg#H 
z8AhMX68Sgi(^WqZkLn{QT^20X?D~Bu zF6Rc1eq4YPvekPyu_LZZfMd$BvE;i>oSJoEfq3tm7oHdR4^Yw+H}RJe_+QQXdi>!l ziN9CP2<%@UA~4(db{%}5VeyD#denr?YIc5mKNjXUSaP9fC~+b?)_}o^;0$?tJ|RF(;}V>(Xoy}vT#c>398?~CS2hp&UNc0LTO|ltaAJ? z0i{8oYcKth1~X3)$_xWXH$`)f0Ls}#+FAu-L3C?4PY;pwxih?dhty~f@5E+{_{vkX zDaGaK?Qe}2kbZIn|Frr%22&kUcZ-1ZZe-BvHN+xyTBb+cf;&}%5u^8QV~F#~o%MwY zxI=XuyAS!%;l|MrTE~P4@SoN9K-1`tzxkCt(5LT!=#74)iDhHy2nUzwVvAKF{FfLo^!u!;f#=069b+N$i&(moe>y=?|wp`-oXaU^pJ zMiny^bgFwh51nm-{`W2f_no#$nVjawmvymJ@()goGPR2)lmBwr> zhx4okkrufk-dQFsM=54!QEvjV&q2z~&PPskSgam{g}!;#oaS7+_a~^H4beL6T(h&O z!LSk59ZhZo?RovCP#rzLiNN=d8@MV6{`vW?K_LHi4Itm5Ul9CmPx}4k7udgM+slh| z%mb-w4_6HWHalC9l@Y;otButH!IU`PLZarQ5gvOM@Zc@VH z94u$}kw%E^e_8?LBfjTSzF2&Bkx1;1f=I~S216af$aa`oIxG-+4N=hRVZq1e=3_&t z20I40j%d7B2Do(gZg#y^$ujWkL5qdZ#qYvYj61j|GhC2XtwH0IhyAW-i28p6t0s?t z=u|xqztgL}VTXB)0DYk76Zhj-0`ueJ2f_T(6<}V)@{jn35T;J$o$QkWB{Hk+(UxvG zi==&>>7;Uz_;%_j{ZABP$4^1L)?m8f{~n-ZZ5#HtCE-ih)u7 z&ij|X5RmrCp*VZK(P2T^(7oX9i<@AO#~gdSeM1IT#{68|LcHbhQ-xMFCj|)VsJoW= z@#QDuV%mR7ZF30O*)>(WF)SX5dhMn6iG&PX2?9ZJTe~%pkiQOc1hknAM?hdB$#jl# zU$!=blP$$$wr?a(fZ)$f+6?7xQtBKM%%xcjFO*?&2TR2_97^Bi`fqpjdf>!SV8$KJ z>o&T;+Iv-A4qqOM>b^zcoX?=ysH zc8=^q>H7?BxH6m|*eZtDxuP#FQGX0xERv=F#YbEZx}T1)0Qb}F*WWInPQ1?H3Pz~g z5=ej1`}L*YHl{sEH{MVdmi&f-2KJ}_jaQ9j`gfg*0_fKQRxQs zZ?I&v*Pv~m(11T9sww4&(0$525zy&uowC!h(~b?4q3TUVz$P)4y^D*t*I@Aj-FGZ| z4`K3smSdbfmuHl7@*<$>Px&CIIH2&@l$w((8wy4=J6*gV1k<%2P9TevHBFBx;YRY_vjk-utObs-!WjcN`n+2VwghsdI1#7e?PXs+C|xxe6C`mY4m8}c zF$D-@!p%4nrOIMx`+tJmCn900#|#&kX88Cf2ZM%*d7zH_l1R@u)s$}yW$0fc)OUgG zZWf$0&t;24*M8d(WF_%Bg1gqK3s`jCS+_M(fhPDJ{UvDmyN}*4GLZHXbdU)^OZrax z*C*iEmBu912R}7wEeb9m+I!^X7QFo2S5!b4dYG7Rb<_#+2=u3eV;GbLiQoagT{aq&9^KzDFbGQ_N*($q5voq1_ zHneqE;cXV3w%--!8{hw#z=>}GL+9+B9hMP>u2$bKw+)5Aq5Ja1Qwsa*SzvFo^Pv+c zK-JI(g0yG#BO>GX1BUY%wX0i5IjIJeV1M@QVZ$4-&0O5tjR%^)X8}?<6S`9k6>2>VR3jr71I1-hu`$cwF}9!s_P3k16QyQThd%bGxU(kp|YduM$TU7zOx$*Ooo zt4eR2@==gpV&ACu{UnJoqX0bR$$(*OVf diff --git 
a/test/data/legacy-sstables/oa/legacy_tables/legacy_oa_clust_counter/oa-1-big-Digest.crc32 b/test/data/legacy-sstables/oa/legacy_tables/legacy_oa_clust_counter/oa-1-big-Digest.crc32 index 87faad2ea807..7e4c746c638d 100644 --- a/test/data/legacy-sstables/oa/legacy_tables/legacy_oa_clust_counter/oa-1-big-Digest.crc32 +++ b/test/data/legacy-sstables/oa/legacy_tables/legacy_oa_clust_counter/oa-1-big-Digest.crc32 @@ -1 +1 @@ -12357127 \ No newline at end of file +1886131535 \ No newline at end of file diff --git a/test/data/legacy-sstables/oa/legacy_tables/legacy_oa_clust_counter/oa-1-big-Index.db b/test/data/legacy-sstables/oa/legacy_tables/legacy_oa_clust_counter/oa-1-big-Index.db index 19e05aa59a4f501f9d8982d97d094d192a4ea4ac..e61eea828979b73aadeadf428a7171b292053c4c 100644 GIT binary patch delta 159 zcmex(j`QO=&JE3+jK8-(_{x~a$vAPlA}3=ZC!^%{gB(nSoGc3u-uS%v0#MoM>*oC4x5#J_ahgy*a0_fRiQm p;G4?H52~yf6}Bh5VHB)pV_-Bo_`(y;F+Oys2+lD%d|@_(0|1gzKz{%L delta 159 zcmex(j`QO=&JE3+jQ_Vk_{x~a$=JVLk(05IlTm#8K@O%uPL}xxZ+zbTaX~&Oqw40w zRYjbPYd7z;0Lq(g)^`H(c5lDn2vpAE3{s|d0jTWc=6N@P5`miq9|M)$+?-Qbz{wJQ p@J;RH2US*#GTRg0FbY<)F)$h(eBlb`7$3Tm2j`d^zAzoa0RW1$Kwkg= diff --git a/test/data/legacy-sstables/oa/legacy_tables/legacy_oa_clust_counter/oa-1-big-Statistics.db b/test/data/legacy-sstables/oa/legacy_tables/legacy_oa_clust_counter/oa-1-big-Statistics.db index f304fd2a3c849f27ab23578629f803035ab91363..8a56ecf9d48679f6f8e35e5f7d371c23ecc5c49f 100644 GIT binary patch delta 159 zcmbPeIn#228k-DbZbSO{jXHA#4Q^Q8ie}(p*u=p4uV<0QM?)~J8^`b;3K*ckekR)! 
zjt$*Sn|}x%{2ARmf*bw>F?vj+_vUZIR?p9S>WEmI@7)|Pb aK#aWMYS{JXp5qt4Lc>Wiy)4^=gQEcH-#Rq_ delta 158 zcmbPfIni>08k_XbiOajyHtNg~G$`Yk^`C)qRgQB`h_w>fI?DY6(gU(vq%6> CISqdR delta 49 zcma!un4qI`;L#*y25BQ@2AhM~UdjwI#>xyjZ-88B6J-X|L!O_M83GiN5~~<(d!GmZ E0A`mCM*si- diff --git a/test/data/legacy-sstables/oa/legacy_tables/legacy_oa_simple/oa-1-big-Digest.crc32 b/test/data/legacy-sstables/oa/legacy_tables/legacy_oa_simple/oa-1-big-Digest.crc32 index a26d0f6518bc..4a4c86e1e58b 100644 --- a/test/data/legacy-sstables/oa/legacy_tables/legacy_oa_simple/oa-1-big-Digest.crc32 +++ b/test/data/legacy-sstables/oa/legacy_tables/legacy_oa_simple/oa-1-big-Digest.crc32 @@ -1 +1 @@ -3321550027 \ No newline at end of file +1899914505 \ No newline at end of file diff --git a/test/data/legacy-sstables/oa/legacy_tables/legacy_oa_simple/oa-1-big-Statistics.db b/test/data/legacy-sstables/oa/legacy_tables/legacy_oa_simple/oa-1-big-Statistics.db index 577352490ca4d893f35b15bd020045b4e2bae584..b1553b5058c4280bf7c3c0e2eb140f1ac4a63f8a 100644 GIT binary patch delta 134 zcmX@7dQEkLI%~ldM()=eb>;|4+_1bA&A`L3iGlTB&mxU|8DLsxRlwv2f*$^0Nrqc{ z85kHCfv5w)hq6H$85kQP-`ZW0a$MG~@xk4yYKtrbqXDBy{STlnAW&^p-Sp?4;}<`> PjLG>z(JZz*6^;P_6Xh^L delta 132 zcmcbndQNqMI%~%9+{t@4>dX<8DC3y*pMi&A6$9&EUcsGzPJrn>Zo4Kw5cKc@OETQr z%fP_E2t*wSK9mj8$iP_h(V8i=-br}sJn?n6?o?zmFd8tLXf}Ud{^!o2WNLBAmqeyueG2X0NK}8xFQ`ivs*6xy&|~rElUwuAnkls2NqFi!@pZTERAe)-{^b?iC1Rn$kjcQv$iQf5#gMPX&v4-0 zY8wU)^~Vl23^GPG4E6`JJ!}}*&39-2uwjrjwqekJ;SH3Rcmb3)0ZJco;|59}*yFZ? 
N0Rk8q*Dh5(0ssZPAk6>( diff --git a/test/data/legacy-sstables/oa/legacy_tables/legacy_oa_simple_counter/oa-1-big-Digest.crc32 b/test/data/legacy-sstables/oa/legacy_tables/legacy_oa_simple_counter/oa-1-big-Digest.crc32 index 9ac8c2b64d5c..497ae8200d67 100644 --- a/test/data/legacy-sstables/oa/legacy_tables/legacy_oa_simple_counter/oa-1-big-Digest.crc32 +++ b/test/data/legacy-sstables/oa/legacy_tables/legacy_oa_simple_counter/oa-1-big-Digest.crc32 @@ -1 +1 @@ -2310159778 \ No newline at end of file +3169912797 \ No newline at end of file diff --git a/test/data/legacy-sstables/oa/legacy_tables/legacy_oa_simple_counter/oa-1-big-Statistics.db b/test/data/legacy-sstables/oa/legacy_tables/legacy_oa_simple_counter/oa-1-big-Statistics.db index bf24cd4bb788d80f3facb0efc4015dc538fd4238..92f283148024409daf355dbdf6d305c2f8c8e491 100644 GIT binary patch delta 158 zcmcbvdS7*dI%~ldM()=eb>;{f+_1bA&A`L3iGlTB&mxUe3Se4iRp5UpV1NSqyE(SE zjJvfb{}43r2Ww(@EC-Zh1fmWEAIb(9$iUbT`PS}|l;g5?jSuctRa;~k7!4Rr>VH5? aENOdk;LknBFMeJMlkdX-|DC3y*pMi&A6$9&EUcp@q8^H7)x847tfB_2Z?@S1) zaM{^E`G=r^7g!U+0~4SeBM@~U_)s>;KnBK|kJe0~^-jW5=ZUYob*Cbmfzg1`L`%aW c^Us|_$@d#JFfeFL)DxRLLCBj$qJwcY0D-_ca{vGU diff --git a/test/distributed/org/apache/cassandra/io/sstable/format/ForwardingSSTableReader.java b/test/distributed/org/apache/cassandra/io/sstable/format/ForwardingSSTableReader.java index 03739dc579ad..710a42664790 100644 --- a/test/distributed/org/apache/cassandra/io/sstable/format/ForwardingSSTableReader.java +++ b/test/distributed/org/apache/cassandra/io/sstable/format/ForwardingSSTableReader.java @@ -693,6 +693,12 @@ public AbstractBounds getBounds() return delegate.getBounds(); } + @Override + public double tokenSpaceCoverage() + { + return delegate.tokenSpaceCoverage(); + } + @Override public IVerifier getVerifier(ColumnFamilyStore cfs, OutputHandler outputHandler, boolean isOffline, IVerifier.Options options) { diff --git a/test/unit/org/apache/cassandra/db/compaction/ShardManagerTest.java 
b/test/unit/org/apache/cassandra/db/compaction/ShardManagerTest.java index bb1f8dadff1a..9ea03e695b87 100644 --- a/test/unit/org/apache/cassandra/db/compaction/ShardManagerTest.java +++ b/test/unit/org/apache/cassandra/db/compaction/ShardManagerTest.java @@ -197,6 +197,7 @@ SSTableReader mockedTable(double start, double end, double reportedCoverage) SSTableReader mock = Mockito.mock(SSTableReader.class); Mockito.when(mock.getFirst()).thenReturn(keyAt(start)); Mockito.when(mock.getLast()).thenReturn(keyAt(end)); + Mockito.when(mock.tokenSpaceCoverage()).thenReturn(reportedCoverage); return mock; } diff --git a/test/unit/org/apache/cassandra/db/compaction/unified/ShardedCompactionWriterTest.java b/test/unit/org/apache/cassandra/db/compaction/unified/ShardedCompactionWriterTest.java index 87fb8fcf5968..9ad6f8ed14d3 100644 --- a/test/unit/org/apache/cassandra/db/compaction/unified/ShardedCompactionWriterTest.java +++ b/test/unit/org/apache/cassandra/db/compaction/unified/ShardedCompactionWriterTest.java @@ -122,6 +122,7 @@ private void testShardedCompactionWriter(int numShards, int rowCount, int numOut { assertEquals((double) rdr.onDiskLength() / totalOnDiskLength, (double) getFilterSize(rdr) / totalBFSize, 0.1); + assertEquals(1.0 / numOutputSSTables, rdr.tokenSpaceCoverage(), 0.05); } validateData(cfs, rowCount); @@ -194,6 +195,7 @@ public void testDiskAdvance() throws Throwable assertEquals((double) rdr.onDiskLength() / totalOnDiskLength, (double) getFilterSize(rdr) / totalBFSize, 0.1); + assertEquals(expectedTokenShare, rdr.tokenSpaceCoverage(), expectedTokenShare * 0.05); assertEquals(expectedSize, rdr.onDiskLength(), expectedSize * 0.1); } diff --git a/test/unit/org/apache/cassandra/db/compaction/unified/ShardedMultiWriterTest.java b/test/unit/org/apache/cassandra/db/compaction/unified/ShardedMultiWriterTest.java index b48b4a7f0a8d..e5f38dd7fc49 100644 --- a/test/unit/org/apache/cassandra/db/compaction/unified/ShardedMultiWriterTest.java +++ 
b/test/unit/org/apache/cassandra/db/compaction/unified/ShardedMultiWriterTest.java @@ -24,6 +24,7 @@ import org.apache.cassandra.cql3.CQLTester; import org.apache.cassandra.cql3.UntypedResultSet; import org.apache.cassandra.db.ColumnFamilyStore; +import org.apache.cassandra.io.sstable.format.SSTableReader; import org.apache.cassandra.service.StorageService; import static org.junit.Assert.assertEquals; @@ -84,6 +85,10 @@ private void testShardedCompactionWriter(int numShards, long totSizeBytes, int n cfs.forceBlockingFlush(ColumnFamilyStore.FlushReason.UNIT_TESTS); assertEquals(numOutputSSTables, cfs.getLiveSSTables().size()); + for (SSTableReader rdr : cfs.getLiveSSTables()) + { + assertEquals(1.0 / numOutputSSTables, rdr.tokenSpaceCoverage(), 0.05); + } validateData(rowCount); cfs.truncateBlocking(); diff --git a/test/unit/org/apache/cassandra/io/sstable/metadata/MetadataSerializerTest.java b/test/unit/org/apache/cassandra/io/sstable/metadata/MetadataSerializerTest.java index 0566eb8cdc90..c753573af597 100644 --- a/test/unit/org/apache/cassandra/io/sstable/metadata/MetadataSerializerTest.java +++ b/test/unit/org/apache/cassandra/io/sstable/metadata/MetadataSerializerTest.java @@ -142,6 +142,8 @@ public Map constructMetadata(boolean withNulls) .build(); MetadataCollector collector = new MetadataCollector(cfm.comparator) .commitLogIntervals(new IntervalSet<>(cllb, club)); + if (DatabaseDescriptor.getSelectedSSTableFormat().getLatestVersion().hasTokenSpaceCoverage()) + collector.tokenSpaceCoverage(0.7); String partitioner = RandomPartitioner.class.getCanonicalName(); double bfFpChance = 0.1; From c0950671ea3c6fffc716408c7debefd9834056d5 Mon Sep 17 00:00:00 2001 From: Branimir Lambov Date: Fri, 17 Mar 2023 16:33:30 +0200 Subject: [PATCH 14/27] Work around CASSANDRA-18342 --- .../cassandra/db/compaction/UnifiedCompactionStrategy.java | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git 
a/src/java/org/apache/cassandra/db/compaction/UnifiedCompactionStrategy.java b/src/java/org/apache/cassandra/db/compaction/UnifiedCompactionStrategy.java index a8427a84f4ad..9436c351f2d3 100644 --- a/src/java/org/apache/cassandra/db/compaction/UnifiedCompactionStrategy.java +++ b/src/java/org/apache/cassandra/db/compaction/UnifiedCompactionStrategy.java @@ -30,6 +30,7 @@ import com.google.common.base.Preconditions; import com.google.common.base.Predicate; import com.google.common.collect.ImmutableSet; +import com.google.common.collect.Iterables; import com.google.common.collect.Sets; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -428,7 +429,9 @@ public synchronized void removeSSTable(SSTableReader sstable) @Override protected synchronized Set getSSTables() { - return ImmutableSet.copyOf(sstables); + // Filter the set of sstables through the live set. This is to ensure no zombie sstables are picked for + // compaction (see CASSANDRA-18342). + return ImmutableSet.copyOf(Iterables.filter(cfs.getLiveSSTables(), sstables::contains)); } /** From 9197ba736b4068f66821ab1a359149f9762401b4 Mon Sep 17 00:00:00 2001 From: Branimir Lambov Date: Thu, 9 Feb 2023 15:55:51 +0200 Subject: [PATCH 15/27] Adds ability to change compaction default in YAML and switches to UCS for the trie unit test suite --- .../org/apache/cassandra/config/Config.java | 5 +++++ .../cassandra/config/DatabaseDescriptor.java | 5 +++++ .../cassandra/schema/CompactionParams.java | 21 +++++++++++++++++-- test/conf/trie_memtable.yaml | 7 +++++++ .../statements/DescribeStatementTest.java | 14 ++++++++++--- 5 files changed, 47 insertions(+), 5 deletions(-) diff --git a/src/java/org/apache/cassandra/config/Config.java b/src/java/org/apache/cassandra/config/Config.java index b69955b7e9c4..21d307c6016f 100644 --- a/src/java/org/apache/cassandra/config/Config.java +++ b/src/java/org/apache/cassandra/config/Config.java @@ -1098,6 +1098,11 @@ public enum PaxosOnLinearizabilityViolation public volatile 
long min_tracked_partition_tombstone_count = 5000; public volatile boolean top_partitions_enabled = true; + /** + * Default compaction configuration, used if a table does not specify any. + */ + public ParameterizedClass default_compaction = null; + public static Supplier getOverrideLoadConfig() { return overrideLoadConfig; diff --git a/src/java/org/apache/cassandra/config/DatabaseDescriptor.java b/src/java/org/apache/cassandra/config/DatabaseDescriptor.java index 5fdccbb8735f..cb3fd9159f89 100644 --- a/src/java/org/apache/cassandra/config/DatabaseDescriptor.java +++ b/src/java/org/apache/cassandra/config/DatabaseDescriptor.java @@ -4751,4 +4751,9 @@ public static StorageCompatibilityMode getStorageCompatibilityMode() else return conf.storage_compatibility_mode; } + + public static ParameterizedClass getDefaultCompaction() + { + return conf != null ? conf.default_compaction : null; + } } diff --git a/src/java/org/apache/cassandra/schema/CompactionParams.java b/src/java/org/apache/cassandra/schema/CompactionParams.java index 06446276c2b6..7da6b50280eb 100644 --- a/src/java/org/apache/cassandra/schema/CompactionParams.java +++ b/src/java/org/apache/cassandra/schema/CompactionParams.java @@ -30,6 +30,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.cassandra.config.DatabaseDescriptor; +import org.apache.cassandra.config.ParameterizedClass; import org.apache.cassandra.db.compaction.AbstractCompactionStrategy; import org.apache.cassandra.db.compaction.LeveledCompactionStrategy; import org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy; @@ -85,8 +87,23 @@ public static Optional forName(String name) ImmutableMap.of(Option.MIN_THRESHOLD.toString(), Integer.toString(DEFAULT_MIN_THRESHOLD), Option.MAX_THRESHOLD.toString(), Integer.toString(DEFAULT_MAX_THRESHOLD)); - public static final CompactionParams DEFAULT = - new CompactionParams(SizeTieredCompactionStrategy.class, DEFAULT_THRESHOLDS, DEFAULT_ENABLED, 
DEFAULT_PROVIDE_OVERLAPPING_TOMBSTONES_PROPERTY_VALUE); + public static final CompactionParams DEFAULT; + static + { + ParameterizedClass defaultCompaction = DatabaseDescriptor.getDefaultCompaction(); + if (defaultCompaction == null) + { + DEFAULT = new CompactionParams(SizeTieredCompactionStrategy.class, + DEFAULT_THRESHOLDS, + DEFAULT_ENABLED, + DEFAULT_PROVIDE_OVERLAPPING_TOMBSTONES_PROPERTY_VALUE); + } + else + { + DEFAULT = create(classFromName(defaultCompaction.class_name), + defaultCompaction.parameters); + } + } private final Class klass; private final ImmutableMap options; diff --git a/test/conf/trie_memtable.yaml b/test/conf/trie_memtable.yaml index c43ca8064a9e..4b74dec9eb42 100644 --- a/test/conf/trie_memtable.yaml +++ b/test/conf/trie_memtable.yaml @@ -15,6 +15,7 @@ # specific language governing permissions and limitations # under the License. +# Change default memtable implementation to TrieMemtable # Note: this attaches at the end of cassandra.yaml, where the memtable configuration setting must be. 
default: inherits: trie @@ -23,3 +24,9 @@ # Note: This can also be achieved by passing -Dcassandra.sstable.format.default=bti sstable: selected_format: bti + +# Change default compaction to UCS +default_compaction: + class_name: UnifiedCompactionStrategy + parameters: + base_shard_count: 1 diff --git a/test/unit/org/apache/cassandra/cql3/statements/DescribeStatementTest.java b/test/unit/org/apache/cassandra/cql3/statements/DescribeStatementTest.java index cb0f08030570..e51d8ed3ad65 100644 --- a/test/unit/org/apache/cassandra/cql3/statements/DescribeStatementTest.java +++ b/test/unit/org/apache/cassandra/cql3/statements/DescribeStatementTest.java @@ -18,6 +18,7 @@ package org.apache.cassandra.cql3.statements; import java.util.Iterator; +import java.util.Map; import java.util.Optional; import com.google.common.collect.ImmutableList; @@ -34,9 +35,11 @@ import org.apache.cassandra.config.DatabaseDescriptor; import org.apache.cassandra.cql3.CQLTester; +import org.apache.cassandra.cql3.CqlBuilder; import org.apache.cassandra.dht.Token; import org.apache.cassandra.locator.InetAddressAndPort; import org.apache.cassandra.locator.TokenMetadata; +import org.apache.cassandra.schema.CompactionParams; import org.apache.cassandra.schema.Schema; import org.apache.cassandra.schema.TableId; import org.apache.cassandra.schema.TableMetadata; @@ -1043,14 +1046,14 @@ private static String tableParametersCql() " AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'}\n" + " AND cdc = false\n" + " AND comment = ''\n" + - " AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32', 'min_threshold': '4'}\n" + + " AND compaction = " + cqlQuoted(CompactionParams.DEFAULT.asMap()) + "\n" + " AND compression = {'chunk_length_in_kb': '16', 'class': 'org.apache.cassandra.io.compress.LZ4Compressor'}\n" + " AND memtable = 'default'\n" + " AND crc_check_chance = 1.0\n" + " AND default_time_to_live = 0\n" + " AND extensions = {}\n" + " AND 
gc_grace_seconds = 864000\n" + - " AND incremental_backups = true\n" + + " AND incremental_backups = true\n" + " AND max_index_interval = 2048\n" + " AND memtable_flush_period_in_ms = 0\n" + " AND min_index_interval = 128\n" + @@ -1058,6 +1061,11 @@ private static String tableParametersCql() " AND speculative_retry = '99p';"; } + private static String cqlQuoted(Map map) + { + return new CqlBuilder().append(map).toString(); + } + private static String mvParametersCql() { return "additional_write_policy = '99p'\n" + @@ -1066,7 +1074,7 @@ private static String mvParametersCql() " AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'}\n" + " AND cdc = false\n" + " AND comment = ''\n" + - " AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32', 'min_threshold': '4'}\n" + + " AND compaction = " + cqlQuoted(CompactionParams.DEFAULT.asMap()) + "\n" + " AND compression = {'chunk_length_in_kb': '16', 'class': 'org.apache.cassandra.io.compress.LZ4Compressor'}\n" + " AND memtable = 'default'\n" + " AND crc_check_chance = 1.0\n" + From e2295f4ff1b7bc17fa4d3c80dfe1a305bc009d83 Mon Sep 17 00:00:00 2001 From: Branimir Lambov Date: Fri, 16 Jun 2023 15:23:04 +0300 Subject: [PATCH 16/27] Trivial corrections --- .../compaction/UnifiedCompactionStrategy.java | 4 +- .../compaction/UnifiedCompactionStrategy.md | 54 ++++++++++--------- .../db/compaction/unified/Controller.java | 18 +++---- .../unified/ShardedCompactionWriter.java | 7 +-- .../apache/cassandra/utils/FBUtilities.java | 11 ++-- 5 files changed, 46 insertions(+), 48 deletions(-) diff --git a/src/java/org/apache/cassandra/db/compaction/UnifiedCompactionStrategy.java b/src/java/org/apache/cassandra/db/compaction/UnifiedCompactionStrategy.java index 9436c351f2d3..70ef991392fd 100644 --- a/src/java/org/apache/cassandra/db/compaction/UnifiedCompactionStrategy.java +++ b/src/java/org/apache/cassandra/db/compaction/UnifiedCompactionStrategy.java @@ -71,7 +71,6 @@ 
public class UnifiedCompactionStrategy extends AbstractCompactionStrategy private static final Pattern SCALING_PARAMETER_PATTERN = Pattern.compile("(N)|L(\\d+)|T(\\d+)|([+-]?\\d+)"); private static final String SCALING_PARAMETER_PATTERN_SIMPLIFIED = SCALING_PARAMETER_PATTERN.pattern() .replaceAll("[()]", "") - .replace("\\d", "[0-9]"); private final Controller controller; @@ -271,7 +270,6 @@ public SSTableMultiWriter createSSTableMultiWriter(Descriptor descriptor, Collection indexes, LifecycleNewTracker lifecycleNewTracker) { - // FIXME: needs the metadata collector fix ShardManager shardManager = getShardManager(); double flushDensity = cfs.metric.flushSizeOnDisk.get() / shardManager.localSpaceCoverage(); ShardTracker boundaries = shardManager.boundaries(controller.getNumShards(flushDensity)); @@ -608,7 +606,7 @@ CompactionPick getCompactionPick(SelectionContext context) { // We can have just one pick in each level. Pick one bucket randomly out of the ones with // the highest overlap. - // The random() part below implements reservoir sampling with size 1, giving us a uniform selection. + // The random() part below implements reservoir sampling with size 1, giving us a uniformly random selection. 
if (bucket.maxOverlap == maxOverlap && controller.random().nextInt(++overlapMatchingCount) == 0) selectedBucket = bucket; // The estimated remaining tasks is a measure of the remaining amount of work, thus we prefer to diff --git a/src/java/org/apache/cassandra/db/compaction/UnifiedCompactionStrategy.md b/src/java/org/apache/cassandra/db/compaction/UnifiedCompactionStrategy.md index 7fd53010e117..c21fc27979fe 100644 --- a/src/java/org/apache/cassandra/db/compaction/UnifiedCompactionStrategy.md +++ b/src/java/org/apache/cassandra/db/compaction/UnifiedCompactionStrategy.md @@ -190,12 +190,15 @@ $S$ to split the local token space into to be $$ S = \begin{cases} -2^{\mathrm{round}\left( \log_2 \left( {\frac d t \cdot \frac 1 b}\right)\right)} \cdot b - & \text{if } d \ge tb\\ -b & \text{otherwise} +b + & \text{if } d < t b\\ +2^{\left\lfloor \log_2 \left( {\frac d t \cdot \frac 1 b}\right)\right\rceil} \cdot b + & \text{otherwise} \end{cases} $$ +(where $\lfloor x \rceil$ stands for $x$ rounded to the nearest integer, i.e. $\lfloor x + 0.5 \rfloor$) + That is, we divide the density by the target size and round this to a power-of-two multiple of $b$. We then generate $S - 1$ boundaries that split the local token space equally into $S$ shards, and split the result of the compaction on these boundaries to form a separate sstable for each shard. This aims to produce sstable sizes that @@ -295,15 +298,15 @@ $t = f = w + 2$. UCS drops the upper limit as we have seen that compaction is st sstables. UCS makes use of the density measure to split results in order to keep the size of sstables and the length of -compactions low. Within a level it will only consider overlapping sstables when deciding whether or not the threshold -is hit, and will independently compact sets of sstables that do not overlap. +compactions low. 
Within a level it will only consider overlapping sstables when deciding whether the threshold is hit, +and will independently compact sets of sstables that do not overlap. If there are multiple choices to pick SSTables within a bucket, STCS groups them by size while UCS groups them by -timestamp. Because of that, STCS easily loses time order and makes whole table expiration less efficient. +timestamp. Because of that, STCS easily loses time order which makes whole table expiration less efficient. #### UCS-leveled vs LCS -On first glance LeveledCompactionStrategy look very different in behaviour compared to UCS. +On a first glance LeveledCompactionStrategy looks very different in behaviour compared to UCS. LCS keeps multiple sstables per level which form a sorted run of non-overlapping sstables of small fixed size. So physical sstables on increasing levels increase in number (by a factor of `fanout_size`) instead of size. LCS does that @@ -338,34 +341,35 @@ UCS accepts these compaction strategy parameters: * **scaling_parameters**. A list of per-level scaling parameters, specified as L*f*, T*f*, N, or an integer value specifying $w$ directly. If more levels are present than the length of this list, the last value is used for all higher levels. Often this will be a single parameter, specifying the behaviour for all levels of the - hierarchy. -
Levelled compaction, specified as L*f*, is preferable for read-heavy workloads, especially if bloom filters are + hierarchy. + Levelled compaction, specified as L*f*, is preferable for read-heavy workloads, especially if bloom filters are not effective (e.g. with wide partitions); higher levelled fan factors improve read amplification (and hence latency, - as well as throughput for read-dominated workloads) at the expense of increased write costs. -
Tiered compaction, specified as T*f*, is preferable for write-heavy workloads, or ones where bloom filters or + as well as throughput for read-dominated workloads) at the expense of increased write costs. + Tiered compaction, specified as T*f*, is preferable for write-heavy workloads, or ones where bloom filters or time order can be exploited; higher tiered fan factors improve the cost of writes (and hence throughput) at the - expense of making reads more difficult. -
N is the middle ground that has the features of levelled (one sstable run per level) as well as tiered (one - compaction to be promoted to the next level) and a fan factor of 2. This can also be specified as T2 or L2. -
The default value is T4, matching the default STCS behaviour with threshold 4. To select an equivalent of LCS + expense of making reads more difficult. + N is the middle ground that has the features of levelled (one sstable run per level) as well as tiered (one + compaction to be promoted to the next level) and a fan factor of 2. This can also be specified as T2 or L2. + The default value is T4, matching the default STCS behaviour with threshold 4. To select an equivalent of LCS with its default fan factor 10, use L10. * **target_sstable_size**. The target sstable size $t$, specified as a human-friendly size in bytes (e.g. 100 MiB = $100\cdot 2^{20}$ B or (10 MB = 10,000,000 B)). The strategy will split data in shards that aim to produce sstables - of size between $t / \sqrt 2$ and $t \cdot \sqrt 2$. -
Smaller sstables improve streaming and repair, and make compactions shorter. On the other hand, each sstable - on disk has a non-trivial in-memory footprint that also affects garbage collection times. -
Increase this if the memory pressure from the number of sstables in the system becomes too high. -
The default value is 1 GiB. + of size between $t / \sqrt 2$ and $t \cdot \sqrt 2$. + Smaller sstables improve streaming and repair, and make compactions shorter. On the other hand, each sstable + on disk has a non-trivial in-memory footprint that also affects garbage collection times. + Increase this if the memory pressure from the number of sstables in the system becomes too high. + The default value is 1 GiB. * **base_shard_count**. The minimum number of shards $b$, used for levels with the smallest density. This gives the minimum compaction concurrency for the lowest levels. A low number would result in larger L0 sstables but may limit - the overall maximum write throughput (as every piece of data has to go through L0). -
The default value is 4 (1 for system tables, or when multiple data locations are defined). -* **expired_sstable_check_frequency_seconds**. Determines how often to check for expired SSTables. -
The default value is 10 minutes. + the overall maximum write throughput (as every piece of data has to go through L0). + The default value is 4 (1 for system tables, or when multiple data locations are defined). +* **expired_sstable_check_frequency_seconds**. Determines how often to check for expired SSTables. + The default value is 10 minutes. In **cassandra.yaml**: -* **concurrent_compactors**. The number of compaction threads available. Set this to a large number, at minimum the number of expected levels of the compaction hierarchy to make sure that each level is given a dedicated compaction thread. This will avoid latency spikes caused by lower levels of the compaction hierarchy not getting a chance to run. +* **concurrent_compactors**. The number of compaction threads available. Higher values increase compaction performance + but may increase read and write latencies. [^1]: Note: in addition to TRANSITIVE, "overlap inclusion methods" of NONE and SINGLE are also implemented for experimentation, but they are not recommended for the UCS sharding scheme. 
diff --git a/src/java/org/apache/cassandra/db/compaction/unified/Controller.java b/src/java/org/apache/cassandra/db/compaction/unified/Controller.java index 7d39e58babca..848eca76d82e 100644 --- a/src/java/org/apache/cassandra/db/compaction/unified/Controller.java +++ b/src/java/org/apache/cassandra/db/compaction/unified/Controller.java @@ -67,8 +67,8 @@ public class Controller public static final int DEFAULT_BASE_SHARD_COUNT = CassandraRelevantProperties.UCS_BASE_SHARD_COUNT.getInt(); static final String TARGET_SSTABLE_SIZE_OPTION = "target_sstable_size"; - public static final double DEFAULT_TARGET_SSTABLE_SIZE = CassandraRelevantProperties.UCS_TARGET_SSTABLE_SIZE.getSizeInBytes(); - static final double MIN_TARGET_SSTABLE_SIZE = 1L << 20; + public static final long DEFAULT_TARGET_SSTABLE_SIZE = CassandraRelevantProperties.UCS_TARGET_SSTABLE_SIZE.getSizeInBytes(); + static final long MIN_TARGET_SSTABLE_SIZE = 1L << 20; /** * This parameter is intended to modify the shape of the LSM by taking into account the survival ratio of data, for now it is fixed to one. @@ -269,7 +269,7 @@ public long getExpiredSSTableCheckFrequency() } /** - * The strategy will call this method each time {@link CompactionStrategy#getNextBackgroundTask} is called. + * The strategy will call this method each time {@link UnifiedCompactionStrategy#getNextBackgroundTask} is called. 
*/ public void onStrategyBackgroundTaskRequest() { @@ -289,7 +289,7 @@ public static Controller fromOptions(ColumnFamilyStore cfs, Map { int[] Ws = parseScalingParameters(options.getOrDefault(SCALING_PARAMETERS_OPTION, DEFAULT_SCALING_PARAMETERS)); - long flushSizeOverride = (long) FBUtilities.parseHumanReadable(options.getOrDefault(FLUSH_SIZE_OVERRIDE_OPTION, "0MiB"), null, "B"); + long flushSizeOverride = FBUtilities.parseHumanReadableBytes(options.getOrDefault(FLUSH_SIZE_OVERRIDE_OPTION, "0MiB")); int maxSSTablesToCompact = Integer.parseInt(options.getOrDefault(MAX_SSTABLES_TO_COMPACT_OPTION, "0")); long expiredSSTableCheckFrequency = options.containsKey(EXPIRED_SSTABLE_CHECK_FREQUENCY_SECONDS_OPTION) ? Long.parseLong(options.get(EXPIRED_SSTABLE_CHECK_FREQUENCY_SECONDS_OPTION)) @@ -311,9 +311,9 @@ public static Controller fromOptions(ColumnFamilyStore cfs, Map baseShardCount = DEFAULT_BASE_SHARD_COUNT; } - double targetSStableSize = options.containsKey(TARGET_SSTABLE_SIZE_OPTION) - ? FBUtilities.parseHumanReadable(options.get(TARGET_SSTABLE_SIZE_OPTION), null, "B") - : DEFAULT_TARGET_SSTABLE_SIZE; + long targetSStableSize = options.containsKey(TARGET_SSTABLE_SIZE_OPTION) + ? FBUtilities.parseHumanReadableBytes(options.get(TARGET_SSTABLE_SIZE_OPTION)) + : DEFAULT_TARGET_SSTABLE_SIZE; Overlaps.InclusionMethod inclusionMethod = options.containsKey(OVERLAP_INCLUSION_METHOD_OPTION) ? 
Overlaps.InclusionMethod.valueOf(options.get(OVERLAP_INCLUSION_METHOD_OPTION).toUpperCase()) @@ -369,7 +369,7 @@ public static Map validateOptions(Map options) t { try { - long targetSSTableSize = (long) FBUtilities.parseHumanReadable(s, null, "B"); + long targetSSTableSize = FBUtilities.parseHumanReadableBytes(s); if (targetSSTableSize < MIN_TARGET_SSTABLE_SIZE) { throw new ConfigurationException(String.format(sizeUnacceptableErr, @@ -393,7 +393,7 @@ public static Map validateOptions(Map options) t { try { - long flushSize = (long) FBUtilities.parseHumanReadable(s, null, "B"); + long flushSize = FBUtilities.parseHumanReadableBytes(s); if (flushSize < MIN_TARGET_SSTABLE_SIZE) throw new ConfigurationException(String.format(sizeUnacceptableErr, FLUSH_SIZE_OVERRIDE_OPTION, diff --git a/src/java/org/apache/cassandra/db/compaction/unified/ShardedCompactionWriter.java b/src/java/org/apache/cassandra/db/compaction/unified/ShardedCompactionWriter.java index 28076929aa8c..b5ea5ec77821 100644 --- a/src/java/org/apache/cassandra/db/compaction/unified/ShardedCompactionWriter.java +++ b/src/java/org/apache/cassandra/db/compaction/unified/ShardedCompactionWriter.java @@ -33,8 +33,7 @@ /** * A {@link CompactionAwareWriter} that splits the output sstable at the partition boundaries of the compaction - * shards used by {@link org.apache.cassandra.db.compaction.UnifiedCompactionStrategy} as long as the size of - * the sstable so far is sufficiently large. + * shards used by {@link org.apache.cassandra.db.compaction.UnifiedCompactionStrategy}. */ public class ShardedCompactionWriter extends CompactionAwareWriter { @@ -63,7 +62,9 @@ public ShardedCompactionWriter(ColumnFamilyStore cfs, @Override protected boolean shouldSwitchWriterInCurrentLocation(DecoratedKey key) { - // If we have written anything and cross a shard boundary, switch to a new writer. + // If we have written anything and cross a shard boundary, switch to a new writer. 
We use the uncompressed + // file pointer here because there may be writes that are not yet reflected in the on-disk size, and we want + // to split as soon as there is content, regardless how small. final long uncompressedBytesWritten = sstableWriter.currentWriter().getFilePointer(); if (boundaries.advanceTo(key.getToken()) && uncompressedBytesWritten > 0) { diff --git a/src/java/org/apache/cassandra/utils/FBUtilities.java b/src/java/org/apache/cassandra/utils/FBUtilities.java index 1740833d9663..96a756869380 100644 --- a/src/java/org/apache/cassandra/utils/FBUtilities.java +++ b/src/java/org/apache/cassandra/utils/FBUtilities.java @@ -924,13 +924,7 @@ public static String prettyPrintMemoryPerSecond(long rate) public static String prettyPrintMemoryPerSecond(long bytes, long timeInNano) { - // We can't sanely calculate a rate over 0 nanoseconds - if (timeInNano == 0) - return "NaN KiB/s"; - - long rate = (long) (((double) bytes / timeInNano) * 1000 * 1000 * 1000); - - return prettyPrintMemoryPerSecond(rate); + return prettyPrintBinary(bytes * 1.0e9 / timeInNano, "B/s", ""); } /** @@ -939,7 +933,8 @@ public static String prettyPrintMemoryPerSecond(long bytes, long timeInNano) * * @param datum The human-readable number. * @param separator Expected separator, null to accept any amount of whitespace. - * @param unit Expected unit. + * @param unit Expected unit. If null, the method will accept any string as unit, i.e. it will parse the number + * at the start of the supplied string and ignore any remainder. * @return The parsed value. 
*/ public static double parseHumanReadable(String datum, String separator, String unit) From 13a71aef4f961e9c69f832054174abf9df60e2d0 Mon Sep 17 00:00:00 2001 From: Branimir Lambov Date: Wed, 21 Jun 2023 15:16:22 +0300 Subject: [PATCH 17/27] Test and fix shard number calculation for multiple nodes/disks --- .../cassandra/db/compaction/ShardManager.java | 9 +- .../db/compaction/ShardManagerDiskAware.java | 11 ++ .../db/compaction/ShardManagerNoDisks.java | 6 + .../db/compaction/ShardManagerTrivial.java | 6 + .../compaction/UnifiedCompactionStrategy.java | 2 +- .../db/compaction/unified/Controller.java | 14 +- .../unified/UnifiedCompactionTask.java | 2 +- .../test/UnifiedCompactionDensitiesTest.java | 136 ++++++++++++++++++ 8 files changed, 179 insertions(+), 7 deletions(-) create mode 100644 test/distributed/org/apache/cassandra/distributed/test/UnifiedCompactionDensitiesTest.java diff --git a/src/java/org/apache/cassandra/db/compaction/ShardManager.java b/src/java/org/apache/cassandra/db/compaction/ShardManager.java index a975da05a747..6ea2cd72a84c 100644 --- a/src/java/org/apache/cassandra/db/compaction/ShardManager.java +++ b/src/java/org/apache/cassandra/db/compaction/ShardManager.java @@ -65,10 +65,17 @@ else if (partitioner.splitter().isPresent()) double rangeSpanned(Range tableRange); /** - * The total fraction of the local space covered by the local ranges. + * The total fraction of the token space covered by the local ranges. */ double localSpaceCoverage(); + /** + * The fraction of the token space covered by a shard set, i.e. the space that is split in the requested number of + * shards. + * If no disks are defined, this is the same as localSpaceCoverage(). Otherwise, it is the token coverage of a disk. + */ + double shardSetCoverage(); + /** * Construct a boundary/shard iterator for the given number of shards. 
* diff --git a/src/java/org/apache/cassandra/db/compaction/ShardManagerDiskAware.java b/src/java/org/apache/cassandra/db/compaction/ShardManagerDiskAware.java index f6c74314c729..4f8aba283aba 100644 --- a/src/java/org/apache/cassandra/db/compaction/ShardManagerDiskAware.java +++ b/src/java/org/apache/cassandra/db/compaction/ShardManagerDiskAware.java @@ -76,6 +76,17 @@ public ShardManagerDiskAware(ColumnFamilyStore.VersionedLocalRanges localRanges, assert diskIndex + 1 == diskBoundaryPositions.length : "Disk boundaries are not within local ranges"; } + @Override + public double shardSetCoverage() + { + return localSpaceCoverage() / diskBoundaryPositions.length; + // The above is an approximation that works correctly for the normal allocation of disks. + // This can be properly calculated if a contained token is supplied as argument and the diskBoundaryPosition + // difference is retrieved for the disk containing that token. + // Unfortunately we don't currently have a way to get a representative position when an sstable writer is + // constructed for flushing. + } + /** * Construct a boundary/shard iterator for the given number of shards. 
*/ diff --git a/src/java/org/apache/cassandra/db/compaction/ShardManagerNoDisks.java b/src/java/org/apache/cassandra/db/compaction/ShardManagerNoDisks.java index 0d2c10ef5288..6174612a94aa 100644 --- a/src/java/org/apache/cassandra/db/compaction/ShardManagerNoDisks.java +++ b/src/java/org/apache/cassandra/db/compaction/ShardManagerNoDisks.java @@ -85,6 +85,12 @@ public double localSpaceCoverage() return localRangePositions[localRangePositions.length - 1]; } + @Override + public double shardSetCoverage() + { + return localSpaceCoverage(); + } + @Override public ShardTracker boundaries(int shardCount) { diff --git a/src/java/org/apache/cassandra/db/compaction/ShardManagerTrivial.java b/src/java/org/apache/cassandra/db/compaction/ShardManagerTrivial.java index d6192cb52fa0..407bff4f0d67 100644 --- a/src/java/org/apache/cassandra/db/compaction/ShardManagerTrivial.java +++ b/src/java/org/apache/cassandra/db/compaction/ShardManagerTrivial.java @@ -68,6 +68,12 @@ public double localSpaceCoverage() return 1; } + @Override + public double shardSetCoverage() + { + return 1; + } + ShardTracker iterator = new ShardTracker() { @Override diff --git a/src/java/org/apache/cassandra/db/compaction/UnifiedCompactionStrategy.java b/src/java/org/apache/cassandra/db/compaction/UnifiedCompactionStrategy.java index 70ef991392fd..60cccda2ddfd 100644 --- a/src/java/org/apache/cassandra/db/compaction/UnifiedCompactionStrategy.java +++ b/src/java/org/apache/cassandra/db/compaction/UnifiedCompactionStrategy.java @@ -271,7 +271,7 @@ public SSTableMultiWriter createSSTableMultiWriter(Descriptor descriptor, LifecycleNewTracker lifecycleNewTracker) { ShardManager shardManager = getShardManager(); - double flushDensity = cfs.metric.flushSizeOnDisk.get() / shardManager.localSpaceCoverage(); + double flushDensity = cfs.metric.flushSizeOnDisk.get() * shardManager.shardSetCoverage() / shardManager.localSpaceCoverage(); ShardTracker boundaries = 
shardManager.boundaries(controller.getNumShards(flushDensity)); return new ShardedMultiWriter(cfs, descriptor, diff --git a/src/java/org/apache/cassandra/db/compaction/unified/Controller.java b/src/java/org/apache/cassandra/db/compaction/unified/Controller.java index 848eca76d82e..8199e04cd181 100644 --- a/src/java/org/apache/cassandra/db/compaction/unified/Controller.java +++ b/src/java/org/apache/cassandra/db/compaction/unified/Controller.java @@ -195,12 +195,18 @@ public int getThreshold(int index) { * This is calculated as a power-of-two multiple of baseShardCount, so that the expected size of resulting sstables * is between targetSSTableSizeMin and 2*targetSSTableSizeMin (in other words, sqrt(0.5) * targetSSTableSize and * sqrt(2) * targetSSTableSize), with a minimum of baseShardCount shards for smaller sstables. + * + * Note that to get the sstables resulting from this splitting within the bounds, the density argument must be + * normalized to the span that is being split. In other words, if no disks are defined, the density should be + * scaled by the token coverage of the locally-owned ranges. If multiple data directories are defined, the density + * should be scaled by the token coverage of the respective data directory. That is localDensity = size / span, + * where the span is normalized so that span = 1 when the data covers the range that is being split. */ - public int getNumShards(double density) + public int getNumShards(double localDensity) { // How many we would have to aim for the target size. Divided by the base shard count, so that we can ensure // the result is a multiple of it by multiplying back below. 
- double count = density / (targetSSTableSizeMin * baseShardCount); + double count = localDensity / (targetSSTableSizeMin * baseShardCount); if (count > MAX_SHARD_SPLIT) count = MAX_SHARD_SPLIT; assert !(count < 0); // Must be positive, 0 or NaN, which should translate to baseShardCount @@ -212,8 +218,8 @@ public int getNumShards(double density) int shards = baseShardCount * Integer.highestOneBit((int) count | 1); logger.debug("Shard count {} for density {}, {} times target {}", shards, - FBUtilities.prettyPrintBinary(density, "B", " "), - density / targetSSTableSizeMin, + FBUtilities.prettyPrintBinary(localDensity, "B", " "), + localDensity / targetSSTableSizeMin, FBUtilities.prettyPrintBinary(targetSSTableSizeMin, "B", " ")); return shards; } diff --git a/src/java/org/apache/cassandra/db/compaction/unified/UnifiedCompactionTask.java b/src/java/org/apache/cassandra/db/compaction/unified/UnifiedCompactionTask.java index 720ce2dbdc1d..cc784a584f1b 100644 --- a/src/java/org/apache/cassandra/db/compaction/unified/UnifiedCompactionTask.java +++ b/src/java/org/apache/cassandra/db/compaction/unified/UnifiedCompactionTask.java @@ -53,7 +53,7 @@ public CompactionAwareWriter getCompactionAwareWriter(ColumnFamilyStore cfs, Set nonExpiredSSTables) { double density = shardManager.calculateCombinedDensity(nonExpiredSSTables); - int numShards = controller.getNumShards(density); + int numShards = controller.getNumShards(density * shardManager.shardSetCoverage()); return new ShardedCompactionWriter(cfs, directories, txn, nonExpiredSSTables, keepOriginals, shardManager.boundaries(numShards)); } } \ No newline at end of file diff --git a/test/distributed/org/apache/cassandra/distributed/test/UnifiedCompactionDensitiesTest.java b/test/distributed/org/apache/cassandra/distributed/test/UnifiedCompactionDensitiesTest.java new file mode 100644 index 000000000000..c4579989a0b8 --- /dev/null +++ b/test/distributed/org/apache/cassandra/distributed/test/UnifiedCompactionDensitiesTest.java @@ 
-0,0 +1,136 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.cassandra.distributed.test; + +import java.io.IOException; +import java.util.LongSummaryStatistics; + +import org.junit.Test; + +import org.slf4j.LoggerFactory; + +import org.apache.cassandra.db.ColumnFamilyStore; +import org.apache.cassandra.db.Keyspace; +import org.apache.cassandra.distributed.Cluster; +import org.apache.cassandra.distributed.api.ConsistencyLevel; +import org.apache.cassandra.io.sstable.format.SSTableReader; +import org.apache.cassandra.utils.FBUtilities; +import org.hamcrest.Matchers; + +import static org.apache.cassandra.cql3.TombstonesWithIndexedSSTableTest.makeRandomString; +import static org.junit.Assert.assertThat; + +public class UnifiedCompactionDensitiesTest extends TestBaseImpl +{ + @Test + public void testTargetSSTableSize1Node1Dir() throws IOException + { + testTargetSSTableSize(1, 1); + } + + @Test + public void testTargetSSTableSize1Node2Dirs() throws IOException + { + testTargetSSTableSize(1, 2); + } + + @Test + public void testTargetSSTableSize2Nodes1Dir() throws IOException + { + testTargetSSTableSize(2, 1); + } + + @Test + public void testTargetSSTableSize2Nodes3Dirs() throws IOException + { 
+ testTargetSSTableSize(2, 3); + } + + private void testTargetSSTableSize(int nodeCount, int dataDirs) throws IOException + { + try (Cluster cluster = init(builder().withNodes(nodeCount) + .withDataDirCount(dataDirs) + .withConfig(cfg -> cfg.set("memtable_heap_space", "100MiB")) + .start())) + { + cluster.schemaChange(withKeyspace("alter keyspace %s with replication = {'class': 'SimpleStrategy', 'replication_factor':1}")); + cluster.schemaChange(withKeyspace("create table %s.tbl (id bigint primary key, value text) with compaction = {'class':'UnifiedCompactionStrategy', 'target_sstable_size' : '1MiB'}")); + long targetSize = 1L<<20; + long targetMin = targetSize * 10 / 16; // Size must be within sqrt(0.5), sqrt(2) of target, use 1.6 to account for estimations + long targetMax = targetSize * 16 / 10; + long toWrite = targetSize * nodeCount * dataDirs * 8; // 8 MiB per data directory, to be guaranteed to be over the 1MiB target size, and also different from the base shard count + int payloadSize = 1024; + cluster.forEach(x -> x.nodetool("disableautocompaction")); + + // The first flush will not have the flush size metric initialized, so first check distribution after compaction. + int i = 0; + for (; i < 2; ++i) + { + writeData(cluster, i * toWrite, toWrite, payloadSize); + cluster.forEach(x -> x.flush(KEYSPACE)); + } + + cluster.forEach(x -> x.forceCompact(KEYSPACE, "tbl")); + checkSSTableSizes(nodeCount, cluster, targetMin, targetMax); + + // Now check that the sstables created by flushes are of the right size. + for (; i < 2; ++i) + { + writeData(cluster, i * toWrite, toWrite, payloadSize); + cluster.forEach(x -> x.flush(KEYSPACE)); + } + checkSSTableSizes(nodeCount, cluster, targetMin, targetMax); + + // Compact again, as this time there will be independent buckets whose splitting must also work correctly. 
+ cluster.forEach(x -> x.forceCompact(KEYSPACE, "tbl")); + checkSSTableSizes(nodeCount, cluster, targetMin, targetMax); + } + } + + private static void writeData(Cluster cluster, long offset, long toWrite, int payloadSize) + { + for (int i = 0; i < toWrite; i += payloadSize) + cluster.coordinator(1).execute(withKeyspace("insert into %s.tbl (id, value) values (?, ?)"), ConsistencyLevel.ONE, i + offset, makeRandomString(payloadSize)); + } + + private void checkSSTableSizes(int nodeCount, Cluster cluster, long targetMin, long targetMax) + { + for (int i = 1; i <= nodeCount; ++i) + { + LongSummaryStatistics stats = cluster.get(i).callOnInstance(() -> { + ColumnFamilyStore cfs = Keyspace.open(KEYSPACE).getColumnFamilyStore("tbl"); + return cfs.getLiveSSTables().stream().mapToLong(SSTableReader::onDiskLength).summaryStatistics(); + }); + long sstableCount = stats.getCount(); + long minSize = stats.getMin(); + long maxSize = stats.getMax(); + + LoggerFactory.getLogger(getClass()).info("Node {} sstables {} min/max size: {}/{} avg {} total {}", + i, + sstableCount, + FBUtilities.prettyPrintMemory(minSize), + FBUtilities.prettyPrintMemory(maxSize), + FBUtilities.prettyPrintBinary(stats.getAverage(), "", "B"), + FBUtilities.prettyPrintMemory(stats.getSum())); + assertThat(sstableCount, Matchers.greaterThan(0L)); + assertThat(minSize, Matchers.greaterThan(targetMin)); + assertThat(maxSize, Matchers.lessThan(targetMax)); + } + } +} From b2835f23f846c6886a84d2b02714a284bdafc716 Mon Sep 17 00:00:00 2001 From: Branimir Lambov Date: Fri, 23 Jun 2023 17:13:01 +0300 Subject: [PATCH 18/27] Set max_sstables_to_compact default to 32 Store targetSSTableSize and apply sqrt(0.5) in calculation. 
--- .../db/compaction/unified/Controller.java | 29 ++++++++++--------- 1 file changed, 15 insertions(+), 14 deletions(-) diff --git a/src/java/org/apache/cassandra/db/compaction/unified/Controller.java b/src/java/org/apache/cassandra/db/compaction/unified/Controller.java index 8199e04cd181..4852e05faeb9 100644 --- a/src/java/org/apache/cassandra/db/compaction/unified/Controller.java +++ b/src/java/org/apache/cassandra/db/compaction/unified/Controller.java @@ -79,10 +79,8 @@ public class Controller /** * The maximum number of sstables to compact in one operation. * - * This is expected to be large and never be reached, but compaction going very very late may cause the accumulation - * of thousands and even tens of thousands of sstables which may cause problems if compacted in one long operation. - * The default is chosen to be half of the maximum permitted space overhead when the source sstables are of the - * minimum sstable size. + * The default is 32, which aims to keep the length of operations under control and prevent accummulation of + * sstables while compactions are taking place. * * If the fanout factor is larger than the maximum number of sstables, the strategy will ignore the latter. 
*/ @@ -119,7 +117,9 @@ public class Controller protected final int baseShardCount; - protected final double targetSSTableSizeMin; + protected final double targetSSTableSize; + + static final double INVERSE_SQRT_2 = Math.sqrt(0.5); protected final Overlaps.InclusionMethod overlapInclusionMethod; @@ -143,7 +143,7 @@ public class Controller this.currentFlushSize = flushSizeOverride; this.expiredSSTableCheckFrequency = TimeUnit.MILLISECONDS.convert(expiredSSTableCheckFrequency, TimeUnit.SECONDS); this.baseShardCount = baseShardCount; - this.targetSSTableSizeMin = targetSStableSize * Math.sqrt(0.5); + this.targetSSTableSize = targetSStableSize; this.overlapInclusionMethod = overlapInclusionMethod; if (maxSSTablesToCompact <= 0) @@ -175,7 +175,7 @@ public int getScalingParameter(int index) public String toString() { return String.format("Controller, m: %s, o: %s, Ws: %s", - FBUtilities.prettyPrintBinary(targetSSTableSizeMin, "B", ""), + FBUtilities.prettyPrintBinary(targetSSTableSize, "B", ""), Arrays.toString(survivalFactors), printScalingParameters(scalingParameters)); } @@ -193,8 +193,8 @@ public int getThreshold(int index) { /** * Calculate the number of shards to split the local token space in for the given sstable density. * This is calculated as a power-of-two multiple of baseShardCount, so that the expected size of resulting sstables - * is between targetSSTableSizeMin and 2*targetSSTableSizeMin (in other words, sqrt(0.5) * targetSSTableSize and - * sqrt(2) * targetSSTableSize), with a minimum of baseShardCount shards for smaller sstables. + * is between sqrt(0.5) * targetSSTableSize and sqrt(2) * targetSSTableSize, with a minimum of baseShardCount shards + * for smaller sstables. * * Note that to get the sstables resulting from this splitting within the bounds, the density argument must be * normalized to the span that is being split. 
In other words, if no disks are defined, the density should be @@ -206,21 +206,22 @@ public int getNumShards(double localDensity) { // How many we would have to aim for the target size. Divided by the base shard count, so that we can ensure // the result is a multiple of it by multiplying back below. - double count = localDensity / (targetSSTableSizeMin * baseShardCount); + double count = localDensity / (targetSSTableSize * INVERSE_SQRT_2 * baseShardCount); if (count > MAX_SHARD_SPLIT) count = MAX_SHARD_SPLIT; assert !(count < 0); // Must be positive, 0 or NaN, which should translate to baseShardCount - // Make it a power of two multiple of the base count so that split points for lower levels remain split points for higher. + // Make it a power of two multiple of the base count so that split points for lower levels remain split points + // for higher. // The conversion to int and highestOneBit round down, for which we compensate by using the sqrt(0.5) multiplier - // already applied in targetSSTableSizeMin. + // applied above. // Setting the bottom bit to 1 ensures the result is at least baseShardCount. 
int shards = baseShardCount * Integer.highestOneBit((int) count | 1); logger.debug("Shard count {} for density {}, {} times target {}", shards, FBUtilities.prettyPrintBinary(localDensity, "B", " "), - localDensity / targetSSTableSizeMin, - FBUtilities.prettyPrintBinary(targetSSTableSizeMin, "B", " ")); + localDensity / targetSSTableSize, + FBUtilities.prettyPrintBinary(targetSSTableSize, "B", " ")); return shards; } From 41defe6dc85a5295c34b8a724e067ccd978f0dce Mon Sep 17 00:00:00 2001 From: Branimir Lambov Date: Mon, 26 Jun 2023 16:15:42 +0300 Subject: [PATCH 19/27] Fix header --- .../db/compaction/UnifiedCompactionStrategy.java | 14 ++++++++------ .../db/compaction/unified/Controller.java | 14 ++++++++------ .../unified/ShardedCompactionWriter.java | 14 ++++++++------ .../db/compaction/unified/ShardedMultiWriter.java | 14 ++++++++------ .../compaction/unified/UnifiedCompactionTask.java | 14 ++++++++------ .../compaction/UnifiedCompactionStrategyTest.java | 14 ++++++++------ .../db/compaction/unified/ControllerTest.java | 14 ++++++++------ .../unified/ShardedCompactionWriterTest.java | 14 ++++++++------ .../compaction/unified/ShardedMultiWriterTest.java | 14 ++++++++------ 9 files changed, 72 insertions(+), 54 deletions(-) diff --git a/src/java/org/apache/cassandra/db/compaction/UnifiedCompactionStrategy.java b/src/java/org/apache/cassandra/db/compaction/UnifiedCompactionStrategy.java index 60cccda2ddfd..4ee7b4ba21a8 100644 --- a/src/java/org/apache/cassandra/db/compaction/UnifiedCompactionStrategy.java +++ b/src/java/org/apache/cassandra/db/compaction/UnifiedCompactionStrategy.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/src/java/org/apache/cassandra/db/compaction/unified/Controller.java b/src/java/org/apache/cassandra/db/compaction/unified/Controller.java index 4852e05faeb9..0bf2cda664fe 100644 --- a/src/java/org/apache/cassandra/db/compaction/unified/Controller.java +++ b/src/java/org/apache/cassandra/db/compaction/unified/Controller.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/src/java/org/apache/cassandra/db/compaction/unified/ShardedCompactionWriter.java b/src/java/org/apache/cassandra/db/compaction/unified/ShardedCompactionWriter.java index b5ea5ec77821..bede6d2efe8c 100644 --- a/src/java/org/apache/cassandra/db/compaction/unified/ShardedCompactionWriter.java +++ b/src/java/org/apache/cassandra/db/compaction/unified/ShardedCompactionWriter.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/src/java/org/apache/cassandra/db/compaction/unified/ShardedMultiWriter.java b/src/java/org/apache/cassandra/db/compaction/unified/ShardedMultiWriter.java index c2fe1babe3b5..f2a39d7f017a 100644 --- a/src/java/org/apache/cassandra/db/compaction/unified/ShardedMultiWriter.java +++ b/src/java/org/apache/cassandra/db/compaction/unified/ShardedMultiWriter.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. 
+ * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/src/java/org/apache/cassandra/db/compaction/unified/UnifiedCompactionTask.java b/src/java/org/apache/cassandra/db/compaction/unified/UnifiedCompactionTask.java index cc784a584f1b..de62fc9113ba 100644 --- a/src/java/org/apache/cassandra/db/compaction/unified/UnifiedCompactionTask.java +++ b/src/java/org/apache/cassandra/db/compaction/unified/UnifiedCompactionTask.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/test/unit/org/apache/cassandra/db/compaction/UnifiedCompactionStrategyTest.java b/test/unit/org/apache/cassandra/db/compaction/UnifiedCompactionStrategyTest.java index e0e3e4a25c15..d81237b8ffcf 100644 --- a/test/unit/org/apache/cassandra/db/compaction/UnifiedCompactionStrategyTest.java +++ b/test/unit/org/apache/cassandra/db/compaction/UnifiedCompactionStrategyTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/test/unit/org/apache/cassandra/db/compaction/unified/ControllerTest.java b/test/unit/org/apache/cassandra/db/compaction/unified/ControllerTest.java index a0ccd10c7fa1..2b0fab232a2b 100644 --- a/test/unit/org/apache/cassandra/db/compaction/unified/ControllerTest.java +++ b/test/unit/org/apache/cassandra/db/compaction/unified/ControllerTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. 
+ * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/test/unit/org/apache/cassandra/db/compaction/unified/ShardedCompactionWriterTest.java b/test/unit/org/apache/cassandra/db/compaction/unified/ShardedCompactionWriterTest.java index 9ad6f8ed14d3..9eb05dc50392 100644 --- a/test/unit/org/apache/cassandra/db/compaction/unified/ShardedCompactionWriterTest.java +++ b/test/unit/org/apache/cassandra/db/compaction/unified/ShardedCompactionWriterTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/test/unit/org/apache/cassandra/db/compaction/unified/ShardedMultiWriterTest.java b/test/unit/org/apache/cassandra/db/compaction/unified/ShardedMultiWriterTest.java index e5f38dd7fc49..3c1e1181bb93 100644 --- a/test/unit/org/apache/cassandra/db/compaction/unified/ShardedMultiWriterTest.java +++ b/test/unit/org/apache/cassandra/db/compaction/unified/ShardedMultiWriterTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, From 64477084fd813babc6751b4e93f8c3406bc34015 Mon Sep 17 00:00:00 2001 From: Branimir Lambov Date: Mon, 26 Jun 2023 18:45:45 +0300 Subject: [PATCH 20/27] Review comments --- conf/cassandra.yaml | 13 +++ .../config/CassandraRelevantProperties.java | 5 +- .../compaction/UnifiedCompactionStrategy.java | 8 +- .../db/compaction/unified/Controller.java | 84 ++++++++----------- .../UnifiedCompactionStrategyTest.java | 1 - 5 files changed, 55 insertions(+), 56 deletions(-) diff --git a/conf/cassandra.yaml b/conf/cassandra.yaml index 92e092af75da..47fbe5505e6b 100644 --- a/conf/cassandra.yaml +++ b/conf/cassandra.yaml @@ -1002,6 +1002,19 @@ snapshot_links_per_second: 0 # Min unit: KiB column_index_cache_size: 2KiB +# Default compaction strategy, applied when a table's parameters do not +# specify compaction. +# The default is to use SizeTieredCompactionStrategy, with its default +# threshold of 4 sstables. +# The selected compaction strategy will also apply to system tables. +# +# default_compaction: +# class_name: UnifiedCompactionStrategy +# parameters: +# scaling_parameters: T4 +# target_sstable_size: 1GiB + + # Number of simultaneous compactions to allow, NOT including # validation "compactions" for anti-entropy repair. 
Simultaneous # compactions can help preserve read performance in a mixed read/write diff --git a/src/java/org/apache/cassandra/config/CassandraRelevantProperties.java b/src/java/org/apache/cassandra/config/CassandraRelevantProperties.java index 0ea4e2cc01cc..b328b722eb94 100644 --- a/src/java/org/apache/cassandra/config/CassandraRelevantProperties.java +++ b/src/java/org/apache/cassandra/config/CassandraRelevantProperties.java @@ -517,6 +517,7 @@ public enum CassandraRelevantProperties TRIGGERS_DIR("cassandra.triggers_dir"), TRUNCATE_BALLOT_METADATA("cassandra.truncate_ballot_metadata"), TYPE_UDT_CONFLICT_BEHAVIOR("cassandra.type.udt.conflict_behavior"), + // See org.apache.cassandra.db.compaction.unified.Controller for the definition of the UCS parameters UCS_BASE_SHARD_COUNT("unified_compaction.base_shard_count", "4"), UCS_OVERLAP_INCLUSION_METHOD("unified_compaction.overlap_inclusion_method"), UCS_SCALING_PARAMETER("unified_compaction.scaling_parameters", "T4"), @@ -745,9 +746,9 @@ public double getDouble() /** * Gets the value of a system property as a double. - * @return system property long value if it exists, defaultValue otherwise. + * @return system property value if it exists, defaultValue otherwise. 
*/ - public double getLong(double overrideDefaultValue) + public double getDouble(double overrideDefaultValue) { String value = System.getProperty(key); if (value == null) diff --git a/src/java/org/apache/cassandra/db/compaction/UnifiedCompactionStrategy.java b/src/java/org/apache/cassandra/db/compaction/UnifiedCompactionStrategy.java index 4ee7b4ba21a8..f523199d3ae8 100644 --- a/src/java/org/apache/cassandra/db/compaction/UnifiedCompactionStrategy.java +++ b/src/java/org/apache/cassandra/db/compaction/UnifiedCompactionStrategy.java @@ -220,8 +220,6 @@ public AbstractCompactionTask getUserDefinedTask(Collection sstab @Override public synchronized UnifiedCompactionTask getNextBackgroundTask(long gcBefore) { - controller.onStrategyBackgroundTaskRequest(); - while (true) { CompactionPick pick = getNextCompactionPick(gcBefore); @@ -247,9 +245,9 @@ private UnifiedCompactionTask createCompactionTask(CompactionPick pick, long gcB } else { - // This can happen e.g. due to a race with upgrade tasks - logger.error("Failed to submit compaction {} because a transaction could not be created. If this happens frequently, it should be reported", pick); - // FIXME: Needs the sstable removal race fix + // This can happen e.g. due to a race with upgrade tasks. + logger.warn("Failed to submit compaction {} because a transaction could not be created. If this happens frequently, it should be reported", pick); + // This may be an indication of an SSTableReader reference leak. See CASSANDRA-18342. return null; } } diff --git a/src/java/org/apache/cassandra/db/compaction/unified/Controller.java b/src/java/org/apache/cassandra/db/compaction/unified/Controller.java index 0bf2cda664fe..32a3a081fdbb 100644 --- a/src/java/org/apache/cassandra/db/compaction/unified/Controller.java +++ b/src/java/org/apache/cassandra/db/compaction/unified/Controller.java @@ -50,7 +50,8 @@ public class Controller * Higher indexes will use the value of the last index with a W specified. 
*/ final static String SCALING_PARAMETERS_OPTION = "scaling_parameters"; - private final static String DEFAULT_SCALING_PARAMETERS = CassandraRelevantProperties.UCS_SCALING_PARAMETER.getString(); + private final static String DEFAULT_SCALING_PARAMETERS = + CassandraRelevantProperties.UCS_SCALING_PARAMETER.getString(); /** * Override for the flush size in MB. The database should be able to calculate this from executing flushes, this @@ -66,16 +67,19 @@ public class Controller * For others a base count of 1 is used as system tables are usually small and do not need as much compaction * parallelism, while having directories defined provides for parallelism in a different way. */ - public static final int DEFAULT_BASE_SHARD_COUNT = CassandraRelevantProperties.UCS_BASE_SHARD_COUNT.getInt(); + public static final int DEFAULT_BASE_SHARD_COUNT = + CassandraRelevantProperties.UCS_BASE_SHARD_COUNT.getInt(); static final String TARGET_SSTABLE_SIZE_OPTION = "target_sstable_size"; - public static final long DEFAULT_TARGET_SSTABLE_SIZE = CassandraRelevantProperties.UCS_TARGET_SSTABLE_SIZE.getSizeInBytes(); + public static final long DEFAULT_TARGET_SSTABLE_SIZE = + CassandraRelevantProperties.UCS_TARGET_SSTABLE_SIZE.getSizeInBytes(); static final long MIN_TARGET_SSTABLE_SIZE = 1L << 20; /** * This parameter is intended to modify the shape of the LSM by taking into account the survival ratio of data, for now it is fixed to one. 
*/ - static final double DEFAULT_SURVIVAL_FACTOR = CassandraRelevantProperties.UCS_SURVIVAL_FACTOR.getDouble(); + static final double DEFAULT_SURVIVAL_FACTOR = + CassandraRelevantProperties.UCS_SURVIVAL_FACTOR.getDouble(); static final double[] DEFAULT_SURVIVAL_FACTORS = new double[] { DEFAULT_SURVIVAL_FACTOR }; /** @@ -89,7 +93,8 @@ public class Controller static final String MAX_SSTABLES_TO_COMPACT_OPTION = "max_sstables_to_compact"; static final String ALLOW_UNSAFE_AGGRESSIVE_SSTABLE_EXPIRATION_OPTION = "unsafe_aggressive_sstable_expiration"; - static final boolean ALLOW_UNSAFE_AGGRESSIVE_SSTABLE_EXPIRATION = CassandraRelevantProperties.ALLOW_UNSAFE_AGGRESSIVE_SSTABLE_EXPIRATION.getBoolean(); + static final boolean ALLOW_UNSAFE_AGGRESSIVE_SSTABLE_EXPIRATION = + CassandraRelevantProperties.ALLOW_UNSAFE_AGGRESSIVE_SSTABLE_EXPIRATION.getBoolean(); static final boolean DEFAULT_ALLOW_UNSAFE_AGGRESSIVE_SSTABLE_EXPIRATION = false; static final int DEFAULT_EXPIRED_SSTABLE_CHECK_FREQUENCY_SECONDS = 60 * 10; @@ -155,8 +160,10 @@ public class Controller if (ignoreOverlapsInExpirationCheck && !ALLOW_UNSAFE_AGGRESSIVE_SSTABLE_EXPIRATION) { - logger.warn("Not enabling aggressive SSTable expiration, as the system property '" + CassandraRelevantProperties.ALLOW_UNSAFE_AGGRESSIVE_SSTABLE_EXPIRATION.name() + "' is set to 'false'. " + - "Set it to 'true' to enable aggressive SSTable expiration."); + logger.warn("Not enabling aggressive SSTable expiration, as the system property '" + + CassandraRelevantProperties.ALLOW_UNSAFE_AGGRESSIVE_SSTABLE_EXPIRATION.name() + + "' is set to 'false'. 
" + + "Set it to 'true' to enable aggressive SSTable expiration."); } this.ignoreOverlapsInExpirationCheck = ALLOW_UNSAFE_AGGRESSIVE_SSTABLE_EXPIRATION && ignoreOverlapsInExpirationCheck; } @@ -277,28 +284,12 @@ public long getExpiredSSTableCheckFrequency() return expiredSSTableCheckFrequency; } - /** - * The strategy will call this method each time {@link UnifiedCompactionStrategy#getNextBackgroundTask} is called. - */ - public void onStrategyBackgroundTaskRequest() - { - } - - /** - * Returns a maximum bucket index for the given data size and fanout. - */ - private int maxBucketIndex(long totalLength, int fanout) - { - double o = getSurvivalFactor(0); - long m = getFlushSizeBytes(); - return Math.max(0, (int) Math.floor((Math.log(totalLength) - Math.log(m)) / (Math.log(fanout) - Math.log(o)))); - } - public static Controller fromOptions(ColumnFamilyStore cfs, Map options) { int[] Ws = parseScalingParameters(options.getOrDefault(SCALING_PARAMETERS_OPTION, DEFAULT_SCALING_PARAMETERS)); - long flushSizeOverride = FBUtilities.parseHumanReadableBytes(options.getOrDefault(FLUSH_SIZE_OVERRIDE_OPTION, "0MiB")); + long flushSizeOverride = FBUtilities.parseHumanReadableBytes(options.getOrDefault(FLUSH_SIZE_OVERRIDE_OPTION, + "0MiB")); int maxSSTablesToCompact = Integer.parseInt(options.getOrDefault(MAX_SSTABLES_TO_COMPACT_OPTION, "0")); long expiredSSTableCheckFrequency = options.containsKey(EXPIRED_SSTABLE_CHECK_FREQUENCY_SECONDS_OPTION) ? 
Long.parseLong(options.get(EXPIRED_SSTABLE_CHECK_FREQUENCY_SECONDS_OPTION)) @@ -314,19 +305,20 @@ public static Controller fromOptions(ColumnFamilyStore cfs, Map } else { - if (SchemaConstants.isSystemKeyspace(cfs.getKeyspaceName()) || (cfs.getDiskBoundaries().positions != null && cfs.getDiskBoundaries().positions.size() > 1)) + if (SchemaConstants.isSystemKeyspace(cfs.getKeyspaceName()) + || (cfs.getDiskBoundaries().positions != null && cfs.getDiskBoundaries().positions.size() > 1)) baseShardCount = 1; else baseShardCount = DEFAULT_BASE_SHARD_COUNT; } long targetSStableSize = options.containsKey(TARGET_SSTABLE_SIZE_OPTION) - ? FBUtilities.parseHumanReadableBytes(options.get(TARGET_SSTABLE_SIZE_OPTION)) - : DEFAULT_TARGET_SSTABLE_SIZE; + ? FBUtilities.parseHumanReadableBytes(options.get(TARGET_SSTABLE_SIZE_OPTION)) + : DEFAULT_TARGET_SSTABLE_SIZE; Overlaps.InclusionMethod inclusionMethod = options.containsKey(OVERLAP_INCLUSION_METHOD_OPTION) - ? Overlaps.InclusionMethod.valueOf(options.get(OVERLAP_INCLUSION_METHOD_OPTION).toUpperCase()) - : DEFAULT_OVERLAP_INCLUSION_METHOD; + ? 
Overlaps.InclusionMethod.valueOf(options.get(OVERLAP_INCLUSION_METHOD_OPTION).toUpperCase()) + : DEFAULT_OVERLAP_INCLUSION_METHOD; return new Controller(cfs, MonotonicClock.Global.preciseTime, @@ -343,12 +335,6 @@ public static Controller fromOptions(ColumnFamilyStore cfs, Map public static Map validateOptions(Map options) throws ConfigurationException { - String nonPositiveErr = "Invalid configuration, %s should be positive: %d"; - String booleanParseErr = "%s should either be 'true' or 'false', not %s"; - String intParseErr = "%s is not a parsable int (base10) for %s"; - String longParseErr = "%s is not a parsable long (base10) for %s"; - String sizeUnacceptableErr = "%s %s is not acceptable, size must be at least %s"; - String invalidSizeErr = "%s %s is not a valid size in bytes: %s"; options = new HashMap<>(options); String s; @@ -363,13 +349,15 @@ public static Map validateOptions(Map options) t { int numShards = Integer.parseInt(s); if (numShards <= 0) - throw new ConfigurationException(String.format(nonPositiveErr, + throw new ConfigurationException(String.format("Invalid configuration, %s should be positive: %d", BASE_SHARD_COUNT_OPTION, numShards)); } catch (NumberFormatException e) { - throw new ConfigurationException(String.format(intParseErr, s, BASE_SHARD_COUNT_OPTION), e); + throw new ConfigurationException(String.format("%s is not a parsable int (base10) for %s", + s, + BASE_SHARD_COUNT_OPTION), e); } } @@ -381,15 +369,15 @@ public static Map validateOptions(Map options) t long targetSSTableSize = FBUtilities.parseHumanReadableBytes(s); if (targetSSTableSize < MIN_TARGET_SSTABLE_SIZE) { - throw new ConfigurationException(String.format(sizeUnacceptableErr, + throw new ConfigurationException(String.format("%s %s is not acceptable, size must be at least %s", TARGET_SSTABLE_SIZE_OPTION, s, - FBUtilities.prettyPrintBinary(MIN_TARGET_SSTABLE_SIZE, "B", ""))); + FBUtilities.prettyPrintMemory(MIN_TARGET_SSTABLE_SIZE))); } } catch (NumberFormatException e) { 
- throw new ConfigurationException(String.format(invalidSizeErr, + throw new ConfigurationException(String.format("%s %s is not a valid size in bytes: %s", TARGET_SSTABLE_SIZE_OPTION, s, e.getMessage()), @@ -404,14 +392,14 @@ public static Map validateOptions(Map options) t { long flushSize = FBUtilities.parseHumanReadableBytes(s); if (flushSize < MIN_TARGET_SSTABLE_SIZE) - throw new ConfigurationException(String.format(sizeUnacceptableErr, + throw new ConfigurationException(String.format("%s %s is not acceptable, size must be at least %s", FLUSH_SIZE_OVERRIDE_OPTION, s, - FBUtilities.prettyPrintBinary(MIN_TARGET_SSTABLE_SIZE, "B", ""))); + FBUtilities.prettyPrintMemory(MIN_TARGET_SSTABLE_SIZE))); } catch (NumberFormatException e) { - throw new ConfigurationException(String.format(invalidSizeErr, + throw new ConfigurationException(String.format("%s %s is not a valid size in bytes: %s", FLUSH_SIZE_OVERRIDE_OPTION, s, e.getMessage()), @@ -428,7 +416,7 @@ public static Map validateOptions(Map options) t } catch (NumberFormatException e) { - throw new ConfigurationException(String.format(intParseErr, + throw new ConfigurationException(String.format("%s is not a parsable int (base10) for %s", s, MAX_SSTABLES_TO_COMPACT_OPTION), e); @@ -441,13 +429,13 @@ public static Map validateOptions(Map options) t { long expiredSSTableCheckFrequency = Long.parseLong(s); if (expiredSSTableCheckFrequency <= 0) - throw new ConfigurationException(String.format(nonPositiveErr, + throw new ConfigurationException(String.format("Invalid configuration, %s should be positive: %d", EXPIRED_SSTABLE_CHECK_FREQUENCY_SECONDS_OPTION, expiredSSTableCheckFrequency)); } catch (NumberFormatException e) { - throw new ConfigurationException(String.format(longParseErr, + throw new ConfigurationException(String.format("%s is not a parsable long (base10) for %s", s, EXPIRED_SSTABLE_CHECK_FREQUENCY_SECONDS_OPTION), e); @@ -457,7 +445,7 @@ public static Map validateOptions(Map options) t s = 
options.remove(ALLOW_UNSAFE_AGGRESSIVE_SSTABLE_EXPIRATION_OPTION); if (s != null && !s.equalsIgnoreCase("true") && !s.equalsIgnoreCase("false")) { - throw new ConfigurationException(String.format(booleanParseErr, + throw new ConfigurationException(String.format("%s should either be 'true' or 'false', not %s", ALLOW_UNSAFE_AGGRESSIVE_SSTABLE_EXPIRATION_OPTION, s)); } diff --git a/test/unit/org/apache/cassandra/db/compaction/UnifiedCompactionStrategyTest.java b/test/unit/org/apache/cassandra/db/compaction/UnifiedCompactionStrategyTest.java index d81237b8ffcf..a673b27b7ceb 100644 --- a/test/unit/org/apache/cassandra/db/compaction/UnifiedCompactionStrategyTest.java +++ b/test/unit/org/apache/cassandra/db/compaction/UnifiedCompactionStrategyTest.java @@ -388,7 +388,6 @@ public void testLayout(int W, int numSSTables, int maxSSTablesToCompact) List allSstables = new ArrayList<>(); List sstables = mockSSTables(numSSTables, -// minSstableSizeBytes, 0, System.currentTimeMillis(), 0); From fa86c79e24291615919134957955250a68428b02 Mon Sep 17 00:00:00 2001 From: Branimir Lambov Date: Wed, 28 Jun 2023 15:23:46 +0300 Subject: [PATCH 21/27] Review comments --- .../compaction/UnifiedCompactionStrategy.java | 55 ++++++++++++++----- .../unified/ShardedMultiWriter.java | 8 +-- .../apache/cassandra/dht/SplitterTest.java | 13 ++++- 3 files changed, 53 insertions(+), 23 deletions(-) diff --git a/src/java/org/apache/cassandra/db/compaction/UnifiedCompactionStrategy.java b/src/java/org/apache/cassandra/db/compaction/UnifiedCompactionStrategy.java index f523199d3ae8..95e901b7b80b 100644 --- a/src/java/org/apache/cassandra/db/compaction/UnifiedCompactionStrategy.java +++ b/src/java/org/apache/cassandra/db/compaction/UnifiedCompactionStrategy.java @@ -433,7 +433,7 @@ protected synchronized Set getSSTables() } /** - * @return a LinkedHashMap of arenas with buckets where order of arenas are preserved + * @return a list of the levels in the compaction hierarchy */ @VisibleForTesting List 
getLevels() @@ -442,14 +442,14 @@ List getLevels() } /** - * Groups the sstables passed in into arenas and buckets. This is used by the strategy to determine - * new compactions, and by external tools in CNDB to analyze the strategy decisions. + * Groups the sstables passed in into levels. This is used by the strategy to determine + * new compactions, and by external tools to analyze the strategy decisions. * - * @param sstables a collection of the sstables to be assigned to arenas + * @param sstables a collection of the sstables to be assigned to levels * @param compactionFilter a filter to exclude CompactionSSTables, * e.g., {@link #isSuitableForCompaction} * - * @return a map of arenas to their buckets + * @return a list of the levels in the compaction hierarchy */ public List getLevels(Collection sstables, Predicate compactionFilter) @@ -464,13 +464,13 @@ private List formLevels(List suitable) List levels = new ArrayList<>(MAX_LEVELS); suitable.sort(shardManager::compareByDensity); - double maxSize = controller.getMaxLevelDensity(0, controller.getBaseSstableSize(controller.getFanout(0)) / shardManager.localSpaceCoverage()); + double maxDensity = controller.getMaxLevelDensity(0, controller.getBaseSstableSize(controller.getFanout(0)) / shardManager.localSpaceCoverage()); int index = 0; - Level level = new Level(controller, index, 0, maxSize); + Level level = new Level(controller, index, 0, maxDensity); for (SSTableReader candidate : suitable) { - final double size = shardManager.density(candidate); - if (size < level.max) + final double density = shardManager.density(candidate); + if (density < level.max) { level.add(candidate); continue; @@ -482,10 +482,10 @@ private List formLevels(List suitable) while (true) { ++index; - double minSize = maxSize; - maxSize = controller.getMaxLevelDensity(index, minSize); - level = new Level(controller, index, minSize, maxSize); - if (size < level.max) + double minDensity = maxDensity; + maxDensity = 
controller.getMaxLevelDensity(index, minDensity); + level = new Level(controller, index, minDensity, maxDensity); + if (density < level.max) { level.add(candidate); break; @@ -549,7 +549,8 @@ public static class Level final int threshold; // number of SSTables that trigger a compaction final double min; // min density of sstables for this level final double max; // max density of sstables for this level - int maxOverlap = -1; // maximum number of overlapping sstables + int maxOverlap = -1; // maximum number of overlapping sstables, i.e. maximum number of sstables that need + // to be queried on this level for any given key Level(Controller controller, int index, double minSize, double maxSize) { @@ -585,7 +586,13 @@ void complete() } /** - * Return the compaction pick + * Return the compaction pick for this level. + *

+ * This is done by splitting the level into buckets that we can treat as independent regions for compaction. + * We then use the maxOverlap value (i.e. the maximum number of sstables that can contain data for any covered + * key) of each bucket to determine if compactions are needed, and to prioritize the buckets that contribute + * most to the complexity of queries: if maxOverlap is below the level's threshold, no compaction is needed; + * otherwise, we choose one from the buckets that have the highest maxOverlap. */ CompactionPick getCompactionPick(SelectionContext context) { @@ -629,6 +636,24 @@ CompactionPick getCompactionPick(SelectionContext context) return selected; } + /** + * Group the sstables in this level into buckets. + *

+ * The buckets are formed by grouping sstables that overlap at some key together, and then expanded to cover + * any overlapping sstable according to the overlap inclusion method. With the usual TRANSITIVE method this + * results in non-overlapping buckets that can't affect one another and can be compacted in parallel without + * any loss of efficiency. + *

+ * Other overlap inclusion methods are provided to cover situations where we may be okay with compacting + * sstables partially and doing more than the strictly necessary amount of compaction to solve a problem: e.g. + * after an upgrade from LCS where transitive overlap may cause a complete level to be compacted together + * (creating an operation that will take a very long time to complete) and we want to make some progress as + * quickly as possible at the cost of redoing some work. + *

+ * The number of sstables that overlap at some key defines the "overlap" of a set of sstables. The maximum such + * value in the bucket is its "maxOverlap", i.e. the highest number of sstables we need to read to find the + * data associated with a given key. + */ @VisibleForTesting List getBuckets(SelectionContext context) { diff --git a/src/java/org/apache/cassandra/db/compaction/unified/ShardedMultiWriter.java b/src/java/org/apache/cassandra/db/compaction/unified/ShardedMultiWriter.java index f2a39d7f017a..1fff50ad8c72 100644 --- a/src/java/org/apache/cassandra/db/compaction/unified/ShardedMultiWriter.java +++ b/src/java/org/apache/cassandra/db/compaction/unified/ShardedMultiWriter.java @@ -45,13 +45,11 @@ /** * A {@link SSTableMultiWriter} that splits the output sstable at the partition boundaries of the compaction - * shards used by {@link org.apache.cassandra.db.compaction.UnifiedCompactionStrategy} as long as the size of - * the sstable so far is sufficiently large. + * shards used by {@link org.apache.cassandra.db.compaction.UnifiedCompactionStrategy}. *

- * This is class is similar to {@link ShardedMultiWriter} but for flushing. Unfortunately + * This class is similar to {@link ShardedCompactionWriter} but for flushing. Unfortunately * we currently have 2 separate writers hierarchy that are not compatible and so we must - duplicate the functionality of splitting sstables over compaction shards if they have - reached a minimum size. + duplicate the functionality. */ public class ShardedMultiWriter implements SSTableMultiWriter { diff --git a/test/unit/org/apache/cassandra/dht/SplitterTest.java b/test/unit/org/apache/cassandra/dht/SplitterTest.java index 230427e50e90..1de22ff8fc69 100644 --- a/test/unit/org/apache/cassandra/dht/SplitterTest.java +++ b/test/unit/org/apache/cassandra/dht/SplitterTest.java @@ -90,16 +90,23 @@ public void testWithWeight() List ranges = new ArrayList<>(); ranges.add(new Splitter.WeightedRange(1.0, t(0, 10))); ranges.add(new Splitter.WeightedRange(1.0, t(20, 30))); - ranges.add(new Splitter.WeightedRange(0.5, t(40, 60))); + ranges.add(new Splitter.WeightedRange(1.0, t(40, 50))); - List ranges2 = new ArrayList<>(); + List ranges2 = new ArrayList<>(); // same total coverage, split point inside weight-1 range ranges2.add(new Splitter.WeightedRange(1.0, t(0, 10))); ranges2.add(new Splitter.WeightedRange(1.0, t(20, 30))); - ranges2.add(new Splitter.WeightedRange(1.0, t(40, 50))); + ranges2.add(new Splitter.WeightedRange(0.5, t(40, 60))); + + List ranges3 = new ArrayList<>(); // same total coverage, split point inside weight-0.5 range + ranges3.add(new Splitter.WeightedRange(1.0, t(0, 10))); + ranges3.add(new Splitter.WeightedRange(0.5, t(15, 35))); + ranges3.add(new Splitter.WeightedRange(1.0, t(40, 50))); + IPartitioner partitioner = Murmur3Partitioner.instance; Splitter splitter = partitioner.splitter().get(); assertEquals(splitter.splitOwnedRanges(2, ranges, false), splitter.splitOwnedRanges(2, ranges2, false)); + assertEquals(splitter.splitOwnedRanges(2, ranges, false), 
splitter.splitOwnedRanges(2, ranges3, false)); } @Test From 08384a9916c8e3a3090405131ac68f9f3fe2c3f7 Mon Sep 17 00:00:00 2001 From: Branimir Lambov Date: Thu, 6 Jul 2023 15:41:44 +0300 Subject: [PATCH 22/27] Switch to K instead of k for kilo Fix test failures --- .../apache/cassandra/utils/FBUtilities.java | 8 +- .../org/apache/cassandra/cql3/CQLTester.java | 7 ++ .../db/compaction/CancelCompactionsTest.java | 3 +- .../db/compaction/OneCompactionTest.java | 7 +- .../db/compaction/TTLExpiryTest.java | 5 + .../io/compress/CQLCompressionTest.java | 13 +-- .../tools/SSTablePartitionsTest.java | 102 +++++++++--------- .../tools/nodetool/CompactionHistoryTest.java | 3 +- .../cassandra/utils/FBUtilitiesTest.java | 6 +- 9 files changed, 85 insertions(+), 69 deletions(-) diff --git a/src/java/org/apache/cassandra/utils/FBUtilities.java b/src/java/org/apache/cassandra/utils/FBUtilities.java index 96a756869380..6b2ddb0f8658 100644 --- a/src/java/org/apache/cassandra/utils/FBUtilities.java +++ b/src/java/org/apache/cassandra/utils/FBUtilities.java @@ -829,14 +829,14 @@ public static CloseableIterator closeableIterator(Iterator iterator) return new WrappedCloseableIterator(iterator); } - final static String UNIT_PREFIXES = "qryzafpnum kMGTPEZYRQ"; + final static String UNIT_PREFIXES = "qryzafpnum KMGTPEZYRQ"; final static int UNIT_PREFIXES_BASE = UNIT_PREFIXES.indexOf(' '); final static Pattern BASE_NUMBER_PATTERN = Pattern.compile("NaN|[+-]?Infinity|[+-]?\\d+(\\.\\d+)?([eE]([+-]?)\\d+)?"); final static Pattern BINARY_EXPONENT = Pattern.compile("\\*2\\^([+-]?\\d+)"); /** * Convert the given size in bytes to a human-readable value using binary (i.e. 2^10-based) modifiers. - * For example, 1.000kiB, 2.100GiB etc., up to 8.000 EiB. + * For example, 1.000KiB, 2.100GiB etc., up to 8.000 EiB. * @param size Number to convert. 
*/ public static String prettyPrintMemory(long size) @@ -846,7 +846,7 @@ public static String prettyPrintMemory(long size) /** * Convert the given size in bytes to a human-readable value using binary (i.e. 2^10-based) modifiers. - * For example, 1.000kiB, 2.100GiB etc., up to 8.000 EiB. + * For example, 1.000KiB, 2.100GiB etc., up to 8.000 EiB. * @param size Number to convert. * @param separator Separator between the number and the (modified) unit. */ @@ -866,7 +866,7 @@ public static String prettyPrintMemory(long size, String separator) * Convert the given value to a human-readable string using binary (i.e. 2^10-based) modifiers. * If the number is outside the modifier range (i.e. < 1 qi or > 1 Qi), it will be printed as v*2^e where e is a * multiple of 10 with sign. - * For example, 1.000kiB, 2.100 miB/s, 7.006*2^+150, -Infinity. + * For example, 1.000KiB, 2.100 miB/s, 7.006*2^+150, -Infinity. * @param value Number to convert. * @param separator Separator between the number and the (modified) unit. */ diff --git a/test/unit/org/apache/cassandra/cql3/CQLTester.java b/test/unit/org/apache/cassandra/cql3/CQLTester.java index c2e40d6f32de..b2097617ec7c 100644 --- a/test/unit/org/apache/cassandra/cql3/CQLTester.java +++ b/test/unit/org/apache/cassandra/cql3/CQLTester.java @@ -764,6 +764,13 @@ public void compact(String keyspace, String table1, String... 
tables) store.forceMajorCompaction(); } + public void forceCompactAll() + { + ColumnFamilyStore store = getCurrentColumnFamilyStore(); + if (store != null) + FBUtilities.waitOnFuture(Util.compactAll(store, FBUtilities.nowInSeconds())); + } + public void disableCompaction() { disableCompaction(KEYSPACE); diff --git a/test/unit/org/apache/cassandra/db/compaction/CancelCompactionsTest.java b/test/unit/org/apache/cassandra/db/compaction/CancelCompactionsTest.java index 4a50d2de5d2d..ebddac7bf3f1 100644 --- a/test/unit/org/apache/cassandra/db/compaction/CancelCompactionsTest.java +++ b/test/unit/org/apache/cassandra/db/compaction/CancelCompactionsTest.java @@ -468,7 +468,8 @@ public void testStandardCompactionTaskCancellation() throws Throwable for (int i = 0; i < 10; i++) { - execute("insert into %s (id, something) values (?,?)", i, i); + for (int j = 0; j < 3; ++j) // write more than once to ensure overlap for UCS + execute("insert into %s (id, something) values (?,?)", i * (j+1), i + j); flush(); } AbstractCompactionTask ct = null; diff --git a/test/unit/org/apache/cassandra/db/compaction/OneCompactionTest.java b/test/unit/org/apache/cassandra/db/compaction/OneCompactionTest.java index 9bb2abd0d0a2..090038146842 100644 --- a/test/unit/org/apache/cassandra/db/compaction/OneCompactionTest.java +++ b/test/unit/org/apache/cassandra/db/compaction/OneCompactionTest.java @@ -33,6 +33,7 @@ import org.apache.cassandra.schema.CompactionParams; import org.apache.cassandra.schema.KeyspaceParams; import org.apache.cassandra.utils.ByteBufferUtil; +import org.apache.cassandra.utils.FBUtilities; import static org.junit.Assert.assertEquals; @@ -64,17 +65,17 @@ private void testCompaction(String columnFamilyName, int insertsPerTable) Set inserted = new HashSet<>(); for (int j = 0; j < insertsPerTable; j++) { String key = String.valueOf(j); - new RowUpdateBuilder(store.metadata(), j, key) + new RowUpdateBuilder(store.metadata(), j, key) .clustering("0") .add("val", 
ByteBufferUtil.EMPTY_BYTE_BUFFER) .build() .applyUnsafe(); - inserted.add(key); + inserted.add(key); Util.flush(store); assertEquals(inserted.size(), Util.getAll(Util.cmd(store).build()).size()); } - CompactionManager.instance.performMaximal(store, false); + FBUtilities.waitOnFuture(Util.compactAll(store, FBUtilities.nowInSeconds())); assertEquals(1, store.getLiveSSTables().size()); } diff --git a/test/unit/org/apache/cassandra/db/compaction/TTLExpiryTest.java b/test/unit/org/apache/cassandra/db/compaction/TTLExpiryTest.java index 82d660e03566..e7e97bed9988 100644 --- a/test/unit/org/apache/cassandra/db/compaction/TTLExpiryTest.java +++ b/test/unit/org/apache/cassandra/db/compaction/TTLExpiryTest.java @@ -240,6 +240,11 @@ public void testNoExpire() throws InterruptedException, IOException .add("col311", ByteBufferUtil.EMPTY_BYTE_BUFFER) .build() .applyUnsafe(); + // also write to other key to ensure overlap for UCS + new RowUpdateBuilder(cfs.metadata(), timestamp, 1, key) + .add("col7", ByteBufferUtil.EMPTY_BYTE_BUFFER) + .build() + .applyUnsafe(); Util.flush(cfs); Thread.sleep(2000); // wait for ttl to expire diff --git a/test/unit/org/apache/cassandra/io/compress/CQLCompressionTest.java b/test/unit/org/apache/cassandra/io/compress/CQLCompressionTest.java index 108c70fb24c5..f011f8a3b418 100644 --- a/test/unit/org/apache/cassandra/io/compress/CQLCompressionTest.java +++ b/test/unit/org/apache/cassandra/io/compress/CQLCompressionTest.java @@ -24,6 +24,7 @@ import org.junit.BeforeClass; import org.junit.Test; +import org.apache.cassandra.Util; import org.apache.cassandra.config.Config; import org.apache.cassandra.config.DatabaseDescriptor; import org.apache.cassandra.cql3.CQLTester; @@ -117,7 +118,7 @@ public void lz4FlushTest() throws Throwable }); // Should compact to LZ4 "fast" - compact(); + forceCompactAll(); sstables = store.getLiveSSTables(); assertEquals(sstables.size(), 1); @@ -142,7 +143,7 @@ public void lz4hcFlushTest() throws Throwable }); // Should 
compact to LZ4 "high" mode - compact(); + forceCompactAll(); sstables = store.getLiveSSTables(); assertEquals(sstables.size(), 1); @@ -165,7 +166,7 @@ public void zstdFlushTest() throws Throwable }); // Should compact to Zstd - compact(); + forceCompactAll(); sstables = store.getLiveSSTables(); assertEquals(sstables.size(), 1); @@ -187,7 +188,7 @@ public void deflateFlushTest() throws Throwable }); // Should compact to Deflate - compact(); + forceCompactAll(); sstables = store.getLiveSSTables(); assertEquals(sstables.size(), 1); @@ -210,7 +211,7 @@ public void useNoCompressorOnFlushTest() throws Throwable }); // Should compact to LZ4 - compact(); + forceCompactAll(); sstables = store.getLiveSSTables(); assertEquals(sstables.size(), 1); @@ -247,7 +248,7 @@ public void zstdTableFlushTest() throws Throwable }); // Should compact to Zstd - compact(); + forceCompactAll(); sstables = store.getLiveSSTables(); assertEquals(sstables.size(), 1); diff --git a/test/unit/org/apache/cassandra/tools/SSTablePartitionsTest.java b/test/unit/org/apache/cassandra/tools/SSTablePartitionsTest.java index d5bc8a59d93d..7899a79341b3 100644 --- a/test/unit/org/apache/cassandra/tools/SSTablePartitionsTest.java +++ b/test/unit/org/apache/cassandra/tools/SSTablePartitionsTest.java @@ -41,21 +41,21 @@ public class SSTablePartitionsTest extends OfflineToolUtils { private static final String SSTABLE_1 = sstable("legacy_ma_simple"); private static final String SSTABLE_2 = sstable("legacy_ma_clust"); - private static final String HEADER_1 = "\nProcessing #1 (big-ma) (0.169 KiB uncompressed, 0.086 KiB on disk)\n"; + private static final String HEADER_1 = "\nProcessing #1 (big-ma) (173 B uncompressed, 88 B on disk)\n"; private static final String HEADER_2 = "\nProcessing #1 (big-ma) (328.145 KiB uncompressed, 5.096 KiB on disk)\n"; - private static final String BACKUPS_HEADER_1 = "\nProcessing Backup:backups #1 (big-ma) (0.169 KiB uncompressed, 0.086 KiB on disk)\n"; + private static final String 
BACKUPS_HEADER_1 = "\nProcessing Backup:backups #1 (big-ma) (173 B uncompressed, 88 B on disk)\n"; private static final String BACKUPS_HEADER_2 = "\nProcessing Backup:backups #1 (big-ma) (328.145 KiB uncompressed, 5.096 KiB on disk)\n"; - private static final String SNAPSHOTS_HEADER_1 = "\nProcessing Snapshot:snapshot-1 #1 (big-ma) (0.169 KiB uncompressed, 0.086 KiB on disk)\n"; + private static final String SNAPSHOTS_HEADER_1 = "\nProcessing Snapshot:snapshot-1 #1 (big-ma) (173 B uncompressed, 88 B on disk)\n"; private static final String SNAPSHOTS_HEADER_2 = "\nProcessing Snapshot:snapshot-1 #1 (big-ma) (328.145 KiB uncompressed, 5.096 KiB on disk)\n"; private static final String SUMMARY_1 = " Partition size Row count Cell count Tombstone count\n" + - " ~p50 0.034 KiB 1 1 0\n" + - " ~p75 0.034 KiB 1 1 0\n" + - " ~p90 0.034 KiB 1 1 0\n" + - " ~p95 0.034 KiB 1 1 0\n" + - " ~p99 0.034 KiB 1 1 0\n" + - " ~p999 0.034 KiB 1 1 0\n" + - " min 0.032 KiB 1 1 0\n" + - " max 0.034 KiB 1 1 0\n" + + " ~p50 35 B 1 1 0\n" + + " ~p75 35 B 1 1 0\n" + + " ~p90 35 B 1 1 0\n" + + " ~p95 35 B 1 1 0\n" + + " ~p99 35 B 1 1 0\n" + + " ~p999 35 B 1 1 0\n" + + " min 33 B 1 1 0\n" + + " max 35 B 1 1 0\n" + " count 5\n"; private static final String SUMMARY_2 = " Partition size Row count Cell count Tombstone count\n" + " ~p50 71.735 KiB 50 50 0\n" + @@ -208,14 +208,14 @@ private static void testPartitionsOnly(String option) assertThatToolSucceds(SSTABLE_1, option) .isEqualTo(HEADER_1 + " Partition size\n" + - " ~p50 0.034 KiB\n" + - " ~p75 0.034 KiB\n" + - " ~p90 0.034 KiB\n" + - " ~p95 0.034 KiB\n" + - " ~p99 0.034 KiB\n" + - " ~p999 0.034 KiB\n" + - " min 0.032 KiB\n" + - " max 0.034 KiB\n" + + " ~p50 35 B\n" + + " ~p75 35 B\n" + + " ~p90 35 B\n" + + " ~p95 35 B\n" + + " ~p99 35 B\n" + + " ~p999 35 B\n" + + " min 33 B\n" + + " max 35 B\n" + " count 5\n"); assertThatToolSucceds(SSTABLE_2, "--partitions-only") @@ -257,10 +257,10 @@ private static void testMinSize(String option) " Keys: 0 1 2 
3 4\n" + SUMMARY_2 + HEADER_1 + - " Partition: '1' (31) live, size: 0.034 KiB, rows: 1, cells: 1, tombstones: 0 (row:0, range:0, complex:0, cell:0, row-TTLd:0, cell-TTLd:0)\n" + - " Partition: '2' (32) live, size: 0.034 KiB, rows: 1, cells: 1, tombstones: 0 (row:0, range:0, complex:0, cell:0, row-TTLd:0, cell-TTLd:0)\n" + - " Partition: '3' (33) live, size: 0.034 KiB, rows: 1, cells: 1, tombstones: 0 (row:0, range:0, complex:0, cell:0, row-TTLd:0, cell-TTLd:0)\n" + - " Partition: '4' (34) live, size: 0.034 KiB, rows: 1, cells: 1, tombstones: 0 (row:0, range:0, complex:0, cell:0, row-TTLd:0, cell-TTLd:0)\n" + + " Partition: '1' (31) live, size: 35 B, rows: 1, cells: 1, tombstones: 0 (row:0, range:0, complex:0, cell:0, row-TTLd:0, cell-TTLd:0)\n" + + " Partition: '2' (32) live, size: 35 B, rows: 1, cells: 1, tombstones: 0 (row:0, range:0, complex:0, cell:0, row-TTLd:0, cell-TTLd:0)\n" + + " Partition: '3' (33) live, size: 35 B, rows: 1, cells: 1, tombstones: 0 (row:0, range:0, complex:0, cell:0, row-TTLd:0, cell-TTLd:0)\n" + + " Partition: '4' (34) live, size: 35 B, rows: 1, cells: 1, tombstones: 0 (row:0, range:0, complex:0, cell:0, row-TTLd:0, cell-TTLd:0)\n" + "Summary of #1 (big-ma):\n" + " File: " + SSTABLE_1 + "\n" + " 4 partitions match\n" + @@ -343,11 +343,11 @@ private static void testMinCells(String option) " Keys: 0 1 2 3 4\n" + SUMMARY_2 + HEADER_1 + - " Partition: '0' (30) live, size: 0.032 KiB, rows: 1, cells: 1, tombstones: 0 (row:0, range:0, complex:0, cell:0, row-TTLd:0, cell-TTLd:0)\n" + - " Partition: '1' (31) live, size: 0.034 KiB, rows: 1, cells: 1, tombstones: 0 (row:0, range:0, complex:0, cell:0, row-TTLd:0, cell-TTLd:0)\n" + - " Partition: '2' (32) live, size: 0.034 KiB, rows: 1, cells: 1, tombstones: 0 (row:0, range:0, complex:0, cell:0, row-TTLd:0, cell-TTLd:0)\n" + - " Partition: '3' (33) live, size: 0.034 KiB, rows: 1, cells: 1, tombstones: 0 (row:0, range:0, complex:0, cell:0, row-TTLd:0, cell-TTLd:0)\n" + - " Partition: '4' (34) live, 
size: 0.034 KiB, rows: 1, cells: 1, tombstones: 0 (row:0, range:0, complex:0, cell:0, row-TTLd:0, cell-TTLd:0)\n" + + " Partition: '0' (30) live, size: 33 B, rows: 1, cells: 1, tombstones: 0 (row:0, range:0, complex:0, cell:0, row-TTLd:0, cell-TTLd:0)\n" + + " Partition: '1' (31) live, size: 35 B, rows: 1, cells: 1, tombstones: 0 (row:0, range:0, complex:0, cell:0, row-TTLd:0, cell-TTLd:0)\n" + + " Partition: '2' (32) live, size: 35 B, rows: 1, cells: 1, tombstones: 0 (row:0, range:0, complex:0, cell:0, row-TTLd:0, cell-TTLd:0)\n" + + " Partition: '3' (33) live, size: 35 B, rows: 1, cells: 1, tombstones: 0 (row:0, range:0, complex:0, cell:0, row-TTLd:0, cell-TTLd:0)\n" + + " Partition: '4' (34) live, size: 35 B, rows: 1, cells: 1, tombstones: 0 (row:0, range:0, complex:0, cell:0, row-TTLd:0, cell-TTLd:0)\n" + "Summary of #1 (big-ma):\n" + " File: " + SSTABLE_1 + "\n" + " 5 partitions match\n" + @@ -397,11 +397,11 @@ private static void testMinRows(String option) " Keys: 0 1 2 3 4\n" + SUMMARY_2 + HEADER_1 + - " Partition: '0' (30) live, size: 0.032 KiB, rows: 1, cells: 1, tombstones: 0 (row:0, range:0, complex:0, cell:0, row-TTLd:0, cell-TTLd:0)\n" + - " Partition: '1' (31) live, size: 0.034 KiB, rows: 1, cells: 1, tombstones: 0 (row:0, range:0, complex:0, cell:0, row-TTLd:0, cell-TTLd:0)\n" + - " Partition: '2' (32) live, size: 0.034 KiB, rows: 1, cells: 1, tombstones: 0 (row:0, range:0, complex:0, cell:0, row-TTLd:0, cell-TTLd:0)\n" + - " Partition: '3' (33) live, size: 0.034 KiB, rows: 1, cells: 1, tombstones: 0 (row:0, range:0, complex:0, cell:0, row-TTLd:0, cell-TTLd:0)\n" + - " Partition: '4' (34) live, size: 0.034 KiB, rows: 1, cells: 1, tombstones: 0 (row:0, range:0, complex:0, cell:0, row-TTLd:0, cell-TTLd:0)\n" + + " Partition: '0' (30) live, size: 33 B, rows: 1, cells: 1, tombstones: 0 (row:0, range:0, complex:0, cell:0, row-TTLd:0, cell-TTLd:0)\n" + + " Partition: '1' (31) live, size: 35 B, rows: 1, cells: 1, tombstones: 0 (row:0, range:0, complex:0, 
cell:0, row-TTLd:0, cell-TTLd:0)\n" + + " Partition: '2' (32) live, size: 35 B, rows: 1, cells: 1, tombstones: 0 (row:0, range:0, complex:0, cell:0, row-TTLd:0, cell-TTLd:0)\n" + + " Partition: '3' (33) live, size: 35 B, rows: 1, cells: 1, tombstones: 0 (row:0, range:0, complex:0, cell:0, row-TTLd:0, cell-TTLd:0)\n" + + " Partition: '4' (34) live, size: 35 B, rows: 1, cells: 1, tombstones: 0 (row:0, range:0, complex:0, cell:0, row-TTLd:0, cell-TTLd:0)\n" + "Summary of #1 (big-ma):\n" + " File: " + SSTABLE_1 + "\n" + " 5 partitions match\n" + @@ -451,11 +451,11 @@ private static void testMinTombstones(String option) " Keys: 0 1 2 3 4\n" + SUMMARY_2 + HEADER_1 + - " Partition: '0' (30) live, size: 0.032 KiB, rows: 1, cells: 1, tombstones: 0 (row:0, range:0, complex:0, cell:0, row-TTLd:0, cell-TTLd:0)\n" + - " Partition: '1' (31) live, size: 0.034 KiB, rows: 1, cells: 1, tombstones: 0 (row:0, range:0, complex:0, cell:0, row-TTLd:0, cell-TTLd:0)\n" + - " Partition: '2' (32) live, size: 0.034 KiB, rows: 1, cells: 1, tombstones: 0 (row:0, range:0, complex:0, cell:0, row-TTLd:0, cell-TTLd:0)\n" + - " Partition: '3' (33) live, size: 0.034 KiB, rows: 1, cells: 1, tombstones: 0 (row:0, range:0, complex:0, cell:0, row-TTLd:0, cell-TTLd:0)\n" + - " Partition: '4' (34) live, size: 0.034 KiB, rows: 1, cells: 1, tombstones: 0 (row:0, range:0, complex:0, cell:0, row-TTLd:0, cell-TTLd:0)\n" + + " Partition: '0' (30) live, size: 33 B, rows: 1, cells: 1, tombstones: 0 (row:0, range:0, complex:0, cell:0, row-TTLd:0, cell-TTLd:0)\n" + + " Partition: '1' (31) live, size: 35 B, rows: 1, cells: 1, tombstones: 0 (row:0, range:0, complex:0, cell:0, row-TTLd:0, cell-TTLd:0)\n" + + " Partition: '2' (32) live, size: 35 B, rows: 1, cells: 1, tombstones: 0 (row:0, range:0, complex:0, cell:0, row-TTLd:0, cell-TTLd:0)\n" + + " Partition: '3' (33) live, size: 35 B, rows: 1, cells: 1, tombstones: 0 (row:0, range:0, complex:0, cell:0, row-TTLd:0, cell-TTLd:0)\n" + + " Partition: '4' (34) live, size: 
35 B, rows: 1, cells: 1, tombstones: 0 (row:0, range:0, complex:0, cell:0, row-TTLd:0, cell-TTLd:0)\n" + "Summary of #1 (big-ma):\n" + " File: " + SSTABLE_1 + "\n" + " 5 partitions match\n" + @@ -534,8 +534,8 @@ private static void testIncludedKeys(String option) { assertThatToolSucceds(SSTABLE_1, "--min-size", "0", option, "1", option, "3") .contains(HEADER_1 + - " Partition: '1' (31) live, size: 0.034 KiB, rows: 1, cells: 1, tombstones: 0 (row:0, range:0, complex:0, cell:0, row-TTLd:0, cell-TTLd:0)\n" + - " Partition: '3' (33) live, size: 0.034 KiB, rows: 1, cells: 1, tombstones: 0 (row:0, range:0, complex:0, cell:0, row-TTLd:0, cell-TTLd:0)\n" + + " Partition: '1' (31) live, size: 35 B, rows: 1, cells: 1, tombstones: 0 (row:0, range:0, complex:0, cell:0, row-TTLd:0, cell-TTLd:0)\n" + + " Partition: '3' (33) live, size: 35 B, rows: 1, cells: 1, tombstones: 0 (row:0, range:0, complex:0, cell:0, row-TTLd:0, cell-TTLd:0)\n" + "Summary of #1 (big-ma):\n" + " File: " + SSTABLE_1 + "\n" + " 2 partitions match\n" + @@ -544,9 +544,9 @@ private static void testIncludedKeys(String option) assertThatToolSucceds(SSTABLE_1, "--min-size", "0", option, "0", option, "2", option, "4") .contains(HEADER_1 + - " Partition: '0' (30) live, size: 0.032 KiB, rows: 1, cells: 1, tombstones: 0 (row:0, range:0, complex:0, cell:0, row-TTLd:0, cell-TTLd:0)\n" + - " Partition: '2' (32) live, size: 0.034 KiB, rows: 1, cells: 1, tombstones: 0 (row:0, range:0, complex:0, cell:0, row-TTLd:0, cell-TTLd:0)\n" + - " Partition: '4' (34) live, size: 0.034 KiB, rows: 1, cells: 1, tombstones: 0 (row:0, range:0, complex:0, cell:0, row-TTLd:0, cell-TTLd:0)\n" + + " Partition: '0' (30) live, size: 33 B, rows: 1, cells: 1, tombstones: 0 (row:0, range:0, complex:0, cell:0, row-TTLd:0, cell-TTLd:0)\n" + + " Partition: '2' (32) live, size: 35 B, rows: 1, cells: 1, tombstones: 0 (row:0, range:0, complex:0, cell:0, row-TTLd:0, cell-TTLd:0)\n" + + " Partition: '4' (34) live, size: 35 B, rows: 1, cells: 1, 
tombstones: 0 (row:0, range:0, complex:0, cell:0, row-TTLd:0, cell-TTLd:0)\n" + "Summary of #1 (big-ma):\n" + " File: " + SSTABLE_1 + "\n" + " 3 partitions match\n" + @@ -555,9 +555,9 @@ private static void testIncludedKeys(String option) assertThatToolSucceds(SSTABLE_1, "-y","--min-size", "0", option, "0", option, "2", option, "4") .contains(HEADER_1 + - " Partition: '0' (30) live, size: 0.032 KiB\n" + - " Partition: '2' (32) live, size: 0.034 KiB\n" + - " Partition: '4' (34) live, size: 0.034 KiB\n" + + " Partition: '0' (30) live, size: 33 B\n" + + " Partition: '2' (32) live, size: 35 B\n" + + " Partition: '4' (34) live, size: 35 B\n" + "Summary of #1 (big-ma):\n" + " File: " + SSTABLE_1 + "\n" + " 3 partitions match\n" + @@ -579,9 +579,9 @@ private static void testExcludedKeys(String option) { assertThatToolSucceds(SSTABLE_1, "--min-size", "0", option, "1", option, "3") .contains(HEADER_1 + - " Partition: '0' (30) live, size: 0.032 KiB, rows: 1, cells: 1, tombstones: 0 (row:0, range:0, complex:0, cell:0, row-TTLd:0, cell-TTLd:0)\n" + - " Partition: '2' (32) live, size: 0.034 KiB, rows: 1, cells: 1, tombstones: 0 (row:0, range:0, complex:0, cell:0, row-TTLd:0, cell-TTLd:0)\n" + - " Partition: '4' (34) live, size: 0.034 KiB, rows: 1, cells: 1, tombstones: 0 (row:0, range:0, complex:0, cell:0, row-TTLd:0, cell-TTLd:0)\n" + + " Partition: '0' (30) live, size: 33 B, rows: 1, cells: 1, tombstones: 0 (row:0, range:0, complex:0, cell:0, row-TTLd:0, cell-TTLd:0)\n" + + " Partition: '2' (32) live, size: 35 B, rows: 1, cells: 1, tombstones: 0 (row:0, range:0, complex:0, cell:0, row-TTLd:0, cell-TTLd:0)\n" + + " Partition: '4' (34) live, size: 35 B, rows: 1, cells: 1, tombstones: 0 (row:0, range:0, complex:0, cell:0, row-TTLd:0, cell-TTLd:0)\n" + "Summary of #1 (big-ma):\n" + " File: " + SSTABLE_1 + "\n" + " 3 partitions match\n" + @@ -590,8 +590,8 @@ private static void testExcludedKeys(String option) assertThatToolSucceds(SSTABLE_1, "--min-size", "0", option, "0", option, 
"2", option, "4") .contains(HEADER_1 + - " Partition: '1' (31) live, size: 0.034 KiB, rows: 1, cells: 1, tombstones: 0 (row:0, range:0, complex:0, cell:0, row-TTLd:0, cell-TTLd:0)\n" + - " Partition: '3' (33) live, size: 0.034 KiB, rows: 1, cells: 1, tombstones: 0 (row:0, range:0, complex:0, cell:0, row-TTLd:0, cell-TTLd:0)\n" + + " Partition: '1' (31) live, size: 35 B, rows: 1, cells: 1, tombstones: 0 (row:0, range:0, complex:0, cell:0, row-TTLd:0, cell-TTLd:0)\n" + + " Partition: '3' (33) live, size: 35 B, rows: 1, cells: 1, tombstones: 0 (row:0, range:0, complex:0, cell:0, row-TTLd:0, cell-TTLd:0)\n" + "Summary of #1 (big-ma):\n" + " File: " + SSTABLE_1 + "\n" + " 2 partitions match\n" + diff --git a/test/unit/org/apache/cassandra/tools/nodetool/CompactionHistoryTest.java b/test/unit/org/apache/cassandra/tools/nodetool/CompactionHistoryTest.java index 804b75aab268..7dbda7768454 100644 --- a/test/unit/org/apache/cassandra/tools/nodetool/CompactionHistoryTest.java +++ b/test/unit/org/apache/cassandra/tools/nodetool/CompactionHistoryTest.java @@ -85,7 +85,8 @@ public void testCompactionProperties() throws Throwable // write SSTables for the specific key for (int i = 0; i < 10; i++) { - execute("INSERT INTO %s (id, value) VALUES (?, ?)", "key" + i, "value" + i); + for (int j = 0; j < 3; j++) // write more than once to ensure overlap for UCS + execute("INSERT INTO %s (id, value) VALUES (?, ?)", "key" + i + j, "value" + i + j); flush(keyspace()); } diff --git a/test/unit/org/apache/cassandra/utils/FBUtilitiesTest.java b/test/unit/org/apache/cassandra/utils/FBUtilitiesTest.java index ea1a9bf6f889..cf54a6223b14 100644 --- a/test/unit/org/apache/cassandra/utils/FBUtilitiesTest.java +++ b/test/unit/org/apache/cassandra/utils/FBUtilitiesTest.java @@ -267,8 +267,8 @@ public void testPrettyPrintAndParse() { String[] tests = new String[]{ "1", "", "", "1", - "1k", "", "", "1e3", - "1 kiB", " ", "B", "1024", + "1K", "", "", "1e3", + "1 KiB", " ", "B", "1024", "10 B/s", " ", 
"B/s", "10", "10.2 MiB/s", null, "B/s", "10695475.2", "10e+5", "", "", "10e5", @@ -288,7 +288,7 @@ public void testPrettyPrintAndParse() "-876ns", "", "s", "-876e-9", Long.toString(Long.MAX_VALUE), null, null, Long.toString(Long.MAX_VALUE), Long.toString(Long.MIN_VALUE), null, null, Long.toString(Long.MIN_VALUE), - "Infinity kg", " ", "kg", "+Infinity", + "Infinity Kg", " ", "Kg", "+Infinity", "NaN", "", "", "NaN", "-Infinity", "", "", "-Infinity", }; From a38d50f44b1803c4bac537b65a0b4c5a74ccaeb5 Mon Sep 17 00:00:00 2001 From: Branimir Lambov Date: Wed, 5 Jul 2023 10:55:27 +0300 Subject: [PATCH 23/27] Tweak flaky test for more information --- .../compaction/CorruptedSSTablesCompactionsTest.java | 11 ++++++----- .../cassandra/io/compress/CQLCompressionTest.java | 1 - 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/test/unit/org/apache/cassandra/db/compaction/CorruptedSSTablesCompactionsTest.java b/test/unit/org/apache/cassandra/db/compaction/CorruptedSSTablesCompactionsTest.java index 774055bdd402..26adfb108ca8 100644 --- a/test/unit/org/apache/cassandra/db/compaction/CorruptedSSTablesCompactionsTest.java +++ b/test/unit/org/apache/cassandra/db/compaction/CorruptedSSTablesCompactionsTest.java @@ -44,10 +44,12 @@ import org.apache.cassandra.db.*; import org.apache.cassandra.db.marshal.LongType; import org.apache.cassandra.exceptions.ConfigurationException; +import org.apache.cassandra.io.sstable.CorruptSSTableException; import org.apache.cassandra.io.sstable.format.SSTableReader; import org.apache.cassandra.io.util.File; import org.apache.cassandra.io.util.FileUtils; import org.apache.cassandra.schema.*; +import org.apache.cassandra.utils.Throwables; import static org.junit.Assert.assertTrue; @@ -223,16 +225,15 @@ public void testCorruptedSSTables(String tableName) throws Exception try { cfs.forceMajorCompaction(); + break; } catch (Exception e) { - // kind of a hack since we're not specifying just CorruptSSTableExceptions, or (what we actually expect) 
- // an ExecutionException wrapping a CSSTE. This is probably Good Enough though, since if there are - // other errors in compaction presumably the other tests would bring that to light. + // This is the expected path. The SSTable should be marked corrupted, and retrying the compaction + // should move on to the next corruption. + Throwables.assertAnyCause(e, CorruptSSTableException.class); failures++; - continue; } - break; } cfs.truncateBlocking(); diff --git a/test/unit/org/apache/cassandra/io/compress/CQLCompressionTest.java b/test/unit/org/apache/cassandra/io/compress/CQLCompressionTest.java index f011f8a3b418..517edeb8a8b4 100644 --- a/test/unit/org/apache/cassandra/io/compress/CQLCompressionTest.java +++ b/test/unit/org/apache/cassandra/io/compress/CQLCompressionTest.java @@ -24,7 +24,6 @@ import org.junit.BeforeClass; import org.junit.Test; -import org.apache.cassandra.Util; import org.apache.cassandra.config.Config; import org.apache.cassandra.config.DatabaseDescriptor; import org.apache.cassandra.cql3.CQLTester; From 6b7e755db20bb1b63dd18f3a03830b8dee909672 Mon Sep 17 00:00:00 2001 From: Branimir Lambov Date: Tue, 18 Jul 2023 14:30:46 +0300 Subject: [PATCH 24/27] Fix flaky DiskSpaceMetricsTest --- .../apache/cassandra/io/DiskSpaceMetricsTest.java | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/test/unit/org/apache/cassandra/io/DiskSpaceMetricsTest.java b/test/unit/org/apache/cassandra/io/DiskSpaceMetricsTest.java index 95335746060e..3f65d8c48545 100644 --- a/test/unit/org/apache/cassandra/io/DiskSpaceMetricsTest.java +++ b/test/unit/org/apache/cassandra/io/DiskSpaceMetricsTest.java @@ -99,15 +99,15 @@ public void summaryRedistribution() throws Throwable @Test public void testFlushSize() throws Throwable { - createTable("CREATE TABLE %s (pk bigint, PRIMARY KEY (pk))"); - ColumnFamilyStore cfs = getCurrentColumnFamilyStore(); + createTable(KEYSPACE_PER_TEST, "CREATE TABLE %s (pk bigint, PRIMARY KEY (pk))"); + 
ColumnFamilyStore cfs = getCurrentColumnFamilyStore(KEYSPACE_PER_TEST); assertTrue(Double.isNaN(cfs.metric.flushSizeOnDisk.get())); // disable compaction so nothing changes between calculations cfs.disableAutoCompaction(); for (int i = 0; i < 3; i++) - insertN(cfs, 1000, 55); + insertN(KEYSPACE_PER_TEST, cfs, 1000, 55); int totalSize = 0; final Set liveSSTables = cfs.getLiveSSTables(); @@ -125,9 +125,14 @@ private void insert(ColumnFamilyStore cfs, long value) throws Throwable } private void insertN(ColumnFamilyStore cfs, int n, long base) throws Throwable + { + insertN(KEYSPACE, cfs, n, base); + } + + private void insertN(String keyspace, ColumnFamilyStore cfs, int n, long base) throws Throwable { for (int i = 0; i < n; i++) - execute("INSERT INTO %s (pk) VALUES (?)", base + i); + executeFormattedQuery(formatQuery(keyspace, "INSERT INTO %s (pk) VALUES (?)"), base + i); // flush to write the sstable Util.flush(cfs); From 58ba8f10ecce5584fe24d9a9e6dc7310ec69283a Mon Sep 17 00:00:00 2001 From: Branimir Lambov Date: Tue, 18 Jul 2023 21:42:33 +0300 Subject: [PATCH 25/27] review nits --- conf/cassandra.yaml | 5 +++-- .../db/compaction/UnifiedCompactionStrategy.md | 16 ++++++++-------- 2 files changed, 11 insertions(+), 10 deletions(-) diff --git a/conf/cassandra.yaml b/conf/cassandra.yaml index 47fbe5505e6b..4ae0d8bdd8bd 100644 --- a/conf/cassandra.yaml +++ b/conf/cassandra.yaml @@ -1004,10 +1004,11 @@ column_index_cache_size: 2KiB # Default compaction strategy, applied when a table's parameters do not # specify compaction. -# The default is to use SizeTieredCompactionStrategy, with its default -# threshold of 4 sstables. # The selected compaction strategy will also apply to system tables. # +# The default is to use SizeTieredCompactionStrategy, with its default +# compaction parameters. 
+# # default_compaction: # class_name: UnifiedCompactionStrategy # parameters: diff --git a/src/java/org/apache/cassandra/db/compaction/UnifiedCompactionStrategy.md b/src/java/org/apache/cassandra/db/compaction/UnifiedCompactionStrategy.md index c21fc27979fe..961bd741be74 100644 --- a/src/java/org/apache/cassandra/db/compaction/UnifiedCompactionStrategy.md +++ b/src/java/org/apache/cassandra/db/compaction/UnifiedCompactionStrategy.md @@ -1,19 +1,19 @@ # Unified compaction strategy (UCS) From f586649a109ee31ad893976126dd64ecdfba0377 Mon Sep 17 00:00:00 2001 From: Branimir Lambov Date: Tue, 18 Jul 2023 23:56:21 +0300 Subject: [PATCH 26/27] DO NOT COMMIT: CircleCI config --- .circleci/config.yml | 1103 ++++++++++++++++++++++++++---------------- 1 file changed, 680 insertions(+), 423 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 15e81d79d1d9..aad3c54c5edc 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -21,10 +21,10 @@ jobs: j11_jvm_upgrade_dtests_repeat: docker: - image: apache/cassandra-testing-ubuntu2004-java11:latest - resource_class: medium + resource_class: large working_directory: ~/ shell: /bin/bash -eo pipefail -l - parallelism: 4 + parallelism: 25 steps: - attach_workspace: at: /home/cassandra @@ -80,17 +80,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -114,7 +114,7 @@ jobs: resource_class: medium working_directory: ~/ shell: /bin/bash -eo pipefail -l - parallelism: 4 + parallelism: 25 steps: - attach_workspace: at: /home/cassandra @@ -170,17 +170,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -235,17 +235,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -266,10 +266,10 @@ jobs: j8_cqlsh_dtests_py311_vnode: docker: - image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest - resource_class: medium + resource_class: large working_directory: ~/ shell: /bin/bash -eo pipefail -l - parallelism: 4 + parallelism: 50 steps: - attach_workspace: at: /home/cassandra @@ -344,17 +344,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -374,10 +374,10 @@ jobs: j11_dtests_vnode_repeat: docker: - image: apache/cassandra-testing-ubuntu2004-java11:latest - resource_class: medium + resource_class: large working_directory: ~/ shell: /bin/bash -eo pipefail -l - parallelism: 4 + parallelism: 25 steps: - attach_workspace: at: /home/cassandra @@ -498,17 +498,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -529,7 +529,7 @@ jobs: j8_dtests_large_vnode: docker: - image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest - resource_class: medium + resource_class: xlarge working_directory: ~/ shell: /bin/bash -eo pipefail -l parallelism: 4 @@ -583,17 +583,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -616,7 +616,7 @@ jobs: resource_class: medium working_directory: ~/ shell: /bin/bash -eo pipefail -l - parallelism: 4 + parallelism: 25 steps: - attach_workspace: at: /home/cassandra @@ -700,17 +700,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -772,17 +772,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -805,7 +805,7 @@ jobs: resource_class: medium working_directory: ~/ shell: /bin/bash -eo pipefail -l - parallelism: 4 + parallelism: 25 steps: - attach_workspace: at: /home/cassandra @@ -861,17 +861,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -892,10 +892,10 @@ jobs: j11_cqlsh_dtests_py311: docker: - image: apache/cassandra-testing-ubuntu2004-java11:latest - resource_class: medium + resource_class: large working_directory: ~/ shell: /bin/bash -eo pipefail -l - parallelism: 4 + parallelism: 50 steps: - attach_workspace: at: /home/cassandra @@ -970,17 +970,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -1004,7 +1004,7 @@ jobs: resource_class: medium working_directory: ~/ shell: /bin/bash -eo pipefail -l - parallelism: 4 + parallelism: 25 steps: - attach_workspace: at: /home/cassandra @@ -1060,17 +1060,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -1090,10 +1090,10 @@ jobs: j8_jvm_dtests_vnode: docker: - image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest - resource_class: medium + resource_class: large working_directory: ~/ shell: /bin/bash -eo pipefail -l - parallelism: 1 + parallelism: 10 steps: - attach_workspace: at: /home/cassandra @@ -1177,17 +1177,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -1210,7 +1210,7 @@ jobs: resource_class: medium working_directory: ~/ shell: /bin/bash -eo pipefail -l - parallelism: 4 + parallelism: 25 steps: - attach_workspace: at: /home/cassandra @@ -1266,17 +1266,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -1299,7 +1299,7 @@ jobs: resource_class: medium working_directory: ~/ shell: /bin/bash -eo pipefail -l - parallelism: 4 + parallelism: 25 steps: - attach_workspace: at: /home/cassandra @@ -1383,17 +1383,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -1414,10 +1414,10 @@ jobs: j8_cqlsh_dtests_py3: docker: - image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest - resource_class: medium + resource_class: large working_directory: ~/ shell: /bin/bash -eo pipefail -l - parallelism: 4 + parallelism: 50 steps: - attach_workspace: at: /home/cassandra @@ -1492,17 +1492,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -1522,10 +1522,10 @@ jobs: j11_cqlsh_dtests_py38: docker: - image: apache/cassandra-testing-ubuntu2004-java11:latest - resource_class: medium + resource_class: large working_directory: ~/ shell: /bin/bash -eo pipefail -l - parallelism: 4 + parallelism: 50 steps: - attach_workspace: at: /home/cassandra @@ -1600,17 +1600,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -1634,7 +1634,7 @@ jobs: resource_class: medium working_directory: ~/ shell: /bin/bash -eo pipefail -l - parallelism: 4 + parallelism: 25 steps: - attach_workspace: at: /home/cassandra @@ -1690,17 +1690,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -1724,7 +1724,7 @@ jobs: resource_class: medium working_directory: ~/ shell: /bin/bash -eo pipefail -l - parallelism: 4 + parallelism: 25 steps: - attach_workspace: at: /home/cassandra @@ -1886,17 +1886,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -1917,7 +1917,7 @@ jobs: j11_dtests_large_vnode: docker: - image: apache/cassandra-testing-ubuntu2004-java11:latest - resource_class: medium + resource_class: xlarge working_directory: ~/ shell: /bin/bash -eo pipefail -l parallelism: 4 @@ -1971,17 +1971,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -2002,10 +2002,10 @@ jobs: j11_dtests_large_vnode_repeat: docker: - image: apache/cassandra-testing-ubuntu2004-java11:latest - resource_class: medium + resource_class: large working_directory: ~/ shell: /bin/bash -eo pipefail -l - parallelism: 4 + parallelism: 25 steps: - attach_workspace: at: /home/cassandra @@ -2104,17 +2104,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -2135,10 +2135,10 @@ jobs: j8_cqlsh_dtests_py311: docker: - image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest - resource_class: medium + resource_class: large working_directory: ~/ shell: /bin/bash -eo pipefail -l - parallelism: 4 + parallelism: 50 steps: - attach_workspace: at: /home/cassandra @@ -2213,17 +2213,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -2243,10 +2243,10 @@ jobs: j11_cqlsh_dtests_py38_offheap: docker: - image: apache/cassandra-testing-ubuntu2004-java11:latest - resource_class: medium + resource_class: large working_directory: ~/ shell: /bin/bash -eo pipefail -l - parallelism: 4 + parallelism: 50 steps: - attach_workspace: at: /home/cassandra @@ -2321,17 +2321,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -2352,7 +2352,7 @@ jobs: j11_dtests_large: docker: - image: apache/cassandra-testing-ubuntu2004-java11:latest - resource_class: medium + resource_class: xlarge working_directory: ~/ shell: /bin/bash -eo pipefail -l parallelism: 4 @@ -2406,17 +2406,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -2440,7 +2440,7 @@ jobs: resource_class: medium working_directory: ~/ shell: /bin/bash -eo pipefail -l - parallelism: 4 + parallelism: 25 steps: - attach_workspace: at: /home/cassandra @@ -2496,17 +2496,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -2530,7 +2530,7 @@ jobs: resource_class: medium working_directory: ~/ shell: /bin/bash -eo pipefail -l - parallelism: 4 + parallelism: 25 steps: - attach_workspace: at: /home/cassandra @@ -2614,17 +2614,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -2645,10 +2645,10 @@ jobs: j11_upgrade_dtests_repeat: docker: - image: apache/cassandra-testing-ubuntu2004-java11:latest - resource_class: medium + resource_class: xlarge working_directory: ~/ shell: /bin/bash -eo pipefail -l - parallelism: 4 + parallelism: 25 steps: - attach_workspace: at: /home/cassandra @@ -2747,17 +2747,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -2778,10 +2778,10 @@ jobs: j8_cqlsh_dtests_py3_vnode: docker: - image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest - resource_class: medium + resource_class: large working_directory: ~/ shell: /bin/bash -eo pipefail -l - parallelism: 4 + parallelism: 50 steps: - attach_workspace: at: /home/cassandra @@ -2856,17 +2856,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -2886,10 +2886,10 @@ jobs: j11_cqlsh_dtests_py3: docker: - image: apache/cassandra-testing-ubuntu2004-java11:latest - resource_class: medium + resource_class: large working_directory: ~/ shell: /bin/bash -eo pipefail -l - parallelism: 4 + parallelism: 50 steps: - attach_workspace: at: /home/cassandra @@ -2964,17 +2964,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -3029,17 +3029,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -3062,7 +3062,7 @@ jobs: resource_class: medium working_directory: ~/ shell: /bin/bash -eo pipefail -l - parallelism: 4 + parallelism: 25 steps: - attach_workspace: at: /home/cassandra @@ -3146,17 +3146,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -3218,17 +3218,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -3252,7 +3252,7 @@ jobs: resource_class: medium working_directory: ~/ shell: /bin/bash -eo pipefail -l - parallelism: 4 + parallelism: 25 steps: - attach_workspace: at: /home/cassandra @@ -3336,17 +3336,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -3366,10 +3366,10 @@ jobs: j8_dtests_offheap_repeat: docker: - image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest - resource_class: medium + resource_class: large working_directory: ~/ shell: /bin/bash -eo pipefail -l - parallelism: 4 + parallelism: 25 steps: - attach_workspace: at: /home/cassandra @@ -3468,17 +3468,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -3498,10 +3498,10 @@ jobs: j11_dtests_offheap_repeat: docker: - image: apache/cassandra-testing-ubuntu2004-java11:latest - resource_class: medium + resource_class: large working_directory: ~/ shell: /bin/bash -eo pipefail -l - parallelism: 4 + parallelism: 25 steps: - attach_workspace: at: /home/cassandra @@ -3600,17 +3600,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -3631,10 +3631,10 @@ jobs: j8_dtests_large_repeat: docker: - image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest - resource_class: medium + resource_class: large working_directory: ~/ shell: /bin/bash -eo pipefail -l - parallelism: 4 + parallelism: 25 steps: - attach_workspace: at: /home/cassandra @@ -3733,17 +3733,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -3763,10 +3763,10 @@ jobs: j11_jvm_dtests_vnode: docker: - image: apache/cassandra-testing-ubuntu2004-java11:latest - resource_class: medium + resource_class: large working_directory: ~/ shell: /bin/bash -eo pipefail -l - parallelism: 1 + parallelism: 10 steps: - attach_workspace: at: /home/cassandra @@ -3850,17 +3850,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -3964,17 +3964,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -3997,7 +3997,7 @@ jobs: resource_class: medium working_directory: ~/ shell: /bin/bash -eo pipefail -l - parallelism: 4 + parallelism: 25 steps: - attach_workspace: at: /home/cassandra @@ -4081,17 +4081,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -4184,17 +4184,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -4256,17 +4256,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -4290,7 +4290,7 @@ jobs: resource_class: medium working_directory: ~/ shell: /bin/bash -eo pipefail -l - parallelism: 4 + parallelism: 25 steps: - attach_workspace: at: /home/cassandra @@ -4346,17 +4346,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -4376,7 +4376,7 @@ jobs: j8_dtests_large: docker: - image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest - resource_class: medium + resource_class: xlarge working_directory: ~/ shell: /bin/bash -eo pipefail -l parallelism: 4 @@ -4430,17 +4430,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -4543,17 +4543,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -4574,10 +4574,10 @@ jobs: j11_jvm_upgrade_dtests: docker: - image: apache/cassandra-testing-ubuntu2004-java11:latest - resource_class: medium + resource_class: xlarge working_directory: ~/ shell: /bin/bash -eo pipefail -l - parallelism: 1 + parallelism: 4 steps: - attach_workspace: at: /home/cassandra @@ -4661,17 +4661,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -4733,17 +4733,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -4764,10 +4764,10 @@ jobs: j8_cqlsh_dtests_py38_offheap: docker: - image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest - resource_class: medium + resource_class: large working_directory: ~/ shell: /bin/bash -eo pipefail -l - parallelism: 4 + parallelism: 50 steps: - attach_workspace: at: /home/cassandra @@ -4842,17 +4842,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -4875,7 +4875,7 @@ jobs: resource_class: medium working_directory: ~/ shell: /bin/bash -eo pipefail -l - parallelism: 4 + parallelism: 25 steps: - attach_workspace: at: /home/cassandra @@ -4931,17 +4931,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -4962,10 +4962,10 @@ jobs: j11_dtests_repeat: docker: - image: apache/cassandra-testing-ubuntu2004-java11:latest - resource_class: medium + resource_class: large working_directory: ~/ shell: /bin/bash -eo pipefail -l - parallelism: 4 + parallelism: 25 steps: - attach_workspace: at: /home/cassandra @@ -5086,17 +5086,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -5120,7 +5120,7 @@ jobs: resource_class: medium working_directory: ~/ shell: /bin/bash -eo pipefail -l - parallelism: 4 + parallelism: 25 steps: - attach_workspace: at: /home/cassandra @@ -5176,17 +5176,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -5209,7 +5209,7 @@ jobs: resource_class: medium working_directory: ~/ shell: /bin/bash -eo pipefail -l - parallelism: 4 + parallelism: 25 steps: - attach_workspace: at: /home/cassandra @@ -5265,17 +5265,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -5298,7 +5298,7 @@ jobs: resource_class: medium working_directory: ~/ shell: /bin/bash -eo pipefail -l - parallelism: 4 + parallelism: 25 steps: - attach_workspace: at: /home/cassandra @@ -5382,17 +5382,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -5413,10 +5413,10 @@ jobs: j8_cqlsh_dtests_py38: docker: - image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest - resource_class: medium + resource_class: large working_directory: ~/ shell: /bin/bash -eo pipefail -l - parallelism: 4 + parallelism: 50 steps: - attach_workspace: at: /home/cassandra @@ -5491,17 +5491,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -5521,10 +5521,10 @@ jobs: j11_cqlsh_dtests_py3_offheap: docker: - image: apache/cassandra-testing-ubuntu2004-java11:latest - resource_class: medium + resource_class: large working_directory: ~/ shell: /bin/bash -eo pipefail -l - parallelism: 4 + parallelism: 50 steps: - attach_workspace: at: /home/cassandra @@ -5599,17 +5599,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -5633,7 +5633,7 @@ jobs: resource_class: medium working_directory: ~/ shell: /bin/bash -eo pipefail -l - parallelism: 4 + parallelism: 25 steps: - attach_workspace: at: /home/cassandra @@ -5717,17 +5717,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -5750,7 +5750,7 @@ jobs: resource_class: medium working_directory: ~/ shell: /bin/bash -eo pipefail -l - parallelism: 4 + parallelism: 25 steps: - attach_workspace: at: /home/cassandra @@ -5806,17 +5806,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -5836,10 +5836,10 @@ jobs: j11_cqlsh_dtests_py311_offheap: docker: - image: apache/cassandra-testing-ubuntu2004-java11:latest - resource_class: medium + resource_class: large working_directory: ~/ shell: /bin/bash -eo pipefail -l - parallelism: 4 + parallelism: 50 steps: - attach_workspace: at: /home/cassandra @@ -5914,17 +5914,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -5948,7 +5948,7 @@ jobs: resource_class: medium working_directory: ~/ shell: /bin/bash -eo pipefail -l - parallelism: 4 + parallelism: 25 steps: - attach_workspace: at: /home/cassandra @@ -6004,17 +6004,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -6037,7 +6037,7 @@ jobs: resource_class: medium working_directory: ~/ shell: /bin/bash -eo pipefail -l - parallelism: 4 + parallelism: 25 steps: - attach_workspace: at: /home/cassandra @@ -6121,17 +6121,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -6152,10 +6152,10 @@ jobs: j11_upgrade_dtests: docker: - image: apache/cassandra-testing-ubuntu2004-java11:latest - resource_class: medium + resource_class: large working_directory: ~/ shell: /bin/bash -eo pipefail -l - parallelism: 4 + parallelism: 100 steps: - attach_workspace: at: /home/cassandra @@ -6206,17 +6206,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -6237,10 +6237,10 @@ jobs: j11_dtests_large_repeat: docker: - image: apache/cassandra-testing-ubuntu2004-java11:latest - resource_class: medium + resource_class: large working_directory: ~/ shell: /bin/bash -eo pipefail -l - parallelism: 4 + parallelism: 25 steps: - attach_workspace: at: /home/cassandra @@ -6339,17 +6339,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -6373,7 +6373,7 @@ jobs: resource_class: medium working_directory: ~/ shell: /bin/bash -eo pipefail -l - parallelism: 4 + parallelism: 25 steps: - attach_workspace: at: /home/cassandra @@ -6457,17 +6457,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -6487,10 +6487,10 @@ jobs: j8_cqlsh_dtests_py3_offheap: docker: - image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest - resource_class: medium + resource_class: large working_directory: ~/ shell: /bin/bash -eo pipefail -l - parallelism: 4 + parallelism: 50 steps: - attach_workspace: at: /home/cassandra @@ -6565,17 +6565,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -6598,7 +6598,7 @@ jobs: resource_class: medium working_directory: ~/ shell: /bin/bash -eo pipefail -l - parallelism: 4 + parallelism: 25 steps: - attach_workspace: at: /home/cassandra @@ -6654,17 +6654,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -6684,10 +6684,10 @@ jobs: j8_dtests_vnode_repeat: docker: - image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest - resource_class: medium + resource_class: large working_directory: ~/ shell: /bin/bash -eo pipefail -l - parallelism: 4 + parallelism: 25 steps: - attach_workspace: at: /home/cassandra @@ -6786,17 +6786,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -6816,10 +6816,10 @@ jobs: j11_cqlsh_dtests_py3_vnode: docker: - image: apache/cassandra-testing-ubuntu2004-java11:latest - resource_class: medium + resource_class: large working_directory: ~/ shell: /bin/bash -eo pipefail -l - parallelism: 4 + parallelism: 50 steps: - attach_workspace: at: /home/cassandra @@ -6894,17 +6894,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -6925,10 +6925,10 @@ jobs: j11_dtests_offheap: docker: - image: apache/cassandra-testing-ubuntu2004-java11:latest - resource_class: medium + resource_class: large working_directory: ~/ shell: /bin/bash -eo pipefail -l - parallelism: 4 + parallelism: 50 steps: - attach_workspace: at: /home/cassandra @@ -7001,17 +7001,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -7032,10 +7032,10 @@ jobs: j11_cqlsh_dtests_py38_vnode: docker: - image: apache/cassandra-testing-ubuntu2004-java11:latest - resource_class: medium + resource_class: large working_directory: ~/ shell: /bin/bash -eo pipefail -l - parallelism: 4 + parallelism: 50 steps: - attach_workspace: at: /home/cassandra @@ -7110,17 +7110,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -7144,7 +7144,7 @@ jobs: resource_class: medium working_directory: ~/ shell: /bin/bash -eo pipefail -l - parallelism: 4 + parallelism: 25 steps: - attach_workspace: at: /home/cassandra @@ -7200,17 +7200,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -7234,7 +7234,7 @@ jobs: resource_class: medium working_directory: ~/ shell: /bin/bash -eo pipefail -l - parallelism: 4 + parallelism: 25 steps: - attach_workspace: at: /home/cassandra @@ -7290,17 +7290,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -7324,7 +7324,7 @@ jobs: resource_class: medium working_directory: ~/ shell: /bin/bash -eo pipefail -l - parallelism: 4 + parallelism: 25 steps: - attach_workspace: at: /home/cassandra @@ -7380,17 +7380,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -7411,10 +7411,10 @@ jobs: j11_cqlsh_dtests_py311_vnode: docker: - image: apache/cassandra-testing-ubuntu2004-java11:latest - resource_class: medium + resource_class: large working_directory: ~/ shell: /bin/bash -eo pipefail -l - parallelism: 4 + parallelism: 50 steps: - attach_workspace: at: /home/cassandra @@ -7489,17 +7489,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -7523,7 +7523,7 @@ jobs: resource_class: medium working_directory: ~/ shell: /bin/bash -eo pipefail -l - parallelism: 4 + parallelism: 25 steps: - attach_workspace: at: /home/cassandra @@ -7579,17 +7579,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -7609,10 +7609,10 @@ jobs: j8_dtests_offheap: docker: - image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest - resource_class: medium + resource_class: large working_directory: ~/ shell: /bin/bash -eo pipefail -l - parallelism: 4 + parallelism: 50 steps: - attach_workspace: at: /home/cassandra @@ -7663,17 +7663,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -7696,7 +7696,7 @@ jobs: resource_class: medium working_directory: ~/ shell: /bin/bash -eo pipefail -l - parallelism: 4 + parallelism: 25 steps: - attach_workspace: at: /home/cassandra @@ -7780,17 +7780,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -7810,10 +7810,10 @@ jobs: j11_jvm_dtests: docker: - image: apache/cassandra-testing-ubuntu2004-java11:latest - resource_class: medium + resource_class: large working_directory: ~/ shell: /bin/bash -eo pipefail -l - parallelism: 1 + parallelism: 10 steps: - attach_workspace: at: /home/cassandra @@ -7897,17 +7897,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -7931,7 +7931,7 @@ jobs: resource_class: medium working_directory: ~/ shell: /bin/bash -eo pipefail -l - parallelism: 4 + parallelism: 25 steps: - attach_workspace: at: /home/cassandra @@ -7987,17 +7987,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -8094,17 +8094,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -8128,7 +8128,7 @@ jobs: resource_class: medium working_directory: ~/ shell: /bin/bash -eo pipefail -l - parallelism: 4 + parallelism: 25 steps: - attach_workspace: at: /home/cassandra @@ -8184,17 +8184,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -8214,10 +8214,10 @@ jobs: j8_dtests: docker: - image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest - resource_class: medium + resource_class: large working_directory: ~/ shell: /bin/bash -eo pipefail -l - parallelism: 4 + parallelism: 50 steps: - attach_workspace: at: /home/cassandra @@ -8268,17 +8268,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -8301,7 +8301,7 @@ jobs: resource_class: medium working_directory: ~/ shell: /bin/bash -eo pipefail -l - parallelism: 4 + parallelism: 25 steps: - attach_workspace: at: /home/cassandra @@ -8357,17 +8357,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -8388,10 +8388,10 @@ jobs: j8_dtests_vnode: docker: - image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest - resource_class: medium + resource_class: large working_directory: ~/ shell: /bin/bash -eo pipefail -l - parallelism: 4 + parallelism: 50 steps: - attach_workspace: at: /home/cassandra @@ -8442,17 +8442,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -8505,17 +8505,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -8539,7 +8539,7 @@ jobs: resource_class: medium working_directory: ~/ shell: /bin/bash -eo pipefail -l - parallelism: 4 + parallelism: 25 steps: - attach_workspace: at: /home/cassandra @@ -8595,17 +8595,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -8625,10 +8625,10 @@ jobs: j11_dtests: docker: - image: apache/cassandra-testing-ubuntu2004-java11:latest - resource_class: medium + resource_class: large working_directory: ~/ shell: /bin/bash -eo pipefail -l - parallelism: 4 + parallelism: 50 steps: - attach_workspace: at: /home/cassandra @@ -8701,17 +8701,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -8735,7 +8735,7 @@ jobs: resource_class: medium working_directory: ~/ shell: /bin/bash -eo pipefail -l - parallelism: 4 + parallelism: 25 steps: - attach_workspace: at: /home/cassandra @@ -8897,17 +8897,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -8968,17 +8968,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -9001,7 +9001,7 @@ jobs: resource_class: medium working_directory: ~/ shell: /bin/bash -eo pipefail -l - parallelism: 4 + parallelism: 25 steps: - attach_workspace: at: /home/cassandra @@ -9085,17 +9085,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -9115,10 +9115,10 @@ jobs: j8_cqlsh_dtests_py311_offheap: docker: - image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest - resource_class: medium + resource_class: large working_directory: ~/ shell: /bin/bash -eo pipefail -l - parallelism: 4 + parallelism: 50 steps: - attach_workspace: at: /home/cassandra @@ -9193,17 +9193,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -9223,10 +9223,10 @@ jobs: j8_dtests_repeat: docker: - image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest - resource_class: medium + resource_class: large working_directory: ~/ shell: /bin/bash -eo pipefail -l - parallelism: 4 + parallelism: 25 steps: - attach_workspace: at: /home/cassandra @@ -9325,17 +9325,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -9355,10 +9355,10 @@ jobs: j8_jvm_dtests: docker: - image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest - resource_class: medium + resource_class: large working_directory: ~/ shell: /bin/bash -eo pipefail -l - parallelism: 1 + parallelism: 10 steps: - attach_workspace: at: /home/cassandra @@ -9442,17 +9442,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -9548,17 +9548,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -9578,10 +9578,10 @@ jobs: j8_cqlsh_dtests_py38_vnode: docker: - image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest - resource_class: medium + resource_class: large working_directory: ~/ shell: /bin/bash -eo pipefail -l - parallelism: 4 + parallelism: 50 steps: - attach_workspace: at: /home/cassandra @@ -9656,17 +9656,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -9689,7 +9689,7 @@ jobs: resource_class: medium working_directory: ~/ shell: /bin/bash -eo pipefail -l - parallelism: 4 + parallelism: 25 steps: - attach_workspace: at: /home/cassandra @@ -9745,17 +9745,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -9817,17 +9817,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -9880,17 +9880,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -9910,10 +9910,10 @@ jobs: j11_dtests_vnode: docker: - image: apache/cassandra-testing-ubuntu2004-java11:latest - resource_class: medium + resource_class: large working_directory: ~/ shell: /bin/bash -eo pipefail -l - parallelism: 4 + parallelism: 50 steps: - attach_workspace: at: /home/cassandra @@ -9986,17 +9986,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -10020,7 +10020,7 @@ jobs: resource_class: medium working_directory: ~/ shell: /bin/bash -eo pipefail -l - parallelism: 4 + parallelism: 25 steps: - attach_workspace: at: /home/cassandra @@ -10076,17 +10076,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -10106,10 +10106,10 @@ jobs: j8_dtests_large_vnode_repeat: docker: - image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest - resource_class: medium + resource_class: large working_directory: ~/ shell: /bin/bash -eo pipefail -l - parallelism: 4 + parallelism: 25 steps: - attach_workspace: at: /home/cassandra @@ -10208,17 +10208,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -10241,7 +10241,7 @@ jobs: resource_class: medium working_directory: ~/ shell: /bin/bash -eo pipefail -l - parallelism: 4 + parallelism: 25 steps: - attach_workspace: at: /home/cassandra @@ -10297,17 +10297,17 @@ jobs: - CCM_MAX_HEAP_SIZE: 1024M - CCM_HEAP_NEWSIZE: 256M - REPEATED_TESTS_STOP_ON_FAILURE: false - - REPEATED_UTESTS: null + - REPEATED_UTESTS: 
org.apache.cassandra.utils.OverlapsTest,org.apache.cassandra.utils.FBUtilitiesTest,org.apache.cassandra.tools.nodetool.stats.TableStatsPrinterTest,org.apache.cassandra.tools.nodetool.CompactionHistoryTest,org.apache.cassandra.tools.SSTablePartitionsTest,org.apache.cassandra.streaming.StreamingTransferTest,org.apache.cassandra.streaming.StreamTransferTaskTest,org.apache.cassandra.service.reads.DataResolverTest,org.apache.cassandra.service.paxos.PaxosRepairHistoryTest,org.apache.cassandra.repair.ValidatorTest,org.apache.cassandra.metrics.TrieMemtableMetricsTest,org.apache.cassandra.io.sstable.metadata.MetadataSerializerTest,org.apache.cassandra.io.sstable.SSTableWriterTest,org.apache.cassandra.io.sstable.SSTableRewriterTest,org.apache.cassandra.io.sstable.SSTableReaderTest,org.apache.cassandra.io.sstable.SSTableMetadataTest,org.apache.cassandra.io.sstable.RangeAwareSSTableWriterTest,org.apache.cassandra.io.compress.CQLCompressionTest,org.apache.cassandra.io.DiskSpaceMetricsTest,org.apache.cassandra.dht.SplitterTest,org.apache.cassandra.db.streaming.CassandraStreamManagerTest,org.apache.cassandra.db.streaming.CassandraStreamHeaderTest,org.apache.cassandra.db.streaming.CassandraOutgoingFileTest,org.apache.cassandra.db.streaming.CassandraEntireSSTableStreamWriterTest,org.apache.cassandra.db.repair.PendingAntiCompactionTest,org.apache.cassandra.db.repair.PendingAntiCompactionBytemanTest,org.apache.cassandra.db.memtable.MemtableQuickTest,org.apache.cassandra.db.lifecycle.RealTransactionsTest,org.apache.cassandra.db.lifecycle.LogTransactionTest,org.apache.cassandra.db.compaction.unified.ShardedMultiWriterTest,org.apache.cassandra.db.compaction.unified.ShardedCompactionWriterTest,org.apache.cassandra.db.compaction.unified.ControllerTest,org.apache.cassandra.db.compaction.UnifiedCompactionStrategyTest,org.apache.cassandra.db.compaction.TTLExpiryTest,org.apache.cassandra.db.compaction.ShardManagerTest,org.apache.cassandra.db.compaction.OneCompactionTest,org.apache.cassandra.db
.compaction.LeveledGenerationsTest,org.apache.cassandra.db.compaction.LeveledCompactionStrategyTest,org.apache.cassandra.db.compaction.CorruptedSSTablesCompactionsTest,org.apache.cassandra.db.compaction.CompactionsCQLTest,org.apache.cassandra.db.compaction.CompactionsBytemanTest,org.apache.cassandra.db.compaction.CompactionStrategyManagerTest,org.apache.cassandra.db.compaction.CancelCompactionsTest,org.apache.cassandra.db.DiskBoundaryManagerTest,org.apache.cassandra.db.CleanupTest,org.apache.cassandra.cql3.statements.DescribeStatementTest - REPEATED_UTESTS_COUNT: 500 - REPEATED_UTESTS_FQLTOOL: null - REPEATED_UTESTS_FQLTOOL_COUNT: 500 - - REPEATED_UTESTS_LONG: null + - REPEATED_UTESTS_LONG: org.apache.cassandra.db.compaction.LongLeveledCompactionStrategyTest - REPEATED_UTESTS_LONG_COUNT: 100 - REPEATED_UTESTS_STRESS: null - REPEATED_UTESTS_STRESS_COUNT: 500 - REPEATED_SIMULATOR_DTESTS: null - REPEATED_SIMULATOR_DTESTS_COUNT: 500 - - REPEATED_JVM_DTESTS: null + - REPEATED_JVM_DTESTS: org.apache.cassandra.distributed.test.UnifiedCompactionDensitiesTest,org.apache.cassandra.distributed.test.PreviewRepairSnapshotTest - REPEATED_JVM_DTESTS_COUNT: 500 - REPEATED_JVM_UPGRADE_DTESTS: null - REPEATED_JVM_UPGRADE_DTESTS_COUNT: 500 @@ -10340,6 +10340,12 @@ workflows: requires: - start_j8_unit_tests - j8_build + - start_j8_unit_tests_repeat: + type: approval + - j8_unit_tests_repeat: + requires: + - start_j8_unit_tests_repeat + - j8_build - start_j8_jvm_dtests: type: approval - j8_jvm_dtests: @@ -10352,6 +10358,18 @@ workflows: requires: - start_j8_jvm_dtests_vnode - j8_build + - start_j8_jvm_dtests_repeat: + type: approval + - j8_jvm_dtests_repeat: + requires: + - start_j8_jvm_dtests_repeat + - j8_build + - start_j8_jvm_dtests_vnode_repeat: + type: approval + - j8_jvm_dtests_vnode_repeat: + requires: + - start_j8_jvm_dtests_vnode_repeat + - j8_build - start_j11_jvm_dtests: type: approval - j11_jvm_dtests: @@ -10364,6 +10382,18 @@ workflows: requires: - 
start_j11_jvm_dtests_vnode - j8_build + - start_j11_jvm_dtests_repeat: + type: approval + - j11_jvm_dtests_repeat: + requires: + - start_j11_jvm_dtests_repeat + - j8_build + - start_j11_jvm_dtests_vnode_repeat: + type: approval + - j11_jvm_dtests_vnode_repeat: + requires: + - start_j11_jvm_dtests_vnode_repeat + - j8_build - start_j8_simulator_dtests: type: approval - j8_simulator_dtests: @@ -10400,18 +10430,36 @@ workflows: requires: - start_j11_unit_tests - j8_build + - start_j11_unit_tests_repeat: + type: approval + - j11_unit_tests_repeat: + requires: + - start_j11_unit_tests_repeat + - j8_build - start_j8_utests_oa: type: approval - j8_utests_oa: requires: - start_j8_utests_oa - j8_build + - start_j8_utests_oa_repeat: + type: approval + - j8_utests_oa_repeat: + requires: + - start_j8_utests_oa_repeat + - j8_build - start_j11_utests_oa: type: approval - j11_utests_oa: requires: - start_j11_utests_oa - j8_build + - start_j11_utests_oa_repeat: + type: approval + - j11_utests_oa_repeat: + requires: + - start_j11_utests_oa_repeat + - j8_build - start_j8_utests_long: type: approval - j8_utests_long: @@ -10424,6 +10472,18 @@ workflows: requires: - start_j11_utests_long - j8_build + - start_j8_utests_long_repeat: + type: approval + - j8_utests_long_repeat: + requires: + - start_j8_utests_long_repeat + - j8_build + - start_j11_utests_long_repeat: + type: approval + - j11_utests_long_repeat: + requires: + - start_j11_utests_long_repeat + - j8_build - start_j8_utests_cdc: type: approval - j8_utests_cdc: @@ -10436,6 +10496,18 @@ workflows: requires: - start_j11_utests_cdc - j8_build + - start_j8_utests_cdc_repeat: + type: approval + - j8_utests_cdc_repeat: + requires: + - start_j8_utests_cdc_repeat + - j8_build + - start_j11_utests_cdc_repeat: + type: approval + - j11_utests_cdc_repeat: + requires: + - start_j11_utests_cdc_repeat + - j8_build - start_j8_utests_compression: type: approval - j8_utests_compression: @@ -10448,6 +10520,18 @@ workflows: requires: - 
start_j11_utests_compression - j8_build + - start_j8_utests_compression_repeat: + type: approval + - j8_utests_compression_repeat: + requires: + - start_j8_utests_compression_repeat + - j8_build + - start_j11_utests_compression_repeat: + type: approval + - j11_utests_compression_repeat: + requires: + - start_j11_utests_compression_repeat + - j8_build - start_j8_utests_trie: type: approval - j8_utests_trie: @@ -10460,6 +10544,18 @@ workflows: requires: - start_j11_utests_trie - j8_build + - start_j8_utests_trie_repeat: + type: approval + - j8_utests_trie_repeat: + requires: + - start_j8_utests_trie_repeat + - j8_build + - start_j11_utests_trie_repeat: + type: approval + - j11_utests_trie_repeat: + requires: + - start_j11_utests_trie_repeat + - j8_build - start_j8_utests_stress: type: approval - j8_utests_stress: @@ -10496,6 +10592,18 @@ workflows: requires: - start_j11_utests_system_keyspace_directory - j8_build + - start_j8_utests_system_keyspace_directory_repeat: + type: approval + - j8_utests_system_keyspace_directory_repeat: + requires: + - start_j8_utests_system_keyspace_directory_repeat + - j8_build + - start_j11_utests_system_keyspace_directory_repeat: + type: approval + - j11_utests_system_keyspace_directory_repeat: + requires: + - start_j11_utests_system_keyspace_directory_repeat + - j8_build - start_j8_dtests: type: approval - j8_dtests: @@ -10649,21 +10757,42 @@ workflows: - j8_utests_oa: requires: - j8_build + - j8_utests_oa_repeat: + requires: + - j8_build + - j11_utests_oa_repeat: + requires: + - j8_build + - j8_unit_tests_repeat: + requires: + - j8_build - j8_simulator_dtests: requires: - j8_build - j8_jvm_dtests: requires: - j8_build + - j8_jvm_dtests_repeat: + requires: + - j8_build - j8_jvm_dtests_vnode: requires: - j8_build + - j8_jvm_dtests_vnode_repeat: + requires: + - j8_build - j11_jvm_dtests: requires: - j8_build + - j11_jvm_dtests_repeat: + requires: + - j8_build - j11_jvm_dtests_vnode: requires: - j8_build + - j11_jvm_dtests_vnode_repeat: + 
requires: + - j8_build - j8_cqlshlib_tests: requires: - j8_build @@ -10682,6 +10811,9 @@ workflows: - j11_utests_oa: requires: - j8_build + - j11_unit_tests_repeat: + requires: + - j8_build - start_utests_long: type: approval - j8_utests_long: @@ -10692,6 +10824,14 @@ workflows: requires: - start_utests_long - j8_build + - j8_utests_long_repeat: + requires: + - start_utests_long + - j8_build + - j11_utests_long_repeat: + requires: + - start_utests_long + - j8_build - start_utests_cdc: type: approval - j8_utests_cdc: @@ -10702,6 +10842,14 @@ workflows: requires: - start_utests_cdc - j8_build + - j8_utests_cdc_repeat: + requires: + - start_utests_cdc + - j8_build + - j11_utests_cdc_repeat: + requires: + - start_utests_cdc + - j8_build - start_utests_compression: type: approval - j8_utests_compression: @@ -10712,6 +10860,14 @@ workflows: requires: - start_utests_compression - j8_build + - j8_utests_compression_repeat: + requires: + - start_utests_compression + - j8_build + - j11_utests_compression_repeat: + requires: + - start_utests_compression + - j8_build - start_utests_trie: type: approval - j8_utests_trie: @@ -10722,6 +10878,14 @@ workflows: requires: - start_utests_trie - j8_build + - j8_utests_trie_repeat: + requires: + - start_utests_trie + - j8_build + - j11_utests_trie_repeat: + requires: + - start_utests_trie + - j8_build - start_utests_stress: type: approval - j8_utests_stress: @@ -10751,6 +10915,13 @@ workflows: requires: - start_utests_system_keyspace_directory - j8_build + - j8_utests_system_keyspace_directory_repeat: + requires: + - j8_build + - j11_utests_system_keyspace_directory_repeat: + requires: + - start_utests_system_keyspace_directory + - j8_build - j8_dtests: requires: - j8_build @@ -10872,6 +11043,12 @@ workflows: requires: - start_j11_unit_tests - j11_build + - start_j11_unit_tests_repeat: + type: approval + - j11_unit_tests_repeat: + requires: + - start_j11_unit_tests_repeat + - j11_build - start_j11_jvm_dtests: type: approval - 
j11_jvm_dtests: @@ -10884,6 +11061,18 @@ workflows: requires: - start_j11_jvm_dtests_vnode - j11_build + - start_j11_jvm_dtests_repeat: + type: approval + - j11_jvm_dtests_repeat: + requires: + - start_j11_jvm_dtests_repeat + - j11_build + - start_j11_jvm_dtests_vnode_repeat: + type: approval + - j11_jvm_dtests_vnode_repeat: + requires: + - start_j11_jvm_dtests_vnode_repeat + - j11_build - start_j11_simulator_dtests: type: approval - j11_simulator_dtests: @@ -10978,30 +11167,60 @@ workflows: requires: - start_j11_utests_oa - j11_build + - start_j11_utests_oa_repeat: + type: approval + - j11_utests_oa_repeat: + requires: + - start_j11_utests_oa_repeat + - j11_build - start_j11_utests_long: type: approval - j11_utests_long: requires: - start_j11_utests_long - j11_build + - start_j11_utests_long_repeat: + type: approval + - j11_utests_long_repeat: + requires: + - start_j11_utests_long_repeat + - j11_build - start_j11_utests_cdc: type: approval - j11_utests_cdc: requires: - start_j11_utests_cdc - j11_build + - start_j11_utests_cdc_repeat: + type: approval + - j11_utests_cdc_repeat: + requires: + - start_j11_utests_cdc_repeat + - j11_build - start_j11_utests_compression: type: approval - j11_utests_compression: requires: - start_j11_utests_compression - j11_build + - start_j11_utests_compression_repeat: + type: approval + - j11_utests_compression_repeat: + requires: + - start_j11_utests_compression_repeat + - j11_build - start_j11_utests_trie: type: approval - j11_utests_trie: requires: - start_j11_utests_trie - j11_build + - start_j11_utests_trie_repeat: + type: approval + - j11_utests_trie_repeat: + requires: + - start_j11_utests_trie_repeat + - j11_build - start_j11_utests_stress: type: approval - j11_utests_stress: @@ -11020,6 +11239,12 @@ workflows: requires: - start_j11_utests_system_keyspace_directory - j11_build + - start_j11_utests_system_keyspace_directory_repeat: + type: approval + - j11_utests_system_keyspace_directory_repeat: + requires: + - 
start_j11_utests_system_keyspace_directory_repeat + - j11_build - start_j11_dtest_jars_build: type: approval - j11_dtest_jars_build: @@ -11051,12 +11276,24 @@ workflows: - j11_utests_oa: requires: - j11_build + - j11_utests_oa_repeat: + requires: + - j11_build + - j11_unit_tests_repeat: + requires: + - j11_build - j11_jvm_dtests: requires: - j11_build + - j11_jvm_dtests_repeat: + requires: + - j11_build - j11_jvm_dtests_vnode: requires: - j11_build + - j11_jvm_dtests_vnode_repeat: + requires: + - j11_build - j11_simulator_dtests: requires: - j11_build @@ -11126,24 +11363,40 @@ workflows: requires: - start_utests_long - j11_build + - j11_utests_long_repeat: + requires: + - start_utests_long + - j11_build - start_utests_cdc: type: approval - j11_utests_cdc: requires: - start_utests_cdc - j11_build + - j11_utests_cdc_repeat: + requires: + - start_utests_cdc + - j11_build - start_utests_compression: type: approval - j11_utests_compression: requires: - start_utests_compression - j11_build + - j11_utests_compression_repeat: + requires: + - start_utests_compression + - j11_build - start_utests_trie: type: approval - j11_utests_trie: requires: - start_utests_trie - j11_build + - j11_utests_trie_repeat: + requires: + - start_utests_trie + - j11_build - start_utests_stress: type: approval - j11_utests_stress: @@ -11162,6 +11415,10 @@ workflows: requires: - start_utests_system_keyspace_directory - j11_build + - j11_utests_system_keyspace_directory_repeat: + requires: + - start_utests_system_keyspace_directory + - j11_build - start_jvm_upgrade_dtests: type: approval - j11_dtest_jars_build: From 920022c1aa04696a550c80a0a0cec4c07df267d5 Mon Sep 17 00:00:00 2001 From: Branimir Lambov Date: Wed, 19 Jul 2023 09:42:12 +0300 Subject: [PATCH 27/27] Added CHANGES.txt and NEWS.txt entries --- CHANGES.txt | 1 + NEWS.txt | 4 ++++ 2 files changed, 5 insertions(+) diff --git a/CHANGES.txt b/CHANGES.txt index 178ed2b5ae9d..3a2f1c41e2ea 100644 --- a/CHANGES.txt +++ b/CHANGES.txt @@ -1,4 +1,5 
@@ 5.0 + * Implementation of the Unified Compaction Strategy as described in CEP-26 (CASSANDRA-18397) * Upgrade commons cli to 1.5.0 (CASSANDRA-18659) * Disable the deprecated keyspace/table thresholds and convert them to guardrails (CASSANDRA-18617) * Deprecate CloudstackSnitch and remove duplicate code in snitches (CASSANDRA-18438) diff --git a/NEWS.txt b/NEWS.txt index 195cefc25a6f..5c630ec2e3b4 100644 --- a/NEWS.txt +++ b/NEWS.txt @@ -71,6 +71,10 @@ using the provided 'sstableupgrade' tool. New features ------------ + - Added a new "unified" compaction strategy that supports the use cases of the legacy compaction strategies, with + low space overhead, high parallelism and flexible configuration. Implemented by the UnifiedCompactionStrategy + class. Further details and documentation can be found in + src/java/org/apache/cassandra/db/compaction/UnifiedCompactionStrategy.md - New `VectorType` (cql `vector`) which adds new fixed-length element arrays. See CASSANDRA-18504 - Removed UDT type migration logic for 3.6+ clusters upgrading to 4.0. If migration has been disabled, it must be enabled before upgrading to 5.0 if the cluster used UDTs. See CASSANDRA-18504