diff --git a/parquet-column/src/main/java/org/apache/parquet/CorruptStatistics.java b/parquet-column/src/main/java/org/apache/parquet/CorruptStatistics.java
index 28a99caec1..c5846f9efa 100644
--- a/parquet-column/src/main/java/org/apache/parquet/CorruptStatistics.java
+++ b/parquet-column/src/main/java/org/apache/parquet/CorruptStatistics.java
@@ -27,7 +27,7 @@ import org.slf4j.LoggerFactory;
 
 /**
- * There was a bug (PARQUET-251) that caused the statistics metadata
+ * There was a bug (https://github.com/apache/parquet-java/issues/1433) that caused the statistics metadata
  * for binary columns to be corrupted in the write path.
  *
  * This class is used to detect whether a file was written with this bug,
@@ -38,7 +38,7 @@ public class CorruptStatistics {
   private static final Logger LOG = LoggerFactory.getLogger(CorruptStatistics.class);
 
-  // the version in which the bug described by jira: PARQUET-251 was fixed
+  // the version in which the bug (https://github.com/apache/parquet-java/issues/1433) was fixed
   // the bug involved writing invalid binary statistics, so stats written prior to this
   // fix must be ignored / assumed invalid
   private static final SemanticVersion PARQUET_251_FIXED_VERSION = new SemanticVersion(1, 8, 0);
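For context, the check this javadoc describes is exposed through CorruptStatistics.shouldIgnoreStatistics. A minimal sketch of a caller, assuming the 1.8.0 fix version noted in the hunk above; the created_by strings are illustrative, not taken from a real file:

import org.apache.parquet.CorruptStatistics;
import org.apache.parquet.schema.PrimitiveType.PrimitiveTypeName;

public class CorruptStatisticsCheck {
  public static void main(String[] args) {
    // created_by strings as they would appear in a file footer (illustrative values)
    String preFix = "parquet-mr version 1.6.0 (build abcd)";
    String postFix = "parquet-mr version 1.8.0 (build abcd)";

    // binary statistics written before the 1.8.0 fix must be ignored
    System.out.println(CorruptStatistics.shouldIgnoreStatistics(preFix, PrimitiveTypeName.BINARY)); // expect: true
    System.out.println(CorruptStatistics.shouldIgnoreStatistics(postFix, PrimitiveTypeName.BINARY)); // expect: false
  }
}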
diff --git a/parquet-column/src/main/java/org/apache/parquet/filter2/predicate/FilterApi.java b/parquet-column/src/main/java/org/apache/parquet/filter2/predicate/FilterApi.java
index 3c51680667..62c52f0176 100644
--- a/parquet-column/src/main/java/org/apache/parquet/filter2/predicate/FilterApi.java
+++ b/parquet-column/src/main/java/org/apache/parquet/filter2/predicate/FilterApi.java
@@ -59,13 +59,13 @@
  * FilterPredicate pred = or(eq(foo, 10), ltEq(bar, 17.0));
  *
  */
-// TODO: Support repeated columns (https://issues.apache.org/jira/browse/PARQUET-34)
+// TODO: Support repeated columns (https://github.com/apache/parquet-java/issues/1452)
 //
 // TODO: Support filtering on groups (eg, filter where this group is / isn't null)
-// TODO: (https://issues.apache.org/jira/browse/PARQUET-43)
+// TODO: (https://github.com/apache/parquet-format/issues/261)
 
 // TODO: Consider adding support for more column types that aren't coupled with parquet types, eg Column<String>
-// TODO: (https://issues.apache.org/jira/browse/PARQUET-35)
+// TODO: (https://github.com/apache/parquet-java/issues/1453)
 public final class FilterApi {
   private FilterApi() {}
diff --git a/parquet-column/src/main/java/org/apache/parquet/filter2/predicate/SchemaCompatibilityValidator.java b/parquet-column/src/main/java/org/apache/parquet/filter2/predicate/SchemaCompatibilityValidator.java
index b5708a4a0c..650fcb3100 100644
--- a/parquet-column/src/main/java/org/apache/parquet/filter2/predicate/SchemaCompatibilityValidator.java
+++ b/parquet-column/src/main/java/org/apache/parquet/filter2/predicate/SchemaCompatibilityValidator.java
@@ -54,7 +54,7 @@
  *
  * TODO: detect if a column is optional or required and validate that eq(null)
  * TODO: is not called on required fields (is that too strict?)
- * TODO: (https://issues.apache.org/jira/browse/PARQUET-44)
+ * TODO: (https://github.com/apache/parquet-java/issues/1472)
  */
 public class SchemaCompatibilityValidator implements FilterPredicate.Visitor<Boolean> {
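The usage line quoted in the FilterApi javadoc above expands into a complete program as follows; a minimal sketch, with the column names (foo, x.y.bar) taken from the surrounding javadoc:

import static org.apache.parquet.filter2.predicate.FilterApi.*;

import org.apache.parquet.filter2.predicate.FilterPredicate;
import org.apache.parquet.filter2.predicate.Operators.DoubleColumn;
import org.apache.parquet.filter2.predicate.Operators.IntColumn;

public class FilterApiExample {
  public static void main(String[] args) {
    // typed column handles; dotted paths address nested fields
    IntColumn foo = intColumn("foo");
    DoubleColumn bar = doubleColumn("x.y.bar");

    // foo == 10 || bar <= 17.0, exactly as in the javadoc
    FilterPredicate pred = or(eq(foo, 10), ltEq(bar, 17.0));
    System.out.println(pred);
  }
}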
diff --git a/parquet-column/src/main/java/org/apache/parquet/filter2/predicate/ValidTypeMap.java b/parquet-column/src/main/java/org/apache/parquet/filter2/predicate/ValidTypeMap.java
index 838583ec52..c8ef61e038 100644
--- a/parquet-column/src/main/java/org/apache/parquet/filter2/predicate/ValidTypeMap.java
+++ b/parquet-column/src/main/java/org/apache/parquet/filter2/predicate/ValidTypeMap.java
@@ -34,7 +34,7 @@
  * when there are type mismatches.
  *
  * TODO: this has some overlap with {@link PrimitiveTypeName#javaType}
- * TODO: (https://issues.apache.org/jira/browse/PARQUET-30)
+ * TODO: (https://github.com/apache/parquet-java/issues/1447)
  */
 public class ValidTypeMap {
   private ValidTypeMap() {}
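To make the type mismatches that ValidTypeMap guards against concrete, here is a hedged sketch. It assumes SchemaCompatibilityValidator.validate(FilterPredicate, MessageType) as the entry point that consults this map; the schema and field name are invented:

import static org.apache.parquet.filter2.predicate.FilterApi.*;

import org.apache.parquet.filter2.predicate.FilterPredicate;
import org.apache.parquet.filter2.predicate.SchemaCompatibilityValidator;
import org.apache.parquet.schema.MessageType;
import org.apache.parquet.schema.MessageTypeParser;

public class TypeMismatchExample {
  public static void main(String[] args) {
    // the schema declares foo as a 64-bit integer
    MessageType schema = MessageTypeParser.parseMessageType(
        "message doc { required int64 foo; }");

    // but the predicate addresses foo through a 32-bit int column handle
    FilterPredicate pred = eq(intColumn("foo"), 10);

    // validation walks the predicate and rejects the mismatch up front,
    // instead of letting it surface later during record assembly
    SchemaCompatibilityValidator.validate(pred, schema); // expect an exception here
  }
}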
diff --git a/parquet-column/src/main/java/org/apache/parquet/filter2/recordlevel/FilteringPrimitiveConverter.java b/parquet-column/src/main/java/org/apache/parquet/filter2/recordlevel/FilteringPrimitiveConverter.java
index c1eee2fa3b..ab1306c8d1 100644
--- a/parquet-column/src/main/java/org/apache/parquet/filter2/recordlevel/FilteringPrimitiveConverter.java
+++ b/parquet-column/src/main/java/org/apache/parquet/filter2/recordlevel/FilteringPrimitiveConverter.java
@@ -42,7 +42,7 @@ public FilteringPrimitiveConverter(PrimitiveConverter delegate, ValueInspector[]
   // TODO: this works, but
   // TODO: essentially turns off the benefits of dictionary support
   // TODO: even if the underlying delegate supports it.
-  // TODO: we should support it here. (https://issues.apache.org/jira/browse/PARQUET-36)
+  // TODO: we should support it here. (https://github.com/apache/parquet-java/issues/1392)
   @Override
   public boolean hasDictionarySupport() {
     return false;
diff --git a/parquet-column/src/main/java/org/apache/parquet/filter2/recordlevel/IncrementallyUpdatedFilterPredicateEvaluator.java b/parquet-column/src/main/java/org/apache/parquet/filter2/recordlevel/IncrementallyUpdatedFilterPredicateEvaluator.java
index d78f93132a..51bb36954a 100644
--- a/parquet-column/src/main/java/org/apache/parquet/filter2/recordlevel/IncrementallyUpdatedFilterPredicateEvaluator.java
+++ b/parquet-column/src/main/java/org/apache/parquet/filter2/recordlevel/IncrementallyUpdatedFilterPredicateEvaluator.java
@@ -30,7 +30,7 @@
  * represent columns with a null value, and updates them accordingly.
  *
  * TODO: We could also build an evaluator that detects if enough values are known to determine the outcome
- * TODO: of the predicate and quit the record assembly early. (https://issues.apache.org/jira/browse/PARQUET-37)
+ * TODO: of the predicate and quit the record assembly early. (https://github.com/apache/parquet-java/issues/1455)
  */
 public class IncrementallyUpdatedFilterPredicateEvaluator implements Visitor {
   private static final IncrementallyUpdatedFilterPredicateEvaluator INSTANCE =
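The early-exit evaluator proposed in the TODO above does not exist in the library. The following standalone sketch is hypothetical (though isKnown()/getResult() mirror the accessors on IncrementallyUpdatedFilterPredicate.ValueInspector) and only illustrates the tri-state logic such an evaluator would need:

public final class ShortCircuitSketch {
  // hypothetical stand-in for a leaf predicate's state during assembly
  interface Leaf {
    boolean isKnown();
    boolean getResult();
  }

  // tri-state outcome for an OR node: 1 = known match, 0 = known non-match,
  // -1 = undecided, so record assembly would have to continue
  static int decideOr(Leaf left, Leaf right) {
    if (left.isKnown() && left.getResult()) return 1;   // one true branch decides the OR
    if (right.isKnown() && right.getResult()) return 1;
    if (left.isKnown() && right.isKnown()) return 0;    // both known, both false
    return -1;                                          // not enough values seen yet
  }

  private ShortCircuitSketch() {}
}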
diff --git a/parquet-hadoop/src/main/java/org/apache/parquet/filter2/statisticslevel/StatisticsFilter.java b/parquet-hadoop/src/main/java/org/apache/parquet/filter2/statisticslevel/StatisticsFilter.java
index 4d7918c4f1..fb7e0badec 100644
--- a/parquet-hadoop/src/main/java/org/apache/parquet/filter2/statisticslevel/StatisticsFilter.java
+++ b/parquet-hadoop/src/main/java/org/apache/parquet/filter2/statisticslevel/StatisticsFilter.java
@@ -63,7 +63,7 @@
  * false otherwise (including when it is not known, which is often the case).
  */
 // TODO: this belongs in the parquet-column project, but some of the classes here need to be moved too
-// TODO: (https://issues.apache.org/jira/browse/PARQUET-38)
+// TODO: (https://github.com/apache/parquet-java/issues/1458)
 public class StatisticsFilter implements FilterPredicate.Visitor<Boolean> {
   private static final boolean BLOCK_MIGHT_MATCH = false;
diff --git a/parquet-hadoop/src/main/java/org/apache/parquet/format/converter/ParquetMetadataConverter.java b/parquet-hadoop/src/main/java/org/apache/parquet/format/converter/ParquetMetadataConverter.java
index d20ac7faeb..10728dfae3 100644
--- a/parquet-hadoop/src/main/java/org/apache/parquet/format/converter/ParquetMetadataConverter.java
+++ b/parquet-hadoop/src/main/java/org/apache/parquet/format/converter/ParquetMetadataConverter.java
@@ -139,7 +139,7 @@ import org.slf4j.LoggerFactory;
 
 // TODO: This file has become too long!
-// TODO: Lets split it up: https://issues.apache.org/jira/browse/PARQUET-310
+// TODO: Let's split it up: https://github.com/apache/parquet-java/issues/1835
 public class ParquetMetadataConverter {
   private static final TypeDefinedOrder TYPE_DEFINED_ORDER = new TypeDefinedOrder();
diff --git a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/CodecFactory.java b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/CodecFactory.java
index f1041a83b8..eee5fa6083 100644
--- a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/CodecFactory.java
+++ b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/CodecFactory.java
@@ -171,7 +171,7 @@ public BytesInput decompress(BytesInput bytes, int decompressedSize) throws IOEx
       InputStream is = codec.createInputStream(bytes.toInputStream(), decompressor);
       // We need to explicitly close the ZstdDecompressorStream here to release the resources it holds to
-      // avoid off-heap memory fragmentation issue, see https://issues.apache.org/jira/browse/PARQUET-2160.
+      // avoid off-heap memory fragmentation issue, see https://github.com/apache/parquet-format/issues/398.
       // This change will load the decompressor stream into heap a little earlier, since the problem it solves
       // only happens in the ZSTD codec, so this modification is only made for ZSTD streams.
       if (codec instanceof ZstandardCodec) {
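A sketch of the read-eagerly-and-close pattern the CodecFactory comment describes, pulled out into a standalone helper; the helper name and the isZstd flag are invented for illustration, while the library performs the same steps inline after the instanceof check shown above:

import java.io.IOException;
import java.io.InputStream;

import org.apache.parquet.bytes.BytesInput;

final class ZstdEagerClose {
  // For ZSTD, materialize the decompressed bytes on-heap now and close the
  // stream, so the codec's native buffers are released deterministically;
  // other codecs keep the lazier stream wrapping.
  static BytesInput readAndClose(InputStream is, int decompressedSize, boolean isZstd) throws IOException {
    if (isZstd) {
      BytesInput fullyRead = BytesInput.copy(BytesInput.from(is, decompressedSize));
      is.close();
      return fullyRead;
    }
    return BytesInput.from(is, decompressedSize);
  }

  private ZstdEagerClose() {}
}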
diff --git a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ParquetReader.java b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ParquetReader.java
index be599ba569..f08e45ceda 100644
--- a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ParquetReader.java
+++ b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ParquetReader.java
@@ -49,7 +49,7 @@
 
 /**
  * Read records from a Parquet file.
- * TODO: too many constructors (https://issues.apache.org/jira/browse/PARQUET-39)
+ * TODO: too many constructors (https://github.com/apache/parquet-java/issues/1466)
  */
 public class ParquetReader<T> implements Closeable {
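The builder API is the usual alternative to the constructor sprawl this TODO complains about; a minimal sketch reading example Group records (the file path is a placeholder):

import org.apache.hadoop.fs.Path;
import org.apache.parquet.example.data.Group;
import org.apache.parquet.hadoop.ParquetReader;
import org.apache.parquet.hadoop.example.GroupReadSupport;

public class ReadWithBuilder {
  public static void main(String[] args) throws Exception {
    // builder() avoids choosing among the many constructors directly
    try (ParquetReader<Group> reader =
        ParquetReader.builder(new GroupReadSupport(), new Path("/tmp/example.parquet")).build()) {
      for (Group record = reader.read(); record != null; record = reader.read()) {
        System.out.println(record);
      }
    }
  }
}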