Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
19 commits
Select commit Hold shift + click to select a range
4537f2a
On delete tile cache workflows minimize the opportunity for 404 to be …
alanmcdade Apr 11, 2025
4b8cb05
On delete tile cache workflows minimize the opportunity for 404 to be …
alanmcdade Apr 11, 2025
e0d205a
On delete tile cache workflows minimize the opportunity for 404 to be …
alanmcdade Apr 11, 2025
ab44cdd
Fixed multiple calls to listener.
alanmcdade Apr 15, 2025
97b67f8
Fixed multiple calls to listener.
alanmcdade Apr 15, 2025
449a7c7
Restored check in putParametersMetadata, tests have been refactored t…
alanmcdade Apr 16, 2025
e2e5d85
Retained 1.26.0 config for compatibility testing
groldan Apr 2, 2025
039d22e
Updated version to 1.27-SNAPSHOT
groldan Apr 2, 2025
3d19f67
Fix gt.version = 33-SNAPSHOT-SNAPSHOT/33-SNAPSHOT, there is a problem…
groldan Apr 3, 2025
da02536
Updated release notes for 1.28-SNAPSHOT
groldan Apr 3, 2025
cd722f5
Updated version to 1.28-SNAPSHOT
groldan Apr 3, 2025
8629417
Fix missing and extra spaces in log messages
peter-afrigis Apr 10, 2025
80fc136
Update Ubuntu to 22.04, 20.04 is no longer supported
aaime Apr 16, 2025
1314791
Merge branch 'GeoWebCache:main' into truncate_cache_4151_simplified
alan-geosolutions Apr 16, 2025
6f3a40c
Revert formatting on AbstractBlobStoreTest
alanmcdade Apr 16, 2025
5c678ad
Use a FakeListener rather than a mock
alanmcdade Apr 16, 2025
e4ba074
Removed global windows test skip in AbstractS3BlobStoreIntegrationTest
alanmcdade Apr 16, 2025
577ac71
Disable all window S3Blobstore tests
alanmcdade Apr 16, 2025
3cbb23f
Disable all window S3Blobstore tests
alanmcdade Apr 16, 2025
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -59,6 +59,15 @@ public String layerId(String layerName) {
return layer.getId();
}

/**
 * Resolves the published layer name for an internal layer id.
 *
 * @param layerId the internal layer identifier to look up
 * @return the name of the layer whose id equals {@code layerId}, or {@code null} if no
 *     configured layer matches
 */
public String layerNameFromId(String layerId) {
    String matchedName = null;
    for (TileLayer candidate : layers.getLayerList()) {
        if (layerId.equals(candidate.getId())) {
            matchedName = candidate.getName();
            break;
        }
    }
    return matchedName;
}

public Set<String> layerGridsets(String layerName) {
TileLayer layer;
try {
Expand Down Expand Up @@ -222,4 +231,27 @@ private static String join(boolean closing, Object... elements) {
}
return joiner.toString();
}

/**
 * Determines the parameters id to use for a tile range's cache key.
 *
 * <p>Prefers the id already set on the range. Otherwise derives one from the range's
 * parameter map; a derived id is cached back onto the range as a side effect. When no
 * parameters are present at all, the literal {@code "default"} is returned (and nothing
 * is written back to the range).
 *
 * @param obj the tile range to inspect (may be mutated — see above)
 * @return the parameters id, never {@code null}
 */
private static String parametersFromTileRange(TileRange obj) {
    String cached = obj.getParametersId();
    if (cached != null) {
        return cached;
    }
    String derived = ParametersUtils.getId(obj.getParameters());
    if (derived == null) {
        return "default";
    }
    // remember the computed id so later lookups on the same range are cheap
    obj.setParametersId(derived);
    return derived;
}

/**
 * Builds the key prefix covering every tile of a single zoom level for the given range.
 *
 * <p>The prefix is assembled as
 * {@code <prefix>/<layerId>/<gridsetId>/<format>/<parametersId>/<level>} via {@link #join}.
 * Note that resolving the parameters id may cache it onto {@code tileRange} as a side
 * effect; Java's left-to-right argument evaluation keeps that ordering deterministic.
 *
 * @param tileRange the range identifying layer, gridset, format and parameters
 * @param level the zoom level the prefix should cover
 * @return the joined key prefix for that zoom level (closed with a trailing separator)
 */
public String forZoomLevel(TileRange tileRange, int level) {
    return join(
            true,
            prefix,
            layerId(tileRange.getLayerName()),
            tileRange.getGridSetId(),
            tileRange.getMimeType().getFileExtension(),
            parametersFromTileRange(tileRange),
            String.valueOf(level));
}
}
30 changes: 30 additions & 0 deletions geowebcache/s3storage/Readme.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,30 @@
Tidy up AWS after working with tests
===

```
aws s3 ls s3://<bucket>/ | grep tmp_ | awk '{print $2}' | while read obj; do
  echo "Object: $obj"
  aws s3 rm s3://<bucket>/$obj --recursive
done
```

Replace the `<bucket>` with the value configured in your system.
This will delete all the temporary objects that have been created.


Config file
====
Add a `.gwc_s3_tests.properties` to your home directory to get the integration tests to run.

```
cat .gwc_s3_tests.properties
```
_contents of file_

```
bucket=gwc-s3-test
secretKey=lxL*****************************
accessKey=AK***************
```
Original file line number Diff line number Diff line change
Expand Up @@ -13,26 +13,22 @@
*/
package org.geowebcache.s3;

import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkNotNull;
import static java.lang.String.format;
import static java.util.Objects.isNull;

import com.amazonaws.AmazonServiceException;
import com.amazonaws.services.s3.AmazonS3Client;
import com.amazonaws.services.s3.model.AccessControlList;
import com.amazonaws.services.s3.model.BucketPolicy;
import com.amazonaws.services.s3.model.CannedAccessControlList;
import com.amazonaws.services.s3.model.DeleteObjectsRequest;
import com.amazonaws.services.s3.model.DeleteObjectsRequest.KeyVersion;
import com.amazonaws.services.s3.model.Grant;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PutObjectRequest;
import com.amazonaws.services.s3.model.S3Object;
import com.amazonaws.services.s3.model.S3ObjectInputStream;
import com.amazonaws.services.s3.model.S3ObjectSummary;
import com.google.common.base.Function;
import com.google.common.collect.AbstractIterator;
import com.google.common.collect.Iterators;
import com.google.common.collect.Lists;
import com.google.common.io.ByteStreams;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
Expand All @@ -41,7 +37,6 @@
import java.nio.channels.WritableByteChannel;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Objects;
Expand All @@ -50,7 +45,10 @@
import java.util.Set;
import java.util.logging.Level;
import java.util.logging.Logger;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import javax.annotation.Nullable;
import org.geotools.util.logging.Logging;
import org.geowebcache.GeoWebCacheException;
Expand All @@ -61,14 +59,14 @@
import org.geowebcache.locks.LockProvider;
import org.geowebcache.mime.MimeException;
import org.geowebcache.mime.MimeType;
import org.geowebcache.s3.streams.TileDeletionListenerNotifier;
import org.geowebcache.storage.BlobStore;
import org.geowebcache.storage.BlobStoreListener;
import org.geowebcache.storage.BlobStoreListenerList;
import org.geowebcache.storage.CompositeBlobStore;
import org.geowebcache.storage.StorageException;
import org.geowebcache.storage.TileObject;
import org.geowebcache.storage.TileRange;
import org.geowebcache.storage.TileRangeIterator;
import org.geowebcache.util.TMSKeyBuilder;

public class S3BlobStore implements BlobStore {
Expand All @@ -83,8 +81,6 @@ public class S3BlobStore implements BlobStore {

private String bucketName;

private volatile boolean shutDown;

private final S3Ops s3Ops;

private CannedAccessControlList acl;
Expand All @@ -100,7 +96,7 @@ public S3BlobStore(S3BlobStoreInfo config, TileLayerDispatcher layers, LockProvi
conn = validateClient(config.buildClient(), bucketName);
acl = config.getAccessControlList();

this.s3Ops = new S3Ops(conn, bucketName, keyBuilder, lockProvider);
this.s3Ops = new S3Ops(conn, bucketName, keyBuilder, lockProvider, listeners);

boolean empty = !s3Ops.prefixExists(prefix);
boolean existing = Objects.nonNull(s3Ops.getObjectMetadata(keyBuilder.storeMetadata()));
Expand Down Expand Up @@ -172,7 +168,6 @@ private void checkBucketPolicy(AmazonS3Client client, String bucketName) throws

@Override
public void destroy() {
this.shutDown = true;
AmazonS3Client conn = this.conn;
this.conn = null;
if (conn != null) {
Expand Down Expand Up @@ -279,80 +274,40 @@ public boolean get(TileObject obj) throws StorageException {
return true;
}

private class TileToKey implements Function<long[], KeyVersion> {

private final String coordsPrefix;

private final String extension;

public TileToKey(String coordsPrefix, MimeType mimeType) {
this.coordsPrefix = coordsPrefix;
this.extension = mimeType.getInternalName();
}

@Override
public KeyVersion apply(long[] loc) {
long z = loc[2];
long x = loc[0];
long y = loc[1];
StringBuilder sb = new StringBuilder(coordsPrefix);
sb.append(z).append('/').append(x).append('/').append(y).append('.').append(extension);
return new KeyVersion(sb.toString());
}
}

@Override
public boolean delete(final TileRange tileRange) throws StorageException {
checkNotNull(tileRange, "tile range must not be null");
checkArgument(tileRange.getZoomStart() >= 0, "zoom start must be greater or equal than zero");
checkArgument(
tileRange.getZoomStop() >= tileRange.getZoomStart(),
"zoom stop must be greater or equal than start zoom");

final String coordsPrefix = keyBuilder.coordinatesPrefix(tileRange, true);
if (!s3Ops.prefixExists(coordsPrefix)) {
return false;
}

final Iterator<long[]> tileLocations = new AbstractIterator<>() {

// TileRange iterator with 1x1 meta tiling factor
private TileRangeIterator trIter = new TileRangeIterator(tileRange, new int[] {1, 1});
// Create a prefix for each zoom level
long count = IntStream.range(tileRange.getZoomStart(), tileRange.getZoomStop() + 1)
.mapToObj(level -> scheduleDeleteForZoomLevel(tileRange, level))
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This is going to execute a delete for all tiles in a given zoom level... which is an improvement, if the tile range does not have rangeBounds, of if the rangeBounds did cover the whole gridset area.

But if someone set up the job to remove a specific area (e.g., a city of interest) then the current code would delete everything instead.

To expedite this, I would suggest the following:

  • Keep the old code in case there is a tile range that is a subset of the gridset bounds
  • Use the new code if the bbox is fully covering the gridset bounds instead.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Added code for bounded deletes. Simplest version applied

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Added BoundedS3KeySupplier that reduces the S3ObjectSummaries inspected by scanning the x axis between the bounds.

.filter(Objects::nonNull)
.count();

@Override
protected long[] computeNext() {
long[] gridLoc = trIter.nextMetaGridLocation(new long[3]);
return gridLoc == null ? endOfData() : gridLoc;
}
};

if (listeners.isEmpty()) {
// if there are no listeners, don't bother requesting every tile
// metadata to notify the listeners
Iterator<List<long[]>> partition = Iterators.partition(tileLocations, 1000);
final TileToKey tileToKey = new TileToKey(coordsPrefix, tileRange.getMimeType());

while (partition.hasNext() && !shutDown) {
List<long[]> locations = partition.next();
List<KeyVersion> keys = Lists.transform(locations, tileToKey);

DeleteObjectsRequest req = new DeleteObjectsRequest(bucketName);
req.setQuiet(true);
req.setKeys(keys);
conn.deleteObjects(req);
}
// Check all ranges where scheduled
return count == (tileRange.getZoomStop() - tileRange.getZoomStart() + 1);
}

} else {
long[] xyz;
String layerName = tileRange.getLayerName();
String gridSetId = tileRange.getGridSetId();
String format = tileRange.getMimeType().getFormat();
Map<String, String> parameters = tileRange.getParameters();

while (tileLocations.hasNext()) {
xyz = tileLocations.next();
TileObject tile = TileObject.createQueryTileObject(layerName, xyz, gridSetId, format, parameters);
tile.setParametersId(tileRange.getParametersId());
delete(tile);
}
private String scheduleDeleteForZoomLevel(TileRange tileRange, int level) {
String zoomPath = keyBuilder.forZoomLevel(tileRange, level);
Bounds bounds = new Bounds(tileRange.rangeBounds(level));
String prefix = format("%s?%s", zoomPath, bounds);
try {
s3Ops.scheduleAsyncDelete(prefix);
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This does the same as deleteByGridsetId, deleteByParametersId and delete(layerName).
This is fine, but we have an event problem. The other three methods inform the listeners that the mass delete is about to happen, while the tile range case would inform tile by tile.

Looking at the changes in S3Ops, I have the impression now all asynch deletes are sending events for single tiles... if so, the listeners may end up recording a change "twice", and thus have disk quota go off synch.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Only delete tile with a bounded delete, because even the bounds are not entered by the user defaults are passed though they will be included in the prefix passed to BulkDelete. BulkDelete will only do the notifications to listeners when it is a bounded delete.

return prefix;
} catch (GeoWebCacheException e) {
log.warning("Cannot schedule delete for prefix " + prefix);
return null;
}

return true;
}

@Override
Expand Down Expand Up @@ -457,8 +412,7 @@ private Properties getLayerMetadata(String layerName) {
}

private void putParametersMetadata(String layerName, String parametersId, Map<String, String> parameters) {
assert (isNull(parametersId) == isNull(parameters));
if (isNull(parametersId)) {
if (isNull(parameters)) {
return;
}
Properties properties = new Properties();
Expand Down Expand Up @@ -519,4 +473,63 @@ public Map<String, Optional<Map<String, String>>> getParametersMapping(String la
.map(props -> (Map<String, String>) (Map<?, ?>) props)
.collect(Collectors.toMap(ParametersUtils::getId, Optional::of));
}

/**
 * Inclusive tile-index bounding box that can be encoded into, and parsed back out of, a
 * delete-prefix string of the form {@code <path>/?bounds=minx,miny,maxx,maxy}.
 *
 * <p>Used by the bulk-delete workflow to restrict an asynchronous zoom-level delete to a
 * sub-area of the gridset. The textual format produced by {@link #toString()} must stay in
 * sync with {@link #boundsRegex}, and {@link #predicate(S3ObjectSummary)} relies on the
 * key layout captured by {@code TileDeletionListenerNotifier.keyRegex} (declared outside
 * this file).
 */
public static class Bounds {
// Matches "<prefix>/?bounds=minx,miny,maxx,maxy". Coordinates are non-negative tile
// indices (\d+); the leading ".*/" requires the path portion to end with a slash.
private static final Pattern boundsRegex =
Pattern.compile("^(?<prefix>.*/)\\?bounds=(?<minx>\\d+),(?<miny>\\d+),(?<maxx>\\d+),(?<maxy>\\d+)$");
private final long minX, minY, maxX, maxY;

/**
 * Creates a normalized bounding box from a 4-element array {@code {x1, y1, x2, y2}}.
 * The corners may be given in any order; min/max are computed per axis so the stored
 * bounds are always well-ordered.
 *
 * @param bound array of four tile coordinates: indices 0/2 are x values, 1/3 are y values
 */
public Bounds(long[] bound) {
minX = Math.min(bound[0], bound[2]);
minY = Math.min(bound[1], bound[3]);
maxX = Math.max(bound[0], bound[2]);
maxY = Math.max(bound[1], bound[3]);
}

/** @return the smallest x tile index covered by these bounds (inclusive) */
public long getMinX() {
return minX;
}

/** @return the largest x tile index covered by these bounds (inclusive) */
public long getMaxX() {
return maxX;
}

/**
 * Parses a delete prefix of the form {@code <path>/?bounds=minx,miny,maxx,maxy}.
 *
 * @param prefix the prefix string to parse
 * @return the parsed bounds, or {@link Optional#empty()} when the string does not match
 *     the expected format
 */
static Optional<Bounds> createBounds(String prefix) {
Matcher matcher = boundsRegex.matcher(prefix);
if (!matcher.matches()) {
return Optional.empty();
}

Bounds bounds = new Bounds(new long[] {
Long.parseLong(matcher.group("minx")),
Long.parseLong(matcher.group("miny")),
Long.parseLong(matcher.group("maxx")),
Long.parseLong(matcher.group("maxy"))
});
return Optional.of(bounds);
}

/**
 * Strips the {@code ?bounds=...} suffix from a delete prefix.
 *
 * @param prefix the prefix string, with or without a bounds suffix
 * @return the bare path portion when a bounds suffix is present; otherwise the input
 *     unchanged
 */
static String prefixWithoutBounds(String prefix) {
Matcher matcher = boundsRegex.matcher(prefix);
if (matcher.matches()) {
return matcher.group("prefix");
}
return prefix;
}

// Produces the query-string form consumed by boundsRegex (minus the path and "?").
@Override
public String toString() {
return format("bounds=%d,%d,%d,%d", minX, minY, maxX, maxY);
}

/**
 * Tests whether an S3 object's key names a tile whose x/y indices fall inside these
 * bounds (inclusive on all edges).
 *
 * <p>Keys that do not match the tile-key pattern are rejected outright. The x/y group
 * positions come from {@code TileDeletionListenerNotifier}; they must agree with its
 * {@code keyRegex} definition.
 *
 * @param s3ObjectSummary the listed S3 object to test
 * @return {@code true} when the key is a tile key inside the bounds
 */
public boolean predicate(S3ObjectSummary s3ObjectSummary) {
var matcher = TileDeletionListenerNotifier.keyRegex.matcher(s3ObjectSummary.getKey());
if (!matcher.matches()) {
return false;
}
long x = Long.parseLong(matcher.group(TileDeletionListenerNotifier.X_GROUP_POS));
long y = Long.parseLong(matcher.group(TileDeletionListenerNotifier.Y_GROUP_POS));
return x >= minX && x <= maxX && y >= minY && y <= maxY;
}
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -423,7 +423,8 @@ public AmazonS3Client buildClient() {
clientConfig.setUseGzip(useGzip);
}
log.fine("Initializing AWS S3 connection");
AmazonS3Client client = new AmazonS3Client(getCredentialsProvider(), clientConfig);
AWSCredentialsProvider credentialsProvider = getCredentialsProvider();
AmazonS3Client client = new AmazonS3Client(credentialsProvider, clientConfig);
if (endpoint != null && !"".equals(endpoint)) {
S3ClientOptions s3ClientOptions = new S3ClientOptions();
s3ClientOptions.setPathStyleAccess(true);
Expand Down
Loading
Loading