diff --git a/build.gradle.kts b/build.gradle.kts
index 8788287c..371137b1 100644
--- a/build.gradle.kts
+++ b/build.gradle.kts
@@ -95,6 +95,9 @@ dependencies {
exclude("org.slf4j", "slf4j-api")
}
api("com.google.code.findbugs:jsr305:3.0.2")
+ api("com.github.ben-manes.caffeine:caffeine:2.9.3") {
+ because("Last version to support JDK 8.")
+ }
testImplementation("ch.qos.logback:logback-classic:1.5.12")
testImplementation("org.junit.jupiter:junit-jupiter:5.11.3")
diff --git a/docs/content/contributing/developer-guide/index.md b/docs/content/contributing/developer-guide/index.md
index da4537c2..6c6b6f7e 100644
--- a/docs/content/contributing/developer-guide/index.md
+++ b/docs/content/contributing/developer-guide/index.md
@@ -10,7 +10,7 @@ Before you start writing code, please read:
## System requirements
1. Gradle 8.1, or higher
-2. `JDK8`, `JDK11` or `JDK17`
+2. `JDK8`, `JDK11`, `JDK17` or `JDK21`
## Finding issues to work on
@@ -85,7 +85,7 @@ s3fs.proxy.url=https://my.local.domain/path/to/repository
### Build
Builds the entire code and runs unit and integration tests.
-It is assumed you already have the `amazon-test.properties` configuration in place.
+It is assumed you already have the `amazon-test.properties` configuration in place under `src/test/resources` or `src/testIntegration/resources`.
```
./gradlew build
@@ -100,9 +100,11 @@ It is assumed you already have the `amazon-test.properties` configuration in pla
### Run only integration tests
```
-./gradlew it-s3
+./gradlew testIntegration
```
+You can also use `./gradlew build -x testIntegration` to skip the integration tests.
+
### Run all tests
```
diff --git a/docs/content/reference/configuration-options.md b/docs/content/reference/configuration-options.md
index b0283977..d7da85bb 100644
--- a/docs/content/reference/configuration-options.md
+++ b/docs/content/reference/configuration-options.md
@@ -4,28 +4,30 @@
A complete list of environment variables which can be set to configure the client.
-| Key | Default | Description |
-|-------------------------------------------|---------|-------------------------------------------------------------------------------------------------------------------------|
-| s3fs.access.key | none | AWS access key, used to identify the user interacting with AWS |
-| s3fs.secret.key | none | AWS secret access key, used to authenticate the user interacting with AWS |
-| s3fs.request.metric.collector.class | TODO | Fully-qualified class name to instantiate an AWS SDK request/response metric collector |
-| s3fs.connection.timeout | TODO | Timeout (in milliseconds) for establishing a connection to a remote service |
-| s3fs.max.connections | TODO | Maximum number of connections allowed in a connection pool |
-| s3fs.max.retry.error | TODO | Maximum number of times that a single request should be retried, assuming it fails for a retryable error |
-| s3fs.protocol | TODO | Protocol (HTTP or HTTPS) to use when connecting to AWS |
-| s3fs.proxy.domain | none | For NTLM proxies: The Windows domain name to use when authenticating with the proxy |
-| s3fs.proxy.protocol | none | Proxy connection protocol. |
-| s3fs.proxy.host | none | Proxy host name either from the configured endpoint or from the "http.proxyHost" system property |
-| s3fs.proxy.password | none | The password to use when connecting through a proxy |
-| s3fs.proxy.port | none | Proxy port either from the configured endpoint or from the "http.proxyPort" system property |
-| s3fs.proxy.username | none | The username to use when connecting through a proxy |
-| s3fs.proxy.workstation | none | For NTLM proxies: The Windows workstation name to use when authenticating with the proxy |
-| s3fs.region | none | The AWS Region to configure the client |
-| s3fs.socket.send.buffer.size.hint | TODO | The size hint (in bytes) for the low level TCP send buffer |
-| s3fs.socket.receive.buffer.size.hint | TODO | The size hint (in bytes) for the low level TCP receive buffer |
-| s3fs.socket.timeout | TODO | Timeout (in milliseconds) for each read to the underlying socket |
-| s3fs.user.agent.prefix | TODO | Prefix of the user agent that is sent with each request to AWS |
-| s3fs.amazon.s3.factory.class | TODO | Fully-qualified class name to instantiate a S3 factory base class which creates a S3 client instance |
-| s3fs.signer.override | TODO | Fully-qualified class name to define the signer that should be used when authenticating with AWS |
-| s3fs.path.style.access | TODO | Boolean that indicates whether the client uses path-style access for all requests |
-| s3fs.request.header.cache-control | blank | Configures the `cacheControl` on request builders (i.e. `CopyObjectRequest`, `PutObjectRequest`, etc) |
+| Key | Default | Description |
+|-------------------------------------|---------|-------------------------------------------------------------------------------------------------------------------------|
+| s3fs.access.key | none | AWS access key, used to identify the user interacting with AWS |
+| s3fs.secret.key | none | AWS secret access key, used to authenticate the user interacting with AWS |
+| s3fs.request.metric.collector.class | TODO | Fully-qualified class name to instantiate an AWS SDK request/response metric collector |
+| s3fs.cache.attributes.ttl           | `60000` | Time to live (in milliseconds) for cached file attributes                                                                |
+| s3fs.cache.attributes.size          | `5000`  | Maximum number of cached file attribute entries                                                                          |
+| s3fs.connection.timeout | TODO | Timeout (in milliseconds) for establishing a connection to a remote service |
+| s3fs.max.connections | TODO | Maximum number of connections allowed in a connection pool |
+| s3fs.max.retry.error | TODO | Maximum number of times that a single request should be retried, assuming it fails for a retryable error |
+| s3fs.protocol | TODO | Protocol (HTTP or HTTPS) to use when connecting to AWS |
+| s3fs.proxy.domain | none | For NTLM proxies: The Windows domain name to use when authenticating with the proxy |
+| s3fs.proxy.protocol | none | Proxy connection protocol. |
+| s3fs.proxy.host | none | Proxy host name either from the configured endpoint or from the "http.proxyHost" system property |
+| s3fs.proxy.password | none | The password to use when connecting through a proxy |
+| s3fs.proxy.port | none | Proxy port either from the configured endpoint or from the "http.proxyPort" system property |
+| s3fs.proxy.username | none | The username to use when connecting through a proxy |
+| s3fs.proxy.workstation | none | For NTLM proxies: The Windows workstation name to use when authenticating with the proxy |
+| s3fs.region | none | The AWS Region to configure the client |
+| s3fs.socket.send.buffer.size.hint | TODO | The size hint (in bytes) for the low level TCP send buffer |
+| s3fs.socket.receive.buffer.size.hint | TODO | The size hint (in bytes) for the low level TCP receive buffer |
+| s3fs.socket.timeout | TODO | Timeout (in milliseconds) for each read to the underlying socket |
+| s3fs.user.agent.prefix | TODO | Prefix of the user agent that is sent with each request to AWS |
+| s3fs.amazon.s3.factory.class | TODO | Fully-qualified class name to instantiate a S3 factory base class which creates a S3 client instance |
+| s3fs.signer.override | TODO | Fully-qualified class name to define the signer that should be used when authenticating with AWS |
+| s3fs.path.style.access | TODO | Boolean that indicates whether the client uses path-style access for all requests |
+| s3fs.request.header.cache-control   | blank   | Configures the `cacheControl` on request builders (e.g. `CopyObjectRequest`, `PutObjectRequest`, etc.)                   |
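+
+The cache settings can be supplied together with the rest of the client configuration when creating the
+file system. A minimal sketch (the class name, endpoint URI and credential values are only illustrative):
+
+```java
+import java.net.URI;
+import java.nio.file.FileSystem;
+import java.nio.file.FileSystems;
+import java.util.HashMap;
+import java.util.Map;
+
+public class S3FileSystemConfigExample
+{
+    public static void main(String[] args) throws Exception
+    {
+        Map<String, String> env = new HashMap<>();
+        env.put("s3fs.access.key", "my-access-key");
+        env.put("s3fs.secret.key", "my-secret-key");
+        // Cache file attributes for 30 seconds and keep at most 10000 entries.
+        env.put("s3fs.cache.attributes.ttl", "30000");
+        env.put("s3fs.cache.attributes.size", "10000");
+
+        try (FileSystem fs = FileSystems.newFileSystem(URI.create("s3://s3.amazonaws.com/"), env))
+        {
+            // Use fs.getPath("/my-bucket/some/key") with the regular java.nio.file API.
+        }
+    }
+}
+```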
diff --git a/src/main/java/org/carlspring/cloud/storage/s3fs/S3Factory.java b/src/main/java/org/carlspring/cloud/storage/s3fs/S3Factory.java
index 00325699..bdb85231 100644
--- a/src/main/java/org/carlspring/cloud/storage/s3fs/S3Factory.java
+++ b/src/main/java/org/carlspring/cloud/storage/s3fs/S3Factory.java
@@ -5,6 +5,8 @@
import java.time.Duration;
import java.util.Properties;
+import org.carlspring.cloud.storage.s3fs.attribute.S3BasicFileAttributes;
+import org.carlspring.cloud.storage.s3fs.attribute.S3PosixFileAttributes;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import software.amazon.awssdk.auth.credentials.AwsBasicCredentials;
@@ -41,6 +43,16 @@ public abstract class S3Factory
public static final String SECRET_KEY = "s3fs.secret.key";
+ /**
+ * TTL (in milliseconds) for cached {@link S3BasicFileAttributes} and {@link S3PosixFileAttributes} entries.
+ */
+ public static final String CACHE_ATTRIBUTES_TTL = "s3fs.cache.attributes.ttl";
+
+ /**
+ * Maximum number of {@link S3BasicFileAttributes} and {@link S3PosixFileAttributes} entries kept in the cache.
+ */
+ public static final String CACHE_ATTRIBUTES_SIZE = "s3fs.cache.attributes.size";
+
public static final String REQUEST_METRIC_COLLECTOR_CLASS = "s3fs.request.metric.collector.class";
public static final String CONNECTION_TIMEOUT = "s3fs.connection.timeout";
diff --git a/src/main/java/org/carlspring/cloud/storage/s3fs/S3FileSystem.java b/src/main/java/org/carlspring/cloud/storage/s3fs/S3FileSystem.java
index 6853aa12..95758ed3 100644
--- a/src/main/java/org/carlspring/cloud/storage/s3fs/S3FileSystem.java
+++ b/src/main/java/org/carlspring/cloud/storage/s3fs/S3FileSystem.java
@@ -1,5 +1,12 @@
package org.carlspring.cloud.storage.s3fs;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableSet;
+import org.carlspring.cloud.storage.s3fs.cache.S3FileAttributesCache;
+import org.carlspring.cloud.storage.s3fs.util.S3Utils;
+import software.amazon.awssdk.services.s3.S3Client;
+import software.amazon.awssdk.services.s3.model.Bucket;
+
import java.io.IOException;
import java.nio.file.FileStore;
import java.nio.file.FileSystem;
@@ -7,16 +14,9 @@
import java.nio.file.PathMatcher;
import java.nio.file.WatchService;
import java.nio.file.attribute.UserPrincipalLookupService;
-import java.util.List;
import java.util.Properties;
import java.util.Set;
-import com.google.common.base.Joiner;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableSet;
-import org.carlspring.cloud.storage.s3fs.util.S3Utils;
-import software.amazon.awssdk.services.s3.S3Client;
-import software.amazon.awssdk.services.s3.model.Bucket;
import static org.carlspring.cloud.storage.s3fs.S3Path.PATH_SEPARATOR;
/**
@@ -37,7 +37,7 @@ public class S3FileSystem
private final String endpoint;
- private final int cache;
+ private S3FileAttributesCache fileAttributesCache;
private final Properties properties;
@@ -51,8 +51,12 @@ public S3FileSystem(final S3FileSystemProvider provider,
this.key = key;
this.client = client;
this.endpoint = endpoint;
- this.cache = 60000; // 1 minute cache for the s3Path
this.properties = properties;
+
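+ // Attribute cache configuration; values may be given as strings or numbers and default to the values documented in configuration-options.md.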
+ int cacheTTL = Integer.parseInt(String.valueOf(properties.getOrDefault(S3Factory.CACHE_ATTRIBUTES_TTL, "60000")));
+ int cacheSize = Integer.parseInt(String.valueOf(properties.getOrDefault(S3Factory.CACHE_ATTRIBUTES_SIZE, "5000")));
+
+ this.fileAttributesCache = new S3FileAttributesCache(cacheTTL, cacheSize);
}
public S3FileSystem(final S3FileSystemProvider provider,
@@ -78,6 +82,7 @@ public String getKey()
public void close()
throws IOException
{
+ this.fileAttributesCache.invalidateAll();
this.provider.close(this);
}
@@ -184,12 +189,14 @@ public String[] key2Parts(String keyParts)
return S3Utils.key2Parts(keyParts);
}
- public int getCache()
+ /**
+ * @return The {@link S3FileAttributesCache} instance holding the cached path attributes for this file system.
+ */
+ public S3FileAttributesCache getFileAttributesCache()
{
- return cache;
+ return fileAttributesCache;
}
-
/**
* @return The value of the {@link S3Factory#REQUEST_HEADER_CACHE_CONTROL} property. Default is empty.
*/
diff --git a/src/main/java/org/carlspring/cloud/storage/s3fs/S3FileSystemProvider.java b/src/main/java/org/carlspring/cloud/storage/s3fs/S3FileSystemProvider.java
index 7719cc73..7934d679 100644
--- a/src/main/java/org/carlspring/cloud/storage/s3fs/S3FileSystemProvider.java
+++ b/src/main/java/org/carlspring/cloud/storage/s3fs/S3FileSystemProvider.java
@@ -1,13 +1,25 @@
package org.carlspring.cloud.storage.s3fs;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Sets;
import org.carlspring.cloud.storage.s3fs.attribute.S3BasicFileAttributeView;
import org.carlspring.cloud.storage.s3fs.attribute.S3BasicFileAttributes;
import org.carlspring.cloud.storage.s3fs.attribute.S3PosixFileAttributeView;
import org.carlspring.cloud.storage.s3fs.attribute.S3PosixFileAttributes;
import org.carlspring.cloud.storage.s3fs.util.AttributesUtils;
-import org.carlspring.cloud.storage.s3fs.util.Cache;
import org.carlspring.cloud.storage.s3fs.util.Constants;
import org.carlspring.cloud.storage.s3fs.util.S3Utils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import software.amazon.awssdk.core.ResponseInputStream;
+import software.amazon.awssdk.core.exception.SdkException;
+import software.amazon.awssdk.core.internal.util.Mimetype;
+import software.amazon.awssdk.core.sync.RequestBody;
+import software.amazon.awssdk.services.s3.S3Client;
+import software.amazon.awssdk.services.s3.model.*;
+import software.amazon.awssdk.utils.StringUtils;
import java.io.IOException;
import java.io.InputStream;
@@ -19,91 +31,18 @@
import java.nio.channels.AsynchronousFileChannel;
import java.nio.channels.SeekableByteChannel;
import java.nio.charset.StandardCharsets;
-import java.nio.file.AccessMode;
-import java.nio.file.AtomicMoveNotSupportedException;
-import java.nio.file.CopyOption;
-import java.nio.file.DirectoryStream;
-import java.nio.file.FileAlreadyExistsException;
-import java.nio.file.FileStore;
-import java.nio.file.FileSystem;
-import java.nio.file.FileSystemAlreadyExistsException;
-import java.nio.file.FileSystemNotFoundException;
-import java.nio.file.Files;
-import java.nio.file.LinkOption;
-import java.nio.file.NoSuchFileException;
-import java.nio.file.OpenOption;
-import java.nio.file.Path;
-import java.nio.file.StandardCopyOption;
-import java.nio.file.StandardOpenOption;
-import java.nio.file.attribute.BasicFileAttributeView;
-import java.nio.file.attribute.BasicFileAttributes;
-import java.nio.file.attribute.FileAttribute;
-import java.nio.file.attribute.FileAttributeView;
-import java.nio.file.attribute.PosixFileAttributeView;
-import java.nio.file.attribute.PosixFileAttributes;
+import java.nio.file.*;
+import java.nio.file.attribute.*;
import java.nio.file.spi.FileSystemProvider;
-import java.util.ArrayDeque;
-import java.util.Arrays;
-import java.util.Deque;
-import java.util.EnumSet;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.LinkedHashSet;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Properties;
-import java.util.Set;
+import java.util.*;
+import java.util.concurrent.CompletionException;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ExecutorService;
-import java.util.stream.Collectors;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableSet;
-import com.google.common.collect.Sets;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import software.amazon.awssdk.core.ResponseInputStream;
-import software.amazon.awssdk.core.exception.SdkException;
-import software.amazon.awssdk.core.internal.util.Mimetype;
-import software.amazon.awssdk.core.sync.RequestBody;
-import software.amazon.awssdk.services.s3.S3Client;
-import software.amazon.awssdk.services.s3.model.Bucket;
-import software.amazon.awssdk.services.s3.model.CopyObjectRequest;
-import software.amazon.awssdk.services.s3.model.CreateBucketRequest;
-import software.amazon.awssdk.services.s3.model.Delete;
-import software.amazon.awssdk.services.s3.model.DeleteObjectsRequest;
-import software.amazon.awssdk.services.s3.model.GetObjectRequest;
-import software.amazon.awssdk.services.s3.model.GetObjectResponse;
-import software.amazon.awssdk.services.s3.model.ObjectIdentifier;
-import software.amazon.awssdk.services.s3.model.PutObjectRequest;
-import software.amazon.awssdk.services.s3.model.S3Exception;
-import software.amazon.awssdk.services.s3.model.S3Object;
-import software.amazon.awssdk.utils.StringUtils;
import static com.google.common.collect.Sets.difference;
import static java.lang.String.format;
-import static org.carlspring.cloud.storage.s3fs.S3Factory.ACCESS_KEY;
-import static org.carlspring.cloud.storage.s3fs.S3Factory.CONNECTION_TIMEOUT;
-import static org.carlspring.cloud.storage.s3fs.S3Factory.MAX_CONNECTIONS;
-import static org.carlspring.cloud.storage.s3fs.S3Factory.MAX_ERROR_RETRY;
-import static org.carlspring.cloud.storage.s3fs.S3Factory.PATH_STYLE_ACCESS;
-import static org.carlspring.cloud.storage.s3fs.S3Factory.PROTOCOL;
-import static org.carlspring.cloud.storage.s3fs.S3Factory.PROXY_DOMAIN;
-import static org.carlspring.cloud.storage.s3fs.S3Factory.PROXY_HOST;
-import static org.carlspring.cloud.storage.s3fs.S3Factory.PROXY_PASSWORD;
-import static org.carlspring.cloud.storage.s3fs.S3Factory.PROXY_PORT;
-import static org.carlspring.cloud.storage.s3fs.S3Factory.PROXY_USERNAME;
-import static org.carlspring.cloud.storage.s3fs.S3Factory.PROXY_WORKSTATION;
-import static org.carlspring.cloud.storage.s3fs.S3Factory.REGION;
-import static org.carlspring.cloud.storage.s3fs.S3Factory.REQUEST_METRIC_COLLECTOR_CLASS;
-import static org.carlspring.cloud.storage.s3fs.S3Factory.SECRET_KEY;
-import static org.carlspring.cloud.storage.s3fs.S3Factory.SIGNER_OVERRIDE;
-import static org.carlspring.cloud.storage.s3fs.S3Factory.SOCKET_RECEIVE_BUFFER_SIZE_HINT;
-import static org.carlspring.cloud.storage.s3fs.S3Factory.SOCKET_SEND_BUFFER_SIZE_HINT;
-import static org.carlspring.cloud.storage.s3fs.S3Factory.SOCKET_TIMEOUT;
-import static org.carlspring.cloud.storage.s3fs.S3Factory.USER_AGENT;
+import static org.carlspring.cloud.storage.s3fs.S3Factory.*;
import static software.amazon.awssdk.http.Header.CONTENT_TYPE;
import static software.amazon.awssdk.http.HttpStatusCode.NOT_FOUND;
@@ -148,6 +87,8 @@ public class S3FileSystemProvider
private static final List<String> PROPS_TO_OVERLOAD = Arrays.asList(ACCESS_KEY,
SECRET_KEY,
+ CACHE_ATTRIBUTES_TTL,
+ CACHE_ATTRIBUTES_SIZE,
REQUEST_METRIC_COLLECTOR_CLASS,
CONNECTION_TIMEOUT,
MAX_CONNECTIONS,
@@ -174,9 +115,6 @@ public class S3FileSystemProvider
private final S3Utils s3Utils = new S3Utils();
- private Cache cache = new Cache();
-
-
@Override
public String getScheme()
{
@@ -487,7 +425,7 @@ private S3Path toS3Path(Path path)
@Override
public Path getPath(URI uri)
{
- FileSystem fileSystem = getFileSystem(uri);
+ S3FileSystem fileSystem = getFileSystem(uri);
/**
* TODO: set as a list. one s3FileSystem by region
@@ -733,45 +671,32 @@ private void deleteBatch(S3Client client,
throws IOException
{
- List<ObjectIdentifier> keys = batch.stream()
- .map(s3Path -> ObjectIdentifier.builder()
- .key(s3Path.getKey())
- .build())
- .collect(Collectors.toList());
+ // Create a combined list of ObjectIdentifiers in one loop
+ List<ObjectIdentifier> objectIdentifiers = new ArrayList<>(batch.size() * 2);
+
+ for (S3Path s3Path : batch) {
+ // The original key
+ objectIdentifiers.add(ObjectIdentifier.builder().key(s3Path.getKey()).build());
+ // The key with '/' appended
+ objectIdentifiers.add(ObjectIdentifier.builder().key(s3Path.getKey() + '/').build());
+ }
+ // Create the multi-object delete request with both sets of keys
DeleteObjectsRequest multiObjectDeleteRequest = DeleteObjectsRequest.builder()
.bucket(bucketName)
.delete(Delete.builder()
- .objects(keys)
+ .objects(objectIdentifiers)
.build())
.build();
- try
- {
+ // Try to delete all objects at once
+ try {
client.deleteObjects(multiObjectDeleteRequest);
- }
- catch (SdkException e)
- {
- throw new IOException(e);
- }
-
- // we delete the two objects (sometimes exists the key '/' and sometimes not)
- keys = batch.stream()
- .map(s3Path -> ObjectIdentifier.builder().key(s3Path.getKey() + '/').build())
- .collect(Collectors.toList());
- multiObjectDeleteRequest = DeleteObjectsRequest.builder()
- .bucket(bucketName)
- .delete(Delete.builder()
- .objects(keys)
- .build())
- .build();
-
- try
- {
- client.deleteObjects(multiObjectDeleteRequest);
- }
- catch (SdkException e)
- {
+ for (S3Path path : batch) {
+ path.getFileAttributesCache().invalidate(path, BasicFileAttributes.class);
+ path.getFileAttributesCache().invalidate(path, PosixFileAttributes.class);
+ }
+ } catch (SdkException e) {
throw new IOException(e);
}
}
@@ -855,15 +780,16 @@ public void copy(Path source,
S3Path s3Source = toS3Path(source);
S3Path s3Target = toS3Path(target);
- // TODO: implements support for copying directories
+ // TODO: implement support for copying directories
Preconditions.checkArgument(!Files.isDirectory(source), "copying directories is not yet supported: %s", source);
Preconditions.checkArgument(!Files.isDirectory(target), "copying directories is not yet supported: %s", target);
ImmutableSet<CopyOption> actualOptions = ImmutableSet.copyOf(options);
verifySupportedOptions(EnumSet.of(StandardCopyOption.REPLACE_EXISTING), actualOptions);
- if (exists(s3Target) && !actualOptions.contains(StandardCopyOption.REPLACE_EXISTING))
+ // Check the REPLACE_EXISTING option first so that the exists() call can be skipped when replacing is allowed
+ if (!actualOptions.contains(StandardCopyOption.REPLACE_EXISTING) && exists(s3Target))
{
throw new FileAlreadyExistsException(format("target already exists: %s", target));
}
@@ -993,39 +919,22 @@ public <A extends BasicFileAttributes> A readAttributes(Path path,
LinkOption... options)
throws IOException
{
- S3Path s3Path = toS3Path(path);
- if (type == BasicFileAttributes.class)
+ try
{
- if (cache.isInTime(s3Path.getFileSystem().getCache(), s3Path.getFileAttributes()))
+ S3Path s3Path = toS3Path(path);
+ if (type == BasicFileAttributes.class || type == S3BasicFileAttributes.class ||
+ type == PosixFileAttributes.class || type == S3PosixFileAttributes.class)
{
- A result = type.cast(s3Path.getFileAttributes());
- s3Path.setFileAttributes(null);
-
- return result;
- }
- else
- {
- S3BasicFileAttributes attrs = s3Utils.getS3FileAttributes(s3Path);
- s3Path.setFileAttributes(attrs);
-
+ S3BasicFileAttributes attrs = s3Path.getFileAttributesCache().get(s3Path, type);
+ if(attrs == null) {
+ throw new NoSuchFileException(path.toString());
+ }
return type.cast(attrs);
}
}
- else if (type == PosixFileAttributes.class)
+ catch (CompletionException e)
{
- if (s3Path.getFileAttributes() instanceof PosixFileAttributes &&
- cache.isInTime(s3Path.getFileSystem().getCache(), s3Path.getFileAttributes()))
- {
- A result = type.cast(s3Path.getFileAttributes());
- s3Path.setFileAttributes(null);
-
- return result;
- }
-
- S3PosixFileAttributes attrs = s3Utils.getS3PosixFileAttributes(s3Path);
- s3Path.setFileAttributes(attrs);
-
- return type.cast(attrs);
+ throw new IOException(e);
}
throw new UnsupportedOperationException(format("only %s or %s supported",
@@ -1221,6 +1130,7 @@ public void close(S3FileSystem fileSystem)
{
if (fileSystem.getKey() != null && fileSystems.containsKey(fileSystem.getKey()))
{
+ fileSystem.getFileAttributesCache().invalidateAll();
fileSystems.remove(fileSystem.getKey());
}
}
@@ -1239,14 +1149,4 @@ protected static ConcurrentMap<String, S3FileSystem> getFilesystems()
return fileSystems;
}
- public Cache getCache()
- {
- return cache;
- }
-
- public void setCache(Cache cache)
- {
- this.cache = cache;
- }
-
}
diff --git a/src/main/java/org/carlspring/cloud/storage/s3fs/S3Iterator.java b/src/main/java/org/carlspring/cloud/storage/s3fs/S3Iterator.java
index 60948ec7..5679f73a 100644
--- a/src/main/java/org/carlspring/cloud/storage/s3fs/S3Iterator.java
+++ b/src/main/java/org/carlspring/cloud/storage/s3fs/S3Iterator.java
@@ -1,7 +1,9 @@
package org.carlspring.cloud.storage.s3fs;
+import org.carlspring.cloud.storage.s3fs.cache.S3FileAttributesCache;
import org.carlspring.cloud.storage.s3fs.util.S3Utils;
+import java.nio.file.NoSuchFileException;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Arrays;
@@ -32,6 +34,8 @@ public class S3Iterator
private final S3FileSystem fileSystem;
+ private final S3FileAttributesCache fileAttributesCache;
+
private final S3FileStore fileStore;
private final String key;
@@ -69,6 +73,7 @@ public S3Iterator(S3FileStore fileStore, String key, boolean incremental)
this.fileStore = fileStore;
this.fileSystem = fileStore.getFileSystem();
+ this.fileAttributesCache = fileSystem.getFileAttributesCache();
this.key = key;
this.current = fileSystem.getClient().listObjectsV2(listObjectsV2Request);
this.incremental = incremental;
@@ -136,7 +141,7 @@ private void parseObjects()
{
final String objectKey = object.key();
- String[] keyParts = fileSystem.key2Parts(objectKey);
+ String[] keyParts = S3Utils.key2Parts(objectKey);
addParentPaths(keyParts);
@@ -202,8 +207,13 @@ private void parseObjectListing(String key, List<S3Path> listPath, ListObjectsV2
{
if (!commonPrefix.prefix().equals("/"))
{
- listPath.add(new S3Path(fileSystem, "/" + fileStore.name(),
- fileSystem.key2Parts(commonPrefix.prefix())));
+ S3Path s3Path = new S3Path(fileSystem, "/" + fileStore.name(), S3Utils.key2Parts(commonPrefix.prefix()));
+ listPath.add(s3Path);
+ try {
+ fileAttributesCache.put(s3Path, s3Utils.getS3FileAttributes(s3Path));
+ } catch (NoSuchFileException e) { //NOPMD
+ // Ignored: the common prefix has no backing object, so there are no attributes to cache.
+ }
}
}
@@ -218,9 +228,9 @@ private void parseObjectListing(String key, List<S3Path> listPath, ListObjectsV2
{
S3Path descendentPart = new S3Path(fileSystem,
"/" + fileStore.name(),
- fileSystem.key2Parts(immediateDescendantKey));
+ S3Utils.key2Parts(immediateDescendantKey));
- descendentPart.setFileAttributes(s3Utils.toS3FileAttributes(object, descendentPart.getKey()));
+ fileAttributesCache.put(descendentPart, s3Utils.toS3FileAttributes(object, descendentPart.getKey()));
if (!listPath.contains(descendentPart))
{
listPath.add(descendentPart);
diff --git a/src/main/java/org/carlspring/cloud/storage/s3fs/S3Path.java b/src/main/java/org/carlspring/cloud/storage/s3fs/S3Path.java
index 8908e1cc..608af1d4 100644
--- a/src/main/java/org/carlspring/cloud/storage/s3fs/S3Path.java
+++ b/src/main/java/org/carlspring/cloud/storage/s3fs/S3Path.java
@@ -12,6 +12,7 @@
import java.nio.file.WatchEvent;
import java.nio.file.WatchKey;
import java.nio.file.WatchService;
+import java.nio.file.attribute.BasicFileAttributes;
import java.util.ArrayDeque;
import java.util.Deque;
import java.util.Iterator;
@@ -22,6 +23,7 @@
import com.google.common.base.Splitter;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
+import org.carlspring.cloud.storage.s3fs.cache.S3FileAttributesCache;
import software.amazon.awssdk.core.Protocol;
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.S3Utilities;
@@ -53,10 +55,9 @@ public class S3Path
private final S3FileSystem fileSystem;
/**
- * S3BasicFileAttributes cache
+ * The {@link S3FileAttributesCache} shared with this path's file system.
*/
- private S3BasicFileAttributes fileAttributes;
-
+ private S3FileAttributesCache fileAttributesCache;
/**
* Build an S3Path from path segments. '/' are stripped from each segment.
@@ -121,6 +122,7 @@ public S3Path(S3FileSystem fileSystem, String first, String... more)
}
this.uri = localUri;
this.fileSystem = fileSystem;
+ this.fileAttributesCache = fileSystem.getFileAttributesCache();
}
/**
@@ -829,14 +831,18 @@ private String decode(URI uri)
}
}
- public S3BasicFileAttributes getFileAttributes()
+ public S3BasicFileAttributes getFileAttributes(Class<? extends BasicFileAttributes> type)
{
- return fileAttributes;
+ return fileAttributesCache.get(this, type);
}
- public void setFileAttributes(S3BasicFileAttributes fileAttributes)
+ /**
+ * Shortcut for {@code getFileSystem().getFileAttributesCache()}.
+ *
+ * @return The {@link S3FileAttributesCache} of the owning {@link S3FileSystem}.
+ */
+ public S3FileAttributesCache getFileAttributesCache()
{
- this.fileAttributes = fileAttributes;
+ return fileAttributesCache;
}
}
diff --git a/src/main/java/org/carlspring/cloud/storage/s3fs/attribute/S3PosixFileAttributeView.java b/src/main/java/org/carlspring/cloud/storage/s3fs/attribute/S3PosixFileAttributeView.java
index 6834259d..3cb1e48d 100644
--- a/src/main/java/org/carlspring/cloud/storage/s3fs/attribute/S3PosixFileAttributeView.java
+++ b/src/main/java/org/carlspring/cloud/storage/s3fs/attribute/S3PosixFileAttributeView.java
@@ -16,9 +16,6 @@ public class S3PosixFileAttributeView
private static final Logger log = LoggerFactory.getLogger(S3PosixFileAttributeView.class);
private S3Path s3Path;
- private PosixFileAttributes posixFileAttributes;
-
-
public S3PosixFileAttributeView(S3Path s3Path)
{
this.s3Path = s3Path;
@@ -75,12 +72,7 @@ public void setTimes(FileTime lastModifiedTime, FileTime lastAccessTime, FileTim
public PosixFileAttributes read()
throws IOException
{
- if (posixFileAttributes == null)
- {
- posixFileAttributes = s3Path.getFileSystem().provider().readAttributes(s3Path, PosixFileAttributes.class);
- }
-
- return posixFileAttributes;
+ return s3Path.getFileSystem().provider().readAttributes(s3Path, PosixFileAttributes.class);
}
}
diff --git a/src/main/java/org/carlspring/cloud/storage/s3fs/cache/S3FileAttributesCache.java b/src/main/java/org/carlspring/cloud/storage/s3fs/cache/S3FileAttributesCache.java
new file mode 100644
index 00000000..a29c8bee
--- /dev/null
+++ b/src/main/java/org/carlspring/cloud/storage/s3fs/cache/S3FileAttributesCache.java
@@ -0,0 +1,176 @@
+package org.carlspring.cloud.storage.s3fs.cache;
+
+import com.github.benmanes.caffeine.cache.Cache;
+import com.github.benmanes.caffeine.cache.Caffeine;
+import com.github.benmanes.caffeine.cache.stats.CacheStats;
+import org.carlspring.cloud.storage.s3fs.S3Path;
+import org.carlspring.cloud.storage.s3fs.attribute.S3BasicFileAttributes;
+import org.carlspring.cloud.storage.s3fs.attribute.S3PosixFileAttributes;
+import org.carlspring.cloud.storage.s3fs.util.S3Utils;
+
+import java.nio.file.NoSuchFileException;
+import java.nio.file.attribute.BasicFileAttributes;
+import java.nio.file.attribute.PosixFileAttributes;
+import java.util.concurrent.CompletionException;
+
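+/**
+ * Caffeine-backed cache for {@link S3BasicFileAttributes} and {@link S3PosixFileAttributes}, shared by all
+ * {@link S3Path} instances of a single file system. Entries expire after the configured TTL and the cache is
+ * bounded by the configured maximum size.
+ */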
+public class S3FileAttributesCache
+{
+
+ private final S3Utils s3Utils = new S3Utils();
+
+ private final Cache<String, S3BasicFileAttributes> cache;
+
+ /**
+ * @param cacheTTL TTL in milliseconds.
+ * @param cacheSize Maximum number of cached entries.
+ */
+ public S3FileAttributesCache(int cacheTTL, int cacheSize)
+ {
+ this.cache = cacheBuilder(cacheTTL, cacheSize).build();
+ }
+
+ /**
+ * Generates a cache key based on S3Path and the attribute class type.
+ * The key is a combination of the S3Path's hashCode and the attribute class name.
+ *
+ * @param path The {@link S3Path}.
+ * @param attributeClass The class type of {@link BasicFileAttributes}.
+ * @return A unique string key.
+ */
+ public static String generateCacheKey(S3Path path, Class<? extends BasicFileAttributes> attributeClass)
+ {
+ StringBuilder key = new StringBuilder();
+ key.append(path.getKey().replaceAll("/", "%2F"))
+ .append("_");
+
+ if (attributeClass == BasicFileAttributes.class) {
+ key.append(S3BasicFileAttributes.class.getSimpleName());
+ } else if (attributeClass == PosixFileAttributes.class) {
+ key.append(S3PosixFileAttributes.class.getSimpleName());
+ } else {
+ key.append(attributeClass.getSimpleName());
+ }
+
+ return key.toString();
+ }
+
+
+ /**
+ * Retrieves the file attributes of the given S3Path (either BasicFileAttributes or PosixFileAttributes)
+ *
+ * @param path The {@link S3Path}.
+ * @param attrType The attribute class type to retrieve.
+ *
+ * @return The {@link S3BasicFileAttributes} or {@link S3PosixFileAttributes} for the given {@link S3Path}, or
+ * {@code null} when the object does not exist or when `attrType` is neither {@link BasicFileAttributes}
+ * nor {@link PosixFileAttributes}.
+ *
+ * @throws CompletionException if a checked exception was thrown while loading the value from AWS.
+ */
+ public S3BasicFileAttributes get(final S3Path path, final Class<? extends BasicFileAttributes> attrType)
+ {
+ String key = generateCacheKey(path, attrType);
+ S3BasicFileAttributes attrs = cache.getIfPresent(key);
+ if(attrs == null)
+ {
+ attrs = fetchAttribute(path, key);
+ if(attrs != null) {
+ put(path, attrs);
+ }
+ }
+ return attrs;
+ }
+
+ public boolean contains(final S3Path path, final Class<? extends BasicFileAttributes> attrType)
+ {
+ String key = generateCacheKey(path, attrType);
+ return contains(key);
+ }
+
+ public boolean contains(final String key)
+ {
+ return cache.asMap().containsKey(key);
+ }
+
+
+ /**
+ * @param path The S3 path.
+ * @param attrs The file attributes to store in the cache; may also be {@link S3PosixFileAttributes}.
+ */
+ public void put(final S3Path path, final S3BasicFileAttributes attrs)
+ {
+ // There is an off-chance we could have both BasicFileAttributes and PosixFileAttributes cached at different times.
+ // This could cause a temporary situation where the cache serves slightly outdated instance of BasicFileAttributes.
+ // To ensure this does not happen we always need to replace the BasicFileAttributes instances when
+ // the PosixFileAttributes type is cached/updated.
+ String basicKey = generateCacheKey(path, BasicFileAttributes.class);
+ cache.put(basicKey, attrs);
+
+ if(attrs instanceof PosixFileAttributes)
+ {
+ String posixKey = generateCacheKey(path, PosixFileAttributes.class);
+ cache.put(posixKey, attrs);
+ }
+ }
+
+ /**
+ * Invalidates the cached file attributes of the given attribute type for the given s3Path.
+ *
+ * @param path The S3 path.
+ * @param attrType The attribute class type whose entry should be invalidated.
+ */
+ public void invalidate(final S3Path path, final Class<? extends BasicFileAttributes> attrType)
+ {
+ String key = generateCacheKey(path, attrType);
+ cache.invalidate(key);
+ }
+
+ /**
+ * Invalidates the file attributes in the cache for the given cache key.
+ *
+ * @param key The cache key
+ */
+ public void invalidate(final String key)
+ {
+ cache.invalidate(key);
+ }
+
+ public void invalidateAll()
+ {
+ cache.invalidateAll();
+ }
+
+ public CacheStats stats()
+ {
+ return cache.stats();
+ }
+
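+ /**
+ * Builds the Caffeine configuration used for the internal cache: TTL-based expiry, bounded maximum size,
+ * soft values and statistics recording. Being {@code protected}, it can be overridden to customise the cache.
+ */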
+ protected Caffeine<String, S3BasicFileAttributes> cacheBuilder(int cacheTTL, int cacheSize)
+ {
+ return Caffeine.newBuilder()
+ .expireAfter(new S3FileAttributesCachePolicy(cacheTTL))
+ .maximumSize(cacheSize)
+ .softValues()
+ .recordStats()
+ ;
+ }
+
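+ /**
+ * Loads the attributes from S3 when there is no cache entry for the given key. The attribute type is derived
+ * from the suffix produced by {@link #generateCacheKey}.
+ *
+ * @return The fetched attributes, or {@code null} when the object does not exist or the key does not map to a
+ * supported attribute type.
+ */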
+ protected S3BasicFileAttributes fetchAttribute(S3Path path, String key)
+ {
+ try
+ {
+ if (key.contains(BasicFileAttributes.class.getSimpleName()))
+ {
+ return s3Utils.getS3FileAttributes(path);
+ }
+ else if (key.contains(PosixFileAttributes.class.getSimpleName()))
+ {
+ return s3Utils.getS3PosixFileAttributes(path);
+ }
+ return null;
+ }
+ catch (NoSuchFileException e)
+ {
+ return null;
+ }
+ }
+
+
+}
diff --git a/src/main/java/org/carlspring/cloud/storage/s3fs/cache/S3FileAttributesCachePolicy.java b/src/main/java/org/carlspring/cloud/storage/s3fs/cache/S3FileAttributesCachePolicy.java
new file mode 100644
index 00000000..c35ba410
--- /dev/null
+++ b/src/main/java/org/carlspring/cloud/storage/s3fs/cache/S3FileAttributesCachePolicy.java
@@ -0,0 +1,48 @@
+package org.carlspring.cloud.storage.s3fs.cache;
+
+import com.github.benmanes.caffeine.cache.Expiry;
+import org.carlspring.cloud.storage.s3fs.attribute.S3BasicFileAttributes;
+
+import java.util.concurrent.TimeUnit;
+
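+/**
+ * Caffeine {@link Expiry} policy used by {@link S3FileAttributesCache}: entries expire a fixed TTL after they
+ * are created or updated, while reads leave the remaining duration unchanged.
+ */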
+public class S3FileAttributesCachePolicy implements Expiry<String, S3BasicFileAttributes>
+{
+
+ private int cacheTTL;
+
+ public S3FileAttributesCachePolicy(int cacheTTL)
+ {
+ this.cacheTTL = cacheTTL;
+ }
+
+ public int getTTL()
+ {
+ return cacheTTL;
+ }
+
+ public void setTTL(int cacheTTL)
+ {
+ this.cacheTTL = cacheTTL;
+ }
+
+ @Override
+ public long expireAfterCreate(String key, S3BasicFileAttributes value, long currentTime)
+ {
+ // Set initial TTL upon creation
+ return TimeUnit.MILLISECONDS.toNanos(cacheTTL);
+ }
+
+ @Override
+ public long expireAfterUpdate(String key, S3BasicFileAttributes value, long currentTime, long currentDuration)
+ {
+ // Reset TTL on update
+ return TimeUnit.MILLISECONDS.toNanos(cacheTTL);
+ }
+
+ @Override
+ public long expireAfterRead(String key, S3BasicFileAttributes value, long currentTime, long currentDuration)
+ {
+ // Use already assigned TTL.
+ return currentDuration;
+ }
+}
diff --git a/src/test/java/org/carlspring/cloud/storage/s3fs/fileSystemProvider/ReadAttributesTest.java b/src/test/java/org/carlspring/cloud/storage/s3fs/fileSystemProvider/ReadAttributesTest.java
index b83f3787..9523a117 100644
--- a/src/test/java/org/carlspring/cloud/storage/s3fs/fileSystemProvider/ReadAttributesTest.java
+++ b/src/test/java/org/carlspring/cloud/storage/s3fs/fileSystemProvider/ReadAttributesTest.java
@@ -1,20 +1,20 @@
package org.carlspring.cloud.storage.s3fs.fileSystemProvider;
+import com.github.benmanes.caffeine.cache.stats.CacheStats;
+import com.google.common.collect.Sets;
import org.carlspring.cloud.storage.s3fs.S3FileSystem;
import org.carlspring.cloud.storage.s3fs.S3Path;
import org.carlspring.cloud.storage.s3fs.S3UnitTestBase;
+import org.carlspring.cloud.storage.s3fs.cache.S3FileAttributesCache;
import org.carlspring.cloud.storage.s3fs.util.MockBucket;
import org.carlspring.cloud.storage.s3fs.util.S3ClientMock;
import org.carlspring.cloud.storage.s3fs.util.S3EndpointConstant;
import org.carlspring.cloud.storage.s3fs.util.S3MockFactory;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
import java.io.IOException;
-import java.nio.file.FileSystem;
-import java.nio.file.FileSystemNotFoundException;
-import java.nio.file.FileSystems;
-import java.nio.file.Files;
-import java.nio.file.NoSuchFileException;
-import java.nio.file.Path;
+import java.nio.file.*;
import java.nio.file.attribute.BasicFileAttributes;
import java.nio.file.attribute.DosFileAttributes;
import java.nio.file.attribute.PosixFileAttributes;
@@ -22,17 +22,9 @@
import java.util.Map;
import java.util.concurrent.TimeUnit;
-import com.google.common.collect.Sets;
-import org.junit.jupiter.api.BeforeEach;
-import org.junit.jupiter.api.Test;
+import static org.assertj.core.api.Assertions.assertThat;
import static org.carlspring.cloud.storage.s3fs.util.FileAttributeBuilder.build;
-import static org.junit.jupiter.api.Assertions.assertArrayEquals;
-import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertFalse;
-import static org.junit.jupiter.api.Assertions.assertNotNull;
-import static org.junit.jupiter.api.Assertions.assertNull;
-import static org.junit.jupiter.api.Assertions.assertThrows;
-import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
class ReadAttributesTest
extends S3UnitTestBase
@@ -173,51 +165,190 @@ void readAttributesDirectoryNotExistsAtAmazon()
}
@Test
- void readAttributesRegenerateCacheWhenNotExists()
+ void readAttributesRegenerateCacheWhenNotExistsBasic()
throws IOException
{
// fixtures
S3ClientMock client = S3MockFactory.getS3ClientMock();
- client.bucket("bucketA").dir("dir").file("dir/file1", "".getBytes());
-
- S3Path file1 = createNewS3FileSystem().getPath("/bucketA/dir/file1");
-
- // create the cache
- s3fsProvider.readAttributes(file1, BasicFileAttributes.class);
-
- assertNotNull(file1.getFileAttributes());
-
- s3fsProvider.readAttributes(file1, BasicFileAttributes.class);
-
- assertNull(file1.getFileAttributes());
-
- s3fsProvider.readAttributes(file1, BasicFileAttributes.class);
-
- assertNotNull(file1.getFileAttributes());
+ client.bucket("bucketA").dir("dir").file("dir/file-basic", "".getBytes());
+
+ S3FileSystem fs = createNewS3FileSystem();
+
+ // No cache assertion
+ S3FileAttributesCache cache = fs.getFileAttributesCache();
+ CacheStats stats = cache.stats(); // temporary snapshot
+ assertThat(stats.hitCount()).isEqualTo(0);
+ assertThat(stats.missCount()).isEqualTo(0);
+
+ // Pre-requisites (cache entry key should not exist)
+ S3Path file1 = fs.getPath("/bucketA/dir/file-basic");
+ String fileAttrCacheKey = cache.generateCacheKey(file1, BasicFileAttributes.class);
+ assertThat(cache.contains(fileAttrCacheKey)).isFalse();
+ stats = cache.stats();
+ assertThat(stats.hitCount()).isEqualTo(0);
+ assertThat(stats.missCount()).isEqualTo(0);
+
+ // Reading the attributes should create the cache entry.
+ BasicFileAttributes attrs = s3fsProvider.readAttributes(file1, BasicFileAttributes.class);
+ assertThat(attrs).isNotNull();
+ assertThat(attrs).isEqualTo(file1.getFileAttributes(BasicFileAttributes.class));
+ stats = cache.stats();
+ assertThat(stats.hitCount()).isEqualTo(1);
+ assertThat(stats.missCount()).isEqualTo(1);
+
+ // Should hit the cache.
+ attrs = s3fsProvider.readAttributes(file1, BasicFileAttributes.class);
+ assertThat(attrs).isNotNull();
+ assertThat(attrs).isEqualTo(file1.getFileAttributes(BasicFileAttributes.class));
+ stats = cache.stats();
+ assertThat(stats.hitCount()).isEqualTo(3);
+ assertThat(stats.missCount()).isEqualTo(1);
+
+ // Should hit the cache.
+ attrs = s3fsProvider.readAttributes(file1, BasicFileAttributes.class);
+ assertThat(attrs).isNotNull();
+ assertThat(attrs).isEqualTo(file1.getFileAttributes(BasicFileAttributes.class));
+ stats = cache.stats();
+ assertThat(stats.hitCount()).isEqualTo(5);
+ assertThat(stats.missCount()).isEqualTo(1);
+
+ // Invalidate cache manually.
+ cache.invalidate(fileAttrCacheKey);
+ assertThat(cache.contains(fileAttrCacheKey)).isFalse();
+
+ // Should populate the cache again.
+ attrs = s3fsProvider.readAttributes(file1, BasicFileAttributes.class);
+ assertThat(attrs).isNotNull();
+ assertNotNull(file1.getFileAttributes(BasicFileAttributes.class));
+ stats = cache.stats();
+ assertThat(stats.hitCount()).isEqualTo(6);
+ assertThat(stats.missCount()).isEqualTo(2);
}
@Test
- void readAttributesPosixRegenerateCacheWhenNotExists()
+ void readAttributesRegenerateCacheWhenNotExistsPosix()
throws IOException
{
// fixtures
S3ClientMock client = S3MockFactory.getS3ClientMock();
- client.bucket("bucketA").dir("dir").file("dir/file1", "".getBytes());
-
- S3Path file1 = createNewS3FileSystem().getPath("/bucketA/dir/file1");
-
- // create the cache
- s3fsProvider.readAttributes(file1, PosixFileAttributes.class);
-
- assertNotNull(file1.getFileAttributes());
-
- s3fsProvider.readAttributes(file1, PosixFileAttributes.class);
-
- assertNull(file1.getFileAttributes());
+ client.bucket("bucketA").dir("dir").file("dir/file-posix", "".getBytes());
+
+ S3FileSystem fs = createNewS3FileSystem();
+
+ // No cache assertion
+ S3FileAttributesCache cache = fs.getFileAttributesCache();
+ CacheStats stats = cache.stats(); // temporary snapshot
+ assertThat(stats.hitCount()).isEqualTo(0);
+ assertThat(stats.missCount()).isEqualTo(0);
+
+ // Pre-requisites (cache entry key should not exist)
+ S3Path file1 = fs.getPath("/bucketA/dir/file-posix");
+ String fileAttrCacheKey = cache.generateCacheKey(file1, PosixFileAttributes.class);
+ assertThat(cache.contains(fileAttrCacheKey)).isFalse();
+ stats = cache.stats();
+ assertThat(stats.hitCount()).isEqualTo(0);
+ assertThat(stats.missCount()).isEqualTo(0);
+
+ // Reading the attributes should create the cache entry.
+ PosixFileAttributes attrs = s3fsProvider.readAttributes(file1, PosixFileAttributes.class);
+ assertThat(attrs).isNotNull();
+ assertThat(attrs).isEqualTo(file1.getFileAttributes(PosixFileAttributes.class));
+ stats = cache.stats();
+ assertThat(stats.hitCount()).isEqualTo(1);
+ assertThat(stats.missCount()).isEqualTo(1);
+
+ // Should hit the cache.
+ attrs = s3fsProvider.readAttributes(file1, PosixFileAttributes.class);
+ assertThat(attrs).isNotNull();
+ assertThat(attrs).isEqualTo(file1.getFileAttributes(PosixFileAttributes.class));
+ stats = cache.stats();
+ assertThat(stats.hitCount()).isEqualTo(3);
+ assertThat(stats.missCount()).isEqualTo(1);
+
+ // Should hit the cache.
+ attrs = s3fsProvider.readAttributes(file1, PosixFileAttributes.class);
+ assertThat(attrs).isNotNull();
+ assertThat(attrs).isEqualTo(file1.getFileAttributes(PosixFileAttributes.class));
+ stats = cache.stats();
+ assertThat(stats.hitCount()).isEqualTo(5);
+ assertThat(stats.missCount()).isEqualTo(1);
+
+ // Invalidate cache manually.
+ cache.invalidate(fileAttrCacheKey);
+ assertThat(cache.contains(fileAttrCacheKey)).isFalse();
+
+ // Should populate the cache again.
+ attrs = s3fsProvider.readAttributes(file1, PosixFileAttributes.class);
+ assertThat(attrs).isNotNull();
+ assertNotNull(file1.getFileAttributes(PosixFileAttributes.class));
+ stats = cache.stats();
+ assertThat(stats.hitCount()).isEqualTo(6);
+ assertThat(stats.missCount()).isEqualTo(2);
- s3fsProvider.readAttributes(file1, PosixFileAttributes.class);
+ }
- assertNotNull(file1.getFileAttributes());
+ @Test
+ void readAttributesCastDownFromPosixToBasic()
+ throws IOException
+ {
+ // fixtures
+ S3ClientMock client = S3MockFactory.getS3ClientMock();
+ client.bucket("bucketA").dir("dir").file("dir/file-posix2", "".getBytes());
+
+ S3FileSystem fs = createNewS3FileSystem();
+
+ // No cache assertion
+ S3FileAttributesCache cache = fs.getFileAttributesCache();
+ CacheStats stats = cache.stats(); // temporary snapshot
+ assertThat(stats.hitCount()).isEqualTo(0);
+ assertThat(stats.missCount()).isEqualTo(0);
+
+ // Pre-requisites (cache entry key should not exist)
+ S3Path file = fs.getPath("/bucketA/dir/file-posix2");
+ String basicFileAttrCacheKey = cache.generateCacheKey(file, BasicFileAttributes.class);
+ String posixFileAttrCacheKey = cache.generateCacheKey(file, PosixFileAttributes.class);
+ assertThat(cache.contains(basicFileAttrCacheKey)).isFalse();
+ assertThat(cache.contains(posixFileAttrCacheKey)).isFalse();
+
+ stats = cache.stats();
+ assertThat(stats.hitCount()).isEqualTo(0);
+ assertThat(stats.missCount()).isEqualTo(0);
+
+ // Reading the attributes should create the cache entry.
+ BasicFileAttributes attrs = s3fsProvider.readAttributes(file, PosixFileAttributes.class);
+ assertThat(attrs).isNotNull();
+ assertThat(attrs).isEqualTo(file.getFileAttributes(BasicFileAttributes.class));
+ stats = cache.stats();
+ assertThat(stats.hitCount()).isEqualTo(1);
+ assertThat(stats.missCount()).isEqualTo(1);
+
+ // Should hit the cache.
+ attrs = s3fsProvider.readAttributes(file, BasicFileAttributes.class);
+ assertThat(attrs).isNotNull();
+ assertThat(attrs).isEqualTo(file.getFileAttributes(BasicFileAttributes.class));
+ stats = cache.stats();
+ assertThat(stats.hitCount()).isEqualTo(3);
+ assertThat(stats.missCount()).isEqualTo(1);
+
+ // Should hit the cache.
+ attrs = s3fsProvider.readAttributes(file, BasicFileAttributes.class);
+ assertThat(attrs).isNotNull();
+ assertThat(attrs).isEqualTo(file.getFileAttributes(BasicFileAttributes.class));
+ stats = cache.stats();
+ assertThat(stats.hitCount()).isEqualTo(5);
+ assertThat(stats.missCount()).isEqualTo(1);
+
+ // Invalidate cache manually.
+ cache.invalidate(basicFileAttrCacheKey);
+ assertThat(cache.contains(basicFileAttrCacheKey)).isFalse();
+
+ // Should populate the cache again.
+ attrs = s3fsProvider.readAttributes(file, PosixFileAttributes.class);
+ assertThat(attrs).isNotNull();
+ assertNotNull(file.getFileAttributes(BasicFileAttributes.class));
+ stats = cache.stats();
+ assertThat(stats.hitCount()).isEqualTo(6);
+ assertThat(stats.missCount()).isEqualTo(2);
}
@Test
diff --git a/src/test/java/org/carlspring/cloud/storage/s3fs/path/S3PathTest.java b/src/test/java/org/carlspring/cloud/storage/s3fs/path/S3PathTest.java
index 5bef07ea..4d76f9fb 100644
--- a/src/test/java/org/carlspring/cloud/storage/s3fs/path/S3PathTest.java
+++ b/src/test/java/org/carlspring/cloud/storage/s3fs/path/S3PathTest.java
@@ -12,6 +12,8 @@
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
+
+import static org.assertj.core.api.Assertions.assertThat;
import static org.carlspring.cloud.storage.s3fs.util.S3EndpointConstant.S3_GLOBAL_URI_TEST;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
@@ -329,4 +331,17 @@ void registerWatchService()
assertNotNull(exception);
}
+ @Test
+ void sameObjectsMustHaveSameHashCode()
+ {
+ S3Path first = forPath("/buck/same");
+ S3Path second = forPath("/buck/same");
+ S3Path third = forPath("/buck/other");
+
+ assertThat(first).isEqualTo(second);
+ assertThat(first.hashCode()).isEqualTo(second.hashCode());
+ assertThat(first).isNotEqualTo(third);
+ assertThat(first.hashCode()).isNotEqualTo(third.hashCode());
+ }
+
}
diff --git a/src/testIntegration/java/org/carlspring/cloud/storage/s3fs/fileSystemProvider/ReadAttributesIT.java b/src/testIntegration/java/org/carlspring/cloud/storage/s3fs/fileSystemProvider/ReadAttributesIT.java
new file mode 100644
index 00000000..2a0549d2
--- /dev/null
+++ b/src/testIntegration/java/org/carlspring/cloud/storage/s3fs/fileSystemProvider/ReadAttributesIT.java
@@ -0,0 +1,271 @@
+package org.carlspring.cloud.storage.s3fs.fileSystemProvider;
+
+import com.github.benmanes.caffeine.cache.stats.CacheStats;
+import com.github.marschall.memoryfilesystem.MemoryFileSystemBuilder;
+import org.carlspring.cloud.storage.s3fs.BaseIntegrationTest;
+import org.carlspring.cloud.storage.s3fs.S3FileSystem;
+import org.carlspring.cloud.storage.s3fs.S3FileSystemProvider;
+import org.carlspring.cloud.storage.s3fs.S3Path;
+import org.carlspring.cloud.storage.s3fs.cache.S3FileAttributesCache;
+import org.carlspring.cloud.storage.s3fs.util.EnvironmentBuilder;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+
+import java.io.IOException;
+import java.net.URI;
+import java.nio.file.FileSystem;
+import java.nio.file.FileSystemNotFoundException;
+import java.nio.file.FileSystems;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.attribute.BasicFileAttributes;
+import java.nio.file.attribute.PosixFileAttributes;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.carlspring.cloud.storage.s3fs.util.S3EndpointConstant.S3_GLOBAL_URI_IT;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+
+class ReadAttributesIT
+ extends BaseIntegrationTest
+{
+
+ private static final String bucket = EnvironmentBuilder.getBucket();
+
+ private static final URI uriGlobal = EnvironmentBuilder.getS3URI(S3_GLOBAL_URI_IT);
+
+ private S3FileSystem fileSystemAmazon;
+
+ private S3FileSystemProvider provider;
+
+ @BeforeEach
+ public void setup()
+ throws IOException
+ {
+ System.clearProperty(S3FileSystemProvider.S3_FACTORY_CLASS);
+
+ fileSystemAmazon = (S3FileSystem) build();
+ provider = fileSystemAmazon.provider();
+ }
+
+ private static FileSystem build()
+ throws IOException
+ {
+ try
+ {
+ FileSystems.getFileSystem(uriGlobal).close();
+
+ return createNewFileSystem();
+ }
+ catch (FileSystemNotFoundException e)
+ {
+ return createNewFileSystem();
+ }
+ }
+
+ private static FileSystem createNewFileSystem()
+ throws IOException
+ {
+ return FileSystems.newFileSystem(uriGlobal, EnvironmentBuilder.getRealEnv());
+ }
+
+ private Path uploadSingleFile(String content)
+ throws IOException
+ {
+ try (FileSystem linux = MemoryFileSystemBuilder.newLinux().build("linux"))
+ {
+ if (content != null)
+ {
+ Files.write(linux.getPath("/index.html"), content.getBytes());
+ }
+ else
+ {
+ Files.createFile(linux.getPath("/index.html"));
+ }
+
+ Path result = fileSystemAmazon.getPath(bucket, getTestBasePathWithUUID());
+
+ Files.copy(linux.getPath("/index.html"), result);
+
+ return result;
+ }
+ }
+
+ @Test
+ void readAttributesRegenerateCacheWhenNotExistsBasic()
+ throws IOException
+ {
+ S3FileSystem fs = fileSystemAmazon;
+ S3Path file = fileSystemAmazon.getPath(bucket, getTestBasePathWithUUID(), "1234");
+ Files.write(file, "1234".getBytes());
+
+ // No cache assertion
+ S3FileAttributesCache cache = fs.getFileAttributesCache();
+ CacheStats stats = cache.stats(); // temporary snapshot
+ assertThat(stats.hitCount()).isEqualTo(0);
+ assertThat(stats.missCount()).isEqualTo(0);
+
+ // Pre-requisites (cache entry key should not exist)
+ String fileAttrCacheKey = cache.generateCacheKey(file, BasicFileAttributes.class);
+ assertThat(cache.contains(fileAttrCacheKey)).isFalse();
+ stats = cache.stats();
+ assertThat(stats.hitCount()).isEqualTo(0);
+ assertThat(stats.missCount()).isEqualTo(0);
+
+ // Reading the attributes should create the cache entry.
+ BasicFileAttributes attrs = provider.readAttributes(file, BasicFileAttributes.class);
+ assertThat(attrs).isNotNull();
+ assertThat(attrs).isEqualTo(file.getFileAttributes(BasicFileAttributes.class));
+ stats = cache.stats();
+ assertThat(stats.hitCount()).isEqualTo(1);
+ assertThat(stats.missCount()).isEqualTo(1);
+
+ // Should hit the cache.
+ attrs = provider.readAttributes(file, BasicFileAttributes.class);
+ assertThat(attrs).isNotNull();
+ assertThat(attrs).isEqualTo(file.getFileAttributes(BasicFileAttributes.class));
+ stats = cache.stats();
+ assertThat(stats.hitCount()).isEqualTo(3);
+ assertThat(stats.missCount()).isEqualTo(1);
+
+ // Should hit the cache.
+ attrs = provider.readAttributes(file, BasicFileAttributes.class);
+ assertThat(attrs).isNotNull();
+ assertThat(attrs).isEqualTo(file.getFileAttributes(BasicFileAttributes.class));
+ stats = cache.stats();
+ assertThat(stats.hitCount()).isEqualTo(5);
+ assertThat(stats.missCount()).isEqualTo(1);
+
+ // Invalidate cache manually.
+ cache.invalidate(fileAttrCacheKey);
+ assertThat(cache.contains(fileAttrCacheKey)).isFalse();
+
+ // Should populate the cache again.
+ attrs = provider.readAttributes(file, BasicFileAttributes.class);
+ assertThat(attrs).isNotNull();
+ assertNotNull(file.getFileAttributes(BasicFileAttributes.class));
+ stats = cache.stats();
+ assertThat(stats.hitCount()).isEqualTo(6);
+ assertThat(stats.missCount()).isEqualTo(2);
+ }
+
+ @Test
+ void readAttributesRegenerateCacheWhenNotExistsPosix()
+ throws IOException
+ {
+ S3FileSystem fs = fileSystemAmazon;
+ S3Path file = fileSystemAmazon.getPath(bucket, getTestBasePathWithUUID(), "1234");
+ Files.write(file, "1234".getBytes());
+
+ // No cache assertion
+ S3FileAttributesCache cache = fs.getFileAttributesCache();
+ CacheStats stats = cache.stats(); // temporary snapshot
+ assertThat(stats.hitCount()).isEqualTo(0);
+ assertThat(stats.missCount()).isEqualTo(0);
+
+ // Pre-requisites (cache entry key should not exist)
+ String fileAttrCacheKey = cache.generateCacheKey(file, PosixFileAttributes.class);
+ assertThat(cache.contains(fileAttrCacheKey)).isFalse();
+ stats = cache.stats();
+ assertThat(stats.hitCount()).isEqualTo(0);
+ assertThat(stats.missCount()).isEqualTo(0);
+
+ // Reading the attributes should create the cache entry.
+ PosixFileAttributes attrs = provider.readAttributes(file, PosixFileAttributes.class);
+ assertThat(attrs).isNotNull();
+ assertThat(attrs).isEqualTo(file.getFileAttributes(PosixFileAttributes.class));
+ stats = cache.stats();
+ assertThat(stats.hitCount()).isEqualTo(1);
+ assertThat(stats.missCount()).isEqualTo(1);
+
+ // Should hit the cache.
+ attrs = provider.readAttributes(file, PosixFileAttributes.class);
+ assertThat(attrs).isNotNull();
+ assertThat(attrs).isEqualTo(file.getFileAttributes(PosixFileAttributes.class));
+ stats = cache.stats();
+ assertThat(stats.hitCount()).isEqualTo(3);
+ assertThat(stats.missCount()).isEqualTo(1);
+
+ // Should hit the cache.
+ attrs = provider.readAttributes(file, PosixFileAttributes.class);
+ assertThat(attrs).isNotNull();
+ assertThat(attrs).isEqualTo(file.getFileAttributes(PosixFileAttributes.class));
+ stats = cache.stats();
+ assertThat(stats.hitCount()).isEqualTo(5);
+ assertThat(stats.missCount()).isEqualTo(1);
+
+ // Invalidate cache manually.
+ cache.invalidate(fileAttrCacheKey);
+ assertThat(cache.contains(fileAttrCacheKey)).isFalse();
+
+ // Should populate the cache again.
+ attrs = provider.readAttributes(file, PosixFileAttributes.class);
+ assertThat(attrs).isNotNull();
+ assertNotNull(file.getFileAttributes(PosixFileAttributes.class));
+ stats = cache.stats();
+ assertThat(stats.hitCount()).isEqualTo(6);
+ assertThat(stats.missCount()).isEqualTo(2);
+
+ }
+
+ @Test
+ void readAttributesCastDownFromPosixToBasic()
+ throws IOException
+ {
+ S3FileSystem fs = fileSystemAmazon;
+ S3Path file = fileSystemAmazon.getPath(bucket, getTestBasePathWithUUID(), "1234");
+ Files.write(file, "1234".getBytes());
+
+ // No cache assertion
+ S3FileAttributesCache cache = fs.getFileAttributesCache();
+ CacheStats stats = cache.stats(); // temporary snapshot
+ assertThat(stats.hitCount()).isEqualTo(0);
+ assertThat(stats.missCount()).isEqualTo(0);
+
+ // Pre-requisites (cache entry key should not exist)
+ String basicFileAttrCacheKey = cache.generateCacheKey(file, BasicFileAttributes.class);
+ String posixFileAttrCacheKey = cache.generateCacheKey(file, PosixFileAttributes.class);
+ assertThat(cache.contains(basicFileAttrCacheKey)).isFalse();
+ assertThat(cache.contains(posixFileAttrCacheKey)).isFalse();
+
+ stats = cache.stats();
+ assertThat(stats.hitCount()).isEqualTo(0);
+ assertThat(stats.missCount()).isEqualTo(0);
+
+ // Reading the attributes should create the cache entry.
+ BasicFileAttributes attrs = provider.readAttributes(file, PosixFileAttributes.class);
+ assertThat(attrs).isNotNull();
+ assertThat(attrs).isEqualTo(file.getFileAttributes(BasicFileAttributes.class));
+ stats = cache.stats();
+ assertThat(stats.hitCount()).isEqualTo(1);
+ assertThat(stats.missCount()).isEqualTo(1);
+
+ // Should hit the cache.
+ attrs = provider.readAttributes(file, BasicFileAttributes.class);
+ assertThat(attrs).isNotNull();
+ assertThat(attrs).isEqualTo(file.getFileAttributes(BasicFileAttributes.class));
+ stats = cache.stats();
+ assertThat(stats.hitCount()).isEqualTo(3);
+ assertThat(stats.missCount()).isEqualTo(1);
+
+ // Should hit the cache.
+ attrs = provider.readAttributes(file, BasicFileAttributes.class);
+ assertThat(attrs).isNotNull();
+ assertThat(attrs).isEqualTo(file.getFileAttributes(BasicFileAttributes.class));
+ stats = cache.stats();
+ assertThat(stats.hitCount()).isEqualTo(5);
+ assertThat(stats.missCount()).isEqualTo(1);
+
+ // Invalidate cache manually.
+ cache.invalidate(basicFileAttrCacheKey);
+ assertThat(cache.contains(basicFileAttrCacheKey)).isFalse();
+
+ // Should populate the cache again.
+ attrs = provider.readAttributes(file, PosixFileAttributes.class);
+ assertThat(attrs).isNotNull();
+ assertNotNull(file.getFileAttributes(BasicFileAttributes.class));
+ stats = cache.stats();
+ assertThat(stats.hitCount()).isEqualTo(6);
+ assertThat(stats.missCount()).isEqualTo(2);
+ }
+
+}