diff --git a/107/build.gradle b/107/build.gradle
index 469acad59e..8a205f3f09 100644
--- a/107/build.gradle
+++ b/107/build.gradle
@@ -30,21 +30,22 @@ sourceSets {
 }
 
 dependencies {
-  compile project(':impl'), project(':xml')
-  provided "javax.cache:cache-api:$parent.jcacheVersion"
-  tckTestRuntime 'javax.cache:cache-tests:1.0.1'
-  tckTestClasses('javax.cache:cache-tests:1.0.1:tests') {
+  api project(':api')
+  providedApi "javax.cache:cache-api:$parent.jcacheVersion"
+
+  implementation project(':impl')
+  implementation project(':xml')
+  implementation "org.terracotta:statistics:$parent.statisticVersion"
+
+  tckTestRuntime "javax.cache:cache-tests:$jcacheTckVersion"
+  tckTestClasses("javax.cache:cache-tests:$jcacheTckVersion:tests") {
     transitive = false
   }
-}
-
-tasks.withType(JavaCompile) {
-  options.compilerArgs += ['-Werror']
+  testCompile project(path: ':xml', configuration: 'testArchives')
 }
 
 javadoc {
   exclude '**/tck/**'
-  classpath = sourceSets.main.compileClasspath + sourceSets.main.runtimeClasspath + configurations.provided
 }
 
 test {
@@ -78,5 +79,3 @@ task tckTest(type: Test, dependsOn: unpackTckTests) {
 }
 
 test.dependsOn tckTest
-
-
diff --git a/107/src/main/java/org/ehcache/jsr107/CacheResources.java b/107/src/main/java/org/ehcache/jsr107/CacheResources.java
index b1d71e7e1d..deb75016a8 100644
--- a/107/src/main/java/org/ehcache/jsr107/CacheResources.java
+++ b/107/src/main/java/org/ehcache/jsr107/CacheResources.java
@@ -15,16 +15,18 @@
  */
 package org.ehcache.jsr107;
 
-import java.io.Closeable;
 import java.util.Collections;
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.atomic.AtomicBoolean;
 
+import javax.cache.CacheException;
 import javax.cache.configuration.CacheEntryListenerConfiguration;
 
 import org.ehcache.jsr107.internal.Jsr107CacheLoaderWriter;
 
+import static org.ehcache.jsr107.CloseUtil.closeAllAfter;
+
 /**
  * @author teck
 */
@@ -66,9 +68,7 @@ synchronized ListenerResources<K, V> registerCacheEntryListener(CacheEntryListen
       throw new IllegalArgumentException("listener config already registered");
     }
 
-    MultiCacheException mce = new MultiCacheException();
-    ListenerResources<K, V> rv = ListenerResources.createListenerResources(listenerConfig, mce);
-    mce.throwIfNotEmpty();
+    ListenerResources<K, V> rv = ListenerResources.createListenerResources(listenerConfig);
     listenerResources.put(listenerConfig, rv);
     return rv;
   }
@@ -86,33 +86,29 @@ synchronized ListenerResources<K, V> deregisterCacheEntryListener(CacheEntryList
     if (resources == null) {
       return null;
     }
-    MultiCacheException mce = new MultiCacheException();
-    close(resources, mce);
-    mce.throwIfNotEmpty();
+    try {
+      CloseUtil.closeAll(resources);
+    } catch (Throwable t) {
+      throw new CacheException(t);
+    }
     return resources;
   }
 
-  synchronized void closeResources(MultiCacheException mce) {
+  synchronized void closeResources() {
     if (closed.compareAndSet(false, true)) {
-      close(expiryPolicy, mce);
-      close(cacheLoaderWriter, mce);
-      for (ListenerResources<K, V> lr : listenerResources.values()) {
-        close(lr, mce);
+      try {
+        CloseUtil.closeAll(expiryPolicy, cacheLoaderWriter, listenerResources.values());
+      } catch (Throwable t) {
+        throw new CacheException(t);
       }
     }
   }
 
-  boolean isClosed() {
-    return closed.get();
-  }
-
-  static void close(Object obj, MultiCacheException mce) {
-    if (obj instanceof Closeable) {
-      try {
-        ((Closeable) obj).close();
-      } catch (Throwable t) {
-        mce.addThrowable(t);
-      }
+  synchronized CacheException closeResourcesAfter(CacheException exception) {
+    if (closed.compareAndSet(false, true)) {
+      return closeAllAfter(exception, expiryPolicy, cacheLoaderWriter, listenerResources.values());
+    } else {
+      return exception;
     }
   }
 }
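
The new closeResourcesAfter(...) above reports cleanup failures by attaching them to an exception that is already in flight, via Throwable.addSuppressed, instead of accumulating them in the removed MultiCacheException. A self-contained sketch of that idiom follows; the class and method names here are illustrative, not part of the patch:

    import java.io.Closeable;

    public class CloseAfterFailureDemo {
      // Attach any cleanup failure to the primary exception as a suppressed
      // exception, mirroring what closeResourcesAfter(...) does above.
      static <T extends Throwable> T closeAfter(T failure, Closeable resource) {
        try {
          resource.close();
        } catch (Throwable t) {
          failure.addSuppressed(t);
        }
        return failure;
      }

      public static void main(String[] args) {
        Closeable failingResource = () -> { throw new java.io.IOException("close failed"); };
        RuntimeException primary = closeAfter(new RuntimeException("primary failure"), failingResource);
        // The primary failure survives; "close failed" rides along as suppressed.
        for (Throwable t : primary.getSuppressed()) {
          System.out.println("suppressed: " + t.getMessage());
        }
      }
    }
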
diff --git a/107/src/main/java/org/ehcache/jsr107/CloseUtil.java b/107/src/main/java/org/ehcache/jsr107/CloseUtil.java
new file mode 100644
index 0000000000..e2c350b478
--- /dev/null
+++ b/107/src/main/java/org/ehcache/jsr107/CloseUtil.java
@@ -0,0 +1,86 @@
+/*
+ * Copyright Terracotta, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.ehcache.jsr107;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Optional;
+import java.util.stream.Stream;
+
+public class CloseUtil {
+  public static <T extends Throwable> T closeAllAfter(T failure, Object ... objects) {
+    Optional<Closeable> closeable = extractCloseables(Stream.of(objects)).reduce(CloseUtil::composeCloseables);
+    if (closeable.isPresent()) {
+      try {
+        closeable.get().close();
+      } catch (Throwable t) {
+        failure.addSuppressed(t);
+      }
+    }
+    return failure;
+  }
+
+  static void closeAll(Object ... objects) throws IOException {
+    closeAll(Stream.of(objects));
+  }
+
+  static void closeAll(Stream<Object> objects) throws IOException {
+    chain(extractCloseables(objects));
+  }
+
+  static void chain(Closeable ... objects) throws IOException {
+    chain(Stream.of(objects));
+  }
+
+  public static void chain(Stream<Closeable> objects) throws IOException {
+    Optional<Closeable> closeable = objects.reduce(CloseUtil::composeCloseables);
+    if (closeable.isPresent()) {
+      closeable.get().close();
+    }
+  }
+
+  private static Stream<Closeable> extractCloseables(Stream<Object> objects) {
+    return objects.filter(o -> o != null).flatMap(o -> {
+      if (o instanceof Collection) {
+        return ((Collection<?>) o).stream();
+      } else if (o.getClass().isArray()) {
+        return Arrays.stream((Object[]) o);
+      } else {
+        return Stream.of(o);
+      }
+    }).filter(o -> o != null).filter(Closeable.class::isInstance).map(Closeable.class::cast);
+  }
+
+  private static Closeable composeCloseables(Closeable a, Closeable b) {
+    return () -> {
+      try {
+        a.close();
+      } catch (Throwable t1) {
+        try {
+          b.close();
+        } catch (Throwable t2) {
+          t1.addSuppressed(t2);
+        }
+        throw t1;
+      }
+      b.close();
+    };
+  }
+
+}
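
CloseUtil reduces a stream of Closeables with composeCloseables, which gives try-with-resources semantics: every resource is closed even when an earlier close() throws, and secondary failures are recorded as suppressed exceptions on the first one. A standalone demonstration of the same composition rule (demo names are hypothetical):

    import java.io.Closeable;
    import java.io.IOException;
    import java.util.Optional;
    import java.util.stream.Stream;

    public class ComposeCloseablesDemo {
      // Same rule as CloseUtil.composeCloseables: close a, then b; if both
      // fail, b's failure is suppressed under a's.
      static Closeable compose(Closeable a, Closeable b) {
        return () -> {
          try {
            a.close();
          } catch (Throwable t1) {
            try {
              b.close();
            } catch (Throwable t2) {
              t1.addSuppressed(t2);
            }
            throw t1;
          }
          b.close();
        };
      }

      public static void main(String[] args) {
        Closeable first = () -> { throw new IOException("first failed"); };
        Closeable second = () -> System.out.println("second closed");
        Optional<Closeable> all = Stream.of(first, second).reduce(ComposeCloseablesDemo::compose);
        try {
          all.get().close();
        } catch (IOException e) {
          // "second closed" was still printed: every closeable runs even when
          // an earlier one throws, as in a try-with-resources block.
          System.out.println("caught: " + e.getMessage());
        }
      }
    }
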
diff --git a/107/src/main/java/org/ehcache/jsr107/ConfigurationMerger.java b/107/src/main/java/org/ehcache/jsr107/ConfigurationMerger.java
index ec8db61e07..5c6a8f4a75 100644
--- a/107/src/main/java/org/ehcache/jsr107/ConfigurationMerger.java
+++ b/107/src/main/java/org/ehcache/jsr107/ConfigurationMerger.java
@@ -27,9 +27,9 @@
 import org.ehcache.impl.copy.SerializingCopier;
 import org.ehcache.jsr107.config.ConfigurationElementState;
 import org.ehcache.jsr107.config.Jsr107CacheConfiguration;
-import org.ehcache.jsr107.config.Jsr107Service;
 import org.ehcache.jsr107.internal.Jsr107CacheLoaderWriter;
 import org.ehcache.spi.copy.Copier;
+import org.ehcache.spi.loaderwriter.CacheLoaderWriterConfiguration;
 import org.ehcache.xml.XmlConfiguration;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -39,6 +39,7 @@
 import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
 
+import javax.cache.CacheException;
 import javax.cache.configuration.CacheEntryListenerConfiguration;
 import javax.cache.configuration.CompleteConfiguration;
 import javax.cache.configuration.Configuration;
@@ -49,6 +50,7 @@
 import static org.ehcache.config.builders.CacheConfigurationBuilder.newCacheConfigurationBuilder;
 import static org.ehcache.config.builders.ResourcePoolsBuilder.heap;
 import static org.ehcache.core.spi.service.ServiceUtils.findSingletonAmongst;
+import static org.ehcache.jsr107.CloseUtil.closeAllAfter;
 
 /**
  * ConfigurationMerger
@@ -81,7 +83,7 @@ ConfigHolder<K, V> mergeConfigurations(String cacheName, Configuration<K, V> con
-        CacheConfigurationBuilder<K, V> templateBuilder = null;
+        CacheConfigurationBuilder<K, V> templateBuilder;
         try {
           templateBuilder = xmlConfiguration.newCacheConfigurationBuilderFromTemplate(templateName,
               jsr107Configuration.getKeyType(), jsr107Configuration.getValueType());
@@ -106,11 +108,11 @@ ConfigHolder<K, V> mergeConfigurations(String cacheName, Configuration<K, V> con
-        expiryPolicy = new EhcacheExpiryWrapper<>(cacheConfiguration.getExpiry());
+        expiryPolicy = new EhcacheExpiryWrapper<>(cacheConfiguration.getExpiryPolicy());
       }
 
       return new ConfigHolder<>(
@@ -135,29 +137,21 @@ ConfigHolder<K, V> mergeConfigurations(String cacheName, Configuration<K, V> con
          new Eh107CompleteConfiguration<>(jsr107Configuration, cacheConfiguration, hasConfiguredExpiry, useEhcacheLoaderWriter),
          cacheConfiguration, useEhcacheLoaderWriter);
     } catch (Throwable throwable) {
-      MultiCacheException mce = new MultiCacheException();
-      CacheResources.close(expiryPolicy, mce);
-      CacheResources.close(loaderWriter, mce);
       if (throwable instanceof IllegalArgumentException) {
-        String message = throwable.getMessage();
-        if (mce.getMessage() != null) {
-          message = message + "\nSuppressed " + mce.getMessage();
-        }
-        throw new IllegalArgumentException(message, throwable);
+        throw closeAllAfter((IllegalArgumentException) throwable, expiryPolicy, loaderWriter);
+      } else {
+        throw closeAllAfter(new CacheException(throwable), expiryPolicy, loaderWriter);
       }
-      mce.addFirstThrowable(throwable);
-      throw mce;
     }
   }
 
   private CacheConfigurationBuilder<K, V> handleStoreByValue(Eh107CompleteConfiguration<K, V> jsr107Configuration, CacheConfigurationBuilder<K, V> builder, String cacheName) {
-    DefaultCopierConfiguration copierConfig = builder.getExistingServiceConfiguration(DefaultCopierConfiguration.class);
+    DefaultCopierConfiguration<?> copierConfig = builder.getExistingServiceConfiguration(DefaultCopierConfiguration.class);
     if(copierConfig == null) {
       if(jsr107Configuration.isStoreByValue()) {
         if (xmlConfiguration != null) {
           DefaultCopyProviderConfiguration defaultCopyProviderConfiguration = findSingletonAmongst(DefaultCopyProviderConfiguration.class,
-              xmlConfiguration.getServiceCreationConfigurations().toArray());
+              xmlConfiguration.getServiceCreationConfigurations());
           if (defaultCopyProviderConfiguration != null) {
             Map<Class<?>, ClassInstanceConfiguration<Copier<?>>> defaults = defaultCopyProviderConfiguration.getDefaults();
             handleCopierDefaultsforImmutableTypes(defaults);
@@ -188,8 +182,8 @@ private CacheConfigurationBuilder<K, V> handleStoreByValue(Eh107CompleteC
   }
 
   @SuppressWarnings("unchecked")
-  private static CacheConfigurationBuilder addDefaultCopiers(CacheConfigurationBuilder builder, Class keyType, Class valueType ) {
-    Set<Class> immutableTypes = new HashSet<>();
+  private static <K, V> CacheConfigurationBuilder<K, V> addDefaultCopiers(CacheConfigurationBuilder<K, V> builder, Class<K> keyType, Class<V> valueType ) {
+    Set<Class<?>> immutableTypes = new HashSet<>();
     immutableTypes.add(String.class);
     immutableTypes.add(Long.class);
     immutableTypes.add(Float.class);
@@ -219,7 +213,7 @@ private static void handleCopierDefaultsforImmutableTypes(Map<Class<?>, ClassIns
     addIdentityCopierIfNoneRegistered(defaults, Character.class);
   }
 
-  @SuppressWarnings("unchecked")
+  @SuppressWarnings({"rawtypes", "unchecked"})
   private static void addIdentityCopierIfNoneRegistered(Map<Class<?>, ClassInstanceConfiguration<Copier<?>>> defaults, Class<?> clazz) {
     if (!defaults.containsKey(clazz)) {
       defaults.put(clazz, new DefaultCopierConfiguration(Eh107IdentityCopier.class, DefaultCopierConfiguration.Type.VALUE));
@@ -228,9 +222,8 @@ private static void addIdentityCopierIfNoneRegistered(Map<Class<?>, ClassInstanc
   private Map<CacheEntryListenerConfiguration<K, V>, ListenerResources<K, V>> initCacheEventListeners(CompleteConfiguration<K, V> config) {
     Map<CacheEntryListenerConfiguration<K, V>, ListenerResources<K, V>> listenerResources = new ConcurrentHashMap<>();
-    MultiCacheException mce = new MultiCacheException();
     for (CacheEntryListenerConfiguration<K, V> listenerConfig : config.getCacheEntryListenerConfigurations()) {
-      listenerResources.put(listenerConfig, ListenerResources.createListenerResources(listenerConfig, mce));
+      listenerResources.put(listenerConfig, ListenerResources.createListenerResources(listenerConfig));
     }
     return listenerResources;
   }
@@ -239,7 +232,7 @@ private Eh107Expiry<K, V> initExpiryPolicy(CompleteConfiguration<K, V> co
     return new ExpiryPolicyToEhcacheExpiry<>(config.getExpiryPolicyFactory().create());
   }
 
-  private Jsr107CacheLoaderWriter<K, V> initCacheLoaderWriter(CompleteConfiguration<K, V> config, MultiCacheException mce) {
+  private Jsr107CacheLoaderWriter<K, V> initCacheLoaderWriter(CompleteConfiguration<K, V> config) {
     Factory<CacheLoader<K, V>> cacheLoaderFactory = config.getCacheLoaderFactory();
     @SuppressWarnings("unchecked")
     Factory<CacheWriter<K, V>> cacheWriterFactory = (Factory<CacheWriter<K, V>>) (Object) config.getCacheWriterFactory();
@@ -256,11 +249,7 @@ private Jsr107CacheLoaderWriter<K, V> initCacheLoaderWriter(CompleteConfi
     try {
       cacheWriter = cacheWriterFactory == null ? null : cacheWriterFactory.create();
     } catch (Throwable t) {
-      if (t != mce) {
-        mce.addThrowable(t);
-      }
-      CacheResources.close(cacheLoader, mce);
-      throw mce;
+      throw closeAllAfter(new CacheException(t), cacheLoader);
     }
 
     if (cacheLoader == null && cacheWriter == null) {
diff --git a/107/src/main/java/org/ehcache/jsr107/DefaultConfigurationResolver.java b/107/src/main/java/org/ehcache/jsr107/DefaultConfigurationResolver.java
index 0db6470554..a4e89bf738 100644
--- a/107/src/main/java/org/ehcache/jsr107/DefaultConfigurationResolver.java
+++ b/107/src/main/java/org/ehcache/jsr107/DefaultConfigurationResolver.java
@@ -27,9 +27,7 @@ class DefaultConfigurationResolver {
   static final String DEFAULT_CONFIG_PROPERTY_NAME = "ehcache.jsr107.config.default";
 
   static URI resolveConfigURI(Properties cacheManagerProperties) {
-    Object config = null;
-
-    config = cacheManagerProperties.get(DEFAULT_CONFIG_PROPERTY_NAME);
+    Object config = cacheManagerProperties.get(DEFAULT_CONFIG_PROPERTY_NAME);
 
     if (config == null) {
       config = System.getProperties().get(DEFAULT_CONFIG_PROPERTY_NAME);
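
resolveConfigURI checks the cache manager's own properties before falling back to the JVM system properties. A minimal sketch of that lookup order; the demo class and the example URI are hypothetical:

    import java.net.URI;
    import java.util.Properties;

    public class ConfigPropertyLookupDemo {
      static final String PROP = "ehcache.jsr107.config.default";

      // Mirrors the order in DefaultConfigurationResolver.resolveConfigURI:
      // cache-manager properties first, then system properties.
      static Object resolve(Properties cacheManagerProperties) {
        Object config = cacheManagerProperties.get(PROP);
        if (config == null) {
          config = System.getProperties().get(PROP);
        }
        return config;
      }

      public static void main(String[] args) {
        Properties props = new Properties();
        props.put(PROP, URI.create("file:///ehcache-107.xml")); // hypothetical value
        System.out.println(resolve(props)); // file:///ehcache-107.xml
      }
    }
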
diff --git a/107/src/main/java/org/ehcache/jsr107/Eh107Cache.java b/107/src/main/java/org/ehcache/jsr107/Eh107Cache.java
index a639c48752..4e64487b9c 100644
--- a/107/src/main/java/org/ehcache/jsr107/Eh107Cache.java
+++ b/107/src/main/java/org/ehcache/jsr107/Eh107Cache.java
@@ -17,7 +17,6 @@
 
 import org.ehcache.core.InternalCache;
 import org.ehcache.Status;
-import org.ehcache.UserManagedCache;
 import org.ehcache.core.Jsr107Cache;
 import org.ehcache.core.spi.service.StatisticsService;
 import org.ehcache.event.EventFiring;
@@ -36,11 +35,9 @@
 import java.util.Set;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicReference;
-import java.util.function.BiFunction;
-import java.util.function.Function;
-import java.util.function.Supplier;
 
 import javax.cache.Cache;
+import javax.cache.CacheException;
 import javax.cache.CacheManager;
 import javax.cache.configuration.CacheEntryListenerConfiguration;
 import javax.cache.configuration.Configuration;
@@ -68,7 +65,7 @@ class Eh107Cache<K, V> implements Cache<K, V> {
   private final Jsr107CacheLoaderWriter<K, V> cacheLoaderWriter;
 
   Eh107Cache(String name, Eh107Configuration<K, V> config, CacheResources<K, V> cacheResources,
-      InternalCache<K, V> ehCache, Eh107CacheManager cacheManager) {
+      InternalCache<K, V> ehCache, StatisticsService statisticsService, Eh107CacheManager cacheManager) {
     this.cacheLoaderWriter = cacheResources.getCacheLoaderWriter();
     this.config = config;
     this.ehCache = ehCache;
@@ -76,15 +73,14 @@ class Eh107Cache<K, V> implements Cache<K, V> {
     this.name = name;
     this.cacheResources = cacheResources;
     this.managementBean = new Eh107CacheMXBean(name, cacheManager.getURI(), config);
-    this.statisticsBean = new Eh107CacheStatisticsMXBean(name, cacheManager.getURI(),
-        cacheManager.getEhCacheManager().getServiceProvider().getService(StatisticsService.class));
+    this.statisticsBean = new Eh107CacheStatisticsMXBean(name, cacheManager.getURI(), statisticsService);
 
     for (Map.Entry<CacheEntryListenerConfiguration<K, V>, ListenerResources<K, V>> entry : cacheResources
         .getListenerResources().entrySet()) {
       registerEhcacheListeners(entry.getKey(), entry.getValue());
     }
-    this.jsr107Cache = ehCache.getJsr107Cache();
+    this.jsr107Cache = ehCache.createJsr107Cache();
   }
 
   @Override
@@ -138,14 +134,7 @@ public void loadAll(Set<? extends K> keys, boolean replaceExistingValues, Comple
       jsr107Cache.loadAll(keys, replaceExistingValues, this::loadAllFunction);
     } catch (Exception e) {
       final CacheLoaderException cle;
-      if (e instanceof CacheLoaderException) {
-        cle = (CacheLoaderException) e;
-      } else if (e.getCause() instanceof CacheLoaderException) {
-        cle = (CacheLoaderException) e.getCause();
-      } else {
-        cle = new CacheLoaderException(e);
-      }
-
+      cle = getCacheLoaderException(e);
       completionListener.onException(cle);
       return;
     }
@@ -153,6 +142,18 @@ public void loadAll(Set<? extends K> keys, boolean replaceExistingValues, Comple
     completionListener.onCompletion();
   }
 
+  private CacheLoaderException getCacheLoaderException(Exception e) {
+    CacheLoaderException cle;
+    if (e instanceof CacheLoaderException) {
+      cle = (CacheLoaderException) e;
+    } else if (e.getCause() instanceof CacheLoaderException) {
+      cle = (CacheLoaderException) e.getCause();
+    } else {
+      cle = new CacheLoaderException(e);
+    }
+    return cle;
+  }
+
   private Map<K, V> loadAllFunction(Iterable<? extends K> keysIterable) {
     try {
       Map<? super K, ? extends V> loadResult = cacheLoaderWriter.loadAllAlways(keysIterable);
@@ -162,15 +163,7 @@ private Map<K, V> loadAllFunction(Iterable<? extends K> keysIterable) {
       }
       return resultMap;
     } catch (Exception e) {
-      final CacheLoaderException cle;
-      if (e instanceof CacheLoaderException) {
-        cle = (CacheLoaderException) e;
-      } else if (e.getCause() instanceof CacheLoaderException) {
-        cle = (CacheLoaderException) e.getCause();
-      } else {
-        cle = new CacheLoaderException(e);
-      }
-
+      CacheLoaderException cle = getCacheLoaderException(e);
       throw cle;
     }
   }
@@ -422,9 +415,7 @@ public CacheManager getCacheManager() {
 
   @Override
   public void close() {
-    MultiCacheException closeException = new MultiCacheException();
-    cacheManager.close(this, closeException);
-    closeException.throwIfNotEmpty();
+    cacheManager.close(this);
   }
 
   @Override
@@ -432,33 +423,40 @@ public boolean isClosed() {
     return syncedIsClose();
   }
 
-  void closeInternal(MultiCacheException closeException) {
-    closeInternal(false, closeException);
+  CacheException closeInternalAfter(CacheException failure) {
+    if (hypotheticallyClosed.compareAndSet(false, true)) {
+      return cacheResources.closeResourcesAfter(failure);
+    } else {
+      return failure;
+    }
   }
 
-  private void closeInternal(boolean destroy, MultiCacheException closeException) {
+  void closeInternal() {
+    closeInternal(false);
+  }
+
+  private void closeInternal(boolean destroy) {
     if (hypotheticallyClosed.compareAndSet(false, true)) {
       if (destroy) {
         try {
           clear(false);
         } catch (Throwable t) {
-          closeException.addThrowable(t);
+          throw cacheResources.closeResourcesAfter(new CacheException(t));
        }
       }
-
-      cacheResources.closeResources(closeException);
+      cacheResources.closeResources();
     }
   }
 
   private boolean syncedIsClose() {
-    if (((UserManagedCache)ehCache).getStatus() == Status.UNINITIALIZED && !hypotheticallyClosed.get()) {
+    if (ehCache.getStatus() == Status.UNINITIALIZED && !hypotheticallyClosed.get()) {
       close();
     }
     return hypotheticallyClosed.get();
   }
 
-  void destroy(MultiCacheException destroyException) {
-    closeInternal(true, destroyException);
+  void destroy() {
+    closeInternal(true);
   }
 
   @Override
@@ -622,8 +620,8 @@ public T unwrap(Class<T> clazz) {
     }
   }
 
-  private static enum MutableEntryOperation {
-    NONE, ACCESS, CREATE, LOAD, REMOVE, UPDATE;
+  private enum MutableEntryOperation {
+    NONE, ACCESS, CREATE, LOAD, REMOVE, UPDATE
   }
 
   private static final Object UNDEFINED = new Object();
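
The extracted getCacheLoaderException(...) helper normalizes any failure into a CacheLoaderException, unwrapping one level of cause when present. The same logic in isolation; normalize is a hypothetical stand-in name:

    import javax.cache.integration.CacheLoaderException;

    public class LoaderExceptionDemo {
      // Pass a CacheLoaderException through (possibly one cause level deep),
      // wrap anything else, as the helper above does.
      static CacheLoaderException normalize(Exception e) {
        if (e instanceof CacheLoaderException) {
          return (CacheLoaderException) e;
        } else if (e.getCause() instanceof CacheLoaderException) {
          return (CacheLoaderException) e.getCause();
        } else {
          return new CacheLoaderException(e);
        }
      }

      public static void main(String[] args) {
        CacheLoaderException direct = new CacheLoaderException("loader blew up");
        System.out.println(normalize(direct) == direct);                       // true: passed through
        System.out.println(normalize(new RuntimeException(direct)) == direct); // true: unwrapped
        System.out.println(normalize(new RuntimeException("other")) == direct);// false: wrapped
      }
    }
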
diff --git a/107/src/main/java/org/ehcache/jsr107/Eh107CacheEntryEvent.java b/107/src/main/java/org/ehcache/jsr107/Eh107CacheEntryEvent.java
index 85d1725031..dc08b2255c 100644
--- a/107/src/main/java/org/ehcache/jsr107/Eh107CacheEntryEvent.java
+++ b/107/src/main/java/org/ehcache/jsr107/Eh107CacheEntryEvent.java
@@ -63,6 +63,8 @@ public boolean isOldValueAvailable() {
 
   static class NormalEvent<K, V> extends Eh107CacheEntryEvent<K, V> {
 
+    private static final long serialVersionUID = 1566947833363986792L;
+
     public NormalEvent(Cache<K, V> source, EventType eventType, CacheEvent<? extends K, ? extends V> ehEvent, boolean hasOldValue) {
       super(source, eventType, ehEvent, hasOldValue);
     }
@@ -75,6 +77,8 @@ public V getValue() {
 
   static class RemovingEvent<K, V> extends Eh107CacheEntryEvent<K, V> {
 
+    private static final long serialVersionUID = -1363817518693572909L;
+
     public RemovingEvent(Cache<K, V> source, EventType eventType, CacheEvent<? extends K, ? extends V> ehEvent, boolean hasOldValue) {
       super(source, eventType, ehEvent, hasOldValue);
     }
diff --git a/107/src/main/java/org/ehcache/jsr107/Eh107CacheLoaderWriter.java b/107/src/main/java/org/ehcache/jsr107/Eh107CacheLoaderWriter.java
index 815b9315a3..5e9df94dab 100644
--- a/107/src/main/java/org/ehcache/jsr107/Eh107CacheLoaderWriter.java
+++ b/107/src/main/java/org/ehcache/jsr107/Eh107CacheLoaderWriter.java
@@ -19,7 +19,6 @@
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
-import static java.util.Collections.emptyMap;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Map;
@@ -30,9 +29,10 @@
 import javax.cache.integration.CacheWriter;
 
 import org.ehcache.jsr107.internal.Jsr107CacheLoaderWriter;
-import org.ehcache.spi.loaderwriter.BulkCacheLoadingException;
 import org.ehcache.spi.loaderwriter.BulkCacheWritingException;
 
+import static java.util.Collections.emptyMap;
+
 /**
  * @author teck
 */
@@ -53,7 +53,7 @@ class Eh107CacheLoaderWriter<K, V> implements Jsr107CacheLoaderWriter<K, V>, Clo
   }
 
   @Override
-  public V load(K key) throws Exception {
+  public V load(K key) {
     if (readThrough) {
       return cacheLoader.load(key);
     } else {
@@ -62,7 +62,7 @@ public V load(K key) {
   }
 
   @Override
-  public Map<K, V> loadAll(Iterable<? extends K> keys) throws Exception {
+  public Map<K, V> loadAll(Iterable<? extends K> keys) {
     if (readThrough) {
       return loadAllAlways(keys);
     } else {
@@ -71,7 +71,7 @@ public Map<K, V> loadAll(Iterable<? extends K> keys) {
   }
 
   @Override
-  public Map<K, V> loadAllAlways(Iterable<? extends K> keys) throws BulkCacheLoadingException, Exception {
+  public Map<K, V> loadAllAlways(Iterable<? extends K> keys) {
     if (cacheLoader == null) {
       return emptyMap();
     } else {
@@ -80,21 +80,21 @@ public Map<K, V> loadAllAlways(Iterable<? extends K> keys) throws BulkCacheLoadi
   }
 
   @Override
-  public void write(K key, V value) throws Exception {
+  public void write(K key, V value) {
     if (cacheWriter != null) {
       cacheWriter.write(cacheEntryFor(key, value));
     }
   }
 
   @Override
-  public void delete(K key) throws Exception {
+  public void delete(K key) {
     if (cacheWriter != null) {
       cacheWriter.delete(key);
     }
   }
 
   @Override
-  public void deleteAll(Iterable<? extends K> keys) throws Exception {
+  public void deleteAll(Iterable<? extends K> keys) throws BulkCacheWritingException {
     if (cacheWriter != null) {
       Set<K> allKeys = new HashSet<>();
       for (K key : keys) {
@@ -131,7 +131,7 @@ private Map<K, Exception> failures(Set<K> keys, Exception e) {
   }
 
   @Override
-  public void writeAll(Iterable<? extends Map.Entry<? extends K, ? extends V>> entries) throws Exception {
+  public void writeAll(Iterable<? extends Map.Entry<? extends K, ? extends V>> entries) {
     if (cacheWriter != null) {
       Collection<Cache.Entry<? extends K, ? extends V>> toWrite = new ArrayList<>();
       for (Map.Entry<? extends K, ? extends V> entry : entries) {
diff --git a/107/src/main/java/org/ehcache/jsr107/Eh107CacheLoaderWriterProvider.java b/107/src/main/java/org/ehcache/jsr107/Eh107CacheLoaderWriterProvider.java
index 3738fdb10a..2fd305cd27 100644
--- a/107/src/main/java/org/ehcache/jsr107/Eh107CacheLoaderWriterProvider.java
+++ b/107/src/main/java/org/ehcache/jsr107/Eh107CacheLoaderWriterProvider.java
@@ -45,12 +45,13 @@ public CacheLoaderWriter<?, ?> createCacheLoaderWriter(String ali
   }
 
   @Override
-  public void releaseCacheLoaderWriter(CacheLoaderWriter<?, ?> cacheLoaderWriter) {
-    //
+  public void releaseCacheLoaderWriter(String alias, CacheLoaderWriter<?, ?> cacheLoaderWriter) {
+    deregisterJsrLoaderForCache(alias);
   }
 
   void registerJsr107Loader(String alias, CacheLoaderWriter<?, ?> cacheLoaderWriter) {
     CacheLoaderWriter<?, ?> prev = cacheLoaderWriters.putIfAbsent(alias, cacheLoaderWriter);
+    registerJsrLoaderForCache(alias);
 
     if (prev != null) {
       throw new IllegalStateException("loader already registered for [" + alias + "]");
     }
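
registerJsr107Loader relies on ConcurrentMap.putIfAbsent to detect double registration: a non-null return value is the previously registered loader. The guard in isolation (the registry contents here are placeholders):

    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;

    public class SingleRegistrationDemo {
      private final ConcurrentMap<String, Object> registry = new ConcurrentHashMap<>();

      // putIfAbsent is atomic, so concurrent registrations of the same alias
      // cannot both succeed; the loser sees the winner's value and fails.
      void register(String alias, Object loaderWriter) {
        Object prev = registry.putIfAbsent(alias, loaderWriter);
        if (prev != null) {
          throw new IllegalStateException("loader already registered for [" + alias + "]");
        }
      }

      public static void main(String[] args) {
        SingleRegistrationDemo demo = new SingleRegistrationDemo();
        demo.register("users", new Object());
        try {
          demo.register("users", new Object());
        } catch (IllegalStateException e) {
          System.out.println(e.getMessage());
        }
      }
    }
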
diff --git a/107/src/main/java/org/ehcache/jsr107/Eh107CacheManager.java b/107/src/main/java/org/ehcache/jsr107/Eh107CacheManager.java
index 1b4e308765..caa07cc3bf 100644
--- a/107/src/main/java/org/ehcache/jsr107/Eh107CacheManager.java
+++ b/107/src/main/java/org/ehcache/jsr107/Eh107CacheManager.java
@@ -18,6 +18,7 @@
 import org.ehcache.Status;
 import org.ehcache.config.CacheConfiguration;
 import org.ehcache.core.InternalCache;
+import org.ehcache.core.spi.service.StatisticsService;
 import org.ehcache.impl.config.copy.DefaultCopierConfiguration;
 import org.ehcache.impl.copy.IdentityCopier;
 import org.ehcache.jsr107.internal.Jsr107CacheLoaderWriter;
@@ -27,6 +28,8 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.io.Closeable;
+import java.io.IOException;
 import java.lang.management.ManagementFactory;
 import java.net.URI;
 import java.util.ArrayList;
@@ -45,6 +48,9 @@
 import javax.management.InstanceNotFoundException;
 import javax.management.MBeanServer;
 
+import static org.ehcache.jsr107.CloseUtil.chain;
+import static org.ehcache.jsr107.CloseUtil.closeAll;
+
 /**
  * @author teck
 */
@@ -52,33 +58,31 @@ class Eh107CacheManager implements CacheManager {
 
   private static final Logger LOG = LoggerFactory.getLogger(Eh107CacheManager.class);
 
-  private static MBeanServer MBEAN_SERVER = ManagementFactory.getPlatformMBeanServer();
+  private static final MBeanServer MBEAN_SERVER = ManagementFactory.getPlatformMBeanServer();
 
   private final Object cachesLock = new Object();
   private final ConcurrentMap<String, Eh107Cache<?, ?>> caches = new ConcurrentHashMap<>();
-  private final Eh107InternalCacheManager ehCacheManager;
+  private final org.ehcache.CacheManager ehCacheManager;
   private final EhcacheCachingProvider cachingProvider;
   private final ClassLoader classLoader;
   private final URI uri;
   private final Properties props;
   private final ConfigurationMerger configurationMerger;
+  private final StatisticsService statisticsService;
 
-  Eh107CacheManager(EhcacheCachingProvider cachingProvider, Eh107InternalCacheManager ehCacheManager, Properties props,
-                    ClassLoader classLoader, URI uri, ConfigurationMerger configurationMerger) {
+  Eh107CacheManager(EhcacheCachingProvider cachingProvider, org.ehcache.CacheManager ehCacheManager, Jsr107Service jsr107Service,
+                    Properties props, ClassLoader classLoader, URI uri, ConfigurationMerger configurationMerger) {
     this.cachingProvider = cachingProvider;
     this.ehCacheManager = ehCacheManager;
     this.props = props;
     this.classLoader = classLoader;
     this.uri = uri;
     this.configurationMerger = configurationMerger;
+    this.statisticsService = jsr107Service.getStatistics();
 
     refreshAllCaches();
   }
 
-  Eh107InternalCacheManager getEhCacheManager() {
-    return ehCacheManager;
-  }
-
   private void refreshAllCaches() {
     for (Map.Entry<String, CacheConfiguration<?, ?>> entry : ehCacheManager.getRuntimeConfiguration().getCacheConfigurations().entrySet()) {
       String name = entry.getKey();
@@ -119,9 +123,9 @@ private <K, V> Eh107Cache<K, V> wrapEhcacheCache(String alias, InternalCache
     Eh107Configuration<K, V> config = new Eh107ReverseConfiguration<>(cache, cacheLoaderWriter != null, cacheLoaderWriter != null, storeByValueOnHeap);
     configurationMerger.setUpManagementAndStats(cache, config);
-    Eh107Expiry<K, V> expiry = new EhcacheExpiryWrapper<>(cache.getRuntimeConfiguration().getExpiry());
+    Eh107Expiry<K, V> expiry = new EhcacheExpiryWrapper<>(cache.getRuntimeConfiguration().getExpiryPolicy());
     CacheResources<K, V> resources = new CacheResources<>(alias, wrapCacheLoaderWriter(cacheLoaderWriter), expiry);
-    return new Eh107Cache<>(alias, config, resources, cache, this);
+    return new Eh107Cache<>(alias, config, resources, cache, statisticsService, this);
   }
 
   private Jsr107CacheLoaderWriter<?, ?> wrapCacheLoaderWriter(CacheLoaderWriter<?, ?> cacheLoaderWriter) {
@@ -162,6 +166,7 @@ public <K, V, C extends Configuration<K, V>> Cache<K, V> createCache(String cach
     synchronized (cachesLock) {
 
       if (config instanceof Eh107Configuration.Eh107ConfigurationWrapper) {
+        @SuppressWarnings("unchecked")
         Eh107Configuration.Eh107ConfigurationWrapper<K, V> configurationWrapper = (Eh107Configuration.Eh107ConfigurationWrapper<K, V>)config;
         CacheConfiguration<K, V> unwrap = configurationWrapper.getCacheConfiguration();
         final org.ehcache.Cache<K, V> ehcache;
@@ -193,14 +198,10 @@ public <K, V, C extends Configuration<K, V>> Cache<K, V> createCache(String cach
       try {
         ehCache = (InternalCache<K, V>)ehCacheManager.createCache(cacheName, configHolder.cacheConfiguration);
       } catch (IllegalArgumentException e) {
-        MultiCacheException mce = new MultiCacheException(e);
-        configHolder.cacheResources.closeResources(mce);
-        throw new CacheException("A Cache named [" + cacheName + "] already exists", mce);
+        throw configHolder.cacheResources.closeResourcesAfter(new CacheException("A Cache named [" + cacheName + "] already exists"));
       } catch (Throwable t) {
         // something went wrong in ehcache land, make sure to clean up our stuff
-        MultiCacheException mce = new MultiCacheException(t);
-        configHolder.cacheResources.closeResources(mce);
-        throw mce;
+        throw configHolder.cacheResources.closeResourcesAfter(new CacheException(t));
       }
 
       Eh107Cache<K, V> cache = null;
@@ -211,7 +212,7 @@ public <K, V, C extends Configuration<K, V>> Cache<K, V> createCache(String cach
             cacheResources.getExpiryPolicy(), cacheResources.getListenerResources());
       }
       cache = new Eh107Cache<>(cacheName, new Eh107CompleteConfiguration<>(configHolder.jsr107Configuration, ehCache
-          .getRuntimeConfiguration()), cacheResources, ehCache, this);
+          .getRuntimeConfiguration()), cacheResources, ehCache, statisticsService, this);
 
       caches.put(cacheName, cache);
@@ -225,13 +226,11 @@ public <K, V, C extends Configuration<K, V>> Cache<K, V> createCache(String cach
 
       return cache;
     } catch (Throwable t) {
-      MultiCacheException mce = new MultiCacheException(t);
       if (cache != null) {
-        cache.closeInternal(mce);
+        throw cache.closeInternalAfter(new CacheException(t));
       } else {
-        cacheResources.closeResources(mce);
+        throw cacheResources.closeResourcesAfter(new CacheException(t));
       }
-      throw mce;
     }
   }
 }
@@ -286,18 +285,7 @@ public <K, V> Cache<K, V> getCache(String cacheName) {
       throw new NullPointerException();
     }
 
-    Eh107Cache<K, V> cache = safeCacheRetrieval(cacheName);
-
-    if (cache == null) {
-      return null;
-    }
-
-    if (cache.getConfiguration(Configuration.class).getKeyType() != Object.class
-        || cache.getConfiguration(Configuration.class).getValueType() != Object.class) {
-      throw new IllegalArgumentException("Cache [" + cacheName
-          + "] specifies key/value types. Use getCache(String, Class, Class)");
-    }
-    return cache;
+    return safeCacheRetrieval(cacheName);
   }
 
   @SuppressWarnings("unchecked")
@@ -311,6 +299,7 @@ private <K, V> Eh107Cache<K, V> safeCacheRetrieval(final String cacheName) {
 
   @Override
   public Iterable<String> getCacheNames() {
+    checkClosed();
     refreshAllCaches();
     return Collections.unmodifiableList(new ArrayList<>(caches.keySet()));
   }
@@ -321,7 +310,6 @@ public void destroyCache(String cacheName) {
       throw new NullPointerException();
     }
 
-    MultiCacheException destroyException = new MultiCacheException();
     synchronized (cachesLock) {
       checkClosed();
 
@@ -333,27 +321,16 @@ public void destroyCache(String cacheName) {
       }
 
       try {
-        enableManagement(cache, false);
-      } catch (Throwable t) {
-        destroyException.addThrowable(t);
-      }
-
-      try {
-        enableStatistics(cache, false);
-      } catch (Throwable t) {
-        destroyException.addThrowable(t);
-      }
-
-      cache.destroy(destroyException);
-
-      try {
-        ehCacheManager.removeCache(cache.getName());
+        chain(
+          () -> enableManagement(cache, false),
+          () -> enableStatistics(cache, false),
+          () -> cache.destroy(),
+          () -> ehCacheManager.removeCache(cache.getName())
+        );
       } catch (Throwable t) {
-        destroyException.addThrowable(t);
+        throw new CacheException(t);
       }
     }
-
-    destroyException.throwIfNotEmpty();
   }
 
   @Override
@@ -449,68 +426,30 @@ public T unwrap(Class<T> clazz) {
 
   @Override
   public void close() {
-    MultiCacheException closeException = new MultiCacheException();
-    cachingProvider.close(this, closeException);
-    closeException.throwIfNotEmpty();
+    cachingProvider.close(this);
   }
 
-  void closeInternal(MultiCacheException closeException) {
-    try {
-      synchronized (cachesLock) {
-        for (Eh107Cache<?, ?> cache : caches.values()) {
-          try {
-            close(cache, closeException);
-          } catch (Throwable t) {
-            closeException.addThrowable(t);
-          }
-        }
-
-        try {
-          caches.clear();
-        } catch (Throwable t) {
-          closeException.addThrowable(t);
-        }
-
-        try {
-          ehCacheManager.close();
-        } catch (Throwable t) {
-          closeException.addThrowable(t);
-        }
+  void closeInternal() {
+    synchronized (cachesLock) {
+      try {
+        closeAll(caches.values(), (Closeable) caches::clear, ehCacheManager);
+      } catch (IOException e) {
+        throw new CacheException(e);
       }
-    } catch (Throwable t) {
-      closeException.addThrowable(t);
     }
   }
 
-  void close(Eh107Cache<?, ?> cache, MultiCacheException closeException) {
-    try {
-      if (caches.remove(cache.getName(), cache)) {
-        try {
-          unregisterObject(cache.getManagementMBean());
-        } catch (Throwable t) {
-          closeException.addThrowable(t);
-        }
-
-        try {
-          unregisterObject(cache.getStatisticsMBean());
-        } catch (Throwable t) {
-          closeException.addThrowable(t);
-        }
-
-        try {
-          cache.closeInternal(closeException);
-        } catch (Throwable t) {
-          closeException.addThrowable(t);
-        }
-
-        try {
-          ehCacheManager.removeCache(cache.getName());
-        } catch (Throwable t) {
-          closeException.addThrowable(t);
-        }
+  void close(Eh107Cache<?, ?> cache) {
+    if (caches.remove(cache.getName(), cache)) {
+      try {
+        chain(
+          () -> unregisterObject(cache.getManagementMBean()),
+          () -> unregisterObject(cache.getStatisticsMBean()),
+          () -> cache.closeInternal(),
+          () -> ehCacheManager.removeCache(cache.getName()));
+      } catch (Throwable t) {
+        throw new CacheException(t);
       }
-    } catch (Throwable t) {
-      closeException.addThrowable(t);
     }
   }
 }
diff --git a/107/src/main/java/org/ehcache/jsr107/Eh107CacheStatisticsMXBean.java b/107/src/main/java/org/ehcache/jsr107/Eh107CacheStatisticsMXBean.java
index 49091e6308..0c94c9b47f 100644
--- a/107/src/main/java/org/ehcache/jsr107/Eh107CacheStatisticsMXBean.java
+++ b/107/src/main/java/org/ehcache/jsr107/Eh107CacheStatisticsMXBean.java
@@ -16,85 +16,102 @@
 package org.ehcache.jsr107;
 
 import org.ehcache.core.spi.service.StatisticsService;
+import org.ehcache.core.statistics.CacheOperationOutcomes;
 import org.ehcache.core.statistics.CacheStatistics;
+import org.terracotta.statistics.derived.latency.Jsr107LatencyMonitor;
 
 import java.net.URI;
+import java.util.EnumSet;
 
 /**
  * @author Ludovic Orban
 */
class Eh107CacheStatisticsMXBean extends Eh107MXBean implements javax.cache.management.CacheStatisticsMXBean {
 
-  private final String cacheName;
-  private final StatisticsService statisticsService;
+  private final CacheStatistics cacheStatistics;
+
+  private final Jsr107LatencyMonitor<CacheOperationOutcomes.GetOutcome> averageGetTime;
+  private final Jsr107LatencyMonitor<CacheOperationOutcomes.PutOutcome> averagePutTime;
+  private final Jsr107LatencyMonitor<CacheOperationOutcomes.RemoveOutcome> averageRemoveTime;
 
   Eh107CacheStatisticsMXBean(String cacheName, URI cacheManagerURI, StatisticsService statisticsService) {
     super(cacheName, cacheManagerURI, "CacheStatistics");
-    this.cacheName = cacheName;
-    this.statisticsService = statisticsService;
+
+    cacheStatistics = statisticsService.getCacheStatistics(cacheName);
+
+    averageGetTime = registerDerivedStatistics(CacheOperationOutcomes.GetOutcome.class, "get");
+    averagePutTime = registerDerivedStatistics(CacheOperationOutcomes.PutOutcome.class, "put");
+    averageRemoveTime = registerDerivedStatistics(CacheOperationOutcomes.RemoveOutcome.class, "remove");
+  }
+
+  private <T extends Enum<T>> Jsr107LatencyMonitor<T> registerDerivedStatistics(Class<T> outcome, String name) {
+    Jsr107LatencyMonitor<T> monitor = new Jsr107LatencyMonitor<>(EnumSet.allOf(outcome), 1.0);
+    CacheStatistics cacheStatistics = this.cacheStatistics;
+    cacheStatistics.registerDerivedStatistic(outcome, name, monitor);
+    return monitor;
   }
 
   @Override
   public void clear() {
-    getCacheStatistics().clear();
+    cacheStatistics.clear();
+    averageGetTime.clear();
+    averagePutTime.clear();
+    averageRemoveTime.clear();
   }
 
   @Override
   public long getCacheHits() {
-    return getCacheStatistics().getCacheHits();
+    return cacheStatistics.getCacheHits();
   }
 
   @Override
   public float getCacheHitPercentage() {
-    return getCacheStatistics().getCacheHitPercentage();
+    return cacheStatistics.getCacheHitPercentage();
   }
 
   @Override
   public long getCacheMisses() {
-    return getCacheStatistics().getCacheMisses();
+    return cacheStatistics.getCacheMisses();
   }
 
   @Override
   public float getCacheMissPercentage() {
-    return getCacheStatistics().getCacheMissPercentage();
+    return cacheStatistics.getCacheMissPercentage();
   }
 
   @Override
   public long getCacheGets() {
-    return getCacheStatistics().getCacheGets();
+    return cacheStatistics.getCacheGets();
   }
 
   @Override
   public long getCachePuts() {
-    return getCacheStatistics().getCachePuts();
+    return cacheStatistics.getCachePuts();
  }
 
   @Override
   public long getCacheRemovals() {
-    return getCacheStatistics().getCacheRemovals();
+    return cacheStatistics.getCacheRemovals();
   }
 
   @Override
   public long getCacheEvictions() {
-    return getCacheStatistics().getCacheEvictions();
+    return cacheStatistics.getCacheEvictions();
   }
 
   @Override
   public float getAverageGetTime() {
-    return getCacheStatistics().getCacheAverageGetTime();
+    return (float) averageGetTime.average();
   }
 
   @Override
   public float getAveragePutTime() {
-    return getCacheStatistics().getCacheAveragePutTime();
+    return (float) averagePutTime.average();
   }
 
   @Override
   public float getAverageRemoveTime() {
-    return getCacheStatistics().getCacheAverageRemoveTime();
+    return (float) averageRemoveTime.average();
   }
 
-  private CacheStatistics getCacheStatistics() {
-    return statisticsService.getCacheStatistics(cacheName);
-  }
 }
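
The MXBean now samples latency through derived statistics (Jsr107LatencyMonitor instances registered per outcome type) instead of querying the StatisticsService on every getter call. A simplified stand-in showing the running-average role such a monitor plays; this is not the Terracotta implementation, and the microsecond unit is an assumption based on the JCache CacheStatisticsMXBean contract:

    import java.util.concurrent.atomic.AtomicLong;

    public class AverageLatencyDemo {
      // Records each operation's latency and exposes a running average for
      // the MXBean getters; clear() resets it alongside the counters.
      static final class LatencyMonitor {
        private final AtomicLong totalNanos = new AtomicLong();
        private final AtomicLong count = new AtomicLong();

        void record(long nanos) {
          totalNanos.addAndGet(nanos);
          count.incrementAndGet();
        }

        double average() { // microseconds
          long n = count.get();
          return n == 0 ? 0.0 : (totalNanos.get() / 1000.0) / n;
        }

        void clear() {
          totalNanos.set(0);
          count.set(0);
        }
      }

      public static void main(String[] args) {
        LatencyMonitor getTime = new LatencyMonitor();
        getTime.record(2_000); // 2 µs
        getTime.record(4_000); // 4 µs
        System.out.println((float) getTime.average()); // 3.0
      }
    }
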
diff --git a/107/src/main/java/org/ehcache/jsr107/Eh107CompleteConfiguration.java b/107/src/main/java/org/ehcache/jsr107/Eh107CompleteConfiguration.java
index 5dcfe8c74b..041da404fe 100644
--- a/107/src/main/java/org/ehcache/jsr107/Eh107CompleteConfiguration.java
+++ b/107/src/main/java/org/ehcache/jsr107/Eh107CompleteConfiguration.java
@@ -207,7 +207,7 @@ public <T> T unwrap(Class<T> clazz) {
     return Unwrap.unwrap(clazz, this, ehcacheConfig);
   }
 
-  private Object writeReplace() throws ObjectStreamException {
+  private Object writeReplace() {
     throw new UnsupportedOperationException("Serialization of Ehcache provider configuration classes is not supported");
   }
 
diff --git a/107/src/main/java/org/ehcache/jsr107/Eh107Configuration.java b/107/src/main/java/org/ehcache/jsr107/Eh107Configuration.java
index 5c85f79fd6..0b8e8f8f4f 100644
--- a/107/src/main/java/org/ehcache/jsr107/Eh107Configuration.java
+++ b/107/src/main/java/org/ehcache/jsr107/Eh107Configuration.java
@@ -118,8 +118,7 @@ public Class<V> getValueType() {
 
     @Override
     public boolean isStoreByValue() {
-      Collection<DefaultCopierConfiguration> copierConfig = findAmongst(DefaultCopierConfiguration.class, cacheConfiguration.getServiceConfigurations());
-      return !copierConfig.isEmpty();
+      return !findAmongst(DefaultCopierConfiguration.class, cacheConfiguration.getServiceConfigurations()).isEmpty();
     }
   }
 }
diff --git a/107/src/main/java/org/ehcache/jsr107/Eh107Expiry.java b/107/src/main/java/org/ehcache/jsr107/Eh107Expiry.java
index 16acd241a2..e3ccaacaee 100644
--- a/107/src/main/java/org/ehcache/jsr107/Eh107Expiry.java
+++ b/107/src/main/java/org/ehcache/jsr107/Eh107Expiry.java
@@ -15,12 +15,15 @@
  */
 package org.ehcache.jsr107;
 
-import org.ehcache.expiry.Expiry;
+import org.ehcache.expiry.ExpiryPolicy;
+
+import java.time.Duration;
+import java.util.function.Supplier;
 
 /**
  * Eh107Expiry
 */
-abstract class Eh107Expiry<K, V> implements Expiry<K, V> {
+abstract class Eh107Expiry<K, V> implements ExpiryPolicy<K, V> {
   private final ThreadLocal<Object> shortCircuitAccess = new ThreadLocal<>();
 
   void enableShortCircuitAccessCalls() {
@@ -31,8 +34,18 @@ void disableShortCircuitAccessCalls() {
     shortCircuitAccess.remove();
   }
 
-  boolean isShortCircuitAccessCalls() {
+  private boolean isShortCircuitAccessCalls() {
     return shortCircuitAccess.get() != null;
   }
 
+  @Override
+  public final Duration getExpiryForAccess(K key, Supplier<? extends V> value) {
+    if (isShortCircuitAccessCalls()) {
+      return null;
+    } else {
+      return getExpiryForAccessInternal(key, value);
+    }
+  }
+
+  protected abstract Duration getExpiryForAccessInternal(K key, Supplier<? extends V> value);
 }
diff --git a/107/src/main/java/org/ehcache/jsr107/Eh107ReverseConfiguration.java b/107/src/main/java/org/ehcache/jsr107/Eh107ReverseConfiguration.java
index a9199cefc1..c667c8a795 100644
--- a/107/src/main/java/org/ehcache/jsr107/Eh107ReverseConfiguration.java
+++ b/107/src/main/java/org/ehcache/jsr107/Eh107ReverseConfiguration.java
@@ -102,7 +102,7 @@ public boolean isStoreByValue() {
     return storeByValueOnHeap;
   }
 
-  private Object writeReplace() throws ObjectStreamException {
+  private Object writeReplace() {
     throw new UnsupportedOperationException("Serialization of Ehcache provider configuration classes is not supported");
   }
 }
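
Eh107Expiry now uses the template-method pattern: the public getExpiryForAccess is final and consults a ThreadLocal short-circuit flag, while subclasses implement only getExpiryForAccessInternal. The shape of that contract, reduced to a runnable sketch with simplified types (String keys, Long "durations"):

    import java.util.function.Supplier;

    public class ShortCircuitExpiryDemo {
      static abstract class Expiry {
        private final ThreadLocal<Object> shortCircuit = new ThreadLocal<>();

        void enable() { shortCircuit.set(this); }
        void disable() { shortCircuit.remove(); }

        // Final hook: subclasses cannot bypass the short-circuit check.
        final Long getExpiryForAccess(String key, Supplier<String> value) {
          if (shortCircuit.get() != null) {
            return null; // no expiry update while short-circuited
          }
          return getExpiryForAccessInternal(key, value);
        }

        protected abstract Long getExpiryForAccessInternal(String key, Supplier<String> value);
      }

      public static void main(String[] args) {
        Expiry expiry = new Expiry() {
          @Override protected Long getExpiryForAccessInternal(String k, Supplier<String> v) { return 60L; }
        };
        System.out.println(expiry.getExpiryForAccess("k", () -> "v")); // 60
        expiry.enable();
        System.out.println(expiry.getExpiryForAccess("k", () -> "v")); // null
        expiry.disable();
      }
    }
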
diff --git a/107/src/main/java/org/ehcache/jsr107/EhcacheCachingProvider.java b/107/src/main/java/org/ehcache/jsr107/EhcacheCachingProvider.java
index 496aba8080..46f353d777 100644
--- a/107/src/main/java/org/ehcache/jsr107/EhcacheCachingProvider.java
+++ b/107/src/main/java/org/ehcache/jsr107/EhcacheCachingProvider.java
@@ -16,14 +16,15 @@
 package org.ehcache.jsr107;
 
 import org.ehcache.config.Configuration;
+import org.ehcache.core.EhcacheManager;
 import org.ehcache.core.config.DefaultConfiguration;
 import org.ehcache.core.internal.util.ClassLoading;
 import org.ehcache.core.spi.service.ServiceUtils;
 import org.ehcache.impl.config.serializer.DefaultSerializationProviderConfiguration;
 import org.ehcache.jsr107.config.Jsr107Configuration;
-import org.ehcache.jsr107.config.Jsr107Service;
 import org.ehcache.jsr107.internal.DefaultJsr107Service;
 import org.ehcache.spi.service.Service;
+import org.ehcache.spi.service.ServiceCreationConfiguration;
 import org.ehcache.xml.XmlConfiguration;
 
 import java.net.URI;
@@ -36,10 +37,13 @@
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
 
+import javax.cache.CacheException;
 import javax.cache.CacheManager;
 import javax.cache.configuration.OptionalFeature;
 import javax.cache.spi.CachingProvider;
 
+import static org.ehcache.jsr107.CloseUtil.chain;
+
 /**
  * {@link CachingProvider} implementation for Ehcache.
 */
@@ -135,7 +139,7 @@ Eh107CacheManager getCacheManager(ConfigSupplier configSupplier, Properties prop
 
   private Eh107CacheManager createCacheManager(URI uri, Configuration config, Properties properties) {
     Eh107CacheLoaderWriterProvider cacheLoaderWriterFactory = new Eh107CacheLoaderWriterProvider();
-    Object[] serviceCreationConfigurations = config.getServiceCreationConfigurations().toArray();
+    Collection<ServiceCreationConfiguration<?>> serviceCreationConfigurations = config.getServiceCreationConfigurations();
 
     Jsr107Service jsr107Service = new DefaultJsr107Service(ServiceUtils.findSingletonAmongst(Jsr107Configuration.class, serviceCreationConfigurations));
 
@@ -147,10 +151,10 @@ private Eh107CacheManager createCacheManager(URI uri, Configuration config, Prop
       services.add(new DefaultJsr107SerializationProvider());
     }
 
-    Eh107InternalCacheManager ehcacheManager = new Eh107InternalCacheManager(config, services, !jsr107Service.jsr107CompliantAtomics());
+    org.ehcache.CacheManager ehcacheManager = new EhcacheManager(config, services, !jsr107Service.jsr107CompliantAtomics());
     ehcacheManager.init();
 
-    return new Eh107CacheManager(this, ehcacheManager, properties, config.getClassLoader(), uri,
+    return new Eh107CacheManager(this, ehcacheManager, jsr107Service, properties, config.getClassLoader(), uri,
         new ConfigurationMerger(config, jsr107Service, cacheLoaderWriterFactory));
   }
 
@@ -218,17 +222,16 @@ public void close(final ClassLoader classLoader) {
       throw new NullPointerException();
     }
 
-    MultiCacheException closeException = new MultiCacheException();
     synchronized (cacheManagers) {
       final ConcurrentMap<URI, Eh107CacheManager> map = cacheManagers.remove(classLoader);
       if (map != null) {
-        for (Eh107CacheManager cacheManager : map.values()) {
-          cacheManager.closeInternal(closeException);
+        try {
+          chain(map.values().stream().map(cm -> cm::closeInternal));
+        } catch (Throwable t) {
+          throw new CacheException(t);
         }
       }
     }
-
-    closeException.throwIfNotEmpty();
   }
 
   /**
@@ -240,17 +243,15 @@ public void close(final URI uri, final ClassLoader classLoader) {
       throw new NullPointerException();
     }
 
-    MultiCacheException closeException = new MultiCacheException();
     synchronized (cacheManagers) {
       final ConcurrentMap<URI, Eh107CacheManager> map = cacheManagers.get(classLoader);
       if (map != null) {
         final Eh107CacheManager cacheManager = map.remove(uri);
         if (cacheManager != null) {
-          cacheManager.closeInternal(closeException);
+          cacheManager.closeInternal();
         }
       }
     }
-
-    closeException.throwIfNotEmpty();
   }
 
   /**
@@ -272,16 +273,12 @@ public boolean isSupported(final OptionalFeature optionalFeature) {
     throw new IllegalArgumentException("Unknown OptionalFeature: " + optionalFeature.name());
   }
 
-  void close(Eh107CacheManager cacheManager, MultiCacheException closeException) {
-    try {
-      synchronized (cacheManagers) {
-        final ConcurrentMap<URI, Eh107CacheManager> map = cacheManagers.get(cacheManager.getClassLoader());
-        if (map != null && map.remove(cacheManager.getURI()) != null) {
-          cacheManager.closeInternal(closeException);
-        }
+  void close(Eh107CacheManager cacheManager) {
+    synchronized (cacheManagers) {
+      final ConcurrentMap<URI, Eh107CacheManager> map = cacheManagers.get(cacheManager.getClassLoader());
+      if (map != null && map.remove(cacheManager.getURI()) != null) {
+        cacheManager.closeInternal();
      }
-    } catch (Throwable t) {
-      closeException.addThrowable(t);
     }
   }
 
diff --git a/107/src/main/java/org/ehcache/jsr107/EhcacheExpiryWrapper.java b/107/src/main/java/org/ehcache/jsr107/EhcacheExpiryWrapper.java
index 3b49e088b3..e3a23b2a29 100644
--- a/107/src/main/java/org/ehcache/jsr107/EhcacheExpiryWrapper.java
+++ b/107/src/main/java/org/ehcache/jsr107/EhcacheExpiryWrapper.java
@@ -15,18 +15,19 @@
  */
 package org.ehcache.jsr107;
 
-import org.ehcache.ValueSupplier;
-import org.ehcache.expiry.Duration;
-import org.ehcache.expiry.Expiry;
+import org.ehcache.expiry.ExpiryPolicy;
+
+import java.time.Duration;
+import java.util.function.Supplier;
 
 /**
  * EhcacheExpiryWrapper
 */
 class EhcacheExpiryWrapper<K, V> extends Eh107Expiry<K, V> {
 
-  private final Expiry<? super K, ? super V> wrappedExpiry;
+  private final ExpiryPolicy<? super K, ? super V> wrappedExpiry;
 
-  EhcacheExpiryWrapper(Expiry<? super K, ? super V> wrappedExpiry) {
+  EhcacheExpiryWrapper(ExpiryPolicy<? super K, ? super V> wrappedExpiry) {
     this.wrappedExpiry = wrappedExpiry;
   }
 
@@ -36,12 +37,12 @@ public Duration getExpiryForCreation(K key, V value) {
   }
 
   @Override
-  public Duration getExpiryForAccess(K key, ValueSupplier<? extends V> value) {
+  protected Duration getExpiryForAccessInternal(K key, Supplier<? extends V> value) {
     return wrappedExpiry.getExpiryForAccess(key, value);
   }
 
   @Override
-  public Duration getExpiryForUpdate(K key, ValueSupplier<? extends V> oldValue, V newValue) {
+  public Duration getExpiryForUpdate(K key, Supplier<? extends V> oldValue, V newValue) {
     return wrappedExpiry.getExpiryForUpdate(key, oldValue, newValue);
   }
 }
diff --git a/107/src/main/java/org/ehcache/jsr107/EventListenerAdaptors.java b/107/src/main/java/org/ehcache/jsr107/EventListenerAdaptors.java
index a47040ab6a..b03d1080db 100644
--- a/107/src/main/java/org/ehcache/jsr107/EventListenerAdaptors.java
+++ b/107/src/main/java/org/ehcache/jsr107/EventListenerAdaptors.java
@@ -174,7 +174,7 @@ org.ehcache.event.EventType getEhcacheEventType() {
   @SuppressWarnings("unchecked")
   @Override
   public void onEvent(org.ehcache.event.CacheEvent<? extends K, ? extends V> ehEvent) {
-    Eh107CacheEntryEvent<K, V> event = new Eh107CacheEntryEvent.NormalEvent<>(source, EventType.CREATED, ehEvent, requestsOld);
+    Eh107CacheEntryEvent<K, V> event = new Eh107CacheEntryEvent.NormalEvent<>(source, EventType.CREATED, ehEvent, false);
     if (filter.evaluate(event)) {
       Set<CacheEntryEvent<? extends K, ? extends V>> events = Collections.singleton(event);
       listener.onCreated((Iterable<CacheEntryEvent<? extends K, ? extends V>>) events);
diff --git a/107/src/main/java/org/ehcache/jsr107/ExpiryPolicyToEhcacheExpiry.java b/107/src/main/java/org/ehcache/jsr107/ExpiryPolicyToEhcacheExpiry.java
index 43c4ac13b8..3bf9dd4c0c 100644
--- a/107/src/main/java/org/ehcache/jsr107/ExpiryPolicyToEhcacheExpiry.java
+++ b/107/src/main/java/org/ehcache/jsr107/ExpiryPolicyToEhcacheExpiry.java
@@ -15,10 +15,11 @@
  */
 package org.ehcache.jsr107;
 
-import org.ehcache.ValueSupplier;
+import org.ehcache.core.config.ExpiryUtils;
 
 import java.io.Closeable;
 import java.io.IOException;
+import java.util.function.Supplier;
 
 import javax.cache.expiry.Duration;
 import javax.cache.expiry.ExpiryPolicy;
@@ -32,51 +33,38 @@ class ExpiryPolicyToEhcacheExpiry<K, V> extends Eh107Expiry<K, V> implements Clo
   }
 
   @Override
-  public org.ehcache.expiry.Duration getExpiryForCreation(K key, V value) {
+  public java.time.Duration getExpiryForCreation(K key, V value) {
     try {
       Duration duration = expiryPolicy.getExpiryForCreation();
-      if (duration.isEternal()) {
-        return org.ehcache.expiry.Duration.INFINITE;
-      }
-      return new org.ehcache.expiry.Duration(duration.getDurationAmount(), duration.getTimeUnit());
+      return convertDuration(duration);
     } catch (Throwable t) {
-      return org.ehcache.expiry.Duration.ZERO;
+      return java.time.Duration.ZERO;
     }
   }
 
   @Override
-  public org.ehcache.expiry.Duration getExpiryForAccess(K key, ValueSupplier<? extends V> value) {
-    if (isShortCircuitAccessCalls()) {
-      return null;
-    }
-
+  protected java.time.Duration getExpiryForAccessInternal(K key, Supplier<? extends V> value) {
     try {
       Duration duration = expiryPolicy.getExpiryForAccess();
       if (duration == null) {
         return null;
       }
-      if (duration.isEternal()) {
-        return org.ehcache.expiry.Duration.INFINITE;
-      }
-      return new org.ehcache.expiry.Duration(duration.getDurationAmount(), duration.getTimeUnit());
+      return convertDuration(duration);
     } catch (Throwable t) {
-      return org.ehcache.expiry.Duration.ZERO;
+      return java.time.Duration.ZERO;
     }
   }
 
   @Override
-  public org.ehcache.expiry.Duration getExpiryForUpdate(K key, ValueSupplier<? extends V> oldValue, V newValue) {
+  public java.time.Duration getExpiryForUpdate(K key, Supplier<? extends V> oldValue, V newValue) {
     try {
       Duration duration = expiryPolicy.getExpiryForUpdate();
       if (duration == null) {
         return null;
       }
-      if (duration.isEternal()) {
-        return org.ehcache.expiry.Duration.INFINITE;
-      }
-      return new org.ehcache.expiry.Duration(duration.getDurationAmount(), duration.getTimeUnit());
+      return convertDuration(duration);
     } catch (Throwable t) {
-      return org.ehcache.expiry.Duration.ZERO;
+      return java.time.Duration.ZERO;
     }
   }
 
@@ -86,4 +74,11 @@ public void close() throws IOException {
       ((Closeable)expiryPolicy).close();
     }
   }
+
+  private java.time.Duration convertDuration(Duration duration) {
+    if (duration.isEternal()) {
+      return org.ehcache.expiry.ExpiryPolicy.INFINITE;
+    }
+    return java.time.Duration.of(duration.getDurationAmount(), ExpiryUtils.jucTimeUnitToTemporalUnit(duration.getTimeUnit()));
+  }
 }
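
convertDuration maps a JSR-107 Duration onto java.time.Duration, treating eternal durations as the ExpiryPolicy.INFINITE constant. A sketch with the TimeUnit-to-ChronoUnit mapping written out inline; the real code delegates that mapping to ExpiryUtils.jucTimeUnitToTemporalUnit, and toJavaTime is a hypothetical name:

    import java.time.Duration;
    import java.time.temporal.ChronoUnit;
    import java.util.concurrent.TimeUnit;

    public class DurationConversionDemo {
      static Duration toJavaTime(long amount, TimeUnit unit, boolean eternal) {
        if (eternal) {
          // Stand-in for org.ehcache.expiry.ExpiryPolicy.INFINITE.
          return ChronoUnit.FOREVER.getDuration();
        }
        return Duration.of(amount, toChronoUnit(unit));
      }

      static ChronoUnit toChronoUnit(TimeUnit unit) {
        switch (unit) {
          case NANOSECONDS:  return ChronoUnit.NANOS;
          case MICROSECONDS: return ChronoUnit.MICROS;
          case MILLISECONDS: return ChronoUnit.MILLIS;
          case SECONDS:      return ChronoUnit.SECONDS;
          case MINUTES:      return ChronoUnit.MINUTES;
          case HOURS:        return ChronoUnit.HOURS;
          case DAYS:         return ChronoUnit.DAYS;
          default: throw new AssertionError(unit);
        }
      }

      public static void main(String[] args) {
        System.out.println(toJavaTime(30, TimeUnit.SECONDS, false)); // PT30S
      }
    }
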
diff --git a/107/src/main/java/org/ehcache/jsr107/config/Jsr107Service.java b/107/src/main/java/org/ehcache/jsr107/Jsr107Service.java
similarity index 85%
rename from 107/src/main/java/org/ehcache/jsr107/config/Jsr107Service.java
rename to 107/src/main/java/org/ehcache/jsr107/Jsr107Service.java
index b713fa0b95..6d1754cd51 100644
--- a/107/src/main/java/org/ehcache/jsr107/config/Jsr107Service.java
+++ b/107/src/main/java/org/ehcache/jsr107/Jsr107Service.java
@@ -14,8 +14,10 @@
  * limitations under the License.
 */
 
-package org.ehcache.jsr107.config;
+package org.ehcache.jsr107;
 
+import org.ehcache.core.spi.service.StatisticsService;
+import org.ehcache.jsr107.config.ConfigurationElementState;
 import org.ehcache.spi.service.Service;
 
 /**
@@ -54,4 +56,10 @@ public interface Jsr107Service extends Service {
   */
   ConfigurationElementState isStatisticsEnabledOnAllCaches();
 
+  /**
+   * Returns the statistics service associated with this JSR107 service.
+   *
+   * @return the statistics service
+   */
+  StatisticsService getStatistics();
 }
diff --git a/107/src/main/java/org/ehcache/jsr107/ListenerResources.java b/107/src/main/java/org/ehcache/jsr107/ListenerResources.java
index d4fa873b92..cb43b3befa 100644
--- a/107/src/main/java/org/ehcache/jsr107/ListenerResources.java
+++ b/107/src/main/java/org/ehcache/jsr107/ListenerResources.java
@@ -17,16 +17,19 @@
 package org.ehcache.jsr107;
 
 import java.io.Closeable;
-import java.io.IOException;
 import java.util.Collections;
 import java.util.List;
 
 import javax.cache.Cache;
+import javax.cache.CacheException;
 import javax.cache.configuration.CacheEntryListenerConfiguration;
 import javax.cache.configuration.Factory;
 import javax.cache.event.CacheEntryEventFilter;
 import javax.cache.event.CacheEntryListener;
 
+import static org.ehcache.jsr107.CloseUtil.closeAllAfter;
+import static org.ehcache.jsr107.CloseUtil.closeAll;
+
 /**
  * ListenerResources
 */
@@ -37,8 +40,7 @@ class ListenerResources<K, V> implements Closeable {
   private List<EventListenerAdaptor<K, V>> ehListeners = null;
 
   @SuppressWarnings("unchecked")
-  static <K, V> ListenerResources<K, V> createListenerResources(CacheEntryListenerConfiguration<K, V> listenerConfig,
-      MultiCacheException mce) {
+  static <K, V> ListenerResources<K, V> createListenerResources(CacheEntryListenerConfiguration<K, V> listenerConfig) {
     CacheEntryListener<? super K, ? super V> listener = listenerConfig.getCacheEntryListenerFactory().create();
 
     // create the filter, closing the listener above upon exception
@@ -49,21 +51,16 @@ static <K, V> ListenerResources<K, V> createListenerResources(CacheEntryListener
       if (filterFactory != null) {
         filter = listenerConfig.getCacheEntryEventFilterFactory().create();
       } else {
-        filter = (CacheEntryEventFilter<? super K, ? super V>) NullCacheEntryEventFilter.INSTANCE;
+        filter = event -> true;
       }
     } catch (Throwable t) {
-      mce.addThrowable(t);
-      CacheResources.close(listener, mce);
-      throw mce;
+      throw closeAllAfter(new CacheException(t), listener);
     }
 
     try {
       return new ListenerResources<>(listener, filter);
     } catch (Throwable t) {
-      mce.addThrowable(t);
-      CacheResources.close(filter, mce);
-      CacheResources.close(listener, mce);
-      throw mce;
+      throw closeAllAfter(new CacheException(t), filter, listener);
     }
   }
 
@@ -91,11 +88,12 @@ synchronized List<EventListenerAdaptor<K, V>> getEhcacheLi
   }
 
   @Override
-  public void close() throws IOException {
-    MultiCacheException mce = new MultiCacheException();
-    CacheResources.close(listener, mce);
-    CacheResources.close(filter, mce);
-    mce.throwIfNotEmpty();
+  public void close() {
+    try {
+      closeAll(listener, filter);
+    } catch (Throwable t) {
+      throw new CacheException(t);
+    }
   }
 }
diff --git a/107/src/main/java/org/ehcache/jsr107/MultiCacheException.java b/107/src/main/java/org/ehcache/jsr107/MultiCacheException.java
deleted file mode 100644
index c3543dd478..0000000000
--- a/107/src/main/java/org/ehcache/jsr107/MultiCacheException.java
+++ /dev/null
@@ -1,144 +0,0 @@
-/*
- * Copyright Terracotta, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.ehcache.jsr107;
-
-import java.io.PrintStream;
-import java.io.PrintWriter;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import java.util.ListIterator;
-
-import javax.cache.CacheException;
-
-class MultiCacheException extends CacheException {
-  private static final long serialVersionUID = -6839700789356356261L;
-
-  private final List<Throwable> throwables = new ArrayList<>();
-
-  MultiCacheException() {
-    super();
-  }
-
-  MultiCacheException(Throwable t) {
-    addThrowable(t);
-  }
-
-  void addThrowable(Throwable t) {
-    if (t == null) {
-      throw new NullPointerException();
-    }
-
-    if (t == this) {
-      throw new IllegalArgumentException("cannot add to self");
-    }
-
-    if (t instanceof MultiCacheException) {
-      for (Throwable t2 : ((MultiCacheException)t).getThrowables()) {
-        throwables.add(t2);
-      }
-    } else {
-      throwables.add(t);
-    }
-  }
-
-  private List<Throwable> getThrowables() {
-    return Collections.unmodifiableList(throwables);
-  }
-
-  @Override
-  public String getMessage() {
-    List<Throwable> ts = getThrowables();
-    if (ts.isEmpty()) {
-      return super.getMessage();
-    } else {
-      StringBuilder sb = new StringBuilder();
-      for (int i = 0; i < ts.size(); i++) {
-        sb.append("[Exception ").append(i).append("] ").append(ts.get(i).getMessage()).append("\n");
-      }
-      return sb.deleteCharAt(sb.length() - 1).toString();
-    }
-  }
-
-  MultiCacheException addFirstThrowable(Throwable t) {
-    if (t == null) {
-      throw new NullPointerException();
-    }
-
-    if (t == this) {
-      throw new IllegalArgumentException("cannot add to self");
-    }
-
-    if (t instanceof MultiCacheException) {
-      MultiCacheException mce = (MultiCacheException) t;
-      throwables.addAll(0, mce.getThrowables());
-    }
-    throwables.add(0, t);
-    return this;
-  }
-
-  @Override
-  public Throwable initCause(Throwable cause) {
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
-  public Throwable getCause() {
-    return null;
-  }
-
-  @Override
-  public void printStackTrace() {
-    super.printStackTrace();
-    for (int i = 0; i < throwables.size(); i++) {
-      System.err.print("  [Exception " + i + "] ");
-      throwables.get(i).printStackTrace();
-    }
-  }
-
-  @Override
-  public void printStackTrace(PrintStream ps) {
-    super.printStackTrace(ps);
-    for (int i = 0; i < throwables.size(); i++) {
-      ps.print("  [Exception " + i + "] ");
-      throwables.get(i).printStackTrace(ps);
-    }
-  }
-
-  @Override
-  public void printStackTrace(PrintWriter pw) {
-    super.printStackTrace(pw);
-    for (int i = 0; i < throwables.size(); i++) {
-      pw.print("  [Exception " + i + "] ");
-      throwables.get(i).printStackTrace(pw);
-    }
-  }
-
-  void throwIfNotEmpty() {
-    if (!throwables.isEmpty()) {
-
-      // if the only thing we contain is a single CacheException, then throw that
-      if (throwables.size() == 1) {
-        Throwable t = throwables.get(0);
-        if (t instanceof CacheException) {
-          throw (CacheException)t;
-        }
-      }
-
-      throw this;
-    }
-  }
-}
diff --git a/107/src/main/java/org/ehcache/jsr107/Unwrap.java b/107/src/main/java/org/ehcache/jsr107/Unwrap.java
index b4b5a917b8..2bdafe4a5d 100644
--- a/107/src/main/java/org/ehcache/jsr107/Unwrap.java
+++ b/107/src/main/java/org/ehcache/jsr107/Unwrap.java
@@ -15,23 +15,18 @@
  */
 package org.ehcache.jsr107;
 
+import static java.util.Arrays.stream;
+import static java.util.Objects.requireNonNull;
+
 /**
  * @author teck
 */
 final class Unwrap {
 
   static <T> T unwrap(Class<T> clazz, Object... obj) {
-    if (clazz == null || obj == null) {
-      throw new NullPointerException();
-    }
-
-    for (Object o : obj) {
-      if (o != null && clazz.isAssignableFrom(o.getClass())) {
-        return clazz.cast(o);
-      }
-    }
-
-    throw new IllegalArgumentException("Cannot unwrap to " + clazz);
+    requireNonNull(clazz);
+    return stream(obj).filter(clazz::isInstance).map(clazz::cast).findFirst()
+      .orElseThrow(() -> new IllegalArgumentException("Cannot unwrap to " + clazz));
   }
 
   private Unwrap() {
28caa6c52b..835b2a36f6 100644 --- a/107/src/main/java/org/ehcache/jsr107/internal/Jsr107CacheConfigurationParser.java +++ b/107/src/main/java/org/ehcache/jsr107/internal/Jsr107CacheConfigurationParser.java @@ -16,9 +16,10 @@ package org.ehcache.jsr107.internal; + import org.ehcache.jsr107.config.ConfigurationElementState; import org.ehcache.jsr107.config.Jsr107CacheConfiguration; -import org.ehcache.jsr107.config.Jsr107Service; +import org.ehcache.jsr107.Jsr107Service; import org.ehcache.spi.service.ServiceConfiguration; import org.ehcache.xml.CacheServiceConfigurationParser; import org.ehcache.xml.exceptions.XmlConfigurationException; @@ -69,4 +70,15 @@ public ServiceConfiguration parseServiceConfiguration(Element fra fragment.getTagName(), (fragment.getParentNode() == null ? "null" : fragment.getParentNode().getLocalName()))); } } + + @Override + public Class getServiceType() { + return Jsr107Service.class; + } + + @Override + public Element unparseServiceConfiguration(ServiceConfiguration serviceConfiguration) { + throw new XmlConfigurationException("XML translation of JSR-107 cache elements is not supported"); + } + } diff --git a/107/src/main/java/org/ehcache/jsr107/internal/Jsr107ServiceConfigurationParser.java b/107/src/main/java/org/ehcache/jsr107/internal/Jsr107ServiceConfigurationParser.java index 51dc5fa2c1..a80c67bf5d 100644 --- a/107/src/main/java/org/ehcache/jsr107/internal/Jsr107ServiceConfigurationParser.java +++ b/107/src/main/java/org/ehcache/jsr107/internal/Jsr107ServiceConfigurationParser.java @@ -19,8 +19,9 @@ import org.ehcache.jsr107.config.ConfigurationElementState; import org.ehcache.jsr107.config.Jsr107Configuration; import org.ehcache.xml.CacheManagerServiceConfigurationParser; -import org.ehcache.jsr107.config.Jsr107Service; +import org.ehcache.jsr107.Jsr107Service; import org.ehcache.spi.service.ServiceCreationConfiguration; +import org.ehcache.xml.exceptions.XmlConfigurationException; import org.w3c.dom.Element; import org.w3c.dom.Node; import org.w3c.dom.NodeList; @@ -86,4 +87,15 @@ public ServiceCreationConfiguration parseServiceCreationConfigura return new Jsr107Configuration(defaultTemplate, templates, jsr107CompliantAtomics, enableManagementAll, enableStatisticsAll); } + + @Override + public Class getServiceType() { + return Jsr107Service.class; + } + + @Override + public Element unparseServiceCreationConfiguration(ServiceCreationConfiguration serviceCreationConfiguration) { + throw new XmlConfigurationException("XML translation of JSR-107 cache elements is not supported"); + } + } diff --git a/107/src/tck/resources/ExcludeList b/107/src/tck/resources/ExcludeList index 0cc2da0b52..e84e6c9418 100644 --- a/107/src/tck/resources/ExcludeList +++ b/107/src/tck/resources/ExcludeList @@ -4,10 +4,3 @@ # This is a dummy test that fails if not in the exclude list.
org.jsr107.tck.CachingTest#dummyTest - -# see https://github.com/jsr107/jsr107tck/issues/63 -org.jsr107.tck.management.CacheMBStatisticsBeanTest#testPutIfAbsent - -# see https://github.com/jsr107/jsr107tck/issues/61 -org.jsr107.tck.spi.CachingProviderClassLoaderTest#getCacheManagerSingleton - diff --git a/107/src/test/java/com/pany/domain/Client.java b/107/src/test/java/com/pany/domain/Client.java index ae5745433b..7829184092 100644 --- a/107/src/test/java/com/pany/domain/Client.java +++ b/107/src/test/java/com/pany/domain/Client.java @@ -23,6 +23,8 @@ */ public class Client implements Serializable { + private static final long serialVersionUID = 1L; + private final String name; private final long creditLine; diff --git a/107/src/test/java/com/pany/ehcache/integration/ProductCacheLoaderWriter.java b/107/src/test/java/com/pany/ehcache/integration/ProductCacheLoaderWriter.java index 30770b375e..efd2315a88 100644 --- a/107/src/test/java/com/pany/ehcache/integration/ProductCacheLoaderWriter.java +++ b/107/src/test/java/com/pany/ehcache/integration/ProductCacheLoaderWriter.java @@ -38,13 +38,13 @@ public class ProductCacheLoaderWriter implements CacheLoaderWriter seen = new HashSet<>(); @Override - public Product load(final Long key) throws Exception { + public Product load(final Long key) { seen.add(key); return new Product(key); } @Override - public Map loadAll(final Iterable keys) throws Exception { + public Map loadAll(final Iterable keys) { for (Long key : keys) { seen.add(key); } @@ -52,7 +52,7 @@ public Map loadAll(final Iterable keys) throws Ex } @Override - public void write(final Long key, final Product value) throws Exception { + public void write(final Long key, final Product value) { List products = written.get(key); if(products == null) { products = new ArrayList<>(); @@ -65,17 +65,17 @@ public void write(final Long key, final Product value) throws Exception { } @Override - public void writeAll(final Iterable> entries) throws Exception { + public void writeAll(final Iterable> entries) { // no-op } @Override - public void delete(final Long key) throws Exception { + public void delete(final Long key) { // no-op } @Override - public void deleteAll(final Iterable keys) throws Exception { + public void deleteAll(final Iterable keys) { // no-op } } diff --git a/107/src/test/java/org/ehcache/ParsesConfigurationExtensionTest.java b/107/src/test/java/org/ehcache/ParsesConfigurationExtensionTest.java index eedc12cae4..26f774dff6 100644 --- a/107/src/test/java/org/ehcache/ParsesConfigurationExtensionTest.java +++ b/107/src/test/java/org/ehcache/ParsesConfigurationExtensionTest.java @@ -23,20 +23,19 @@ import org.ehcache.config.CacheRuntimeConfiguration; import org.ehcache.core.EhcacheManager; import org.ehcache.core.spi.service.ServiceUtils; +import org.ehcache.expiry.ExpiryPolicy; import org.ehcache.jsr107.config.Jsr107Configuration; import org.ehcache.config.ResourceType; import org.ehcache.xml.XmlConfiguration; -import org.ehcache.expiry.Duration; -import org.ehcache.expiry.Expiry; import org.ehcache.jsr107.internal.DefaultJsr107Service; import org.ehcache.spi.service.Service; import org.junit.Test; import org.xml.sax.SAXException; import java.io.IOException; +import java.time.Duration; import java.util.Collections; import java.util.List; -import java.util.concurrent.TimeUnit; import static org.ehcache.config.builders.ResourcePoolsBuilder.heap; import static org.hamcrest.CoreMatchers.equalTo; @@ -55,7 +54,7 @@ public class ParsesConfigurationExtensionTest { @Test public void 
testConfigParse() throws ClassNotFoundException, SAXException, InstantiationException, IllegalAccessException, IOException { final XmlConfiguration configuration = new XmlConfiguration(this.getClass().getResource("/ehcache-107.xml")); - final DefaultJsr107Service jsr107Service = new DefaultJsr107Service(ServiceUtils.findSingletonAmongst(Jsr107Configuration.class, configuration.getServiceCreationConfigurations().toArray())); + final DefaultJsr107Service jsr107Service = new DefaultJsr107Service(ServiceUtils.findSingletonAmongst(Jsr107Configuration.class, configuration.getServiceCreationConfigurations())); final CacheManager cacheManager = new EhcacheManager(configuration, Collections.singletonList(jsr107Service)); cacheManager.init(); @@ -68,7 +67,7 @@ public void testConfigParse() throws ClassNotFoundException, SAXException, Insta @Test public void testXmlExample() throws ClassNotFoundException, SAXException, InstantiationException, IOException, IllegalAccessException { XmlConfiguration config = new XmlConfiguration(ParsesConfigurationExtensionTest.class.getResource("/ehcache-example.xml")); - final DefaultJsr107Service jsr107Service = new DefaultJsr107Service(ServiceUtils.findSingletonAmongst(Jsr107Configuration.class, config.getServiceCreationConfigurations().toArray())); + final DefaultJsr107Service jsr107Service = new DefaultJsr107Service(ServiceUtils.findSingletonAmongst(Jsr107Configuration.class, config.getServiceCreationConfigurations())); final CacheManager cacheManager = new EhcacheManager(config, Collections.singletonList(jsr107Service)); cacheManager.init(); @@ -83,9 +82,9 @@ public void testXmlExample() throws ClassNotFoundException, SAXException, Instan final CacheRuntimeConfiguration runtimeConfiguration = productCache.getRuntimeConfiguration(); assertThat(runtimeConfiguration.getResourcePools().getPoolForResource(ResourceType.Core.HEAP).getSize(), equalTo(200L)); - final Expiry expiry = runtimeConfiguration.getExpiry(); - assertThat(expiry.getClass().getName(), equalTo("org.ehcache.expiry.Expirations$TimeToIdleExpiry")); - assertThat(expiry.getExpiryForAccess(42L, null), equalTo(new Duration(2, TimeUnit.MINUTES))); + final ExpiryPolicy expiry = runtimeConfiguration.getExpiryPolicy(); + assertThat(expiry.getClass().getName(), equalTo("org.ehcache.config.builders.ExpiryPolicyBuilder$TimeToIdleExpiryPolicy")); + assertThat(expiry.getExpiryForAccess(42L, null), equalTo(Duration.ofMinutes(2))); assertThat(runtimeConfiguration.getEvictionAdvisor(), instanceOf(com.pany.ehcache.MyEvictionAdvisor.class)); } diff --git a/107/src/test/java/org/ehcache/docs/EhCache107ConfigurationIntegrationDocTest.java b/107/src/test/java/org/ehcache/docs/EhCache107ConfigurationIntegrationDocTest.java index be00ddb591..d54c7ae2b7 100644 --- a/107/src/test/java/org/ehcache/docs/EhCache107ConfigurationIntegrationDocTest.java +++ b/107/src/test/java/org/ehcache/docs/EhCache107ConfigurationIntegrationDocTest.java @@ -22,7 +22,6 @@ import org.ehcache.config.ResourceType; import org.ehcache.config.builders.ResourcePoolsBuilder; import org.ehcache.core.config.DefaultConfiguration; -import org.ehcache.core.internal.util.ValueSuppliers; import org.ehcache.impl.config.persistence.DefaultPersistenceConfiguration; import org.ehcache.jsr107.Eh107Configuration; import org.ehcache.jsr107.EhcacheCachingProvider; @@ -39,7 +38,6 @@ import java.io.File; import java.util.Random; -import java.util.concurrent.TimeUnit; import javax.cache.Cache; import javax.cache.CacheManager; @@ -129,12 +127,12 @@ public void 
testGettingToEhcacheConfiguration() { long nanoTime = System.nanoTime(); LOGGER.info("Seeding random with {}", nanoTime); Random random = new Random(nanoTime); - assertThat(runtimeConfiguration.getExpiry().getExpiryForCreation(random.nextLong(), Long.toOctalString(random.nextLong())), - equalTo(org.ehcache.expiry.Duration.INFINITE)); - assertThat(runtimeConfiguration.getExpiry().getExpiryForAccess(random.nextLong(), - ValueSuppliers.supplierOf(Long.toOctalString(random.nextLong()))), nullValue()); - assertThat(runtimeConfiguration.getExpiry().getExpiryForUpdate(random.nextLong(), - ValueSuppliers.supplierOf(Long.toOctalString(random.nextLong())), Long.toOctalString(random.nextLong())), nullValue()); + assertThat(runtimeConfiguration.getExpiryPolicy().getExpiryForCreation(random.nextLong(), Long.toOctalString(random.nextLong())), + equalTo(org.ehcache.expiry.ExpiryPolicy.INFINITE)); + assertThat(runtimeConfiguration.getExpiryPolicy().getExpiryForAccess(random.nextLong(), + () -> Long.toOctalString(random.nextLong())), nullValue()); + assertThat(runtimeConfiguration.getExpiryPolicy().getExpiryForUpdate(random.nextLong(), + () -> Long.toOctalString(random.nextLong()), Long.toOctalString(random.nextLong())), nullValue()); } @Test @@ -201,7 +199,7 @@ public void testWithoutEhcacheExplicitDependencyAndNoCodeChanges() throws Except CacheRuntimeConfiguration foosEhcacheConfig = (CacheRuntimeConfiguration)foosCache.getConfiguration( Eh107Configuration.class).unwrap(CacheRuntimeConfiguration.class); Client client1 = new Client("client1", 1); - foosEhcacheConfig.getExpiry().getExpiryForCreation(42L, client1).getLength(); // <8> + foosEhcacheConfig.getExpiryPolicy().getExpiryForCreation(42L, client1).toMinutes(); // <8> CompleteConfiguration foosConfig = foosCache.getConfiguration(CompleteConfiguration.class); @@ -214,8 +212,8 @@ public void testWithoutEhcacheExplicitDependencyAndNoCodeChanges() throws Except } // end::jsr107SupplementWithTemplatesExample[] assertThat(ehcacheConfig.getResourcePools().getPoolForResource(ResourceType.Core.HEAP).getSize(), is(20L)); - assertThat(foosEhcacheConfig.getExpiry().getExpiryForCreation(42L, client1), - is(new org.ehcache.expiry.Duration(2, TimeUnit.MINUTES))); + assertThat(foosEhcacheConfig.getExpiryPolicy().getExpiryForCreation(42L, client1), + is(java.time.Duration.ofMinutes(2))); } @Test diff --git a/107/src/test/java/org/ehcache/jsr107/CacheResourcesTest.java b/107/src/test/java/org/ehcache/jsr107/CacheResourcesTest.java index ffa0c06f49..063366662a 100644 --- a/107/src/test/java/org/ehcache/jsr107/CacheResourcesTest.java +++ b/107/src/test/java/org/ehcache/jsr107/CacheResourcesTest.java @@ -19,12 +19,12 @@ import static org.junit.Assert.fail; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.withSettings; import javax.cache.configuration.CacheEntryListenerConfiguration; import org.ehcache.jsr107.internal.Jsr107CacheLoaderWriter; import org.junit.Test; -import org.mockito.internal.creation.MockSettingsImpl; import java.io.Closeable; import java.util.HashMap; @@ -37,7 +37,7 @@ public class CacheResourcesTest { public void testRegisterDeregisterAfterClose() { Map, ListenerResources> emptyMap = emptyMap(); CacheResources cacheResources = new CacheResources<>("cache", null, null, emptyMap); - cacheResources.closeResources(new MultiCacheException()); + cacheResources.closeResources(); try { cacheResources.registerCacheEntryListener(mock(CacheEntryListenerConfiguration.class)); @@ -57,8 +57,8 @@ 
public void testRegisterDeregisterAfterClose() { @SuppressWarnings("unchecked") @Test public void closesAllResources() throws Exception { - Jsr107CacheLoaderWriter loaderWriter = mock(Jsr107CacheLoaderWriter.class, new MockSettingsImpl<>().extraInterfaces(Closeable.class)); - Eh107Expiry expiry = mock(Eh107Expiry.class, new MockSettingsImpl<>().extraInterfaces(Closeable.class)); + Jsr107CacheLoaderWriter loaderWriter = mock(Jsr107CacheLoaderWriter.class, withSettings().extraInterfaces(Closeable.class)); + Eh107Expiry expiry = mock(Eh107Expiry.class, withSettings().extraInterfaces(Closeable.class)); CacheEntryListenerConfiguration listenerConfiguration = mock(CacheEntryListenerConfiguration.class); ListenerResources listenerResources = mock(ListenerResources.class); @@ -67,7 +67,7 @@ public void closesAllResources() throws Exception { map.put(listenerConfiguration, listenerResources); CacheResources cacheResources = new CacheResources<>("cache", loaderWriter, expiry, map); - cacheResources.closeResources(new MultiCacheException()); + cacheResources.closeResources(); verify((Closeable) loaderWriter).close(); verify((Closeable) expiry).close(); diff --git a/107/src/test/java/org/ehcache/jsr107/ConfigStatsManagementActivationTest.java b/107/src/test/java/org/ehcache/jsr107/ConfigStatsManagementActivationTest.java index 16db6abf35..a9d90e53f3 100644 --- a/107/src/test/java/org/ehcache/jsr107/ConfigStatsManagementActivationTest.java +++ b/107/src/test/java/org/ehcache/jsr107/ConfigStatsManagementActivationTest.java @@ -202,7 +202,7 @@ public void basicJsr107StillWorks() throws Exception { Cache cache = cacheManager.createCache("cache", configuration); @SuppressWarnings("unchecked") - Eh107Configuration eh107Configuration = cache.getConfiguration(Eh107Configuration.class); + Eh107Configuration eh107Configuration = cache.getConfiguration(Eh107Configuration.class); assertThat(eh107Configuration.isManagementEnabled(), is(true)); assertThat(eh107Configuration.isStatisticsEnabled(), is(true)); diff --git a/107/src/test/java/org/ehcache/jsr107/ConfigurationMergerTest.java b/107/src/test/java/org/ehcache/jsr107/ConfigurationMergerTest.java index 2f20b29728..b386b67620 100644 --- a/107/src/test/java/org/ehcache/jsr107/ConfigurationMergerTest.java +++ b/107/src/test/java/org/ehcache/jsr107/ConfigurationMergerTest.java @@ -17,16 +17,13 @@ package org.ehcache.jsr107; import org.ehcache.config.builders.CacheConfigurationBuilder; +import org.ehcache.config.builders.ExpiryPolicyBuilder; import org.ehcache.core.spi.service.ServiceUtils; import org.ehcache.impl.config.copy.DefaultCopierConfiguration; import org.ehcache.impl.config.copy.DefaultCopyProviderConfiguration; import org.ehcache.impl.config.loaderwriter.DefaultCacheLoaderWriterConfiguration; -import org.ehcache.expiry.Duration; -import org.ehcache.expiry.Expirations; -import org.ehcache.expiry.Expiry; import org.ehcache.impl.copy.IdentityCopier; import org.ehcache.jsr107.config.Jsr107Configuration; -import org.ehcache.jsr107.config.Jsr107Service; import org.ehcache.jsr107.internal.DefaultJsr107Service; import org.ehcache.spi.loaderwriter.CacheLoaderWriter; import org.ehcache.spi.service.ServiceConfiguration; @@ -38,9 +35,10 @@ import org.mockito.internal.creation.MockSettingsImpl; import java.io.Closeable; +import java.time.Duration; import java.util.Collection; -import java.util.concurrent.TimeUnit; +import javax.cache.CacheException; import javax.cache.configuration.CacheEntryListenerConfiguration; import javax.cache.configuration.Factory; import 
javax.cache.configuration.MutableConfiguration; @@ -53,7 +51,6 @@ import static org.ehcache.config.builders.CacheConfigurationBuilder.newCacheConfigurationBuilder; import static org.ehcache.config.builders.ResourcePoolsBuilder.heap; -import static org.ehcache.core.internal.util.ValueSuppliers.supplierOf; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; @@ -90,7 +87,7 @@ public void mergeConfigNoTemplateNoLoaderWriter() { MutableConfiguration configuration = new MutableConfiguration<>(); ConfigurationMerger.ConfigHolder configHolder = merger.mergeConfigurations("cache", configuration); - assertThat(configHolder.cacheResources.getExpiryPolicy().getExpiryForCreation(42L, "Yay!"), is(Duration.INFINITE)); + assertThat(configHolder.cacheResources.getExpiryPolicy().getExpiryForCreation(42L, "Yay!"), is(org.ehcache.expiry.ExpiryPolicy.INFINITE)); assertThat(configHolder.cacheResources.getCacheLoaderWriter(), nullValue()); assertThat(configHolder.useEhcacheLoaderWriter, is(false)); @@ -114,8 +111,8 @@ public void jsr107ExpiryGetsRegistered() { ConfigurationMerger.ConfigHolder configHolder = merger.mergeConfigurations("Cache", configuration); assertThat(factory.called, is(true)); - Expiry resourcesExpiry = configHolder.cacheResources.getExpiryPolicy(); - Expiry configExpiry = configHolder.cacheConfiguration.getExpiry(); + org.ehcache.expiry.ExpiryPolicy resourcesExpiry = configHolder.cacheResources.getExpiryPolicy(); + org.ehcache.expiry.ExpiryPolicy configExpiry = configHolder.cacheConfiguration.getExpiryPolicy(); assertThat(configExpiry, sameInstance(resourcesExpiry)); } @@ -165,7 +162,7 @@ public void loadsTemplateWhenNameFound() throws Exception { public void jsr107ExpiryGetsOverriddenByTemplate() throws Exception { when(jsr107Service.getTemplateNameForCache("cache")).thenReturn("cacheTemplate"); when(xmlConfiguration.newCacheConfigurationBuilderFromTemplate("cacheTemplate", Object.class, Object.class)).thenReturn( - newCacheConfigurationBuilder(Object.class, Object.class, heap(10)).withExpiry(Expirations.timeToLiveExpiration(new Duration(5, TimeUnit.MINUTES))) + newCacheConfigurationBuilder(Object.class, Object.class, heap(10)).withExpiry(ExpiryPolicyBuilder.timeToLiveExpiration(Duration.ofMinutes(5))) ); MutableConfiguration configuration = new MutableConfiguration<>(); @@ -176,9 +173,9 @@ public void jsr107ExpiryGetsOverriddenByTemplate() throws Exception { assertThat(factory.called, is(false)); Eh107Expiry expiryPolicy = configHolder.cacheResources.getExpiryPolicy(); - Expiry expiry = configHolder.cacheConfiguration.getExpiry(); - assertThat(expiryPolicy.getExpiryForAccess(42, supplierOf("Yay")), is(expiry.getExpiryForAccess(42, supplierOf("Yay")))); - assertThat(expiryPolicy.getExpiryForUpdate(42, supplierOf("Yay"), "Lala"), is(expiry.getExpiryForUpdate(42, supplierOf("Yay"), "Lala"))); + org.ehcache.expiry.ExpiryPolicy expiry = configHolder.cacheConfiguration.getExpiryPolicy(); + assertThat(expiryPolicy.getExpiryForAccess(42, () -> "Yay"), is(expiry.getExpiryForAccess(42, () -> "Yay"))); + assertThat(expiryPolicy.getExpiryForUpdate(42, () -> "Yay", "Lala"), is(expiry.getExpiryForUpdate(42, () -> "Yay", "Lala"))); assertThat(expiryPolicy.getExpiryForCreation(42, "Yay"), is(expiry.getExpiryForCreation(42, "Yay"))); } @@ -218,7 +215,7 @@ public void jsr107StoreByValueGetsOverriddenByTemplate() throws Exception { Collection> serviceConfigurations = 
configHolder.cacheConfiguration.getServiceConfigurations(); for (ServiceConfiguration serviceConfiguration : serviceConfigurations) { if (serviceConfiguration instanceof DefaultCopierConfiguration) { - DefaultCopierConfiguration copierConfig = (DefaultCopierConfiguration)serviceConfiguration; + DefaultCopierConfiguration copierConfig = (DefaultCopierConfiguration)serviceConfiguration; if(copierConfig.getClazz().isAssignableFrom(IdentityCopier.class)) storeByValue = false; break; @@ -240,7 +237,7 @@ public void jsr107LoaderInitFailureClosesExpiry() throws Exception { try { merger.mergeConfigurations("cache", configuration); fail("Loader factory should have thrown"); - } catch (MultiCacheException mce) { + } catch (CacheException mce) { verify((Closeable) expiryPolicy).close(); } } @@ -259,7 +256,7 @@ public void jsr107ListenerFactoryInitFailureClosesExpiryLoader() throws Exceptio try { merger.mergeConfigurations("cache", configuration); fail("Loader factory should have thrown"); - } catch (MultiCacheException mce) { + } catch (CacheException mce) { verify((Closeable) expiryPolicy).close(); verify((Closeable) loader).close(); } @@ -311,7 +308,7 @@ public void setWriteThroughWithoutWriterFails() { @Test public void jsr107DefaultEh107IdentityCopierForImmutableTypes() { XmlConfiguration xmlConfiguration = new XmlConfiguration(getClass().getResource("/ehcache-107-copiers-immutable-types.xml")); - final DefaultJsr107Service jsr107Service = new DefaultJsr107Service(ServiceUtils.findSingletonAmongst(Jsr107Configuration.class, xmlConfiguration.getServiceCreationConfigurations().toArray())); + DefaultJsr107Service jsr107Service = new DefaultJsr107Service(ServiceUtils.findSingletonAmongst(Jsr107Configuration.class, xmlConfiguration.getServiceCreationConfigurations())); merger = new ConfigurationMerger(xmlConfiguration, jsr107Service, mock(Eh107CacheLoaderWriterProvider.class)); MutableConfiguration stringCacheConfiguration = new MutableConfiguration<>(); @@ -349,7 +346,7 @@ public void jsr107DefaultEh107IdentityCopierForImmutableTypes() { @Test public void jsr107DefaultEh107IdentityCopierForImmutableTypesWithCMLevelDefaults() { XmlConfiguration xmlConfiguration = new XmlConfiguration(getClass().getResource("/ehcache-107-immutable-types-cm-level-copiers.xml")); - final DefaultJsr107Service jsr107Service = new DefaultJsr107Service(ServiceUtils.findSingletonAmongst(Jsr107Configuration.class, xmlConfiguration.getServiceCreationConfigurations().toArray())); + DefaultJsr107Service jsr107Service = new DefaultJsr107Service(ServiceUtils.findSingletonAmongst(Jsr107Configuration.class, xmlConfiguration.getServiceCreationConfigurations())); merger = new ConfigurationMerger(xmlConfiguration, jsr107Service, mock(Eh107CacheLoaderWriterProvider.class)); MutableConfiguration stringCacheConfiguration = new MutableConfiguration<>(); @@ -386,7 +383,7 @@ private static void assertDefaultCopier(Collection> serv for (ServiceConfiguration serviceConfiguration : serviceConfigurations) { if (serviceConfiguration instanceof DefaultCopierConfiguration) { noCopierConfigPresent = true; - DefaultCopierConfiguration copierConfig = (DefaultCopierConfiguration)serviceConfiguration; + DefaultCopierConfiguration copierConfig = (DefaultCopierConfiguration)serviceConfiguration; assertThat(copierConfig.getClazz().isAssignableFrom(Eh107IdentityCopier.class), is(true)); } } @@ -406,6 +403,7 @@ private RecordingFactory factoryOf(final T instance) { } private static class RecordingFactory implements Factory { + private static final long 
serialVersionUID = 1L; private final T instance; boolean called; @@ -421,6 +419,8 @@ public T create() { } private static class ThrowingCacheEntryListenerConfiguration implements CacheEntryListenerConfiguration { + private static final long serialVersionUID = 1L; + @Override public Factory> getCacheEntryListenerFactory() { throw new UnsupportedOperationException("BOOM"); diff --git a/107/src/test/java/org/ehcache/jsr107/Eh107CacheTypeTest.java b/107/src/test/java/org/ehcache/jsr107/Eh107CacheTypeTest.java index c633988975..ae4802d6af 100644 --- a/107/src/test/java/org/ehcache/jsr107/Eh107CacheTypeTest.java +++ b/107/src/test/java/org/ehcache/jsr107/Eh107CacheTypeTest.java @@ -42,28 +42,26 @@ public void testCompileTimeTypeSafety() throws Exception { cache.put(1l, "one"); cache.put(2l, "two"); - Configuration cache1CompleteConf = cache.getConfiguration(Configuration.class); + Configuration cache1CompleteConf = cache.getConfiguration(Configuration.class); //This ensures that we have compile time type safety, i.e when configuration does not have types defined but // what you get cache as should work. - assertThat((Class)cache1CompleteConf.getKeyType(), is(equalTo(Object.class))); - assertThat((Class)cache1CompleteConf.getValueType(), is(equalTo(Object.class))); + assertThat(cache1CompleteConf.getKeyType(), is(equalTo(Object.class))); + assertThat(cache1CompleteConf.getValueType(), is(equalTo(Object.class))); assertThat(cache.get(1l), is(equalTo("one"))); assertThat(cache.get(2l), is(equalTo("two"))); - javax.cache.Cache second = cacheManager.getCache("cache1"); + javax.cache.Cache second = cacheManager.getCache("cache1"); second.put("3","three"); - assertThat((String)second.get("3"), is(equalTo("three"))); + assertThat(second.get("3"), is(equalTo("three"))); cacheManager.destroyCache("cache1"); cacheManager.close(); - } - @Test - public void testRunTimeTypeSafety() throws Exception { + public void testRunTimeTypeLaxity() throws Exception { CachingProvider provider = Caching.getCachingProvider(); javax.cache.CacheManager cacheManager = provider.getCacheManager(this.getClass().getResource("/ehcache-107-types.xml").toURI(), getClass().getClassLoader()); @@ -79,9 +77,6 @@ public void testRunTimeTypeSafety() throws Exception { try { cacheManager.getCache("cache1"); - fail("Caches with runtime types should throw illegal argument exception when different types are used in getcache"); - } catch (IllegalArgumentException e) { - //Empty block as nothing is required to be tested } finally { cacheManager.destroyCache("cache1"); cacheManager.close(); diff --git a/107/src/test/java/org/ehcache/jsr107/Eh107XmlIntegrationTest.java b/107/src/test/java/org/ehcache/jsr107/Eh107XmlIntegrationTest.java index 670dfdf9b7..0563e120a7 100644 --- a/107/src/test/java/org/ehcache/jsr107/Eh107XmlIntegrationTest.java +++ b/107/src/test/java/org/ehcache/jsr107/Eh107XmlIntegrationTest.java @@ -76,6 +76,8 @@ public void setUp() throws Exception { public void test107CacheCanReturnCompleteConfigurationWhenNonePassedIn() { CacheManager cacheManager = cachingProvider.getCacheManager(); Cache cache = cacheManager.createCache("cacheWithoutCompleteConfig", new Configuration() { + private static final long serialVersionUID = 1L; + @Override public Class getKeyType() { return Long.class; diff --git a/107/src/test/java/org/ehcache/jsr107/Jsr107CacheParserIT.java b/107/src/test/java/org/ehcache/jsr107/Jsr107CacheParserIT.java new file mode 100644 index 0000000000..b825f6ed69 --- /dev/null +++ 
b/107/src/test/java/org/ehcache/jsr107/Jsr107CacheParserIT.java @@ -0,0 +1,37 @@ +/* + * Copyright Terracotta, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.ehcache.jsr107; + +import org.ehcache.config.Configuration; +import org.ehcache.xml.XmlConfiguration; +import org.ehcache.xml.XmlConfigurationTest; +import org.ehcache.xml.exceptions.XmlConfigurationException; +import org.junit.Test; + +import java.net.URL; + +/** + * Jsr107CacheParserIT + */ +public class Jsr107CacheParserIT { + + @Test(expected = XmlConfigurationException.class) + public void testJsr107CacheXmlTranslationToString() { + URL resource = XmlConfigurationTest.class.getResource("/ehcache-107.xml"); + Configuration config = new XmlConfiguration(resource); + XmlConfiguration xmlConfig = new XmlConfiguration(config); + } +} diff --git a/107/src/test/java/org/ehcache/jsr107/UnwrapTest.java b/107/src/test/java/org/ehcache/jsr107/UnwrapTest.java index f9926e51c5..e89a692516 100644 --- a/107/src/test/java/org/ehcache/jsr107/UnwrapTest.java +++ b/107/src/test/java/org/ehcache/jsr107/UnwrapTest.java @@ -77,7 +77,6 @@ public void testCacheEntryEventUnwrap() { assertThat(cacheEntryEvent.unwrap(cacheEntryEvent.getClass()), is(instanceOf(Eh107CacheEntryEvent.NormalEvent.class))); } - @SuppressWarnings("unchecked") private class EhEvent implements CacheEvent { @Override public org.ehcache.event.EventType getType() { @@ -99,8 +98,9 @@ public String getOldValue() { throw new UnsupportedOperationException("Implement me!"); } + @SuppressWarnings("deprecation") @Override - public org.ehcache.Cache getSource() { + public org.ehcache.Cache getSource() { throw new UnsupportedOperationException("Implement me!"); } } diff --git a/107/src/test/java/org/ehcache/jsr107/internal/Jsr107CacheConfigurationParserTest.java b/107/src/test/java/org/ehcache/jsr107/internal/Jsr107CacheConfigurationParserTest.java new file mode 100644 index 0000000000..85d24f01da --- /dev/null +++ b/107/src/test/java/org/ehcache/jsr107/internal/Jsr107CacheConfigurationParserTest.java @@ -0,0 +1,36 @@ +/* + * Copyright Terracotta, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.ehcache.jsr107.internal; + +import org.ehcache.jsr107.config.ConfigurationElementState; +import org.ehcache.jsr107.config.Jsr107CacheConfiguration; +import org.ehcache.xml.exceptions.XmlConfigurationException; +import org.junit.Test; + +/** + * Jsr107CacheConfigurationParserTest + */ +public class Jsr107CacheConfigurationParserTest { + + @Test(expected = XmlConfigurationException.class) + public void testTranslateServiceCreationConfigurationWithStatisticsManagementEnabled() { + Jsr107CacheConfigurationParser configTranslator = new Jsr107CacheConfigurationParser(); + Jsr107CacheConfiguration cacheConfiguration = + new Jsr107CacheConfiguration(ConfigurationElementState.ENABLED, ConfigurationElementState.DISABLED); + configTranslator.unparseServiceConfiguration(cacheConfiguration); + } + +} diff --git a/107/src/test/java/org/ehcache/jsr107/internal/Jsr107ServiceConfigurationParserTest.java b/107/src/test/java/org/ehcache/jsr107/internal/Jsr107ServiceConfigurationParserTest.java new file mode 100644 index 0000000000..508bbda7b4 --- /dev/null +++ b/107/src/test/java/org/ehcache/jsr107/internal/Jsr107ServiceConfigurationParserTest.java @@ -0,0 +1,46 @@ +/* + * Copyright Terracotta, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.ehcache.jsr107.internal; + +import org.ehcache.jsr107.config.ConfigurationElementState; +import org.ehcache.jsr107.config.Jsr107Configuration; +import org.ehcache.xml.exceptions.XmlConfigurationException; +import org.junit.Test; + +import java.util.HashMap; +import java.util.Map; + +/** + * Jsr107ServiceConfigurationParserTest + */ +public class Jsr107ServiceConfigurationParserTest { + + @Test(expected = XmlConfigurationException.class) + public void testTranslateServiceCreationConfiguration() { + Jsr107ServiceConfigurationParser configTranslator = new Jsr107ServiceConfigurationParser(); + + Map templateMap = new HashMap<>(); + templateMap.put("testCache", "simpleCacheTemplate"); + templateMap.put("testCache1", "simpleCacheTemplate1"); + boolean jsr107CompliantAtomics = true; + Jsr107Configuration serviceCreationConfiguration = + new Jsr107Configuration("tiny-template", templateMap, jsr107CompliantAtomics, + ConfigurationElementState.ENABLED, ConfigurationElementState.DISABLED); + + configTranslator.unparseServiceCreationConfiguration(serviceCreationConfiguration); + } + +} diff --git a/README.adoc b/README.adoc index 399bf6297e..f9de4e9bb5 100644 --- a/README.adoc +++ b/README.adoc @@ -1,32 +1,31 @@ = The Ehcache 3.x line is currently the development line. -Status of the build: image:https://ehcache.ci.cloudbees.com/buildStatus/icon?job=ehcache3[Ehcache@Cloudbees, link="https://ehcache.ci.cloudbees.com/job/ehcache3/"] +Status of the build: +[link="https://dev.azure.com/TerracottaCI/ehcache/_build/latest?definitionId=14"] +image::https://dev.azure.com/TerracottaCI/ehcache/_apis/build/status/ehcache3[Build Status] For more information, you might want to go check the https://github.com/ehcache/ehcache3/wiki[wiki]. 
-image:https://www.cloudbees.com/sites/default/files/styles/large/public/Button-Powered-by-CB.png?itok=uMDWINfY[Cloudbees, link="http://www.cloudbees.com/resources/foss"] - == Getting started with the new API For samples, documentation, and usage information, please see http://ehcache.org. == Current release -We released 3.4.0 on August 29th 2017. +We released 3.6.1 on September 21st 2018. -The https://github.com/ehcache/ehcache3/releases/tag/v3.4.0[release notes] contain the links to the artifacts and the documentation to help you get started. +The https://github.com/ehcache/ehcache3/releases/tag/v3.6.1[release notes] contain the links to the artifacts and the documentation to help you get started. -You should consider upgrading to 3.4.x as it does all 3.0.x, 3.1.x, 3.2.x and 3.3.x do and more with a fully compatible API. +You should consider upgrading to 3.6.x as it does everything the previous 3.x lines do and more, with a fully compatible API. The only thing to note compared to 3.0.x is that transactional support has been moved to a separate jar. == Current development & next release We are still working on the missing features of the clustering tier of Ehcache 3, which will be included in upcoming releases. -We are also considering moving to Java 8 support only for the upcoming 3.5.0. +Starting with version 3.5, Ehcache only supports Java 8 and later. -We may still do 3.3.x release to include all fixes that have been made on it, but this is now less a priority. -There is no longer any plan for a 3.0.x, 3.1.x or 3.2.x release. +Version 3.5 is now in maintenance mode. We no longer plan to release earlier versions. See the https://github.com/ehcache/ehcache3/milestones[milestones on GitHub] for more details on the current status. diff --git a/api/build.gradle b/api/build.gradle index 8119b013a6..540fd89a80 100644 --- a/api/build.gradle +++ b/api/build.gradle @@ -19,7 +19,3 @@ apply plugin: EhDeploy checkstyle { configFile = file("$projectDir/config/checkstyle.xml") } - -tasks.withType(JavaCompile) { - options.compilerArgs += ['-Werror'] -} diff --git a/api/src/main/java/org/ehcache/CachePersistenceException.java b/api/src/main/java/org/ehcache/CachePersistenceException.java index 04d73477dc..6b90c9dd2a 100644 --- a/api/src/main/java/org/ehcache/CachePersistenceException.java +++ b/api/src/main/java/org/ehcache/CachePersistenceException.java @@ -25,6 +25,8 @@ */ public class CachePersistenceException extends Exception { + private static final long serialVersionUID = -5858875151420107040L; + /** * Creates a {@code CachePersistenceException} with the provided message. * diff --git a/api/src/main/java/org/ehcache/ValueSupplier.java b/api/src/main/java/org/ehcache/ValueSupplier.java index 65a8a4b4bb..505813e4cc 100644 --- a/api/src/main/java/org/ehcache/ValueSupplier.java +++ b/api/src/main/java/org/ehcache/ValueSupplier.java @@ -22,7 +22,11 @@ * This indicates that the value needs to be computed before it can be retrieved, such as deserialization.
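
The ValueSupplier hunk starting here deprecates the interface in favour of java.util.function.Supplier and adds @FunctionalInterface, so any remaining call sites can at least shrink to lambdas. A small before/after sketch under those assumptions:

import java.util.function.Supplier;

final class SupplierMigrationSketch {
  @SuppressWarnings("deprecation")
  public static void main(String[] args) {
    String value = "payload";

    // Legacy shape: the Ehcache-specific supplier, now a lambda target.
    org.ehcache.ValueSupplier<String> legacy = () -> value;

    // Replacement shape: the plain JDK supplier used by ExpiryPolicy.
    Supplier<String> current = () -> value;

    System.out.println(legacy.value() + " / " + current.get());
  }
}
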
* * @param the value type + * + * @deprecated Now using {@code Supplier} for {@link org.ehcache.expiry.ExpiryPolicy} */ +@Deprecated +@FunctionalInterface public interface ValueSupplier { /** diff --git a/api/src/main/java/org/ehcache/config/CacheConfiguration.java b/api/src/main/java/org/ehcache/config/CacheConfiguration.java index d90c349792..a00f258d72 100644 --- a/api/src/main/java/org/ehcache/config/CacheConfiguration.java +++ b/api/src/main/java/org/ehcache/config/CacheConfiguration.java @@ -17,7 +17,8 @@ package org.ehcache.config; import org.ehcache.Cache; -import org.ehcache.expiry.Expiry; + +import org.ehcache.expiry.ExpiryPolicy; import org.ehcache.spi.service.ServiceConfiguration; import java.util.Collection; @@ -82,13 +83,25 @@ public interface CacheConfiguration { ClassLoader getClassLoader(); /** - * The {@link Expiry} rules for the {@link Cache}. + * The {@link org.ehcache.expiry.Expiry} rules for the {@link Cache}. *
<p>
* The {@code Expiry} cannot be null. * * @return the {@code Expiry} + * + * @deprecated Use {@link #getExpiryPolicy()} + */ + @Deprecated + org.ehcache.expiry.Expiry getExpiry(); + + /** + * The {@link ExpiryPolicy} rules for the {@link Cache}. + *
<p>
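
With getExpiry() deprecated in favour of the java.time based getExpiryPolicy() documented here, a typical caller migrates from Expirations and the old Duration to ExpiryPolicyBuilder and java.time.Duration. A sketch of that migration, assuming the builder APIs from the impl module; the cache alias and types are illustrative:

import java.time.Duration;

import org.ehcache.Cache;
import org.ehcache.CacheManager;
import org.ehcache.config.builders.CacheConfigurationBuilder;
import org.ehcache.config.builders.CacheManagerBuilder;
import org.ehcache.config.builders.ExpiryPolicyBuilder;
import org.ehcache.config.builders.ResourcePoolsBuilder;
import org.ehcache.expiry.ExpiryPolicy;

final class ExpiryPolicyMigrationSketch {
  public static void main(String[] args) {
    CacheManager manager = CacheManagerBuilder.newCacheManagerBuilder()
      .withCache("products", CacheConfigurationBuilder
        .newCacheConfigurationBuilder(Long.class, String.class, ResourcePoolsBuilder.heap(10))
        // replaces Expirations.timeToLiveExpiration(new Duration(5, TimeUnit.MINUTES))
        .withExpiry(ExpiryPolicyBuilder.timeToLiveExpiration(Duration.ofMinutes(5))))
      .build(true);
    try {
      Cache<Long, String> cache = manager.getCache("products", Long.class, String.class);
      // the replacement accessor introduced by this change set
      ExpiryPolicy<? super Long, ? super String> expiry =
        cache.getRuntimeConfiguration().getExpiryPolicy();
      System.out.println(expiry.getExpiryForCreation(42L, "value")); // PT5M
    } finally {
      manager.close();
    }
  }
}
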
+ * The {@code ExpiryPolicy} cannot be null. + * + * @return the {@code ExpiryPolicy} */ - Expiry getExpiry(); + ExpiryPolicy getExpiryPolicy(); /** * The {@link ResourcePools} for the {@link Cache}. diff --git a/api/src/main/java/org/ehcache/config/EvictionAdvisor.java b/api/src/main/java/org/ehcache/config/EvictionAdvisor.java index ca21460c7d..2413d303f5 100644 --- a/api/src/main/java/org/ehcache/config/EvictionAdvisor.java +++ b/api/src/main/java/org/ehcache/config/EvictionAdvisor.java @@ -22,6 +22,7 @@ * @param the key type for the cache * @param the value type for the cache */ +@FunctionalInterface public interface EvictionAdvisor { /** diff --git a/api/src/main/java/org/ehcache/expiry/Duration.java b/api/src/main/java/org/ehcache/expiry/Duration.java index 477d9ea41e..032baa4a72 100644 --- a/api/src/main/java/org/ehcache/expiry/Duration.java +++ b/api/src/main/java/org/ehcache/expiry/Duration.java @@ -19,7 +19,13 @@ /** * A time duration in a given {@link TimeUnit}. + * + * @see java.time.Duration + * @see ExpiryPolicy + * + * @deprecated Replaced with {@link java.time.Duration} */ +@Deprecated public final class Duration { /** diff --git a/api/src/main/java/org/ehcache/expiry/Expirations.java b/api/src/main/java/org/ehcache/expiry/Expirations.java index bb19093d9f..87ffbd8970 100644 --- a/api/src/main/java/org/ehcache/expiry/Expirations.java +++ b/api/src/main/java/org/ehcache/expiry/Expirations.java @@ -15,18 +15,22 @@ */ package org.ehcache.expiry; -import org.ehcache.ValueSupplier; +import java.util.Objects; /** * Utility class for getting predefined {@link Expiry} instances. */ +@Deprecated public final class Expirations { /** * Get an {@link Expiry} instance for a non expiring (ie. "eternal") cache. * * @return the no expiry instance + * + * @deprecated Use {@code org.ehcache.config.builders.ExpiryPolicyBuilder#noExpiration()} instead */ + @Deprecated public static Expiry noExpiration() { return NoExpiry.INSTANCE; } @@ -36,7 +40,10 @@ public static Expiry noExpiration() { * * @param timeToLive the TTL duration * @return a TTL expiry + * + * @deprecated Use {@code org.ehcache.config.builders.ExpiryPolicyBuilder#timeToLiveExpiration(java.time.Duration)} instead */ + @Deprecated public static Expiry timeToLiveExpiration(Duration timeToLive) { if (timeToLive == null) { throw new NullPointerException("Duration cannot be null"); @@ -49,7 +56,10 @@ public static Expiry timeToLiveExpiration(Duration timeToLive) { * * @param timeToIdle the TTI duration * @return a TTI expiry + * + * @deprecated Use {@code org.ehcache.config.builders.ExpiryPolicyBuilder#timeToIdleExpiration(java.time.Duration)} instead */ + @Deprecated public static Expiry timeToIdleExpiration(Duration timeToIdle) { if (timeToIdle == null) { throw new NullPointerException("Duration cannot be null"); @@ -65,7 +75,10 @@ public static Expiry timeToIdleExpiration(Duration timeToIdle) { * @param the key type for the cache * @param the value type for the cache * @return an {@link Expiry} builder + * + * @deprecated Use {@code org.ehcache.config.builders.ExpiryPolicyBuilder#expiry()} instead */ + @Deprecated public static ExpiryBuilder builder() { return new ExpiryBuilder<>(); } @@ -77,6 +90,7 @@ private Expirations() { /** * Simple implementation of the {@link Expiry} interface allowing to set constants to each expiry types. 
*/ + @Deprecated private static class BaseExpiry implements Expiry { private final Duration create; @@ -95,12 +109,12 @@ public Duration getExpiryForCreation(K key, V value) { } @Override - public Duration getExpiryForAccess(K key, ValueSupplier value) { + public Duration getExpiryForAccess(K key, org.ehcache.ValueSupplier value) { return access; } @Override - public Duration getExpiryForUpdate(K key, ValueSupplier oldValue, V newValue) { + public Duration getExpiryForUpdate(K key, org.ehcache.ValueSupplier oldValue, V newValue) { return update; } @@ -109,20 +123,20 @@ public boolean equals(final Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; - final BaseExpiry that = (BaseExpiry)o; + BaseExpiry that = (BaseExpiry)o; - if (access != null ? !access.equals(that.access) : that.access != null) return false; - if (create != null ? !create.equals(that.create) : that.create != null) return false; - if (update != null ? !update.equals(that.update) : that.update != null) return false; + if (!Objects.equals(access, that.access)) return false; + if (!Objects.equals(create, that.create)) return false; + if (!Objects.equals(update, that.update)) return false; return true; } @Override public int hashCode() { - int result = create != null ? create.hashCode() : 0; - result = 31 * result + (access != null ? access.hashCode() : 0); - result = 31 * result + (update != null ? update.hashCode() : 0); + int result = Objects.hashCode(create); + result = 31 * result + Objects.hashCode(access); + result = 31 * result + Objects.hashCode(update); return result; } @@ -136,18 +150,21 @@ public String toString() { } } + @Deprecated private static class TimeToLiveExpiry extends BaseExpiry { TimeToLiveExpiry(Duration ttl) { super(ttl, null, ttl); } } + @Deprecated private static class TimeToIdleExpiry extends BaseExpiry { TimeToIdleExpiry(Duration tti) { super(tti, tti, tti); } } + @Deprecated private static class NoExpiry extends BaseExpiry { private static final Expiry INSTANCE = new NoExpiry(); @@ -163,6 +180,7 @@ private NoExpiry() { * @param Key type of the cache entries * @param Value type of the cache entries */ + @Deprecated public static final class ExpiryBuilder { private Duration create = Duration.INFINITE; diff --git a/api/src/main/java/org/ehcache/expiry/Expiry.java b/api/src/main/java/org/ehcache/expiry/Expiry.java index b9a38f5847..546ee034d0 100644 --- a/api/src/main/java/org/ehcache/expiry/Expiry.java +++ b/api/src/main/java/org/ehcache/expiry/Expiry.java @@ -16,16 +16,14 @@ package org.ehcache.expiry; -import org.ehcache.ValueSupplier; - /** * A policy object that governs expiration for mappings in a {@link org.ehcache.Cache Cache}. *
<p>
- * Previous values are not accessible directly but are rather available through a {@link ValueSupplier value supplier} + * Previous values are not accessible directly but are rather available through a {@link org.ehcache.ValueSupplier value supplier} * to indicate that access can require computation (such as deserialization). *
<p>
* NOTE: Some cache configurations (eg. caches with eventual consistency) may use local (ie. non-consistent) state - * to decide whether to call {@link #getExpiryForUpdate(Object, ValueSupplier, Object)} vs. + * to decide whether to call {@link #getExpiryForUpdate(Object, org.ehcache.ValueSupplier, Object)} vs. * {@link #getExpiryForCreation(Object, Object)}. For these cache configurations it is advised to return the same * value for both of these methods *
<p>
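
The deprecated contract differs from its replacement only in its time and supplier types, so a legacy Expiry can be bridged by hand where needed; Ehcache performs an equivalent conversion itself when a deprecated Expiry is configured through the builders. A purely illustrative adapter, assuming the wildcard signatures of both interfaces and the accessors of the old Duration type:

import java.time.Duration;
import java.util.function.Supplier;

import org.ehcache.expiry.ExpiryPolicy;

@SuppressWarnings("deprecation")
final class LegacyExpiryAdapterSketch<K, V> implements ExpiryPolicy<K, V> {

  private final org.ehcache.expiry.Expiry<K, V> legacy;

  LegacyExpiryAdapterSketch(org.ehcache.expiry.Expiry<K, V> legacy) {
    this.legacy = legacy;
  }

  @Override
  public Duration getExpiryForCreation(K key, V value) {
    return convert(legacy.getExpiryForCreation(key, value));
  }

  @Override
  public Duration getExpiryForAccess(K key, Supplier<? extends V> value) {
    return convert(legacy.getExpiryForAccess(key, () -> value.get()));
  }

  @Override
  public Duration getExpiryForUpdate(K key, Supplier<? extends V> oldValue, V newValue) {
    return convert(legacy.getExpiryForUpdate(key, () -> oldValue.get(), newValue));
  }

  // Maps the deprecated Duration onto java.time; null still means
  // "leave the expiry unchanged" in both contracts.
  private static Duration convert(org.ehcache.expiry.Duration duration) {
    if (duration == null) {
      return null;
    } else if (duration.isInfinite()) {
      return ExpiryPolicy.INFINITE;
    }
    return Duration.ofNanos(duration.getTimeUnit().toNanos(duration.getLength()));
  }
}
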
@@ -35,7 +33,11 @@ * @param the value type for the cache * * @see Expirations + * @see ExpiryPolicy + * + * @deprecated Replaced with {@link ExpiryPolicy} that builds on the {@code java.time} types. */ +@Deprecated public interface Expiry { /** @@ -65,7 +67,7 @@ public interface Expiry { * @param value a value supplier for the accessed entry * @return an expiration {@code Duration}, {@code null} means unchanged */ - Duration getExpiryForAccess(K key, ValueSupplier value); + Duration getExpiryForAccess(K key, org.ehcache.ValueSupplier value); /** @@ -82,6 +84,6 @@ public interface Expiry { * @param newValue the new value of the entry * @return an expiration {@code Duration}, {@code null} means unchanged */ - Duration getExpiryForUpdate(K key, ValueSupplier oldValue, V newValue); + Duration getExpiryForUpdate(K key, org.ehcache.ValueSupplier oldValue, V newValue); } diff --git a/api/src/main/java/org/ehcache/expiry/ExpiryPolicy.java b/api/src/main/java/org/ehcache/expiry/ExpiryPolicy.java new file mode 100644 index 0000000000..c50c95abaa --- /dev/null +++ b/api/src/main/java/org/ehcache/expiry/ExpiryPolicy.java @@ -0,0 +1,113 @@ +/* + * Copyright Terracotta, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.ehcache.expiry; + +import java.time.Duration; +import java.util.function.Supplier; + +/** + * A policy object that governs expiration for mappings in a {@link org.ehcache.Cache Cache}. + *
<p>
+ * Previous values are not accessible directly but are rather available through a value {@code Supplier} + * to indicate that access can require computation (such as deserialization). + *
<p>
+ * {@link java.time.Duration#isNegative() Negative durations} are not supported; an expiry policy implementation returning such a + * duration will result in immediate expiry, as if the duration was {@link java.time.Duration#ZERO zero}. + *
<p>
+ * NOTE: Some cache configurations (eg. caches with eventual consistency) may use local (ie. non-consistent) state + * to decide whether to call {@link #getExpiryForUpdate(Object, Supplier, Object)} vs. + * {@link #getExpiryForCreation(Object, Object)}. For these cache configurations it is advised to return the same + * value for both of these methods + * + * @param the key type for the cache + * @param the value type for the cache + * + */ +public interface ExpiryPolicy { + + /** + * A {@link Duration duration} that represents an infinite time. + */ + Duration INFINITE = Duration.ofNanos(Long.MAX_VALUE); + + /** + * An {@code ExpiryPolicy} that represents a no expiration policy + */ + ExpiryPolicy NO_EXPIRY = new ExpiryPolicy() { + @Override + public Duration getExpiryForCreation(Object key, Object value) { + return INFINITE; + } + + @Override + public Duration getExpiryForAccess(Object key, Supplier value) { + return null; + } + + @Override + public Duration getExpiryForUpdate(Object key, Supplier oldValue, Object newValue) { + return null; + } + }; + + /** + * Returns the lifetime of an entry when it is initially added to a {@link org.ehcache.Cache Cache}. + *
<p>
+ * This method must not return {@code null}. + *
<p>
+ * Exceptions thrown from this method will be swallowed and result in the expiry duration being + * {@link Duration#ZERO ZERO}. + * + * @param key the key of the newly added entry + * @param value the value of the newly added entry + * @return a non-null {@code Duration} + */ + Duration getExpiryForCreation(K key, V value); + + /** + * Returns the expiration {@link Duration duration} (relative to the current time) when an existing entry + * is accessed from a {@link org.ehcache.Cache Cache}. + *
<p>
+ * Returning {@code null} indicates that the expiration time remains unchanged. + *
<p>
+ * Exceptions thrown from this method will be swallowed and result in the expiry duration being + * {@link Duration#ZERO ZERO}. + * + * @param key the key of the accessed entry + * @param value a value supplier for the accessed entry + * @return an expiration {@code Duration}, {@code null} means unchanged + */ + Duration getExpiryForAccess(K key, Supplier value); + + + /** + * Returns the expiration {@link Duration duration} (relative to the current time) when an existing entry + * is updated in a {@link org.ehcache.Cache Cache}. + *
<p>
+ * Returning {@code null} indicates that the expiration time remains unchanged. + *
<p>
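
The contract spelled out in these paragraphs (a non-null duration at creation, null meaning "unchanged" on access and update, thrown exceptions degrading to Duration.ZERO) is straightforward to satisfy. A sketch of a policy that grants every entry a fixed lease and restarts it on writes, assuming the interface's wildcard supplier signatures:

import java.time.Duration;
import java.util.function.Supplier;

import org.ehcache.expiry.ExpiryPolicy;

// Every entry lives ten minutes from creation; reads leave the deadline
// untouched (null = unchanged) and every update restarts the clock.
final class ResetOnUpdatePolicySketch implements ExpiryPolicy<Long, String> {

  private static final Duration TTL = Duration.ofMinutes(10);

  @Override
  public Duration getExpiryForCreation(Long key, String value) {
    return TTL; // must never be null
  }

  @Override
  public Duration getExpiryForAccess(Long key, Supplier<? extends String> value) {
    return null; // keep whatever deadline is already in place
  }

  @Override
  public Duration getExpiryForUpdate(Long key, Supplier<? extends String> oldValue, String newValue) {
    return TTL; // a fresh lease on every write
  }
}
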
+ * Exceptions thrown from this method will be swallowed and result in the expiry duration being + * {@link Duration#ZERO ZERO}. + * + * @param key the key of the updated entry + * @param oldValue a value supplier for the previous value of the entry + * @param newValue the new value of the entry + * @return an expiration {@code Duration}, {@code null} means unchanged + */ + Duration getExpiryForUpdate(K key, Supplier oldValue, V newValue); + +} diff --git a/api/src/main/java/org/ehcache/expiry/package-info.java b/api/src/main/java/org/ehcache/expiry/package-info.java index 4ea3f314dd..5882311f93 100644 --- a/api/src/main/java/org/ehcache/expiry/package-info.java +++ b/api/src/main/java/org/ehcache/expiry/package-info.java @@ -15,6 +15,6 @@ */ /** - * {@link org.ehcache.expiry.Expiry Expiry} API of a {@link org.ehcache.Cache Cache}. + * {@link org.ehcache.expiry.ExpiryPolicy Expiry} API of a {@link org.ehcache.Cache Cache}. */ -package org.ehcache.expiry; \ No newline at end of file +package org.ehcache.expiry; diff --git a/api/src/main/java/org/ehcache/spi/loaderwriter/BulkCacheLoadingException.java b/api/src/main/java/org/ehcache/spi/loaderwriter/BulkCacheLoadingException.java index 9ae3e56139..421e5ef0d2 100644 --- a/api/src/main/java/org/ehcache/spi/loaderwriter/BulkCacheLoadingException.java +++ b/api/src/main/java/org/ehcache/spi/loaderwriter/BulkCacheLoadingException.java @@ -42,7 +42,7 @@ public class BulkCacheLoadingException extends CacheLoadingException { * @param failures the map of keys to failure encountered while loading * @param successes the map of keys successfully loaded and their associated value */ - public BulkCacheLoadingException(final Map failures, final Map successes) { + public BulkCacheLoadingException(Map failures, Map successes) { this.failures = Collections.unmodifiableMap(failures); this.successes = Collections.unmodifiableMap(successes); } @@ -60,7 +60,7 @@ public BulkCacheLoadingException(final Map failures, final Map failures, final Map successes) { + public BulkCacheLoadingException(String message, final Map failures, Map successes) { super(message); this.failures = Collections.unmodifiableMap(failures); this.successes = Collections.unmodifiableMap(successes); diff --git a/api/src/main/java/org/ehcache/spi/loaderwriter/BulkCacheWritingException.java b/api/src/main/java/org/ehcache/spi/loaderwriter/BulkCacheWritingException.java index 893a0993ad..38e591aa07 100644 --- a/api/src/main/java/org/ehcache/spi/loaderwriter/BulkCacheWritingException.java +++ b/api/src/main/java/org/ehcache/spi/loaderwriter/BulkCacheWritingException.java @@ -43,7 +43,7 @@ public class BulkCacheWritingException extends CacheWritingException { * @param failures the map of keys to failure encountered while loading * @param successes the map of keys successfully loaded and their associated value */ - public BulkCacheWritingException(final Map failures, final Set successes) { + public BulkCacheWritingException(Map failures, Set successes) { this.failures = Collections.unmodifiableMap(failures); this.successes = Collections.unmodifiableSet(successes); } @@ -68,11 +68,9 @@ public Set getSuccesses() { @Override public String getMessage() { - StringBuilder sb = new StringBuilder(); - sb.append("Failed keys :"); - for (Map.Entry entry : failures.entrySet()) { - sb.append("\n ").append(entry.getKey()).append(" : ").append(entry.getValue()); - } + StringBuilder sb = new StringBuilder(13 + failures.size() * 20); // try to guess the final size + sb.append("Failed keys:"); + failures.forEach((k, 
v) -> sb.append("\n ").append(k).append(" : ").append(v)); return sb.toString(); } diff --git a/api/src/main/java/org/ehcache/spi/loaderwriter/CacheLoaderWriter.java b/api/src/main/java/org/ehcache/spi/loaderwriter/CacheLoaderWriter.java index 599facaa75..d7c40c29e2 100644 --- a/api/src/main/java/org/ehcache/spi/loaderwriter/CacheLoaderWriter.java +++ b/api/src/main/java/org/ehcache/spi/loaderwriter/CacheLoaderWriter.java @@ -16,6 +16,7 @@ package org.ehcache.spi.loaderwriter; +import java.util.HashMap; import java.util.Map; /** @@ -75,7 +76,13 @@ public interface CacheLoaderWriter { * @throws BulkCacheLoadingException in case of partial success * @throws Exception in case no values could be loaded */ - Map loadAll(Iterable keys) throws BulkCacheLoadingException, Exception; + default Map loadAll(Iterable keys) throws BulkCacheLoadingException, Exception { + Map entries = new HashMap<>(); + for (K k : keys) { + entries.put(k, load(k)) ; + } + return entries; + } /** * Writes a single mapping. @@ -105,7 +112,11 @@ public interface CacheLoaderWriter { * @throws BulkCacheWritingException in case of partial success * @throws Exception in case no values could be written */ - void writeAll(Iterable> entries) throws BulkCacheWritingException, Exception; + default void writeAll(Iterable> entries) throws BulkCacheWritingException, Exception { + for (Map.Entry entry : entries) { + write(entry.getKey(), entry.getValue()); + } + } /** * Deletes a single mapping. @@ -127,6 +138,10 @@ public interface CacheLoaderWriter { * @throws BulkCacheWritingException in case of partial success * @throws Exception in case no values can be loaded */ - void deleteAll(Iterable keys) throws BulkCacheWritingException, Exception; + default void deleteAll(Iterable keys) throws BulkCacheWritingException, Exception { + for (K k : keys) { + delete(k); + } + } } diff --git a/api/src/main/java/org/ehcache/spi/loaderwriter/CacheLoaderWriterConfiguration.java b/api/src/main/java/org/ehcache/spi/loaderwriter/CacheLoaderWriterConfiguration.java new file mode 100644 index 0000000000..dbe07f0318 --- /dev/null +++ b/api/src/main/java/org/ehcache/spi/loaderwriter/CacheLoaderWriterConfiguration.java @@ -0,0 +1,34 @@ +/* + * Copyright Terracotta, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.ehcache.spi.loaderwriter; + +import org.ehcache.spi.service.ServiceConfiguration; + +/** + * {@link ServiceConfiguration} for the {@link CacheLoaderWriterProvider}. + *
<p>
+ * The {@code CacheLoaderWriterProvider} provides {@link CacheLoaderWriter} instances to a + * {@link org.ehcache.Cache Cache}. + */ +public interface CacheLoaderWriterConfiguration extends ServiceConfiguration<CacheLoaderWriterProvider> { + /** + * {@inheritDoc} + */ + @Override + default Class<CacheLoaderWriterProvider> getServiceType() { + return CacheLoaderWriterProvider.class; + } +} diff --git a/api/src/main/java/org/ehcache/spi/loaderwriter/CacheLoaderWriterProvider.java b/api/src/main/java/org/ehcache/spi/loaderwriter/CacheLoaderWriterProvider.java index 38e07d9418..34f77b1114 100644 --- a/api/src/main/java/org/ehcache/spi/loaderwriter/CacheLoaderWriterProvider.java +++ b/api/src/main/java/org/ehcache/spi/loaderwriter/CacheLoaderWriterProvider.java @@ -51,9 +51,28 @@ public interface CacheLoaderWriterProvider extends Service { * If the {@code CacheLoaderWriter} instance was user provided {@link java.io.Closeable#close() close} * will not be invoked. * + * + * @param alias the {@code Cache} alias in the {@code CacheManager} * @param cacheLoaderWriter the {@code CacheLoaderWriter} being released * @throws Exception when the release fails */ - void releaseCacheLoaderWriter(CacheLoaderWriter<?, ?> cacheLoaderWriter) throws Exception; + void releaseCacheLoaderWriter(String alias, CacheLoaderWriter<?, ?> cacheLoaderWriter) throws Exception; + + /** + * Returns the preconfigured {@link org.ehcache.spi.loaderwriter.CacheLoaderWriterConfiguration} for the given alias. + * + * @param alias the {@code Cache} alias in the {@code CacheManager} + * + * @return the {@code CacheLoaderWriterConfiguration} configured for the {@code Cache}, or {@code null} if there is none + */ + CacheLoaderWriterConfiguration getPreConfiguredCacheLoaderWriterConfig(String alias); + + /** + * Checks whether the {@link org.ehcache.spi.loaderwriter.CacheLoaderWriter} was provided through the JSR-107 API. + * + * @param alias the {@code Cache} alias in the {@code CacheManager} + * @return {@code true} if the {@code CacheLoaderWriter} was provided through the JSR-107 API, {@code false} otherwise + */ + boolean isLoaderJsrProvided(String alias); } diff --git a/api/src/main/java/org/ehcache/spi/persistence/PersistableResourceService.java b/api/src/main/java/org/ehcache/spi/persistence/PersistableResourceService.java index cede0787b1..41e9e12cd2 100644 --- a/api/src/main/java/org/ehcache/spi/persistence/PersistableResourceService.java +++ b/api/src/main/java/org/ehcache/spi/persistence/PersistableResourceService.java @@ -16,11 +16,9 @@ package org.ehcache.spi.persistence; -import org.ehcache.config.ResourcePool; import org.ehcache.config.ResourceType; import org.ehcache.CachePersistenceException; -import java.util.Collection; import org.ehcache.config.CacheConfiguration; import org.ehcache.spi.service.MaintainableService; import org.ehcache.spi.service.PluralService;
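A note on the default methods added to CacheLoaderWriter above: an implementor now only has to supply the three single-entry operations, and loadAll/writeAll/deleteAll fall back to looping over them. A minimal sketch of what that enables, not part of the patch, with a hypothetical class backed by an in-memory map:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import org.ehcache.spi.loaderwriter.CacheLoaderWriter;

// Hypothetical loader-writer backed by an in-memory map. The bulk
// operations loadAll/writeAll/deleteAll are inherited as defaults that
// simply loop over the three methods implemented here.
public class MapLoaderWriter implements CacheLoaderWriter<String, String> {

  private final Map<String, String> backend = new ConcurrentHashMap<>();

  @Override
  public String load(String key) {
    return backend.get(key);
  }

  @Override
  public void write(String key, String value) {
    backend.put(key, value);
  }

  @Override
  public void delete(String key) {
    backend.remove(key);
  }
}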
diff --git a/api/src/main/java/org/ehcache/spi/resilience/RecoveryStore.java b/api/src/main/java/org/ehcache/spi/resilience/RecoveryStore.java new file mode 100644 index 0000000000..ea8173a556 --- /dev/null +++ b/api/src/main/java/org/ehcache/spi/resilience/RecoveryStore.java @@ -0,0 +1,60 @@ +/* + * Copyright Terracotta, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.ehcache.spi.resilience; + +import org.ehcache.spi.resilience.StoreAccessException; + +/** + * A recovery store is used during entry cleanup done by the {@link ResilienceStrategy}. It is called + * when a {@link org.ehcache.core.spi.store.Store} fails on an entry. Implementations will in general want to get rid + * of this entry, which is what the recovery store is used for. + * <p>
+ * Note that the methods on this class will tend to fail since the store already failed once and caused the resilience + * strategy to be called. + * + * @param <K> store key type + */ +public interface RecoveryStore<K> { + + /** + * Obliterate all keys in a store. + * + * @throws StoreAccessException in case of store failure + */ + void obliterate() throws StoreAccessException; + + /** + * Obliterate a given key. + * + * @param key the key to obliterate + * @throws StoreAccessException in case of store failure + */ + void obliterate(K key) throws StoreAccessException; + + /** + * Obliterate a list of keys. + * + * @param keys keys to obliterate + * @throws StoreAccessException in case of store failure + */ + default void obliterate(Iterable<? extends K> keys) throws StoreAccessException { + for (K key : keys) { + obliterate(key); + } + } + +} diff --git a/api/src/main/java/org/ehcache/spi/resilience/ResilienceStrategy.java b/api/src/main/java/org/ehcache/spi/resilience/ResilienceStrategy.java new file mode 100644 index 0000000000..a709db85b0 --- /dev/null +++ b/api/src/main/java/org/ehcache/spi/resilience/ResilienceStrategy.java @@ -0,0 +1,178 @@ +/* + * Copyright Terracotta, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.ehcache.spi.resilience; + +import java.util.Map; + +import org.ehcache.Cache; +import org.ehcache.spi.loaderwriter.CacheLoaderWriter; + +/** + * A strategy for providing cache resilience in the face of failure. + * <p>
+ * An implementation of this interface is used by a cache to decide how to + * recover after internal components of the cache fail. Implementations of + * these methods are expected to take suitable recovery steps. They can then + * choose between allowing the operation to terminate successfully, or throwing an + * exception that will be propagated to the thread calling into the cache. + * <p>
+ * Resilience in this context refers only to resilience against cache failures + * and not to resilience against failures of any underlying + * {@link CacheLoaderWriter}. To this end writer or loader failures will only be + * reported to the strategy in the context of a coincident cache failure. + * Isolated writer and loader exceptions will be thrown directly. + * + * @param <K> the type of the keys used to access data within the cache + * @param <V> the type of the values held within the cache + * + * @author Chris Dennis + */ +public interface ResilienceStrategy<K, V> { + + /** + * Called when a {@link Cache#get(java.lang.Object)} fails on a cache without + * a cache loader due to an underlying store failure. + * + * @param key the key being retrieved + * @param e the triggered failure + * @return the value to return from the operation + */ + V getFailure(K key, StoreAccessException e); + + /** + * Called when a {@link Cache#containsKey(java.lang.Object)} fails due to an + * underlying store failure, and the resultant cache load operation also fails. + * + * @param key the key being queried + * @param e the triggered failure + * @return the value to return from the operation + */ + boolean containsKeyFailure(K key, StoreAccessException e); + + /** + * Called when a {@link Cache#put(java.lang.Object, java.lang.Object)} fails + * due to an underlying store failure. + * + * @param key the key being put + * @param value the value being put + * @param e the triggered failure + */ + void putFailure(K key, V value, StoreAccessException e); + + /** + * Called when a {@link Cache#remove(java.lang.Object)} fails due to an + * underlying store failure. + * + * @param key the key being removed + * @param e the triggered failure + */ + void removeFailure(K key, StoreAccessException e); + + /** + * Called when a {@link Cache#clear()} fails due to an underlying store + * failure. + * + * @param e the triggered failure + */ + void clearFailure(StoreAccessException e); + + /** + * Called when a cache iterator advancement fails due to an underlying store + * failure. + * + * @param e the triggered failure + * @return an entry to return on a failed iteration + */ + Cache.Entry<K, V> iteratorFailure(StoreAccessException e); + + /** + * Called when a {@link Cache#putIfAbsent(java.lang.Object, java.lang.Object)} + * fails due to an underlying store failure. + * <p>
+ * If it is known at the time of calling that the key is absent from the cache + * (and the writer if one is present) then {@code knownToBeAbsent} will be + * {@code true}. + * + * @param key the key being put + * @param value the value being put + * @param e the triggered failure + * @return the value to return from the operation + */ + V putIfAbsentFailure(K key, V value, StoreAccessException e); + + /** + * Called when a {@link Cache#remove(Object, Object)} + * fails due to an underlying store failure. + * + * @param key the key being removed + * @param value the value being removed + * @param e the triggered failure + * @return the value to return from the operation + */ + boolean removeFailure(K key, V value, StoreAccessException e); + + /** + * Called when a {@link Cache#replace(java.lang.Object, java.lang.Object)} + * fails due to an underlying store failure. + * + * @param key the key being replaced + * @param value the value being replaced + * @param e the triggered failure + * @return the value to return from the operation + */ + V replaceFailure(K key, V value, StoreAccessException e); + + /** + * Called when a {@link Cache#replace(java.lang.Object, java.lang.Object, java.lang.Object)} + * fails due to an underlying store failure. + * + * @param key the key being replaced + * @param value the expected value + * @param newValue the replacement value + * @param e the triggered failure + * @return the value to return from the operation + */ + boolean replaceFailure(K key, V value, V newValue, StoreAccessException e); + + /** + * Called when a {@link Cache#getAll(java.util.Set)} fails on a cache + * without a cache loader due to an underlying store failure. + * + * @param keys the keys being retrieved + * @param e the triggered failure + * @return the value to return from the operation + */ + Map<K, V> getAllFailure(Iterable<? extends K> keys, StoreAccessException e); + + /** + * Called when a {@link Cache#putAll(java.util.Map)} fails due to an + * underlying store failure. + * + * @param entries the entries being put + * @param e the triggered failure + */ + void putAllFailure(Map<? extends K, ? extends V> entries, StoreAccessException e); + + /** + * Called when a {@link Cache#removeAll(java.util.Set)} fails due to an + * underlying store failure. + * + * @param keys the keys being removed + * @param e the triggered failure + */ + void removeAllFailure(Iterable<? extends K> keys, StoreAccessException e); +}
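To make the new contract concrete, here is a minimal sketch of an implementation, not part of the patch: a hypothetical "evict and carry on" strategy that scrubs failing keys through the RecoveryStore and then answers as if the mapping were absent. The generic bounds follow the signatures as reconstructed above, and a production implementation would typically also log the passed StoreAccessException.

import java.util.Collections;
import java.util.Map;
import org.ehcache.Cache;
import org.ehcache.spi.resilience.RecoveryStore;
import org.ehcache.spi.resilience.ResilienceStrategy;
import org.ehcache.spi.resilience.StoreAccessException;

// Hypothetical strategy: obliterate the failed entries, swallow the
// failure, and behave as if the cache were empty for the operation.
public class EvictAndContinueStrategy<K, V> implements ResilienceStrategy<K, V> {

  private final RecoveryStore<K> store;

  public EvictAndContinueStrategy(RecoveryStore<K> store) {
    this.store = store;
  }

  // Best-effort cleanup: the store already failed once, so a second
  // failure here is likely and deliberately swallowed.
  private void cleanUp(K key) {
    try { store.obliterate(key); } catch (StoreAccessException ignored) { }
  }

  private void cleanUp(Iterable<? extends K> keys) {
    try { store.obliterate(keys); } catch (StoreAccessException ignored) { }
  }

  @Override public V getFailure(K key, StoreAccessException e) { cleanUp(key); return null; }
  @Override public boolean containsKeyFailure(K key, StoreAccessException e) { cleanUp(key); return false; }
  @Override public void putFailure(K key, V value, StoreAccessException e) { cleanUp(key); }
  @Override public void removeFailure(K key, StoreAccessException e) { cleanUp(key); }
  @Override public void clearFailure(StoreAccessException e) {
    try { store.obliterate(); } catch (StoreAccessException ignored) { }
  }
  @Override public Cache.Entry<K, V> iteratorFailure(StoreAccessException e) { return null; } // give up on the iteration
  @Override public V putIfAbsentFailure(K key, V value, StoreAccessException e) { cleanUp(key); return null; }
  @Override public boolean removeFailure(K key, V value, StoreAccessException e) { cleanUp(key); return false; }
  @Override public V replaceFailure(K key, V value, StoreAccessException e) { cleanUp(key); return null; }
  @Override public boolean replaceFailure(K key, V value, V newValue, StoreAccessException e) { cleanUp(key); return false; }
  @Override public Map<K, V> getAllFailure(Iterable<? extends K> keys, StoreAccessException e) {
    cleanUp(keys);
    return Collections.emptyMap(); // report misses for every key
  }
  @Override public void putAllFailure(Map<? extends K, ? extends V> entries, StoreAccessException e) { cleanUp(entries.keySet()); }
  @Override public void removeAllFailure(Iterable<? extends K> keys, StoreAccessException e) { cleanUp(keys); }
}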
diff --git a/api/src/main/java/org/ehcache/spi/resilience/ResilienceStrategyProvider.java b/api/src/main/java/org/ehcache/spi/resilience/ResilienceStrategyProvider.java new file mode 100644 index 0000000000..4265bfdd31 --- /dev/null +++ b/api/src/main/java/org/ehcache/spi/resilience/ResilienceStrategyProvider.java @@ -0,0 +1,60 @@ +/* + * Copyright Terracotta, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.ehcache.spi.resilience; + +import org.ehcache.config.CacheConfiguration; +import org.ehcache.spi.loaderwriter.CacheLoaderWriter; +import org.ehcache.spi.service.Service; +import org.ehcache.spi.service.ServiceConfiguration; + +/** + * A {@link Service} that creates {@link ResilienceStrategy} instances. + * <p>
+ * A {@code CacheManager} will use the {@link #createResilienceStrategy(String, CacheConfiguration, RecoveryStore)} and + * {@link #createResilienceStrategy(String, CacheConfiguration, RecoveryStore, CacheLoaderWriter)} methods to create + * {@code ResilienceStrategy} instances for each {@code Cache} it manages. + */ +public interface ResilienceStrategyProvider extends Service { + + /** + * Creates a {@code ResilienceStrategy} for the {@link org.ehcache.Cache Cache} with the given alias and configuration + * using the given {@link RecoveryStore}. + * + * @param alias the {@code Cache} alias in the {@code CacheManager} + * @param configuration the configuration for the associated cache + * @param recoveryStore the associated recovery store + * @param <K> the store's key type + * @param <V> the store's value type + * @return the {@code ResilienceStrategy} to be used by the {@code Cache} + */ + <K, V> ResilienceStrategy<K, V> createResilienceStrategy(String alias, CacheConfiguration<K, V> configuration, + RecoveryStore<K> recoveryStore); + + /** + * Creates a {@code ResilienceStrategy} for the {@link org.ehcache.Cache Cache} with the given alias and configuration + * using the given {@link RecoveryStore} and {@link CacheLoaderWriter}. + * + * @param alias the {@code Cache} alias in the {@code CacheManager} + * @param configuration the configuration for the associated cache + * @param recoveryStore the associated recovery store + * @param loaderWriter the associated loader-writer + * @param <K> the store's key type + * @param <V> the store's value type + * @return the {@code ResilienceStrategy} to be used by the {@code Cache} + */ + <K, V> ResilienceStrategy<K, V> createResilienceStrategy(String alias, CacheConfiguration<K, V> configuration, + RecoveryStore<K> recoveryStore, CacheLoaderWriter<? super K, V> loaderWriter); +} diff --git a/core/src/main/java/org/ehcache/core/spi/store/StoreAccessException.java b/api/src/main/java/org/ehcache/spi/resilience/StoreAccessException.java similarity index 97% rename from core/src/main/java/org/ehcache/core/spi/store/StoreAccessException.java rename to api/src/main/java/org/ehcache/spi/resilience/StoreAccessException.java index 6c4cf9cbd7..f3eb3e6337 100644 --- a/core/src/main/java/org/ehcache/core/spi/store/StoreAccessException.java +++ b/api/src/main/java/org/ehcache/spi/resilience/StoreAccessException.java @@ -14,7 +14,7 @@ * limitations under the License. */ -package org.ehcache.core.spi.store; +package org.ehcache.spi.resilience; /** * Generic exception used when an internal operation fails on a {@link org.ehcache.Cache}. diff --git a/api/src/main/java/org/ehcache/spi/resilience/package-info.java b/api/src/main/java/org/ehcache/spi/resilience/package-info.java new file mode 100644 index 0000000000..6e0322fec1 --- /dev/null +++ b/api/src/main/java/org/ehcache/spi/resilience/package-info.java @@ -0,0 +1,20 @@ +/* + * Copyright Terracotta, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * {@link org.ehcache.spi.resilience.ResilienceStrategy} API of a {@link org.ehcache.Cache Cache}.
+ */ +package org.ehcache.spi.resilience; diff --git a/api/src/main/java/org/ehcache/spi/service/OptionalServiceDependencies.java b/api/src/main/java/org/ehcache/spi/service/OptionalServiceDependencies.java new file mode 100644 index 0000000000..231f71de9d --- /dev/null +++ b/api/src/main/java/org/ehcache/spi/service/OptionalServiceDependencies.java @@ -0,0 +1,35 @@ +/* + * Copyright Terracotta, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.ehcache.spi.service; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * Annotation that allows a {@link Service} implementation to declare an optional dependency on other {@code Service}s. + */ +@Retention(RetentionPolicy.RUNTIME) +@Target(ElementType.TYPE) +public @interface OptionalServiceDependencies { + + /** + * Array of {@link Service} dependency classes + */ + String[] value(); +} diff --git a/api/src/test/java/org/ehcache/expiry/DurationTest.java b/api/src/test/java/org/ehcache/expiry/DurationTest.java index 635a288b90..3b694d479e 100644 --- a/api/src/test/java/org/ehcache/expiry/DurationTest.java +++ b/api/src/test/java/org/ehcache/expiry/DurationTest.java @@ -26,6 +26,7 @@ import org.junit.Test; +@SuppressWarnings("deprecation") public class DurationTest { @Test diff --git a/api/src/test/java/org/ehcache/expiry/ExpirationsTest.java b/api/src/test/java/org/ehcache/expiry/ExpirationsTest.java index 05d8d1988c..223957c3c5 100644 --- a/api/src/test/java/org/ehcache/expiry/ExpirationsTest.java +++ b/api/src/test/java/org/ehcache/expiry/ExpirationsTest.java @@ -21,17 +21,17 @@ import java.util.concurrent.TimeUnit; -import org.ehcache.ValueSupplier; import org.junit.Test; +@SuppressWarnings("deprecation") public class ExpirationsTest { @Test public void testNoExpiration() { Expiry expiry = Expirations.noExpiration(); assertThat(expiry.getExpiryForCreation(this, this), equalTo(Duration.INFINITE)); - assertThat(expiry.getExpiryForAccess(this, holderOf(this)), nullValue()); - assertThat(expiry.getExpiryForUpdate(this, holderOf(this), this), nullValue()); + assertThat(expiry.getExpiryForAccess(this, () -> this), nullValue()); + assertThat(expiry.getExpiryForUpdate(this, () -> this, this), nullValue()); } @Test @@ -39,17 +39,17 @@ public void testTTIExpiration() { Duration duration = new Duration(1L, TimeUnit.SECONDS); Expiry expiry = Expirations.timeToIdleExpiration(duration); assertThat(expiry.getExpiryForCreation(this, this), equalTo(duration)); - assertThat(expiry.getExpiryForAccess(this, holderOf(this)), equalTo(duration)); - assertThat(expiry.getExpiryForUpdate(this, holderOf(this), this), equalTo(duration)); + assertThat(expiry.getExpiryForAccess(this, () -> this), equalTo(duration)); + assertThat(expiry.getExpiryForUpdate(this, () -> this, this), equalTo(duration)); } @Test public void testTTLExpiration() { Duration duration = new Duration(1L, TimeUnit.SECONDS); Expiry expiry = 
Expirations.timeToLiveExpiration(duration); - assertThat(expiry.getExpiryForCreation(this, holderOf(this)), equalTo(duration)); - assertThat(expiry.getExpiryForAccess(this, holderOf(this)), nullValue()); - assertThat(expiry.getExpiryForUpdate(this, holderOf(this), this), equalTo(duration)); + assertThat(expiry.getExpiryForCreation(this, this), equalTo(duration)); + assertThat(expiry.getExpiryForAccess(this, () -> this), nullValue()); + assertThat(expiry.getExpiryForUpdate(this, () -> this, this), equalTo(duration)); } @Test @@ -59,11 +59,8 @@ public void testExpiration() { Duration update = new Duration(3L, TimeUnit.SECONDS); Expiry expiry = Expirations.builder().setCreate(creation).setAccess(access).setUpdate(update).build(); assertThat(expiry.getExpiryForCreation(this, this), equalTo(creation)); - assertThat(expiry.getExpiryForAccess(this, holderOf(this)), equalTo(access)); - assertThat(expiry.getExpiryForUpdate(this, holderOf(this),this), equalTo(update)); + assertThat(expiry.getExpiryForAccess(this, () -> this), equalTo(access)); + assertThat(expiry.getExpiryForUpdate(this, () -> this,this), equalTo(update)); } - private ValueSupplier holderOf(final Object obj) { - return () -> obj; - } } diff --git a/azure-pipelines-static-analysis.yml b/azure-pipelines-static-analysis.yml new file mode 100644 index 0000000000..481793ad3e --- /dev/null +++ b/azure-pipelines-static-analysis.yml @@ -0,0 +1,29 @@ +# +# Copyright Terracotta, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# See shared code location for steps and parameters: +# https://dev.azure.com/TerracottaCI/_git/terracotta + +resources: + repositories: + - repository: templates + type: git + name: terracotta/terracotta + +jobs: +- template: build-templates/maven-common.yml@templates + parameters: + gradleTasks: 'check' diff --git a/azure-pipelines.yml b/azure-pipelines.yml new file mode 100644 index 0000000000..807c859598 --- /dev/null +++ b/azure-pipelines.yml @@ -0,0 +1,27 @@ +# +# Copyright Terracotta, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# See shared code location for steps and parameters: +# https://dev.azure.com/TerracottaCI/_git/terracotta + +resources: + repositories: + - repository: templates + type: git + name: terracotta/terracotta + +jobs: +- template: build-templates/gradle-common.yml@templates diff --git a/build.gradle b/build.gradle index b094bd8526..202ad17493 100644 --- a/build.gradle +++ b/build.gradle @@ -23,6 +23,12 @@ plugins { // This adds the ability to print a taskTree // ./gradlew ... 
taskTree id "com.dorongold.task-tree" version "1.3" + // Declare spotbugs at the top + id 'com.github.spotbugs' version '1.6.3' apply false +} + +wrapper { + distributionType = Wrapper.DistributionType.ALL } if (deployUrl.contains('nexus')) { @@ -57,7 +63,7 @@ closeAndReleaseRepository.enabled = false ext { - baseVersion = findProperty('overrideVersion') ?: '3.5.0-SNAPSHOT' + baseVersion = findProperty('overrideVersion') ?: ehcacheVersion utils = new Utils(baseVersion, logger) isReleaseVersion = !baseVersion.endsWith('SNAPSHOT') @@ -76,10 +82,10 @@ if (hasProperty('testVM')) { } subprojects { - apply plugin: 'java' + apply plugin: 'java-library' apply plugin: 'eclipse' apply plugin: 'checkstyle' - apply plugin: 'findbugs' + apply plugin: 'com.github.spotbugs' apply plugin: 'jacoco' group = 'org.ehcache.modules' @@ -108,13 +114,15 @@ subprojects { } dependencies { - compileOnly "com.google.code.findbugs:annotations:$parent.findbugsVersion" - testCompileOnly "com.google.code.findbugs:annotations:$parent.findbugsVersion" - testCompile "junit:junit:$junitVersion", "org.assertj:assertj-core:$assertjVersion", "org.hamcrest:hamcrest-library:$hamcrestVersion" - testCompile("org.mockito:mockito-core:$mockitoVersion") { - exclude group:'org.hamcrest', module:'hamcrest-core' - } - testRuntime "org.slf4j:slf4j-simple:$parent.slf4jVersion" + implementation "org.slf4j:slf4j-api:$parent.slf4jVersion" + compileOnly "com.github.spotbugs:spotbugs-annotations:$parent.spotbugsVersion" + testCompileOnly "com.github.spotbugs:spotbugs-annotations:$parent.spotbugsVersion" + testImplementation "junit:junit:$junitVersion" + testImplementation "org.assertj:assertj-core:$assertjVersion" + testImplementation "org.hamcrest:hamcrest-library:$hamcrestVersion" + testImplementation "org.mockito:mockito-core:$mockitoVersion" + testCompile 'org.xmlunit:xmlunit-core:2.6.0', 'org.xmlunit:xmlunit-matchers:2.6.0' + testRuntimeOnly "org.slf4j:slf4j-simple:$parent.slf4jVersion" } jar { @@ -167,13 +175,13 @@ subprojects { toolVersion = checkstyleVersion } - findbugs { + spotbugs { ignoreFailures = false sourceSets = [sourceSets.main] - toolVersion = findbugsVersion + toolVersion = spotbugsVersion } - findbugsMain { + spotbugsMain { reports { // Switch from xml to html by changing these flags xml.enabled = true @@ -207,6 +215,8 @@ subprojects { configurations.all { resolutionStrategy { failOnVersionConflict() + // If you want to override a dependency, instead of changing gradle.properties, use something like below + // force 'org.terracotta:statistics:2.0-SNAPSHOT' } } } @@ -214,7 +224,7 @@ subprojects { allprojects { tasks.withType(JavaCompile) { options.encoding = 'UTF-8' - options.compilerArgs += ['-Xlint:unchecked'] + options.compilerArgs += ['-Werror', '-Xlint:all'] } tasks.withType(Javadoc) { options.encoding = 'UTF-8' diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle index 5e7622186e..abd602ec69 100644 --- a/buildSrc/build.gradle +++ b/buildSrc/build.gradle @@ -20,5 +20,5 @@ repositories { jcenter() } dependencies { compile gradleApi() compile localGroovy() - compile 'com.github.jengelman.gradle.plugins:shadow:2.0.1' + compile 'com.github.jengelman.gradle.plugins:shadow:2.0.4' } diff --git a/buildSrc/src/main/groovy/EhDeploy.groovy b/buildSrc/src/main/groovy/EhDeploy.groovy index 417716d255..4465ea89ff 100644 --- a/buildSrc/src/main/groovy/EhDeploy.groovy +++ b/buildSrc/src/main/groovy/EhDeploy.groovy @@ -36,17 +36,11 @@ class EhDeploy implements Plugin { project.plugins.apply EhPomGenerate // for generating pom.* 
project.configurations { - provided - } + providedApi + providedImplementation - project.sourceSets { - main { - compileClasspath += project.configurations.provided - } - test { - compileClasspath += project.configurations.provided - runtimeClasspath += project.configurations.provided - } + api.extendsFrom providedApi + implementation.extendsFrom providedImplementation } project.signing { @@ -55,9 +49,11 @@ class EhDeploy implements Plugin { } def artifactFiltering = { - pom.scopeMappings.mappings.remove(project.configurations.testCompile) - pom.scopeMappings.mappings.remove(project.configurations.testRuntime) - pom.scopeMappings.addMapping(MavenPlugin.COMPILE_PRIORITY, project.configurations.provided, Conf2ScopeMappingContainer.PROVIDED) + project.configurations.matching {it.name.startsWith('test')}.forEach { + pom.scopeMappings.mappings.remove(it) + } + pom.scopeMappings.addMapping(MavenPlugin.COMPILE_PRIORITY, project.configurations.providedApi, Conf2ScopeMappingContainer.PROVIDED) + pom.scopeMappings.addMapping(MavenPlugin.COMPILE_PRIORITY, project.configurations.providedImplementation, Conf2ScopeMappingContainer.PROVIDED) utils.pomFiller(pom, project.subPomName, project.subPomDesc) diff --git a/buildSrc/src/main/groovy/EhDistribute.groovy b/buildSrc/src/main/groovy/EhDistribute.groovy index e1d1ba8f5d..c3f2e59319 100644 --- a/buildSrc/src/main/groovy/EhDistribute.groovy +++ b/buildSrc/src/main/groovy/EhDistribute.groovy @@ -29,7 +29,7 @@ class EhDistribute implements Plugin { def utils = new Utils(project.baseVersion, project.logger) def hashsetOfProjects = project.configurations.compileOnly.dependencies.withType(ProjectDependency).dependencyProject - project.plugins.apply 'java' + project.plugins.apply 'java-library' project.plugins.apply 'maven' project.plugins.apply 'signing' project.plugins.apply 'com.github.johnrengelman.shadow' diff --git a/buildSrc/src/main/groovy/EhOsgi.groovy b/buildSrc/src/main/groovy/EhOsgi.groovy index 6b16d01d1c..dda5ea0c88 100644 --- a/buildSrc/src/main/groovy/EhOsgi.groovy +++ b/buildSrc/src/main/groovy/EhOsgi.groovy @@ -37,7 +37,7 @@ class EhOsgi implements Plugin { project.configurations.compileOnly.dependencies.withType(ProjectDependency).dependencyProject hashsetOfProjects += project //self also, in case the invoking project defines osgi properties - project.plugins.apply 'java' + project.plugins.apply 'java-library' project.plugins.apply 'maven' project.plugins.apply 'signing' diff --git a/buildSrc/src/main/groovy/EhPomMangle.groovy b/buildSrc/src/main/groovy/EhPomMangle.groovy index 271271ab3d..2bc761fdb9 100644 --- a/buildSrc/src/main/groovy/EhPomMangle.groovy +++ b/buildSrc/src/main/groovy/EhPomMangle.groovy @@ -44,7 +44,7 @@ class EhPomMangle implements Plugin { void apply(Project project) { def utils = new Utils(project.baseVersion, project.logger) - project.plugins.apply 'java' + project.plugins.apply 'java-library' project.plugins.apply 'maven' project.plugins.apply 'signing' @@ -56,10 +56,10 @@ class EhPomMangle implements Plugin { } def artifactFiltering = { - pom.scopeMappings.mappings.remove(project.configurations.compile) - pom.scopeMappings.mappings.remove(project.configurations.runtime) - pom.scopeMappings.mappings.remove(project.configurations.testCompile) - pom.scopeMappings.mappings.remove(project.configurations.testRuntime) + project.configurations.forEach { + pom.scopeMappings.mappings.remove(it) + } + pom.scopeMappings.addMapping(MavenPlugin.COMPILE_PRIORITY, project.configurations.shadowCompile, 
Conf2ScopeMappingContainer.COMPILE) pom.scopeMappings.addMapping(MavenPlugin.COMPILE_PRIORITY, project.configurations.shadowProvided, Conf2ScopeMappingContainer.PROVIDED) diff --git a/buildSrc/src/main/groovy/scripts/Utils.groovy b/buildSrc/src/main/groovy/scripts/Utils.groovy index b674c95236..6f46430b9d 100644 --- a/buildSrc/src/main/groovy/scripts/Utils.groovy +++ b/buildSrc/src/main/groovy/scripts/Utils.groovy @@ -37,7 +37,7 @@ class Utils { try { def proc = cmd.execute() revision = proc.text.trim() - } catch (IOException ioex) { + } catch (IOException) { revision = 'Unknown' } } @@ -96,7 +96,7 @@ class Utils { static def jvmForHome(File home) { def java = Jvm.forHome(home).javaExecutable - def versionCommand = "$java -version".execute(); + def versionCommand = "$java -version".execute() def version = JavaVersion.toVersion((versionCommand.err.text =~ /\w+ version "(.+)"/)[0][1]) return Jvm.discovered(home, version) } diff --git a/clustered/client/build.gradle b/clustered/client/build.gradle index dd5e7e5e94..2198ebd21c 100644 --- a/clustered/client/build.gradle +++ b/clustered/client/build.gradle @@ -17,20 +17,30 @@ apply plugin: EhDeploy dependencies { - compileOnly project(':api') + compileOnly project(':impl') compileOnly project(':xml') - compile project(':clustered:common'), "org.slf4j:slf4j-api:$slf4jVersion" - provided "org.terracotta:entity-client-api:$terracottaApisVersion" - provided "org.terracotta:runnel:$terracottaPlatformVersion" + implementation project(':clustered:common') + implementation "org.terracotta:statistics:$parent.statisticVersion" + providedImplementation "org.terracotta:entity-client-api:$terracottaApisVersion" + providedImplementation "org.terracotta:runnel:$terracottaPlatformVersion" + providedImplementation "org.terracotta:lease-api:$terracottaPlatformVersion" + providedImplementation "org.terracotta:connection-api:$terracottaApisVersion" - testCompile project(':api') - testCompile project(':xml') - testCompile project(':transactions') - testCompile(project(':clustered:server')) { + testImplementation project(':api') + testImplementation project(':impl') + testImplementation project(':xml') + testImplementation project(':transactions') + testImplementation(project(':clustered:server')) { exclude group: 'org.terracotta.internal', module: 'tc-config-parser' } - testCompile "org.terracotta:entity-test-lib:$terracottaPassthroughTestingVersion" - testCompile "org.terracotta:passthrough-server:$terracottaPassthroughTestingVersion" + testImplementation "org.terracotta:entity-test-lib:$terracottaPassthroughTestingVersion" + testImplementation "org.terracotta:passthrough-server:$terracottaPassthroughTestingVersion" + testImplementation "org.terracotta.internal:common:$terracottaCoreVersion" + testImplementation "org.terracotta:passthrough-leased-connection-api:$terracottaPlatformVersion" + testImplementation (group: 'org.codehaus.btm', name: 'btm', version: '2.1.4') { + exclude group:'org.slf4j', module:'slf4j-api' + } + testCompile project(path: ':xml', configuration: 'testArchives') } test { @@ -38,7 +48,3 @@ test { jvmArgs += ['--add-modules', 'java.xml.bind'] } } - -tasks.withType(JavaCompile) { - options.compilerArgs += ['-Werror'] -} diff --git a/clustered/client/src/main/java/org/ehcache/clustered/client/config/ClusteredResourceType.java b/clustered/client/src/main/java/org/ehcache/clustered/client/config/ClusteredResourceType.java index ae978d73b7..fdfabe8eef 100644 --- 
a/clustered/client/src/main/java/org/ehcache/clustered/client/config/ClusteredResourceType.java +++ b/clustered/client/src/main/java/org/ehcache/clustered/client/config/ClusteredResourceType.java @@ -48,9 +48,8 @@ final class Types { * * @return an array containing the constants of {@code ClusteredResourceType} in the order declared */ - @SuppressWarnings("unchecked") public static ClusteredResourceType[] values() { - return new ClusteredResourceType[] {DEDICATED, SHARED, UNKNOWN}; // unchecked + return new ClusteredResourceType[] {DEDICATED, SHARED, UNKNOWN}; } /** diff --git a/clustered/client/src/main/java/org/ehcache/clustered/client/config/ClusteringServiceConfiguration.java b/clustered/client/src/main/java/org/ehcache/clustered/client/config/ClusteringServiceConfiguration.java index 0499bba666..7f07e1fffa 100644 --- a/clustered/client/src/main/java/org/ehcache/clustered/client/config/ClusteringServiceConfiguration.java +++ b/clustered/client/src/main/java/org/ehcache/clustered/client/config/ClusteringServiceConfiguration.java @@ -18,34 +18,35 @@ import org.ehcache.CacheManager; import org.ehcache.PersistentCacheManager; +import org.ehcache.clustered.client.internal.ConnectionSource; import org.ehcache.clustered.client.service.ClusteringService; import org.ehcache.config.builders.CacheManagerBuilder; import org.ehcache.config.builders.CacheManagerConfiguration; import org.ehcache.core.HumanReadable; import org.ehcache.spi.service.ServiceCreationConfiguration; +import java.net.InetSocketAddress; import java.net.URI; import java.time.Duration; -import java.util.Map; import java.util.Objects; +import java.util.Properties; import org.ehcache.clustered.common.ServerSideConfiguration; -import static org.ehcache.clustered.client.config.Timeouts.DEFAULT_OPERATION_TIMEOUT; - /** * Specifies the configuration for a {@link ClusteringService}. */ -// TODO: Should this accept/hold a *list* of URIs? public class ClusteringServiceConfiguration implements ServiceCreationConfiguration, CacheManagerConfiguration, HumanReadable { - private final URI clusterUri; + public static final boolean DEFAULT_AUTOCREATE = false; + private final ConnectionSource connectionSource; private final boolean autoCreate; private final ServerSideConfiguration serverConfiguration; private final Timeouts timeouts; + private final Properties properties; /** * Creates a {@code ClusteringServiceConfiguration} from the properties provided. @@ -59,6 +60,18 @@ public ClusteringServiceConfiguration(URI clusterUri) { this(clusterUri, Timeouts.DEFAULT); } + /** + * Creates a {@code ClusteringServiceConfiguration} from the properties provided. + * + * @param servers the non-{@code null} iterable of servers in the cluster + * @param clusterTierManager the non-{@code null} cluster tier manager identifier + * + * @throws NullPointerException if {@code servers} is {@code null} + */ + public ClusteringServiceConfiguration(Iterable servers, String clusterTierManager) { + this(servers, clusterTierManager, Timeouts.DEFAULT); + } + /** * Creates a {@code ClusteringServiceConfiguration} from the properties provided. * @@ -72,6 +85,19 @@ public ClusteringServiceConfiguration(URI clusterUri, Timeouts timeouts) { this(clusterUri, timeouts, null); } + /** + * Creates a {@code ClusteringServiceConfiguration} from the properties provided. 
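As a usage sketch for the new server-list constructor shown above (not part of the patch; host names and the tier manager identifier are hypothetical, and the InetSocketAddress element type follows the imports added in this file):

import java.net.InetSocketAddress;
import java.util.Arrays;
import java.util.List;
import org.ehcache.clustered.client.config.ClusteringServiceConfiguration;

class ServerListExample {
  static ClusteringServiceConfiguration sample() {
    // Hypothetical two-server stripe; 9410 is the conventional server port.
    List<InetSocketAddress> servers = Arrays.asList(
        InetSocketAddress.createUnresolved("tsa-1.example.com", 9410),
        InetSocketAddress.createUnresolved("tsa-2.example.com", 9410));
    // "my-cache-manager" is the cluster tier manager identifier.
    return new ClusteringServiceConfiguration(servers, "my-cache-manager");
  }
}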
+ * + * @param servers the non-{@code null} iterable of servers in the cluster + * @param clusterTierManager the non-{@code null} cluster tier manager identifier + * @param timeouts the {@link Timeouts} specifying the time limit for clustered cache operations + * + * @throws NullPointerException if {@code servers} is {@code null} + */ + public ClusteringServiceConfiguration(Iterable servers, String clusterTierManager, Timeouts timeouts) { + this(servers, clusterTierManager, timeouts, null); + } + /** * Creates a {@code ClusteringServiceConfiguration} from the properties provided. * @@ -96,7 +122,22 @@ public ClusteringServiceConfiguration(URI clusterUri, ServerSideConfiguration se * @throws IllegalArgumentException if {@code clusterUri} is not URI valid for cluster operations */ public ClusteringServiceConfiguration(URI clusterUri, Timeouts timeouts, ServerSideConfiguration serverConfig) { - this(clusterUri, timeouts, false, serverConfig); + this(clusterUri, timeouts, DEFAULT_AUTOCREATE, serverConfig); + } + + /** + * Creates a {@code ClusteringServiceConfiguration} from the properties provided. + * + * @param servers the non-{@code null} iterable of servers in the cluster + * @param clusterTierManager the non-{@code null} cluster tier manager identifier + * @param timeouts the {@link Timeouts} specifying the time limit for clustered cache operations + * @param serverConfig the server side entity configuration required + * + * @throws NullPointerException if {@code servers} is {@code null} + */ + public ClusteringServiceConfiguration(Iterable servers, String clusterTierManager, Timeouts timeouts, + ServerSideConfiguration serverConfig) { + this(servers, clusterTierManager, timeouts, DEFAULT_AUTOCREATE, serverConfig); } /** @@ -113,6 +154,21 @@ public ClusteringServiceConfiguration(URI clusterUri, boolean autoCreate, Server this(clusterUri, Timeouts.DEFAULT, autoCreate, serverConfig); } + /** + * Creates a {@code ClusteringServiceConfiguration} from the properties provided. + * + * @param servers the non-{@code null} iterable of servers in the cluster + * @param clusterTierManager the non-{@code null} cluster tier manager identifier + * @param autoCreate {@code true} if server components should be auto created + * @param serverConfig the server side entity configuration required + * + * @throws NullPointerException if {@code servers} is {@code null} + */ + public ClusteringServiceConfiguration(Iterable servers, String clusterTierManager, boolean autoCreate, + ServerSideConfiguration serverConfig) { + this(servers, clusterTierManager, Timeouts.DEFAULT, autoCreate, serverConfig); + } + /** * Creates a {@code ClusteringServiceConfiguration} from the properties provided. * @@ -125,18 +181,85 @@ public ClusteringServiceConfiguration(URI clusterUri, boolean autoCreate, Server * @throws IllegalArgumentException if {@code clusterUri} is not URI valid for cluster operations */ public ClusteringServiceConfiguration(URI clusterUri, Timeouts timeouts, boolean autoCreate, ServerSideConfiguration serverConfig) { - this.clusterUri = Objects.requireNonNull(clusterUri, "Cluster URI cannot be null"); + this(clusterUri, timeouts, autoCreate, serverConfig, new Properties()); + } + + /** + * Creates a {@code ClusteringServiceConfiguration} from the properties provided. 
+ * + * @param servers the non-{@code null} iterable of servers in the cluster + * @param clusterTierManager the non-{@code null} cluster tier manager identifier + * @param timeouts the {@link Timeouts} specifying the time limit for clustered cache operations + * @param autoCreate {@code true} if server components should be auto created + * @param serverConfig the server side entity configuration required + * + * @throws NullPointerException if {@code servers} is {@code null} + */ + public ClusteringServiceConfiguration(Iterable servers, String clusterTierManager, Timeouts timeouts, + boolean autoCreate, ServerSideConfiguration serverConfig) { + this(servers, clusterTierManager, timeouts, autoCreate, serverConfig, new Properties()); + } + + /** + * Creates a {@code ClusteringServiceConfiguration} from the properties provided. + * + * @param clusterUri the non-{@code null} URI identifying the cluster server + * @param timeouts the {@link Timeouts} specifying the time limit for clustered cache operations + * @param autoCreate {@code true} if server components should be auto created + * @param serverConfig the server side entity configuration required + * @param properties the non-{@code null} connection Properties + * + * @throws NullPointerException if {@code clusterUri} is {@code null} + * @throws IllegalArgumentException if {@code clusterUri} is not URI valid for cluster operations + */ + public ClusteringServiceConfiguration(URI clusterUri, Timeouts timeouts, boolean autoCreate, ServerSideConfiguration serverConfig, Properties properties) { + this(new ConnectionSource.ClusterUri(clusterUri), timeouts, autoCreate, serverConfig, properties); + } + + /** + * Creates a {@code ClusteringServiceConfiguration} from the properties provided. + * + * @param servers the non-{@code null} iterable of servers in the cluster + * @param clusterTierManager the non-{@code null} cluster tier manager identifier + * @param timeouts the {@link Timeouts} specifying the time limit for clustered cache operations + * @param autoCreate {@code true} if server components should be auto created + * @param serverConfig the server side entity configuration required + * @param properties the non-{@code null} connection Properties + * + * @throws NullPointerException if {@code servers} is {@code null} + */ + public ClusteringServiceConfiguration(Iterable servers, String clusterTierManager, Timeouts timeouts, + boolean autoCreate, ServerSideConfiguration serverConfig, Properties properties) { + this(new ConnectionSource.ServerList(servers, clusterTierManager), timeouts, autoCreate, serverConfig, properties); + } + + /** + * Creates a {@code ClusteringServiceConfiguration} from the properties provided. 
+ * + * @param connectionSource the non-{@code null} {@code ConnectionSource} identifying the source of connection to servers in the cluster + * @param timeouts the {@link Timeouts} specifying the time limit for clustered cache operations + * @param autoCreate {@code true} if server components should be auto created + * @param serverSideConfiguration the server side entity configuration required + * @param properties the non-{@code null} connection Properties + * + * @throws NullPointerException if {@code connectionSource} is {@code null} + */ + public ClusteringServiceConfiguration(ConnectionSource connectionSource, Timeouts timeouts, boolean autoCreate, + ServerSideConfiguration serverSideConfiguration, Properties properties) { + this.connectionSource = connectionSource; this.autoCreate = autoCreate; - this.serverConfiguration = serverConfig; + this.serverConfiguration = serverSideConfiguration; this.timeouts = Objects.requireNonNull(timeouts, "Operation timeouts cannot be null"); + this.properties = (Properties) Objects.requireNonNull(properties, "Properties cannot be null").clone(); } protected ClusteringServiceConfiguration(ClusteringServiceConfiguration baseConfig) { Objects.requireNonNull(baseConfig, "Base configuration cannot be null"); - this.clusterUri = baseConfig.getClusterUri(); + this.connectionSource = baseConfig.getConnectionSource(); this.timeouts = baseConfig.getTimeouts(); this.autoCreate = baseConfig.isAutoCreate(); this.serverConfiguration = baseConfig.getServerConfiguration(); + this.properties = baseConfig.getProperties(); } /** @@ -145,7 +268,17 @@ protected ClusteringServiceConfiguration(ClusteringServiceConfiguration baseConf * @return the cluster {@code URI} */ public URI getClusterUri() { - return clusterUri; + return connectionSource.getClusterUri(); + } + + /** + * The {@code ConnectionSource} of the cluster, containing either a {@code URI}, or an {@code Iterable} + * of the servers in the cluster. + * + * @return a cluster {@code ConnectionSource} + */ + public ConnectionSource getConnectionSource() { + return connectionSource; } /** @@ -175,6 +308,15 @@ public Timeouts getTimeouts() { return timeouts; } + /** + * The {@code Properties} for the connection. + * + * @return the connection {@code Properties} + */ + public Properties getProperties() { + return (Properties) properties.clone(); + } + /** * The timeout for cache read operations.
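Note that the connection Properties are cloned both in the constructor and in getProperties(), so neither the caller's original object nor the returned copy can mutate the configuration. A small sketch, not part of the patch, with a hypothetical URI and property key:

import java.net.URI;
import java.util.Properties;
import org.ehcache.clustered.client.config.ClusteringServiceConfiguration;
import org.ehcache.clustered.client.config.Timeouts;

class ConnectionPropertiesExample {
  static void demo() {
    Properties props = new Properties();
    props.setProperty("some.connection.key", "5000"); // hypothetical property key
    ClusteringServiceConfiguration config = new ClusteringServiceConfiguration(
        URI.create("terracotta://server.example.com:9410/my-cm"), // hypothetical URI
        Timeouts.DEFAULT, false, null, props);
    // The constructor cloned the input, so later mutation has no effect:
    props.setProperty("some.connection.key", "changed");
    // And getProperties() returns a fresh clone on every call:
    Properties copy = config.getProperties();
    copy.clear(); // affects only this copy
    System.out.println(config.getProperties().getProperty("some.connection.key")); // prints 5000
  }
}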
* @@ -201,7 +343,7 @@ public CacheManagerBuilder builder(CacheManagerBuilder { + pools.append(" "); + pools.append(key); + pools.append(": "); + pools.append(value); + pools.append("\n"); + }); return pools.toString(); } } diff --git a/clustered/client/src/main/java/org/ehcache/clustered/client/config/Timeouts.java b/clustered/client/src/main/java/org/ehcache/clustered/client/config/Timeouts.java index 44ba00bc09..10d755c5de 100644 --- a/clustered/client/src/main/java/org/ehcache/clustered/client/config/Timeouts.java +++ b/clustered/client/src/main/java/org/ehcache/clustered/client/config/Timeouts.java @@ -20,7 +20,6 @@ import java.time.Duration; -import java.time.temporal.ChronoUnit; import java.util.function.LongSupplier; /** @@ -30,8 +29,9 @@ public final class Timeouts { public static final Duration DEFAULT_OPERATION_TIMEOUT = Duration.ofSeconds(5); + public static final Duration DEFAULT_CONNECTION_TIMEOUT = Duration.ofSeconds(150); public static final Duration INFINITE_TIMEOUT = Duration.ofNanos(Long.MAX_VALUE); - public static final Timeouts DEFAULT = new Timeouts(DEFAULT_OPERATION_TIMEOUT, DEFAULT_OPERATION_TIMEOUT, INFINITE_TIMEOUT); + public static final Timeouts DEFAULT = new Timeouts(DEFAULT_OPERATION_TIMEOUT, DEFAULT_OPERATION_TIMEOUT, DEFAULT_CONNECTION_TIMEOUT); private final Duration readOperationTimeout; private final Duration writeOperationTimeout; diff --git a/clustered/client/src/main/java/org/ehcache/clustered/client/config/builders/ClusteringServiceConfigurationBuilder.java b/clustered/client/src/main/java/org/ehcache/clustered/client/config/builders/ClusteringServiceConfigurationBuilder.java index 0374f5943b..86ab700b8f 100644 --- a/clustered/client/src/main/java/org/ehcache/clustered/client/config/builders/ClusteringServiceConfigurationBuilder.java +++ b/clustered/client/src/main/java/org/ehcache/clustered/client/config/builders/ClusteringServiceConfigurationBuilder.java @@ -15,6 +15,7 @@ */ package org.ehcache.clustered.client.config.builders; +import java.net.InetSocketAddress; import java.net.URI; import org.ehcache.clustered.client.config.ClusteringServiceConfiguration; @@ -22,17 +23,21 @@ import java.time.Duration; import java.time.temporal.ChronoUnit; import java.util.Objects; +import java.util.Properties; import java.util.concurrent.TimeUnit; import org.ehcache.clustered.client.config.Timeouts; +import org.ehcache.clustered.client.internal.ConnectionSource; import org.ehcache.clustered.common.ServerSideConfiguration; import org.ehcache.config.Builder; +import static org.ehcache.clustered.client.config.ClusteringServiceConfiguration.DEFAULT_AUTOCREATE; + /** * A builder of ClusteringService configurations. 
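With the Timeouts change above, the connection timeout now defaults to 150 seconds while read and write operations keep their 5 second default. Individual values can be overridden through TimeoutsBuilder; a sketch assuming its read(Duration) setter, which this patch references but does not show:

import java.time.Duration;
import org.ehcache.clustered.client.config.Timeouts;
import org.ehcache.clustered.client.config.builders.TimeoutsBuilder;

class TimeoutsExample {
  // Raise only the read timeout; the write timeout keeps its 5 second
  // default and the connection timeout keeps the new 150 second default.
  static final Timeouts CUSTOM = TimeoutsBuilder.timeouts()
      .read(Duration.ofSeconds(10)) // assumed setter, not shown in this patch
      .build();
}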
*/ public final class ClusteringServiceConfigurationBuilder implements Builder { - private final URI clusterUri; + private final ConnectionSource connectionSource; private final Timeouts timeouts; private final boolean autoCreate; @@ -44,11 +49,23 @@ public final class ClusteringServiceConfigurationBuilder implements Builder servers, String clusterTierManager) { + return new ClusteringServiceConfigurationBuilder(new ConnectionSource.ServerList(servers, clusterTierManager), TimeoutsBuilder.timeouts().build(), DEFAULT_AUTOCREATE); } - private ClusteringServiceConfigurationBuilder(URI clusterUri, Timeouts timeouts, boolean autoCreate) { - this.clusterUri = Objects.requireNonNull(clusterUri, "Cluster URI can't be null"); + private ClusteringServiceConfigurationBuilder(ConnectionSource connectionSource, Timeouts timeouts, boolean autoCreate) { + this.connectionSource = connectionSource; this.timeouts = Objects.requireNonNull(timeouts, "Timeouts can't be null"); this.autoCreate = autoCreate; } @@ -59,7 +76,7 @@ private ClusteringServiceConfigurationBuilder(URI clusterUri, Timeouts timeouts, * @return a clustering service configuration builder */ public ServerSideConfigurationBuilder autoCreate() { - return new ServerSideConfigurationBuilder(new ClusteringServiceConfigurationBuilder(this.clusterUri, this.timeouts, true)); + return new ServerSideConfigurationBuilder(new ClusteringServiceConfigurationBuilder(this.connectionSource, this.timeouts, true)); } /** @@ -68,7 +85,7 @@ public ServerSideConfigurationBuilder autoCreate() { * @return a clustering service configuration builder */ public ServerSideConfigurationBuilder expecting() { - return new ServerSideConfigurationBuilder(new ClusteringServiceConfigurationBuilder(this.clusterUri, this.timeouts, false)); + return new ServerSideConfigurationBuilder(new ClusteringServiceConfigurationBuilder(this.connectionSource, this.timeouts, false)); } /** @@ -84,7 +101,7 @@ public ServerSideConfigurationBuilder expecting() { * @throws NullPointerException if {@code timeouts} is {@code null} */ public ClusteringServiceConfigurationBuilder timeouts(Timeouts timeouts) { - return new ClusteringServiceConfigurationBuilder(this.clusterUri, timeouts, this.autoCreate); + return new ClusteringServiceConfigurationBuilder(this.connectionSource, timeouts, this.autoCreate); } /** @@ -100,7 +117,7 @@ public ClusteringServiceConfigurationBuilder timeouts(Timeouts timeouts) { * @throws NullPointerException if {@code timeouts} is {@code null} */ public ClusteringServiceConfigurationBuilder timeouts(Builder timeoutsBuilder) { - return new ClusteringServiceConfigurationBuilder(this.clusterUri, timeoutsBuilder.build(), this.autoCreate); + return new ClusteringServiceConfigurationBuilder(this.connectionSource, timeoutsBuilder.build(), this.autoCreate); } /** @@ -126,7 +143,7 @@ public ClusteringServiceConfigurationBuilder readOperationTimeout(long duration, @Override public ClusteringServiceConfiguration build() { - return new ClusteringServiceConfiguration(clusterUri, timeouts, autoCreate, null); + return build(null); } /** @@ -138,7 +155,7 @@ public ClusteringServiceConfiguration build() { * {@code ClusteringServiceConfigurationBuilder} and the {@code serverSideConfiguration} provided */ ClusteringServiceConfiguration build(ServerSideConfiguration serverSideConfiguration) { - return new ClusteringServiceConfiguration(clusterUri, timeouts, autoCreate, serverSideConfiguration); + return new ClusteringServiceConfiguration(connectionSource, timeouts, autoCreate, 
serverSideConfiguration, new Properties()); } private static ChronoUnit toChronoUnit(TimeUnit unit) { diff --git a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/ClusterTierManagerClientEntityFactory.java b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/ClusterTierManagerClientEntityFactory.java index 5e4ebcacb5..0ba843cbc3 100644 --- a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/ClusterTierManagerClientEntityFactory.java +++ b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/ClusterTierManagerClientEntityFactory.java @@ -22,6 +22,7 @@ import org.ehcache.clustered.client.internal.lock.VoltronReadWriteLock; import org.ehcache.clustered.client.internal.lock.VoltronReadWriteLock.Hold; import org.ehcache.clustered.client.internal.store.ClusterTierClientEntity; +import org.ehcache.clustered.client.internal.store.ClusterTierUserData; import org.ehcache.clustered.client.internal.store.InternalClusterTierClientEntity; import org.ehcache.clustered.client.service.EntityBusyException; import org.ehcache.clustered.common.ServerSideConfiguration; @@ -55,6 +56,7 @@ public class ClusterTierManagerClientEntityFactory { private final Connection connection; private final Map maintenanceHolds = new ConcurrentHashMap<>(); + private final Map fetchHolds = new ConcurrentHashMap<>(); private final Timeouts entityTimeouts; @@ -79,13 +81,30 @@ public boolean acquireLeadership(String entityIdentifier) { } } - public void abandonLeadership(String entityIdentifier) { + public boolean abandonAllHolds(String entityIdentifier) { + return abandonLeadership(entityIdentifier) | abandonFetchHolds(entityIdentifier); + } + + /** + * Proactively abandon leadership before closing connection. + * + * @param entityIdentifier the master entity identifier + * @return {@code true} if abandoned, {@code false} otherwise + */ + public boolean abandonLeadership(String entityIdentifier) { Hold hold = maintenanceHolds.remove(entityIdentifier); - if (hold == null) { - throw new IllegalMonitorStateException("Leadership was never held"); - } else { - hold.unlock(); - } + return (hold != null) && silentlyUnlock(hold, entityIdentifier); + } + + /** + * Proactively abandon any READ holds before closing connection.
+ * + * @param entityIdentifier the master entity identifier + * @return true if abandoned, false otherwise + */ + private boolean abandonFetchHolds(String entityIdentifier) { + Hold hold = fetchHolds.remove(entityIdentifier); + return (hold != null) && silentlyUnlock(hold, entityIdentifier); } /** @@ -109,7 +128,7 @@ public void create(final String identifier, final ServerSideConfiguration config throw new EntityBusyException("Unable to obtain maintenance lease for " + identifier); } - EntityRef ref = getEntityRef(identifier); + EntityRef ref = getEntityRef(identifier); try { ref.create(new ClusterTierManagerConfiguration(identifier, config)); } catch (EntityConfigurationException e) { @@ -162,6 +181,9 @@ public ClusterTierManagerClientEntity retrieve(String identifier, ServerSideConf if (!validated) { silentlyClose(entity, identifier); silentlyUnlock(fetchHold, identifier); + } else { + // track read holds as well so that we can explicitly abandon them + fetchHolds.put(identifier, fetchHold); } } } @@ -174,7 +196,7 @@ public void destroy(final String identifier) throws EntityBusyException { throw new EntityBusyException("Unable to obtain maintenance lease for " + identifier); } - EntityRef ref = getEntityRef(identifier); + EntityRef ref = getEntityRef(identifier); destroyAllClusterTiers(ref, identifier); try { if (!ref.destroy()) { @@ -192,7 +214,8 @@ public void destroy(final String identifier) throws EntityBusyException { } } - private void destroyAllClusterTiers(EntityRef ref, String identifier) { + private void destroyAllClusterTiers(EntityRef ref, String identifier) { ClusterTierManagerClientEntity entity; try { entity = ref.fetchEntity(null); @@ -224,11 +247,13 @@ private void silentlyClose(ClusterTierManagerClientEntity entity, String identif } } - private void silentlyUnlock(Hold localMaintenance, String identifier) { + private boolean silentlyUnlock(Hold localMaintenance, String identifier) { try { localMaintenance.unlock(); + return true; } catch(Exception e) { LOGGER.error("Failed to unlock for id {}", identifier, e); + return false; } } @@ -236,7 +261,7 @@ private VoltronReadWriteLock createAccessLockFor(String entityIdentifier) { return new VoltronReadWriteLock(connection, "ClusterTierManagerClientEntityFactory-AccessLock-" + entityIdentifier); } - private EntityRef getEntityRef(String identifier) { + private EntityRef getEntityRef(String identifier) { try { return connection.getEntityRef(ClusterTierManagerClientEntity.class, ENTITY_VERSION, identifier); } catch (EntityNotProvidedException e) { @@ -248,7 +273,7 @@ private EntityRef entityRef; + EntityRef entityRef; try { entityRef = connection.getEntityRef(InternalClusterTierClientEntity.class, ENTITY_VERSION, entityName(clusterTierManagerIdentifier, storeIdentifier)); } catch (EntityNotProvidedException e) { @@ -267,10 +292,7 @@ public ClusterTierClientEntity fetchOrCreateClusteredStoreEntity(String clusterT throw new AssertionError(e); } try { - InternalClusterTierClientEntity entity = entityRef.fetchEntity(null); - entity.setStoreIdentifier(storeIdentifier); - entity.setTimeouts(entityTimeouts); - return entity; + return entityRef.fetchEntity(new ClusterTierUserData(entityTimeouts, storeIdentifier)); } catch (EntityNotFoundException e) { // Ignore - will try to create again } catch (EntityException e) { @@ -278,16 +300,30 @@ public ClusterTierClientEntity fetchOrCreateClusteredStoreEntity(String clusterT } } } else { - try { - InternalClusterTierClientEntity entity = entityRef.fetchEntity(null); -
entity.setStoreIdentifier(storeIdentifier); - entity.setTimeouts(entityTimeouts); - return entity; - } catch (EntityNotFoundException e) { - throw e; - } catch (EntityException e) { - throw new AssertionError(e); - } + return fetchClusterTierClientEntity(storeIdentifier, entityRef); + } + } + + public ClusterTierClientEntity getClusterTierClientEntity(String clusterTierManagerIdentifier, String storeIdentifier) throws EntityNotFoundException { + EntityRef entityRef; + try { + entityRef = connection.getEntityRef(InternalClusterTierClientEntity.class, ENTITY_VERSION, entityName(clusterTierManagerIdentifier, storeIdentifier)); + } catch (EntityNotProvidedException e) { + throw new AssertionError(e); + } + + return fetchClusterTierClientEntity(storeIdentifier, entityRef); + } + + private ClusterTierClientEntity fetchClusterTierClientEntity(String storeIdentifier, + EntityRef entityRef) + throws EntityNotFoundException { + try { + return entityRef.fetchEntity(new ClusterTierUserData(entityTimeouts, storeIdentifier)); + } catch (EntityNotFoundException e) { + throw e; + } catch (EntityException e) { + throw new AssertionError(e); } } diff --git a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/ClusterTierManagerValidationException.java b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/ClusterTierManagerValidationException.java index ab91bb5cc7..27c793b278 100644 --- a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/ClusterTierManagerValidationException.java +++ b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/ClusterTierManagerValidationException.java @@ -21,6 +21,8 @@ */ public class ClusterTierManagerValidationException extends RuntimeException { + private static final long serialVersionUID = -428725072152588216L; + public ClusterTierManagerValidationException(String message) { super(message); } diff --git a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/ConnectionSource.java b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/ConnectionSource.java new file mode 100644 index 0000000000..ac9b882a96 --- /dev/null +++ b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/ConnectionSource.java @@ -0,0 +1,122 @@ +/* + * Copyright Terracotta, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
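The new `ConnectionSource` below comes in two flavours; the `ClusterUri` variant derives the tier manager name by relativizing the configured URI against its scheme-and-authority base. A standalone sketch of that split, using only `java.net.URI` (host and manager names invented):

```java
import java.net.URI;
import java.net.URISyntaxException;

public class UriSplitExample {
  public static void main(String[] args) throws URISyntaxException {
    URI clusterUri = URI.create("terracotta://host-1:9410/my-manager");
    // Keep only scheme + authority, dropping path/query/fragment, as extractClusterUri does.
    URI base = new URI(clusterUri.getScheme(), clusterUri.getAuthority(), null, null, null);
    System.out.println(base);                                  // terracotta://host-1:9410
    System.out.println(base.relativize(clusterUri).getPath()); // my-manager
  }
}
```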
+ */ +package org.ehcache.clustered.client.internal; + +import org.terracotta.connection.ConnectionException; +import org.terracotta.lease.connection.LeasedConnection; +import org.terracotta.lease.connection.LeasedConnectionFactory; + +import java.net.InetSocketAddress; +import java.net.URI; +import java.net.URISyntaxException; +import java.util.ArrayList; +import java.util.List; +import java.util.Objects; +import java.util.Properties; + +public abstract class ConnectionSource { + + public abstract String getClusterTierManager(); + + public abstract LeasedConnection connect(Properties connectionProperties) throws ConnectionException; + + public abstract URI getClusterUri(); + + public static class ClusterUri extends ConnectionSource { + + private final URI clusterUri; + private final String clusterTierManager; + + public ClusterUri(URI clusterUri) { + this.clusterUri = Objects.requireNonNull(clusterUri, "Cluster URI cannot be null"); + this.clusterTierManager = extractCacheManager(clusterUri); + } + + @Override + public String getClusterTierManager() { + return clusterTierManager; + } + + @Override + public LeasedConnection connect(Properties connectionProperties) throws ConnectionException { + return LeasedConnectionFactory.connect(extractClusterUri(clusterUri), connectionProperties); + } + + @Override + public URI getClusterUri() { + return clusterUri; + } + + @Override + public String toString() { + return "clusterUri: " + clusterUri; + } + + private static String extractCacheManager(URI uri) { + URI baseUri = extractClusterUri(uri); + return baseUri.relativize(uri).getPath(); + } + + private static URI extractClusterUri(URI uri) { + try { + return new URI(uri.getScheme(), uri.getAuthority(), null, null, null); + } catch (URISyntaxException e) { + throw new AssertionError(e); + } + } + } + + public static class ServerList extends ConnectionSource { + + private final Iterable servers; + private final String clusterTierManager; + + public ServerList(Iterable servers, String clusterTierManager) { + this.servers = cloneServers(Objects.requireNonNull(servers, "Servers cannot be null")); + this.clusterTierManager = Objects.requireNonNull(clusterTierManager, "Cluster tier manager identifier cannot be null"); + } + + @Override + public String getClusterTierManager() { + return clusterTierManager; + } + + @Override + public LeasedConnection connect(Properties connectionProperties) throws ConnectionException { + return LeasedConnectionFactory.connect(servers, connectionProperties); + } + + @Override + public URI getClusterUri() { + throw new IllegalStateException("Cannot use getClusterUri() on ConnectionSource.ServerList. 
Use getServers() instead."); + } + + public Iterable getServers() { + return cloneServers(servers); + } + + @Override + public String toString() { + return "servers: " + getServers() + " [cache-manager: " + getClusterTierManager() + "]"; + } + + private List cloneServers(Iterable servers) { + List socketAddresses = new ArrayList<>(); + servers.forEach(socketAddresses::add); + return socketAddresses; + } + } +} diff --git a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/SimpleClusterTierManagerClientEntity.java b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/SimpleClusterTierManagerClientEntity.java index 9dfadeaef6..ae3f42e1b7 100644 --- a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/SimpleClusterTierManagerClientEntity.java +++ b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/SimpleClusterTierManagerClientEntity.java @@ -24,7 +24,10 @@ import org.ehcache.clustered.common.internal.messages.EhcacheEntityResponse.PrepareForDestroy; import org.ehcache.clustered.common.internal.messages.EhcacheResponseType; import org.ehcache.clustered.common.internal.messages.LifeCycleMessageFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.terracotta.connection.entity.Entity; +import org.terracotta.entity.EndpointDelegate; import org.terracotta.entity.EntityClientEndpoint; import org.terracotta.entity.EntityResponse; import org.terracotta.entity.InvokeFuture; @@ -40,12 +43,30 @@ */ public class SimpleClusterTierManagerClientEntity implements ClusterTierManagerClientEntity { + private static final Logger LOGGER = LoggerFactory.getLogger(SimpleClusterTierManagerClientEntity.class); + private final EntityClientEndpoint endpoint; private final LifeCycleMessageFactory messageFactory; public SimpleClusterTierManagerClientEntity(EntityClientEndpoint endpoint) { this.endpoint = endpoint; this.messageFactory = new LifeCycleMessageFactory(); + endpoint.setDelegate(new EndpointDelegate() { + @Override + public void handleMessage(EhcacheEntityResponse messageFromServer) { + + } + + @Override + public byte[] createExtendedReconnectData() { + return new byte[0]; + } + + @Override + public void didDisconnectUnexpectedly() { + LOGGER.info("CacheManager got disconnected from server"); + } + }); } @Override diff --git a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/config/ClusteredResourcePoolImpl.java b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/config/ClusteredResourcePoolImpl.java index 9d1da5223a..5e91d126a9 100644 --- a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/config/ClusteredResourcePoolImpl.java +++ b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/config/ClusteredResourcePoolImpl.java @@ -38,16 +38,6 @@ public PoolAllocation getPoolAllocation() { return new PoolAllocation.Unknown(); } - @Override - public ClusteredResourceType getType() { - return super.getType(); - } - - @Override - public boolean isPersistent() { - return super.isPersistent(); - } - @Override public void validateUpdate(ResourcePool newPool) { throw new UnsupportedOperationException("Updating CLUSTERED resource is not supported"); diff --git a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/config/SharedClusteredResourcePoolImpl.java b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/config/SharedClusteredResourcePoolImpl.java index 2b312545b3..5f74e57cce 100644 --- 
a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/config/SharedClusteredResourcePoolImpl.java +++ b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/config/SharedClusteredResourcePoolImpl.java @@ -46,11 +46,6 @@ public SharedClusteredResourcePoolImpl(final String sharedResourcePool) { this.sharedResourcePool = sharedResourcePool; } - @Override - public ClusteredResourceType getType() { - return super.getType(); - } - @Override public String getSharedResourcePool() { return this.sharedResourcePool; diff --git a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/config/xml/ClusteredCacheConstants.java b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/config/xml/ClusteredCacheConstants.java index 6a5be3fb4e..881159e4b2 100644 --- a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/config/xml/ClusteredCacheConstants.java +++ b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/config/xml/ClusteredCacheConstants.java @@ -34,4 +34,5 @@ final class ClusteredCacheConstants { * Namespace for cluster configuration elements. Must match {@code targetNamespace} in {@value #XSD}. */ static final URI NAMESPACE = URI.create("http://www.ehcache.org/v3/clustered"); + static final String TC_CLUSTERED_NAMESPACE_PREFIX = "tc:"; } diff --git a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/config/xml/ClusteredResourceConfigurationParser.java b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/config/xml/ClusteredResourceConfigurationParser.java index 11903da55c..f2858511f3 100644 --- a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/config/xml/ClusteredResourceConfigurationParser.java +++ b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/config/xml/ClusteredResourceConfigurationParser.java @@ -16,30 +16,49 @@ package org.ehcache.clustered.client.internal.config.xml; +import org.ehcache.clustered.client.internal.config.ClusteredResourcePoolImpl; import org.ehcache.clustered.client.internal.config.DedicatedClusteredResourcePoolImpl; import org.ehcache.clustered.client.internal.config.SharedClusteredResourcePoolImpl; -import org.ehcache.clustered.client.internal.config.ClusteredResourcePoolImpl; import org.ehcache.config.ResourcePool; import org.ehcache.config.units.MemoryUnit; +import org.ehcache.xml.BaseConfigParser; import org.ehcache.xml.CacheResourceConfigurationParser; import org.ehcache.xml.exceptions.XmlConfigurationException; import org.w3c.dom.Attr; import org.w3c.dom.DOMException; +import org.w3c.dom.Document; import org.w3c.dom.Element; import java.io.IOException; import java.net.URI; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; +import java.util.Set; import javax.xml.transform.Source; import javax.xml.transform.stream.StreamSource; import static org.ehcache.clustered.client.internal.config.xml.ClusteredCacheConstants.NAMESPACE; import static org.ehcache.clustered.client.internal.config.xml.ClusteredCacheConstants.XML_SCHEMA; +import static org.ehcache.clustered.client.internal.config.xml.ClusteredCacheConstants.TC_CLUSTERED_NAMESPACE_PREFIX; /** * Provides a parser for the {@code /config/cache/resources} extension elements. 
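For orientation, the three resource fragments this parser recognizes have programmatic counterparts in the public `ClusteredResourcePoolBuilder` API; the pool names, sizes, and resource names below are illustrative only:

```java
import org.ehcache.clustered.client.config.builders.ClusteredResourcePoolBuilder;
import org.ehcache.config.ResourcePool;
import org.ehcache.config.units.MemoryUnit;

public class ClusteredPoolExamples {
  public static void main(String[] args) {
    // <tc:clustered-dedicated from="primary" unit="GB">4</tc:clustered-dedicated>
    ResourcePool dedicated = ClusteredResourcePoolBuilder.clusteredDedicated("primary", 4, MemoryUnit.GB);
    // <tc:clustered-shared sharing="pool-a"/>
    ResourcePool shared = ClusteredResourcePoolBuilder.clusteredShared("pool-a");
    // <tc:clustered/>, sized by the server-side tier configuration
    ResourcePool inherited = ClusteredResourcePoolBuilder.clustered();
    System.out.println(dedicated + ", " + shared + ", " + inherited);
  }
}
```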
*/ -public class ClusteredResourceConfigurationParser implements CacheResourceConfigurationParser { +public class ClusteredResourceConfigurationParser extends BaseConfigParser implements CacheResourceConfigurationParser { + + private static final String CLUSTERED_ELEMENT_NAME = "clustered"; + private static final String DEDICATED_ELEMENT_NAME = "clustered-dedicated"; + private static final String SHARED_ELEMENT_NAME = "clustered-shared"; + private static final String FROM_ELEMENT_NAME = "from"; + private static final String UNIT_ELEMENT_NAME = "unit"; + private static final String SHARING_ELEMENT_NAME = "sharing"; + + public ClusteredResourceConfigurationParser() { + super(ResourcePool.class); + } + @Override public Source getXmlSchema() throws IOException { return new StreamSource(XML_SCHEMA.openStream()); @@ -52,39 +71,40 @@ public URI getNamespace() { protected ResourcePool parseResourceConfig(final Element fragment) { final String elementName = fragment.getLocalName(); - if ("clustered-shared".equals(elementName)) { - final String sharing = fragment.getAttribute("sharing"); - return new SharedClusteredResourcePoolImpl(sharing); - - } else if ("clustered-dedicated".equals(elementName)) { - // 'from' attribute is optional on 'clustered-dedicated' element - final Attr fromAttr = fragment.getAttributeNode("from"); - final String from = (fromAttr == null ? null : fromAttr.getValue()); - - final String unitValue = fragment.getAttribute("unit").toUpperCase(); - final MemoryUnit sizeUnits; - try { - sizeUnits = MemoryUnit.valueOf(unitValue); - } catch (IllegalArgumentException e) { - throw new XmlConfigurationException(String.format("XML configuration element <%s> 'unit' attribute '%s' is not valid", elementName, unitValue), e); - } + switch (elementName) { + case "clustered-shared": + final String sharing = fragment.getAttribute("sharing"); + return new SharedClusteredResourcePoolImpl(sharing); - final String sizeValue; - try { - sizeValue = fragment.getFirstChild().getNodeValue(); - } catch (DOMException e) { - throw new XmlConfigurationException(String.format("XML configuration element <%s> value is not valid", elementName), e); - } - final long size; - try { - size = Long.parseLong(sizeValue); - } catch (NumberFormatException e) { - throw new XmlConfigurationException(String.format("XML configuration element <%s> value '%s' is not valid", elementName, sizeValue), e); - } + case "clustered-dedicated": + // 'from' attribute is optional on 'clustered-dedicated' element + final Attr fromAttr = fragment.getAttributeNode("from"); + final String from = (fromAttr == null ? 
null : fromAttr.getValue()); + + final String unitValue = fragment.getAttribute("unit").toUpperCase(); + final MemoryUnit sizeUnits; + try { + sizeUnits = MemoryUnit.valueOf(unitValue); + } catch (IllegalArgumentException e) { + throw new XmlConfigurationException(String.format("XML configuration element <%s> 'unit' attribute '%s' is not valid", elementName, unitValue), e); + } + + final String sizeValue; + try { + sizeValue = fragment.getFirstChild().getNodeValue(); + } catch (DOMException e) { + throw new XmlConfigurationException(String.format("XML configuration element <%s> value is not valid", elementName), e); + } + final long size; + try { + size = Long.parseLong(sizeValue); + } catch (NumberFormatException e) { + throw new XmlConfigurationException(String.format("XML configuration element <%s> value '%s' is not valid", elementName, sizeValue), e); + } - return new DedicatedClusteredResourcePoolImpl(from, size, sizeUnits); - } else if("clustered".equals(elementName)) { - return new ClusteredResourcePoolImpl(); + return new DedicatedClusteredResourcePoolImpl(from, size, sizeUnits); + case "clustered": + return new ClusteredResourcePoolImpl(); } return null; } @@ -98,4 +118,36 @@ public ResourcePool parseResourceConfiguration(final Element fragment) { throw new XmlConfigurationException(String.format("XML configuration element <%s> in <%s> is not supported", fragment.getTagName(), (fragment.getParentNode() == null ? "null" : fragment.getParentNode().getLocalName()))); } + + @Override + public Element unparseResourcePool(ResourcePool resourcePool) { + return unparseConfig(resourcePool); + } + + @Override + protected Element createRootElement(Document doc, ResourcePool resourcePool) { + Element rootElement = null; + if (ClusteredResourcePoolImpl.class == resourcePool.getClass()) { + rootElement = doc.createElementNS(getNamespace().toString(), TC_CLUSTERED_NAMESPACE_PREFIX + CLUSTERED_ELEMENT_NAME); + } else if (DedicatedClusteredResourcePoolImpl.class == resourcePool.getClass()) { + DedicatedClusteredResourcePoolImpl dedicatedClusteredResourcePool = (DedicatedClusteredResourcePoolImpl) resourcePool; + rootElement = doc.createElementNS(getNamespace().toString(), TC_CLUSTERED_NAMESPACE_PREFIX + DEDICATED_ELEMENT_NAME); + if (dedicatedClusteredResourcePool.getFromResource() != null) { + rootElement.setAttribute(FROM_ELEMENT_NAME, dedicatedClusteredResourcePool.getFromResource()); + } + rootElement.setAttribute(UNIT_ELEMENT_NAME, dedicatedClusteredResourcePool.getUnit().toString()); + rootElement.setTextContent(String.valueOf(dedicatedClusteredResourcePool.getSize())); + } else if (SharedClusteredResourcePoolImpl.class == resourcePool.getClass()) { + SharedClusteredResourcePoolImpl sharedClusteredResourcePool = (SharedClusteredResourcePoolImpl) resourcePool; + rootElement = doc.createElementNS(getNamespace().toString(), TC_CLUSTERED_NAMESPACE_PREFIX + SHARED_ELEMENT_NAME); + rootElement.setAttribute(SHARING_ELEMENT_NAME, sharedClusteredResourcePool.getSharedResourcePool()); + } + return rootElement; + } + + @Override + public Set> getResourceTypes() { + return Collections.unmodifiableSet(new HashSet<>(Arrays.asList(ClusteredResourcePoolImpl.class, + DedicatedClusteredResourcePoolImpl.class, SharedClusteredResourcePoolImpl.class))); + } } diff --git a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/config/xml/ClusteringCacheManagerServiceConfigurationParser.java 
b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/config/xml/ClusteringCacheManagerServiceConfigurationParser.java new file mode 100644 index 0000000000..975989101e --- /dev/null +++ b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/config/xml/ClusteringCacheManagerServiceConfigurationParser.java @@ -0,0 +1,465 @@ +/* + * Copyright Terracotta, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.ehcache.clustered.client.internal.config.xml; + +import org.ehcache.clustered.client.config.ClusteringServiceConfiguration; +import org.ehcache.clustered.client.config.Timeouts; +import org.ehcache.clustered.client.config.builders.TimeoutsBuilder; +import org.ehcache.clustered.client.internal.ConnectionSource; +import org.ehcache.clustered.client.service.ClusteringService; +import org.ehcache.clustered.common.ServerSideConfiguration; +import org.ehcache.config.units.MemoryUnit; +import org.ehcache.spi.service.ServiceCreationConfiguration; +import org.ehcache.xml.BaseConfigParser; +import org.ehcache.xml.CacheManagerServiceConfigurationParser; +import org.ehcache.xml.exceptions.XmlConfigurationException; +import org.ehcache.xml.model.TimeType; +import org.w3c.dom.Attr; +import org.w3c.dom.Document; +import org.w3c.dom.Element; +import org.w3c.dom.Node; +import org.w3c.dom.NodeList; + + +import java.io.IOException; +import java.math.BigInteger; +import java.net.InetSocketAddress; +import java.net.URI; +import java.net.URISyntaxException; +import java.time.Duration; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; + +import javax.xml.bind.JAXBContext; +import javax.xml.bind.JAXBElement; +import javax.xml.bind.JAXBException; +import javax.xml.bind.Unmarshaller; +import javax.xml.transform.Source; +import javax.xml.transform.stream.StreamSource; + +import static org.ehcache.clustered.client.internal.config.xml.ClusteredCacheConstants.NAMESPACE; +import static org.ehcache.clustered.client.internal.config.xml.ClusteredCacheConstants.XML_SCHEMA; +import static org.ehcache.clustered.client.internal.config.xml.ClusteredCacheConstants.TC_CLUSTERED_NAMESPACE_PREFIX; +import static org.ehcache.xml.XmlModel.convertToJavaTimeUnit; + +/** + * Provides parsing support for the {@code } elements representing a {@link ClusteringService ClusteringService}. 
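The two connection styles this new parser accepts line up with the builder entry points added earlier in this PR. A usage sketch, assuming made-up host names and tier manager id (the `cluster(...)` overloads are the ones shown in the ClusteringServiceConfigurationBuilder hunk above):

```java
import java.net.InetSocketAddress;
import java.net.URI;
import java.util.Arrays;
import java.util.List;

import org.ehcache.clustered.client.config.builders.ClusteringServiceConfigurationBuilder;

public class ClusterConnectionStyles {
  public static void main(String[] args) {
    // <tc:connection url="terracotta://host-1:9410/my-manager"/>
    ClusteringServiceConfigurationBuilder byUri =
        ClusteringServiceConfigurationBuilder.cluster(URI.create("terracotta://host-1:9410/my-manager"));

    // <tc:cluster-connection cluster-tier-manager="my-manager"> with <tc:server> children
    List<InetSocketAddress> servers = Arrays.asList(
        InetSocketAddress.createUnresolved("host-1", 9410),
        InetSocketAddress.createUnresolved("host-2", 0));
    ClusteringServiceConfigurationBuilder byServerList =
        ClusteringServiceConfigurationBuilder.cluster(servers, "my-manager");

    System.out.println(byUri.build() + " / " + byServerList.build());
  }
}
```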
+ * + * @see ClusteredCacheConstants#XSD + */ +public class ClusteringCacheManagerServiceConfigurationParser extends BaseConfigParser implements CacheManagerServiceConfigurationParser { + + public static final String CLUSTER_ELEMENT_NAME = "cluster"; + public static final String CONNECTION_ELEMENT_NAME = "connection"; + public static final String CLUSTER_CONNECTION_ELEMENT_NAME = "cluster-connection"; + public static final String CLUSTER_TIER_MANAGER_ATTRIBUTE_NAME = "cluster-tier-manager"; + public static final String SERVER_ELEMENT_NAME = "server"; + public static final String HOST_ATTRIBUTE_NAME = "host"; + public static final String PORT_ATTRIBUTE_NAME = "port"; + public static final String READ_TIMEOUT_ELEMENT_NAME = "read-timeout"; + public static final String WRITE_TIMEOUT_ELEMENT_NAME = "write-timeout"; + public static final String CONNECTION_TIMEOUT_ELEMENT_NAME = "connection-timeout"; + public static final String URL_ATTRIBUTE_NAME = "url"; + public static final String DEFAULT_RESOURCE_ELEMENT_NAME = "default-resource"; + public static final String SHARED_POOL_ELEMENT_NAME = "shared-pool"; + public static final String SERVER_SIDE_CONFIG = "server-side-config"; + public static final String AUTO_CREATE_ATTRIBUTE_NAME = "auto-create"; + public static final String UNIT_ATTRIBUTE_NAME = "unit"; + public static final String NAME_ATTRIBUTE_NAME = "name"; + public static final String FROM_ATTRIBUTE_NAME = "from"; + public static final String DEFAULT_UNIT_ATTRIBUTE_VALUE = "seconds"; + + public ClusteringCacheManagerServiceConfigurationParser() { + super(ClusteringServiceConfiguration.class); + } + + @Override + public Source getXmlSchema() throws IOException { + return new StreamSource(XML_SCHEMA.openStream()); + } + + @Override + public URI getNamespace() { + return NAMESPACE; + } + + /** + * Complete interpretation of the top-level elements defined in {@value ClusteredCacheConstants#XSD}. + * This method is called only for those elements from the namespace set by {@link ClusteredCacheConstants#NAMESPACE}. + *
<p>
+ * This method presumes the element presented is valid according to the XSD. + * + * @param fragment the XML fragment to process + * @return a {@link org.ehcache.clustered.client.config.ClusteringServiceConfiguration ClusteringServiceConfiguration} + */ + @Override + public ServiceCreationConfiguration parseServiceCreationConfiguration(final Element fragment) { + + if ("cluster".equals(fragment.getLocalName())) { + + ClusteringCacheManagerServiceConfigurationParser.ServerSideConfig serverConfig = null; + URI connectionUri = null; + List serverAddresses = new ArrayList<>(); + String clusterTierManager = null; + Duration getTimeout = null, putTimeout = null, connectionTimeout = null; + final NodeList childNodes = fragment.getChildNodes(); + for (int i = 0; i < childNodes.getLength(); i++) { + final Node item = childNodes.item(i); + if (Node.ELEMENT_NODE == item.getNodeType()) { + switch (item.getLocalName()) { + case "connection": + /* + * is a required element in the XSD + */ + final Attr urlAttribute = ((Element)item).getAttributeNode("url"); + final String urlValue = urlAttribute.getValue(); + try { + connectionUri = new URI(urlValue); + } catch (URISyntaxException e) { + throw new XmlConfigurationException( + String.format("Value of %s attribute on XML configuration element <%s> in <%s> is not a valid URI - '%s'", + urlAttribute.getName(), item.getNodeName(), fragment.getTagName(), connectionUri), e); + } + + break; + case "cluster-connection": + clusterTierManager = ((Element)item).getAttribute("cluster-tier-manager"); + final NodeList serverNodes = item.getChildNodes(); + for (int j = 0; j < serverNodes.getLength(); j++) { + final Node serverNode = serverNodes.item(j); + final String host = ((Element)serverNode).getAttributeNode("host").getValue(); + final Attr port = ((Element)serverNode).getAttributeNode("port"); + InetSocketAddress address; + if (port == null) { + address = InetSocketAddress.createUnresolved(host, 0); + } else { + String portString = port.getValue(); + address = InetSocketAddress.createUnresolved(host, Integer.parseInt(portString)); + } + serverAddresses.add(address); + } + + break; + case "read-timeout": + /* + * is an optional element + */ + getTimeout = processTimeout(fragment, item); + + break; + case "write-timeout": + /* + * is an optional element + */ + putTimeout = processTimeout(fragment, item); + + break; + case "connection-timeout": + /* + * is an optional element + */ + connectionTimeout = processTimeout(fragment, item); + + break; + case "server-side-config": + /* + * is an optional element + */ + serverConfig = processServerSideConfig(item); + break; + default: + throw new XmlConfigurationException( + String.format("Unknown XML configuration element <%s> in <%s>", + item.getNodeName(), fragment.getTagName())); + } + } + } + + try { + Timeouts timeouts = getTimeouts(getTimeout, putTimeout, connectionTimeout); + if (serverConfig == null) { + if (connectionUri != null) { + return new ClusteringServiceConfiguration(connectionUri, timeouts); + } else { + return new ClusteringServiceConfiguration(serverAddresses, clusterTierManager, timeouts); + } + } + + ServerSideConfiguration serverSideConfiguration; + if (serverConfig.defaultServerResource == null) { + serverSideConfiguration = new ServerSideConfiguration(serverConfig.pools); + } else { + serverSideConfiguration = new ServerSideConfiguration(serverConfig.defaultServerResource, serverConfig.pools); + } + + if (connectionUri != null) { + return new ClusteringServiceConfiguration(connectionUri, timeouts, 
serverConfig.autoCreate, serverSideConfiguration); + } else { + return new ClusteringServiceConfiguration(serverAddresses, clusterTierManager, timeouts, serverConfig.autoCreate, serverSideConfiguration); + } + } catch (IllegalArgumentException e) { + throw new XmlConfigurationException(e); + } + } + throw new XmlConfigurationException(String.format("XML configuration element <%s> in <%s> is not supported", + fragment.getTagName(), (fragment.getParentNode() == null ? "null" : fragment.getParentNode().getLocalName()))); + } + + @Override + public Class getServiceType() { + return ClusteringService.class; + } + + /** + * Translates a {@link ServiceCreationConfiguration} to an xml element + * + * @param serviceCreationConfiguration + */ + @Override + public Element unparseServiceCreationConfiguration(final ServiceCreationConfiguration serviceCreationConfiguration) { + Element rootElement = unparseConfig(serviceCreationConfiguration); + return rootElement; + } + + private Element createRootUrlElement(Document doc, ClusteringServiceConfiguration clusteringServiceConfiguration) { + Element rootElement = doc.createElementNS(getNamespace().toString(), TC_CLUSTERED_NAMESPACE_PREFIX + CLUSTER_ELEMENT_NAME); + Element urlElement = createUrlElement(doc, clusteringServiceConfiguration); + rootElement.appendChild(urlElement); + return rootElement; + } + + protected Element createUrlElement(Document doc, ClusteringServiceConfiguration clusteringServiceConfiguration) { + Element urlElement = doc.createElement(TC_CLUSTERED_NAMESPACE_PREFIX + CONNECTION_ELEMENT_NAME); + urlElement.setAttribute(URL_ATTRIBUTE_NAME, clusteringServiceConfiguration.getClusterUri().toString()); + return urlElement; + } + + private Element createServerElement(Document doc, ClusteringServiceConfiguration clusteringServiceConfiguration) { + if (!(clusteringServiceConfiguration.getConnectionSource() instanceof ConnectionSource.ServerList)) { + throw new IllegalArgumentException("When connection URL is null, source of connection MUST be of type ConnectionSource.ServerList.class"); + } + ConnectionSource.ServerList servers = (ConnectionSource.ServerList)clusteringServiceConfiguration.getConnectionSource(); + Element rootElement = doc.createElementNS(getNamespace().toString(), TC_CLUSTERED_NAMESPACE_PREFIX + CLUSTER_ELEMENT_NAME); + Element connElement = createConnectionElementWrapper(doc, clusteringServiceConfiguration); + servers.getServers().forEach(server -> { + Element serverElement = doc.createElement(TC_CLUSTERED_NAMESPACE_PREFIX + SERVER_ELEMENT_NAME); + serverElement.setAttribute(HOST_ATTRIBUTE_NAME, server.getHostName()); + /* + If port is greater than 0, set the attribute. Otherwise, do not set. Default value will be taken. 
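A small sketch of that optional-port convention, using only `java.net` types; the `portAttribute` helper is hypothetical, not part of the parser:

```java
import java.net.InetSocketAddress;

// Port 0 stands for "not specified in the XML", so unparse writes the
// attribute back only when the port is positive.
public class PortAttributeConvention {
  public static void main(String[] args) {
    InetSocketAddress omitted = InetSocketAddress.createUnresolved("server-1", 0);
    InetSocketAddress explicit = InetSocketAddress.createUnresolved("server-1", 9410);
    System.out.println(portAttribute(omitted));  // "" -> attribute skipped, server default applies
    System.out.println(portAttribute(explicit)); // port="9410"
  }

  static String portAttribute(InetSocketAddress server) {
    return server.getPort() > 0 ? "port=\"" + server.getPort() + "\"" : "";
  }
}
```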
+ */ + if (server.getPort() > 0) { + serverElement.setAttribute(PORT_ATTRIBUTE_NAME, Integer.toString(server.getPort())); + } + connElement.appendChild(serverElement); + }); + rootElement.appendChild(connElement); + return rootElement; + } + + protected Element createConnectionElementWrapper(Document doc, ClusteringServiceConfiguration clusteringServiceConfiguration) { + Element connElement = doc.createElement(TC_CLUSTERED_NAMESPACE_PREFIX + CLUSTER_CONNECTION_ELEMENT_NAME); + connElement.setAttribute(CLUSTER_TIER_MANAGER_ATTRIBUTE_NAME, clusteringServiceConfiguration.getConnectionSource() + .getClusterTierManager()); + return connElement; + } + + @Override + protected Element createRootElement(Document doc, ClusteringServiceConfiguration clusteringServiceConfiguration) { + Element rootElement; + if (clusteringServiceConfiguration.getConnectionSource() instanceof ConnectionSource.ClusterUri) { + rootElement = createRootUrlElement(doc, clusteringServiceConfiguration); + } else { + rootElement = createServerElement(doc, clusteringServiceConfiguration); + } + + processTimeUnits(doc, rootElement, clusteringServiceConfiguration); + Element serverSideConfigurationElem = processServerSideElements(doc, clusteringServiceConfiguration); + rootElement.appendChild(serverSideConfigurationElem); + return rootElement; + } + + private void processTimeUnits(Document doc, Element parent, ClusteringServiceConfiguration clusteringServiceConfiguration) { + if (clusteringServiceConfiguration.getTimeouts() != null) { + Timeouts timeouts = clusteringServiceConfiguration.getTimeouts(); + + Element readTimeoutElem = createTimeoutElement(doc, READ_TIMEOUT_ELEMENT_NAME, timeouts.getReadOperationTimeout()); + Element writeTimeoutElem = createTimeoutElement(doc, WRITE_TIMEOUT_ELEMENT_NAME, timeouts.getWriteOperationTimeout()); + Element connectionTimeoutElem = createTimeoutElement(doc, CONNECTION_TIMEOUT_ELEMENT_NAME, timeouts.getConnectionTimeout()); + /* + Important: do not change the order of following three elements if corresponding change is not done in xsd + */ + parent.appendChild(readTimeoutElem); + parent.appendChild(writeTimeoutElem); + parent.appendChild(connectionTimeoutElem); + } + } + + private Element createTimeoutElement(Document doc, String timeoutName, Duration timeout) { + Element retElement; + if (READ_TIMEOUT_ELEMENT_NAME.equals(timeoutName)) { + retElement = doc.createElement(TC_CLUSTERED_NAMESPACE_PREFIX + READ_TIMEOUT_ELEMENT_NAME); + } else if (WRITE_TIMEOUT_ELEMENT_NAME.equals(timeoutName)) { + retElement = doc.createElement(TC_CLUSTERED_NAMESPACE_PREFIX + WRITE_TIMEOUT_ELEMENT_NAME); + } else { + retElement = doc.createElement(TC_CLUSTERED_NAMESPACE_PREFIX + CONNECTION_TIMEOUT_ELEMENT_NAME); + } + retElement.setAttribute(UNIT_ATTRIBUTE_NAME, DEFAULT_UNIT_ATTRIBUTE_VALUE); + retElement.setTextContent(Long.toString(timeout.getSeconds())); + return retElement; + } + + protected Element processServerSideElements(Document doc, ClusteringServiceConfiguration clusteringServiceConfiguration) { + Element serverSideConfigurationElem = createServerSideConfigurationElement(doc, clusteringServiceConfiguration); + + if (clusteringServiceConfiguration.getServerConfiguration() != null) { + ServerSideConfiguration serverSideConfiguration = clusteringServiceConfiguration.getServerConfiguration(); + String defaultServerResource = serverSideConfiguration.getDefaultServerResource(); + if (!(defaultServerResource == null || defaultServerResource.trim().length() == 0)) { + Element defaultResourceElement = 
createDefaultServerResourceElement(doc, defaultServerResource); + serverSideConfigurationElem.appendChild(defaultResourceElement); + } + Map resourcePools = serverSideConfiguration.getResourcePools(); + if (resourcePools != null) { + resourcePools.forEach( + (key, value) -> { + Element poolElement = createSharedPoolElement(doc, key, value); + serverSideConfigurationElem.appendChild(poolElement); + } + ); + } + } + return serverSideConfigurationElem; + } + + private Element createServerSideConfigurationElement(Document doc, ClusteringServiceConfiguration clusteringServiceConfiguration) { + Element serverSideConfigurationElem = doc.createElement(TC_CLUSTERED_NAMESPACE_PREFIX + SERVER_SIDE_CONFIG); + serverSideConfigurationElem.setAttribute(AUTO_CREATE_ATTRIBUTE_NAME, Boolean.toString(clusteringServiceConfiguration + .isAutoCreate())); + return serverSideConfigurationElem; + } + + + private Element createSharedPoolElement(Document doc, String poolName, ServerSideConfiguration.Pool pool) { + Element poolElement = doc.createElement(TC_CLUSTERED_NAMESPACE_PREFIX + SHARED_POOL_ELEMENT_NAME); + poolElement.setAttribute(NAME_ATTRIBUTE_NAME, poolName); + String from = pool.getServerResource(); + if (from != null) { + if (from.trim().length() == 0) { + throw new XmlConfigurationException("Resource pool name can not be empty."); + } + poolElement.setAttribute(FROM_ATTRIBUTE_NAME, from); + } + long memoryInBytes = MemoryUnit.B.convert(pool.getSize(), MemoryUnit.B); + poolElement.setAttribute(UNIT_ATTRIBUTE_NAME, MemoryUnit.B.toString()); + poolElement.setTextContent(Long.toString(memoryInBytes)); + return poolElement; + } + + private Element createDefaultServerResourceElement(Document doc, String defaultServerResource) { + Element defaultResourceElement = doc.createElement(TC_CLUSTERED_NAMESPACE_PREFIX + DEFAULT_RESOURCE_ELEMENT_NAME); + defaultResourceElement.setAttribute(FROM_ATTRIBUTE_NAME, defaultServerResource); + return defaultResourceElement; + } + + private ClusteringCacheManagerServiceConfigurationParser.ServerSideConfig processServerSideConfig(Node serverSideConfigElement) { + ClusteringCacheManagerServiceConfigurationParser.ServerSideConfig serverSideConfig = new ClusteringCacheManagerServiceConfigurationParser.ServerSideConfig(); + serverSideConfig.autoCreate = Boolean.parseBoolean(((Element)serverSideConfigElement).getAttribute("auto-create")); + final NodeList serverSideNodes = serverSideConfigElement.getChildNodes(); + for (int i = 0; i < serverSideNodes.getLength(); i++) { + final Node item = serverSideNodes.item(i); + if (Node.ELEMENT_NODE == item.getNodeType()) { + String nodeLocalName = item.getLocalName(); + if ("default-resource".equals(nodeLocalName)) { + serverSideConfig.defaultServerResource = ((Element)item).getAttribute("from"); + + } else if ("shared-pool".equals(nodeLocalName)) { + Element sharedPoolElement = (Element)item; + String poolName = sharedPoolElement.getAttribute("name"); // required + Attr fromAttr = sharedPoolElement.getAttributeNode("from"); // optional + String fromResource = (fromAttr == null ? null : fromAttr.getValue()); + Attr unitAttr = sharedPoolElement.getAttributeNode("unit"); // optional - default 'B' + String unit = (unitAttr == null ? 
"B" : unitAttr.getValue()); + MemoryUnit memoryUnit = MemoryUnit.valueOf(unit.toUpperCase(Locale.ENGLISH)); + + String quantityValue = sharedPoolElement.getFirstChild().getNodeValue(); + long quantity; + try { + quantity = Long.parseLong(quantityValue); + } catch (NumberFormatException e) { + throw new XmlConfigurationException("Magnitude of value specified for is too large"); + } + + ServerSideConfiguration.Pool poolDefinition; + if (fromResource == null) { + poolDefinition = new ServerSideConfiguration.Pool(memoryUnit.toBytes(quantity)); + } else { + poolDefinition = new ServerSideConfiguration.Pool(memoryUnit.toBytes(quantity), fromResource); + } + + if (serverSideConfig.pools.put(poolName, poolDefinition) != null) { + throw new XmlConfigurationException("Duplicate definition for "); + } + } + } + } + return serverSideConfig; + } + + private Duration processTimeout(Element parentElement, Node timeoutNode) { + try { + // are direct subtype of ehcache:time-type; use JAXB to interpret it + JAXBContext context = JAXBContext.newInstance(TimeType.class); + Unmarshaller unmarshaller = context.createUnmarshaller(); + JAXBElement jaxbElement = unmarshaller.unmarshal(timeoutNode, TimeType.class); + + TimeType timeType = jaxbElement.getValue(); + BigInteger amount = timeType.getValue(); + if (amount.compareTo(BigInteger.valueOf(Long.MAX_VALUE)) > 0) { + throw new XmlConfigurationException( + String.format("Value of XML configuration element <%s> in <%s> exceeds allowed value - %s", + timeoutNode.getNodeName(), parentElement.getTagName(), amount)); + } + return Duration.of(amount.longValue(), convertToJavaTimeUnit(timeType.getUnit())); + + } catch (JAXBException e) { + throw new XmlConfigurationException(e); + } + } + + private Timeouts getTimeouts(Duration getTimeout, Duration putTimeout, Duration connectionTimeout) { + TimeoutsBuilder builder = TimeoutsBuilder.timeouts(); + if (getTimeout != null) { + builder.read(getTimeout); + } + if (putTimeout != null) { + builder.write(putTimeout); + } + if (connectionTimeout != null) { + builder.connection(connectionTimeout); + } + return builder.build(); + } + + private static final class ServerSideConfig { + private boolean autoCreate = false; + private String defaultServerResource = null; + private final Map pools = new HashMap<>(); + } +} diff --git a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/config/xml/ClusteringCacheServiceConfigurationParser.java b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/config/xml/ClusteringCacheServiceConfigurationParser.java new file mode 100644 index 0000000000..f85cd0aa5b --- /dev/null +++ b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/config/xml/ClusteringCacheServiceConfigurationParser.java @@ -0,0 +1,93 @@ +/* + * Copyright Terracotta, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.ehcache.clustered.client.internal.config.xml; + +import org.ehcache.clustered.client.config.ClusteredStoreConfiguration; +import org.ehcache.clustered.client.internal.store.ClusteredStore; +import org.ehcache.clustered.common.Consistency; +import org.ehcache.spi.service.ServiceConfiguration; +import org.ehcache.xml.BaseConfigParser; +import org.ehcache.xml.CacheServiceConfigurationParser; +import org.ehcache.xml.exceptions.XmlConfigurationException; +import org.w3c.dom.Document; +import org.w3c.dom.Element; + +import java.io.IOException; +import java.net.URI; + +import javax.xml.transform.Source; +import javax.xml.transform.stream.StreamSource; + +import static org.ehcache.clustered.client.internal.config.xml.ClusteredCacheConstants.NAMESPACE; +import static org.ehcache.clustered.client.internal.config.xml.ClusteredCacheConstants.XML_SCHEMA; +import static org.ehcache.clustered.client.internal.config.xml.ClusteredCacheConstants.TC_CLUSTERED_NAMESPACE_PREFIX; + +/** + * Provides parsing support for the {@code } elements representing a {@link ClusteredStore.Provider ClusteringService}. + * + * @see ClusteredCacheConstants#XSD + */ +public class ClusteringCacheServiceConfigurationParser extends BaseConfigParser implements CacheServiceConfigurationParser { + + public static final String CLUSTERED_STORE_ELEMENT_NAME = "clustered-store"; + public static final String CONSISTENCY_ATTRIBUTE_NAME = "consistency"; + + public ClusteringCacheServiceConfigurationParser() { + super(ClusteredStoreConfiguration.class); + } + + @Override + public Source getXmlSchema() throws IOException { + return new StreamSource(XML_SCHEMA.openStream()); + } + + @Override + public URI getNamespace() { + return NAMESPACE; + } + + @Override + public ServiceConfiguration parseServiceConfiguration(Element fragment) { + if (CLUSTERED_STORE_ELEMENT_NAME.equals(fragment.getLocalName())) { + if (fragment.hasAttribute(CONSISTENCY_ATTRIBUTE_NAME)) { + return new ClusteredStoreConfiguration(Consistency.valueOf(fragment.getAttribute("consistency").toUpperCase())); + } else { + return new ClusteredStoreConfiguration(); + } + } + throw new XmlConfigurationException(String.format("XML configuration element <%s> in <%s> is not supported", + fragment.getTagName(), (fragment.getParentNode() == null ? 
"null" : fragment.getParentNode().getLocalName()))); + } + + @Override + public Class getServiceType() { + return ClusteredStore.Provider.class; + } + + @Override + public Element unparseServiceConfiguration(ServiceConfiguration serviceConfiguration) { + return unparseConfig(serviceConfiguration); + } + + @Override + protected Element createRootElement(Document doc, ClusteredStoreConfiguration clusteredStoreConfiguration) { + Consistency consistency = clusteredStoreConfiguration.getConsistency(); + Element rootElement = doc.createElementNS(getNamespace().toString(), TC_CLUSTERED_NAMESPACE_PREFIX + CLUSTERED_STORE_ELEMENT_NAME); + rootElement.setAttribute(CONSISTENCY_ATTRIBUTE_NAME, consistency.name().toLowerCase()); + return rootElement; + } + +} diff --git a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/config/xml/ClusteringServiceConfigurationParser.java b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/config/xml/ClusteringServiceConfigurationParser.java deleted file mode 100644 index ddc8fc1f62..0000000000 --- a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/config/xml/ClusteringServiceConfigurationParser.java +++ /dev/null @@ -1,263 +0,0 @@ -/* - * Copyright Terracotta, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.ehcache.clustered.client.internal.config.xml; - -import org.ehcache.clustered.client.config.ClusteredStoreConfiguration; -import org.ehcache.clustered.client.config.ClusteringServiceConfiguration; -import org.ehcache.clustered.client.config.Timeouts; -import org.ehcache.clustered.client.config.builders.TimeoutsBuilder; -import org.ehcache.clustered.client.internal.store.ClusteredStore; -import org.ehcache.clustered.client.service.ClusteringService; -import org.ehcache.clustered.common.Consistency; -import org.ehcache.clustered.common.ServerSideConfiguration; -import org.ehcache.clustered.common.ServerSideConfiguration.Pool; -import org.ehcache.config.units.MemoryUnit; -import org.ehcache.spi.service.ServiceConfiguration; -import org.ehcache.spi.service.ServiceCreationConfiguration; -import org.ehcache.xml.CacheManagerServiceConfigurationParser; -import org.ehcache.xml.CacheServiceConfigurationParser; -import org.ehcache.xml.exceptions.XmlConfigurationException; -import org.ehcache.xml.model.TimeType; -import org.w3c.dom.Attr; -import org.w3c.dom.Element; -import org.w3c.dom.Node; -import org.w3c.dom.NodeList; - -import java.io.IOException; -import java.math.BigInteger; -import java.net.URI; -import java.net.URISyntaxException; -import java.time.Duration; -import java.util.HashMap; -import java.util.Locale; -import java.util.Map; - -import javax.xml.bind.JAXBContext; -import javax.xml.bind.JAXBElement; -import javax.xml.bind.JAXBException; -import javax.xml.bind.Unmarshaller; -import javax.xml.transform.Source; -import javax.xml.transform.stream.StreamSource; - -import static org.ehcache.clustered.client.internal.config.xml.ClusteredCacheConstants.*; -import static org.ehcache.xml.XmlModel.convertToJavaTimeUnit; - -/** - * Provides parsing support for the {@code } elements representing a {@link ClusteringService ClusteringService}. - * - * @see ClusteredCacheConstants#XSD - */ -public class ClusteringServiceConfigurationParser implements CacheManagerServiceConfigurationParser, - CacheServiceConfigurationParser { - - public static final String CLUSTERED_STORE_ELEMENT_NAME = "clustered-store"; - public static final String CONSISTENCY_ATTRIBUTE_NAME = "consistency"; - - @Override - public Source getXmlSchema() throws IOException { - return new StreamSource(XML_SCHEMA.openStream()); - } - - @Override - public URI getNamespace() { - return NAMESPACE; - } - - @Override - public ServiceConfiguration parseServiceConfiguration(Element fragment) { - if (CLUSTERED_STORE_ELEMENT_NAME.equals(fragment.getLocalName())) { - if (fragment.hasAttribute(CONSISTENCY_ATTRIBUTE_NAME)) { - return new ClusteredStoreConfiguration(Consistency.valueOf(fragment.getAttribute("consistency").toUpperCase())); - } else { - return new ClusteredStoreConfiguration(); - } - } - throw new XmlConfigurationException(String.format("XML configuration element <%s> in <%s> is not supported", - fragment.getTagName(), (fragment.getParentNode() == null ? "null" : fragment.getParentNode().getLocalName()))); - } - - /** - * Complete interpretation of the top-level elements defined in {@value ClusteredCacheConstants#XSD}. - * This method is called only for those elements from the namespace set by {@link ClusteredCacheConstants#NAMESPACE}. - *
<p>
- * This method presumes the element presented is valid according to the XSD. - * - * @param fragment the XML fragment to process - * - * @return a {@link org.ehcache.clustered.client.config.ClusteringServiceConfiguration ClusteringServiceConfiguration} - */ - @Override - public ServiceCreationConfiguration parseServiceCreationConfiguration(final Element fragment) { - - if ("cluster".equals(fragment.getLocalName())) { - - ServerSideConfig serverConfig = null; - URI connectionUri = null; - Duration getTimeout = null, putTimeout = null, connectionTimeout = null; - final NodeList childNodes = fragment.getChildNodes(); - for (int i = 0; i < childNodes.getLength(); i++) { - final Node item = childNodes.item(i); - if (Node.ELEMENT_NODE == item.getNodeType()) { - if ("connection".equals(item.getLocalName())) { - /* - * is a required element in the XSD - */ - final Attr urlAttribute = ((Element)item).getAttributeNode("url"); - final String urlValue = urlAttribute.getValue(); - try { - connectionUri = new URI(urlValue); - } catch (URISyntaxException e) { - throw new XmlConfigurationException( - String.format("Value of %s attribute on XML configuration element <%s> in <%s> is not a valid URI - '%s'", - urlAttribute.getName(), item.getNodeName(), fragment.getTagName(), connectionUri), e); - } - - } else if ("read-timeout".equals(item.getLocalName())) { - /* - * is an optional element - */ - getTimeout = processTimeout(fragment, item); - - } else if ("write-timeout".equals(item.getLocalName())) { - /* - * is an optional element - */ - putTimeout = processTimeout(fragment, item); - - } else if ("connection-timeout".equals(item.getLocalName())) { - /* - * is an optional element - */ - connectionTimeout = processTimeout(fragment, item); - - } else if ("server-side-config".equals(item.getLocalName())) { - /* - * is an optional element - */ - serverConfig = processServerSideConfig(item); - } - } - } - - try { - Timeouts timeouts = getTimeouts(getTimeout, putTimeout, connectionTimeout); - if (serverConfig == null) { - return new ClusteringServiceConfiguration(connectionUri, timeouts); - } - - ServerSideConfiguration serverSideConfiguration; - if (serverConfig.defaultServerResource == null) { - serverSideConfiguration = new ServerSideConfiguration(serverConfig.pools); - } else { - serverSideConfiguration = new ServerSideConfiguration(serverConfig.defaultServerResource, serverConfig.pools); - } - - return new ClusteringServiceConfiguration(connectionUri, timeouts, serverConfig.autoCreate, serverSideConfiguration); - } catch (IllegalArgumentException e) { - throw new XmlConfigurationException(e); - } - } - throw new XmlConfigurationException(String.format("XML configuration element <%s> in <%s> is not supported", - fragment.getTagName(), (fragment.getParentNode() == null ? 
"null" : fragment.getParentNode().getLocalName()))); - } - - private Timeouts getTimeouts(Duration getTimeout, Duration putTimeout, Duration connectionTimeout) { - TimeoutsBuilder builder = TimeoutsBuilder.timeouts(); - if (getTimeout != null) { - builder.read(getTimeout); - } - if(putTimeout != null) { - builder.write(putTimeout); - } - if(connectionTimeout != null) { - builder.connection(connectionTimeout); - } - return builder.build(); - } - - private Duration processTimeout(Element parentElement, Node timeoutNode) { - try { - // are direct subtype of ehcache:time-type; use JAXB to interpret it - JAXBContext context = JAXBContext.newInstance(TimeType.class.getPackage().getName()); - Unmarshaller unmarshaller = context.createUnmarshaller(); - JAXBElement jaxbElement = unmarshaller.unmarshal(timeoutNode, TimeType.class); - - TimeType timeType = jaxbElement.getValue(); - BigInteger amount = timeType.getValue(); - if (amount.compareTo(BigInteger.valueOf(Long.MAX_VALUE)) > 0) { - throw new XmlConfigurationException( - String.format("Value of XML configuration element <%s> in <%s> exceeds allowed value - %s", - timeoutNode.getNodeName(), parentElement.getTagName(), amount)); - } - return Duration.of(amount.longValue(), convertToJavaTimeUnit(timeType.getUnit())); - - } catch (JAXBException e) { - throw new XmlConfigurationException(e); - } - } - - private ServerSideConfig processServerSideConfig(Node serverSideConfigElement) { - ServerSideConfig serverSideConfig = new ServerSideConfig(); - serverSideConfig.autoCreate = Boolean.parseBoolean(((Element) serverSideConfigElement).getAttribute("auto-create")); - final NodeList serverSideNodes = serverSideConfigElement.getChildNodes(); - for (int i = 0; i < serverSideNodes.getLength(); i++) { - final Node item = serverSideNodes.item(i); - if (Node.ELEMENT_NODE == item.getNodeType()) { - String nodeLocalName = item.getLocalName(); - if ("default-resource".equals(nodeLocalName)) { - serverSideConfig.defaultServerResource = ((Element)item).getAttribute("from"); - - } else if ("shared-pool".equals(nodeLocalName)) { - Element sharedPoolElement = (Element)item; - String poolName = sharedPoolElement.getAttribute("name"); // required - Attr fromAttr = sharedPoolElement.getAttributeNode("from"); // optional - String fromResource = (fromAttr == null ? null : fromAttr.getValue()); - Attr unitAttr = sharedPoolElement.getAttributeNode("unit"); // optional - default 'B' - String unit = (unitAttr == null ? 
"B" : unitAttr.getValue()); - MemoryUnit memoryUnit = MemoryUnit.valueOf(unit.toUpperCase(Locale.ENGLISH)); - - String quantityValue = sharedPoolElement.getFirstChild().getNodeValue(); - long quantity; - try { - quantity = Long.parseLong(quantityValue); - } catch (NumberFormatException e) { - throw new XmlConfigurationException("Magnitude of value specified for is too large"); - } - - Pool poolDefinition; - if (fromResource == null) { - poolDefinition = new Pool(memoryUnit.toBytes(quantity)); - } else { - poolDefinition = new Pool(memoryUnit.toBytes(quantity), fromResource); - } - - if (serverSideConfig.pools.put(poolName, poolDefinition) != null) { - throw new XmlConfigurationException("Duplicate definition for "); - } - } - } - } - return serverSideConfig; - } - - private static final class ServerSideConfig { - private boolean autoCreate = false; - private String defaultServerResource = null; - private final Map pools = new HashMap<>(); - } -} diff --git a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/loaderwriter/ClusteredLoaderWriterStore.java b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/loaderwriter/ClusteredLoaderWriterStore.java new file mode 100644 index 0000000000..8acf69ff81 --- /dev/null +++ b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/loaderwriter/ClusteredLoaderWriterStore.java @@ -0,0 +1,352 @@ +/* + * Copyright Terracotta, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.ehcache.clustered.client.internal.loaderwriter; + +import org.ehcache.CacheIterationException; +import org.ehcache.clustered.client.internal.store.ClusteredStore; +import org.ehcache.clustered.client.internal.store.ClusteredValueHolder; +import org.ehcache.clustered.client.internal.store.ResolvedChain; +import org.ehcache.clustered.client.internal.store.ServerStoreProxy; +import org.ehcache.clustered.client.internal.store.lock.LockManager; +import org.ehcache.clustered.client.internal.store.operations.ChainResolver; +import org.ehcache.clustered.client.internal.store.operations.EternalChainResolver; +import org.ehcache.clustered.common.internal.store.operations.ConditionalRemoveOperation; +import org.ehcache.clustered.common.internal.store.operations.ConditionalReplaceOperation; +import org.ehcache.clustered.common.internal.store.operations.PutIfAbsentOperation; +import org.ehcache.clustered.common.internal.store.operations.PutOperation; +import org.ehcache.clustered.common.internal.store.operations.RemoveOperation; +import org.ehcache.clustered.common.internal.store.operations.ReplaceOperation; +import org.ehcache.clustered.common.internal.store.operations.Result; +import org.ehcache.clustered.common.internal.store.operations.codecs.OperationsCodec; +import org.ehcache.clustered.client.service.ClusteringService; +import org.ehcache.clustered.common.internal.store.Chain; +import org.ehcache.config.ResourceType; +import org.ehcache.core.exceptions.StorePassThroughException; +import org.ehcache.core.spi.store.tiering.AuthoritativeTier; +import org.ehcache.core.spi.time.TimeSource; +import org.ehcache.core.spi.time.TimeSourceService; +import org.ehcache.spi.loaderwriter.CacheLoaderWriter; +import org.ehcache.spi.loaderwriter.CacheLoaderWriterConfiguration; +import org.ehcache.spi.resilience.StoreAccessException; +import org.ehcache.spi.service.ServiceConfiguration; +import org.ehcache.spi.service.ServiceDependencies; + +import java.nio.ByteBuffer; +import java.util.Collection; +import java.util.Set; +import java.util.concurrent.TimeoutException; + +import static org.ehcache.core.exceptions.ExceptionFactory.newCacheLoadingException; +import static org.ehcache.core.exceptions.ExceptionFactory.newCacheWritingException; +import static org.ehcache.core.exceptions.StorePassThroughException.handleException; + +public class ClusteredLoaderWriterStore extends ClusteredStore implements AuthoritativeTier { + + private final CacheLoaderWriter cacheLoaderWriter; + private final boolean useLoaderInAtomics; + + public ClusteredLoaderWriterStore(Configuration config, OperationsCodec codec, ChainResolver resolver, TimeSource timeSource, + CacheLoaderWriter loaderWriter, boolean useLoaderInAtomics) { + super(config, codec, resolver, timeSource); + this.cacheLoaderWriter = loaderWriter; + this.useLoaderInAtomics = useLoaderInAtomics; + } + + /** + * For Tests + */ + ClusteredLoaderWriterStore(Configuration config, OperationsCodec codec, EternalChainResolver resolver, + ServerStoreProxy proxy, TimeSource timeSource, CacheLoaderWriter loaderWriter) { + super(config, codec, resolver, proxy, timeSource); + this.cacheLoaderWriter = loaderWriter; + this.useLoaderInAtomics = true; + } + + private LockManager getProxy() { + return (LockManager) storeProxy; + } + + @Override + protected ValueHolder getInternal(K key) throws StoreAccessException, TimeoutException { + ValueHolder holder = super.getInternal(key); + try { + if (holder == null) { + long hash = extractLongKey(key); + boolean unlocked = 
false; + getProxy().lock(hash); + try { + V value = null; + try { + value = cacheLoaderWriter.load(key); + } catch (Exception e) { + throw new StorePassThroughException(new CacheIterationException(e)); + } + if (value == null) { + return null; + } + append(key, value); + unlocked = true; + return new ClusteredValueHolder<>(value); + } finally { + if (!unlocked) { + getProxy().unlock(hash); + } + } + } + } catch (RuntimeException re) { + throw handleException(re); + } + return holder; + } + + private void append(K key, V value) throws TimeoutException { + PutOperation operation = new PutOperation<>(key, value, timeSource.getTimeMillis()); + ByteBuffer payload = codec.encode(operation); + long extractedKey = extractLongKey(key); + storeProxy.append(extractedKey, payload); + } + + @Override + protected PutStatus silentPut(K key, V value) throws StoreAccessException { + try { + long hash = extractLongKey(key); + boolean unlocked = false; + getProxy().lock(hash); + try { + cacheLoaderWriter.write(key, value); + append(key, value); + unlocked = true; + } finally { + if (!unlocked) { + getProxy().unlock(hash); + } + } + return PutStatus.PUT; + } catch (Exception e) { + throw handleException(e); + } + } + + @Override + protected boolean silentRemove(K key) throws StoreAccessException { + try { + long hash = extractLongKey(key); + boolean unlocked = false; + RemoveOperation operation = new RemoveOperation<>(key, timeSource.getTimeMillis()); + ByteBuffer payLoad = codec.encode(operation); + Chain chain = getProxy().lock(hash); + try { + cacheLoaderWriter.delete(key); + storeProxy.append(hash, payLoad); + unlocked = true; + ResolvedChain resolvedChain = resolver.resolve(chain, key, timeSource.getTimeMillis()); + if (resolvedChain.getResolvedResult(key) != null) { + return true; + } else { + return false; + } + } finally { + if (!unlocked) { + getProxy().unlock(hash); + } + } + } catch (Exception e) { + throw handleException(e); + } + } + + @Override + protected V silentPutIfAbsent(K key, V value) throws StoreAccessException { + try { + long hash = extractLongKey(key); + boolean unlocked = false; + Chain existing = getProxy().lock(hash); + try { + ResolvedChain resolvedChain = resolver.resolve(existing, key, timeSource.getTimeMillis()); + Result result = resolvedChain.getResolvedResult(key); + V existingVal = result == null ? null : result.getValue(); + if (existingVal != null) { + return existingVal; + } else { + existingVal = loadFromLoaderWriter(key); + if (existingVal == null) { + cacheLoaderWriter.write(key, value); + PutIfAbsentOperation operation = new PutIfAbsentOperation<>(key, value, timeSource.getTimeMillis()); + ByteBuffer payload = codec.encode(operation); + storeProxy.append(hash, payload); + unlocked = true; + } + return existingVal; + } + } finally { + if (!unlocked) { + getProxy().unlock(hash); + } + } + } catch (Exception e) { + throw handleException(e); + } + } + + @Override + protected V silentReplace(K key, V value) throws StoreAccessException { + try { + long hash = extractLongKey(key); + boolean unlocked = false; + Chain existing = getProxy().lock(hash); + try { + ResolvedChain resolvedChain = resolver.resolve(existing, key, timeSource.getTimeMillis()); + Result result = resolvedChain.getResolvedResult(key); + V existingVal = result == null ? 
null : result.getValue(); + if (existingVal != null) { + cacheLoaderWriter.write(key, value); + ReplaceOperation operation = new ReplaceOperation<>(key, value, timeSource.getTimeMillis()); + ByteBuffer payload = codec.encode(operation); + storeProxy.append(hash, payload); + unlocked = true; + return existingVal; + } else { + V inCache = loadFromLoaderWriter(key); + if (inCache != null) { + cacheLoaderWriter.write(key, value); + ReplaceOperation operation = new ReplaceOperation<>(key, value, timeSource.getTimeMillis()); + ByteBuffer payload = codec.encode(operation); + storeProxy.append(hash, payload); + unlocked = true; + return inCache; + } else { + return null; + } + } + } finally { + if (!unlocked) { + getProxy().unlock(hash); + } + } + } catch (Exception e) { + throw handleException(e); + } + } + + @Override + protected V silentRemove(K key, V value) throws StoreAccessException { + try { + long hash = extractLongKey(key); + boolean unlocked = false; + Chain existing = getProxy().lock(hash); + try { + ResolvedChain resolvedChain = resolver.resolve(existing, key, timeSource.getTimeMillis()); + Result result = resolvedChain.getResolvedResult(key); + V existingVal = result == null ? null : result.getValue(); + if (existingVal == null) { + existingVal = loadFromLoaderWriter(key); + } + if (value.equals(existingVal)) { + cacheLoaderWriter.delete(key); + ConditionalRemoveOperation operation = new ConditionalRemoveOperation<>(key, value, timeSource.getTimeMillis()); + ByteBuffer payLoad = codec.encode(operation); + storeProxy.append(hash, payLoad); + unlocked = true; + } + return existingVal; + } finally { + if (!unlocked) { + getProxy().unlock(hash); + } + } + } catch (Exception e) { + throw handleException(e); + } + } + + @Override + protected V silentReplace(K key, V oldValue, V newValue) throws StoreAccessException { + try { + long hash = extractLongKey(key); + boolean unlocked = false; + Chain existing = getProxy().lock(hash); + try { + ResolvedChain resolvedChain = resolver.resolve(existing, key, timeSource.getTimeMillis()); + Result result = resolvedChain.getResolvedResult(key); + V existingVal = result == null ? null : result.getValue(); + if (existingVal == null) { + existingVal = loadFromLoaderWriter(key); + } + if (oldValue.equals(existingVal)) { + cacheLoaderWriter.write(key, newValue); + ConditionalReplaceOperation operation = new ConditionalReplaceOperation<>(key, oldValue, newValue, timeSource.getTimeMillis()); + ByteBuffer payLoad = codec.encode(operation); + storeProxy.append(hash, payLoad); + unlocked = true; + } + return existingVal; + } finally { + if (!unlocked) { + getProxy().unlock(hash); + } + } + } catch (Exception e) { + throw handleException(e); + } + } + + private V loadFromLoaderWriter(K key) { + if (useLoaderInAtomics) { + try { + return cacheLoaderWriter.load(key); + } catch (Exception e) { + throw new StorePassThroughException(newCacheLoadingException(e)); + } + } + return null; + } + + /** + * Provider of {@link ClusteredLoaderWriterStore} instances. 
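+ * <p>
+ * Illustrative only, not part of this change: a cache that combines a clustered
+ * resource pool with a {@code CacheLoaderWriter} is expected to be routed to this
+ * provider through its boosted rank. The loader-writer implementation below is
+ * hypothetical; the builder calls are the public Ehcache ones.
+ * <pre>{@code
+ * CacheConfiguration<Long, String> config = CacheConfigurationBuilder
+ *     .newCacheConfigurationBuilder(Long.class, String.class,
+ *         ResourcePoolsBuilder.newResourcePoolsBuilder()
+ *             .with(ClusteredResourcePoolBuilder.clusteredDedicated("primary-server-resource", 2, MemoryUnit.MB)))
+ *     .withLoaderWriter(new SampleLoaderWriter<Long, String>()) // hypothetical implementation
+ *     .build();
+ * }</pre>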
+ */ + @ServiceDependencies({ TimeSourceService.class, ClusteringService.class}) + public static class Provider extends ClusteredStore.Provider { + @Override + protected ClusteredStore createStore(Configuration storeConfig, + OperationsCodec codec, + ChainResolver resolver, + TimeSource timeSource, + boolean useLoaderInAtomics, + Object[] serviceConfigs) { + return new ClusteredLoaderWriterStore<>(storeConfig, codec, resolver, timeSource, + storeConfig.getCacheLoaderWriter(), useLoaderInAtomics); + } + + @Override + public int rank(Set> resourceTypes, Collection> serviceConfigs) { + int parentRank = super.rank(resourceTypes, serviceConfigs); + if (parentRank == 0 || serviceConfigs.stream().noneMatch(CacheLoaderWriterConfiguration.class::isInstance)) { + return 0; + } + return parentRank + 1; + } + + @Override + public int rankAuthority(ResourceType authorityResource, Collection> serviceConfigs) { + int parentRank = super.rankAuthority(authorityResource, serviceConfigs); + if (parentRank == 0 || serviceConfigs.stream().noneMatch(CacheLoaderWriterConfiguration.class::isInstance)) { + return 0; + } + return parentRank + 1; + } + } + +} diff --git a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/loaderwriter/ClusteredLoaderWriterStoreProviderFactory.java b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/loaderwriter/ClusteredLoaderWriterStoreProviderFactory.java new file mode 100644 index 0000000000..7d68a59ce4 --- /dev/null +++ b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/loaderwriter/ClusteredLoaderWriterStoreProviderFactory.java @@ -0,0 +1,32 @@ +/* + * Copyright Terracotta, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.ehcache.clustered.client.internal.loaderwriter; + +import org.ehcache.core.spi.service.ServiceFactory; +import org.ehcache.spi.service.ServiceCreationConfiguration; + +public class ClusteredLoaderWriterStoreProviderFactory implements ServiceFactory { + + @Override + public ClusteredLoaderWriterStore.Provider create(ServiceCreationConfiguration configuration) { + return new ClusteredLoaderWriterStore.Provider(); + } + + @Override + public Class getServiceType() { + return ClusteredLoaderWriterStore.Provider.class; + } +} diff --git a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/loaderwriter/DelegatingLoaderWriterStore.java b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/loaderwriter/DelegatingLoaderWriterStore.java new file mode 100644 index 0000000000..920ea16c42 --- /dev/null +++ b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/loaderwriter/DelegatingLoaderWriterStore.java @@ -0,0 +1,133 @@ +/* + * Copyright Terracotta, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.ehcache.clustered.client.internal.loaderwriter; + +import org.ehcache.Cache; +import org.ehcache.core.CacheConfigurationChangeListener; +import org.ehcache.core.events.NullStoreEventDispatcher; +import org.ehcache.core.spi.store.Store; +import org.ehcache.core.spi.store.WrapperStore; +import org.ehcache.core.spi.store.events.StoreEventSource; +import org.ehcache.spi.resilience.StoreAccessException; + +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.function.BiFunction; +import java.util.function.Consumer; +import java.util.function.Function; +import java.util.function.Supplier; + +public class DelegatingLoaderWriterStore implements WrapperStore { + + private final Store delegate; + + public DelegatingLoaderWriterStore(Store store) { + this.delegate = store; + } + + @Override + public ValueHolder get(K key) throws StoreAccessException { + return delegate.get(key); + } + + @Override + public boolean containsKey(K key) throws StoreAccessException { + return delegate.containsKey(key); + } + + @Override + public PutStatus put(K key, V value) throws StoreAccessException { + return delegate.put(key, value); + } + + @Override + public ValueHolder putIfAbsent(K key, V value, Consumer put) throws StoreAccessException { + return delegate.putIfAbsent(key, value, put); + } + + @Override + public boolean remove(K key) throws StoreAccessException { + return delegate.remove(key); + } + + @Override + public RemoveStatus remove(K key, V value) throws StoreAccessException { + return delegate.remove(key, value); + } + + @Override + public ValueHolder replace(K key, V value) throws StoreAccessException { + return delegate.replace(key, value); + } + + @Override + public ReplaceStatus replace(K key, V oldValue, V newValue) throws StoreAccessException { + return delegate.replace(key, oldValue, newValue); + } + + @Override + public void clear() throws StoreAccessException { + delegate.clear(); + } + + @Override + public StoreEventSource getStoreEventSource() { + return new NullStoreEventDispatcher<>(); + } + + @Override + public Iterator>> iterator() { + throw new UnsupportedOperationException("Implement me"); + } + + @Override + public ValueHolder getAndCompute(K key, BiFunction mappingFunction) throws StoreAccessException { + throw new UnsupportedOperationException("Implement me"); + } + + @Override + public ValueHolder computeAndGet(K key, BiFunction mappingFunction, Supplier replaceEqual, Supplier invokeWriter) throws StoreAccessException { + throw new UnsupportedOperationException("Implement me"); + } + + @Override + public ValueHolder computeIfAbsent(K key, Function mappingFunction) throws StoreAccessException { + throw new UnsupportedOperationException("Implement me"); + } + + @Override + public Map> bulkCompute(Set keys, Function>, Iterable>> remappingFunction) throws StoreAccessException { + return delegate.bulkCompute(keys, remappingFunction); + } + + @Override + public Map> bulkCompute(Set keys, Function>, Iterable>> remappingFunction, Supplier replaceEqual) throws StoreAccessException { + throw new 
UnsupportedOperationException("Implement me"); + } + + @Override + public Map> bulkComputeIfAbsent(Set keys, Function, Iterable>> mappingFunction) throws StoreAccessException { + return delegate.bulkComputeIfAbsent(keys, mappingFunction); + } + + @Override + public List getConfigurationChangeListeners() { + return Collections.emptyList(); + } + +} diff --git a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/loaderwriter/DelegatingLoaderWriterStoreProvider.java b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/loaderwriter/DelegatingLoaderWriterStoreProvider.java new file mode 100644 index 0000000000..d0e72a93e2 --- /dev/null +++ b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/loaderwriter/DelegatingLoaderWriterStoreProvider.java @@ -0,0 +1,95 @@ +/* + * Copyright Terracotta, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.ehcache.clustered.client.internal.loaderwriter; + +import org.ehcache.clustered.client.service.ClusteringService; +import org.ehcache.clustered.client.service.ClusteringService.ClusteredCacheIdentifier; +import org.ehcache.config.ResourceType; +import org.ehcache.core.internal.store.StoreSupport; +import org.ehcache.core.spi.store.Store; +import org.ehcache.core.spi.store.WrapperStore; +import org.ehcache.impl.internal.concurrent.ConcurrentHashMap; +import org.ehcache.impl.internal.store.loaderwriter.LoaderWriterStoreProvider.StoreRef; +import org.ehcache.spi.loaderwriter.CacheLoaderWriterConfiguration; +import org.ehcache.spi.loaderwriter.CacheLoaderWriterProvider; +import org.ehcache.spi.service.Service; +import org.ehcache.spi.service.ServiceConfiguration; +import org.ehcache.spi.service.ServiceDependencies; +import org.ehcache.spi.service.ServiceProvider; + +import java.util.Arrays; +import java.util.Collection; +import java.util.Map; +import java.util.Set; + +import static org.ehcache.core.spi.service.ServiceUtils.findSingletonAmongst; + +@ServiceDependencies({CacheLoaderWriterProvider.class, ClusteringService.class}) +public class DelegatingLoaderWriterStoreProvider implements WrapperStore.Provider { + private volatile ServiceProvider serviceProvider; + + private final Map, StoreRef> createdStores = new ConcurrentHashMap<>(); + + @Override + public Store createStore(Store.Configuration storeConfig, ServiceConfiguration... 
serviceConfigs) {
+
+    Store.Provider underlyingStoreProvider = StoreSupport
+      .selectStoreProvider(serviceProvider, storeConfig.getResourcePools().getResourceTypeSet(),
+        Arrays.asList(serviceConfigs));
+
+    Store<K, V> store = underlyingStoreProvider.createStore(storeConfig, serviceConfigs);
+    DelegatingLoaderWriterStore<K, V> loaderWriterStore = new DelegatingLoaderWriterStore<>(store);
+    createdStores.put(loaderWriterStore, new StoreRef<>(store, underlyingStoreProvider));
+    return loaderWriterStore;
+  }
+
+  @Override
+  public void releaseStore(Store<?, ?> resource) {
+    StoreRef<?, ?> storeRef = createdStores.remove(resource);
+    storeRef.getUnderlyingStoreProvider().releaseStore(storeRef.getUnderlyingStore());
+  }
+
+  @Override
+  public void initStore(Store<?, ?> resource) {
+    StoreRef<?, ?> storeRef = createdStores.get(resource);
+    storeRef.getUnderlyingStoreProvider().initStore(storeRef.getUnderlyingStore());
+  }
+
+  @Override
+  public int rank(Set<ResourceType<?>> resourceTypes, Collection<ServiceConfiguration<?>> serviceConfigs) {
+    throw new UnsupportedOperationException("It's a wrapper store provider; it does not support regular ranking");
+  }
+
+  @Override
+  public void start(ServiceProvider<Service> serviceProvider) {
+    this.serviceProvider = serviceProvider;
+  }
+
+  @Override
+  public void stop() {
+    this.serviceProvider = null;
+  }
+
+  @Override
+  public int wrapperStoreRank(Collection<ServiceConfiguration<?>> serviceConfigs) {
+    CacheLoaderWriterConfiguration loaderWriterConfiguration = findSingletonAmongst(CacheLoaderWriterConfiguration.class, serviceConfigs);
+    ClusteredCacheIdentifier clusteredCacheIdentifier = findSingletonAmongst(ClusteredCacheIdentifier.class, serviceConfigs);
+    if (clusteredCacheIdentifier != null && loaderWriterConfiguration != null) {
+      return 3;
+    }
+    return 0;
+  }
+}
diff --git a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/loaderwriter/DelegatingLoaderWriterStoreProviderFactory.java b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/loaderwriter/DelegatingLoaderWriterStoreProviderFactory.java
new file mode 100644
index 0000000000..95828fd554
--- /dev/null
+++ b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/loaderwriter/DelegatingLoaderWriterStoreProviderFactory.java
@@ -0,0 +1,32 @@
+/*
+ * Copyright Terracotta, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ +package org.ehcache.clustered.client.internal.loaderwriter; + +import org.ehcache.core.spi.service.ServiceFactory; +import org.ehcache.spi.service.ServiceCreationConfiguration; + +public class DelegatingLoaderWriterStoreProviderFactory implements ServiceFactory { + + @Override + public DelegatingLoaderWriterStoreProvider create(ServiceCreationConfiguration configuration) { + return new DelegatingLoaderWriterStoreProvider(); + } + + @Override + public Class getServiceType() { + return DelegatingLoaderWriterStoreProvider.class; + } +} diff --git a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/loaderwriter/writebehind/ClusteredWriteBehind.java b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/loaderwriter/writebehind/ClusteredWriteBehind.java new file mode 100644 index 0000000000..4df49d721a --- /dev/null +++ b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/loaderwriter/writebehind/ClusteredWriteBehind.java @@ -0,0 +1,107 @@ +/* + * Copyright Terracotta, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.ehcache.clustered.client.internal.loaderwriter.writebehind; + +import org.ehcache.clustered.client.internal.store.ChainBuilder; +import org.ehcache.clustered.client.internal.store.operations.ChainResolver; +import org.ehcache.clustered.common.internal.store.operations.ConditionalRemoveOperation; +import org.ehcache.clustered.common.internal.store.operations.Operation; +import org.ehcache.clustered.common.internal.store.operations.PutOperation; +import org.ehcache.clustered.common.internal.store.operations.RemoveOperation; +import org.ehcache.clustered.common.internal.store.operations.codecs.OperationsCodec; +import org.ehcache.clustered.common.internal.store.Chain; +import org.ehcache.clustered.common.internal.store.Element; +import org.ehcache.core.spi.time.TimeSource; +import org.ehcache.spi.loaderwriter.CacheLoaderWriter; + +import java.nio.ByteBuffer; +import java.util.HashMap; +import java.util.Iterator; +import java.util.Map; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.TimeoutException; + +class ClusteredWriteBehind { + private final ClusteredWriteBehindStore clusteredWriteBehindStore; + private final ExecutorService executorService; + private final CacheLoaderWriter cacheLoaderWriter; + private final OperationsCodec codec; + private final ChainResolver resolver; + private final TimeSource timeSource; + + ClusteredWriteBehind(ClusteredWriteBehindStore clusteredWriteBehindStore, + ExecutorService executorService, + TimeSource timeSource, + ChainResolver resolver, + CacheLoaderWriter cacheLoaderWriter, + OperationsCodec codec) { + this.clusteredWriteBehindStore = clusteredWriteBehindStore; + this.executorService = executorService; + this.resolver = resolver; + this.cacheLoaderWriter = cacheLoaderWriter; + this.codec = codec; + this.timeSource = timeSource; + } + + void flushWriteBehindQueue(Chain ignored, long hash) { + executorService.submit(() -> { + try { + 
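+        // Flush outline: take the server-side lock for this hash, replay the
+        // appended operations through the chain resolver into a per-key latest
+        // state, propagate the resulting writes and deletes through the
+        // CacheLoaderWriter, then swap the chain for its compacted form before
+        // unlocking.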
Chain chain = clusteredWriteBehindStore.lock(hash); + try { + if (!chain.isEmpty()) { + Map> currentState = new HashMap<>(); + for (Element element : chain) { + ByteBuffer payload = element.getPayload(); + Operation operation = codec.decode(payload); + K key = operation.getKey(); + PutOperation result = resolver.applyOperation(key, + currentState.get(key), + operation, + timeSource.getTimeMillis()); + try { + if (result != null) { + if (result != currentState.get(key) && !(operation instanceof PutOperation)) { + cacheLoaderWriter.write(result.getKey(), result.getValue()); + } + currentState.put(key, result.asOperationExpiringAt(result.expirationTime())); + } else { + if (currentState.get(key) != null && (operation instanceof RemoveOperation + || operation instanceof ConditionalRemoveOperation)) { + cacheLoaderWriter.delete(key); + } + currentState.remove(key); + } + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + ChainBuilder builder = new ChainBuilder(); + for (PutOperation operation : currentState.values()) { + builder = builder.add(codec.encode(operation)); + } + + clusteredWriteBehindStore.replaceAtHead(hash, chain, builder.build()); + } + } finally { + clusteredWriteBehindStore.unlock(hash); + } + } catch (TimeoutException e) { + throw new RuntimeException(e); + } + }); + } +} diff --git a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/loaderwriter/writebehind/ClusteredWriteBehindStore.java b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/loaderwriter/writebehind/ClusteredWriteBehindStore.java new file mode 100644 index 0000000000..152424f760 --- /dev/null +++ b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/loaderwriter/writebehind/ClusteredWriteBehindStore.java @@ -0,0 +1,317 @@ +/* + * Copyright Terracotta, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.ehcache.clustered.client.internal.loaderwriter.writebehind; + +import org.ehcache.clustered.client.internal.loaderwriter.ClusteredLoaderWriterStore; +import org.ehcache.clustered.client.internal.store.ClusteredStore; +import org.ehcache.clustered.client.internal.store.ClusteredValueHolder; +import org.ehcache.clustered.client.internal.store.ResolvedChain; +import org.ehcache.clustered.client.internal.store.ServerStoreProxy; +import org.ehcache.clustered.client.internal.store.lock.LockManager; +import org.ehcache.clustered.client.internal.store.operations.ChainResolver; +import org.ehcache.clustered.common.internal.store.operations.ConditionalRemoveOperation; +import org.ehcache.clustered.common.internal.store.operations.ConditionalReplaceOperation; +import org.ehcache.clustered.common.internal.store.operations.PutIfAbsentOperation; +import org.ehcache.clustered.common.internal.store.operations.PutOperation; +import org.ehcache.clustered.common.internal.store.operations.PutWithWriterOperation; +import org.ehcache.clustered.common.internal.store.operations.RemoveOperation; +import org.ehcache.clustered.common.internal.store.operations.ReplaceOperation; +import org.ehcache.clustered.common.internal.store.operations.Result; +import org.ehcache.clustered.common.internal.store.operations.codecs.OperationsCodec; +import org.ehcache.clustered.client.service.ClusteringService; +import org.ehcache.clustered.common.internal.store.Chain; +import org.ehcache.config.ResourceType; +import org.ehcache.core.spi.store.tiering.AuthoritativeTier; +import org.ehcache.core.spi.time.TimeSource; +import org.ehcache.core.spi.time.TimeSourceService; +import org.ehcache.spi.loaderwriter.CacheLoaderWriter; +import org.ehcache.spi.loaderwriter.WriteBehindConfiguration; +import org.ehcache.spi.resilience.StoreAccessException; +import org.ehcache.spi.service.ServiceConfiguration; +import org.ehcache.spi.service.ServiceDependencies; + +import java.nio.ByteBuffer; +import java.util.Collection; +import java.util.Set; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.TimeoutException; + +import static org.ehcache.core.exceptions.StorePassThroughException.handleException; +import static org.ehcache.core.spi.service.ServiceUtils.findSingletonAmongst; + +public class ClusteredWriteBehindStore extends ClusteredStore implements AuthoritativeTier { + + private final CacheLoaderWriter cacheLoaderWriter; + private final ClusteredWriteBehind clusteredWriteBehind; + + private ClusteredWriteBehindStore(Configuration config, + OperationsCodec codec, + ChainResolver resolver, + TimeSource timeSource, + CacheLoaderWriter loaderWriter, + ExecutorService executorService) { + super(config, codec, resolver, timeSource); + this.cacheLoaderWriter = loaderWriter; + this.clusteredWriteBehind = new ClusteredWriteBehind<>(this, executorService, + timeSource, + resolver, + this.cacheLoaderWriter, + codec); + } + + + Chain lock(long hash) throws TimeoutException { + return ((LockManager) storeProxy).lock(hash); + } + + void unlock(long hash) throws TimeoutException { + ((LockManager) storeProxy).unlock(hash); + } + + void replaceAtHead(long key, Chain expected, Chain replacement) { + storeProxy.replaceAtHead(key, expected, replacement); + } + + @Override + protected ValueHolder getInternal(K key) throws StoreAccessException, TimeoutException { + try { + Chain chain = storeProxy.get(extractLongKey(key)); + if (!chain.isEmpty()) { + ClusteredValueHolder holder 
= null; + ResolvedChain resolvedChain = resolver.resolve(chain, key, timeSource.getTimeMillis()); + Result resolvedResult = resolvedChain.getResolvedResult(key); + if (resolvedResult != null) { + V value = resolvedResult.getValue(); + long expirationTime = resolvedChain.getExpirationTime(); + if (expirationTime == Long.MAX_VALUE) { + holder = new ClusteredValueHolder<>(value); + } else { + holder = new ClusteredValueHolder<>(value, expirationTime); + } + } + return holder; + } else { + long hash = extractLongKey(key); + lock(hash); + try { + V value; + try { + value = cacheLoaderWriter.load(key); + } catch (Exception e) { + throw new RuntimeException(e); + } + if (value == null) { + return null; + } + append(key, value); + return new ClusteredValueHolder<>(value); + } finally { + unlock(hash); + } + } + } catch (RuntimeException re) { + throw handleException(re); + } + } + + private void append(K key, V value) throws TimeoutException { + PutOperation operation = new PutOperation<>(key, value, timeSource.getTimeMillis()); + ByteBuffer payload = codec.encode(operation); + long extractedKey = extractLongKey(key); + storeProxy.append(extractedKey, payload); + } + + @Override + protected PutStatus silentPut(final K key, final V value) throws StoreAccessException { + try { + PutWithWriterOperation operation = new PutWithWriterOperation<>(key, value, timeSource.getTimeMillis()); + ByteBuffer payload = codec.encode(operation); + long extractedKey = extractLongKey(key); + storeProxy.append(extractedKey, payload); + return PutStatus.PUT; + } catch (Exception re) { + throw handleException(re); + } + } + + @Override + protected V silentPutIfAbsent(K key, V value) throws StoreAccessException { + try { + PutIfAbsentOperation operation = new PutIfAbsentOperation<>(key, value, timeSource.getTimeMillis()); + ByteBuffer payload = codec.encode(operation); + long extractedKey = extractLongKey(key); + Chain chain = storeProxy.getAndAppend(extractedKey, payload); + ResolvedChain resolvedChain = resolver.resolve(chain, key, timeSource.getTimeMillis()); + + Result result = resolvedChain.getResolvedResult(key); + return result == null ? null : result.getValue(); + } catch (Exception re) { + throw handleException(re); + } + } + + @Override + protected boolean silentRemove(K key) throws StoreAccessException { + try { + RemoveOperation operation = new RemoveOperation<>(key, timeSource.getTimeMillis()); + ByteBuffer payload = codec.encode(operation); + long extractedKey = extractLongKey(key); + Chain chain = storeProxy.getAndAppend(extractedKey, payload); + ResolvedChain resolvedChain = resolver.resolve(chain, key, timeSource.getTimeMillis()); + + return resolvedChain.getResolvedResult(key) != null; + } catch (Exception re) { + throw handleException(re); + } + } + + @Override + protected V silentRemove(K key, V value) throws StoreAccessException { + try { + ConditionalRemoveOperation operation = new ConditionalRemoveOperation<>(key, value, timeSource.getTimeMillis()); + ByteBuffer payload = codec.encode(operation); + long extractedKey = extractLongKey(key); + Chain chain = storeProxy.getAndAppend(extractedKey, payload); + ResolvedChain resolvedChain = resolver.resolve(chain, key, timeSource.getTimeMillis()); + + Result result = resolvedChain.getResolvedResult(key); + return result == null ? 
null : result.getValue(); + } catch (Exception re) { + throw handleException(re); + } + } + + @Override + protected V silentReplace(K key, V value) throws StoreAccessException { + try { + ReplaceOperation operation = new ReplaceOperation<>(key, value, timeSource.getTimeMillis()); + ByteBuffer payload = codec.encode(operation); + long extractedKey = extractLongKey(key); + Chain chain = storeProxy.getAndAppend(extractedKey, payload); + ResolvedChain resolvedChain = resolver.resolve(chain, key, timeSource.getTimeMillis()); + + Result result = resolvedChain.getResolvedResult(key); + return result == null ? null : result.getValue(); + } catch (Exception re) { + throw handleException(re); + } + } + + protected V silentReplace(K key, V oldValue, V newValue) throws StoreAccessException { + try { + ConditionalReplaceOperation operation = new ConditionalReplaceOperation<>(key, oldValue, newValue, timeSource + .getTimeMillis()); + ByteBuffer payload = codec.encode(operation); + long extractedKey = extractLongKey(key); + Chain chain = storeProxy.getAndAppend(extractedKey, payload); + ResolvedChain resolvedChain = resolver.resolve(chain, key, timeSource.getTimeMillis()); + + Result result = resolvedChain.getResolvedResult(key); + return result == null ? null : result.getValue(); + } catch (Exception re) { + throw handleException(re); + } + } + + public class WriteBehindServerCallback implements ServerStoreProxy.ServerCallback { + + private final ServerStoreProxy.ServerCallback delegate; + + WriteBehindServerCallback(ServerStoreProxy.ServerCallback delegate) { + this.delegate = delegate; + } + + @Override + public void onInvalidateHash(long hash) { + this.delegate.onInvalidateHash(hash); + } + + @Override + public void onInvalidateAll() { + this.delegate.onInvalidateAll(); + } + + @Override + public Chain compact(Chain chain) { + return this.delegate.compact(chain); + } + + @Override + public Chain compact(Chain chain, long hash) { + clusteredWriteBehind.flushWriteBehindQueue(chain, hash); + return null; + } + } + + private ServerStoreProxy.ServerCallback getWriteBehindServerCallback(ServerStoreProxy.ServerCallback delegate) { + return new WriteBehindServerCallback(delegate); + } + + /** + * Provider of {@link ClusteredWriteBehindStore} instances. 
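+ * <p>
+ * Illustrative only, not part of this change: layering write-behind on top of the
+ * loader-writer configuration routes the cache to this provider instead. The
+ * loader-writer implementation is again hypothetical; the builder calls are the
+ * public Ehcache ones.
+ * <pre>{@code
+ * CacheConfigurationBuilder
+ *     .newCacheConfigurationBuilder(Long.class, String.class,
+ *         ResourcePoolsBuilder.newResourcePoolsBuilder()
+ *             .with(ClusteredResourcePoolBuilder.clusteredDedicated("primary-server-resource", 2, MemoryUnit.MB)))
+ *     .withLoaderWriter(new SampleLoaderWriter<Long, String>()) // hypothetical implementation
+ *     .add(WriteBehindConfigurationBuilder.newUnBatchedWriteBehindConfiguration().build());
+ * }</pre>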
+ */ + @ServiceDependencies({ TimeSourceService.class, ClusteringService.class}) + public static class Provider extends ClusteredLoaderWriterStore.Provider { + @Override + protected ClusteredStore createStore(Configuration storeConfig, + OperationsCodec codec, + ChainResolver resolver, + TimeSource timeSource, + boolean useLoaderInAtomics, + Object[] serviceConfigs) { + WriteBehindConfiguration writeBehindConfiguration = findSingletonAmongst(WriteBehindConfiguration.class, serviceConfigs); + if (writeBehindConfiguration != null) { + ExecutorService executorService = + executionService.getOrderedExecutor(writeBehindConfiguration.getThreadPoolAlias(), + new LinkedBlockingQueue<>()); + return new ClusteredWriteBehindStore<>(storeConfig, + codec, + resolver, + timeSource, + storeConfig.getCacheLoaderWriter(), + executorService); + } + throw new AssertionError(); + } + + @Override + protected ServerStoreProxy.ServerCallback getServerCallback(ClusteredStore clusteredStore) { + if (clusteredStore instanceof ClusteredWriteBehindStore) { + return ((ClusteredWriteBehindStore)clusteredStore).getWriteBehindServerCallback(super.getServerCallback(clusteredStore)); + } + throw new AssertionError(); + } + + @Override + public int rank(Set> resourceTypes, Collection> serviceConfigs) { + int parentRank = super.rank(resourceTypes, serviceConfigs); + if (parentRank == 0 || serviceConfigs.stream().noneMatch(WriteBehindConfiguration.class::isInstance)) { + return 0; + } + return parentRank + 1; + } + + @Override + public int rankAuthority(ResourceType authorityResource, Collection> serviceConfigs) { + int parentRank = super.rankAuthority(authorityResource, serviceConfigs); + if (parentRank == 0 || serviceConfigs.stream().noneMatch(WriteBehindConfiguration.class::isInstance)) { + return 0; + } + return parentRank + 1; + } + } +} diff --git a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/loaderwriter/writebehind/ClusteredWriteBehindStoreProviderFactory.java b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/loaderwriter/writebehind/ClusteredWriteBehindStoreProviderFactory.java new file mode 100644 index 0000000000..a1bec31337 --- /dev/null +++ b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/loaderwriter/writebehind/ClusteredWriteBehindStoreProviderFactory.java @@ -0,0 +1,33 @@ +/* + * Copyright Terracotta, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.ehcache.clustered.client.internal.loaderwriter.writebehind; + +import org.ehcache.clustered.client.internal.loaderwriter.ClusteredLoaderWriterStore; +import org.ehcache.core.spi.service.ServiceFactory; +import org.ehcache.spi.service.ServiceCreationConfiguration; + +public class ClusteredWriteBehindStoreProviderFactory implements ServiceFactory { + + @Override + public ClusteredWriteBehindStore.Provider create(ServiceCreationConfiguration configuration) { + return new ClusteredWriteBehindStore.Provider(); + } + + @Override + public Class getServiceType() { + return ClusteredWriteBehindStore.Provider.class; + } +} diff --git a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/lock/VoltronReadWriteLockClient.java b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/lock/VoltronReadWriteLockClient.java index 24c9cbe7a8..1c9c6ab212 100644 --- a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/lock/VoltronReadWriteLockClient.java +++ b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/lock/VoltronReadWriteLockClient.java @@ -24,7 +24,6 @@ import org.terracotta.connection.entity.Entity; import org.terracotta.entity.EndpointDelegate; import org.terracotta.entity.EntityClientEndpoint; -import org.terracotta.entity.EntityResponse; import org.terracotta.entity.InvokeFuture; import org.terracotta.entity.MessageCodecException; import org.terracotta.exception.EntityException; diff --git a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/service/ClusterTierCreationException.java b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/service/ClusterTierCreationException.java index 8b7b057912..a66b9d606f 100644 --- a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/service/ClusterTierCreationException.java +++ b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/service/ClusterTierCreationException.java @@ -21,6 +21,8 @@ */ public class ClusterTierCreationException extends ClusterTierException { + private static final long serialVersionUID = 6048350791384030212L; + public ClusterTierCreationException(String message, Throwable cause) { super(message, cause); } diff --git a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/service/ClusterTierDestructionException.java b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/service/ClusterTierDestructionException.java index 5d023d2da6..eb16b351e6 100644 --- a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/service/ClusterTierDestructionException.java +++ b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/service/ClusterTierDestructionException.java @@ -21,6 +21,8 @@ */ public class ClusterTierDestructionException extends ClusterTierException { + private static final long serialVersionUID = -7314374512451335435L; + public ClusterTierDestructionException(String message, Throwable cause) { super(message, cause); } diff --git a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/service/ClusterTierException.java b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/service/ClusterTierException.java index e61027fe22..458d1da3d7 100644 --- a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/service/ClusterTierException.java +++ b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/service/ClusterTierException.java @@ -21,6 +21,8 @@ */ public abstract class 
ClusterTierException extends Exception { + private static final long serialVersionUID = -4057331870606799775L; + public ClusterTierException(String message, Throwable cause) { super(message, cause); } diff --git a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/service/ClusterTierManagerConfigurationException.java b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/service/ClusterTierManagerConfigurationException.java index 18a3396a30..0c3b012d04 100644 --- a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/service/ClusterTierManagerConfigurationException.java +++ b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/service/ClusterTierManagerConfigurationException.java @@ -21,6 +21,8 @@ */ public class ClusterTierManagerConfigurationException extends ClusterTierException { + private static final long serialVersionUID = 6540327268333174996L; + public ClusterTierManagerConfigurationException(String message, Throwable cause) { super(message, cause); } diff --git a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/service/ClusterTierReleaseException.java b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/service/ClusterTierReleaseException.java index d5a28a2bce..a76e707f92 100644 --- a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/service/ClusterTierReleaseException.java +++ b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/service/ClusterTierReleaseException.java @@ -21,6 +21,8 @@ */ public class ClusterTierReleaseException extends ClusterTierException { + private static final long serialVersionUID = -1595496769881016663L; + public ClusterTierReleaseException(String message, Throwable cause) { super(message, cause); } diff --git a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/service/ClusterTierValidationException.java b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/service/ClusterTierValidationException.java index e74145cef5..f6a138ba56 100644 --- a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/service/ClusterTierValidationException.java +++ b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/service/ClusterTierValidationException.java @@ -21,6 +21,8 @@ */ public class ClusterTierValidationException extends ClusterTierException { + private static final long serialVersionUID = 7363986054006535780L; + public ClusterTierValidationException(String message, Throwable cause) { super(message, cause); } diff --git a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/service/ClusteredMapException.java b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/service/ClusteredMapException.java index f4c11c4d4c..02edee8743 100644 --- a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/service/ClusteredMapException.java +++ b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/service/ClusteredMapException.java @@ -18,6 +18,8 @@ public class ClusteredMapException extends RuntimeException { + private static final long serialVersionUID = -7486556137969177116L; + public ClusteredMapException(final String message) { super(message); } diff --git a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/service/ConnectionState.java b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/service/ConnectionState.java new file mode 100644 index 0000000000..6f3f8edec8 --- 
/dev/null +++ b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/service/ConnectionState.java @@ -0,0 +1,324 @@ +/* + * Copyright Terracotta, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.ehcache.clustered.client.internal.service; + +import org.ehcache.CachePersistenceException; +import org.ehcache.clustered.client.config.ClusteringServiceConfiguration; +import org.ehcache.clustered.client.config.Timeouts; +import org.ehcache.clustered.client.internal.ClusterTierManagerClientEntity; +import org.ehcache.clustered.client.internal.ClusterTierManagerClientEntityFactory; +import org.ehcache.clustered.client.internal.ClusterTierManagerCreationException; +import org.ehcache.clustered.client.internal.ClusterTierManagerValidationException; +import org.ehcache.clustered.client.internal.ConnectionSource; +import org.ehcache.clustered.client.internal.store.ClusterTierClientEntity; +import org.ehcache.clustered.client.service.EntityBusyException; +import org.ehcache.clustered.common.internal.ServerStoreConfiguration; +import org.ehcache.clustered.common.internal.exceptions.DestroyInProgressException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.terracotta.connection.Connection; +import org.terracotta.connection.ConnectionException; +import org.terracotta.connection.ConnectionPropertyNames; +import org.terracotta.exception.ConnectionClosedException; +import org.terracotta.exception.ConnectionShutdownException; +import org.terracotta.exception.EntityAlreadyExistsException; +import org.terracotta.exception.EntityNotFoundException; + +import java.io.IOException; +import java.util.Properties; +import java.util.Random; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicInteger; + +class ConnectionState { + + private static final Logger LOGGER = LoggerFactory.getLogger(ConnectionState.class); + + private static final String CONNECTION_PREFIX = "Ehcache:"; + + private volatile Connection clusterConnection = null; + private volatile ClusterTierManagerClientEntityFactory entityFactory = null; + private volatile ClusterTierManagerClientEntity entity = null; + + private final AtomicInteger reconnectCounter = new AtomicInteger(); + private final ConcurrentMap clusterTierEntities = new ConcurrentHashMap<>(); + private final Timeouts timeouts; + private final ConnectionSource connectionSource; + private final String entityIdentifier; + private final Properties connectionProperties; + private final ClusteringServiceConfiguration serviceConfiguration; + + private Runnable connectionRecoveryListener = () -> {}; + + ConnectionState(Timeouts timeouts, Properties connectionProperties, ClusteringServiceConfiguration serviceConfiguration) { + this.timeouts = timeouts; + this.connectionSource = serviceConfiguration.getConnectionSource(); + this.entityIdentifier = connectionSource.getClusterTierManager(); + 
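+    // Tag the connection with a recognizable name and carry the configured
+    // connection timeout down to the transport.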
this.connectionProperties = connectionProperties; + connectionProperties.put(ConnectionPropertyNames.CONNECTION_NAME, CONNECTION_PREFIX + entityIdentifier); + connectionProperties.put(ConnectionPropertyNames.CONNECTION_TIMEOUT, Long.toString(timeouts.getConnectionTimeout().toMillis())); + this.serviceConfiguration = serviceConfiguration; + } + + public void setConnectionRecoveryListener(Runnable connectionRecoveryListener) { + this.connectionRecoveryListener = connectionRecoveryListener; + } + + public Connection getConnection() { + return clusterConnection; + } + + public ClusterTierClientEntity getClusterTierClientEntity(String cacheId) { + return clusterTierEntities.get(cacheId); + } + + public ClusterTierManagerClientEntityFactory getEntityFactory() { + return entityFactory; + } + + public ClusterTierClientEntity createClusterTierClientEntity(String cacheId, + ServerStoreConfiguration clientStoreConfiguration, boolean isReconnect) + throws CachePersistenceException { + ClusterTierClientEntity storeClientEntity; + while (true) { + try { + if (isReconnect) { + storeClientEntity = entityFactory.getClusterTierClientEntity(entityIdentifier, cacheId); + } else { + storeClientEntity = entityFactory.fetchOrCreateClusteredStoreEntity(entityIdentifier, cacheId, + clientStoreConfiguration, serviceConfiguration.isAutoCreate()); + } + clusterTierEntities.put(cacheId, storeClientEntity); + break; + } catch (EntityNotFoundException e) { + throw new CachePersistenceException("Cluster tier proxy '" + cacheId + "' for entity '" + entityIdentifier + "' does not exist.", e); + } catch (ConnectionClosedException | ConnectionShutdownException e) { + LOGGER.info("Disconnected from the server", e); + handleConnectionClosedException(); + } + } + + return storeClientEntity; + } + + public void removeClusterTierClientEntity(String cacheId) { + clusterTierEntities.remove(cacheId); + } + + public void initClusterConnection() { + try { + connect(); + } catch (ConnectionException ex) { + LOGGER.error("Initial connection failed due to", ex); + throw new RuntimeException(ex); + } + } + + private void reconnect() { + while (true) { + try { + connect(); + LOGGER.info("New connection to server is established, reconnect count is {}", reconnectCounter.incrementAndGet()); + break; + } catch (ConnectionException e) { + LOGGER.error("Re-connection to server failed, trying again", e); + } + } + } + + private void connect() throws ConnectionException { + clusterConnection = connectionSource.connect(connectionProperties); + entityFactory = new ClusterTierManagerClientEntityFactory(clusterConnection, timeouts); + } + + public void closeConnection() { + Connection conn = clusterConnection; + clusterConnection = null; + if(conn != null) { + try { + conn.close(); + } catch (IOException | ConnectionShutdownException e) { + LOGGER.warn("Error closing cluster connection: " + e); + } + } + } + + private void silentDestroy() { + LOGGER.debug("Found a broken ClusterTierManager - trying to clean it up"); + try { + // Random sleep to enable racing clients to have a window to do the cleanup + Thread.sleep(new Random().nextInt(1000)); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } + try { + entityFactory.destroy(entityIdentifier); + } catch (EntityBusyException e) { + // Ignore - we have a racy client + LOGGER.debug("ClusterTierManager {} marked busy when trying to clean it up", entityIdentifier); + } + } + + public void acquireLeadership() { + if (!entityFactory.acquireLeadership(entityIdentifier)) { + 
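+      // Leadership was not granted: drop the factory and close the connection so
+      // maintenance cannot proceed against an entity another client controls.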
entityFactory = null; + closeConnection(); + throw new IllegalStateException("Couldn't acquire cluster-wide maintenance lease"); + } + } + + public void initializeState() { + try { + if (serviceConfiguration.isAutoCreate()) { + autoCreateEntity(); + } else { + retrieveEntity(); + } + } catch (RuntimeException e) { + entityFactory = null; + closeConnection(); + throw e; + } + } + + private void retrieveEntity() { + try { + entity = entityFactory.retrieve(entityIdentifier, serviceConfiguration.getServerConfiguration()); + } catch (DestroyInProgressException | EntityNotFoundException e) { + throw new IllegalStateException("The cluster tier manager '" + entityIdentifier + "' does not exist." + + " Please review your configuration.", e); + } catch (TimeoutException e) { + throw new RuntimeException("Could not connect to the cluster tier manager '" + entityIdentifier + + "'; retrieve operation timed out", e); + } + } + + public void destroyState(boolean healthyConnection) { + if (entityFactory != null && healthyConnection) { + // proactively abandon any acquired read or write locks on a healthy connection + entityFactory.abandonAllHolds(entityIdentifier); + } + entityFactory = null; + + clusterTierEntities.clear(); + entity = null; + } + + public void destroyAll() throws CachePersistenceException { + LOGGER.info("destroyAll called for cluster tiers on {}", connectionSource); + + while (true) { + try { + entityFactory.destroy(entityIdentifier); + break; + } catch (EntityBusyException e) { + throw new CachePersistenceException("Cannot delete cluster tiers on " + connectionSource, e); + } catch (ConnectionClosedException | ConnectionShutdownException e) { + handleConnectionClosedException(); + } + } + } + + public void destroy(String name) throws CachePersistenceException { + // will happen when in maintenance mode + while (true) { + if (entity == null) { + try { + entity = entityFactory.retrieve(entityIdentifier, serviceConfiguration.getServerConfiguration()); + } catch (EntityNotFoundException e) { + // No entity on the server, so no need to destroy anything + break; + } catch (TimeoutException e) { + throw new CachePersistenceException("Could not connect to the cluster tier manager '" + entityIdentifier + + "'; retrieve operation timed out", e); + } catch (DestroyInProgressException e) { + silentDestroy(); + // Nothing left to do + break; + } catch (ConnectionClosedException | ConnectionShutdownException e) { + reconnect(); + } + } + + try { + if (entity != null) { + entityFactory.destroyClusteredStoreEntity(entityIdentifier, name); + break; + } + } catch (EntityNotFoundException e) { + // Ignore - does not exist, nothing to destroy + LOGGER.debug("Destruction of cluster tier {} failed as it does not exist", name); + break; + } catch (ConnectionClosedException | ConnectionShutdownException e) { + handleConnectionClosedException(); + } + } + } + + private void autoCreateEntity() throws ClusterTierManagerValidationException, IllegalStateException { + while (true) { + try { + entityFactory.create(entityIdentifier, serviceConfiguration.getServerConfiguration()); + } catch (ClusterTierManagerCreationException e) { + throw new IllegalStateException("Could not create the cluster tier manager '" + entityIdentifier + "'.", e); + } catch (EntityAlreadyExistsException | EntityBusyException e) { + //ignore - entity already exists - try to retrieve + } catch (ConnectionClosedException | ConnectionShutdownException e) { + LOGGER.info("Disconnected from the server", e); + reconnect(); + continue; + } + + try { 
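+        // Whether creation succeeded or lost the race to another client, fall
+        // through to retrieve (and thereby validate) the entity.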
+ entity = entityFactory.retrieve(entityIdentifier, serviceConfiguration.getServerConfiguration()); + break; + } catch (DestroyInProgressException e) { + silentDestroy(); + } catch (EntityNotFoundException e) { + //ignore - loop and try to create + } catch (TimeoutException e) { + throw new RuntimeException("Could not connect to the cluster tier manager '" + entityIdentifier + + "'; retrieve operation timed out", e); + } catch (ConnectionClosedException | ConnectionShutdownException e) { + LOGGER.info("Disconnected from the server", e); + reconnect(); + } + } + + } + + private void handleConnectionClosedException() { + while (true) { + try { + destroyState(false); + reconnect(); + retrieveEntity(); + connectionRecoveryListener.run(); + break; + } catch (ConnectionClosedException | ConnectionShutdownException e) { + LOGGER.info("Disconnected from the server", e); + } + } + } + + //Only for test + int getReconnectCount() { + return reconnectCounter.get(); + } + +} diff --git a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/service/DefaultClusteringService.java b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/service/DefaultClusteringService.java index 51b5a90c62..2892a19998 100644 --- a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/service/DefaultClusteringService.java +++ b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/service/DefaultClusteringService.java @@ -20,27 +20,24 @@ import org.ehcache.clustered.client.config.ClusteredResourcePool; import org.ehcache.clustered.client.config.ClusteredResourceType; import org.ehcache.clustered.client.config.ClusteringServiceConfiguration; -import org.ehcache.clustered.client.internal.ClusterTierManagerClientEntity; -import org.ehcache.clustered.client.internal.ClusterTierManagerClientEntityFactory; -import org.ehcache.clustered.client.internal.ClusterTierManagerCreationException; -import org.ehcache.clustered.client.internal.ClusterTierManagerNotFoundException; -import org.ehcache.clustered.client.internal.ClusterTierManagerValidationException; -import org.ehcache.clustered.client.config.Timeouts; +import org.ehcache.clustered.client.internal.loaderwriter.writebehind.ClusteredWriteBehindStore; import org.ehcache.clustered.client.internal.store.ClusterTierClientEntity; import org.ehcache.clustered.client.internal.store.EventualServerStoreProxy; import org.ehcache.clustered.client.internal.store.ServerStoreProxy; import org.ehcache.clustered.client.internal.store.ServerStoreProxy.ServerCallback; import org.ehcache.clustered.client.internal.store.StrongServerStoreProxy; +import org.ehcache.clustered.client.internal.store.lock.LockManager; +import org.ehcache.clustered.client.internal.store.lock.LockManagerImpl; +import org.ehcache.clustered.client.internal.store.lock.LockingServerStoreProxy; import org.ehcache.clustered.client.service.ClientEntityFactory; import org.ehcache.clustered.client.service.ClusteringService; -import org.ehcache.clustered.client.service.EntityBusyException; import org.ehcache.clustered.client.service.EntityService; import org.ehcache.clustered.common.Consistency; import org.ehcache.clustered.common.internal.ServerStoreConfiguration; -import org.ehcache.clustered.common.internal.exceptions.DestroyInProgressException; import org.ehcache.config.CacheConfiguration; import org.ehcache.config.ResourceType; import org.ehcache.core.spi.store.Store; +import org.ehcache.spi.loaderwriter.WriteBehindProvider; import 
org.ehcache.spi.persistence.StateRepository; import org.ehcache.spi.service.MaintainableService; import org.ehcache.spi.service.Service; @@ -48,22 +45,16 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.terracotta.connection.Connection; -import org.terracotta.connection.ConnectionException; -import org.terracotta.connection.ConnectionFactory; -import org.terracotta.connection.ConnectionPropertyNames; import org.terracotta.connection.entity.Entity; -import org.terracotta.exception.EntityAlreadyExistsException; -import org.terracotta.exception.EntityNotFoundException; -import java.io.IOException; -import java.net.URI; -import java.net.URISyntaxException; -import java.util.Arrays; +import java.util.Collection; import java.util.Properties; -import java.util.Random; +import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.TimeoutException; +import java.util.stream.Stream; /** * Provides support for accessing server-based cluster services. @@ -75,32 +66,29 @@ class DefaultClusteringService implements ClusteringService, EntityService { static final String CONNECTION_PREFIX = "Ehcache:"; private final ClusteringServiceConfiguration configuration; - private final URI clusterUri; - private final String entityIdentifier; private final ConcurrentMap knownPersistenceSpaces = new ConcurrentHashMap<>(); - private final Timeouts timeouts; + private final ConnectionState connectionState; - private volatile Connection clusterConnection; - private ClusterTierManagerClientEntityFactory entityFactory; - private ClusterTierManagerClientEntity entity; - private final ConcurrentMap clusterTierEntities = new ConcurrentHashMap<>(); + private final Set reconnectSet = ConcurrentHashMap.newKeySet(); + private final Collection connectionRecoveryListeners = new CopyOnWriteArrayList<>(); private volatile boolean inMaintenance = false; - DefaultClusteringService(final ClusteringServiceConfiguration configuration) { + DefaultClusteringService(ClusteringServiceConfiguration configuration) { this.configuration = configuration; - URI ehcacheUri = configuration.getClusterUri(); - this.clusterUri = extractClusterUri(ehcacheUri); - this.entityIdentifier = clusterUri.relativize(ehcacheUri).getPath(); - this.timeouts = configuration.getTimeouts(); + Properties properties = configuration.getProperties(); + this.connectionState = new ConnectionState(configuration.getTimeouts(), properties, configuration); + this.connectionState.setConnectionRecoveryListener(() -> connectionRecoveryListeners.forEach(Runnable::run)); } - private static URI extractClusterUri(URI uri) { - try { - return new URI(uri.getScheme(), uri.getAuthority(), null, null, null); - } catch (URISyntaxException e) { - throw new AssertionError(e); - } + @Override + public void addConnectionRecoveryListener(Runnable runnable) { + connectionRecoveryListeners.add(runnable); + } + + @Override + public void removeConnectionRecoveryListener(Runnable runnable) { + connectionRecoveryListeners.remove(runnable); } @Override @@ -116,111 +104,34 @@ protected Connection getConnection() { if (!isConnected()) { throw new IllegalStateException(getClass().getSimpleName() + " not started."); } - return clusterConnection; + return connectionState.getConnection(); } }; } @Override public boolean isConnected() { - return clusterConnection != null; + return connectionState.getConnection() != null; } @Override public void start(final 
ServiceProvider serviceProvider) { - initClusterConnection(); - createEntityFactory(); - try { - if (configuration.isAutoCreate()) { - entity = autoCreateEntity(); - } else { - try { - entity = entityFactory.retrieve(entityIdentifier, configuration.getServerConfiguration()); - } catch (DestroyInProgressException | EntityNotFoundException e) { - throw new IllegalStateException("The cluster tier manager '" + entityIdentifier + "' does not exist." - + " Please review your configuration.", e); - } catch (TimeoutException e) { - throw new RuntimeException("Could not connect to the cluster tier manager '" + entityIdentifier - + "'; retrieve operation timed out", e); - } - } - } catch (RuntimeException e) { - entityFactory = null; - closeConnection(); - throw e; - } + connectionState.initClusterConnection(); + connectionState.initializeState(); } @Override public void startForMaintenance(ServiceProvider serviceProvider, MaintenanceScope maintenanceScope) { - initClusterConnection(); - createEntityFactory(); + connectionState.initClusterConnection(); if(maintenanceScope == MaintenanceScope.CACHE_MANAGER) { - if (!entityFactory.acquireLeadership(entityIdentifier)) { - entityFactory = null; - closeConnection(); - throw new IllegalStateException("Couldn't acquire cluster-wide maintenance lease"); - } + connectionState.acquireLeadership(); } inMaintenance = true; } - private void createEntityFactory() { - entityFactory = new ClusterTierManagerClientEntityFactory(clusterConnection, timeouts); - } - - private void initClusterConnection() { - try { - Properties properties = new Properties(); - properties.put(ConnectionPropertyNames.CONNECTION_NAME, CONNECTION_PREFIX + entityIdentifier); - properties.put(ConnectionPropertyNames.CONNECTION_TIMEOUT, Long.toString(timeouts.getConnectionTimeout().toMillis())); - clusterConnection = ConnectionFactory.connect(clusterUri, properties); - } catch (ConnectionException ex) { - throw new RuntimeException(ex); - } - } - - private ClusterTierManagerClientEntity autoCreateEntity() throws ClusterTierManagerValidationException, IllegalStateException { - while (true) { - try { - entityFactory.create(entityIdentifier, configuration.getServerConfiguration()); - } catch (ClusterTierManagerCreationException e) { - throw new IllegalStateException("Could not create the cluster tier manager '" + entityIdentifier + "'.", e); - } catch (EntityAlreadyExistsException | EntityBusyException e) { - //ignore - entity already exists - try to retrieve - } - try { - return entityFactory.retrieve(entityIdentifier, configuration.getServerConfiguration()); - } catch (DestroyInProgressException e) { - silentDestroy(); - } catch (EntityNotFoundException e) { - //ignore - loop and try to create - } catch (TimeoutException e) { - throw new RuntimeException("Could not connect to the cluster tier manager '" + entityIdentifier - + "'; retrieve operation timed out", e); - } - } - } - - private void silentDestroy() { - LOGGER.debug("Found a broken ClusterTierManager - trying to clean it up"); - try { - // Random sleep to enable racing clients to have a window to do the cleanup - Thread.sleep(new Random().nextInt(1000)); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } - try { - entityFactory.destroy(entityIdentifier); - } catch (EntityBusyException e) { - // Ignore - we have a racy client - LOGGER.debug("ClusterTierManager {} marked busy when trying to clean it up", entityIdentifier); - } - } - @Override public void stop() { - LOGGER.info("Closing connection to cluster 
{}", this.clusterUri); + LOGGER.info("Closing connection to cluster {}", configuration.getConnectionSource()); /* * Entity close() operations must *not* be called; if the server connection is disconnected, the entity @@ -229,13 +140,9 @@ public void stop() { * InFlightMessage.waitForAcks -- a method that can wait forever.) Theoretically, the connection close will * take care of server-side cleanup in the event the server is connected. */ - entityFactory = null; + connectionState.destroyState(true); inMaintenance = false; - - clusterTierEntities.clear(); - entity = null; - - closeConnection(); + connectionState.closeConnection(); } @Override @@ -243,22 +150,16 @@ public void destroyAll() throws CachePersistenceException { if (!inMaintenance) { throw new IllegalStateException("Maintenance mode required"); } - LOGGER.info("destroyAll called for cluster tiers on {}", this.clusterUri); - - try { - entityFactory.destroy(entityIdentifier); - } catch (EntityBusyException e) { - throw new CachePersistenceException("Can not delete cluster tiers on " + this.clusterUri, e); - } + connectionState.destroyAll(); } @Override public boolean handlesResourceType(ResourceType resourceType) { - return (Arrays.asList(ClusteredResourceType.Types.values()).contains(resourceType)); + return Stream.of(ClusteredResourceType.Types.values()).anyMatch(t -> t.equals(resourceType)); } @Override - public PersistenceSpaceIdentifier getPersistenceSpaceIdentifier(String name, CacheConfiguration config) throws CachePersistenceException { + public PersistenceSpaceIdentifier getPersistenceSpaceIdentifier(String name, CacheConfiguration config) { ClusteredSpace clusteredSpace = knownPersistenceSpaces.get(name); if(clusteredSpace != null) { return clusteredSpace.identifier; @@ -293,7 +194,8 @@ public StateRepository getStateRepositoryWithin(PersistenceSpaceIdentifier id if(currentRepo != null) { return currentRepo; } else { - ClusterStateRepository newRepo = new ClusterStateRepository(clusterCacheIdentifier, name, clusterTierEntities.get(clusterCacheIdentifier.getId())); + ClusterStateRepository newRepo = new ClusterStateRepository(clusterCacheIdentifier, name, + connectionState.getClusterTierClientEntity(clusterCacheIdentifier.getId())); currentRepo = stateRepositories.putIfAbsent(name, newRepo); if (currentRepo == null) { return newRepo; @@ -312,40 +214,16 @@ private void checkStarted() { @Override public void destroy(String name) throws CachePersistenceException { checkStarted(); - - // will happen when in maintenance mode - if(entity == null) { - try { - entity = entityFactory.retrieve(entityIdentifier, configuration.getServerConfiguration()); - } catch (EntityNotFoundException e) { - // No entity on the server, so no need to destroy anything - } catch (TimeoutException e) { - throw new CachePersistenceException("Could not connect to the cluster tier manager '" + entityIdentifier - + "'; retrieve operation timed out", e); - } catch (DestroyInProgressException e) { - silentDestroy(); - // Nothing left to do - return; - } - } - - try { - if (entity != null) { - entityFactory.destroyClusteredStoreEntity(entityIdentifier, name); - } - } catch (EntityNotFoundException e) { - // Ignore - does not exist, nothing to destroy - LOGGER.debug("Destruction of cluster tier {} failed as it does not exist", name); - } + connectionState.destroy(name); } - protected boolean isStarted() { - return entityFactory != null; + private boolean isStarted() { + return connectionState.getEntityFactory() != null; } @Override - public ServerStoreProxy 
getServerStoreProxy(final ClusteredCacheIdentifier cacheIdentifier, - final Store.Configuration storeConfig, + public ServerStoreProxy getServerStoreProxy(ClusteredCacheIdentifier cacheIdentifier, + Store.Configuration storeConfig, Consistency configuredConsistency, ServerCallback invalidation) throws CachePersistenceException { final String cacheId = cacheIdentifier.getId(); @@ -371,24 +249,16 @@ public ServerStoreProxy getServerStoreProxy(final ClusteredCacheIdentifie throw new IllegalStateException("A clustered resource is required for a clustered cache"); } - final ServerStoreConfiguration clientStoreConfiguration = new ServerStoreConfiguration( - clusteredResourcePool.getPoolAllocation(), - storeConfig.getKeyType().getName(), - storeConfig.getValueType().getName(), - (storeConfig.getKeySerializer() == null ? null : storeConfig.getKeySerializer().getClass().getName()), - (storeConfig.getValueSerializer() == null ? null : storeConfig.getValueSerializer().getClass().getName()), - configuredConsistency - ); - - ClusterTierClientEntity storeClientEntity; - try { - storeClientEntity = entityFactory.fetchOrCreateClusteredStoreEntity(entityIdentifier, cacheId, - clientStoreConfiguration, configuration.isAutoCreate()); - clusterTierEntities.put(cacheId, storeClientEntity); - } catch (EntityNotFoundException e) { - throw new CachePersistenceException("Cluster tier proxy '" + cacheIdentifier.getId() + "' for entity '" + entityIdentifier + "' does not exist.", e); - } + ServerStoreConfiguration clientStoreConfiguration = new ServerStoreConfiguration( + clusteredResourcePool.getPoolAllocation(), + storeConfig.getKeyType().getName(), + storeConfig.getValueType().getName(), + (storeConfig.getKeySerializer() == null ? null : storeConfig.getKeySerializer().getClass().getName()), + (storeConfig.getValueSerializer() == null ? 
null : storeConfig.getValueSerializer().getClass().getName()), + configuredConsistency, storeConfig.getCacheLoaderWriter() != null, + invalidation instanceof ClusteredWriteBehindStore.WriteBehindServerCallback); + ClusterTierClientEntity storeClientEntity = connectionState.createClusterTierClientEntity(cacheId, clientStoreConfiguration, reconnectSet.remove(cacheId)); ServerStoreProxy serverStoreProxy; switch (configuredConsistency) { @@ -406,32 +276,29 @@ public ServerStoreProxy getServerStoreProxy(final ClusteredCacheIdentifie storeClientEntity.validate(clientStoreConfiguration); } catch (ClusterTierException e) { serverStoreProxy.close(); - throw new CachePersistenceException("Unable to create cluster tier proxy '" + cacheIdentifier.getId() + "' for entity '" + entityIdentifier + "'", e); + throw new CachePersistenceException("Unable to create cluster tier proxy '" + cacheIdentifier.getId() + "' for entity '" + + configuration.getConnectionSource().getClusterTierManager() + "'", e); } catch (TimeoutException e) { serverStoreProxy.close(); - throw new CachePersistenceException("Unable to create cluster tier proxy '" - + cacheIdentifier.getId() + "' for entity '" + entityIdentifier - + "'; validate operation timed out", e); + throw new CachePersistenceException("Unable to create cluster tier proxy '" + cacheIdentifier.getId() + "' for entity '" + + configuration.getConnectionSource().getClusterTierManager() + "'; validate operation timed out", e); + } + + if (storeConfig.getCacheLoaderWriter() != null) { + LockManager lockManager = new LockManagerImpl(storeClientEntity); + serverStoreProxy = new LockingServerStoreProxy(serverStoreProxy, lockManager); } return serverStoreProxy; } @Override - public void releaseServerStoreProxy(ServerStoreProxy storeProxy) { - clusterTierEntities.remove(storeProxy.getCacheId()); - storeProxy.close(); - } - - private void closeConnection() { - Connection conn = clusterConnection; - clusterConnection = null; - if(conn != null) { - try { - conn.close(); - } catch (IOException e) { - LOGGER.warn("Error closing cluster connection: " + e); - } + public void releaseServerStoreProxy(ServerStoreProxy storeProxy, boolean isReconnect) { + connectionState.removeClusterTierClientEntity(storeProxy.getCacheId()); + if (!isReconnect) { + storeProxy.close(); + } else { + reconnectSet.add(storeProxy.getCacheId()); } } diff --git a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/ChainBuilder.java b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/ChainBuilder.java index 068e7edc33..00aff2d0c6 100644 --- a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/ChainBuilder.java +++ b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/ChainBuilder.java @@ -16,10 +16,12 @@ package org.ehcache.clustered.client.internal.store; import org.ehcache.clustered.common.internal.store.Chain; +import org.ehcache.clustered.common.internal.store.Element; import org.ehcache.clustered.common.internal.store.Util; import java.nio.ByteBuffer; import java.util.ArrayList; +import java.util.Arrays; import java.util.List; /** @@ -38,8 +40,8 @@ private ChainBuilder(List buffers) { //TODO: optimize this & make this mutable public ChainBuilder add(final ByteBuffer payload) { - List newList = new ArrayList<>(); - newList.addAll(this.buffers); + List newList = new ArrayList<>(buffers.size() + 1); + newList.addAll(buffers); newList.add(payload); return new ChainBuilder(newList); } diff --git 
a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/ClusterTierClientEntity.java b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/ClusterTierClientEntity.java index b335f247b9..57f7b40463 100644 --- a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/ClusterTierClientEntity.java +++ b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/ClusterTierClientEntity.java @@ -51,9 +51,9 @@ public interface ClusterTierClientEntity extends Entity { void addResponseListener(Class responseType, ResponseListener responseListener); - void setDisconnectionListener(DisconnectionListener disconnectionListener); + void addDisconnectionListener(DisconnectionListener disconnectionListener); - void setReconnectListener(ReconnectListener reconnectListener); + void addReconnectListener(ReconnectListener reconnectListener); interface ResponseListener { void onResponse(T response); diff --git a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/ClusterTierClientEntityService.java b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/ClusterTierClientEntityService.java index 02ca058014..3353342ba6 100644 --- a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/ClusterTierClientEntityService.java +++ b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/ClusterTierClientEntityService.java @@ -33,7 +33,7 @@ /** * ClusterTierClientEntityService */ -public class ClusterTierClientEntityService implements EntityClientService { +public class ClusterTierClientEntityService implements EntityClientService { private final EntityConfigurationCodec configCodec = new EntityConfigurationCodec(new CommonConfigCodec()); @@ -53,8 +53,9 @@ public ClusterTierEntityConfiguration deserializeConfiguration(byte[] configurat } @Override - public ClusterTierClientEntity create(EntityClientEndpoint endpoint, Void userData) { - return new SimpleClusterTierClientEntity(endpoint); + public ClusterTierClientEntity create(EntityClientEndpoint endpoint, + ClusterTierUserData userData) { + return new SimpleClusterTierClientEntity(endpoint, userData.getTimeouts(), userData.getStoreIdentifier()); } @Override diff --git a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/ClusterTierUserData.java b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/ClusterTierUserData.java new file mode 100644 index 0000000000..c41a598410 --- /dev/null +++ b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/ClusterTierUserData.java @@ -0,0 +1,41 @@ +/* + * Copyright Terracotta, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.ehcache.clustered.client.internal.store; + +import org.ehcache.clustered.client.config.Timeouts; + +/** + * ClusterTierUserData + * + * Additional information passed to client side cluster tier entity. 
+ */ +public class ClusterTierUserData { + private final Timeouts timeouts; + private final String storeIdentifier; + + public ClusterTierUserData(Timeouts timeouts, String storeIdentifier) { + this.timeouts = timeouts; + this.storeIdentifier = storeIdentifier; + } + + public Timeouts getTimeouts() { + return timeouts; + } + + public String getStoreIdentifier() { + return storeIdentifier; + } +} diff --git a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/ClusteredStore.java b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/ClusteredStore.java index 44cd04eaa9..9808015103 100644 --- a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/ClusteredStore.java +++ b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/ClusteredStore.java @@ -18,61 +18,60 @@ import org.ehcache.Cache; import org.ehcache.CachePersistenceException; +import org.ehcache.clustered.client.config.ClusteredResourcePool; import org.ehcache.clustered.client.config.ClusteredResourceType; import org.ehcache.clustered.client.config.ClusteredStoreConfiguration; import org.ehcache.clustered.client.internal.store.ServerStoreProxy.ServerCallback; import org.ehcache.clustered.client.internal.store.operations.ChainResolver; import org.ehcache.clustered.client.internal.store.operations.EternalChainResolver; -import org.ehcache.clustered.client.internal.store.operations.ConditionalRemoveOperation; -import org.ehcache.clustered.client.internal.store.operations.ConditionalReplaceOperation; import org.ehcache.clustered.client.internal.store.operations.ExpiryChainResolver; -import org.ehcache.clustered.client.internal.store.operations.PutIfAbsentOperation; -import org.ehcache.clustered.client.internal.store.operations.PutOperation; -import org.ehcache.clustered.client.internal.store.operations.RemoveOperation; -import org.ehcache.clustered.client.internal.store.operations.ReplaceOperation; -import org.ehcache.clustered.client.internal.store.operations.Result; -import org.ehcache.clustered.client.internal.store.operations.codecs.OperationsCodec; +import org.ehcache.clustered.common.internal.store.operations.ConditionalRemoveOperation; +import org.ehcache.clustered.common.internal.store.operations.ConditionalReplaceOperation; +import org.ehcache.clustered.common.internal.store.operations.PutIfAbsentOperation; +import org.ehcache.clustered.common.internal.store.operations.PutOperation; +import org.ehcache.clustered.common.internal.store.operations.RemoveOperation; +import org.ehcache.clustered.common.internal.store.operations.ReplaceOperation; +import org.ehcache.clustered.common.internal.store.operations.Result; +import org.ehcache.clustered.common.internal.store.operations.codecs.OperationsCodec; import org.ehcache.clustered.client.service.ClusteringService; import org.ehcache.clustered.client.service.ClusteringService.ClusteredCacheIdentifier; import org.ehcache.clustered.common.Consistency; import org.ehcache.clustered.common.internal.store.Chain; import org.ehcache.config.ResourceType; +import org.ehcache.config.builders.ExpiryPolicyBuilder; import org.ehcache.core.CacheConfigurationChangeListener; import org.ehcache.core.Ehcache; -import org.ehcache.core.events.CacheEventListenerConfiguration; import org.ehcache.core.collections.ConcurrentWeakIdentityHashMap; +import org.ehcache.core.events.CacheEventListenerConfiguration; +import org.ehcache.core.events.NullStoreEventDispatcher; +import org.ehcache.core.spi.service.ExecutionService; 
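The new `ClusterTierUserData` class above follows a simple pattern: bundle the per-tier context (timeouts plus store identifier) into an immutable carrier so that `create(endpoint, userData)` can hand the client entity everything it needs in one call. A minimal, self-contained sketch of that pattern — with `java.time.Duration` standing in for the real `Timeouts` class, and all names here hypothetical:

```java
// Hedged sketch, not the real entity-service wiring: an immutable carrier of
// per-tier context, analogous to ClusterTierUserData in the patch above.
import java.time.Duration;

public class UserDataSketch {

  static final class TierUserData {
    private final Duration readTimeout;   // stand-in for Timeouts
    private final String storeIdentifier;

    TierUserData(Duration readTimeout, String storeIdentifier) {
      this.readTimeout = readTimeout;
      this.storeIdentifier = storeIdentifier;
    }

    Duration getReadTimeout() { return readTimeout; }
    String getStoreIdentifier() { return storeIdentifier; }
  }

  // Analogue of EntityClientService.create(endpoint, userData): the context
  // travels with the create call instead of being set on the entity afterwards.
  static String createEntityDescription(TierUserData userData) {
    return "entity for store '" + userData.getStoreIdentifier()
        + "' with read timeout " + userData.getReadTimeout();
  }

  public static void main(String[] args) {
    TierUserData userData = new TierUserData(Duration.ofSeconds(5), "my-cache");
    System.out.println(createEntityDescription(userData));
  }
}
```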
import org.ehcache.core.spi.store.Store; -import org.ehcache.core.spi.store.StoreAccessTimeoutException; import org.ehcache.core.spi.store.events.StoreEventSource; -import org.ehcache.core.spi.store.StoreAccessException; +import org.ehcache.impl.internal.store.basic.BaseStore; +import org.ehcache.spi.resilience.StoreAccessException; import org.ehcache.core.spi.store.tiering.AuthoritativeTier; -import org.ehcache.core.statistics.AuthoritativeTierOperationOutcomes; -import org.ehcache.core.statistics.StoreOperationOutcomes; import org.ehcache.core.spi.time.TimeSource; import org.ehcache.core.spi.time.TimeSourceService; +import org.ehcache.core.statistics.AuthoritativeTierOperationOutcomes; +import org.ehcache.core.statistics.StoreOperationOutcomes; import org.ehcache.core.statistics.StoreOperationOutcomes.EvictionOutcome; import org.ehcache.core.statistics.TierOperationOutcomes; -import org.ehcache.expiry.Expirations; -import org.ehcache.expiry.Expiry; -import org.ehcache.impl.config.loaderwriter.DefaultCacheLoaderWriterConfiguration; -import org.ehcache.core.events.NullStoreEventDispatcher; +import org.ehcache.expiry.ExpiryPolicy; import org.ehcache.impl.store.HashUtils; import org.ehcache.spi.persistence.StateRepository; import org.ehcache.spi.serialization.Serializer; import org.ehcache.spi.serialization.StatefulSerializer; -import org.ehcache.spi.service.ServiceDependencies; -import org.ehcache.spi.service.ServiceProvider; import org.ehcache.spi.service.Service; import org.ehcache.spi.service.ServiceConfiguration; +import org.ehcache.spi.service.ServiceDependencies; +import org.ehcache.spi.service.ServiceProvider; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.terracotta.statistics.MappedOperationStatistic; +import org.terracotta.statistics.OperationStatistic; import org.terracotta.statistics.StatisticsManager; import org.terracotta.statistics.observer.OperationObserver; import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; @@ -80,32 +79,34 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.TimeoutException; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantLock; import java.util.function.BiFunction; +import java.util.function.Consumer; import java.util.function.Function; import java.util.function.Supplier; -import static org.ehcache.core.exceptions.StorePassThroughException.handleRuntimeException; +import static org.ehcache.core.exceptions.StorePassThroughException.handleException; import static org.ehcache.core.spi.service.ServiceUtils.findSingletonAmongst; -import static org.terracotta.statistics.StatisticBuilder.operation; /** * Supports a {@link Store} in a clustered environment. 
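The constructor refactoring below replaces the hand-built statistics wiring with `createObserver(...)` calls inherited from `BaseStore`. The following sketch is illustration only — it is not the `org.terracotta.statistics` API — and shows the `begin()`/`end(outcome)` protocol that each store operation wraps around its body:

```java
// Illustration of the begin()/end(outcome) observer protocol; all types here
// are simplified stand-ins, not the real Terracotta statistics classes.
import java.util.EnumMap;
import java.util.Map;

public class ObserverSketch {

  enum GetOutcome { HIT, MISS, TIMEOUT }

  static final class OutcomeObserver<T extends Enum<T>> {
    private final Map<T, Long> counts;

    OutcomeObserver(Class<T> type) {
      this.counts = new EnumMap<>(type);
    }

    void begin() { /* a real observer would note the operation start here */ }

    synchronized void end(T outcome) {
      counts.merge(outcome, 1L, Long::sum); // tally one completion per outcome
    }

    synchronized Map<T, Long> snapshot() {
      return new EnumMap<>(counts);
    }
  }

  public static void main(String[] args) {
    OutcomeObserver<GetOutcome> getObserver = new OutcomeObserver<>(GetOutcome.class);

    getObserver.begin();
    boolean found = true; // pretend the chain resolved to a value
    getObserver.end(found ? GetOutcome.HIT : GetOutcome.MISS);

    System.out.println(getObserver.snapshot()); // {HIT=1}
  }
}
```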
*/ -public class ClusteredStore implements AuthoritativeTier { +public class ClusteredStore extends BaseStore implements AuthoritativeTier { - private static final String STATISTICS_TAG = "Clustered"; - private static final int TIER_HEIGHT = ClusteredResourceType.Types.UNKNOWN.getTierHeight(); //TierHeight is the same for all ClusteredResourceType.Types static final String CHAIN_COMPACTION_THRESHOLD_PROP = "ehcache.client.chain.compaction.threshold"; static final int DEFAULT_CHAIN_COMPACTION_THRESHOLD = 4; private final int chainCompactionLimit; - private final OperationsCodec codec; - private final ChainResolver resolver; + protected final OperationsCodec codec; + protected final ChainResolver resolver; - private final TimeSource timeSource; + protected final TimeSource timeSource; - private volatile ServerStoreProxy storeProxy; + protected volatile ServerStoreProxy storeProxy; private volatile InvalidationValve invalidationValve; private final OperationObserver getObserver; @@ -120,38 +121,38 @@ public class ClusteredStore implements AuthoritativeTier { private final OperationObserver getAndFaultObserver; - private ClusteredStore(final OperationsCodec codec, final ChainResolver resolver, TimeSource timeSource) { + protected ClusteredStore(Configuration config, OperationsCodec codec, ChainResolver resolver, TimeSource timeSource) { + super(config); + this.chainCompactionLimit = Integer.getInteger(CHAIN_COMPACTION_THRESHOLD_PROP, DEFAULT_CHAIN_COMPACTION_THRESHOLD); this.codec = codec; this.resolver = resolver; this.timeSource = timeSource; - this.getObserver = operation(StoreOperationOutcomes.GetOutcome.class).of(this).named("get").tag(STATISTICS_TAG).build(); - this.putObserver = operation(StoreOperationOutcomes.PutOutcome.class).of(this).named("put").tag(STATISTICS_TAG).build(); - this.removeObserver = operation(StoreOperationOutcomes.RemoveOutcome.class).of(this).named("remove").tag(STATISTICS_TAG).build(); - this.putIfAbsentObserver = operation(StoreOperationOutcomes.PutIfAbsentOutcome.class).of(this).named("putIfAbsent").tag(STATISTICS_TAG).build(); - this.conditionalRemoveObserver = operation(StoreOperationOutcomes.ConditionalRemoveOutcome.class).of(this).named("conditionalRemove").tag(STATISTICS_TAG).build(); - this.replaceObserver = operation(StoreOperationOutcomes.ReplaceOutcome.class).of(this).named("replace").tag(STATISTICS_TAG).build(); - this.conditionalReplaceObserver = operation(StoreOperationOutcomes.ConditionalReplaceOutcome.class).of(this).named("conditionalReplace").tag(STATISTICS_TAG).build(); - this.evictionObserver = operation(StoreOperationOutcomes.EvictionOutcome.class).of(this).named("eviction").tag(STATISTICS_TAG).build(); - this.getAndFaultObserver = operation(AuthoritativeTierOperationOutcomes.GetAndFaultOutcome.class).of(this).named("getAndFault").tag(STATISTICS_TAG).build(); - - Set tags = new HashSet<>(Arrays.asList(STATISTICS_TAG, "tier")); - StatisticsManager.createPassThroughStatistic(this, "mappings", tags, () -> -1L); - StatisticsManager.createPassThroughStatistic(this, "maxMappings", tags, () -> -1L); - StatisticsManager.createPassThroughStatistic(this, "allocatedMemory", tags, () -> -1L); - StatisticsManager.createPassThroughStatistic(this, "occupiedMemory", tags, () -> -1L); - + this.getObserver = createObserver("get", StoreOperationOutcomes.GetOutcome.class, true); + this.putObserver = createObserver("put", StoreOperationOutcomes.PutOutcome.class, true); + this.removeObserver = createObserver("remove", StoreOperationOutcomes.RemoveOutcome.class, 
true); + this.putIfAbsentObserver = createObserver("putIfAbsent", StoreOperationOutcomes.PutIfAbsentOutcome.class, true); + this.conditionalRemoveObserver = createObserver("conditionalRemove", StoreOperationOutcomes.ConditionalRemoveOutcome.class, true); + this.replaceObserver = createObserver("replace", StoreOperationOutcomes.ReplaceOutcome.class, true); + this.conditionalReplaceObserver = createObserver("conditionalReplace", StoreOperationOutcomes.ConditionalReplaceOutcome.class, true); + this.getAndFaultObserver = createObserver("getAndFault", AuthoritativeTierOperationOutcomes.GetAndFaultOutcome.class, true); + this.evictionObserver = createObserver("eviction", StoreOperationOutcomes.EvictionOutcome.class, false); } /** * For tests */ - ClusteredStore(OperationsCodec codec, EternalChainResolver resolver, ServerStoreProxy proxy, TimeSource timeSource) { - this(codec, resolver, timeSource); + protected ClusteredStore(Configuration config, OperationsCodec codec, EternalChainResolver resolver, ServerStoreProxy proxy, TimeSource timeSource) { + this(config, codec, resolver, timeSource); this.storeProxy = proxy; } + @Override + protected String getStatisticsTag() { + return "Clustered"; + } + @Override public ValueHolder get(final K key) throws StoreAccessException { getObserver.begin(); @@ -171,7 +172,7 @@ public ValueHolder get(final K key) throws StoreAccessException { } } - private ValueHolder getInternal(K key) throws StoreAccessException, TimeoutException { + protected ValueHolder getInternal(K key) throws StoreAccessException, TimeoutException { ClusteredValueHolder holder = null; try { Chain chain = storeProxy.get(extractLongKey(key)); @@ -195,12 +196,12 @@ private ValueHolder getInternal(K key) throws StoreAccessException, TimeoutEx } } } catch (RuntimeException re) { - throw handleRuntimeException(re); + throw handleException(re); } return holder; } - private long extractLongKey(K key) { + protected long extractLongKey(K key) { return HashUtils.intHashToLong(key.hashCode()); } @@ -230,23 +231,32 @@ public PutStatus put(final K key, final V value) throws StoreAccessException { return status; } - private PutStatus silentPut(final K key, final V value) throws StoreAccessException { + protected PutStatus silentPut(final K key, final V value) throws StoreAccessException { try { PutOperation operation = new PutOperation<>(key, value, timeSource.getTimeMillis()); ByteBuffer payload = codec.encode(operation); long extractedKey = extractLongKey(key); storeProxy.append(extractedKey, payload); return PutStatus.PUT; - } catch (RuntimeException re) { - throw handleRuntimeException(re); - } catch (TimeoutException e) { - throw new StoreAccessTimeoutException(e); + } catch (Exception re) { + throw handleException(re); } } @Override - public ValueHolder putIfAbsent(final K key, final V value) throws StoreAccessException { + public ValueHolder putIfAbsent(final K key, final V value, Consumer put) throws StoreAccessException { putIfAbsentObserver.begin(); + V result = silentPutIfAbsent(key, value); + if(result == null) { + putIfAbsentObserver.end(StoreOperationOutcomes.PutIfAbsentOutcome.PUT); + return null; + } else { + putIfAbsentObserver.end(StoreOperationOutcomes.PutIfAbsentOutcome.HIT); + return new ClusteredValueHolder<>(result); + } + } + + protected V silentPutIfAbsent(K key, V value) throws StoreAccessException { try { PutIfAbsentOperation operation = new PutIfAbsentOperation<>(key, value, timeSource.getTimeMillis()); ByteBuffer payload = codec.encode(operation); @@ -260,17 +270,9 @@ public 
ValueHolder putIfAbsent(final K key, final V value) throws StoreAccess } Result result = resolvedChain.getResolvedResult(key); - if(result == null) { - putIfAbsentObserver.end(StoreOperationOutcomes.PutIfAbsentOutcome.PUT); - return null; - } else { - putIfAbsentObserver.end(StoreOperationOutcomes.PutIfAbsentOutcome.HIT); - return new ClusteredValueHolder<>(result.getValue()); - } - } catch (RuntimeException re) { - throw handleRuntimeException(re); - } catch (TimeoutException e) { - throw new StoreAccessTimeoutException(e); + return result == null ? null : result.getValue(); + } catch (Exception re) { + throw handleException(re); } } @@ -286,7 +288,7 @@ public boolean remove(final K key) throws StoreAccessException { } } - private boolean silentRemove(final K key) throws StoreAccessException { + protected boolean silentRemove(K key) throws StoreAccessException { try { RemoveOperation operation = new RemoveOperation<>(key, timeSource.getTimeMillis()); ByteBuffer payload = codec.encode(operation); @@ -300,16 +302,12 @@ private boolean silentRemove(final K key) throws StoreAccessException { } else { return false; } - } catch (RuntimeException re) { - throw handleRuntimeException(re); - } catch (TimeoutException e) { - throw new StoreAccessTimeoutException(e); + } catch (Exception re) { + throw handleException(re); } } - @Override - public RemoveStatus remove(final K key, final V value) throws StoreAccessException { - conditionalRemoveObserver.begin(); + protected V silentRemove(K key, V value) throws StoreAccessException { try { ConditionalRemoveOperation operation = new ConditionalRemoveOperation<>(key, value, timeSource.getTimeMillis()); ByteBuffer payload = codec.encode(operation); @@ -318,30 +316,48 @@ public RemoveStatus remove(final K key, final V value) throws StoreAccessExcepti ResolvedChain resolvedChain = resolver.resolve(chain, key, timeSource.getTimeMillis()); Result result = resolvedChain.getResolvedResult(key); - if(result != null) { - if(value.equals(result.getValue())) { - storeProxy.replaceAtHead(extractedKey, chain, resolvedChain.getCompactedChain()); + if (result != null && value.equals(result.getValue())) { + storeProxy.replaceAtHead(extractedKey, chain, resolvedChain.getCompactedChain()); + } + return result == null ? 
null : result.getValue(); + } catch (Exception re) { + throw handleException(re); + } + } - conditionalRemoveObserver.end(StoreOperationOutcomes.ConditionalRemoveOutcome.REMOVED); - return RemoveStatus.REMOVED; - } else { - conditionalRemoveObserver.end(StoreOperationOutcomes.ConditionalRemoveOutcome.MISS); - return RemoveStatus.KEY_PRESENT; - } + @Override + public RemoveStatus remove(final K key, final V value) throws StoreAccessException { + conditionalRemoveObserver.begin(); + V result = silentRemove(key, value); + if(result != null) { + if(value.equals(result)) { + conditionalRemoveObserver.end(StoreOperationOutcomes.ConditionalRemoveOutcome.REMOVED); + return RemoveStatus.REMOVED; } else { conditionalRemoveObserver.end(StoreOperationOutcomes.ConditionalRemoveOutcome.MISS); - return RemoveStatus.KEY_MISSING; + return RemoveStatus.KEY_PRESENT; } - } catch (RuntimeException re) { - throw handleRuntimeException(re); - } catch (TimeoutException e) { - throw new StoreAccessTimeoutException(e); + } else { + conditionalRemoveObserver.end(StoreOperationOutcomes.ConditionalRemoveOutcome.MISS); + return RemoveStatus.KEY_MISSING; } } @Override public ValueHolder replace(final K key, final V value) throws StoreAccessException { replaceObserver.begin(); + + V result = silentReplace(key, value); + if(result == null) { + replaceObserver.end(StoreOperationOutcomes.ReplaceOutcome.MISS); + return null; + } else { + replaceObserver.end(StoreOperationOutcomes.ReplaceOutcome.REPLACED); + return new ClusteredValueHolder<>(result); + } + } + + protected V silentReplace(K key, V value) throws StoreAccessException { try { ReplaceOperation operation = new ReplaceOperation<>(key, value, timeSource.getTimeMillis()); ByteBuffer payload = codec.encode(operation); @@ -355,26 +371,16 @@ public ValueHolder replace(final K key, final V value) throws StoreAccessExce } Result result = resolvedChain.getResolvedResult(key); - if(result == null) { - replaceObserver.end(StoreOperationOutcomes.ReplaceOutcome.MISS); - return null; - } else { - replaceObserver.end(StoreOperationOutcomes.ReplaceOutcome.REPLACED); - return new ClusteredValueHolder<>(result.getValue()); - } - } catch (RuntimeException re) { - throw handleRuntimeException(re); - } catch (TimeoutException e) { - throw new StoreAccessTimeoutException(e); + return result == null ? null : result.getValue(); + } catch (Exception re) { + throw handleException(re); } } - @Override - public ReplaceStatus replace(final K key, final V oldValue, final V newValue) throws StoreAccessException { - conditionalReplaceObserver.begin(); + protected V silentReplace(K key, V oldValue, V newValue) throws StoreAccessException { try { ConditionalReplaceOperation operation = new ConditionalReplaceOperation<>(key, oldValue, newValue, timeSource - .getTimeMillis()); + .getTimeMillis()); ByteBuffer payload = codec.encode(operation); long extractedKey = extractLongKey(key); Chain chain = storeProxy.getAndAppend(extractedKey, payload); @@ -386,22 +392,27 @@ public ReplaceStatus replace(final K key, final V oldValue, final V newValue) th } Result result = resolvedChain.getResolvedResult(key); - if(result != null) { - if(oldValue.equals(result.getValue())) { - conditionalReplaceObserver.end(StoreOperationOutcomes.ConditionalReplaceOutcome.REPLACED); - return ReplaceStatus.HIT; - } else { - conditionalReplaceObserver.end(StoreOperationOutcomes.ConditionalReplaceOutcome.MISS); - return ReplaceStatus.MISS_PRESENT; - } + return result == null ? 
null : result.getValue(); + } catch (Exception re) { + throw handleException(re); + } + } + + @Override + public ReplaceStatus replace(final K key, final V oldValue, final V newValue) throws StoreAccessException { + conditionalReplaceObserver.begin(); + V result = silentReplace(key, oldValue, newValue); + if(result != null) { + if(oldValue.equals(result)) { + conditionalReplaceObserver.end(StoreOperationOutcomes.ConditionalReplaceOutcome.REPLACED); + return ReplaceStatus.HIT; } else { conditionalReplaceObserver.end(StoreOperationOutcomes.ConditionalReplaceOutcome.MISS); - return ReplaceStatus.MISS_NOT_PRESENT; + return ReplaceStatus.MISS_PRESENT; } - } catch (RuntimeException re) { - throw handleRuntimeException(re); - } catch (TimeoutException e) { - throw new StoreAccessTimeoutException(e); + } else { + conditionalReplaceObserver.end(StoreOperationOutcomes.ConditionalReplaceOutcome.MISS); + return ReplaceStatus.MISS_NOT_PRESENT; } } @@ -409,10 +420,8 @@ public ReplaceStatus replace(final K key, final V oldValue, final V newValue) th public void clear() throws StoreAccessException { try { storeProxy.clear(); - } catch (RuntimeException re) { - throw handleRuntimeException(re); - } catch (TimeoutException e) { - throw new StoreAccessTimeoutException(e); + } catch (Exception re) { + throw handleException(re); } } @@ -429,22 +438,19 @@ public Iterator>> iterator() { } @Override - public ValueHolder compute(final K key, final BiFunction mappingFunction) - throws StoreAccessException { + public ValueHolder getAndCompute(final K key, final BiFunction mappingFunction) { // TODO: Make appropriate ServerStoreProxy call throw new UnsupportedOperationException("Implement me"); } @Override - public ValueHolder compute(final K key, final BiFunction mappingFunction, final Supplier replaceEqual) - throws StoreAccessException { + public ValueHolder computeAndGet(final K key, final BiFunction mappingFunction, final Supplier replaceEqual, Supplier invokeWriter) { // TODO: Make appropriate ServerStoreProxy call throw new UnsupportedOperationException("Implement me"); } @Override - public ValueHolder computeIfAbsent(final K key, final Function mappingFunction) - throws StoreAccessException { + public ValueHolder computeIfAbsent(final K key, final Function mappingFunction) { // TODO: Make appropriate ServerStoreProxy call throw new UnsupportedOperationException("Implement me"); } @@ -475,14 +481,13 @@ public Map> bulkCompute(final Set keys, final Fun } } } else { - throw new UnsupportedOperationException("This compute method is not yet capable of handling generic computation functions"); + throw new UnsupportedOperationException("This bulkCompute method is not yet capable of handling generic computation functions"); } return valueHolderMap; } @Override - public Map> bulkCompute(final Set keys, final Function>, Iterable>> remappingFunction, final Supplier replaceEqual) - throws StoreAccessException { + public Map> bulkCompute(final Set keys, final Function>, Iterable>> remappingFunction, final Supplier replaceEqual) { // TODO: Make appropriate ServerStoreProxy call throw new UnsupportedOperationException("Implement me"); } @@ -503,12 +508,11 @@ public Map> bulkComputeIfAbsent(final Set keys, f // This timeout handling is safe **only** in the context of a get/read operation! value = null; } - ValueHolder holder = (value != null) ? 
value : null; - map.put(key, holder); + map.put(key, value); } return map; } else { - throw new UnsupportedOperationException("This compute method is not yet capable of handling generic computation functions"); + throw new UnsupportedOperationException("This bulkComputeIfAbsent method is not yet capable of handling generic computation functions"); } } @@ -553,12 +557,11 @@ public void setInvalidationValve(InvalidationValve valve) { this.invalidationValve = valve; } - /** * Provider of {@link ClusteredStore} instances. */ @ServiceDependencies({TimeSourceService.class, ClusteringService.class}) - public static class Provider implements Store.Provider, AuthoritativeTier.Provider { + public static class Provider extends BaseStoreProvider implements AuthoritativeTier.Provider { private static final Logger LOGGER = LoggerFactory.getLogger(Provider.class); @@ -571,163 +574,204 @@ public static class Provider implements Store.Provider, AuthoritativeTier.Provid private volatile ServiceProvider serviceProvider; private volatile ClusteringService clusteringService; + protected volatile ExecutionService executionService; + private final Lock connectLock = new ReentrantLock(); private final Map, StoreConfig> createdStores = new ConcurrentWeakIdentityHashMap<>(); - private final Map, Collection>> tierOperationStatistics = new ConcurrentWeakIdentityHashMap<>(); + private final Map, OperationStatistic[]> tierOperationStatistics = new ConcurrentWeakIdentityHashMap<>(); @Override - public ClusteredStore createStore(final Configuration storeConfig, final ServiceConfiguration... serviceConfigs) { - ClusteredStore store = createStoreInternal(storeConfig, serviceConfigs); - Collection> tieredOps = new ArrayList<>(); + @SuppressWarnings("unchecked") + protected ClusteredResourceType getResourceType() { + return ClusteredResourceType.Types.UNKNOWN; + } - MappedOperationStatistic get = - new MappedOperationStatistic<>( - store, TierOperationOutcomes.GET_TRANSLATION, "get", TIER_HEIGHT, "get", STATISTICS_TAG); - StatisticsManager.associate(get).withParent(store); - tieredOps.add(get); + @Override + public ClusteredStore createStore(Configuration storeConfig, ServiceConfiguration... 
serviceConfigs) { + ClusteredStore store = createStoreInternal(storeConfig, serviceConfigs); - MappedOperationStatistic evict = - new MappedOperationStatistic<>( - store, TierOperationOutcomes.EVICTION_TRANSLATION, "eviction", TIER_HEIGHT, "eviction", STATISTICS_TAG); - StatisticsManager.associate(evict).withParent(store); - tieredOps.add(evict); + tierOperationStatistics.put(store, new OperationStatistic[] { + createTranslatedStatistic(store, "get", TierOperationOutcomes.GET_TRANSLATION, "get"), + createTranslatedStatistic(store, "eviction", TierOperationOutcomes.EVICTION_TRANSLATION, "eviction") + }); - tierOperationStatistics.put(store, tieredOps); return store; } private ClusteredStore createStoreInternal(Configuration storeConfig, Object[] serviceConfigs) { - DefaultCacheLoaderWriterConfiguration loaderWriterConfiguration = findSingletonAmongst(DefaultCacheLoaderWriterConfiguration.class, serviceConfigs); - if (loaderWriterConfiguration != null) { - throw new IllegalStateException("CacheLoaderWriter is not supported with clustered tiers"); - } + connectLock.lock(); + try { - CacheEventListenerConfiguration eventListenerConfiguration = findSingletonAmongst(CacheEventListenerConfiguration.class, serviceConfigs); - if (eventListenerConfiguration != null) { - throw new IllegalStateException("CacheEventListener is not supported with clustered tiers"); - } + CacheEventListenerConfiguration eventListenerConfiguration = findSingletonAmongst(CacheEventListenerConfiguration.class, serviceConfigs); + if (eventListenerConfiguration != null) { + throw new IllegalStateException("CacheEventListener is not supported with clustered tiers"); + } - if (clusteringService == null) { - throw new IllegalStateException(Provider.class.getCanonicalName() + ".createStore called without ClusteringServiceConfiguration"); - } + if (clusteringService == null) { + throw new IllegalStateException(Provider.class.getCanonicalName() + ".createStore called without ClusteringServiceConfiguration"); + } - final HashSet> clusteredResourceTypes = - new HashSet<>(storeConfig.getResourcePools().getResourceTypeSet()); - clusteredResourceTypes.retainAll(CLUSTER_RESOURCES); + HashSet> clusteredResourceTypes = + new HashSet<>(storeConfig.getResourcePools().getResourceTypeSet()); + clusteredResourceTypes.retainAll(CLUSTER_RESOURCES); - if (clusteredResourceTypes.isEmpty()) { - throw new IllegalStateException(Provider.class.getCanonicalName() + ".createStore called without ClusteredResourcePools"); - } - if (clusteredResourceTypes.size() != 1) { - throw new IllegalStateException(Provider.class.getCanonicalName() + ".createStore can not create clustered tier with multiple clustered resources"); - } + if (clusteredResourceTypes.isEmpty()) { + throw new IllegalStateException(Provider.class.getCanonicalName() + ".createStore called without ClusteredResourcePools"); + } + if (clusteredResourceTypes.size() != 1) { + throw new IllegalStateException(Provider.class.getCanonicalName() + ".createStore cannot create clustered tier with multiple clustered resources"); + } - ClusteredStoreConfiguration clusteredStoreConfiguration = findSingletonAmongst(ClusteredStoreConfiguration.class, serviceConfigs); - if (clusteredStoreConfiguration == null) { - clusteredStoreConfiguration = new ClusteredStoreConfiguration(); - } - ClusteredCacheIdentifier cacheId = findSingletonAmongst(ClusteredCacheIdentifier.class, serviceConfigs); + ClusteredStoreConfiguration clusteredStoreConfiguration = findSingletonAmongst(ClusteredStoreConfiguration.class, 
serviceConfigs); + if (clusteredStoreConfiguration == null) { + clusteredStoreConfiguration = new ClusteredStoreConfiguration(); + } + ClusteredCacheIdentifier cacheId = findSingletonAmongst(ClusteredCacheIdentifier.class, serviceConfigs); - TimeSource timeSource = serviceProvider.getService(TimeSourceService.class).getTimeSource(); + TimeSource timeSource = serviceProvider.getService(TimeSourceService.class).getTimeSource(); - OperationsCodec codec = new OperationsCodec<>(storeConfig.getKeySerializer(), storeConfig.getValueSerializer()); + OperationsCodec codec = new OperationsCodec<>(storeConfig.getKeySerializer(), storeConfig.getValueSerializer()); - ChainResolver resolver; - Expiry expiry = storeConfig.getExpiry(); - if (Expirations.noExpiration().equals(expiry)) { - resolver = new EternalChainResolver<>(codec); - } else { - resolver = new ExpiryChainResolver<>(codec, expiry); - } + ChainResolver resolver; + ExpiryPolicy expiry = storeConfig.getExpiry(); + if (ExpiryPolicyBuilder.noExpiration().equals(expiry)) { + resolver = new EternalChainResolver<>(codec); + } else { + resolver = new ExpiryChainResolver<>(codec, expiry); + } + ClusteredStore store = createStore(storeConfig, codec, resolver, timeSource, storeConfig.useLoaderInAtomics(), serviceConfigs); - ClusteredStore store = new ClusteredStore<>(codec, resolver, timeSource); + createdStores.put(store, new StoreConfig(cacheId, storeConfig, clusteredStoreConfiguration.getConsistency())); + return store; + } finally { + connectLock.unlock(); + } + } - createdStores.put(store, new StoreConfig(cacheId, storeConfig, clusteredStoreConfiguration.getConsistency())); - return store; + protected ClusteredStore createStore(Configuration storeConfig, + OperationsCodec codec, + ChainResolver resolver, + TimeSource timeSource, + boolean useLoaderInAtomics, + Object[] serviceConfigs) { + return new ClusteredStore<>(storeConfig, codec, resolver, timeSource); } @Override - public void releaseStore(final Store resource) { - if (createdStores.remove(resource) == null) { - throw new IllegalArgumentException("Given clustered tier is not managed by this provider : " + resource); + public void releaseStore(Store resource) { + connectLock.lock(); + try { + if (createdStores.remove(resource) == null) { + throw new IllegalArgumentException("Given clustered tier is not managed by this provider : " + resource); + } + ClusteredStore clusteredStore = (ClusteredStore) resource; + this.clusteringService.releaseServerStoreProxy(clusteredStore.storeProxy, false); + StatisticsManager.nodeFor(clusteredStore).clean(); + tierOperationStatistics.remove(clusteredStore); + } finally { + connectLock.unlock(); } - ClusteredStore clusteredStore = (ClusteredStore)resource; - this.clusteringService.releaseServerStoreProxy(clusteredStore.storeProxy); - StatisticsManager.nodeFor(clusteredStore).clean(); - tierOperationStatistics.remove(clusteredStore); } @Override - public void initStore(final Store resource) { - StoreConfig storeConfig = createdStores.get(resource); - if (storeConfig == null) { - throw new IllegalArgumentException("Given clustered tier is not managed by this provider : " + resource); - } - final ClusteredStore clusteredStore = (ClusteredStore) resource; - ClusteredCacheIdentifier cacheIdentifier = storeConfig.getCacheIdentifier(); + public void initStore(Store resource) { + connectLock.lock(); try { - clusteredStore.storeProxy = clusteringService.getServerStoreProxy(cacheIdentifier, storeConfig.getStoreConfig(), storeConfig.getConsistency(), - new 
ServerCallback() { - @Override - public void onInvalidateHash(long hash) { - EvictionOutcome result = EvictionOutcome.SUCCESS; - clusteredStore.evictionObserver.begin(); - if (clusteredStore.invalidationValve != null) { - try { - LOGGER.debug("CLIENT: calling invalidation valve for hash {}", hash); - clusteredStore.invalidationValve.invalidateAllWithHash(hash); - } catch (StoreAccessException sae) { - //TODO: what should be done here? delegate to resilience strategy? - LOGGER.error("Error invalidating hash {}", hash, sae); - result = StoreOperationOutcomes.EvictionOutcome.FAILURE; - } + StoreConfig storeConfig = createdStores.get(resource); + if (storeConfig == null) { + throw new IllegalArgumentException("Given clustered tier is not managed by this provider : " + resource); + } + ClusteredStore clusteredStore = (ClusteredStore) resource; + ClusteredCacheIdentifier cacheIdentifier = storeConfig.getCacheIdentifier(); + try { + ServerStoreProxy storeProxy = clusteringService.getServerStoreProxy(cacheIdentifier, storeConfig.getStoreConfig(), storeConfig.getConsistency(), + getServerCallback(clusteredStore)); + ReconnectingServerStoreProxy reconnectingServerStoreProxy = new ReconnectingServerStoreProxy(storeProxy, () -> { + Runnable reconnectTask = () -> { + connectLock.lock(); + try { + //TODO: handle race between disconnect event and connection closed exception being thrown + // this task should wait until disconnect event processing is complete. + String cacheId = cacheIdentifier.getId(); + LOGGER.info("Cache {} got disconnected from cluster, reconnecting", cacheId); + clusteringService.releaseServerStoreProxy(clusteredStore.storeProxy, true); + initStore(clusteredStore); + LOGGER.info("Cache {} got reconnected to cluster", cacheId); + } finally { + connectLock.unlock(); } - clusteredStore.evictionObserver.end(result); - } + }; + CompletableFuture.runAsync(reconnectTask, executionService.getUnorderedExecutor(null, new LinkedBlockingQueue<>())); + }); + clusteredStore.storeProxy = reconnectingServerStoreProxy; + } catch (CachePersistenceException e) { + throw new RuntimeException("Unable to create cluster tier proxy - " + cacheIdentifier, e); + } - @Override - public void onInvalidateAll() { - if (clusteredStore.invalidationValve != null) { - try { - LOGGER.debug("CLIENT: calling invalidation valve for all"); - clusteredStore.invalidationValve.invalidateAll(); - } catch (StoreAccessException sae) { - //TODO: what should be done here? delegate to resilience strategy? 
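The reconnect wiring above hands a task to an executor instead of running it on the event thread, and serializes it against other store lifecycle operations via the same connect lock. A hedged, self-contained sketch of that flow — a plain `ExecutorService` stands in for `ExecutionService.getUnorderedExecutor(...)`, and the release/init methods are placeholders:

```java
// Sketch of the asynchronous reconnect pattern; names and types simplified.
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

public class ReconnectSketch {

  private final Lock connectLock = new ReentrantLock();
  private final ExecutorService executor = Executors.newSingleThreadExecutor();

  // Invoked from the disconnection callback; must not block the event thread,
  // hence the hand-off to an executor.
  void onDisconnect(String cacheId) {
    Runnable reconnectTask = () -> {
      connectLock.lock(); // same lock as create/release/init of stores
      try {
        System.out.println("Cache " + cacheId + " disconnected, reconnecting");
        releaseProxyForReconnect(cacheId); // park the tier instead of closing it
        initStore(cacheId);                // fetch a fresh server store proxy
        System.out.println("Cache " + cacheId + " reconnected");
      } finally {
        connectLock.unlock();
      }
    };
    CompletableFuture.runAsync(reconnectTask, executor);
  }

  private void releaseProxyForReconnect(String cacheId) { /* placeholder */ }
  private void initStore(String cacheId) { /* placeholder */ }

  public static void main(String[] args) throws InterruptedException {
    ReconnectSketch sketch = new ReconnectSketch();
    sketch.onDisconnect("my-cache");
    sketch.executor.shutdown();
    sketch.executor.awaitTermination(5, TimeUnit.SECONDS);
  }
}
```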
- LOGGER.error("Error invalidating all", sae); - } - } - } + Serializer keySerializer = clusteredStore.codec.getKeySerializer(); + if (keySerializer instanceof StatefulSerializer) { + StateRepository stateRepository; + try { + stateRepository = clusteringService.getStateRepositoryWithin(cacheIdentifier, cacheIdentifier.getId() + "-Key"); + } catch (CachePersistenceException e) { + throw new RuntimeException(e); + } + ((StatefulSerializer) keySerializer).init(stateRepository); + } + Serializer valueSerializer = clusteredStore.codec.getValueSerializer(); + if (valueSerializer instanceof StatefulSerializer) { + StateRepository stateRepository; + try { + stateRepository = clusteringService.getStateRepositoryWithin(cacheIdentifier, cacheIdentifier.getId() + "-Value"); + } catch (CachePersistenceException e) { + throw new RuntimeException(e); + } + ((StatefulSerializer) valueSerializer).init(stateRepository); + } - @Override - public Chain compact(Chain chain) { - return clusteredStore.resolver.applyOperation(chain, clusteredStore.timeSource.getTimeMillis()); - } - }); - } catch (CachePersistenceException e) { - throw new RuntimeException("Unable to create cluster tier proxy - " + cacheIdentifier, e); + } finally { + connectLock.unlock(); } + } - Serializer keySerializer = clusteredStore.codec.getKeySerializer(); - if (keySerializer instanceof StatefulSerializer) { - StateRepository stateRepository = null; - try { - stateRepository = clusteringService.getStateRepositoryWithin(cacheIdentifier, cacheIdentifier.getId() + "-Key"); - } catch (CachePersistenceException e) { - throw new RuntimeException(e); + protected ServerCallback getServerCallback(ClusteredStore clusteredStore) { + return new ServerCallback() { + @Override + public void onInvalidateHash(long hash) { + EvictionOutcome result = EvictionOutcome.SUCCESS; + clusteredStore.evictionObserver.begin(); + if (clusteredStore.invalidationValve != null) { + try { + LOGGER.debug("CLIENT: calling invalidation valve for hash {}", hash); + clusteredStore.invalidationValve.invalidateAllWithHash(hash); + } catch (StoreAccessException sae) { + //TODO: what should be done here? delegate to resilience strategy? + LOGGER.error("Error invalidating hash {}", hash, sae); + result = EvictionOutcome.FAILURE; + } + } + clusteredStore.evictionObserver.end(result); } - ((StatefulSerializer)keySerializer).init(stateRepository); - } - Serializer valueSerializer = clusteredStore.codec.getValueSerializer(); - if (valueSerializer instanceof StatefulSerializer) { - StateRepository stateRepository = null; - try { - stateRepository = clusteringService.getStateRepositoryWithin(cacheIdentifier, cacheIdentifier.getId() + "-Value"); - } catch (CachePersistenceException e) { - throw new RuntimeException(e); + + @Override + public void onInvalidateAll() { + if (clusteredStore.invalidationValve != null) { + try { + LOGGER.debug("CLIENT: calling invalidation valve for all"); + clusteredStore.invalidationValve.invalidateAll(); + } catch (StoreAccessException sae) { + //TODO: what should be done here? delegate to resilience strategy? 
+ LOGGER.error("Error invalidating all", sae); + } + } } - ((StatefulSerializer)valueSerializer).init(stateRepository); - } + + @Override + public Chain compact(Chain chain) { + return clusteredStore.resolver.applyOperation(chain, clusteredStore.timeSource.getTimeMillis()); + } + }; } @Override @@ -750,34 +794,36 @@ public int rankAuthority(ResourceType authorityResource, Collection serviceProvider) { - this.serviceProvider = serviceProvider; - this.clusteringService = this.serviceProvider.getService(ClusteringService.class); + connectLock.lock(); + try { + this.serviceProvider = serviceProvider; + this.clusteringService = this.serviceProvider.getService(ClusteringService.class); + this.executionService = this.serviceProvider.getService(ExecutionService.class); + } finally { + connectLock.unlock(); + } } @Override public void stop() { - this.serviceProvider = null; - createdStores.clear(); + connectLock.lock(); + try { + this.serviceProvider = null; + createdStores.clear(); + } finally { + connectLock.unlock(); + } } @Override public AuthoritativeTier createAuthoritativeTier(Configuration storeConfig, ServiceConfiguration... serviceConfigs) { ClusteredStore authoritativeTier = createStoreInternal(storeConfig, serviceConfigs); - Collection> tieredOps = new ArrayList<>(); - MappedOperationStatistic get = - new MappedOperationStatistic<>( - authoritativeTier, TierOperationOutcomes.GET_AND_FAULT_TRANSLATION, "get", TIER_HEIGHT, "getAndFault", STATISTICS_TAG); - StatisticsManager.associate(get).withParent(authoritativeTier); - tieredOps.add(get); + tierOperationStatistics.put(authoritativeTier, new OperationStatistic[] { + createTranslatedStatistic(authoritativeTier, "get", TierOperationOutcomes.GET_AND_FAULT_TRANSLATION, "getAndFault"), + createTranslatedStatistic(authoritativeTier, "eviction", TierOperationOutcomes.EVICTION_TRANSLATION, "eviction") + }); - MappedOperationStatistic evict = - new MappedOperationStatistic<>( - authoritativeTier, TierOperationOutcomes.EVICTION_TRANSLATION, "eviction", TIER_HEIGHT, "eviction", STATISTICS_TAG); - StatisticsManager.associate(evict).withParent(authoritativeTier); - tieredOps.add(evict); - - tierOperationStatistics.put(authoritativeTier, tieredOps); return authoritativeTier; } @@ -790,6 +836,7 @@ public void releaseAuthoritativeTier(AuthoritativeTier resource) { public void initAuthoritativeTier(AuthoritativeTier resource) { initStore(resource); } + } private static class StoreConfig { diff --git a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/ClusteredValueHolder.java b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/ClusteredValueHolder.java index f11db85e8b..37359949dc 100644 --- a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/ClusteredValueHolder.java +++ b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/ClusteredValueHolder.java @@ -44,7 +44,7 @@ protected TimeUnit nativeTimeUnit() { } @Override - public V value() { + public V get() { return value; } } diff --git a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/CommonServerStoreProxy.java b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/CommonServerStoreProxy.java index 72bf749aa7..6537b70f41 100644 --- a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/CommonServerStoreProxy.java +++ b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/CommonServerStoreProxy.java @@ -16,7 +16,6 @@ 
package org.ehcache.clustered.client.internal.store; -import org.ehcache.clustered.client.internal.store.ClusterTierClientEntity.ResponseListener; import org.ehcache.clustered.common.internal.messages.EhcacheEntityResponse; import org.ehcache.clustered.common.internal.messages.EhcacheEntityResponse.ClientInvalidateAll; import org.ehcache.clustered.common.internal.messages.EhcacheEntityResponse.ClientInvalidateHash; @@ -34,7 +33,6 @@ import org.slf4j.LoggerFactory; import java.nio.ByteBuffer; -import java.util.Objects; import java.util.concurrent.TimeoutException; import static java.util.Objects.requireNonNull; @@ -54,6 +52,8 @@ class CommonServerStoreProxy implements ServerStoreProxy { this.entity = requireNonNull(entity, "ClusterTierClientEntity must be non-null"); requireNonNull(invalidation, "ServerCallback must be non-null"); + entity.addDisconnectionListener(invalidation::onInvalidateAll); + entity.addResponseListener(ServerInvalidateHash.class, response -> { long key = response.getKey(); LOGGER.debug("CLIENT: on cache {}, server requesting hash {} to be invalidated", cacheId, key); @@ -88,6 +88,13 @@ class CommonServerStoreProxy implements ServerStoreProxy { LOGGER.error("error acking client invalidation of all on cache {}", cacheId, e); } }); + entity.addResponseListener(EhcacheEntityResponse.ResolveRequest.class, response -> { + Chain incoming = response.getChain(); + Chain compacted = invalidation.compact(incoming, response.getKey()); + if (compacted != null) { + replaceAtHead(response.getKey(), incoming, compacted); + } + }); } @Override @@ -124,7 +131,7 @@ public Chain get(long key) throws TimeoutException { } @Override - public void append(long key, ByteBuffer payLoad) throws TimeoutException { + public void append(long key, ByteBuffer payLoad) { try { entity.invokeAndWaitForReceive(new AppendMessage(key, payLoad), true); } catch (Exception e) { diff --git a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/InternalClusterTierClientEntity.java b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/InternalClusterTierClientEntity.java index d7fd0b1625..4841c72255 100644 --- a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/InternalClusterTierClientEntity.java +++ b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/InternalClusterTierClientEntity.java @@ -16,15 +16,8 @@ package org.ehcache.clustered.client.internal.store; -import org.ehcache.clustered.client.config.Timeouts; - /** - * InternalClusterTierClientEntity + * InternalClusterTierClientEntity: Marker interface for any extensions that are used internally */ public interface InternalClusterTierClientEntity extends ClusterTierClientEntity { - - void setTimeouts(Timeouts timeouts); - - void setStoreIdentifier(String storeIdentifier); - } diff --git a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/ReconnectInProgressException.java b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/ReconnectInProgressException.java new file mode 100644 index 0000000000..940edf1f03 --- /dev/null +++ b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/ReconnectInProgressException.java @@ -0,0 +1,26 @@ +/* + * Copyright Terracotta, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.ehcache.clustered.client.internal.store; + +public class ReconnectInProgressException extends RuntimeException { + + private static final long serialVersionUID = 2561046982957750120L; + + public ReconnectInProgressException() { + super("Connection lost to server. Client is trying to reconnect to server"); + } + +} diff --git a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/ReconnectingServerStoreProxy.java b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/ReconnectingServerStoreProxy.java new file mode 100644 index 0000000000..7bc88d1703 --- /dev/null +++ b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/ReconnectingServerStoreProxy.java @@ -0,0 +1,203 @@ +/* + * Copyright Terracotta, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.ehcache.clustered.client.internal.store; + +import org.ehcache.clustered.client.internal.store.lock.LockManager; +import org.ehcache.clustered.client.internal.store.lock.LockingServerStoreProxy; +import org.ehcache.clustered.common.internal.store.Chain; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.terracotta.exception.ConnectionClosedException; +import org.terracotta.exception.ConnectionShutdownException; + +import java.nio.ByteBuffer; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicReference; + +public class ReconnectingServerStoreProxy implements ServerStoreProxy, LockManager { + + private static final Logger LOGGER = LoggerFactory.getLogger(ReconnectingServerStoreProxy.class); + + private final AtomicReference delegateRef; + private final Runnable onReconnect; + + public ReconnectingServerStoreProxy(ServerStoreProxy serverStoreProxy, Runnable onReconnect) { + if (serverStoreProxy instanceof LockingServerStoreProxy) { + this.delegateRef = new AtomicReference<>((LockingServerStoreProxy) serverStoreProxy); + } else { + this.delegateRef = new AtomicReference<>(new LockingServerStoreProxy(serverStoreProxy, new UnSupportedLockManager())); + } + this.onReconnect = onReconnect; + } + + @Override + public String getCacheId() { + return proxy().getCacheId(); + } + + @Override + public void close() { + try { + proxy().close(); + } catch (ConnectionClosedException | ConnectionShutdownException e) { + LOGGER.debug("Store was already closed, since connection was closed"); + } + } + + @Override + public Chain get(long key) throws TimeoutException { + return onStoreProxy(serverStoreProxy -> serverStoreProxy.get(key)); + } + + @Override + public void append(long key, ByteBuffer payLoad) throws TimeoutException { + onStoreProxy(serverStoreProxy -> { + serverStoreProxy.append(key, payLoad); + return null; + }); + } + + @Override + public Chain getAndAppend(long key, ByteBuffer payLoad) throws TimeoutException { + return onStoreProxy(serverStoreProxy -> serverStoreProxy.getAndAppend(key, payLoad)); + } + + @Override + public void replaceAtHead(long key, Chain expect, Chain update) { + try { + onStoreProxy(serverStoreProxy -> { + serverStoreProxy.replaceAtHead(key, expect, update); + return null; + }); + } catch (TimeoutException e) { + throw new RuntimeException(e); + } + } + + @Override + public void clear() throws TimeoutException { + onStoreProxy(serverStoreProxy -> { + serverStoreProxy.clear(); + return null; + }); + } + + private LockingServerStoreProxy proxy() { + return delegateRef.get(); + } + + private T onStoreProxy(TimeoutExceptionFunction function) throws TimeoutException { + LockingServerStoreProxy storeProxy = proxy(); + try { + return function.apply(storeProxy); + } catch (ServerStoreProxyException sspe) { + if (sspe.getCause() instanceof ConnectionClosedException) { + if (delegateRef.compareAndSet(storeProxy, new ReconnectInProgressProxy(storeProxy.getCacheId()))) { + onReconnect.run(); + } + return onStoreProxy(function); + } else { + throw sspe; + } + } + } + + @Override + public Chain lock(long hash) throws TimeoutException { + return onStoreProxy(lockingServerStoreProxy -> lockingServerStoreProxy.lock(hash)); + } + + @Override + public void unlock(long hash) throws TimeoutException { + onStoreProxy(lockingServerStoreProxy -> { + lockingServerStoreProxy.unlock(hash); + return null; + }); + } + + @FunctionalInterface + private interface TimeoutExceptionFunction { + V apply(U u) throws TimeoutException; + 
} + + private static class ReconnectInProgressProxy extends LockingServerStoreProxy { + + private final String cacheId; + + ReconnectInProgressProxy(String cacheId) { + super(null, null); + this.cacheId = cacheId; + } + + @Override + public String getCacheId() { + return this.cacheId; + } + + @Override + public void close() { + throw new ReconnectInProgressException(); + } + + @Override + public Chain get(long key) { + throw new ReconnectInProgressException(); + } + + @Override + public void append(long key, ByteBuffer payLoad) { + throw new ReconnectInProgressException(); + } + + @Override + public Chain getAndAppend(long key, ByteBuffer payLoad) { + throw new ReconnectInProgressException(); + } + + @Override + public void replaceAtHead(long key, Chain expect, Chain update) { + throw new ReconnectInProgressException(); + } + + @Override + public void clear() { + throw new ReconnectInProgressException(); + } + + @Override + public Chain lock(long hash) throws TimeoutException { + throw new ReconnectInProgressException(); + } + + @Override + public void unlock(long hash) throws TimeoutException { + throw new ReconnectInProgressException(); + } + } + + private static class UnSupportedLockManager implements LockManager { + + @Override + public Chain lock(long hash) throws TimeoutException { + throw new UnsupportedOperationException("Lock ops are not supported"); + } + + @Override + public void unlock(long hash) throws TimeoutException { + throw new UnsupportedOperationException("Lock ops are not supported"); + } + } +}
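A short usage sketch of the wrapper above may help (not part of the patch; rawProxy, reconnectHook and keyHash are hypothetical names): the first call that fails with a ServerStoreProxyException caused by ConnectionClosedException wins the compare-and-set, installs the ReconnectInProgressProxy placeholder and runs the reconnect hook; until that hook installs a fresh proxy, callers observe ReconnectInProgressException.

// Hypothetical wiring; in this patch the hook is supplied by ClusteredStore.Provider.initStore()
ServerStoreProxy proxy = new ReconnectingServerStoreProxy(rawProxy, reconnectHook);
try {
  Chain chain = proxy.get(keyHash);    // delegates to the current LockingServerStoreProxy
} catch (ReconnectInProgressException e) {
  // connection was lost; retry once the reconnect task has re-created the store proxy
} catch (TimeoutException e) {
  // the underlying entity invocation timed out
}

diff --git a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/ResolvedChain.java b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/ResolvedChain.java index 6e3aed757d..8c8a76473b 100644 --- a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/ResolvedChain.java +++ b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/ResolvedChain.java @@ -16,7 +16,7 @@ package org.ehcache.clustered.client.internal.store; -import org.ehcache.clustered.client.internal.store.operations.Result; +import org.ehcache.clustered.common.internal.store.operations.Result; import org.ehcache.clustered.common.internal.store.Chain; import java.util.Collections; diff --git a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/ServerStoreProxy.java b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/ServerStoreProxy.java index 355aaccbfb..b77c884cdd 100644 --- a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/ServerStoreProxy.java +++ b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/ServerStoreProxy.java @@ -40,6 +40,10 @@ interface ServerCallback { void onInvalidateAll(); Chain compact(Chain chain); + + default Chain compact(Chain chain, long hash) { + return compact(chain); + } } /** diff --git a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/ServerStoreProxyException.java b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/ServerStoreProxyException.java index e2ac646f13..7b406d8271 100644 --- a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/ServerStoreProxyException.java +++ b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/ServerStoreProxyException.java @@ -15,10 +15,10 @@ */ package org.ehcache.clustered.client.internal.store; -/** - */ public class ServerStoreProxyException extends RuntimeException 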
{ + private static final long serialVersionUID = -3451273597124838171L; + /** * Creates a new exception wrapping the {@link Throwable cause} passed in. * diff --git a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/SimpleClusterTierClientEntity.java b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/SimpleClusterTierClientEntity.java index 3cfbabd1e6..e166f8d3ab 100644 --- a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/SimpleClusterTierClientEntity.java +++ b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/SimpleClusterTierClientEntity.java @@ -17,7 +17,6 @@ package org.ehcache.clustered.client.internal.store; import org.ehcache.clustered.client.config.Timeouts; -import org.ehcache.clustered.client.config.builders.TimeoutsBuilder; import org.ehcache.clustered.client.internal.service.ClusterTierException; import org.ehcache.clustered.client.internal.service.ClusterTierValidationException; import org.ehcache.clustered.common.internal.ServerStoreConfiguration; @@ -69,20 +68,19 @@ public class SimpleClusterTierClientEntity implements InternalClusterTierClientE private final ReconnectMessageCodec reconnectMessageCodec = new ReconnectMessageCodec(); private final Map, List>> responseListeners = new ConcurrentHashMap<>(); + private final List disconnectionListeners = new CopyOnWriteArrayList<>(); + private final Timeouts timeouts; + private final String storeIdentifier; - private ReconnectListener reconnectListener = reconnectMessage -> { - // No op - }; - private DisconnectionListener disconnectionListener = () -> { - // No op - }; - private Timeouts timeouts = TimeoutsBuilder.timeouts().build(); - private String storeIdentifier; - private volatile boolean connected = true; + private final List reconnectListeners = new CopyOnWriteArrayList<>(); + private volatile boolean connected = true; - public SimpleClusterTierClientEntity(EntityClientEndpoint endpoint) { + public SimpleClusterTierClientEntity(EntityClientEndpoint endpoint, + Timeouts timeouts, String storeIdentifier) { this.endpoint = endpoint; + this.timeouts = timeouts; + this.storeIdentifier = storeIdentifier; this.messageFactory = new LifeCycleMessageFactory(); endpoint.setDelegate(new EndpointDelegate() { @Override @@ -95,26 +93,22 @@ public void handleMessage(EhcacheEntityResponse messageFromServer) { public byte[] createExtendedReconnectData() { synchronized (lock) { ClusterTierReconnectMessage reconnectMessage = new ClusterTierReconnectMessage(); - reconnectListener.onHandleReconnect(reconnectMessage); + reconnectListeners.forEach(reconnectListener -> reconnectListener.onHandleReconnect(reconnectMessage)); return reconnectMessageCodec.encode(reconnectMessage); } } @Override public void didDisconnectUnexpectedly() { + LOGGER.info("Cluster tier for cache {} disconnected", storeIdentifier); fireDisconnectionEvent(); } }); } - @Override - public void setTimeouts(Timeouts timeouts) { - this.timeouts = timeouts; - } - void fireDisconnectionEvent() { connected = false; - disconnectionListener.onDisconnection(); + disconnectionListeners.forEach(DisconnectionListener::onDisconnection); } private void fireResponseEvent(T response) { @@ -133,8 +127,8 @@ private void fireResponseEvent(T response) { @Override public void close() { endpoint.close(); - reconnectListener = null; - disconnectionListener = null; + reconnectListeners.clear(); + disconnectionListeners.clear(); } @Override @@ -143,13 +137,13 @@ public Timeouts getTimeouts() { } 
@Override - public void setReconnectListener(ReconnectListener reconnectListener) { - this.reconnectListener = reconnectListener; + public void addReconnectListener(ReconnectListener reconnectListener) { + this.reconnectListeners.add(reconnectListener); } @Override - public void setDisconnectionListener(DisconnectionListener disconnectionListener) { - this.disconnectionListener = disconnectionListener; + public void addDisconnectionListener(DisconnectionListener disconnectionListener) { + this.disconnectionListeners.add(disconnectionListener); } @Override @@ -176,29 +170,20 @@ public void validate(ServerStoreConfiguration clientStoreConfiguration) throws C { } } - @Override - public void setStoreIdentifier(String storeIdentifier) { - this.storeIdentifier = storeIdentifier; - } - - void setConnected(boolean connected) { - this.connected = connected; - } - @Override public EhcacheEntityResponse invokeStateRepositoryOperation(StateRepositoryOpMessage message, boolean track) throws ClusterException, TimeoutException { return invokeAndWaitForRetired(message, track); } @Override - public void invokeAndWaitForSend(EhcacheOperationMessage message, boolean track) throws ClusterException, TimeoutException { + public void invokeAndWaitForSend(EhcacheOperationMessage message, boolean track) throws TimeoutException { invokeInternal(endpoint.beginInvoke().ackSent(), getTimeoutDuration(message), message, track); } @Override public void invokeAndWaitForReceive(EhcacheOperationMessage message, boolean track) - throws ClusterException, TimeoutException { - invokeInternal(endpoint.beginInvoke().ackReceived(), getTimeoutDuration(message), message, track); + throws ClusterException, TimeoutException { + invokeInternalAndWait(endpoint.beginInvoke().ackReceived(), message, track); } @Override diff --git a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/StrongServerStoreProxy.java b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/StrongServerStoreProxy.java index 8a5b576e39..0d5c7d1d43 100644 --- a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/StrongServerStoreProxy.java +++ b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/StrongServerStoreProxy.java @@ -50,8 +50,8 @@ public StrongServerStoreProxy(final String cacheId, final ClusterTierClientEntit delegate.addResponseListener(EhcacheEntityResponse.HashInvalidationDone.class, this::hashInvalidationDoneResponseListener); delegate.addResponseListener(EhcacheEntityResponse.AllInvalidationDone.class, this::allInvalidationDoneResponseListener); - entity.setReconnectListener(this::reconnectListener); - entity.setDisconnectionListener(this::disconnectionListener); + entity.addReconnectListener(this::reconnectListener); + entity.addDisconnectionListener(this::disconnectionListener); } private void disconnectionListener() {
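Since the entity now keeps its reconnect and disconnection listeners in CopyOnWriteArrayLists, several components can observe the same connection, which the rest of this patch relies on. A small illustrative sketch (the listener bodies here are hypothetical):

// All registered listeners fire, in registration order, when didDisconnectUnexpectedly() is called
entity.addDisconnectionListener(() -> LOGGER.info("cluster tier disconnected"));
entity.addDisconnectionListener(invalidation::onInvalidateAll);          // as CommonServerStoreProxy now does
entity.addReconnectListener(message -> message.addLocksHeld(locksHeld)); // as LockManagerImpl below does

diff --git a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/lock/LockManager.java b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/lock/LockManager.java new file mode 100644 index 0000000000..164ac3009b --- /dev/null +++ b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/lock/LockManager.java @@ -0,0 +1,35 @@ +/* + * Copyright Terracotta, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 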
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.ehcache.clustered.client.internal.store.lock; + +import org.ehcache.clustered.common.internal.store.Chain; + +import java.util.concurrent.TimeoutException; + +public interface LockManager { + /** + * Acquires the server-side lock for the given key hash and returns the chain currently mapped to it. + * @param hash the key hash to lock + */ + Chain lock(long hash) throws TimeoutException; + + /** + * Releases the server-side lock held for the given key hash. + * @param hash the key hash to unlock + */ + void unlock(long hash) throws TimeoutException; + +} diff --git a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/lock/LockManagerImpl.java b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/lock/LockManagerImpl.java new file mode 100644 index 0000000000..397f2fb703 --- /dev/null +++ b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/lock/LockManagerImpl.java @@ -0,0 +1,83 @@ +/* + * Copyright Terracotta, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.ehcache.clustered.client.internal.store.lock; + +import org.ehcache.clustered.client.internal.store.ClusterTierClientEntity; +import org.ehcache.clustered.client.internal.store.ServerStoreProxyException; +import org.ehcache.clustered.common.internal.messages.ClusterTierReconnectMessage; +import org.ehcache.clustered.common.internal.messages.EhcacheEntityResponse; +import org.ehcache.clustered.common.internal.messages.EhcacheEntityResponse.LockSuccess; +import org.ehcache.clustered.common.internal.messages.ServerStoreOpMessage.LockMessage; +import org.ehcache.clustered.common.internal.messages.ServerStoreOpMessage.UnlockMessage; +import org.ehcache.clustered.common.internal.store.Chain; +import org.ehcache.impl.internal.concurrent.ConcurrentHashMap; + +import java.util.Collections; +import java.util.Set; +import java.util.concurrent.TimeoutException; + +import static org.ehcache.clustered.common.internal.messages.EhcacheResponseType.LOCK_FAILURE; + +public class LockManagerImpl implements LockManager { + + private final ClusterTierClientEntity clientEntity; + private final Set locksHeld = Collections.newSetFromMap(new ConcurrentHashMap<>()); + + public LockManagerImpl(ClusterTierClientEntity clientEntity) { + this.clientEntity = clientEntity; + clientEntity.addReconnectListener(this::reconnectListener); + } + + private void reconnectListener(ClusterTierReconnectMessage reconnectMessage) { + reconnectMessage.addLocksHeld(locksHeld); + } + + @Override + public Chain lock(long hash) throws TimeoutException { + LockSuccess response = getlockResponse(hash); + locksHeld.add(hash); + return response.getChain(); + } + + private LockSuccess getlockResponse(long hash) throws TimeoutException { + EhcacheEntityResponse response; + do { + try { + response = clientEntity.invokeAndWaitForComplete(new LockMessage(hash), false); + } catch (TimeoutException tme) { + throw tme; + } catch (Exception e) { + throw new ServerStoreProxyException(e); + } + if (response == null) { + throw new ServerStoreProxyException("Response for acquiring lock was an invalid null message"); + } + } while (response.getResponseType() == LOCK_FAILURE); + return (LockSuccess) response; + } + + @Override + public void unlock(long hash) throws TimeoutException { + try { + clientEntity.invokeAndWaitForComplete(new UnlockMessage(hash), false); + locksHeld.remove(hash); + } catch (TimeoutException tme) { + throw tme; + } catch (Exception e) { + throw new ServerStoreProxyException(e); + } + } +}
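A brief usage sketch of the new lock manager (variable names are hypothetical; the patch itself wires LockManagerImpl into LockingServerStoreProxy below): lock() spins on LOCK_FAILURE responses until the server grants the lock, records the hash in locksHeld so held locks are re-asserted through the reconnect message, and returns the chain currently mapped to the hash.

LockManager locks = new LockManagerImpl(clientEntity);
Chain chain = locks.lock(keyHash);   // blocks until granted; may throw TimeoutException
try {
  // resolve `chain` and append the resulting mutation while the hash is locked
} finally {
  locks.unlock(keyHash);
}

diff --git a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/lock/LockingServerStoreProxy.java b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/lock/LockingServerStoreProxy.java new file mode 100644 index 0000000000..d231aac923 --- /dev/null +++ b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/lock/LockingServerStoreProxy.java @@ -0,0 +1,78 @@ +/* + * Copyright Terracotta, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 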
+ */ +package org.ehcache.clustered.client.internal.store.lock; + +import org.ehcache.clustered.client.internal.store.ServerStoreProxy; +import org.ehcache.clustered.common.internal.store.Chain; + +import java.nio.ByteBuffer; +import java.util.concurrent.TimeoutException; + +public class LockingServerStoreProxy implements ServerStoreProxy, LockManager { + + private final ServerStoreProxy storeProxy; + private final LockManager lockManager; + + public LockingServerStoreProxy(ServerStoreProxy storeProxy, LockManager lockManager) { + this.storeProxy = storeProxy; + this.lockManager = lockManager; + } + + @Override + public String getCacheId() { + return storeProxy.getCacheId(); + } + + @Override + public void close() { + storeProxy.close(); + } + + @Override + public Chain lock(long hash) throws TimeoutException { + return lockManager.lock(hash); + } + + @Override + public void unlock(long hash) throws TimeoutException { + lockManager.unlock(hash); + } + + @Override + public Chain get(long key) throws TimeoutException { + return storeProxy.get(key); + } + + @Override + public void append(long key, ByteBuffer payLoad) throws TimeoutException { + storeProxy.append(key, payLoad); + } + + @Override + public Chain getAndAppend(long key, ByteBuffer payLoad) throws TimeoutException { + return storeProxy.getAndAppend(key, payLoad); + } + + @Override + public void replaceAtHead(long key, Chain expect, Chain update) { + storeProxy.replaceAtHead(key, expect, update); + } + + @Override + public void clear() throws TimeoutException { + storeProxy.clear(); + } +} diff --git a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/operations/ChainResolver.java b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/operations/ChainResolver.java index 8d1457b1a6..09e30cd6c4 100644 --- a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/operations/ChainResolver.java +++ b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/operations/ChainResolver.java @@ -18,16 +18,15 @@ import org.ehcache.clustered.client.internal.store.ChainBuilder; import org.ehcache.clustered.client.internal.store.ResolvedChain; -import org.ehcache.clustered.client.internal.store.operations.codecs.OperationsCodec; import org.ehcache.clustered.common.internal.store.Chain; import org.ehcache.clustered.common.internal.store.Element; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import org.ehcache.clustered.common.internal.store.operations.Operation; +import org.ehcache.clustered.common.internal.store.operations.PutOperation; +import org.ehcache.clustered.common.internal.store.operations.codecs.OperationsCodec; import java.nio.ByteBuffer; import java.util.HashMap; import java.util.Map; -import java.util.concurrent.TimeUnit; /** * An abstract chain resolver. 
@@ -41,8 +40,6 @@ * @param value type */ public abstract class ChainResolver { - protected static final Logger LOG = LoggerFactory.getLogger(EternalChainResolver.class); - protected static final TimeUnit TIME_UNIT = TimeUnit.MILLISECONDS; protected final OperationsCodec codec; public ChainResolver(final OperationsCodec codec) { @@ -124,5 +121,5 @@ public Chain applyOperation(Chain chain, long now) { * @param now current time * @return an equivalent put operation */ - protected abstract PutOperation applyOperation(K key, PutOperation existing, Operation operation, long now); + public abstract PutOperation applyOperation(K key, PutOperation existing, Operation operation, long now); } diff --git a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/operations/EternalChainResolver.java b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/operations/EternalChainResolver.java index 9868fd5c8a..dd2cc6bf93 100644 --- a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/operations/EternalChainResolver.java +++ b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/operations/EternalChainResolver.java @@ -16,9 +16,10 @@ package org.ehcache.clustered.client.internal.store.operations; -import org.ehcache.clustered.client.internal.store.operations.codecs.OperationsCodec; - -import static java.lang.Math.max; +import org.ehcache.clustered.common.internal.store.operations.Operation; +import org.ehcache.clustered.common.internal.store.operations.PutOperation; +import org.ehcache.clustered.common.internal.store.operations.Result; +import org.ehcache.clustered.common.internal.store.operations.codecs.OperationsCodec; /** * A specialized chain resolver for eternal caches. @@ -43,7 +44,7 @@ public EternalChainResolver(final OperationsCodec codec) { * @param now current time * @return the equivalent put operation */ - protected PutOperation applyOperation(K key, PutOperation existing, Operation operation, long now) { + public PutOperation applyOperation(K key, PutOperation existing, Operation operation, long now) { final Result newValue = operation.apply(existing); if (newValue == null) { return null; diff --git a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/operations/ExpiryChainResolver.java b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/operations/ExpiryChainResolver.java index fd23dce9bf..f7f5e6b544 100644 --- a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/operations/ExpiryChainResolver.java +++ b/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/operations/ExpiryChainResolver.java @@ -16,11 +16,19 @@ package org.ehcache.clustered.client.internal.store.operations; -import org.ehcache.clustered.client.internal.store.operations.codecs.OperationsCodec; -import org.ehcache.expiry.Duration; -import org.ehcache.expiry.Expiry; +import org.ehcache.clustered.common.internal.store.operations.Operation; +import org.ehcache.clustered.common.internal.store.operations.PutOperation; +import org.ehcache.clustered.common.internal.store.operations.Result; +import org.ehcache.clustered.common.internal.store.operations.codecs.OperationsCodec; +import org.ehcache.core.config.ExpiryUtils; +import org.ehcache.expiry.ExpiryPolicy; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.time.Duration; import static java.util.Objects.requireNonNull; +import static 
org.ehcache.core.config.ExpiryUtils.isExpiryDurationInfinite; /** * A specialized chain resolver for non-eternal caches. @@ -30,7 +38,9 @@ */ public class ExpiryChainResolver extends ChainResolver { - private final Expiry expiry; + private static final Logger LOG = LoggerFactory.getLogger(ExpiryChainResolver.class); + + private final ExpiryPolicy expiry; /** * Creates a resolver with the given codec and expiry policy. @@ -38,7 +48,7 @@ public class ExpiryChainResolver extends ChainResolver { * @param codec operation codec * @param expiry expiry policy */ - public ExpiryChainResolver(final OperationsCodec codec, Expiry expiry) { + public ExpiryChainResolver(final OperationsCodec codec, ExpiryPolicy expiry) { super(codec); this.expiry = requireNonNull(expiry, "Expiry cannot be null"); } @@ -55,7 +65,7 @@ public ExpiryChainResolver(final OperationsCodec codec, Expiry applyOperation(K key, PutOperation existing, Operation operation, long now) { + public PutOperation applyOperation(K key, PutOperation existing, Operation operation, long now) { final Result newValue = operation.apply(existing); if (newValue == null) { return null; @@ -93,12 +103,12 @@ private long calculateExpiryTime(K key, PutOperation existing, Operation ServerStoreProxy getServerStoreProxy(ClusteredCacheIdentifier cacheIdenti * Releases access to a {@link ServerStoreProxy} and the server-resident {@code ServerStore} it represents. * * @param serverStoreProxy a {@link ServerStoreProxy} obtained through {@link #getServerStoreProxy} + * @param isReconnect whether client is trying to reconnect */ - void releaseServerStoreProxy(ServerStoreProxy serverStoreProxy); + void releaseServerStoreProxy(ServerStoreProxy serverStoreProxy, boolean isReconnect); + + /** + * Add a block to execute when the connection is recovered after it was closed. + * + * @param runnable the execution block + */ + void addConnectionRecoveryListener(Runnable runnable); + + /** + * Remove a block to execute when the connection is recovered after it was closed. + * + * @param runnable the execution block + */ + void removeConnectionRecoveryListener(Runnable runnable); /** * A {@link org.ehcache.spi.persistence.PersistableResourceService.PersistenceSpaceIdentifier PersistenceSpaceIdentifier} diff --git a/clustered/client/src/main/java/org/ehcache/clustered/client/service/EntityBusyException.java b/clustered/client/src/main/java/org/ehcache/clustered/client/service/EntityBusyException.java index ac5b0e42d0..2812e911fe 100644 --- a/clustered/client/src/main/java/org/ehcache/clustered/client/service/EntityBusyException.java +++ b/clustered/client/src/main/java/org/ehcache/clustered/client/service/EntityBusyException.java @@ -17,7 +17,7 @@ /** * Thrown by {@link org.terracotta.connection.entity.Entity} operations requiring access to the - * {@code {@link org.terracotta.entity.ActiveServerEntity}} when the {@code {@link org.terracotta.entity.ActiveServerEntity}} is not available. + * {@code org.terracotta.entity.ActiveServerEntity} when the {@code org.terracotta.entity.ActiveServerEntity} is not available. 
*/ public class EntityBusyException extends Exception { private static final long serialVersionUID = -7706902691622092177L; diff --git a/clustered/client/src/main/resources/META-INF/services/org.ehcache.core.spi.service.ServiceFactory b/clustered/client/src/main/resources/META-INF/services/org.ehcache.core.spi.service.ServiceFactory index 3f9d3fbd63..99428db970 100644 --- a/clustered/client/src/main/resources/META-INF/services/org.ehcache.core.spi.service.ServiceFactory +++ b/clustered/client/src/main/resources/META-INF/services/org.ehcache.core.spi.service.ServiceFactory @@ -1,2 +1,5 @@ org.ehcache.clustered.client.internal.service.ClusteringServiceFactory org.ehcache.clustered.client.internal.store.ClusteredStoreProviderFactory +org.ehcache.clustered.client.internal.loaderwriter.DelegatingLoaderWriterStoreProviderFactory +org.ehcache.clustered.client.internal.loaderwriter.ClusteredLoaderWriterStoreProviderFactory +org.ehcache.clustered.client.internal.loaderwriter.writebehind.ClusteredWriteBehindStoreProviderFactory diff --git a/clustered/client/src/main/resources/META-INF/services/org.ehcache.xml.CacheManagerServiceConfigurationParser b/clustered/client/src/main/resources/META-INF/services/org.ehcache.xml.CacheManagerServiceConfigurationParser index aa772aacfb..3cfe6d381a 100644 --- a/clustered/client/src/main/resources/META-INF/services/org.ehcache.xml.CacheManagerServiceConfigurationParser +++ b/clustered/client/src/main/resources/META-INF/services/org.ehcache.xml.CacheManagerServiceConfigurationParser @@ -1 +1 @@ -org.ehcache.clustered.client.internal.config.xml.ClusteringServiceConfigurationParser \ No newline at end of file +org.ehcache.clustered.client.internal.config.xml.ClusteringCacheManagerServiceConfigurationParser diff --git a/clustered/client/src/main/resources/META-INF/services/org.ehcache.xml.CacheServiceConfigurationParser b/clustered/client/src/main/resources/META-INF/services/org.ehcache.xml.CacheServiceConfigurationParser index aa772aacfb..5a8e03b57d 100644 --- a/clustered/client/src/main/resources/META-INF/services/org.ehcache.xml.CacheServiceConfigurationParser +++ b/clustered/client/src/main/resources/META-INF/services/org.ehcache.xml.CacheServiceConfigurationParser @@ -1 +1 @@ -org.ehcache.clustered.client.internal.config.xml.ClusteringServiceConfigurationParser \ No newline at end of file +org.ehcache.clustered.client.internal.config.xml.ClusteringCacheServiceConfigurationParser diff --git a/clustered/client/src/main/resources/ehcache-clustered-ext.xsd b/clustered/client/src/main/resources/ehcache-clustered-ext.xsd index 90d0524da7..1a8aeae70a 100644 --- a/clustered/client/src/main/resources/ehcache-clustered-ext.xsd +++ b/clustered/client/src/main/resources/ehcache-clustered-ext.xsd @@ -46,13 +46,11 @@ - - - - Specifies the server endpoint to use for identifying cluster configuration. - - - + + + + + @@ -97,6 +95,51 @@ + + + + + + + + + Cluster Tier Manager identifier. + + + + + + + + + Data specific to a particular server. + + + + + + + The host that the server is running on. + + + + + + + The port that the server is listening on. 
+ + + + + + + + + + + + + diff --git a/clustered/client/src/test/java/org/ehcache/clustered/ClusteredResourcePoolUpdationTest.java b/clustered/client/src/test/java/org/ehcache/clustered/ClusteredResourcePoolUpdationTest.java index 6a80761325..946df0deba 100644 --- a/clustered/client/src/test/java/org/ehcache/clustered/ClusteredResourcePoolUpdationTest.java +++ b/clustered/client/src/test/java/org/ehcache/clustered/ClusteredResourcePoolUpdationTest.java @@ -84,7 +84,7 @@ public void testClusteredDedicatedResourcePoolUpdation() throws Exception { expectedException.expectMessage("Updating CLUSTERED resource is not supported"); dedicatedCache.getRuntimeConfiguration().updateResourcePools( ResourcePoolsBuilder.newResourcePoolsBuilder() - .with(ClusteredResourcePoolBuilder.clusteredDedicated("primary-server-resource", 32, MemoryUnit.MB)) + .with(ClusteredResourcePoolBuilder.clusteredDedicated("primary-server-resource", 8, MemoryUnit.MB)) .build() ); } diff --git a/clustered/client/src/test/java/org/ehcache/clustered/client/BasicClusteredCacheExpiryTest.java b/clustered/client/src/test/java/org/ehcache/clustered/client/BasicClusteredCacheExpiryTest.java index 62babd8882..7d88e1f4ea 100644 --- a/clustered/client/src/test/java/org/ehcache/clustered/client/BasicClusteredCacheExpiryTest.java +++ b/clustered/client/src/test/java/org/ehcache/clustered/client/BasicClusteredCacheExpiryTest.java @@ -23,17 +23,16 @@ import org.ehcache.clustered.client.internal.UnitTestConnectionService; import org.ehcache.clustered.common.Consistency; import org.ehcache.config.builders.CacheManagerBuilder; +import org.ehcache.config.builders.ExpiryPolicyBuilder; import org.ehcache.config.builders.ResourcePoolsBuilder; import org.ehcache.config.units.MemoryUnit; -import org.ehcache.expiry.Duration; -import org.ehcache.expiry.Expirations; import org.ehcache.impl.internal.TimeSourceConfiguration; import org.junit.After; import org.junit.Before; import org.junit.Test; import java.net.URI; -import java.util.concurrent.TimeUnit; +import java.time.Duration; import static org.ehcache.clustered.client.config.builders.ClusteringServiceConfigurationBuilder.cluster; import static org.ehcache.config.builders.CacheConfigurationBuilder.newCacheConfigurationBuilder; @@ -54,7 +53,7 @@ public class BasicClusteredCacheExpiryTest { .withCache("clustered-cache", newCacheConfigurationBuilder(Long.class, String.class, ResourcePoolsBuilder.newResourcePoolsBuilder() .with(ClusteredResourcePoolBuilder.clusteredDedicated("primary-server-resource", 2, MemoryUnit.MB))) - .withExpiry(Expirations.timeToLiveExpiration(new Duration(1L, TimeUnit.MILLISECONDS))) + .withExpiry(ExpiryPolicyBuilder.timeToLiveExpiration(Duration.ofMillis(1L))) .add(ClusteredStoreConfigurationBuilder.withConsistency(Consistency.STRONG))); @Before diff --git a/clustered/client/src/test/java/org/ehcache/clustered/client/BasicClusteredCacheTest.java b/clustered/client/src/test/java/org/ehcache/clustered/client/BasicClusteredCacheTest.java index a7febc46de..5163527489 100644 --- a/clustered/client/src/test/java/org/ehcache/clustered/client/BasicClusteredCacheTest.java +++ b/clustered/client/src/test/java/org/ehcache/clustered/client/BasicClusteredCacheTest.java @@ -234,6 +234,9 @@ public void testLargeValues() throws Exception { } public static class Person implements Serializable { + + private static final long serialVersionUID = 1L; + final String name; final int age; diff --git a/clustered/client/src/test/java/org/ehcache/clustered/client/ClusteredCacheDestroyTest.java 
b/clustered/client/src/test/java/org/ehcache/clustered/client/ClusteredCacheDestroyTest.java index 65c965c1b4..833e26c035 100644 --- a/clustered/client/src/test/java/org/ehcache/clustered/client/ClusteredCacheDestroyTest.java +++ b/clustered/client/src/test/java/org/ehcache/clustered/client/ClusteredCacheDestroyTest.java @@ -60,15 +60,15 @@ public class ClusteredCacheDestroyTest { .with(cluster(CLUSTER_URI).autoCreate()) .withCache(CLUSTERED_CACHE, newCacheConfigurationBuilder(Long.class, String.class, ResourcePoolsBuilder.newResourcePoolsBuilder() - .with(ClusteredResourcePoolBuilder.clusteredDedicated("primary-server-resource", 32, MemoryUnit.MB))) + .with(ClusteredResourcePoolBuilder.clusteredDedicated("primary-server-resource", 8, MemoryUnit.MB))) .add(ClusteredStoreConfigurationBuilder.withConsistency(Consistency.STRONG))); @Before public void definePassthroughServer() throws Exception { UnitTestConnectionService.add(CLUSTER_URI, new UnitTestConnectionService.PassthroughServerBuilder() - .resource("primary-server-resource", 64, MemoryUnit.MB) - .resource("secondary-server-resource", 64, MemoryUnit.MB) + .resource("primary-server-resource", 16, MemoryUnit.MB) + .resource("secondary-server-resource", 16, MemoryUnit.MB) .build()); } @@ -97,7 +97,7 @@ public void testDestroyFreesUpTheAllocatedResource() throws CachePersistenceExce CacheConfigurationBuilder configBuilder = newCacheConfigurationBuilder(Long.class, String.class, ResourcePoolsBuilder.newResourcePoolsBuilder() - .with(ClusteredResourcePoolBuilder.clusteredDedicated("primary-server-resource", 34, MemoryUnit.MB))); + .with(ClusteredResourcePoolBuilder.clusteredDedicated("primary-server-resource", 10, MemoryUnit.MB))); try { Cache anotherCache = persistentCacheManager.createCache("another-cache", configBuilder); @@ -134,6 +134,16 @@ public void testDestroyUnknownCacheAlias() throws Exception { cacheManager.close(); } + @Test + public void testDestroyNonExistentCache() throws CachePersistenceException { + PersistentCacheManager persistentCacheManager = clusteredCacheManagerBuilder.build(true); + + String nonExistent = "this-is-not-the-cache-you-are-looking-for"; + assertThat(persistentCacheManager.getCache(nonExistent, Long.class, String.class), nullValue()); + persistentCacheManager.destroyCache(nonExistent); + persistentCacheManager.close(); + } + @Test public void testDestroyCacheWhenMultipleClientsConnected() { PersistentCacheManager persistentCacheManager1 = clusteredCacheManagerBuilder.build(true); @@ -181,6 +191,23 @@ public void testDestroyCacheWithCacheManagerStopped() throws CachePersistenceExc assertThat(persistentCacheManager.getStatus(), is(Status.UNINITIALIZED)); } + @Test + public void testDestroyNonExistentCacheWithCacheManagerStopped() throws CachePersistenceException { + PersistentCacheManager persistentCacheManager = clusteredCacheManagerBuilder.build(true); + persistentCacheManager.close(); + persistentCacheManager.destroyCache("this-is-not-the-cache-you-are-looking-for"); + assertThat(persistentCacheManager.getStatus(), is(Status.UNINITIALIZED)); + } + + @Test + public void testDestroyCacheOnNonExistentCacheManager() throws CachePersistenceException { + PersistentCacheManager persistentCacheManager = clusteredCacheManagerBuilder.build(true); + persistentCacheManager.close(); + persistentCacheManager.destroy(); + + persistentCacheManager.destroyCache("this-is-not-the-cache-you-are-looking-for"); + assertThat(persistentCacheManager.getStatus(), is(Status.UNINITIALIZED)); + } @Test public void 
testDestroyCacheWithTwoCacheManagerOnSameCache_forbiddenWhenInUse() throws CachePersistenceException { PersistentCacheManager persistentCacheManager1 = clusteredCacheManagerBuilder.build(true); diff --git a/clustered/client/src/test/java/org/ehcache/clustered/client/ClusteredCacheExpirationTest.java b/clustered/client/src/test/java/org/ehcache/clustered/client/ClusteredCacheExpirationTest.java index 8d9176a080..4b02f488d3 100644 --- a/clustered/client/src/test/java/org/ehcache/clustered/client/ClusteredCacheExpirationTest.java +++ b/clustered/client/src/test/java/org/ehcache/clustered/client/ClusteredCacheExpirationTest.java @@ -24,13 +24,13 @@ import org.ehcache.clustered.client.internal.UnitTestConnectionService; import org.ehcache.clustered.common.Consistency; import org.ehcache.config.builders.CacheManagerBuilder; +import org.ehcache.config.builders.ExpiryPolicyBuilder; import org.ehcache.config.builders.ResourcePoolsBuilder; +import org.ehcache.config.units.EntryUnit; import org.ehcache.config.units.MemoryUnit; import org.ehcache.core.spi.service.StatisticsService; import org.ehcache.core.statistics.TierStatistics; -import org.ehcache.expiry.Duration; -import org.ehcache.expiry.Expirations; -import org.ehcache.expiry.Expiry; +import org.ehcache.expiry.ExpiryPolicy; import org.ehcache.impl.internal.TimeSourceConfiguration; import org.ehcache.impl.internal.statistics.DefaultStatisticsService; import org.junit.After; @@ -38,8 +38,8 @@ import org.junit.Test; import java.net.URI; +import java.time.Duration; import java.util.Map; -import java.util.concurrent.TimeUnit; import static org.assertj.core.api.Assertions.assertThat; import static org.ehcache.clustered.client.config.builders.ClusteringServiceConfigurationBuilder.cluster; @@ -55,22 +55,22 @@ public class ClusteredCacheExpirationTest { private StatisticsService statisticsService = new DefaultStatisticsService(); - private CacheManagerBuilder cacheManagerBuilder(Expiry expiry) { + private CacheManagerBuilder cacheManagerBuilder(ExpiryPolicy expiry) { return newCacheManagerBuilder() .using(statisticsService) .using(new TimeSourceConfiguration(timeSource)) .with(cluster(CLUSTER_URI).autoCreate()) .withCache(CLUSTERED_CACHE, newCacheConfigurationBuilder(Long.class, String.class, ResourcePoolsBuilder.newResourcePoolsBuilder() - .heap(10) - .offheap(10, MemoryUnit.MB) - .with(ClusteredResourcePoolBuilder.clusteredDedicated("primary-server-resource", 32, MemoryUnit.MB))) + .heap(10, EntryUnit.ENTRIES) + .offheap(6, MemoryUnit.MB) + .with(ClusteredResourcePoolBuilder.clusteredDedicated("primary-server-resource", 8, MemoryUnit.MB))) .withExpiry(expiry) .add(ClusteredStoreConfigurationBuilder.withConsistency(Consistency.STRONG))); } - private Expiry oneSecondExpiration() { - return Expirations.timeToLiveExpiration(Duration.of(1, TimeUnit.SECONDS)); + private ExpiryPolicy oneSecondExpiration() { + return ExpiryPolicyBuilder.timeToLiveExpiration(Duration.ofSeconds(1)); } @Before @@ -117,7 +117,7 @@ public void testGetExpirationPropagatedToHigherTiers() throws CachePersistenceEx @Test public void testGetNoExpirationPropagatedToHigherTiers() throws CachePersistenceException { - CacheManagerBuilder clusteredCacheManagerBuilder = cacheManagerBuilder(Expirations.noExpiration()); + CacheManagerBuilder clusteredCacheManagerBuilder = cacheManagerBuilder(ExpiryPolicyBuilder.noExpiration()); try(PersistentCacheManager cacheManager = clusteredCacheManagerBuilder.build(true)) {
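These test updates follow the same expiry API migration applied to ExpiryChainResolver above: org.ehcache.expiry.Expirations and its Duration give way to ExpiryPolicyBuilder and java.time.Duration. A minimal before/after sketch, illustrative only and not part of the patch:

// Before (old expiry API):
// Expiry<Object, Object> expiry = Expirations.timeToLiveExpiration(new Duration(1L, TimeUnit.MILLISECONDS));
// After (builder API with java.time):
ExpiryPolicy<Object, Object> expiry = ExpiryPolicyBuilder.timeToLiveExpiration(Duration.ofMillis(1L));

diff --git 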
a/clustered/client/src/test/java/org/ehcache/clustered/client/ClusteredConcurrencyTest.java b/clustered/client/src/test/java/org/ehcache/clustered/client/ClusteredConcurrencyTest.java index 16dbc084e7..17f770f66a 100644 --- a/clustered/client/src/test/java/org/ehcache/clustered/client/ClusteredConcurrencyTest.java +++ b/clustered/client/src/test/java/org/ehcache/clustered/client/ClusteredConcurrencyTest.java @@ -96,11 +96,11 @@ private Runnable content(final CountDownLatch latch) { CacheManagerBuilder clusteredCacheManagerBuilder = CacheManagerBuilder.newCacheManagerBuilder() .with(ClusteringServiceConfigurationBuilder.cluster(CLUSTER_URI).autoCreate() .defaultServerResource("primary-server-resource") - .resourcePool("resource-pool-a", 32, MemoryUnit.MB) - .resourcePool("resource-pool-b", 32, MemoryUnit.MB, "secondary-server-resource")) + .resourcePool("resource-pool-a", 8, MemoryUnit.MB) + .resourcePool("resource-pool-b", 8, MemoryUnit.MB, "secondary-server-resource")) .withCache(CACHE_NAME, CacheConfigurationBuilder.newCacheConfigurationBuilder(Long.class, String.class, ResourcePoolsBuilder.newResourcePoolsBuilder() - .with(ClusteredResourcePoolBuilder.clusteredDedicated("primary-server-resource", 32, MemoryUnit.MB))) + .with(ClusteredResourcePoolBuilder.clusteredDedicated("primary-server-resource", 8, MemoryUnit.MB))) .add(new ClusteredStoreConfiguration(Consistency.STRONG))); latch.countDown(); diff --git a/clustered/client/src/test/java/org/ehcache/clustered/client/NonClusteredCacheTest.java b/clustered/client/src/test/java/org/ehcache/clustered/client/NonClusteredCacheTest.java index 50256cb2ec..38ab3b9b77 100644 --- a/clustered/client/src/test/java/org/ehcache/clustered/client/NonClusteredCacheTest.java +++ b/clustered/client/src/test/java/org/ehcache/clustered/client/NonClusteredCacheTest.java @@ -51,7 +51,7 @@ public void testNonClustered() throws Exception { Set> targetProviders = new HashSet<>(); targetProviders.add(ClusteredStore.Provider.class); targetProviders.add(ClusteringService.class); - for (ServiceFactory factory : ClassLoading.libraryServiceLoaderFor(ServiceFactory.class)) { + for (ServiceFactory factory : ClassLoading.libraryServiceLoaderFor(ServiceFactory.class)) { if (targetProviders.remove(factory.getServiceType())) { if (targetProviders.isEmpty()) { break; diff --git a/clustered/client/src/test/java/org/ehcache/clustered/client/UnSupportedCombinationsWIthClusteredCacheTest.java b/clustered/client/src/test/java/org/ehcache/clustered/client/UnSupportedCombinationsWIthClusteredCacheTest.java index 0a0dc345e5..09c4352f61 100644 --- a/clustered/client/src/test/java/org/ehcache/clustered/client/UnSupportedCombinationsWIthClusteredCacheTest.java +++ b/clustered/client/src/test/java/org/ehcache/clustered/client/UnSupportedCombinationsWIthClusteredCacheTest.java @@ -33,8 +33,6 @@ import org.ehcache.event.CacheEvent; import org.ehcache.event.CacheEventListener; import org.ehcache.event.EventType; -import org.ehcache.spi.loaderwriter.BulkCacheLoadingException; -import org.ehcache.spi.loaderwriter.BulkCacheWritingException; import org.ehcache.spi.loaderwriter.CacheLoaderWriter; import org.ehcache.transactions.xa.configuration.XAStoreConfiguration; import org.ehcache.transactions.xa.txmgr.btm.BitronixTransactionManagerLookup; @@ -69,30 +67,6 @@ public void removePassthroughServer() throws Exception { UnitTestConnectionService.remove("terracotta://localhost/my-application"); } - @Test - public void testClusteredCacheWithLoaderWriter() { - - final CacheManagerBuilder 
clusteredCacheManagerBuilder - = CacheManagerBuilder.newCacheManagerBuilder() - .with(ClusteringServiceConfigurationBuilder.cluster(URI.create("terracotta://localhost/my-application")) - .autoCreate()); - final PersistentCacheManager cacheManager = clusteredCacheManagerBuilder.build(true); - - try { - CacheConfiguration config = CacheConfigurationBuilder.newCacheConfigurationBuilder(Long.class, String.class, - ResourcePoolsBuilder.newResourcePoolsBuilder() - .with(ClusteredResourcePoolBuilder.clusteredDedicated("primary-server-resource", 8, MemoryUnit.MB))) - .withLoaderWriter(new TestLoaderWriter()) - .build(); - - cacheManager.createCache("test", config); - fail("IllegalStateException expected"); - } catch (IllegalStateException e){ - assertThat(e.getCause().getMessage(), is("CacheLoaderWriter is not supported with clustered tiers")); - } - cacheManager.close(); - } - @Test public void testClusteredCacheWithEventListeners() { @@ -151,32 +125,32 @@ public void testClusteredCacheWithXA() throws Exception { private static class TestLoaderWriter implements CacheLoaderWriter { @Override - public String load(Long key) throws Exception { + public String load(Long key) { return null; } @Override - public Map loadAll(Iterable keys) throws BulkCacheLoadingException, Exception { + public Map loadAll(Iterable keys) { return null; } @Override - public void write(Long key, String value) throws Exception { + public void write(Long key, String value) { } @Override - public void writeAll(Iterable> entries) throws BulkCacheWritingException, Exception { + public void writeAll(Iterable> entries) { } @Override - public void delete(Long key) throws Exception { + public void delete(Long key) { } @Override - public void deleteAll(Iterable keys) throws BulkCacheWritingException, Exception { + public void deleteAll(Iterable keys) { } } diff --git a/clustered/client/src/test/java/org/ehcache/clustered/client/config/ClusteringServiceConfigurationTest.java b/clustered/client/src/test/java/org/ehcache/clustered/client/config/ClusteringServiceConfigurationTest.java index 464a67578a..31c4fbc91c 100644 --- a/clustered/client/src/test/java/org/ehcache/clustered/client/config/ClusteringServiceConfigurationTest.java +++ b/clustered/client/src/test/java/org/ehcache/clustered/client/config/ClusteringServiceConfigurationTest.java @@ -17,6 +17,7 @@ package org.ehcache.clustered.client.config; import org.ehcache.clustered.client.config.builders.TimeoutsBuilder; +import org.ehcache.clustered.client.internal.ConnectionSource; import org.ehcache.clustered.client.service.ClusteringService; import org.ehcache.clustered.common.ServerSideConfiguration; import org.ehcache.config.builders.CacheManagerBuilder; @@ -24,73 +25,144 @@ import org.junit.Test; import org.junit.rules.ExpectedException; +import java.net.InetSocketAddress; import java.net.URI; import java.util.Collections; +import java.util.Iterator; -import static net.bytebuddy.matcher.ElementMatchers.is; import static org.assertj.core.api.Assertions.assertThat; public class ClusteringServiceConfigurationTest { - private static URI DEFAULT_URI = URI.create("terracotta://localhost:9450"); + private static final URI DEFAULT_URI = URI.create("terracotta://localhost:9450"); + private static final Iterable SERVERS = Collections.singletonList(InetSocketAddress.createUnresolved("localhost", 9450)); + private static final String CACHE_MANAGER = "cacheManager"; @Rule public ExpectedException expectedException = ExpectedException.none(); @Test - public void testGetConnectionUrlNull() 
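// A sketch of the server-list constructor the new tests below cover: instead of a URI,
// the configuration takes the server addresses plus a cluster tier manager identifier,
// exposed through ConnectionSource.ServerList (all names taken from this diff):
import java.net.InetSocketAddress;
import java.util.Collections;
import org.ehcache.clustered.client.config.ClusteringServiceConfiguration;
import org.ehcache.clustered.client.internal.ConnectionSource;

class ServerListConfigSketch {
  void example() {
    Iterable<InetSocketAddress> servers =
        Collections.singletonList(InetSocketAddress.createUnresolved("localhost", 9450));
    ClusteringServiceConfiguration config = new ClusteringServiceConfiguration(servers, "cacheManager");
    ConnectionSource.ServerList source = (ConnectionSource.ServerList) config.getConnectionSource();
    assert "cacheManager".equals(source.getClusterTierManager());
  }
}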
throws Exception { + public void testGetConnectionUrlNull() { expectedException.expect(NullPointerException.class); new ClusteringServiceConfiguration((URI)null); } @Test - public void testGetConnectionUrl() throws Exception { + public void testGetServersNull() { + expectedException.expect(NullPointerException.class); + new ClusteringServiceConfiguration(null, CACHE_MANAGER); + } + + @Test + public void testGetConnectionUrl() { assertThat(new ClusteringServiceConfiguration(DEFAULT_URI).getClusterUri()).isEqualTo(DEFAULT_URI); } @Test - public void testTimeouts() throws Exception { + public void testGetServersAndCacheManager() { + ConnectionSource.ServerList connectionSource = (ConnectionSource.ServerList) new ClusteringServiceConfiguration(SERVERS, CACHE_MANAGER).getConnectionSource(); + assertThat(connectionSource.getServers()).isEqualTo(SERVERS); + assertThat(new ClusteringServiceConfiguration(SERVERS, CACHE_MANAGER).getConnectionSource().getClusterTierManager()).isEqualTo(CACHE_MANAGER); + } + + @Test + public void testGetServersAndRemove() { + ConnectionSource.ServerList connectionSource = (ConnectionSource.ServerList) new ClusteringServiceConfiguration(SERVERS, CACHE_MANAGER).getConnectionSource(); + Iterator iterator = connectionSource.getServers().iterator(); + iterator.next(); + iterator.remove(); + assertThat(connectionSource.getServers()).isEqualTo(SERVERS); + } + + @Test + public void testTimeoutsWithURI() { Timeouts timeouts = TimeoutsBuilder.timeouts().build(); assertThat(new ClusteringServiceConfiguration(DEFAULT_URI, timeouts).getTimeouts()).isSameAs(timeouts); } @Test - public void testDefaultTimeouts() throws Exception { + public void testTimeoutsWithServers() { + Timeouts timeouts = TimeoutsBuilder.timeouts().build(); + assertThat(new ClusteringServiceConfiguration(SERVERS, CACHE_MANAGER, timeouts).getTimeouts()).isSameAs(timeouts); + } + + @Test + public void testDefaultTimeoutsWithURI() { assertThat(new ClusteringServiceConfiguration(DEFAULT_URI).getTimeouts()).isEqualTo(TimeoutsBuilder.timeouts().build()); } @Test - public void testTimeoutsCannotBeNull2Args() throws Exception { + public void testDefaultTimeoutsWithServers() { + assertThat(new ClusteringServiceConfiguration(SERVERS, CACHE_MANAGER).getTimeouts()).isEqualTo(TimeoutsBuilder.timeouts().build()); + } + + @Test + public void testTimeoutsCannotBeNull2ArgsWithURI() { expectedException.expect(NullPointerException.class); new ClusteringServiceConfiguration(DEFAULT_URI, (Timeouts) null); } @Test - public void testTimeoutsCannotBeNull3Args() throws Exception { + public void testTimeoutsCannotBeNull2ArgsWithServers() { + expectedException.expect(NullPointerException.class); + new ClusteringServiceConfiguration(SERVERS, CACHE_MANAGER, null); + } + + @Test + public void testTimeoutsCannotBeNull3ArgsWithURI() { + expectedException.expect(NullPointerException.class); + new ClusteringServiceConfiguration(DEFAULT_URI, null, new ServerSideConfiguration(Collections.emptyMap())); + } + + @Test + public void testTimeoutsCannotBeNull3ArgsWithServers() { expectedException.expect(NullPointerException.class); - new ClusteringServiceConfiguration(DEFAULT_URI, (Timeouts) null, new ServerSideConfiguration(Collections.emptyMap())); + new ClusteringServiceConfiguration(SERVERS, CACHE_MANAGER, null, new ServerSideConfiguration(Collections.emptyMap())); } @Test - public void testTimeoutsCannotBeNull4Args() throws Exception { + public void testTimeoutsCannotBeNull4ArgsWithURI() { expectedException.expect(NullPointerException.class); - 
new ClusteringServiceConfiguration(DEFAULT_URI, (Timeouts) null, true, new ServerSideConfiguration(Collections.emptyMap())); + new ClusteringServiceConfiguration(DEFAULT_URI, null, true, new ServerSideConfiguration(Collections.emptyMap())); } @Test - public void testGetServiceType() throws Exception { + public void testTimeoutsCannotBeNull4ArgsWithServers() { + expectedException.expect(NullPointerException.class); + new ClusteringServiceConfiguration(SERVERS, CACHE_MANAGER, null, true, new ServerSideConfiguration(Collections.emptyMap())); + } + + @Test + public void testGetServiceTypeWithURI() { assertThat(new ClusteringServiceConfiguration(DEFAULT_URI).getServiceType()).isEqualTo(ClusteringService.class); } @Test - public void testGetAutoCreate() throws Exception { + public void testGetServiceTypeWithServers() { + assertThat(new ClusteringServiceConfiguration(SERVERS, CACHE_MANAGER).getServiceType()).isEqualTo(ClusteringService.class); + } + + @Test + public void testGetAutoCreateWithURI() { assertThat(new ClusteringServiceConfiguration(DEFAULT_URI, true, new ServerSideConfiguration(Collections.emptyMap())).isAutoCreate()).isTrue(); } @Test - public void testBuilder() throws Exception { + public void testGetAutoCreateWithServers() { + assertThat(new ClusteringServiceConfiguration(SERVERS, CACHE_MANAGER, true, + new ServerSideConfiguration(Collections.emptyMap())).isAutoCreate()).isTrue(); + } + + @Test + public void testBuilderWithURI() { assertThat(new ClusteringServiceConfiguration(DEFAULT_URI) .builder(CacheManagerBuilder.newCacheManagerBuilder())).isExactlyInstanceOf(CacheManagerBuilder.class); } + + @Test + public void testBuilderWithServers() { + assertThat(new ClusteringServiceConfiguration(SERVERS, CACHE_MANAGER) + .builder(CacheManagerBuilder.newCacheManagerBuilder())).isExactlyInstanceOf(CacheManagerBuilder.class); + } } diff --git a/clustered/client/src/test/java/org/ehcache/clustered/client/config/builders/ClusteredResourcePoolBuilderTest.java b/clustered/client/src/test/java/org/ehcache/clustered/client/config/builders/ClusteredResourcePoolBuilderTest.java index 2f9544e109..84b4715f0b 100644 --- a/clustered/client/src/test/java/org/ehcache/clustered/client/config/builders/ClusteredResourcePoolBuilderTest.java +++ b/clustered/client/src/test/java/org/ehcache/clustered/client/config/builders/ClusteredResourcePoolBuilderTest.java @@ -19,7 +19,6 @@ import org.ehcache.clustered.client.config.ClusteredResourceType; import org.ehcache.clustered.client.config.SharedClusteredResourcePool; import org.ehcache.config.ResourcePool; -import org.ehcache.config.ResourceType; import org.ehcache.config.units.MemoryUnit; import org.hamcrest.Matchers; import org.junit.Test; @@ -27,7 +26,8 @@ import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.nullValue; -import static org.junit.Assert.*; +import static org.junit.Assert.assertThat; + import org.ehcache.clustered.client.config.DedicatedClusteredResourcePool; public class ClusteredResourcePoolBuilderTest { @@ -36,28 +36,23 @@ public class ClusteredResourcePoolBuilderTest { public void dedicated2Arg() throws Exception { ResourcePool pool = ClusteredResourcePoolBuilder.clusteredDedicated(16, MemoryUnit.GB); assertThat(pool, is(instanceOf(DedicatedClusteredResourcePool.class))); - assertThat(pool.getType(), Matchers.is(ClusteredResourceType.Types.DEDICATED)); + assertThat(pool.getType(), Matchers.is(ClusteredResourceType.Types.DEDICATED)); assertThat(pool.isPersistent(), 
is(true)); assertThat(((DedicatedClusteredResourcePool)pool).getSize(), is(16L)); assertThat(((DedicatedClusteredResourcePool)pool).getUnit(), is(MemoryUnit.GB)); assertThat(((DedicatedClusteredResourcePool)pool).getFromResource(), is(nullValue())); } - @Test + @Test(expected = NullPointerException.class) public void dedicated2ArgUnitNull() throws Exception { - try { - ClusteredResourcePoolBuilder.clusteredDedicated(16, null); - fail(); - } catch (NullPointerException e) { - // expected - } + ClusteredResourcePoolBuilder.clusteredDedicated(16, null); } @Test public void dedicated3Arg() throws Exception { ResourcePool pool = ClusteredResourcePoolBuilder.clusteredDedicated("resourceId", 16, MemoryUnit.GB); assertThat(pool, is(instanceOf(DedicatedClusteredResourcePool.class))); - assertThat(pool.getType(), Matchers.is(ClusteredResourceType.Types.DEDICATED)); + assertThat(pool.getType(), is(ClusteredResourceType.Types.DEDICATED)); assertThat(pool.isPersistent(), is(true)); assertThat(((DedicatedClusteredResourcePool)pool).getSize(), is(16L)); assertThat(((DedicatedClusteredResourcePool)pool).getUnit(), is(MemoryUnit.GB)); @@ -68,40 +63,29 @@ public void dedicated3Arg() throws Exception { public void dedicated3ArgFromNull() throws Exception { ResourcePool pool = ClusteredResourcePoolBuilder.clusteredDedicated(null, 16, MemoryUnit.GB); assertThat(pool, is(instanceOf(DedicatedClusteredResourcePool.class))); - assertThat(pool.getType(), Matchers.is(ClusteredResourceType.Types.DEDICATED)); + assertThat(pool.getType(), is(ClusteredResourceType.Types.DEDICATED)); assertThat(pool.isPersistent(), is(true)); assertThat(((DedicatedClusteredResourcePool)pool).getSize(), is(16L)); assertThat(((DedicatedClusteredResourcePool)pool).getUnit(), is(MemoryUnit.GB)); assertThat(((DedicatedClusteredResourcePool)pool).getFromResource(), is(nullValue())); } - @Test + @Test(expected = NullPointerException.class) public void dedicated3ArgUnitNull() throws Exception { - try { - ClusteredResourcePoolBuilder.clusteredDedicated("resourceId", 16, null); - fail(); - } catch (NullPointerException e) { - // expected - } + ClusteredResourcePoolBuilder.clusteredDedicated("resourceId", 16, null); } @Test public void shared() throws Exception { ResourcePool pool = ClusteredResourcePoolBuilder.clusteredShared("resourceId"); assertThat(pool, is(instanceOf(SharedClusteredResourcePool.class))); - assertThat(pool.getType(), Matchers.is(ClusteredResourceType.Types.SHARED)); + assertThat(pool.getType(), is(ClusteredResourceType.Types.SHARED)); assertThat(pool.isPersistent(), is(true)); assertThat(((SharedClusteredResourcePool)pool).getSharedResourcePool(), is("resourceId")); } - @Test + @Test(expected = NullPointerException.class) public void sharedSharedResourceNull() throws Exception { - try { - ClusteredResourcePoolBuilder.clusteredShared(null); - fail(); - } catch (NullPointerException e) { - // expected - } - + ClusteredResourcePoolBuilder.clusteredShared(null); } -} \ No newline at end of file +} diff --git a/clustered/client/src/test/java/org/ehcache/clustered/client/config/builders/TimeoutsBuilderTest.java b/clustered/client/src/test/java/org/ehcache/clustered/client/config/builders/TimeoutsBuilderTest.java index 2f1d67910b..266799a904 100644 --- a/clustered/client/src/test/java/org/ehcache/clustered/client/config/builders/TimeoutsBuilderTest.java +++ b/clustered/client/src/test/java/org/ehcache/clustered/client/config/builders/TimeoutsBuilderTest.java @@ -26,15 +26,15 @@ public class TimeoutsBuilderTest { @Test - public void 
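// The try/fail/catch blocks deleted above collapse into JUnit 4's expected-exception
// form; a self-contained sketch with the same builder call the test uses:
import org.ehcache.clustered.client.config.builders.ClusteredResourcePoolBuilder;
import org.junit.Test;

public class ExpectedExceptionSketch {
  @Test(expected = NullPointerException.class)
  public void dedicated2ArgUnitNull() {
    ClusteredResourcePoolBuilder.clusteredDedicated(16, null); // the MemoryUnit must be non-null
  }
}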
build_empty() throws Exception { + public void build_empty() { Timeouts t = TimeoutsBuilder.timeouts().build(); assertThat(t.getReadOperationTimeout()).isEqualTo(Timeouts.DEFAULT_OPERATION_TIMEOUT); assertThat(t.getWriteOperationTimeout()).isEqualTo(Timeouts.DEFAULT_OPERATION_TIMEOUT); - assertThat(t.getConnectionTimeout()).isEqualTo(Timeouts.INFINITE_TIMEOUT); + assertThat(t.getConnectionTimeout()).isEqualTo(Timeouts.DEFAULT_CONNECTION_TIMEOUT); } @Test - public void build_filled() throws Exception { + public void build_filled() { Timeouts t = TimeoutsBuilder.timeouts() .read(Duration.ofDays(1)) .write(Duration.ofDays(2)) diff --git a/clustered/client/src/test/java/org/ehcache/clustered/client/docs/GettingStarted.java b/clustered/client/src/test/java/org/ehcache/clustered/client/docs/GettingStarted.java index 31e3292bff..5ea95fae52 100644 --- a/clustered/client/src/test/java/org/ehcache/clustered/client/docs/GettingStarted.java +++ b/clustered/client/src/test/java/org/ehcache/clustered/client/docs/GettingStarted.java @@ -75,11 +75,11 @@ public void clusteredCacheManagerWithServerSideConfigExample() throws Exception CacheManagerBuilder.newCacheManagerBuilder() .with(ClusteringServiceConfigurationBuilder.cluster(URI.create("terracotta://localhost/my-application")).autoCreate() .defaultServerResource("primary-server-resource") // <1> - .resourcePool("resource-pool-a", 28, MemoryUnit.MB, "secondary-server-resource") // <2> - .resourcePool("resource-pool-b", 32, MemoryUnit.MB)) // <3> + .resourcePool("resource-pool-a", 8, MemoryUnit.MB, "secondary-server-resource") // <2> + .resourcePool("resource-pool-b", 10, MemoryUnit.MB)) // <3> .withCache("clustered-cache", CacheConfigurationBuilder.newCacheConfigurationBuilder(Long.class, String.class, // <4> ResourcePoolsBuilder.newResourcePoolsBuilder() - .with(ClusteredResourcePoolBuilder.clusteredDedicated("primary-server-resource", 32, MemoryUnit.MB)))) // <5> + .with(ClusteredResourcePoolBuilder.clusteredDedicated("primary-server-resource", 8, MemoryUnit.MB)))) // <5> .withCache("shared-cache-1", CacheConfigurationBuilder.newCacheConfigurationBuilder(Long.class, String.class, ResourcePoolsBuilder.newResourcePoolsBuilder() .with(ClusteredResourcePoolBuilder.clusteredShared("resource-pool-a")))) // <6> @@ -100,7 +100,7 @@ public void clusteredCacheManagerWithDynamicallyAddedCacheExample() throws Excep .with(ClusteringServiceConfigurationBuilder.cluster(URI.create("terracotta://localhost/my-application")) .autoCreate() .defaultServerResource("primary-server-resource") - .resourcePool("resource-pool-a", 28, MemoryUnit.MB)); + .resourcePool("resource-pool-a", 8, MemoryUnit.MB)); PersistentCacheManager cacheManager = clusteredCacheManagerBuilder.build(false); cacheManager.init(); @@ -124,7 +124,7 @@ public void explicitConsistencyConfiguration() throws Exception { .with(ClusteringServiceConfigurationBuilder.cluster(URI.create("terracotta://localhost/my-application")) .autoCreate() .defaultServerResource("primary-server-resource") - .resourcePool("resource-pool-a", 32, MemoryUnit.MB)); + .resourcePool("resource-pool-a", 8, MemoryUnit.MB)); PersistentCacheManager cacheManager = clusteredCacheManagerBuilder.build(false); cacheManager.init(); @@ -152,7 +152,7 @@ public void clusteredCacheTieredExample() throws Exception { .with(ClusteringServiceConfigurationBuilder.cluster(URI.create("terracotta://localhost/my-application")) .autoCreate() .defaultServerResource("primary-server-resource") - .resourcePool("resource-pool-a", 32, MemoryUnit.MB)); + 
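// build_empty() above now expects Timeouts.DEFAULT_CONNECTION_TIMEOUT rather than
// INFINITE_TIMEOUT as the unconfigured connection timeout; a sketch of the builder
// with every knob set explicitly, using only constants shown in these tests:
import java.time.Duration;
import org.ehcache.clustered.client.config.Timeouts;
import org.ehcache.clustered.client.config.builders.TimeoutsBuilder;

class TimeoutsSketch {
  Timeouts timeouts = TimeoutsBuilder.timeouts()
      .read(Duration.ofDays(1))
      .write(Duration.ofDays(2))
      .connection(Timeouts.DEFAULT_CONNECTION_TIMEOUT)
      .build();
}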
.resourcePool("resource-pool-a", 8, MemoryUnit.MB)); PersistentCacheManager cacheManager = clusteredCacheManagerBuilder.build(false); cacheManager.init(); @@ -180,7 +180,7 @@ public void clusteredCacheManagerLifecycleExamples() throws Exception { CacheManagerBuilder autoCreate = CacheManagerBuilder.newCacheManagerBuilder() .with(ClusteringServiceConfigurationBuilder.cluster(URI.create("terracotta://localhost/my-application")) .autoCreate() // <1> - .resourcePool("resource-pool", 32, MemoryUnit.MB, "primary-server-resource")) + .resourcePool("resource-pool", 8, MemoryUnit.MB, "primary-server-resource")) .withCache("clustered-cache", CacheConfigurationBuilder.newCacheConfigurationBuilder(Long.class, String.class, ResourcePoolsBuilder.newResourcePoolsBuilder() .with(ClusteredResourcePoolBuilder.clusteredShared("resource-pool")))); @@ -188,7 +188,7 @@ public void clusteredCacheManagerLifecycleExamples() throws Exception { CacheManagerBuilder expecting = CacheManagerBuilder.newCacheManagerBuilder() .with(ClusteringServiceConfigurationBuilder.cluster(URI.create("terracotta://localhost/my-application")) .expecting() // <2> - .resourcePool("resource-pool", 32, MemoryUnit.MB, "primary-server-resource")) + .resourcePool("resource-pool", 8, MemoryUnit.MB, "primary-server-resource")) .withCache("clustered-cache", CacheConfigurationBuilder.newCacheConfigurationBuilder(Long.class, String.class, ResourcePoolsBuilder.newResourcePoolsBuilder() .with(ClusteredResourcePoolBuilder.clusteredShared("resource-pool")))); @@ -219,7 +219,7 @@ public void unknownClusteredCacheExample() CacheManagerBuilder cacheManagerBuilderAutoCreate = CacheManagerBuilder.newCacheManagerBuilder() .with(ClusteringServiceConfigurationBuilder.cluster(URI.create("terracotta://localhost/my-application")) .autoCreate() // <1> - .resourcePool("resource-pool", 32, MemoryUnit.MB, "primary-server-resource")); + .resourcePool("resource-pool", 8, MemoryUnit.MB, "primary-server-resource")); PersistentCacheManager cacheManager1 = cacheManagerBuilderAutoCreate.build(false); cacheManager1.init(); @@ -235,7 +235,7 @@ public void unknownClusteredCacheExample() CacheManagerBuilder cacheManagerBuilderExpecting = CacheManagerBuilder.newCacheManagerBuilder() .with(ClusteringServiceConfigurationBuilder.cluster(URI.create("terracotta://localhost/my-application")) .expecting() // <4> - .resourcePool("resource-pool", 32, MemoryUnit.MB, "primary-server-resource")); + .resourcePool("resource-pool", 8, MemoryUnit.MB, "primary-server-resource")); PersistentCacheManager cacheManager2 = cacheManagerBuilderExpecting.build(false); cacheManager2.init(); diff --git a/clustered/client/src/test/java/org/ehcache/clustered/client/docs/Resilience.java b/clustered/client/src/test/java/org/ehcache/clustered/client/docs/Resilience.java new file mode 100644 index 0000000000..8e6e86dd3f --- /dev/null +++ b/clustered/client/src/test/java/org/ehcache/clustered/client/docs/Resilience.java @@ -0,0 +1,62 @@ +/* + * Copyright Terracotta, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.ehcache.clustered.client.docs;
+
+import org.ehcache.PersistentCacheManager;
+import org.ehcache.clustered.client.config.Timeouts;
+import org.ehcache.clustered.client.config.builders.ClusteringServiceConfigurationBuilder;
+import org.ehcache.clustered.client.config.builders.TimeoutsBuilder;
+import org.ehcache.clustered.client.internal.UnitTestConnectionService;
+import org.ehcache.config.builders.CacheManagerBuilder;
+import org.ehcache.config.units.MemoryUnit;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.net.URI;
+import java.time.Duration;
+
+public class Resilience {
+
+  @Before
+  public void resetPassthroughServer() throws Exception {
+    UnitTestConnectionService.add("terracotta://localhost/my-application",
+        new UnitTestConnectionService.PassthroughServerBuilder()
+            .resource("primary-server-resource", 128, MemoryUnit.MB)
+            .resource("secondary-server-resource", 96, MemoryUnit.MB)
+            .build());
+  }
+
+  @After
+  public void removePassthroughServer() throws Exception {
+    UnitTestConnectionService.remove("terracotta://localhost/my-application");
+  }
+
+  @Test
+  public void clusteredCacheManagerExample() throws Exception {
+    // tag::timeoutsExample[]
+    CacheManagerBuilder<PersistentCacheManager> clusteredCacheManagerBuilder =
+        CacheManagerBuilder.newCacheManagerBuilder()
+            .with(ClusteringServiceConfigurationBuilder.cluster(URI.create("terracotta://localhost/my-application"))
+                .timeouts(TimeoutsBuilder.timeouts() // <1>
+                    .read(Duration.ofSeconds(10)) // <2>
+                    .write(Timeouts.DEFAULT_OPERATION_TIMEOUT) // <3>
+                    .connection(Timeouts.INFINITE_TIMEOUT)) // <4>
+                .autoCreate());
+    // end::timeoutsExample[]
+  }
+}
diff --git a/clustered/client/src/test/java/org/ehcache/clustered/client/internal/MockConnectionService.java b/clustered/client/src/test/java/org/ehcache/clustered/client/internal/MockConnectionService.java
index c99025050a..22e99a9669 100644
--- a/clustered/client/src/test/java/org/ehcache/clustered/client/internal/MockConnectionService.java
+++ b/clustered/client/src/test/java/org/ehcache/clustered/client/internal/MockConnectionService.java
@@ -20,6 +20,7 @@
 import org.terracotta.connection.ConnectionException;
 import org.terracotta.connection.ConnectionService;
 
+import java.net.InetSocketAddress;
 import java.net.URI;
 import java.util.Properties;
 
@@ -28,17 +29,32 @@
  */
 public class MockConnectionService implements ConnectionService {
 
+  private static final String CONNECTION_TYPE = "mock";
+
   public static Connection mockConnection;
 
   @Override
   public boolean handlesURI(URI uri) {
-    return uri.getScheme().equals("mock");
+    return handlesConnectionType(uri.getScheme());
+  }
+
+  @Override
+  public boolean handlesConnectionType(String s) {
+    return CONNECTION_TYPE.equals(s);
   }
 
   @Override
   public Connection connect(URI uri, Properties properties) throws ConnectionException {
+    return getConnection();
+  }
+
+  @Override
+  public Connection connect(Iterable<InetSocketAddress> iterable, Properties properties) throws ConnectionException {
+    return getConnection();
+  }
+
+  private Connection getConnection() throws ConnectionException {
     if (mockConnection == null) {
-      throw new IllegalStateException("Set mock connection first");
+      throw new ConnectionException(new IllegalStateException("Set mock connection first"));
     }
     return mockConnection;
   }
diff --git a/clustered/client/src/test/java/org/ehcache/clustered/client/internal/UnitTestConnectionService.java
b/clustered/client/src/test/java/org/ehcache/clustered/client/internal/UnitTestConnectionService.java index dc1b8c3cbf..f9dc86734c 100644 --- a/clustered/client/src/test/java/org/ehcache/clustered/client/internal/UnitTestConnectionService.java +++ b/clustered/client/src/test/java/org/ehcache/clustered/client/internal/UnitTestConnectionService.java @@ -23,6 +23,7 @@ import java.lang.reflect.Method; import java.lang.reflect.Proxy; import java.math.BigInteger; +import java.net.InetSocketAddress; import java.net.URI; import java.net.URISyntaxException; import java.util.ArrayList; @@ -149,6 +150,7 @@ public static void add(URI uri, PassthroughServer server) { // TODO rework that better server.registerAsynchronousServerCrasher(mock(IAsynchronousServerCrasher.class)); server.start(true, false); + server.addPermanentEntities(); LOGGER.info("Started PassthroughServer at {}", keyURI); } @@ -238,7 +240,7 @@ public static PassthroughServer remove(URI uri) { String stringArg = (String) args[1]; try { - EntityRef entityRef = connection.getEntityRef(type, version, stringArg); + EntityRef entityRef = connection.getEntityRef(type, version, stringArg); entityRef.destroy(); } catch (EntityNotProvidedException ex) { LOGGER.error("Entity destroy failed (not provided???): ", ex); @@ -283,7 +285,7 @@ public static PassthroughServer remove(String uri) { @SuppressWarnings("unused") public static final class PassthroughServerBuilder { private final List> serverEntityServices = new ArrayList<>(); - private final List> clientEntityServices = + private final List> clientEntityServices = new ArrayList<>(); private final Map serviceProviders = new IdentityHashMap<>(); @@ -336,7 +338,7 @@ public PassthroughServerBuilder serverEntityService(EntityServerService se return this; } - public PassthroughServerBuilder clientEntityService(EntityClientService service) { + public PassthroughServerBuilder clientEntityService(EntityClientService service) { this.clientEntityServices.add(service); return this; } @@ -360,7 +362,7 @@ public PassthroughServer build() { newServer.registerServerEntityService(service); } - for (EntityClientService service : clientEntityServices) { + for (EntityClientService service : clientEntityServices) { newServer.registerClientEntityService(service); } @@ -385,6 +387,11 @@ public static Collection getConnectionProperties(URI uri) { } } + public static Collection getConnections(URI uri) { + ServerDescriptor serverDescriptor = SERVERS.get(createKey(uri)); + return serverDescriptor.getConnections().keySet(); + } + @Override public boolean handlesURI(URI uri) { if (PASSTHROUGH.equals(uri.getScheme())) { @@ -394,6 +401,11 @@ public boolean handlesURI(URI uri) { return SERVERS.containsKey(uri); } + @Override + public boolean handlesConnectionType(String s) { + throw new UnsupportedOperationException("Operation not supported. Use handlesURI(URI) instead."); + } + @Override public Connection connect(URI uri, Properties properties) throws ConnectionException { @@ -436,10 +448,15 @@ public Connection connect(URI uri, Properties properties) throws ConnectionExcep * Uses a Proxy around Connection so closed connections can be removed from the ServerDescriptor. 
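// A hedged sketch of the proxy technique the comment above describes (helper names here
// are illustrative, not from the diff): every method is forwarded to the real Connection,
// and close() additionally triggers bookkeeping such as dropping the connection from its
// ServerDescriptor.
import java.lang.reflect.InvocationHandler;
import java.lang.reflect.Proxy;
import org.terracotta.connection.Connection;

final class ConnectionTrackingSketch {
  static Connection track(Connection delegate, Runnable onClose) {
    InvocationHandler handler = (proxy, method, args) -> {
      Object result = method.invoke(delegate, args);
      if ("close".equals(method.getName())) {
        onClose.run(); // e.g. remove this connection from the ServerDescriptor
      }
      return result;
    };
    return (Connection) Proxy.newProxyInstance(Connection.class.getClassLoader(),
        new Class<?>[] { Connection.class }, handler);
  }
}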
*/ return (Connection) Proxy.newProxyInstance(Connection.class.getClassLoader(), - new Class[] { Connection.class }, + new Class[] { Connection.class }, new ConnectionInvocationHandler(serverDescriptor, connection)); } + @Override + public Connection connect(Iterable iterable, Properties properties) { + throw new UnsupportedOperationException("Operation not supported. Use connect(URI, Properties) instead"); + } + /** * Ensures that the {@code URI} presented conforms to the value used to locate a server. * diff --git a/clustered/client/src/test/java/org/ehcache/clustered/client/internal/config/xml/ClusteredCacheConfigurationParserIT.java b/clustered/client/src/test/java/org/ehcache/clustered/client/internal/config/xml/ClusteredCacheConfigurationParserIT.java new file mode 100644 index 0000000000..f501af949d --- /dev/null +++ b/clustered/client/src/test/java/org/ehcache/clustered/client/internal/config/xml/ClusteredCacheConfigurationParserIT.java @@ -0,0 +1,45 @@ +/* + * Copyright Terracotta, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.ehcache.clustered.client.internal.config.xml; + +import org.ehcache.config.Configuration; +import org.ehcache.xml.XmlConfiguration; +import org.ehcache.xml.XmlConfigurationTest; +import org.junit.Test; +import org.xmlunit.builder.Input; +import org.xmlunit.diff.DefaultNodeMatcher; +import org.xmlunit.diff.ElementSelectors; + +import java.net.URL; + +import static org.junit.Assert.assertThat; +import static org.xmlunit.matchers.CompareMatcher.isSimilarTo; + +/** + * ClusteredCacheConfigurationParserIT + */ +public class ClusteredCacheConfigurationParserIT { + + @Test + public void testClusteredCacheXmlTranslationToString() { + URL resource = XmlConfigurationTest.class.getResource("/configs/clustered-cache.xml"); + Configuration config = new XmlConfiguration(resource); + XmlConfiguration xmlConfig = new XmlConfiguration(config); + assertThat(xmlConfig.toString(), isSimilarTo(Input.from(resource)).ignoreComments() + .ignoreWhitespace() + .withNodeMatcher(new DefaultNodeMatcher(ElementSelectors.byNameAndAllAttributes))); + } +} diff --git a/clustered/client/src/test/java/org/ehcache/clustered/client/internal/config/xml/ClusteredResourceConfigurationParserTest.java b/clustered/client/src/test/java/org/ehcache/clustered/client/internal/config/xml/ClusteredResourceConfigurationParserTest.java new file mode 100644 index 0000000000..2b490639bc --- /dev/null +++ b/clustered/client/src/test/java/org/ehcache/clustered/client/internal/config/xml/ClusteredResourceConfigurationParserTest.java @@ -0,0 +1,61 @@ +/* + * Copyright Terracotta, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.ehcache.clustered.client.internal.config.xml; + +import org.ehcache.clustered.client.internal.config.ClusteredResourcePoolImpl; +import org.ehcache.clustered.client.internal.config.DedicatedClusteredResourcePoolImpl; +import org.ehcache.clustered.client.internal.config.SharedClusteredResourcePoolImpl; +import org.ehcache.config.units.MemoryUnit; +import org.junit.Test; +import org.w3c.dom.Node; + +import static org.ehcache.xml.ConfigurationParserTestHelper.assertElement; + +/** + * ClusteredResourceConfigurationParserTest + */ +public class ClusteredResourceConfigurationParserTest { + + @Test + public void testTranslateClusteredResourcePoolConfiguration() { + ClusteredResourceConfigurationParser configTranslator = new ClusteredResourceConfigurationParser(); + ClusteredResourcePoolImpl clusteredResourcePool = new ClusteredResourcePoolImpl(); + Node retElement = configTranslator.unparseResourcePool(clusteredResourcePool); + String inputString = ""; + assertElement(inputString, retElement); + } + + @Test + public void testTranslateDedicatedResourcePoolConfiguration() { + ClusteredResourceConfigurationParser configTranslator = new ClusteredResourceConfigurationParser(); + DedicatedClusteredResourcePoolImpl dedicatedClusteredResourcePool = new DedicatedClusteredResourcePoolImpl("my-from", 12, MemoryUnit.GB); + Node retElement = configTranslator.unparseResourcePool(dedicatedClusteredResourcePool); + String inputString = "12"; + assertElement(inputString, retElement); + } + + @Test + public void testTranslateSharedResourcePoolConfiguration() { + ClusteredResourceConfigurationParser configTranslator = new ClusteredResourceConfigurationParser(); + SharedClusteredResourcePoolImpl sharedResourcePool = new SharedClusteredResourcePoolImpl("shared-pool"); + Node retElement = configTranslator.unparseResourcePool(sharedResourcePool); + String inputString = ""; + assertElement(inputString, retElement); + } + +} diff --git a/clustered/client/src/test/java/org/ehcache/clustered/client/internal/config/xml/ClusteringCacheManagerServiceConfigurationParserTest.java b/clustered/client/src/test/java/org/ehcache/clustered/client/internal/config/xml/ClusteringCacheManagerServiceConfigurationParserTest.java new file mode 100644 index 0000000000..1bc0247e7f --- /dev/null +++ b/clustered/client/src/test/java/org/ehcache/clustered/client/internal/config/xml/ClusteringCacheManagerServiceConfigurationParserTest.java @@ -0,0 +1,559 @@ +/* + * Copyright Terracotta, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
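// assertElement comes from ConfigurationParserTestHelper, which is not part of this diff;
// assuming it performs an XMLUnit similarity check like the IT above, a rough equivalent
// of what these parser tests verify is:
import static org.junit.Assert.assertThat;
import static org.xmlunit.matchers.CompareMatcher.isSimilarTo;
import org.w3c.dom.Node;
import org.xmlunit.builder.Input;

class XmlCompareSketch {
  static void assertSimilar(String expectedXml, Node actual) {
    assertThat(Input.from(actual),
        isSimilarTo(Input.from(expectedXml)).ignoreComments().ignoreWhitespace());
  }
}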
+ */ +package org.ehcache.clustered.client.internal.config.xml; + +import org.ehcache.clustered.client.config.ClusteringServiceConfiguration; +import org.ehcache.clustered.client.config.Timeouts; +import org.ehcache.clustered.client.config.builders.ClusteringServiceConfigurationBuilder; +import org.ehcache.clustered.client.config.builders.TimeoutsBuilder; +import org.ehcache.clustered.client.internal.ConnectionSource; +import org.ehcache.config.Configuration; +import org.ehcache.config.units.MemoryUnit; +import org.ehcache.core.internal.util.ClassLoading; +import org.ehcache.core.spi.service.ServiceUtils; +import org.ehcache.spi.service.Service; +import org.ehcache.spi.service.ServiceCreationConfiguration; +import org.ehcache.xml.CacheManagerServiceConfigurationParser; +import org.ehcache.xml.XmlConfiguration; +import org.ehcache.xml.exceptions.XmlConfigurationException; +import org.ehcache.xml.model.TimeType; +import org.hamcrest.Matchers; +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; +import org.junit.rules.TestName; +import org.w3c.dom.Attr; +import org.w3c.dom.Element; + +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.OutputStreamWriter; +import java.math.BigInteger; +import java.net.InetSocketAddress; +import java.net.URI; +import java.net.URL; +import java.time.Duration; +import java.time.temporal.TemporalUnit; +import java.util.Arrays; +import java.util.Collection; +import java.util.List; +import java.util.ServiceLoader; + +import javax.xml.parsers.DocumentBuilder; +import javax.xml.parsers.DocumentBuilderFactory; +import javax.xml.transform.stream.StreamSource; + +import static java.time.temporal.ChronoUnit.MINUTES; +import static org.ehcache.xml.ConfigurationParserTestHelper.assertElement; +import static org.ehcache.xml.XmlModel.convertToJavaTimeUnit; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; +import static org.junit.Assert.assertThat; +import static org.junit.Assert.fail; + +public class ClusteringCacheManagerServiceConfigurationParserTest { + + @ClassRule + public static final TemporaryFolder folder = new TemporaryFolder(); + + @Rule + public final TestName testName = new TestName(); + + /** + * Ensures the {@link ClusteringCacheManagerServiceConfigurationParser} is locatable as a + * {@link CacheManagerServiceConfigurationParser} instance. + */ + @Test + public void testServiceLocator() throws Exception { + String expectedParser = ClusteringCacheManagerServiceConfigurationParser.class.getName(); + @SuppressWarnings({"unchecked", "rawtypes"}) + ServiceLoader> parsers = (ServiceLoader) + ClassLoading.libraryServiceLoaderFor(CacheManagerServiceConfigurationParser.class); + + for (CacheManagerServiceConfigurationParser parser : parsers) { + if (parser.getClass().getName().equals(expectedParser)) { + return; + } + } + fail("Expected parser not found"); + } + + /** + * Ensures the namespace declared by {@link ClusteringCacheManagerServiceConfigurationParser} and its + * schema are the same. 
+ */ + @Test + public void testSchema() throws Exception { + final ClusteringCacheManagerServiceConfigurationParser parser = new ClusteringCacheManagerServiceConfigurationParser(); + final StreamSource schemaSource = (StreamSource) parser.getXmlSchema(); + + final DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance(); + factory.setNamespaceAware(true); + factory.setIgnoringComments(true); + factory.setIgnoringElementContentWhitespace(true); + + final DocumentBuilder domBuilder = factory.newDocumentBuilder(); + final Element schema = domBuilder.parse(schemaSource.getInputStream()).getDocumentElement(); + final Attr targetNamespaceAttr = schema.getAttributeNode("targetNamespace"); + assertThat(targetNamespaceAttr, is(not(nullValue()))); + assertThat(targetNamespaceAttr.getValue(), is(parser.getNamespace().toString())); + } + + @Test + public void testGetTimeout() throws Exception { + + final String[] config = new String[] + { + "", + "", + " ", + " ", + " ", + " 5", + " 10", + " 15", + " ", + " ", + "", + "" + }; + + final Configuration configuration = new XmlConfiguration(makeConfig(config)); + + Collection> serviceCreationConfigurations = + configuration.getServiceCreationConfigurations(); + assertThat(serviceCreationConfigurations, is(not(Matchers.empty()))); + + ClusteringServiceConfiguration clusteringServiceConfiguration = + ServiceUtils.findSingletonAmongst(ClusteringServiceConfiguration.class, serviceCreationConfigurations); + assertThat(clusteringServiceConfiguration, is(notNullValue())); + + Timeouts timeouts = clusteringServiceConfiguration.getTimeouts(); + assertThat(timeouts.getReadOperationTimeout(), is(Duration.of(5, MINUTES))); + assertThat(timeouts.getWriteOperationTimeout(), is(Duration.of(10, MINUTES))); + assertThat(timeouts.getConnectionTimeout(), is(Duration.of(15, MINUTES))); + } + + @Test + public void testGetTimeoutNone() throws Exception { + + final String[] config = new String[] + { + "", + "", + " ", + " ", + " ", + " ", + " ", + "", + "" + }; + + final Configuration configuration = new XmlConfiguration(makeConfig(config)); + + Collection> serviceCreationConfigurations = + configuration.getServiceCreationConfigurations(); + assertThat(serviceCreationConfigurations, is(not(Matchers.empty()))); + + ClusteringServiceConfiguration clusteringServiceConfiguration = + ServiceUtils.findSingletonAmongst(ClusteringServiceConfiguration.class, serviceCreationConfigurations); + assertThat(clusteringServiceConfiguration, is(notNullValue())); + + assertThat(clusteringServiceConfiguration.getTimeouts(), is(TimeoutsBuilder.timeouts().build())); + } + + @Test + public void testGetTimeoutUnitDefault() throws Exception { + + final String[] config = new String[] + { + "", + "", + " ", + " ", + " ", + " 5", + " ", + " ", + "", + "" + }; + + final Configuration configuration = new XmlConfiguration(makeConfig(config)); + + Collection> serviceCreationConfigurations = + configuration.getServiceCreationConfigurations(); + assertThat(serviceCreationConfigurations, is(not(Matchers.empty()))); + + ClusteringServiceConfiguration clusteringServiceConfiguration = + ServiceUtils.findSingletonAmongst(ClusteringServiceConfiguration.class, serviceCreationConfigurations); + assertThat(clusteringServiceConfiguration, is(notNullValue())); + + TemporalUnit defaultUnit = convertToJavaTimeUnit(new TimeType().getUnit()); + assertThat(clusteringServiceConfiguration.getTimeouts().getReadOperationTimeout(), + is(equalTo(Duration.of(5, defaultUnit)))); + } + + @Test + public void 
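// The XML literals in these tests lost their angle-bracketed content during extraction.
// Judging by the assertions (5/10/15 minutes), a timeouts configuration plausibly reads
// as below; the element names are assumed from the ehcache clustered schema and are not
// confirmed by this diff:
//
//   <ehcache:config xmlns:ehcache="http://www.ehcache.org/v3"
//                   xmlns:tc="http://www.ehcache.org/v3/clustered">
//     <ehcache:service>
//       <tc:cluster>
//         <tc:connection url="terracotta://example.com:9540/cachemanager"/>
//         <tc:read-timeout unit="minutes">5</tc:read-timeout>
//         <tc:write-timeout unit="minutes">10</tc:write-timeout>
//         <tc:connection-timeout unit="minutes">15</tc:connection-timeout>
//       </tc:cluster>
//     </ehcache:service>
//   </ehcache:config>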
testGetTimeoutUnitBad() throws Exception { + + final String[] config = new String[] + { + "", + "", + " ", + " ", + " ", + " 5", + " ", + " ", + "", + "" + }; + + try { + new XmlConfiguration(makeConfig(config)); + fail("Expecting XmlConfigurationException"); + } catch (XmlConfigurationException e) { + assertThat(e.getMessage(), containsString("Error parsing XML configuration ")); + assertThat(e.getCause().getMessage(), containsString("Value 'femtos' is not facet-valid with respect to enumeration ")); + } + } + + @Test + public void testGetTimeoutValueTooBig() throws Exception { + + final String[] config = new String[] + { + "", + "", + " ", + " ", + " ", + " " + + BigInteger.ONE.add(BigInteger.valueOf(Long.MAX_VALUE)) + + "", + " ", + " ", + "", + "" + }; + + try { + new XmlConfiguration(makeConfig(config)); + fail("Expecting XmlConfigurationException"); + } catch (XmlConfigurationException e) { + assertThat(e.getMessage(), containsString(" exceeds allowed value ")); + } + } + + @Test + public void testGetTimeoutValueOmitted() throws Exception { + + final String[] config = new String[] + { + "", + "", + " ", + " ", + " ", + " ", + " ", + " ", + "", + "" + }; + + try { + new XmlConfiguration(makeConfig(config)); + fail("Expecting XmlConfigurationException"); + } catch (XmlConfigurationException e) { + assertThat(e.getMessage(), containsString("Error parsing XML configuration ")); + assertThat(e.getCause().getMessage(), containsString("'' is not a valid value for 'integer'")); + } + } + + @Test(expected = XmlConfigurationException.class) + public void testUrlAndServers() throws Exception { + final String[] config = new String[] + { + "", + "", + " ", + " ", + " ", + " ", + " ", + " ", + " ", + " ", + "", + "" + }; + + new XmlConfiguration(makeConfig(config)); + } + + @Test(expected = XmlConfigurationException.class) + public void testServersOnly() throws Exception { + final String[] config = new String[] + { + "", + "", + " ", + " ", + " ", + " ", + " ", + " ", + " ", + "", + "" + }; + + new XmlConfiguration(makeConfig(config)); + } + + @Test + public void testServersWithClusterTierManager() throws Exception { + final String[] config = new String[] + { + "", + "", + " ", + " ", + " ", + " ", + " ", + " ", + " ", + " ", + "", + "" + }; + + final Configuration configuration = new XmlConfiguration(makeConfig(config)); + Collection> serviceCreationConfigurations = configuration.getServiceCreationConfigurations(); + ClusteringServiceConfiguration clusteringServiceConfiguration = + ServiceUtils.findSingletonAmongst(ClusteringServiceConfiguration.class, serviceCreationConfigurations); + ConnectionSource.ServerList connectionSource = (ConnectionSource.ServerList) clusteringServiceConfiguration.getConnectionSource(); + Iterable servers = connectionSource.getServers(); + + InetSocketAddress firstServer = InetSocketAddress.createUnresolved("server-1", 9510); + InetSocketAddress secondServer = InetSocketAddress.createUnresolved("server-2", 9540); + List expectedServers = Arrays.asList(firstServer, secondServer); + + assertThat(connectionSource.getClusterTierManager(), is("cM")); + assertThat(servers, is(expectedServers)); + } + + @Test + public void testServersWithClusterTierManagerAndOptionalPorts() throws Exception { + final String[] config = new String[] + { + "", + "", + " ", + " ", + " ", + " ", + " ", + " ", + " ", + " ", + " ", + " ", + "", + "" + }; + + final Configuration configuration = new XmlConfiguration(makeConfig(config)); + Collection> serviceCreationConfigurations = 
configuration.getServiceCreationConfigurations(); + ClusteringServiceConfiguration clusteringServiceConfiguration = + ServiceUtils.findSingletonAmongst(ClusteringServiceConfiguration.class, serviceCreationConfigurations); + ConnectionSource.ServerList connectionSource = (ConnectionSource.ServerList)clusteringServiceConfiguration.getConnectionSource(); + Iterable servers = connectionSource.getServers(); + + InetSocketAddress firstServer = InetSocketAddress.createUnresolved("100.100.100.100", 9510); + InetSocketAddress secondServer = InetSocketAddress.createUnresolved("server-2", 0); + InetSocketAddress thirdServer = InetSocketAddress.createUnresolved("[::1]", 0); + InetSocketAddress fourthServer = InetSocketAddress.createUnresolved("[fe80::1453:846e:7be4:15fe]", 9710); + List expectedServers = Arrays.asList(firstServer, secondServer, thirdServer, fourthServer); + + assertThat(connectionSource.getClusterTierManager(), is("cM")); + assertThat(servers, is(expectedServers)); + } + + @Test + public void testTranslateServiceCreationConfiguration() throws Exception { + URI connectionUri = new URI("terracotta://localhost:9510/my-application"); + ClusteringServiceConfiguration serviceConfig = ClusteringServiceConfigurationBuilder.cluster(connectionUri) + .timeouts(Timeouts.DEFAULT) + .autoCreate() + .defaultServerResource("main") + .resourcePool("primaryresource", 5, MemoryUnit.GB) + .resourcePool("secondaryresource", 10, MemoryUnit.GB, "optional") + .build(); + + ClusteringCacheManagerServiceConfigurationParser parser = new ClusteringCacheManagerServiceConfigurationParser(); + Element returnElement = parser.unparseServiceCreationConfiguration(serviceConfig); + + String inputString = "" + + "" + + "5" + + "5" + + "150" + + "" + + "" + + "5368709120" + + "10737418240" + + ""; + assertElement(inputString, returnElement); + + } + + @Test + public void testTranslateServiceCreationConfigurationWithNoResourcePoolAndAutoCreateFalse() throws Exception { + URI connectionUri = new URI("terracotta://localhost:9510/my-application"); + ClusteringServiceConfiguration serviceConfig = ClusteringServiceConfigurationBuilder.cluster(connectionUri) + .timeouts(Timeouts.DEFAULT) + .expecting() + .defaultServerResource("main") + .build(); + + + ClusteringCacheManagerServiceConfigurationParser parser = new ClusteringCacheManagerServiceConfigurationParser(); + Element returnElement = parser.unparseServiceCreationConfiguration(serviceConfig); + + String inputString = "" + + "" + + "5" + + "5" + + "150" + + "" + + "" + + ""; + assertElement(inputString, returnElement); + } + + @Test + public void testTranslateServiceCreationConfigurationWithNoServerSideConfig() throws Exception { + URI connectionUri = new URI("terracotta://localhost:9510/my-application"); + ClusteringServiceConfiguration serviceConfig = ClusteringServiceConfigurationBuilder.cluster(connectionUri) + .timeouts(Timeouts.DEFAULT) + .build(); + + ClusteringCacheManagerServiceConfigurationParser parser = new ClusteringCacheManagerServiceConfigurationParser(); + Element returnElement = parser.unparseServiceCreationConfiguration(serviceConfig); + + String inputString = "" + + "" + + "5" + + "5" + + "150" + + "" + + ""; + assertElement(inputString, returnElement); + } + + @Test + public void testTranslateServiceCreationConfigurationWithInetSocketAddress() { + + InetSocketAddress firstServer = InetSocketAddress.createUnresolved("100.100.100.100", 9510); + InetSocketAddress secondServer = InetSocketAddress.createUnresolved("server-2", 0); + InetSocketAddress 
thirdServer = InetSocketAddress.createUnresolved("[::1]", 0); + InetSocketAddress fourthServer = InetSocketAddress.createUnresolved("[fe80::1453:846e:7be4:15fe]", 9710); + List servers = Arrays.asList(firstServer, secondServer, thirdServer, fourthServer); + ClusteringServiceConfiguration serviceConfig = ClusteringServiceConfigurationBuilder.cluster(servers, "my-application") + .timeouts(Timeouts.DEFAULT) + .build(); + + + ClusteringCacheManagerServiceConfigurationParser parser = new ClusteringCacheManagerServiceConfigurationParser(); + Element returnElement = parser.unparseServiceCreationConfiguration(serviceConfig); + + String inputString = "" + + "" + + "" + + "" + + "" + + "" + + "" + + "5" + + "5" + + "150" + + ""; + assertElement(inputString, returnElement); + } + + /** + * Constructs a temporary XML configuration file. + * + * @param lines the lines to include in the XML configuration file + * @return a {@code URL} pointing to the XML configuration file + * @throws IOException if an error is raised while creating or writing the XML configuration file + */ + @SuppressWarnings("ThrowFromFinallyBlock") + private URL makeConfig(final String[] lines) throws IOException { + final File configFile = folder.newFile(testName.getMethodName() + "_config.xml"); + + OutputStreamWriter out = null; + try { + out = new OutputStreamWriter(new FileOutputStream(configFile), "UTF-8"); + for (final String line : lines) { + out.write(line); + } + } finally { + if (out != null) { + try { + out.close(); + } catch (IOException e) { + throw new AssertionError(e); + } + } + } + + return configFile.toURI().toURL(); + } + +} diff --git a/clustered/client/src/test/java/org/ehcache/clustered/client/internal/config/xml/ClusteringCacheServiceConfigurationParserTest.java b/clustered/client/src/test/java/org/ehcache/clustered/client/internal/config/xml/ClusteringCacheServiceConfigurationParserTest.java new file mode 100644 index 0000000000..50f0335940 --- /dev/null +++ b/clustered/client/src/test/java/org/ehcache/clustered/client/internal/config/xml/ClusteringCacheServiceConfigurationParserTest.java @@ -0,0 +1,38 @@ +/* + * Copyright Terracotta, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
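// makeConfig's manual close-in-finally above (and its @SuppressWarnings("ThrowFromFinallyBlock"))
// could be written with try-with-resources instead; a sketch, not part of this change:
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.io.Writer;
import java.net.URL;
import java.nio.charset.StandardCharsets;

class MakeConfigSketch {
  static URL makeConfig(File configFile, String[] lines) throws IOException {
    try (Writer out = new OutputStreamWriter(new FileOutputStream(configFile), StandardCharsets.UTF_8)) {
      for (String line : lines) {
        out.write(line);
      }
    }
    return configFile.toURI().toURL();
  }
}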
+ */
+package org.ehcache.clustered.client.internal.config.xml;
+
+import org.ehcache.clustered.client.config.builders.ClusteredStoreConfigurationBuilder;
+import org.ehcache.clustered.common.Consistency;
+import org.junit.Test;
+import org.w3c.dom.Node;
+
+import static org.ehcache.xml.ConfigurationParserTestHelper.assertElement;
+
+public class ClusteringCacheServiceConfigurationParserTest {
+
+  @Test
+  public void testTranslateServiceStoreConfiguration() {
+
+    ClusteringCacheServiceConfigurationParser configurationTranslator = new ClusteringCacheServiceConfigurationParser();
+    Node retNode = configurationTranslator.unparseServiceConfiguration(
+        ClusteredStoreConfigurationBuilder.withConsistency(Consistency.STRONG).build());
+
+    String inputString = "";
+    assertElement(inputString, retNode);
+  }
+}
diff --git a/clustered/client/src/test/java/org/ehcache/clustered/client/internal/config/xml/ClusteringServiceConfigurationParserTest.java b/clustered/client/src/test/java/org/ehcache/clustered/client/internal/config/xml/ClusteringServiceConfigurationParserTest.java
deleted file mode 100644
index 72964e58ea..0000000000
--- a/clustered/client/src/test/java/org/ehcache/clustered/client/internal/config/xml/ClusteringServiceConfigurationParserTest.java
+++ /dev/null
@@ -1,334 +0,0 @@
-/*
- * Copyright Terracotta, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */ - -package org.ehcache.clustered.client.internal.config.xml; - -import org.ehcache.clustered.client.config.ClusteringServiceConfiguration; -import org.ehcache.clustered.client.config.Timeouts; -import org.ehcache.clustered.client.config.builders.TimeoutsBuilder; -import org.ehcache.config.Configuration; -import org.ehcache.core.internal.util.ClassLoading; -import org.ehcache.core.spi.service.ServiceUtils; -import org.ehcache.spi.service.ServiceCreationConfiguration; -import org.ehcache.xml.CacheManagerServiceConfigurationParser; -import org.ehcache.xml.XmlConfiguration; -import org.ehcache.xml.exceptions.XmlConfigurationException; -import org.ehcache.xml.model.TimeType; -import org.hamcrest.Matchers; -import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; -import org.junit.rules.TestName; -import org.w3c.dom.Attr; -import org.w3c.dom.Element; - -import java.io.File; -import java.io.FileOutputStream; -import java.io.IOException; -import java.io.OutputStreamWriter; -import java.math.BigInteger; -import java.net.URL; -import java.time.Duration; -import java.time.temporal.TemporalUnit; -import java.util.Collection; -import java.util.ServiceLoader; - -import javax.xml.parsers.DocumentBuilder; -import javax.xml.parsers.DocumentBuilderFactory; -import javax.xml.transform.stream.StreamSource; - -import static java.time.temporal.ChronoUnit.MINUTES; -import static org.ehcache.xml.XmlModel.convertToJavaTimeUnit; -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.not; -import static org.hamcrest.Matchers.notNullValue; -import static org.hamcrest.Matchers.nullValue; -import static org.junit.Assert.*; - -/** - * Basic tests for {@link ClusteringServiceConfigurationParser}. - */ -public class ClusteringServiceConfigurationParserTest { - - @ClassRule - public static final TemporaryFolder folder = new TemporaryFolder(); - - @Rule - public final TestName testName = new TestName(); - - - /** - * Ensures the {@link ClusteringServiceConfigurationParser} is locatable as a - * {@link CacheManagerServiceConfigurationParser} instance. - */ - @Test - public void testServiceLocator() throws Exception { - final String expectedParser = ClusteringServiceConfigurationParser.class.getName(); - final ServiceLoader parsers = - ClassLoading.libraryServiceLoaderFor(CacheManagerServiceConfigurationParser.class); - foundParser: { - for (final CacheManagerServiceConfigurationParser parser : parsers) { - if (parser.getClass().getName().equals(expectedParser)) { - break foundParser; - } - } - fail("Expected parser not found"); - } - } - - /** - * Ensures the namespace declared by {@link ClusteringServiceConfigurationParser} and its - * schema are the same. 
- */ - @Test - public void testSchema() throws Exception { - final ClusteringServiceConfigurationParser parser = new ClusteringServiceConfigurationParser(); - final StreamSource schemaSource = (StreamSource) parser.getXmlSchema(); - - final DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance(); - factory.setNamespaceAware(true); - factory.setIgnoringComments(true); - factory.setIgnoringElementContentWhitespace(true); - - final DocumentBuilder domBuilder = factory.newDocumentBuilder(); - final Element schema = domBuilder.parse(schemaSource.getInputStream()).getDocumentElement(); - final Attr targetNamespaceAttr = schema.getAttributeNode("targetNamespace"); - assertThat(targetNamespaceAttr, is(not(nullValue()))); - assertThat(targetNamespaceAttr.getValue(), is(parser.getNamespace().toString())); - } - - @Test - public void testGetTimeout() throws Exception { - - final String[] config = new String[] - { - "", - "", - " ", - " ", - " ", - " 5", - " 10", - " 15", - " ", - " ", - "", - "" - }; - - final Configuration configuration = new XmlConfiguration(makeConfig(config)); - - Collection> serviceCreationConfigurations = - configuration.getServiceCreationConfigurations(); - assertThat(serviceCreationConfigurations, is(not(Matchers.empty()))); - - ClusteringServiceConfiguration clusteringServiceConfiguration = - ServiceUtils.findSingletonAmongst(ClusteringServiceConfiguration.class, serviceCreationConfigurations); - assertThat(clusteringServiceConfiguration, is(notNullValue())); - - Timeouts timeouts = clusteringServiceConfiguration.getTimeouts(); - assertThat(timeouts.getReadOperationTimeout(), is(Duration.of(5, MINUTES))); - assertThat(timeouts.getWriteOperationTimeout(), is(Duration.of(10, MINUTES))); - assertThat(timeouts.getConnectionTimeout(), is(Duration.of(15, MINUTES))); - } - - @Test - public void testGetTimeoutNone() throws Exception { - - final String[] config = new String[] - { - "", - "", - " ", - " ", - " ", - " ", - " ", - "", - "" - }; - - final Configuration configuration = new XmlConfiguration(makeConfig(config)); - - Collection> serviceCreationConfigurations = - configuration.getServiceCreationConfigurations(); - assertThat(serviceCreationConfigurations, is(not(Matchers.empty()))); - - ClusteringServiceConfiguration clusteringServiceConfiguration = - ServiceUtils.findSingletonAmongst(ClusteringServiceConfiguration.class, serviceCreationConfigurations); - assertThat(clusteringServiceConfiguration, is(notNullValue())); - - assertThat(clusteringServiceConfiguration.getTimeouts(), is(TimeoutsBuilder.timeouts().build())); - } - - @Test - public void testGetTimeoutUnitDefault() throws Exception { - - final String[] config = new String[] - { - "", - "", - " ", - " ", - " ", - " 5", - " ", - " ", - "", - "" - }; - - final Configuration configuration = new XmlConfiguration(makeConfig(config)); - - Collection> serviceCreationConfigurations = - configuration.getServiceCreationConfigurations(); - assertThat(serviceCreationConfigurations, is(not(Matchers.empty()))); - - ClusteringServiceConfiguration clusteringServiceConfiguration = - ServiceUtils.findSingletonAmongst(ClusteringServiceConfiguration.class, serviceCreationConfigurations); - assertThat(clusteringServiceConfiguration, is(notNullValue())); - - TemporalUnit defaultUnit = convertToJavaTimeUnit(new TimeType().getUnit()); - assertThat(clusteringServiceConfiguration.getTimeouts().getReadOperationTimeout(), - is(equalTo(Duration.of(5, defaultUnit)))); - } - - @Test - public void testGetTimeoutUnitBad() throws 
Exception { - - final String[] config = new String[] - { - "", - "", - " ", - " ", - " ", - " 5", - " ", - " ", - "", - "" - }; - - try { - new XmlConfiguration(makeConfig(config)); - fail("Expecting XmlConfigurationException"); - } catch (XmlConfigurationException e) { - assertThat(e.getMessage(), containsString("Error parsing XML configuration ")); - assertThat(e.getCause().getMessage(), containsString("Value 'femtos' is not facet-valid with respect to enumeration ")); - } - } - - @Test - public void testGetTimeoutValueTooBig() throws Exception { - - final String[] config = new String[] - { - "", - "", - " ", - " ", - " ", - " " - + BigInteger.ONE.add(BigInteger.valueOf(Long.MAX_VALUE)) - + "", - " ", - " ", - "", - "" - }; - - try { - new XmlConfiguration(makeConfig(config)); - fail("Expecting XmlConfigurationException"); - } catch (XmlConfigurationException e) { - assertThat(e.getMessage(), containsString(" exceeds allowed value ")); - } - } - - @Test - public void testGetTimeoutValueOmitted() throws Exception { - - final String[] config = new String[] - { - "", - "", - " ", - " ", - " ", - " ", - " ", - " ", - "", - "" - }; - - try { - new XmlConfiguration(makeConfig(config)); - fail("Expecting XmlConfigurationException"); - } catch (XmlConfigurationException e) { - assertThat(e.getMessage(), containsString("Error parsing XML configuration ")); - assertThat(e.getCause().getMessage(), containsString("'' is not a valid value for 'integer'")); - } - } - - /** - * Constructs a temporary XML configuration file. - * - * @param lines the lines to include in the XML configuration file - * - * @return a {@code URL} pointing to the XML configuration file - * - * @throws IOException if an error is raised while creating or writing the XML configuration file - */ - @SuppressWarnings("ThrowFromFinallyBlock") - private URL makeConfig(final String[] lines) throws IOException { - final File configFile = folder.newFile(testName.getMethodName() + "_config.xml"); - - OutputStreamWriter out = null; - try { - out = new OutputStreamWriter(new FileOutputStream(configFile), "UTF-8"); - for (final String line : lines) { - out.write(line); - } - } finally { - if (out != null) { - try { - out.close(); - } catch (IOException e) { - throw new AssertionError(e); - } - } - } - - return configFile.toURI().toURL(); - } -} diff --git a/clustered/client/src/test/java/org/ehcache/clustered/client/internal/loaderwriter/ClusteredLoaderWriterStoreProviderTest.java b/clustered/client/src/test/java/org/ehcache/clustered/client/internal/loaderwriter/ClusteredLoaderWriterStoreProviderTest.java new file mode 100644 index 0000000000..681ccb0a97 --- /dev/null +++ b/clustered/client/src/test/java/org/ehcache/clustered/client/internal/loaderwriter/ClusteredLoaderWriterStoreProviderTest.java @@ -0,0 +1,76 @@ +/* + * Copyright Terracotta, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.ehcache.clustered.client.internal.loaderwriter; + +import org.ehcache.clustered.client.config.ClusteredResourceType; +import org.ehcache.clustered.client.internal.store.ClusteredStoreProviderTest; +import org.ehcache.clustered.client.service.ClusteringService; +import org.ehcache.core.internal.service.ServiceLocator; +import org.ehcache.core.spi.service.DiskResourceService; +import org.ehcache.impl.internal.store.disk.OffHeapDiskStore; +import org.ehcache.impl.internal.store.heap.OnHeapStore; +import org.ehcache.impl.internal.store.offheap.OffHeapStore; +import org.ehcache.impl.internal.store.tiering.TieredStore; +import org.ehcache.spi.loaderwriter.CacheLoaderWriterConfiguration; +import org.junit.Test; + +import java.util.Collections; +import java.util.HashSet; + +import static org.ehcache.core.internal.service.ServiceLocator.dependencySet; +import static org.hamcrest.Matchers.is; +import static org.junit.Assert.assertThat; +import static org.mockito.Mockito.mock; + +public class ClusteredLoaderWriterStoreProviderTest { + + private final CacheLoaderWriterConfiguration cacheLoaderWriterConfiguration = mock(CacheLoaderWriterConfiguration.class); + + @Test + public void testRank() { + ClusteredLoaderWriterStore.Provider provider = new ClusteredLoaderWriterStore.Provider(); + ServiceLocator serviceLocator = dependencySet() + .with(new TieredStore.Provider()) + .with(new OnHeapStore.Provider()) + .with(new OffHeapStore.Provider()) + .with(mock(DiskResourceService.class)) + .with(new OffHeapDiskStore.Provider()) + .with(mock(ClusteringService.class)).build(); + provider.start(serviceLocator); + + assertThat(provider.rank(new HashSet<>(Collections.singletonList(ClusteredResourceType.Types.DEDICATED)), + Collections.singletonList(cacheLoaderWriterConfiguration)), is(2)); + assertThat(provider.rank(new HashSet<>(Collections.singletonList(ClusteredResourceType.Types.DEDICATED)), + Collections.emptyList()), is(0)); + assertThat(provider.rank(new HashSet<>(Collections.singletonList(new ClusteredStoreProviderTest.UnmatchedResourceType())), + Collections.singletonList(cacheLoaderWriterConfiguration)), is(0)); + } + + @Test + public void testAuthoritativeRank() { + ClusteredLoaderWriterStore.Provider provider = new ClusteredLoaderWriterStore.Provider(); + ServiceLocator serviceLocator = dependencySet().with(mock(ClusteringService.class)).build(); + provider.start(serviceLocator); + + assertThat(provider.rankAuthority(ClusteredResourceType.Types.DEDICATED, + Collections.singletonList(cacheLoaderWriterConfiguration)), + is(2)); + assertThat(provider.rankAuthority(ClusteredResourceType.Types.DEDICATED, Collections.emptyList()), + is(0)); + assertThat(provider.rankAuthority(new ClusteredStoreProviderTest.UnmatchedResourceType(), Collections.singletonList(cacheLoaderWriterConfiguration)), + is(0)); + } +} diff --git a/clustered/client/src/test/java/org/ehcache/clustered/client/internal/loaderwriter/ClusteredLoaderWriterStoreTest.java b/clustered/client/src/test/java/org/ehcache/clustered/client/internal/loaderwriter/ClusteredLoaderWriterStoreTest.java new file mode 100644 index 0000000000..0f29847eab --- /dev/null +++ b/clustered/client/src/test/java/org/ehcache/clustered/client/internal/loaderwriter/ClusteredLoaderWriterStoreTest.java @@ -0,0 +1,335 @@ +/* + * Copyright Terracotta, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.ehcache.clustered.client.internal.loaderwriter; + +import org.ehcache.clustered.client.internal.store.ServerStoreProxy; +import org.ehcache.clustered.client.internal.store.lock.LockingServerStoreProxy; +import org.ehcache.clustered.client.internal.store.operations.EternalChainResolver; +import org.ehcache.clustered.common.internal.store.operations.PutOperation; +import org.ehcache.clustered.common.internal.store.operations.codecs.OperationsCodec; +import org.ehcache.clustered.common.internal.store.Chain; +import org.ehcache.clustered.common.internal.store.Util; +import org.ehcache.clustered.loaderWriter.TestCacheLoaderWriter; +import org.ehcache.core.spi.store.Store; +import org.ehcache.core.spi.time.TimeSource; +import org.ehcache.impl.serialization.LongSerializer; +import org.ehcache.impl.serialization.StringSerializer; +import org.ehcache.spi.loaderwriter.CacheLoaderWriter; +import org.junit.Test; +import org.mockito.ArgumentMatchers; + +import java.nio.ByteBuffer; + +import static org.ehcache.clustered.common.internal.store.Util.EMPTY_CHAIN; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; +import static org.junit.Assert.assertThat; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyZeroInteractions; +import static org.mockito.Mockito.when; + +public class ClusteredLoaderWriterStoreTest { + + @SuppressWarnings("unchecked") + private Store.Configuration configuration = mock(Store.Configuration.class); + private OperationsCodec codec = new OperationsCodec<>(new LongSerializer(), new StringSerializer()); + private EternalChainResolver resolver = new EternalChainResolver<>(codec); + private TimeSource timeSource = mock(TimeSource.class); + + @Test + public void testGetValueAbsentInSOR() throws Exception { + ServerStoreProxy storeProxy = mock(LockingServerStoreProxy.class); + CacheLoaderWriter loaderWriter = new TestCacheLoaderWriter(); + when(storeProxy.get(eq(1L))).thenReturn(EMPTY_CHAIN); + ClusteredLoaderWriterStore store = new ClusteredLoaderWriterStore<>(configuration, codec, resolver, storeProxy, + timeSource, loaderWriter); + assertThat(store.get(1L), is(nullValue())); + } + + @Test + public void testGetValuePresentInSOR() throws Exception { + ServerStoreProxy storeProxy = mock(LockingServerStoreProxy.class); + TestCacheLoaderWriter loaderWriter = new TestCacheLoaderWriter(); + loaderWriter.storeMap.put(1L, "one"); + when(storeProxy.get(eq(1L))).thenReturn(EMPTY_CHAIN); + ClusteredLoaderWriterStore store = new ClusteredLoaderWriterStore<>(configuration, codec, resolver, storeProxy, + timeSource, loaderWriter); + assertThat(store.get(1L).get(), equalTo("one")); + } + + @Test + public void testGetValuePresentInCache() throws Exception { + ServerStoreProxy storeProxy = 
mock(LockingServerStoreProxy.class); + @SuppressWarnings("unchecked") + CacheLoaderWriter<Long, String> loaderWriter = mock(CacheLoaderWriter.class); + PutOperation<Long, String> operation = new PutOperation<>(1L, "one", System.currentTimeMillis()); + Chain toReturn = Util.getChain(false, codec.encode(operation)); + when(storeProxy.get(anyLong())).thenReturn(toReturn); + ClusteredLoaderWriterStore<Long, String> store = new ClusteredLoaderWriterStore<>(configuration, codec, resolver, storeProxy, + timeSource, loaderWriter); + assertThat(store.get(1L).get(), equalTo("one")); + verify(loaderWriter, times(0)).load(anyLong()); + verifyZeroInteractions(loaderWriter); + } + + @Test + public void testPut() throws Exception { + ServerStoreProxy storeProxy = mock(LockingServerStoreProxy.class); + TestCacheLoaderWriter loaderWriter = new TestCacheLoaderWriter(); + ClusteredLoaderWriterStore<Long, String> store = new ClusteredLoaderWriterStore<>(configuration, codec, resolver, storeProxy, + timeSource, loaderWriter); + assertThat(loaderWriter.storeMap.containsKey(1L), is(false)); + assertThat(store.put(1L, "one"), is(Store.PutStatus.PUT)); + assertThat(loaderWriter.storeMap.containsKey(1L), is(true)); + } + + @Test + public void testRemoveValueAbsentInCachePresentInSOR() throws Exception { + LockingServerStoreProxy storeProxy = mock(LockingServerStoreProxy.class); + TestCacheLoaderWriter loaderWriter = new TestCacheLoaderWriter(); + when(storeProxy.lock(anyLong())).thenReturn(EMPTY_CHAIN); + ClusteredLoaderWriterStore<Long, String> store = new ClusteredLoaderWriterStore<>(configuration, codec, resolver, storeProxy, + timeSource, loaderWriter); + loaderWriter.storeMap.put(1L, "one"); + assertThat(store.remove(1L), is(false)); + assertThat(loaderWriter.storeMap.containsKey(1L), is(false)); + } + + @Test + public void testRemoveValuePresentInCachePresentInSOR() throws Exception { + LockingServerStoreProxy storeProxy = mock(LockingServerStoreProxy.class); + TestCacheLoaderWriter loaderWriter = new TestCacheLoaderWriter(); + PutOperation<Long, String> operation = new PutOperation<>(1L, "one", System.currentTimeMillis()); + Chain toReturn = Util.getChain(false, codec.encode(operation)); + when(storeProxy.lock(anyLong())).thenReturn(toReturn); + when(storeProxy.get(anyLong())).thenReturn(toReturn); + ClusteredLoaderWriterStore<Long, String> store = new ClusteredLoaderWriterStore<>(configuration, codec, resolver, storeProxy, + timeSource, loaderWriter); + loaderWriter.storeMap.put(1L, "one"); + assertThat(store.get(1L).get(), equalTo("one")); + assertThat(store.remove(1L), is(true)); + assertThat(loaderWriter.storeMap.containsKey(1L), is(false)); + } + + @Test + public void testRemoveValueAbsentInCacheAbsentInSOR() throws Exception { + LockingServerStoreProxy storeProxy = mock(LockingServerStoreProxy.class); + @SuppressWarnings("unchecked") + CacheLoaderWriter<Long, String> loaderWriter = mock(CacheLoaderWriter.class); + when(storeProxy.lock(anyLong())).thenReturn(EMPTY_CHAIN); + ClusteredLoaderWriterStore<Long, String> store = new ClusteredLoaderWriterStore<>(configuration, codec, resolver, storeProxy, + timeSource, loaderWriter); + assertThat(store.remove(1L), is(false)); + verify(loaderWriter, times(1)).delete(anyLong()); + } + + @Test + public void testPutIfAbsentValueAbsentInCacheAbsentInSOR() throws Exception { + LockingServerStoreProxy storeProxy = mock(LockingServerStoreProxy.class); + TestCacheLoaderWriter loaderWriter = new TestCacheLoaderWriter(); + when(storeProxy.lock(anyLong())).thenReturn(EMPTY_CHAIN); + ClusteredLoaderWriterStore<Long, String> store = new ClusteredLoaderWriterStore<>(configuration, codec, resolver, storeProxy, + timeSource, loaderWriter); + assertThat(loaderWriter.storeMap.isEmpty(), is(true)); + assertThat(store.putIfAbsent(1L, "one", null), is(nullValue())); + assertThat(loaderWriter.storeMap.get(1L), equalTo("one")); + } + + @Test + public void testPutIfAbsentValueAbsentInCachePresentInSOR() throws Exception { + LockingServerStoreProxy storeProxy = mock(LockingServerStoreProxy.class); + TestCacheLoaderWriter loaderWriter = new TestCacheLoaderWriter(); + when(storeProxy.lock(anyLong())).thenReturn(EMPTY_CHAIN); + ClusteredLoaderWriterStore<Long, String> store = new ClusteredLoaderWriterStore<>(configuration, codec, resolver, storeProxy, + timeSource, loaderWriter); + loaderWriter.storeMap.put(1L, "one"); + assertThat(store.putIfAbsent(1L, "Again", null).get(), equalTo("one")); + verify(storeProxy, times(0)).append(anyLong(), ArgumentMatchers.any(ByteBuffer.class)); + assertThat(loaderWriter.storeMap.get(1L), equalTo("one")); + } + + @Test + public void testPutIfAbsentValuePresentInCachePresentInSOR() throws Exception { + LockingServerStoreProxy storeProxy = mock(LockingServerStoreProxy.class); + TestCacheLoaderWriter loaderWriter = new TestCacheLoaderWriter(); + PutOperation<Long, String> operation = new PutOperation<>(1L, "one", System.currentTimeMillis()); + Chain toReturn = Util.getChain(false, codec.encode(operation)); + when(storeProxy.lock(anyLong())).thenReturn(toReturn); + ClusteredLoaderWriterStore<Long, String> store = new ClusteredLoaderWriterStore<>(configuration, codec, resolver, storeProxy, + timeSource, loaderWriter); + loaderWriter.storeMap.put(1L, "one"); + assertThat(store.putIfAbsent(1L, "Again", null).get(), equalTo("one")); + verify(storeProxy, times(0)).append(anyLong(), ArgumentMatchers.any(ByteBuffer.class)); + assertThat(loaderWriter.storeMap.get(1L), equalTo("one")); + } + + @Test + public void testReplaceValueAbsentInCacheAbsentInSOR() throws Exception { + LockingServerStoreProxy storeProxy = mock(LockingServerStoreProxy.class); + TestCacheLoaderWriter loaderWriter = new TestCacheLoaderWriter(); + when(storeProxy.lock(anyLong())).thenReturn(EMPTY_CHAIN); + ClusteredLoaderWriterStore<Long, String> store = new ClusteredLoaderWriterStore<>(configuration, codec, resolver, storeProxy, + timeSource, loaderWriter); + assertThat(loaderWriter.storeMap.isEmpty(), is(true)); + assertThat(store.replace(1L, "one"), is(nullValue())); + assertThat(loaderWriter.storeMap.isEmpty(), is(true)); + verify(storeProxy, times(0)).append(anyLong(), ArgumentMatchers.any(ByteBuffer.class)); + } + + @Test + public void testReplaceValueAbsentInCachePresentInSOR() throws Exception { + LockingServerStoreProxy storeProxy = mock(LockingServerStoreProxy.class); + TestCacheLoaderWriter loaderWriter = new TestCacheLoaderWriter(); + when(storeProxy.lock(anyLong())).thenReturn(EMPTY_CHAIN); + ClusteredLoaderWriterStore<Long, String> store = new ClusteredLoaderWriterStore<>(configuration, codec, resolver, storeProxy, + timeSource, loaderWriter); + loaderWriter.storeMap.put(1L, "one"); + assertThat(store.replace(1L, "Again").get(), equalTo("one")); + verify(storeProxy, times(1)).append(anyLong(), ArgumentMatchers.any(ByteBuffer.class)); + assertThat(loaderWriter.storeMap.get(1L), equalTo("Again")); + } + + @Test + public void testReplaceValuePresentInCachePresentInSOR() throws Exception { + LockingServerStoreProxy storeProxy = mock(LockingServerStoreProxy.class); + TestCacheLoaderWriter loaderWriter = new TestCacheLoaderWriter(); + PutOperation<Long, String> operation = new PutOperation<>(1L, "one", System.currentTimeMillis()); + Chain toReturn = Util.getChain(false,
codec.encode(operation)); + when(storeProxy.lock(anyLong())).thenReturn(toReturn); + ClusteredLoaderWriterStore store = new ClusteredLoaderWriterStore<>(configuration, codec, resolver, storeProxy, + timeSource, loaderWriter); + loaderWriter.storeMap.put(1L, "one"); + assertThat(store.replace(1L, "Again").get(), equalTo("one")); + verify(storeProxy, times(1)).append(anyLong(), ArgumentMatchers.any(ByteBuffer.class)); + assertThat(loaderWriter.storeMap.get(1L), equalTo("Again")); + } + + @Test + public void testRemove2ArgsValueAbsentInCacheAbsentInSOR() throws Exception { + LockingServerStoreProxy storeProxy = mock(LockingServerStoreProxy.class); + @SuppressWarnings("unchecked") + CacheLoaderWriter loaderWriter = mock(CacheLoaderWriter.class); + when(storeProxy.lock(anyLong())).thenReturn(EMPTY_CHAIN); + ClusteredLoaderWriterStore store = new ClusteredLoaderWriterStore<>(configuration, codec, resolver, storeProxy, + timeSource, loaderWriter); + assertThat(store.remove(1L, "one"), is(Store.RemoveStatus.KEY_MISSING)); + verify(storeProxy, times(0)).append(anyLong(), ArgumentMatchers.any(ByteBuffer.class)); + } + + @Test + public void testRemove2ArgsValueAbsentInCachePresentInSOR() throws Exception { + LockingServerStoreProxy storeProxy = mock(LockingServerStoreProxy.class); + TestCacheLoaderWriter loaderWriter = new TestCacheLoaderWriter(); + when(storeProxy.lock(anyLong())).thenReturn(EMPTY_CHAIN); + ClusteredLoaderWriterStore store = new ClusteredLoaderWriterStore<>(configuration, codec, resolver, storeProxy, + timeSource, loaderWriter); + loaderWriter.storeMap.put(1L, "one"); + assertThat(store.remove(1L, "one"), is(Store.RemoveStatus.REMOVED)); + verify(storeProxy, times(1)).append(anyLong(), ArgumentMatchers.any(ByteBuffer.class)); + assertThat(loaderWriter.storeMap.isEmpty(), is(true)); + } + + @Test + public void testRemove2ArgsValuePresentInCachePresentInSOR() throws Exception { + LockingServerStoreProxy storeProxy = mock(LockingServerStoreProxy.class); + @SuppressWarnings("unchecked") + CacheLoaderWriter loaderWriter = mock(CacheLoaderWriter.class); + PutOperation operation = new PutOperation<>(1L, "one", System.currentTimeMillis()); + Chain toReturn = Util.getChain(false, codec.encode(operation)); + when(storeProxy.lock(anyLong())).thenReturn(toReturn); + ClusteredLoaderWriterStore store = new ClusteredLoaderWriterStore<>(configuration, codec, resolver, storeProxy, + timeSource, loaderWriter); + assertThat(store.remove(1L, "one"), is(Store.RemoveStatus.REMOVED)); + verify(storeProxy, times(1)).append(anyLong(), ArgumentMatchers.any(ByteBuffer.class)); + verify(loaderWriter, times(0)).load(anyLong()); + verify(loaderWriter, times(1)).delete(anyLong()); + } + + @Test + public void testRemove2ArgsValueAbsentInCacheDiffValuePresentInSOR() throws Exception { + LockingServerStoreProxy storeProxy = mock(LockingServerStoreProxy.class); + TestCacheLoaderWriter loaderWriter = new TestCacheLoaderWriter(); + when(storeProxy.lock(anyLong())).thenReturn(EMPTY_CHAIN); + ClusteredLoaderWriterStore store = new ClusteredLoaderWriterStore<>(configuration, codec, resolver, storeProxy, + timeSource, loaderWriter); + loaderWriter.storeMap.put(1L, "one"); + assertThat(store.remove(1L, "Again"), is(Store.RemoveStatus.KEY_PRESENT)); + verify(storeProxy, times(0)).append(anyLong(), ArgumentMatchers.any(ByteBuffer.class)); + assertThat(loaderWriter.storeMap.get(1L), equalTo("one")); + } + + @Test + public void testReplace2ArgsValueAbsentInCacheAbsentInSOR() throws Exception { + LockingServerStoreProxy 
storeProxy = mock(LockingServerStoreProxy.class); + @SuppressWarnings("unchecked") + CacheLoaderWriter loaderWriter = mock(CacheLoaderWriter.class); + when(storeProxy.lock(anyLong())).thenReturn(EMPTY_CHAIN); + ClusteredLoaderWriterStore store = new ClusteredLoaderWriterStore<>(configuration, codec, resolver, storeProxy, + timeSource, loaderWriter); + assertThat(store.replace(1L, "one", "Again"), is(Store.ReplaceStatus.MISS_NOT_PRESENT)); + verify(storeProxy, times(0)).append(anyLong(), ArgumentMatchers.any(ByteBuffer.class)); + verify(loaderWriter, times(1)).load(anyLong()); + verify(loaderWriter, times(0)).write(anyLong(), anyString()); + } + + @Test + public void testReplace2ArgsValueAbsentInCachePresentInSOR() throws Exception { + LockingServerStoreProxy storeProxy = mock(LockingServerStoreProxy.class); + TestCacheLoaderWriter loaderWriter = new TestCacheLoaderWriter(); + when(storeProxy.lock(anyLong())).thenReturn(EMPTY_CHAIN); + ClusteredLoaderWriterStore store = new ClusteredLoaderWriterStore<>(configuration, codec, resolver, storeProxy, + timeSource, loaderWriter); + loaderWriter.storeMap.put(1L, "one"); + assertThat(store.replace(1L, "one", "Again"), is(Store.ReplaceStatus.HIT)); + verify(storeProxy, times(1)).append(anyLong(), ArgumentMatchers.any(ByteBuffer.class)); + assertThat(loaderWriter.storeMap.get(1L), equalTo("Again")); + } + + @Test + public void testReplace2ArgsValuePresentInCachePresentInSOR() throws Exception { + LockingServerStoreProxy storeProxy = mock(LockingServerStoreProxy.class); + @SuppressWarnings("unchecked") + CacheLoaderWriter loaderWriter = mock(CacheLoaderWriter.class); + PutOperation operation = new PutOperation<>(1L, "one", System.currentTimeMillis()); + Chain toReturn = Util.getChain(false, codec.encode(operation)); + when(storeProxy.lock(anyLong())).thenReturn(toReturn); + ClusteredLoaderWriterStore store = new ClusteredLoaderWriterStore<>(configuration, codec, resolver, storeProxy, + timeSource, loaderWriter); + assertThat(store.replace(1L, "one", "Again"), is(Store.ReplaceStatus.HIT)); + verify(storeProxy, times(1)).append(anyLong(), ArgumentMatchers.any(ByteBuffer.class)); + verify(loaderWriter, times(0)).load(anyLong()); + verify(loaderWriter, times(1)).write(anyLong(), anyString()); + } + + @Test + public void testReplace2ArgsValueAbsentInCacheDiffValueInSOR() throws Exception { + LockingServerStoreProxy storeProxy = mock(LockingServerStoreProxy.class); + TestCacheLoaderWriter loaderWriter = new TestCacheLoaderWriter(); + when(storeProxy.lock(anyLong())).thenReturn(EMPTY_CHAIN); + ClusteredLoaderWriterStore store = new ClusteredLoaderWriterStore<>(configuration, codec, resolver, storeProxy, + timeSource, loaderWriter); + loaderWriter.storeMap.put(1L, "one"); + assertThat(store.replace(1L, "Again", "one"), is(Store.ReplaceStatus.MISS_PRESENT)); + verify(storeProxy, times(0)).append(anyLong(), ArgumentMatchers.any(ByteBuffer.class)); + assertThat(loaderWriter.storeMap.get(1L), equalTo("one")); + } +} diff --git a/clustered/client/src/test/java/org/ehcache/clustered/client/internal/loaderwriter/writebehind/ClusteredWriteBehindStoreProviderTest.java b/clustered/client/src/test/java/org/ehcache/clustered/client/internal/loaderwriter/writebehind/ClusteredWriteBehindStoreProviderTest.java new file mode 100644 index 0000000000..332d008fc3 --- /dev/null +++ b/clustered/client/src/test/java/org/ehcache/clustered/client/internal/loaderwriter/writebehind/ClusteredWriteBehindStoreProviderTest.java @@ -0,0 +1,81 @@ +/* + * Copyright Terracotta, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.ehcache.clustered.client.internal.loaderwriter.writebehind; + +import org.ehcache.clustered.client.config.ClusteredResourceType; +import org.ehcache.clustered.client.internal.store.ClusteredStoreProviderTest; +import org.ehcache.clustered.client.service.ClusteringService; +import org.ehcache.core.internal.service.ServiceLocator; +import org.ehcache.core.spi.service.DiskResourceService; +import org.ehcache.impl.internal.store.disk.OffHeapDiskStore; +import org.ehcache.impl.internal.store.heap.OnHeapStore; +import org.ehcache.impl.internal.store.offheap.OffHeapStore; +import org.ehcache.impl.internal.store.tiering.TieredStore; +import org.ehcache.spi.loaderwriter.CacheLoaderWriterConfiguration; +import org.ehcache.spi.loaderwriter.WriteBehindConfiguration; +import org.junit.Test; + +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; + +import static org.ehcache.core.internal.service.ServiceLocator.dependencySet; +import static org.hamcrest.Matchers.is; +import static org.junit.Assert.assertThat; +import static org.mockito.Mockito.mock; + +public class ClusteredWriteBehindStoreProviderTest { + + private final CacheLoaderWriterConfiguration cacheLoaderWriterConfiguration = mock(CacheLoaderWriterConfiguration.class); + private final WriteBehindConfiguration writeBehindConfiguration = mock(WriteBehindConfiguration.class); + + @Test + public void testRank() { + ClusteredWriteBehindStore.Provider provider = new ClusteredWriteBehindStore.Provider(); + ServiceLocator serviceLocator = dependencySet() + .with(new TieredStore.Provider()) + .with(new OnHeapStore.Provider()) + .with(new OffHeapStore.Provider()) + .with(mock(DiskResourceService.class)) + .with(new OffHeapDiskStore.Provider()) + .with(mock(ClusteringService.class)).build(); + provider.start(serviceLocator); + + assertThat(provider.rank(new HashSet<>(Collections.singletonList(ClusteredResourceType.Types.DEDICATED)), + Arrays.asList(cacheLoaderWriterConfiguration, writeBehindConfiguration)), is(3)); + assertThat(provider.rank(new HashSet<>(Collections.singletonList(ClusteredResourceType.Types.DEDICATED)), + Collections.singletonList(cacheLoaderWriterConfiguration)), is(0)); + assertThat(provider.rank(new HashSet<>(Collections.singletonList(new ClusteredStoreProviderTest.UnmatchedResourceType())), + Arrays.asList(cacheLoaderWriterConfiguration, writeBehindConfiguration)), is(0)); + } + + @Test + public void testAuthoritativeRank() { + ClusteredWriteBehindStore.Provider provider = new ClusteredWriteBehindStore.Provider(); + ServiceLocator serviceLocator = dependencySet().with(mock(ClusteringService.class)).build(); + provider.start(serviceLocator); + + assertThat(provider.rankAuthority(ClusteredResourceType.Types.DEDICATED, + Arrays.asList(cacheLoaderWriterConfiguration, writeBehindConfiguration)), + is(3)); + assertThat(provider.rankAuthority(ClusteredResourceType.Types.DEDICATED, + Collections.singletonList(writeBehindConfiguration)), + 
is(0)); + assertThat(provider.rankAuthority(new ClusteredStoreProviderTest.UnmatchedResourceType(), Arrays.asList(cacheLoaderWriterConfiguration, + writeBehindConfiguration)), + is(0)); + } +} diff --git a/clustered/client/src/test/java/org/ehcache/clustered/client/internal/loaderwriter/writebehind/ClusteredWriteBehindTest.java b/clustered/client/src/test/java/org/ehcache/clustered/client/internal/loaderwriter/writebehind/ClusteredWriteBehindTest.java new file mode 100644 index 0000000000..036b08c8c0 --- /dev/null +++ b/clustered/client/src/test/java/org/ehcache/clustered/client/internal/loaderwriter/writebehind/ClusteredWriteBehindTest.java @@ -0,0 +1,297 @@ +/* + * Copyright Terracotta, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.ehcache.clustered.client.internal.loaderwriter.writebehind; + +import org.ehcache.clustered.client.internal.store.operations.ChainResolver; +import org.ehcache.clustered.client.internal.store.operations.ExpiryChainResolver; +import org.ehcache.clustered.common.internal.store.operations.ConditionalRemoveOperation; +import org.ehcache.clustered.common.internal.store.operations.Operation; +import org.ehcache.clustered.common.internal.store.operations.PutIfAbsentOperation; +import org.ehcache.clustered.common.internal.store.operations.PutOperation; +import org.ehcache.clustered.common.internal.store.operations.PutWithWriterOperation; +import org.ehcache.clustered.common.internal.store.operations.RemoveOperation; +import org.ehcache.clustered.common.internal.store.operations.codecs.OperationsCodec; +import org.ehcache.clustered.common.internal.store.Chain; +import org.ehcache.clustered.common.internal.store.Element; +import org.ehcache.clustered.common.internal.store.Util; +import org.ehcache.clustered.loaderWriter.writebehind.RecordingLoaderWriter; +import org.ehcache.core.spi.time.SystemTimeSource; +import org.ehcache.core.spi.time.TimeSource; +import org.ehcache.expiry.ExpiryPolicy; +import org.ehcache.impl.serialization.LongSerializer; +import org.ehcache.impl.serialization.StringSerializer; +import org.junit.Test; +import org.mockito.ArgumentCaptor; + +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.concurrent.AbstractExecutorService; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +import static org.hamcrest.Matchers.is; +import static org.junit.Assert.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public class ClusteredWriteBehindTest { + + private static final TimeSource TIME_SOURCE = SystemTimeSource.INSTANCE; + + @Test + public void testPutWithWriter() throws Exception { + List 
eventInfoList = new ArrayList<>(); + eventInfoList.add(new EventInfo(1L, + new PutWithWriterOperation<>(1L, "The one", TIME_SOURCE.getTimeMillis()), + "The one", + true)); + eventInfoList.add(new EventInfo(1L, + new PutWithWriterOperation<>(1L, "The one one", TIME_SOURCE.getTimeMillis()), + "The one one", + true)); + eventInfoList.add(new EventInfo(2L, + new PutWithWriterOperation<>(2L, "The two", TIME_SOURCE.getTimeMillis()), + + "The two", + true)); + eventInfoList.add(new EventInfo(2L, + new PutWithWriterOperation<>(2L, "The two two", TIME_SOURCE.getTimeMillis()), + "The two two", + true)); + + HashMap result = new HashMap<>(); + result.put(1L, "The one one"); + result.put(2L, "The two two"); + verifyEvents(eventInfoList, result); + } + + @Test + public void testRemoves() throws Exception { + List eventInfoList = new ArrayList<>(); + eventInfoList.add(new EventInfo(1L, + new PutWithWriterOperation<>(1L, "The one", TIME_SOURCE.getTimeMillis()), + "The one", + true)); + eventInfoList.add(new EventInfo(1L, + new PutWithWriterOperation<>(1L, "The one one", TIME_SOURCE.getTimeMillis()), + "The one one", + true)); + eventInfoList.add(new EventInfo(1L, new RemoveOperation<>(1L, TIME_SOURCE.getTimeMillis()), null, true)); + + verifyEvents(eventInfoList, Collections.emptyMap()); + } + + @Test + public void testCAS() throws Exception { + List eventInfoList = new ArrayList<>(); + eventInfoList.add(new EventInfo(1L, + new PutIfAbsentOperation<>(1L, "The one", TIME_SOURCE.getTimeMillis()), + "The one", + true)); + eventInfoList.add(new EventInfo(1L, + new PutIfAbsentOperation<>(1L, "The one one", TIME_SOURCE.getTimeMillis()), + "none", + false)); + eventInfoList.add(new EventInfo(1L, + new ConditionalRemoveOperation<>(1L, "The one", TIME_SOURCE.getTimeMillis()), + null, + true)); + + verifyEvents(eventInfoList, Collections.emptyMap()); + } + + @Test + public void testPuts() throws Exception { + List eventInfoList = new ArrayList<>(); + eventInfoList.add(new EventInfo(1L, + new PutOperation<>(1L, "The one", TIME_SOURCE.getTimeMillis()), + "The one", + false)); + eventInfoList.add(new EventInfo(1L, + new PutWithWriterOperation<>(1L, "The one one", TIME_SOURCE.getTimeMillis()), + "The one one", + true)); + eventInfoList.add(new EventInfo(2L, new PutWithWriterOperation<>(2L, "The two", TIME_SOURCE.getTimeMillis()), + "The two", + true)); + eventInfoList.add(new EventInfo(4L, new PutWithWriterOperation<>(4L, "The four", TIME_SOURCE.getTimeMillis()), + "The four", + true)); + + HashMap result = new HashMap<>(); + result.put(1L, "The one one"); + result.put(2L, "The two"); + result.put(4L, "The four"); + verifyEvents(eventInfoList, result); + } + + @SuppressWarnings("unchecked") + private void verifyEvents(List expected, Map expectedChainContents) throws TimeoutException { + ClusteredWriteBehindStore clusteredWriteBehindStore = mock(ClusteredWriteBehindStore.class); + ExecutorService executorService = new TestExecutorService(); + RecordingLoaderWriter cacheLoaderWriter = new RecordingLoaderWriter<>(); + OperationsCodec operationCodec = new OperationsCodec<>(new LongSerializer(), new StringSerializer()); + ChainResolver resolver = new ExpiryChainResolver<>(operationCodec, ExpiryPolicy.NO_EXPIRY); + + ClusteredWriteBehind clusteredWriteBehind = new ClusteredWriteBehind<>(clusteredWriteBehindStore, + executorService, + TIME_SOURCE, + resolver, + cacheLoaderWriter, + operationCodec); + Chain elements = makeChain(expected, operationCodec); + + when(clusteredWriteBehindStore.lock(1L)).thenReturn(elements); + + 
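// Flow under test: the stubbed lock(1L) hands back the prepared chain; flushWriteBehindQueue
+ // drains it through the chain resolver and the recording loader-writer, then installs the
+ // compacted replacement chain via replaceAtHead(...), captured below for verification.
+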
ArgumentCaptor chainArgumentCaptor = ArgumentCaptor.forClass(Chain.class); + + clusteredWriteBehind.flushWriteBehindQueue(null, 1L); + + Map> records = cacheLoaderWriter.getRecords(); + + Map track = new HashMap<>(); + for (EventInfo event : expected) { + if (event.track) { + int next = track.compute(event.key, (k, v) -> v == null ? 0 : v + 1); + assertThat(records.get(event.key).get(next), is(event.expectedValue)); + } + } + + verify(clusteredWriteBehindStore).replaceAtHead(anyLong(), any(), chainArgumentCaptor.capture()); + + Chain value = chainArgumentCaptor.getValue(); + Map result = convert(value, operationCodec, resolver, TIME_SOURCE); + + for (Map.Entry entry : result.entrySet()) { + assertThat(entry.getValue(), is(expectedChainContents.get(entry.getKey()))); + } + + verify(clusteredWriteBehindStore).unlock(1L); + } + + private Map convert(Chain chain, OperationsCodec codec, + ChainResolver resolver, TimeSource timeSource) { + Map result = new HashMap<>(); + for (Element element : chain) { + ByteBuffer payload = element.getPayload(); + Operation operation = codec.decode(payload); + Long key = operation.getKey(); + PutOperation opResult = resolver.applyOperation(key, + null, + operation, + timeSource.getTimeMillis()); + result.put(key, opResult.getValue()); + } + return result; + } + + private Chain makeChain(List expected, OperationsCodec operationsCodec) { + ByteBuffer[] byteBuffers = new ByteBuffer[expected.size()]; + for (int i = 0; i < byteBuffers.length; i++) { + byteBuffers[i] = operationsCodec.encode(expected.get(i).operation); + } + return chain(byteBuffers); + } + + + class TestExecutorService extends AbstractExecutorService { + + @Override + public void shutdown() { + + } + + @Override + public List shutdownNow() { + return null; + } + + @Override + public boolean isShutdown() { + return false; + } + + @Override + public boolean isTerminated() { + return false; + } + + @Override + public boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedException { + return false; + } + + @Override + public void execute(Runnable command) { + command.run(); + } + } + + public static Chain chain(ByteBuffer... 
buffers) { + final List list = new ArrayList<>(); + for (ByteBuffer b : buffers) { + list.add(b::asReadOnlyBuffer); + } + + return new Chain() { + + final List elements = Collections.unmodifiableList(list); + + @Override + public Iterator iterator() { + return elements.iterator(); + } + + @Override + public Iterator reverseIterator() { + return Util.reverseIterator(elements); + } + + @Override + public boolean isEmpty() { + return elements.isEmpty(); + } + + @Override + public int length() { + return elements.size(); + } + }; + } + + private class EventInfo { + private final Long key; + private final Operation operation; + private final String expectedValue; + private final boolean track; + + private EventInfo(Long key, Operation operation, String expectedValue, boolean track) { + this.key = key; + this.operation = operation; + this.expectedValue = expectedValue; + this.track = track; + } + } +} diff --git a/clustered/client/src/test/java/org/ehcache/clustered/client/internal/service/ClusterStateRepositoryReplicationTest.java b/clustered/client/src/test/java/org/ehcache/clustered/client/internal/service/ClusterStateRepositoryReplicationTest.java index 9edde4a1e0..19469217ef 100644 --- a/clustered/client/src/test/java/org/ehcache/clustered/client/internal/service/ClusterStateRepositoryReplicationTest.java +++ b/clustered/client/src/test/java/org/ehcache/clustered/client/internal/service/ClusterStateRepositoryReplicationTest.java @@ -48,8 +48,8 @@ import static org.ehcache.clustered.client.config.builders.ClusteredResourcePoolBuilder.clusteredDedicated; import static org.ehcache.clustered.client.internal.UnitTestConnectionService.getOffheapResourcesType; import static org.ehcache.config.Eviction.noAdvice; +import static org.ehcache.config.builders.ExpiryPolicyBuilder.noExpiration; import static org.ehcache.config.builders.ResourcePoolsBuilder.newResourcePoolsBuilder; -import static org.ehcache.expiry.Expirations.noExpiration; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.is; import static org.junit.Assert.assertThat; @@ -109,7 +109,7 @@ public void testClusteredStateRepositoryReplication() throws Exception { ClusterStateRepository stateRepository = new ClusterStateRepository(spaceIdentifier, "test", clientEntity); - StateHolder testHolder = stateRepository.getPersistentStateHolder("testHolder", String.class, String.class); + StateHolder testHolder = stateRepository.getPersistentStateHolder("testHolder", String.class, String.class, c -> true, null); testHolder.putIfAbsent("One", "One"); testHolder.putIfAbsent("Two", "Two"); @@ -154,7 +154,7 @@ public Class getServiceType() { } }, "test", clientEntity); - StateHolder testMap = stateRepository.getPersistentStateHolder("testMap", TestVal.class, TestVal.class); + StateHolder testMap = stateRepository.getPersistentStateHolder("testMap", TestVal.class, TestVal.class, c -> true, null); testMap.putIfAbsent(new TestVal("One"), new TestVal("One")); testMap.putIfAbsent(new TestVal("Two"), new TestVal("Two")); @@ -176,6 +176,9 @@ private static SimpleClusterTierClientEntity getEntity(ServerStoreProxy clusteri } private static class TestVal implements Serializable { + + private static final long serialVersionUID = 1L; + final String val; diff --git a/clustered/client/src/test/java/org/ehcache/clustered/client/internal/service/ClusteringServiceFactoryTest.java b/clustered/client/src/test/java/org/ehcache/clustered/client/internal/service/ClusteringServiceFactoryTest.java index f04ced19e9..c25cf95aff 100644 --- 
a/clustered/client/src/test/java/org/ehcache/clustered/client/internal/service/ClusteringServiceFactoryTest.java +++ b/clustered/client/src/test/java/org/ehcache/clustered/client/internal/service/ClusteringServiceFactoryTest.java @@ -16,14 +16,13 @@ package org.ehcache.clustered.client.internal.service; -import org.ehcache.clustered.client.internal.service.ClusteringServiceFactory; import org.ehcache.core.spi.service.ServiceFactory; import org.ehcache.core.internal.util.ClassLoading; import org.junit.Test; import java.util.ServiceLoader; -import static org.junit.Assert.*; +import static org.junit.Assert.fail; /** * @author Clifford W. Johnson @@ -32,16 +31,17 @@ public class ClusteringServiceFactoryTest { @Test public void testServiceLocator() throws Exception { - final String expectedFactory = ClusteringServiceFactory.class.getName(); - final ServiceLoader factories = ClassLoading.libraryServiceLoaderFor(ServiceFactory.class); - foundParser: { - for (final ServiceFactory factory : factories) { - if (factory.getClass().getName().equals(expectedFactory)) { - break foundParser; - } + String expectedFactory = ClusteringServiceFactory.class.getName(); + @SuppressWarnings({"unchecked", "rawtypes"}) + ServiceLoader> factories = (ServiceLoader) ClassLoading.libraryServiceLoaderFor(ServiceFactory.class); + + for (ServiceFactory factory : factories) { + if (factory.getClass().getName().equals(expectedFactory)) { + return; } - fail("Expected factory not found"); } + + fail("Expected factory not found"); } -} \ No newline at end of file +} diff --git a/clustered/client/src/test/java/org/ehcache/clustered/client/internal/service/ConnectionClosedTest.java b/clustered/client/src/test/java/org/ehcache/clustered/client/internal/service/ConnectionClosedTest.java new file mode 100644 index 0000000000..ea417720cf --- /dev/null +++ b/clustered/client/src/test/java/org/ehcache/clustered/client/internal/service/ConnectionClosedTest.java @@ -0,0 +1,124 @@ +/* + * Copyright Terracotta, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.ehcache.clustered.client.internal.service; + +import org.ehcache.Cache; +import org.ehcache.PersistentCacheManager; +import org.ehcache.clustered.client.config.builders.TimeoutsBuilder; +import org.ehcache.clustered.client.internal.UnitTestConnectionService; +import org.ehcache.config.builders.CacheManagerBuilder; +import org.ehcache.config.builders.ResourcePoolsBuilder; +import org.ehcache.config.units.MemoryUnit; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.terracotta.connection.Connection; +import org.terracotta.connection.ConnectionPropertyNames; + +import java.net.URI; +import java.time.Duration; +import java.util.Collection; +import java.util.Properties; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.TimeUnit; + +import static org.ehcache.clustered.client.config.builders.ClusteredResourcePoolBuilder.clusteredDedicated; +import static org.ehcache.clustered.client.config.builders.ClusteringServiceConfigurationBuilder.cluster; +import static org.ehcache.config.builders.CacheConfigurationBuilder.newCacheConfigurationBuilder; +import static org.ehcache.config.builders.CacheManagerBuilder.newCacheManagerBuilder; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; +import static org.junit.Assert.assertThat; + +public class ConnectionClosedTest { + + private static final URI CLUSTER_URI = URI.create("terracotta://connection.com:9540/timeout"); + + @Before + public void definePassthroughServer() throws Exception { + UnitTestConnectionService.add(CLUSTER_URI, + new UnitTestConnectionService.PassthroughServerBuilder() + .resource("primary-server-resource", 64, MemoryUnit.MB) + .build()); + } + + @After + public void removePassthroughServer() throws Exception { + try { + UnitTestConnectionService.remove(CLUSTER_URI); + } catch (IllegalStateException e) { + assertThat(e.getMessage(), is("Connection already closed")); + } + } + + @Test + public void testCacheOperationThrowsAfterConnectionClosed() throws Exception { + + ResourcePoolsBuilder resourcePoolsBuilder = ResourcePoolsBuilder.newResourcePoolsBuilder() + .with(clusteredDedicated("primary-server-resource", 2, MemoryUnit.MB)); + + CacheManagerBuilder clusteredCacheManagerBuilder = + newCacheManagerBuilder() + .with(cluster(CLUSTER_URI) + .timeouts(TimeoutsBuilder + .timeouts() + .connection(Duration.ofSeconds(20)) + .build()) + .autoCreate()) + .withCache("clustered-cache", newCacheConfigurationBuilder(Long.class, String.class, + resourcePoolsBuilder)); + PersistentCacheManager cacheManager = clusteredCacheManagerBuilder.build(true); + + Cache cache = cacheManager.getCache("clustered-cache", Long.class, String.class); + + Collection connectionProperties = UnitTestConnectionService.getConnectionProperties(CLUSTER_URI); + + assertThat(connectionProperties.size(), is(1)); + Properties properties = connectionProperties.iterator().next(); + + assertThat(properties.getProperty(ConnectionPropertyNames.CONNECTION_TIMEOUT), is("20000")); + + cache.put(1L, "value"); + assertThat(cache.get(1L), is("value")); + + Collection connections = UnitTestConnectionService.getConnections(CLUSTER_URI); + + assertThat(connections.size(), is(1)); + + Connection connection = connections.iterator().next(); + + connection.close(); + + CompletableFuture future = CompletableFuture.supplyAsync(() -> { + while (true) { + try { + Thread.sleep(200); + } catch (InterruptedException e) { + // + } + String result; + if ((result = cache.get(1L)) != null) 
{ + return result; + } + } + }); + + assertThat(future.get(5, TimeUnit.SECONDS), is("value")); + + } + +} diff --git a/clustered/client/src/test/java/org/ehcache/clustered/client/internal/service/ConnectionStateTest.java b/clustered/client/src/test/java/org/ehcache/clustered/client/internal/service/ConnectionStateTest.java new file mode 100644 index 0000000000..e2a12e8793 --- /dev/null +++ b/clustered/client/src/test/java/org/ehcache/clustered/client/internal/service/ConnectionStateTest.java @@ -0,0 +1,122 @@ +/* + * Copyright Terracotta, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.ehcache.clustered.client.internal.service; + +import org.ehcache.clustered.client.config.ClusteredResourcePool; +import org.ehcache.clustered.client.config.ClusteringServiceConfiguration; +import org.ehcache.clustered.client.config.Timeouts; +import org.ehcache.clustered.client.config.builders.ClusteredResourcePoolBuilder; +import org.ehcache.clustered.client.config.builders.ClusteringServiceConfigurationBuilder; +import org.ehcache.clustered.client.internal.UnitTestConnectionService; +import org.ehcache.clustered.client.internal.store.ClusterTierClientEntity; +import org.ehcache.clustered.common.internal.ServerStoreConfiguration; +import org.ehcache.config.units.MemoryUnit; +import org.ehcache.impl.serialization.LongSerializer; +import org.ehcache.impl.serialization.StringSerializer; +import org.junit.After; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; +import org.terracotta.connection.Connection; + +import java.io.IOException; +import java.net.URI; +import java.util.Collection; +import java.util.Properties; + +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; +import static org.junit.Assert.assertThat; + +public class ConnectionStateTest { + + private static URI CLUSTER_URI = URI.create("terracotta://localhost:9510"); + + private final ClusteringServiceConfiguration serviceConfiguration = ClusteringServiceConfigurationBuilder + .cluster(CLUSTER_URI) + .autoCreate() + .build(); + + @Rule + public ExpectedException expectedException = ExpectedException.none(); + + @Before + public void definePassthroughServer() { + UnitTestConnectionService.add(CLUSTER_URI, + new UnitTestConnectionService.PassthroughServerBuilder() + .resource("primary-server-resource", 64, MemoryUnit.MB) + .resource("secondary-server-resource", 64, MemoryUnit.MB) + .build()); + } + + @After + public void removePassthrough() { + expectedException.expect(IllegalStateException.class); + UnitTestConnectionService.remove(CLUSTER_URI); + } + + @Test + public void testInitializeStateAfterConnectionCloses() throws Exception { + + ConnectionState connectionState = new ConnectionState(Timeouts.DEFAULT, new Properties(), serviceConfiguration); + connectionState.initClusterConnection(); + + closeConnection(); + + expectedException.expect(IllegalStateException.class); + connectionState.getConnection().close(); + 
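// After the close above, initializeState() is expected to re-establish the cluster connection; the assertions below check the rebuilt state.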
+ connectionState.initializeState(); + + assertThat(connectionState.getConnection(), notNullValue()); + assertThat(connectionState.getEntityFactory(), notNullValue()); + + connectionState.getConnection().close(); + + } + + @Test + public void testCreateClusterTierEntityAfterConnectionCloses() throws Exception { + + ConnectionState connectionState = new ConnectionState(Timeouts.DEFAULT, new Properties(), serviceConfiguration); + connectionState.initClusterConnection(); + connectionState.initializeState(); + + closeConnection(); + + ClusteredResourcePool resourcePool = ClusteredResourcePoolBuilder.clusteredDedicated("primary-server-resource", 4, MemoryUnit.MB); + ServerStoreConfiguration serverStoreConfiguration = new ServerStoreConfiguration(resourcePool.getPoolAllocation(), + Long.class.getName(), String.class.getName(), LongSerializer.class.getName(), StringSerializer.class.getName(), null, false); + + ClusterTierClientEntity clientEntity = connectionState.createClusterTierClientEntity("cache1", serverStoreConfiguration, false); + + assertThat(clientEntity, notNullValue()); + + } + + // Simulates the connection being closed, as would happen on lease expiry + private void closeConnection() throws IOException { + Collection connections = UnitTestConnectionService.getConnections(CLUSTER_URI); + + assertThat(connections.size(), is(1)); + + Connection connection = connections.iterator().next(); + + connection.close(); + } + +} diff --git a/clustered/client/src/test/java/org/ehcache/clustered/client/internal/service/DefaultClusteringServiceTest.java b/clustered/client/src/test/java/org/ehcache/clustered/client/internal/service/DefaultClusteringServiceTest.java index 9968f29972..fc76c80768 100644 --- a/clustered/client/src/test/java/org/ehcache/clustered/client/internal/service/DefaultClusteringServiceTest.java +++ b/clustered/client/src/test/java/org/ehcache/clustered/client/internal/service/DefaultClusteringServiceTest.java @@ -175,7 +175,7 @@ public void testGetPersistenceSpaceIdentifier() throws Exception { .build(); DefaultClusteringService service = new DefaultClusteringService(configuration); - PersistableResourceService.PersistenceSpaceIdentifier spaceIdentifier = service.getPersistenceSpaceIdentifier("cacheAlias", null); + PersistableResourceService.PersistenceSpaceIdentifier spaceIdentifier = service.getPersistenceSpaceIdentifier("cacheAlias", null); assertThat(spaceIdentifier, is(instanceOf(ClusteredCacheIdentifier.class))); assertThat(((ClusteredCacheIdentifier)spaceIdentifier).getId(), is("cacheAlias")); assertThat(service.getPersistenceSpaceIdentifier("cacheAlias", null), sameInstance(spaceIdentifier)); @@ -193,7 +193,7 @@ public void testCreate() throws Exception { .build(); DefaultClusteringService service = new DefaultClusteringService(configuration); - PersistableResourceService.PersistenceSpaceIdentifier spaceIdentifier = service.getPersistenceSpaceIdentifier("cacheAlias", configBuilder + PersistableResourceService.PersistenceSpaceIdentifier spaceIdentifier = service.getPersistenceSpaceIdentifier("cacheAlias", configBuilder + .build()); assertThat(spaceIdentifier, instanceOf(ClusteredCacheIdentifier.class)); assertThat(((ClusteredCacheIdentifier) spaceIdentifier).getId(), is("cacheAlias")); @@ -807,7 +807,7 @@ public void testReleaseServerStoreProxyShared() throws Exception { ObservableClusterTierActiveEntity clusterTierActiveEntity = clusterTierActiveEntities.get(0); assertThat(clusterTierActiveEntity.getConnectedClients().size(), is(1)); -
creationService.releaseServerStoreProxy(serverStoreProxy); + creationService.releaseServerStoreProxy(serverStoreProxy, false); assertThat(activeEntity.getConnectedClients().size(), is(1)); assertThat(activeEntity.getStores(), containsInAnyOrder(cacheAlias)); @@ -815,7 +815,7 @@ public void testReleaseServerStoreProxyShared() throws Exception { assertThat(clusterTierActiveEntity.getConnectedClients(), empty()); try { - creationService.releaseServerStoreProxy(serverStoreProxy); + creationService.releaseServerStoreProxy(serverStoreProxy, false); fail("Expecting IllegalStateException"); } catch (IllegalStateException e) { assertThat(e.getMessage(), containsString("Endpoint closed")); @@ -1120,7 +1120,7 @@ public void testReleaseServerStoreProxyDedicated() throws Exception { ObservableClusterTierActiveEntity clusterTierActiveEntity = clusterTierActiveEntities.get(0); assertThat(clusterTierActiveEntity.getConnectedClients(), not(empty())); - creationService.releaseServerStoreProxy(serverStoreProxy); + creationService.releaseServerStoreProxy(serverStoreProxy, false); assertThat(activeEntity.getDedicatedResourcePoolIds(), containsInAnyOrder(cacheAlias)); assertThat(activeEntity.getConnectedClients().size(), is(1)); @@ -1128,7 +1128,7 @@ public void testReleaseServerStoreProxyDedicated() throws Exception { assertThat(clusterTierActiveEntity.getConnectedClients(), empty()); try { - creationService.releaseServerStoreProxy(serverStoreProxy); + creationService.releaseServerStoreProxy(serverStoreProxy, false); fail("Expecting IllegalStateException"); } catch (IllegalStateException e) { assertThat(e.getMessage(), containsString("Endpoint closed")); @@ -1178,7 +1178,7 @@ public void testGetServerStoreProxySharedDestroy() throws Exception { assertThat(getRootCause(e).getMessage(), containsString(" in use by ")); } - creationService.releaseServerStoreProxy(serverStoreProxy); + creationService.releaseServerStoreProxy(serverStoreProxy, false); assertThat(activeEntity.getStores(), containsInAnyOrder(cacheAlias)); assertThat(clusterTierActiveEntity.getConnectedClients(), empty()); @@ -1231,7 +1231,7 @@ public void testGetServerStoreProxyDedicatedDestroy() throws Exception { assertThat(getRootCause(e).getMessage(), containsString(" in use by ")); } - creationService.releaseServerStoreProxy(serverStoreProxy); + creationService.releaseServerStoreProxy(serverStoreProxy, false); assertThat(activeEntity.getDedicatedResourcePoolIds(), containsInAnyOrder(cacheAlias)); assertThat(activeEntity.getStores(), containsInAnyOrder(cacheAlias)); assertThat(clusterTierActiveEntity.getConnectedClients(), empty()); @@ -1996,7 +1996,7 @@ public void testGetStateRepositoryWithinTwiceWithSameName() throws Exception { ClusteringServiceConfiguration configuration = new ClusteringServiceConfiguration(URI.create(CLUSTER_URI_BASE), true, new ServerSideConfiguration(Collections.emptyMap())); DefaultClusteringService service = new DefaultClusteringService(configuration); - PersistableResourceService.PersistenceSpaceIdentifier cacheIdentifier = service.getPersistenceSpaceIdentifier("myCache", null); + PersistableResourceService.PersistenceSpaceIdentifier cacheIdentifier = service.getPersistenceSpaceIdentifier("myCache", null); StateRepository repository1 = service.getStateRepositoryWithin(cacheIdentifier, "myRepo"); StateRepository repository2 = service.getStateRepositoryWithin(cacheIdentifier, "myRepo"); assertThat(repository1, sameInstance(repository2)); @@ -2007,8 +2007,8 @@ public void 
@@ -2007,8 +2007,8 @@ public void testGetStateRepositoryWithinTwiceWithSameNameDifferentPersistenceSpa ClusteringServiceConfiguration configuration = new ClusteringServiceConfiguration(URI.create(CLUSTER_URI_BASE), true, new ServerSideConfiguration(Collections.emptyMap())); DefaultClusteringService service = new DefaultClusteringService(configuration); - PersistableResourceService.PersistenceSpaceIdentifier cacheIdentifier1 = service.getPersistenceSpaceIdentifier("myCache1", null); - PersistableResourceService.PersistenceSpaceIdentifier cacheIdentifier2 = service.getPersistenceSpaceIdentifier("myCache2", null); + PersistableResourceService.PersistenceSpaceIdentifier<?> cacheIdentifier1 = service.getPersistenceSpaceIdentifier("myCache1", null); + PersistableResourceService.PersistenceSpaceIdentifier<?> cacheIdentifier2 = service.getPersistenceSpaceIdentifier("myCache2", null); StateRepository repository1 = service.getStateRepositoryWithin(cacheIdentifier1, "myRepo"); StateRepository repository2 = service.getStateRepositoryWithin(cacheIdentifier2, "myRepo"); assertThat(repository1, not(sameInstance(repository2))); @@ -2045,7 +2045,7 @@ public void testReleasePersistenceSpaceIdentifierTwice() throws Exception { ClusteringServiceConfiguration configuration = new ClusteringServiceConfiguration(URI.create(CLUSTER_URI_BASE), true, new ServerSideConfiguration(Collections.emptyMap())); DefaultClusteringService service = new DefaultClusteringService(configuration); - PersistableResourceService.PersistenceSpaceIdentifier cacheIdentifier = service.getPersistenceSpaceIdentifier("myCache", null); + PersistableResourceService.PersistenceSpaceIdentifier<?> cacheIdentifier = service.getPersistenceSpaceIdentifier("myCache", null); try { service.releasePersistenceSpaceIdentifier(cacheIdentifier); } catch (CachePersistenceException e) { diff --git a/clustered/client/src/test/java/org/ehcache/clustered/client/internal/service/ReconnectTest.java b/clustered/client/src/test/java/org/ehcache/clustered/client/internal/service/ReconnectTest.java new file mode 100644 index 0000000000..c2607aaf9b --- /dev/null +++ b/clustered/client/src/test/java/org/ehcache/clustered/client/internal/service/ReconnectTest.java @@ -0,0 +1,89 @@ +/* + * Copyright Terracotta, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ +package org.ehcache.clustered.client.internal.service; + +import org.ehcache.clustered.client.config.ClusteringServiceConfiguration; +import org.ehcache.clustered.client.config.Timeouts; +import org.ehcache.clustered.client.config.builders.ClusteringServiceConfigurationBuilder; +import org.ehcache.clustered.client.internal.MockConnectionService; +import org.hamcrest.Matchers; +import org.junit.Assert; +import org.junit.Test; +import org.mockito.Mockito; +import org.terracotta.connection.Connection; +import org.terracotta.exception.ConnectionShutdownException; + +import java.net.URI; +import java.util.Properties; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; + +public class ReconnectTest { + + private static URI CLUSTER_URI = URI.create("mock://localhost:9510"); + + private final ClusteringServiceConfiguration serviceConfiguration = ClusteringServiceConfigurationBuilder + .cluster(CLUSTER_URI) + .autoCreate() + .build(); + + @Test(expected = RuntimeException.class) + public void testInitialConnectDoesNotRetryAfterConnectionException() { + MockConnectionService.mockConnection = null; + ConnectionState connectionState = new ConnectionState(Timeouts.DEFAULT, new Properties(), serviceConfiguration); + + connectionState.initClusterConnection(); + } + + @Test + public void testAfterConnectionReconnectHappensEvenAfterConnectionException() throws Exception { + Connection connection = Mockito.mock(Connection.class, Mockito.withSettings() + .defaultAnswer(invocation -> { + throw new ConnectionShutdownException("Connection Closed"); + })); + + MockConnectionService.mockConnection = connection; + + ConnectionState connectionState = new ConnectionState(Timeouts.DEFAULT, new Properties(), serviceConfiguration); + + connectionState.initClusterConnection(); + + CompletableFuture future = CompletableFuture.runAsync(() -> connectionState.initializeState()); + + MockConnectionService.mockConnection = null; + + CompletableFuture reconnecting = CompletableFuture.runAsync(() -> { + MockConnectionService.mockConnection = Mockito.mock(Connection.class, Mockito.withSettings().defaultAnswer(invocation -> { + throw new RuntimeException("Stop reconnecting"); + })); + while (connectionState.getReconnectCount() == 1) { + break; + } + }); + + reconnecting.get(); + + try { + future.get(); + } catch (ExecutionException e) { + Assert.assertThat(e.getCause().getMessage(), Matchers.is("Stop reconnecting")); + } + + Assert.assertThat(connectionState.getReconnectCount(), Matchers.is(1)); + + } + +} diff --git a/clustered/client/src/test/java/org/ehcache/clustered/client/internal/service/StateRepositoryWhitelistingTest.java b/clustered/client/src/test/java/org/ehcache/clustered/client/internal/service/StateRepositoryWhitelistingTest.java index 6975d20aa5..e6d5cc2e21 100644 --- a/clustered/client/src/test/java/org/ehcache/clustered/client/internal/service/StateRepositoryWhitelistingTest.java +++ b/clustered/client/src/test/java/org/ehcache/clustered/client/internal/service/StateRepositoryWhitelistingTest.java @@ -48,8 +48,8 @@ import static org.ehcache.clustered.client.config.builders.ClusteredResourcePoolBuilder.clusteredDedicated; import static org.ehcache.clustered.client.internal.UnitTestConnectionService.getOffheapResourcesType; import static org.ehcache.config.Eviction.noAdvice; +import static org.ehcache.config.builders.ExpiryPolicyBuilder.noExpiration; import static org.ehcache.config.builders.ResourcePoolsBuilder.newResourcePoolsBuilder; -import static 
org.ehcache.expiry.Expirations.noExpiration; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.is; import static org.junit.Assert.assertThat; @@ -169,6 +169,9 @@ public void testWhitelistingForPrimitiveClass() throws Exception { } private static class Parent implements Serializable { + + private static final long serialVersionUID = 1L; + final int val; private Parent(int val) { @@ -192,6 +195,9 @@ public int hashCode() { } private static class Child extends Parent implements Serializable { + + private static final long serialVersionUID = 1L; + final long longValue; private Child(int val, long longValue) { diff --git a/clustered/client/src/test/java/org/ehcache/clustered/client/internal/store/AbstractServerStoreProxyTest.java b/clustered/client/src/test/java/org/ehcache/clustered/client/internal/store/AbstractServerStoreProxyTest.java index fb6ba014ad..7d93bf16d3 100644 --- a/clustered/client/src/test/java/org/ehcache/clustered/client/internal/store/AbstractServerStoreProxyTest.java +++ b/clustered/client/src/test/java/org/ehcache/clustered/client/internal/store/AbstractServerStoreProxyTest.java @@ -15,6 +15,7 @@ */ package org.ehcache.clustered.client.internal.store; +import org.ehcache.clustered.client.config.builders.TimeoutsBuilder; import org.ehcache.clustered.client.internal.ClusterTierManagerClientEntityFactory; import org.ehcache.clustered.client.internal.ClusterTierManagerClientEntityService; import org.ehcache.clustered.client.internal.UnitTestConnectionService; @@ -31,6 +32,7 @@ import org.terracotta.connection.Connection; import java.net.URI; +import java.time.Duration; import java.util.Collections; import java.util.Properties; @@ -62,18 +64,28 @@ public static void destroyCluster() { protected static SimpleClusterTierClientEntity createClientEntity(String name, ServerStoreConfiguration configuration, boolean create) throws Exception { + return createClientEntity(name, configuration, create, true); + } + + protected static SimpleClusterTierClientEntity createClientEntity(String name, + ServerStoreConfiguration configuration, + boolean create, + boolean validate) throws Exception { Connection connection = CONNECTION_SERVICE.connect(CLUSTER_URI, new Properties()); // Create ClusterTierManagerClientEntity if needed - ClusterTierManagerClientEntityFactory entityFactory = new ClusterTierManagerClientEntityFactory(connection); + ClusterTierManagerClientEntityFactory entityFactory = new ClusterTierManagerClientEntityFactory( + connection, + TimeoutsBuilder.timeouts().write(Duration.ofSeconds(30)).build()); if (create) { entityFactory.create(name, new ServerSideConfiguration("defaultResource", Collections.emptyMap())); } // Create or fetch the ClusterTierClientEntity SimpleClusterTierClientEntity clientEntity = (SimpleClusterTierClientEntity) entityFactory.fetchOrCreateClusteredStoreEntity(name, name, configuration, create); - clientEntity.validate(configuration); + if (validate) { + clientEntity.validate(configuration); + } return clientEntity; } - } diff --git a/clustered/client/src/test/java/org/ehcache/clustered/client/internal/store/ChainBuilderTest.java b/clustered/client/src/test/java/org/ehcache/clustered/client/internal/store/ChainBuilderTest.java index c692c5f13e..725ba42a48 100644 --- a/clustered/client/src/test/java/org/ehcache/clustered/client/internal/store/ChainBuilderTest.java +++ b/clustered/client/src/test/java/org/ehcache/clustered/client/internal/store/ChainBuilderTest.java @@ -20,7 +20,11 @@ import 
org.ehcache.clustered.common.internal.store.Util; import org.junit.Test; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.Arrays; import java.util.Iterator; +import java.util.List; import static org.hamcrest.Matchers.is; import static org.junit.Assert.assertThat; diff --git a/clustered/client/src/test/java/org/ehcache/clustered/client/internal/store/ClusteredStoreProviderTest.java b/clustered/client/src/test/java/org/ehcache/clustered/client/internal/store/ClusteredStoreProviderTest.java index 41125ca8c1..e0c76ef807 100644 --- a/clustered/client/src/test/java/org/ehcache/clustered/client/internal/store/ClusteredStoreProviderTest.java +++ b/clustered/client/src/test/java/org/ehcache/clustered/client/internal/store/ClusteredStoreProviderTest.java @@ -25,19 +25,20 @@ import org.ehcache.config.ResourcePool; import org.ehcache.config.ResourcePools; import org.ehcache.config.ResourceType; +import org.ehcache.config.builders.ExpiryPolicyBuilder; import org.ehcache.config.units.MemoryUnit; import org.ehcache.core.config.ResourcePoolsImpl; import org.ehcache.core.internal.service.ServiceLocator; import org.ehcache.core.spi.service.DiskResourceService; import org.ehcache.core.spi.store.Store; -import org.ehcache.expiry.Expirations; -import org.ehcache.expiry.Expiry; +import org.ehcache.expiry.ExpiryPolicy; import org.ehcache.impl.internal.store.disk.OffHeapDiskStore; import org.ehcache.impl.internal.store.heap.OnHeapStore; import org.ehcache.impl.internal.store.offheap.OffHeapStore; import org.ehcache.impl.internal.store.tiering.TieredStore; import org.ehcache.impl.serialization.LongSerializer; import org.ehcache.impl.serialization.StringSerializer; +import org.ehcache.spi.loaderwriter.CacheLoaderWriter; import org.ehcache.spi.serialization.Serializer; import org.ehcache.spi.service.ServiceConfiguration; import org.junit.Test; @@ -168,8 +169,8 @@ public ClassLoader getClassLoader() { } @Override - public Expiry getExpiry() { - return Expirations.noExpiration(); + public ExpiryPolicy getExpiry() { + return ExpiryPolicyBuilder.noExpiration(); } @Override @@ -194,10 +195,15 @@ public Serializer getValueSerializer() { public int getDispatcherConcurrency() { return 1; } + + @Override + public CacheLoaderWriter getCacheLoaderWriter() { + return null; + } }; } - private static class UnmatchedResourceType implements ResourceType { + public static class UnmatchedResourceType implements ResourceType { @Override public Class getResourcePoolClass() { return ResourcePool.class; diff --git a/clustered/client/src/test/java/org/ehcache/clustered/client/internal/store/ClusteredStoreTest.java b/clustered/client/src/test/java/org/ehcache/clustered/client/internal/store/ClusteredStoreTest.java index cee4befaf5..beebc0dc29 100644 --- a/clustered/client/src/test/java/org/ehcache/clustered/client/internal/store/ClusteredStoreTest.java +++ b/clustered/client/src/test/java/org/ehcache/clustered/client/internal/store/ClusteredStoreTest.java @@ -16,6 +16,7 @@ package org.ehcache.clustered.client.internal.store; +import org.assertj.core.api.ThrowableAssert; import org.ehcache.clustered.client.TestTimeSource; import org.ehcache.clustered.client.config.ClusteredResourcePool; import org.ehcache.clustered.client.config.builders.ClusteredResourcePoolBuilder; @@ -23,22 +24,25 @@ import org.ehcache.clustered.client.internal.UnitTestConnectionService; import org.ehcache.clustered.client.internal.store.ServerStoreProxy.ServerCallback; import 
org.ehcache.clustered.client.internal.store.operations.EternalChainResolver; -import org.ehcache.clustered.client.internal.store.operations.Result; -import org.ehcache.clustered.client.internal.store.operations.codecs.OperationsCodec; +import org.ehcache.clustered.common.internal.store.operations.Result; +import org.ehcache.clustered.common.internal.store.operations.codecs.OperationsCodec; import org.ehcache.clustered.common.ServerSideConfiguration; import org.ehcache.clustered.common.internal.ServerStoreConfiguration; import org.ehcache.clustered.common.internal.store.Chain; +import org.ehcache.config.EvictionAdvisor; +import org.ehcache.config.ResourcePools; import org.ehcache.config.units.MemoryUnit; import org.ehcache.core.Ehcache; import org.ehcache.core.spi.store.Store; -import org.ehcache.core.spi.store.StoreAccessException; -import org.ehcache.core.spi.store.StoreAccessTimeoutException; +import org.ehcache.expiry.ExpiryPolicy; +import org.ehcache.spi.loaderwriter.CacheLoaderWriter; +import org.ehcache.spi.resilience.StoreAccessException; import org.ehcache.core.spi.time.TimeSource; import org.ehcache.core.statistics.StoreOperationOutcomes; -import org.ehcache.expiry.Expirations; import org.ehcache.impl.store.HashUtils; import org.ehcache.impl.serialization.LongSerializer; import org.ehcache.impl.serialization.StringSerializer; +import org.ehcache.spi.serialization.Serializer; import org.junit.After; import org.junit.Before; import org.junit.Test; @@ -57,6 +61,7 @@ import java.util.concurrent.TimeoutException; import java.util.function.Function; +import static org.assertj.core.api.Assertions.assertThatExceptionOfType; import static org.ehcache.clustered.client.internal.store.ClusteredStore.DEFAULT_CHAIN_COMPACTION_THRESHOLD; import static org.ehcache.clustered.client.internal.store.ClusteredStore.CHAIN_COMPACTION_THRESHOLD_PROP; import static org.ehcache.clustered.util.StatisticsTestUtils.validateStat; @@ -82,6 +87,59 @@ public class ClusteredStoreTest { private ClusteredStore store; + private final Store.Configuration config = new Store.Configuration() { + + @Override + public Class getKeyType() { + return Long.class; + } + + @Override + public Class getValueType() { + return String.class; + } + + @Override + public EvictionAdvisor getEvictionAdvisor() { + return null; + } + + @Override + public ClassLoader getClassLoader() { + return null; + } + + @Override + public ExpiryPolicy getExpiry() { + return null; + } + + @Override + public ResourcePools getResourcePools() { + return null; + } + + @Override + public Serializer getKeySerializer() { + return null; + } + + @Override + public Serializer getValueSerializer() { + return null; + } + + @Override + public int getDispatcherConcurrency() { + return 0; + } + + @Override + public CacheLoaderWriter getCacheLoaderWriter() { + return null; + } + }; + @Before public void setup() throws Exception { UnitTestConnectionService.add( @@ -98,7 +156,7 @@ public void setup() throws Exception { ClusteredResourcePool resourcePool = ClusteredResourcePoolBuilder.clusteredDedicated(4, MemoryUnit.MB); ServerStoreConfiguration serverStoreConfiguration = new ServerStoreConfiguration(resourcePool.getPoolAllocation(), - Long.class.getName(), String.class.getName(), LongSerializer.class.getName(), StringSerializer.class.getName(), null); + Long.class.getName(), String.class.getName(), LongSerializer.class.getName(), StringSerializer.class.getName(), null, false); ClusterTierClientEntity clientEntity = 
entityFactory.fetchOrCreateClusteredStoreEntity("TestCacheManager", CACHE_IDENTIFIER, serverStoreConfiguration, true); clientEntity.validate(serverStoreConfiguration); ServerStoreProxy serverStoreProxy = new CommonServerStoreProxy(CACHE_IDENTIFIER, clientEntity, mock(ServerCallback.class)); @@ -107,7 +165,7 @@ public void setup() throws Exception { OperationsCodec codec = new OperationsCodec<>(new LongSerializer(), new StringSerializer()); EternalChainResolver resolver = new EternalChainResolver<>(codec); - store = new ClusteredStore<>(codec, resolver, serverStoreProxy, testTimeSource); + store = new ClusteredStore<>(config, codec, resolver, serverStoreProxy, testTimeSource); } @After @@ -115,6 +173,12 @@ public void tearDown() throws Exception { UnitTestConnectionService.remove("terracotta://localhost/my-application"); } + private void assertTimeoutOccurred(ThrowableAssert.ThrowingCallable throwingCallable) { + assertThatExceptionOfType(StoreAccessException.class) + .isThrownBy(throwingCallable) + .withCauseInstanceOf(TimeoutException.class); + } + @Test public void testPut() throws Exception { assertThat(store.put(1L, "one"), is(Store.PutStatus.PUT)); @@ -124,15 +188,16 @@ public void testPut() throws Exception { validateStat(store, StoreOperationOutcomes.PutOutcome.PUT, 3); } - @Test(expected = StoreAccessTimeoutException.class) + @Test @SuppressWarnings("unchecked") public void testPutTimeout() throws Exception { ServerStoreProxy proxy = mock(ServerStoreProxy.class); OperationsCodec codec = mock(OperationsCodec.class); TimeSource timeSource = mock(TimeSource.class); doThrow(TimeoutException.class).when(proxy).append(anyLong(), isNull()); - ClusteredStore store = new ClusteredStore<>(codec, null, proxy, timeSource); - store.put(1L, "one"); + ClusteredStore store = new ClusteredStore<>(config, codec, null, proxy, timeSource); + + assertTimeoutOccurred(() -> store.put(1L, "one")); } @Test @@ -140,7 +205,7 @@ public void testGet() throws Exception { assertThat(store.get(1L), nullValue()); validateStats(store, EnumSet.of(StoreOperationOutcomes.GetOutcome.MISS)); store.put(1L, "one"); - assertThat(store.get(1L).value(), is("one")); + assertThat(store.get(1L).get(), is("one")); validateStats(store, EnumSet.of(StoreOperationOutcomes.GetOutcome.MISS, StoreOperationOutcomes.GetOutcome.HIT)); } @@ -153,7 +218,7 @@ public void testGetThrowsOnlySAE() throws Exception { ServerStoreProxy serverStoreProxy = mock(ServerStoreProxy.class); when(serverStoreProxy.get(anyLong())).thenThrow(new RuntimeException()); TestTimeSource testTimeSource = mock(TestTimeSource.class); - ClusteredStore store = new ClusteredStore<>(codec, chainResolver, serverStoreProxy, testTimeSource); + ClusteredStore store = new ClusteredStore<>(config, codec, chainResolver, serverStoreProxy, testTimeSource); store.get(1L); } @@ -163,7 +228,7 @@ public void testGetTimeout() throws Exception { ServerStoreProxy proxy = mock(ServerStoreProxy.class); long longKey = HashUtils.intHashToLong(new Long(1L).hashCode()); when(proxy.get(longKey)).thenThrow(TimeoutException.class); - ClusteredStore store = new ClusteredStore<>(null, null, proxy, null); + ClusteredStore store = new ClusteredStore<>(config,null, null, proxy, null); assertThat(store.get(1L), nullValue()); validateStats(store, EnumSet.of(StoreOperationOutcomes.GetOutcome.TIMEOUT)); } @@ -187,7 +252,7 @@ public void testGetThatCompactsInvokesReplace() throws Exception { long longKey = HashUtils.intHashToLong(new Long(42L).hashCode()); 
when(serverStoreProxy.get(longKey)).thenReturn(chain); - ClusteredStore clusteredStore = new ClusteredStore<>(operationsCodec, chainResolver, + ClusteredStore clusteredStore = new ClusteredStore<>(config, operationsCodec, chainResolver, serverStoreProxy, timeSource); clusteredStore.get(42L); verify(serverStoreProxy).replaceAtHead(eq(longKey), eq(chain), isNull()); @@ -211,7 +276,7 @@ public void testGetThatDoesNotCompactsInvokesReplace() throws Exception { long longKey = HashUtils.intHashToLong(new Long(42L).hashCode()); when(serverStoreProxy.get(longKey)).thenReturn(chain); - ClusteredStore clusteredStore = new ClusteredStore<>(operationsCodec, chainResolver, + ClusteredStore clusteredStore = new ClusteredStore<>(config, operationsCodec, chainResolver, serverStoreProxy, timeSource); clusteredStore.get(42L); verify(serverStoreProxy, never()).replaceAtHead(eq(longKey), eq(chain), any(Chain.class)); @@ -235,7 +300,7 @@ public void testContainsKeyThrowsOnlySAE() throws Exception { ServerStoreProxy serverStoreProxy = mock(ServerStoreProxy.class); when(serverStoreProxy.get(anyLong())).thenThrow(new RuntimeException()); TestTimeSource testTimeSource = mock(TestTimeSource.class); - ClusteredStore store = new ClusteredStore<>(codec, chainResolver, serverStoreProxy, testTimeSource); + ClusteredStore store = new ClusteredStore<>(config, codec, chainResolver, serverStoreProxy, testTimeSource); store.containsKey(1L); } @@ -249,28 +314,33 @@ public void testRemove() throws Exception { validateStats(store, EnumSet.of(StoreOperationOutcomes.RemoveOutcome.MISS, StoreOperationOutcomes.RemoveOutcome.REMOVED)); } - @Test(expected = StoreAccessException.class) + @Test public void testRemoveThrowsOnlySAE() throws Exception { @SuppressWarnings("unchecked") OperationsCodec codec = mock(OperationsCodec.class); @SuppressWarnings("unchecked") EternalChainResolver chainResolver = mock(EternalChainResolver.class); ServerStoreProxy serverStoreProxy = mock(ServerStoreProxy.class); - when(serverStoreProxy.get(anyLong())).thenThrow(new RuntimeException()); - TestTimeSource testTimeSource = mock(TestTimeSource.class); - ClusteredStore store = new ClusteredStore<>(codec, chainResolver, serverStoreProxy, testTimeSource); - store.remove(1L); + RuntimeException theException = new RuntimeException(); + when(serverStoreProxy.getAndAppend(anyLong(), any())).thenThrow(theException); + TestTimeSource testTimeSource = new TestTimeSource(); + + ClusteredStore store = new ClusteredStore<>(config, codec, chainResolver, serverStoreProxy, testTimeSource); + assertThatExceptionOfType(StoreAccessException.class) + .isThrownBy(() -> store.remove(1L)) + .withCause(theException); } - @Test(expected = StoreAccessTimeoutException.class) + @Test @SuppressWarnings("unchecked") public void testRemoveTimeout() throws Exception { ServerStoreProxy proxy = mock(ServerStoreProxy.class); OperationsCodec codec = mock(OperationsCodec.class); TimeSource timeSource = mock(TimeSource.class); when(proxy.getAndAppend(anyLong(), isNull())).thenThrow(TimeoutException.class); - ClusteredStore store = new ClusteredStore<>(codec, null, proxy, timeSource); - store.remove(1L); + ClusteredStore store = new ClusteredStore<>(config, codec, null, proxy, timeSource); + + assertTimeoutOccurred(() -> store.remove(1L)); } @Test @@ -300,26 +370,27 @@ public void testClearThrowsOnlySAE() throws Exception { ServerStoreProxy serverStoreProxy = mock(ServerStoreProxy.class); doThrow(new RuntimeException()).when(serverStoreProxy).clear(); TestTimeSource testTimeSource = 
mock(TestTimeSource.class); - ClusteredStore store = new ClusteredStore<>(codec, chainResolver, serverStoreProxy, testTimeSource); + ClusteredStore store = new ClusteredStore<>(config, codec, chainResolver, serverStoreProxy, testTimeSource); store.clear(); } - @Test(expected = StoreAccessTimeoutException.class) + @Test public void testClearTimeout() throws Exception { ServerStoreProxy proxy = mock(ServerStoreProxy.class); @SuppressWarnings("unchecked") OperationsCodec codec = mock(OperationsCodec.class); TimeSource timeSource = mock(TimeSource.class); doThrow(TimeoutException.class).when(proxy).clear(); - ClusteredStore store = new ClusteredStore<>(codec, null, proxy, timeSource); - store.clear(); + ClusteredStore store = new ClusteredStore<>(config, codec, null, proxy, timeSource); + + assertTimeoutOccurred(() -> store.clear()); } @Test public void testPutIfAbsent() throws Exception { - assertThat(store.putIfAbsent(1L, "one"), nullValue()); + assertThat(store.putIfAbsent(1L, "one", b -> {}), nullValue()); validateStats(store, EnumSet.of(StoreOperationOutcomes.PutIfAbsentOutcome.PUT)); - assertThat(store.putIfAbsent(1L, "another one").value(), is("one")); + assertThat(store.putIfAbsent(1L, "another one", b -> {}).get(), is("one")); validateStats(store, EnumSet.of(StoreOperationOutcomes.PutIfAbsentOutcome.PUT, StoreOperationOutcomes.PutIfAbsentOutcome.HIT)); } @@ -332,19 +403,20 @@ public void testPutIfAbsentThrowsOnlySAE() throws Exception { ServerStoreProxy serverStoreProxy = mock(ServerStoreProxy.class); when(serverStoreProxy.get(anyLong())).thenThrow(new RuntimeException()); TestTimeSource testTimeSource = mock(TestTimeSource.class); - ClusteredStore store = new ClusteredStore<>(codec, chainResolver, serverStoreProxy, testTimeSource); - store.putIfAbsent(1L, "one"); + ClusteredStore store = new ClusteredStore<>(config, codec, chainResolver, serverStoreProxy, testTimeSource); + store.putIfAbsent(1L, "one", b -> {}); } - @Test(expected = StoreAccessTimeoutException.class) + @Test @SuppressWarnings("unchecked") public void testPutIfAbsentTimeout() throws Exception { ServerStoreProxy proxy = mock(ServerStoreProxy.class); OperationsCodec codec = mock(OperationsCodec.class); TimeSource timeSource = mock(TimeSource.class); when(proxy.getAndAppend(anyLong(), isNull())).thenThrow(TimeoutException.class); - ClusteredStore store = new ClusteredStore<>(codec, null, proxy, timeSource); - store.putIfAbsent(1L, "one"); + ClusteredStore store = new ClusteredStore<>(config, codec, null, proxy, timeSource); + + assertTimeoutOccurred(() -> store.putIfAbsent(1L, "one", b -> {})); } @Test @@ -368,19 +440,20 @@ public void testConditionalRemoveThrowsOnlySAE() throws Exception { ServerStoreProxy serverStoreProxy = mock(ServerStoreProxy.class); when(serverStoreProxy.get(anyLong())).thenThrow(new RuntimeException()); TestTimeSource testTimeSource = mock(TestTimeSource.class); - ClusteredStore store = new ClusteredStore<>(codec, chainResolver, serverStoreProxy, testTimeSource); + ClusteredStore store = new ClusteredStore<>(config, codec, chainResolver, serverStoreProxy, testTimeSource); store.remove(1L, "one"); } - @Test(expected = StoreAccessTimeoutException.class) + @Test @SuppressWarnings("unchecked") public void testConditionalRemoveTimeout() throws Exception { ServerStoreProxy proxy = mock(ServerStoreProxy.class); OperationsCodec codec = mock(OperationsCodec.class); TimeSource timeSource = mock(TimeSource.class); when(proxy.getAndAppend(anyLong(), isNull())).thenThrow(TimeoutException.class); - 
ClusteredStore<Long, String> store = new ClusteredStore<>(codec, null, proxy, timeSource); - store.remove(1L, "one"); + ClusteredStore<Long, String> store = new ClusteredStore<>(config, codec, null, proxy, timeSource); + + assertTimeoutOccurred(() -> store.remove(1L, "one")); } @Test @@ -388,7 +461,7 @@ public void testReplace() throws Exception { assertThat(store.replace(1L, "one"), nullValue()); validateStats(store, EnumSet.of(StoreOperationOutcomes.ReplaceOutcome.MISS)); store.put(1L, "one"); - assertThat(store.replace(1L, "another one").value(), is("one")); + assertThat(store.replace(1L, "another one").get(), is("one")); validateStats(store, EnumSet.of(StoreOperationOutcomes.ReplaceOutcome.MISS, StoreOperationOutcomes.ReplaceOutcome.REPLACED)); } @@ -401,19 +474,20 @@ public void testReplaceThrowsOnlySAE() throws Exception { ServerStoreProxy serverStoreProxy = mock(ServerStoreProxy.class); when(serverStoreProxy.get(anyLong())).thenThrow(new RuntimeException()); TestTimeSource testTimeSource = mock(TestTimeSource.class); - ClusteredStore<Long, String> store = new ClusteredStore<>(codec, chainResolver, serverStoreProxy, testTimeSource); + ClusteredStore<Long, String> store = new ClusteredStore<>(config, codec, chainResolver, serverStoreProxy, testTimeSource); store.replace(1L, "one"); } - @Test(expected = StoreAccessTimeoutException.class) + @Test @SuppressWarnings("unchecked") public void testReplaceTimeout() throws Exception { ServerStoreProxy proxy = mock(ServerStoreProxy.class); OperationsCodec codec = mock(OperationsCodec.class); TimeSource timeSource = mock(TimeSource.class); when(proxy.getAndAppend(anyLong(), isNull())).thenThrow(TimeoutException.class); - ClusteredStore<Long, String> store = new ClusteredStore<>(codec, null, proxy, timeSource); - store.replace(1L, "one"); + ClusteredStore<Long, String> store = new ClusteredStore<>(config, codec, null, proxy, timeSource); + + assertTimeoutOccurred(() -> store.replace(1L, "one")); } @Test @@ -438,19 +512,20 @@ public void testConditionalReplaceThrowsOnlySAE() throws Exception { ServerStoreProxy serverStoreProxy = mock(ServerStoreProxy.class); when(serverStoreProxy.get(anyLong())).thenThrow(new RuntimeException()); TestTimeSource testTimeSource = mock(TestTimeSource.class); - ClusteredStore<Long, String> store = new ClusteredStore<>(codec, chainResolver, serverStoreProxy, testTimeSource); + ClusteredStore<Long, String> store = new ClusteredStore<>(config, codec, chainResolver, serverStoreProxy, testTimeSource); store.replace(1L, "one", "another one"); } - @Test(expected = StoreAccessTimeoutException.class) + @Test @SuppressWarnings("unchecked") public void testConditionalReplaceTimeout() throws Exception { ServerStoreProxy proxy = mock(ServerStoreProxy.class); OperationsCodec codec = mock(OperationsCodec.class); TimeSource timeSource = mock(TimeSource.class); when(proxy.getAndAppend(anyLong(), isNull())).thenThrow(TimeoutException.class); - ClusteredStore<Long, String> store = new ClusteredStore<>(codec, null, proxy, timeSource); - store.replace(1L, "one", "another one"); + ClusteredStore<Long, String> store = new ClusteredStore<>(config, codec, null, proxy, timeSource); + + assertTimeoutOccurred(() -> store.replace(1L, "one", "another one")); } @Test
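The timeout tests above no longer rely on @Test(expected = StoreAccessTimeoutException.class): that exception type is gone, and a timed-out proxy call now surfaces as a StoreAccessException whose cause is the TimeoutException. The assertTimeoutOccurred helper defined earlier in this file expresses exactly that with AssertJ, which also pins the failure to the store call itself instead of accepting an exception thrown anywhere in the test method:

    assertThatExceptionOfType(StoreAccessException.class)
        .isThrownBy(() -> store.replace(1L, "one"))       // only this call may throw
        .withCauseInstanceOf(TimeoutException.class);     // and a timeout must be the cause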
@@ -462,10 +537,10 @@ public void testBulkComputePutAll() throws Exception { Ehcache.PutAllFunction<Long, String> putAllFunction = new Ehcache.PutAllFunction<>(null, map, null); Map<Long, Store.ValueHolder<String>> valueHolderMap = store.bulkCompute(new HashSet<>(Arrays.asList(1L, 2L)), putAllFunction); - assertThat(valueHolderMap.get(1L).value(), is(map.get(1L))); - assertThat(store.get(1L).value(), is(map.get(1L))); - assertThat(valueHolderMap.get(2L).value(), is(map.get(2L))); - assertThat(store.get(2L).value(), is(map.get(2L))); + assertThat(valueHolderMap.get(1L).get(), is(map.get(1L))); + assertThat(store.get(1L).get(), is(map.get(1L))); + assertThat(valueHolderMap.get(2L).get(), is(map.get(2L))); + assertThat(store.get(2L).get(), is(map.get(2L))); assertThat(putAllFunction.getActualPutCount().get(), is(2)); validateStats(store, EnumSet.of(StoreOperationOutcomes.PutOutcome.PUT)); //outcome of the initial store put } @@ -502,10 +577,10 @@ public void testBulkComputeIfAbsentGetAll() throws Exception { Ehcache.GetAllFunction<Long, String> getAllAllFunction = new Ehcache.GetAllFunction<>(); Map<Long, Store.ValueHolder<String>> valueHolderMap = store.bulkComputeIfAbsent(new HashSet<>(Arrays.asList(1L, 2L)), getAllAllFunction); - assertThat(valueHolderMap.get(1L).value(), is("one")); - assertThat(store.get(1L).value(), is("one")); - assertThat(valueHolderMap.get(2L).value(), is("two")); - assertThat(store.get(2L).value(), is("two")); + assertThat(valueHolderMap.get(1L).get(), is("one")); + assertThat(store.get(1L).get(), is("one")); + assertThat(valueHolderMap.get(2L).get(), is("two")); + assertThat(store.get(2L).get(), is("two")); } @Test(expected = UnsupportedOperationException.class)
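The mechanical .value() to .get() rewrites above track a Store.ValueHolder API change: the value accessor is now get(), in line with the holder behaving as a supplier of its value. Usage after the change, with the types used in these tests:

    Store.ValueHolder<String> holder = store.get(1L);
    if (holder != null) {
      String value = holder.get();   // previously holder.value()
    }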
mock(Result.class); when(result.getValue()).thenReturn("one"); - ResolvedChain resolvedChain = mock(ResolvedChain.class); + ResolvedChain resolvedChain = mock(ResolvedChain.class); when(resolvedChain.getResolvedResult(anyLong())).thenReturn(result); when(resolvedChain.getCompactedChain()).thenReturn(mock(Chain.class)); ServerStoreProxy proxy = mock(ServerStoreProxy.class); when(proxy.getAndAppend(anyLong(), any(ByteBuffer.class))).thenReturn(mock(Chain.class)); - EternalChainResolver resolver = mock(EternalChainResolver.class); + EternalChainResolver resolver = mock(EternalChainResolver.class); when(resolver.resolve(any(Chain.class), anyLong(), anyLong())).thenReturn(resolvedChain); OperationsCodec codec = mock(OperationsCodec.class); when(codec.encode(any())).thenReturn(ByteBuffer.allocate(0)); TimeSource timeSource = mock(TimeSource.class); - ClusteredStore store = new ClusteredStore(codec, resolver, proxy, timeSource); + ClusteredStore store = new ClusteredStore<>(config, codec, resolver, proxy, timeSource); when(resolvedChain.getCompactionCount()).thenReturn(DEFAULT_CHAIN_COMPACTION_THRESHOLD - 1); // less than the default threshold store.replace(1L, "one"); @@ -589,21 +664,21 @@ public void testReplaceReplacesChainOnlyOnCompressionThreshold() throws Exceptio @Test @SuppressWarnings("unchecked") public void testConditionalReplaceReplacesChainOnlyOnCompressionThreshold() throws Exception { - ResolvedChain resolvedChain = mock(ResolvedChain.class); + ResolvedChain resolvedChain = mock(ResolvedChain.class); when(resolvedChain.getResolvedResult(anyLong())).thenReturn(mock(Result.class)); when(resolvedChain.getCompactedChain()).thenReturn(mock(Chain.class)); ServerStoreProxy proxy = mock(ServerStoreProxy.class); when(proxy.getAndAppend(anyLong(), any(ByteBuffer.class))).thenReturn(mock(Chain.class)); - EternalChainResolver resolver = mock(EternalChainResolver.class); + EternalChainResolver resolver = mock(EternalChainResolver.class); when(resolver.resolve(any(Chain.class), anyLong(), anyLong())).thenReturn(resolvedChain); OperationsCodec codec = mock(OperationsCodec.class); when(codec.encode(any())).thenReturn(ByteBuffer.allocate(0)); TimeSource timeSource = mock(TimeSource.class); - ClusteredStore store = new ClusteredStore(codec, resolver, proxy, timeSource); + ClusteredStore store = new ClusteredStore<>(config, codec, resolver, proxy, timeSource); when(resolvedChain.getCompactionCount()).thenReturn(DEFAULT_CHAIN_COMPACTION_THRESHOLD - 1); // less than the default threshold store.replace(1L, "one", "anotherOne"); @@ -625,27 +700,27 @@ public void testCustomCompressionThreshold() throws Exception { try { System.setProperty(CHAIN_COMPACTION_THRESHOLD_PROP, String.valueOf(customThreshold)); - Result result = mock(Result.class); + Result result = mock(Result.class); when(result.getValue()).thenReturn("one"); - ResolvedChain resolvedChain = mock(ResolvedChain.class); + ResolvedChain resolvedChain = mock(ResolvedChain.class); when(resolvedChain.getResolvedResult(anyLong())).thenReturn(result); when(resolvedChain.getCompactedChain()).thenReturn(mock(Chain.class)); ServerStoreProxy proxy = mock(ServerStoreProxy.class); when(proxy.getAndAppend(anyLong(), any(ByteBuffer.class))).thenReturn(mock(Chain.class)); - EternalChainResolver resolver = mock(EternalChainResolver.class); + EternalChainResolver resolver = mock(EternalChainResolver.class); when(resolver.resolve(any(Chain.class), anyLong(), anyLong())).thenReturn(resolvedChain); OperationsCodec codec = mock(OperationsCodec.class); 
@@ -589,21 +664,21 @@ public void testReplaceReplacesChainOnlyOnCompressionThreshold() throws Exceptio @Test @SuppressWarnings("unchecked") public void testConditionalReplaceReplacesChainOnlyOnCompressionThreshold() throws Exception { - ResolvedChain resolvedChain = mock(ResolvedChain.class); + ResolvedChain<Long, String> resolvedChain = mock(ResolvedChain.class); when(resolvedChain.getResolvedResult(anyLong())).thenReturn(mock(Result.class)); when(resolvedChain.getCompactedChain()).thenReturn(mock(Chain.class)); ServerStoreProxy proxy = mock(ServerStoreProxy.class); when(proxy.getAndAppend(anyLong(), any(ByteBuffer.class))).thenReturn(mock(Chain.class)); - EternalChainResolver resolver = mock(EternalChainResolver.class); + EternalChainResolver<Long, String> resolver = mock(EternalChainResolver.class); when(resolver.resolve(any(Chain.class), anyLong(), anyLong())).thenReturn(resolvedChain); OperationsCodec codec = mock(OperationsCodec.class); when(codec.encode(any())).thenReturn(ByteBuffer.allocate(0)); TimeSource timeSource = mock(TimeSource.class); - ClusteredStore store = new ClusteredStore(codec, resolver, proxy, timeSource); + ClusteredStore<Long, String> store = new ClusteredStore<>(config, codec, resolver, proxy, timeSource); when(resolvedChain.getCompactionCount()).thenReturn(DEFAULT_CHAIN_COMPACTION_THRESHOLD - 1); // less than the default threshold store.replace(1L, "one", "anotherOne"); @@ -625,27 +700,27 @@ public void testCustomCompressionThreshold() throws Exception { try { System.setProperty(CHAIN_COMPACTION_THRESHOLD_PROP, String.valueOf(customThreshold)); - Result result = mock(Result.class); + Result<Long, String> result = mock(Result.class); when(result.getValue()).thenReturn("one"); - ResolvedChain resolvedChain = mock(ResolvedChain.class); + ResolvedChain<Long, String> resolvedChain = mock(ResolvedChain.class); when(resolvedChain.getResolvedResult(anyLong())).thenReturn(result); when(resolvedChain.getCompactedChain()).thenReturn(mock(Chain.class)); ServerStoreProxy proxy = mock(ServerStoreProxy.class); when(proxy.getAndAppend(anyLong(), any(ByteBuffer.class))).thenReturn(mock(Chain.class)); - EternalChainResolver resolver = mock(EternalChainResolver.class); + EternalChainResolver<Long, String> resolver = mock(EternalChainResolver.class); when(resolver.resolve(any(Chain.class), anyLong(), anyLong())).thenReturn(resolvedChain); OperationsCodec codec = mock(OperationsCodec.class); when(codec.encode(any())).thenReturn(ByteBuffer.allocate(0)); TimeSource timeSource = mock(TimeSource.class); - ClusteredStore store = new ClusteredStore(codec, resolver, proxy, timeSource); + ClusteredStore<Long, String> store = new ClusteredStore<>(config, codec, resolver, proxy, timeSource); when(resolvedChain.getCompactionCount()).thenReturn(customThreshold - 1); // less than the custom threshold - store.putIfAbsent(1L, "one"); + store.putIfAbsent(1L, "one", b -> {}); verify(proxy, never()).replaceAtHead(anyLong(), any(Chain.class), any(Chain.class)); when(resolvedChain.getCompactionCount()).thenReturn(customThreshold); // equal to the custom threshold
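testCustomCompressionThreshold drives the same behaviour through the CHAIN_COMPACTION_THRESHOLD_PROP system property, set before the store is constructed and presumably cleared in a finally block the hunk elides. The diff does not show how ClusteredStore reads the property; a plausible sketch, using the standard system-property idiom, would be:

    // Assumption: illustrative only, not the actual ClusteredStore initialization.
    int compactionThreshold = Integer.getInteger(CHAIN_COMPACTION_THRESHOLD_PROP, DEFAULT_CHAIN_COMPACTION_THRESHOLD);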
mock(Result.class); when(result.getValue()).thenReturn("foo"); - ResolvedChain resolvedChain = mock(ResolvedChain.class); + ResolvedChain resolvedChain = mock(ResolvedChain.class); when(resolvedChain.getCompactedChain()).thenReturn(mock(Chain.class)); when(resolvedChain.getResolvedResult(anyLong())).thenReturn(result); //simulate a key hit on chain resolution when(resolvedChain.getCompactionCount()).thenReturn(1); @@ -719,14 +794,14 @@ public void testConditionalRemoveReplacesChainOnHits() throws Exception { ServerStoreProxy proxy = mock(ServerStoreProxy.class); when(proxy.getAndAppend(anyLong(), any(ByteBuffer.class))).thenReturn(mock(Chain.class)); - EternalChainResolver resolver = mock(EternalChainResolver.class); + EternalChainResolver resolver = mock(EternalChainResolver.class); when(resolver.resolve(any(Chain.class), anyLong(), anyLong())).thenReturn(resolvedChain); OperationsCodec codec = mock(OperationsCodec.class); when(codec.encode(any())).thenReturn(ByteBuffer.allocate(0)); TimeSource timeSource = mock(TimeSource.class); - ClusteredStore store = new ClusteredStore(codec, resolver, proxy, timeSource); + ClusteredStore store = new ClusteredStore<>(config, codec, resolver, proxy, timeSource); store.remove(1L, "foo"); verify(proxy).replaceAtHead(anyLong(), any(Chain.class), any(Chain.class)); @@ -735,10 +810,10 @@ public void testConditionalRemoveReplacesChainOnHits() throws Exception { @Test @SuppressWarnings("unchecked") public void testConditionalRemoveDoesNotReplaceChainOnKeyMiss() throws Exception { - ResolvedChain resolvedChain = mock(ResolvedChain.class); + ResolvedChain resolvedChain = mock(ResolvedChain.class); when(resolvedChain.getResolvedResult(anyLong())).thenReturn(null); //simulate a key miss on chain resolution - EternalChainResolver resolver = mock(EternalChainResolver.class); + EternalChainResolver resolver = mock(EternalChainResolver.class); when(resolver.resolve(any(Chain.class), anyLong(), anyLong())).thenReturn(resolvedChain); OperationsCodec codec = mock(OperationsCodec.class); @@ -747,7 +822,7 @@ public void testConditionalRemoveDoesNotReplaceChainOnKeyMiss() throws Exception when(proxy.getAndAppend(anyLong(), any(ByteBuffer.class))).thenReturn(mock(Chain.class)); TimeSource timeSource = mock(TimeSource.class); - ClusteredStore store = new ClusteredStore(codec, resolver, proxy, timeSource); + ClusteredStore store = new ClusteredStore<>(config, codec, resolver, proxy, timeSource); store.remove(1L, "foo"); verify(proxy, never()).replaceAtHead(anyLong(), any(Chain.class), any(Chain.class)); @@ -756,13 +831,13 @@ public void testConditionalRemoveDoesNotReplaceChainOnKeyMiss() throws Exception @Test @SuppressWarnings("unchecked") public void testConditionalRemoveDoesNotReplaceChainOnKeyHitValueMiss() throws Exception { - Result result = mock(Result.class); - ResolvedChain resolvedChain = mock(ResolvedChain.class); + Result result = mock(Result.class); + ResolvedChain resolvedChain = mock(ResolvedChain.class); when(resolvedChain.getResolvedResult(anyLong())).thenReturn(result); //simulate a key kit when(result.getValue()).thenReturn("bar"); //but a value miss - EternalChainResolver resolver = mock(EternalChainResolver.class); + EternalChainResolver resolver = mock(EternalChainResolver.class); when(resolver.resolve(any(Chain.class), anyLong(), anyLong())).thenReturn(resolvedChain); OperationsCodec codec = mock(OperationsCodec.class); @@ -771,7 +846,7 @@ public void testConditionalRemoveDoesNotReplaceChainOnKeyHitValueMiss() throws E 
when(proxy.getAndAppend(anyLong(), any(ByteBuffer.class))).thenReturn(mock(Chain.class)); TimeSource timeSource = mock(TimeSource.class); - ClusteredStore store = new ClusteredStore(codec, resolver, proxy, timeSource); + ClusteredStore<Long, String> store = new ClusteredStore<>(config, codec, resolver, proxy, timeSource); store.remove(1L, "foo"); verify(proxy, never()).replaceAtHead(anyLong(), any(Chain.class), any(Chain.class)); @@ -799,7 +874,7 @@ public void testExpirationIsSentToHigherTiers() throws Exception { OperationsCodec codec = mock(OperationsCodec.class); TimeSource timeSource = mock(TimeSource.class); - ClusteredStore<Long, String> store = new ClusteredStore<>(codec, resolver, proxy, timeSource); + ClusteredStore<Long, String> store = new ClusteredStore<>(config, codec, resolver, proxy, timeSource); Store.ValueHolder<String> vh = store.get(1L); @@ -829,7 +904,7 @@ public void testNoExpireIsSentToHigherTiers() throws Exception { OperationsCodec codec = mock(OperationsCodec.class); TimeSource timeSource = mock(TimeSource.class); - ClusteredStore<Long, String> store = new ClusteredStore<>(codec, resolver, proxy, timeSource); + ClusteredStore<Long, String> store = new ClusteredStore<>(config, codec, resolver, proxy, timeSource); Store.ValueHolder<String> vh = store.get(1L); diff --git a/clustered/client/src/test/java/org/ehcache/clustered/client/internal/store/CommonServerStoreProxyTest.java b/clustered/client/src/test/java/org/ehcache/clustered/client/internal/store/CommonServerStoreProxyTest.java index cfd9e0d75e..0f7e9bf24b 100644 --- a/clustered/client/src/test/java/org/ehcache/clustered/client/internal/store/CommonServerStoreProxyTest.java +++ b/clustered/client/src/test/java/org/ehcache/clustered/client/internal/store/CommonServerStoreProxyTest.java @@ -25,6 +25,7 @@ import org.ehcache.impl.serialization.LongSerializer; import org.junit.Test; +import java.nio.ByteBuffer; import java.util.Iterator; import static org.ehcache.clustered.common.internal.store.Util.createPayload; @@ -32,17 +33,21 @@ import static org.ehcache.clustered.common.internal.store.Util.readPayLoad; import static org.hamcrest.Matchers.is; import static org.junit.Assert.assertThat; +import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; public class CommonServerStoreProxyTest extends AbstractServerStoreProxyTest { private static ClusterTierClientEntity createClientEntity(String name) throws Exception { - ClusteredResourcePool resourcePool = ClusteredResourcePoolBuilder.clusteredDedicated(16L, MemoryUnit.MB); + ClusteredResourcePool resourcePool = ClusteredResourcePoolBuilder.clusteredDedicated(8L, MemoryUnit.MB); ServerStoreConfiguration serverStoreConfiguration = new ServerStoreConfiguration(resourcePool.getPoolAllocation(), Long.class .getName(), Long.class.getName(), LongSerializer.class.getName(), LongSerializer.class - .getName(), null); + .getName(), null, false); return createClientEntity(name, serverStoreConfiguration, true); @@ -150,6 +155,27 @@ public void testClear() throws Exception { assertThat(chain.isEmpty(), is(true)); } + @Test + public void testResolveRequestIsProcessedAtThreshold() throws Exception { + ByteBuffer buffer = createPayload(42L); + + ClusterTierClientEntity clientEntity = createClientEntity("testResolveRequestIsProcessed"); + ServerCallback serverCallback = mock(ServerCallback.class); + when(serverCallback.compact(any(Chain.class), any(long.class))).thenReturn(getChain(false, buffer.duplicate()));
CommonServerStoreProxy serverStoreProxy = new CommonServerStoreProxy("testResolveRequestIsProcessed", clientEntity, serverCallback); + + for (int i = 0; i < 8; i++) { + serverStoreProxy.append(1L, buffer.duplicate()); + } + verify(serverCallback, never()).compact(any(Chain.class)); + assertChainHas(serverStoreProxy.get(1L), 42L, 42L, 42L, 42L, 42L, 42L, 42L, 42L); + + //trigger compaction at > 8 entries + serverStoreProxy.append(1L, buffer.duplicate()); + verify(serverCallback).compact(any(Chain.class), any(long.class)); + assertChainHas(serverStoreProxy.get(1L), 42L); + } + private static void assertChainHas(Chain chain, long... payLoads) { Iterator elements = chain.iterator(); for (long payLoad : payLoads) { diff --git a/clustered/client/src/test/java/org/ehcache/clustered/client/internal/store/EventualServerStoreProxyTest.java b/clustered/client/src/test/java/org/ehcache/clustered/client/internal/store/EventualServerStoreProxyTest.java index a4ba9ed54e..7bed2fef30 100644 --- a/clustered/client/src/test/java/org/ehcache/clustered/client/internal/store/EventualServerStoreProxyTest.java +++ b/clustered/client/src/test/java/org/ehcache/clustered/client/internal/store/EventualServerStoreProxyTest.java @@ -44,11 +44,11 @@ public class EventualServerStoreProxyTest extends AbstractServerStoreProxyTest { private static SimpleClusterTierClientEntity createClientEntity(String name, boolean create) throws Exception { - ClusteredResourcePool resourcePool = ClusteredResourcePoolBuilder.clusteredDedicated(16L, MemoryUnit.MB); + ClusteredResourcePool resourcePool = ClusteredResourcePoolBuilder.clusteredDedicated(8L, MemoryUnit.MB); ServerStoreConfiguration serverStoreConfiguration = new ServerStoreConfiguration(resourcePool.getPoolAllocation(), Long.class.getName(), Long.class.getName(), LongSerializer.class.getName(), LongSerializer.class - .getName(), Consistency.EVENTUAL); + .getName(), Consistency.EVENTUAL, false); return createClientEntity(name, serverStoreConfiguration, create); } diff --git a/clustered/client/src/test/java/org/ehcache/clustered/client/internal/store/MultiThreadedStrongServerStoreProxyTest.java b/clustered/client/src/test/java/org/ehcache/clustered/client/internal/store/MultiThreadedStrongServerStoreProxyTest.java new file mode 100644 index 0000000000..2dc34d4c2c --- /dev/null +++ b/clustered/client/src/test/java/org/ehcache/clustered/client/internal/store/MultiThreadedStrongServerStoreProxyTest.java @@ -0,0 +1,103 @@ +/* + * Copyright Terracotta, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.ehcache.clustered.client.internal.store; + +import org.ehcache.clustered.client.config.ClusteredResourcePool; +import org.ehcache.clustered.client.config.builders.ClusteredResourcePoolBuilder; +import org.ehcache.clustered.client.internal.store.ServerStoreProxy.ServerCallback; +import org.ehcache.clustered.common.Consistency; +import org.ehcache.clustered.common.internal.ServerStoreConfiguration; +import org.ehcache.clustered.common.internal.store.Chain; +import org.ehcache.config.units.MemoryUnit; +import org.ehcache.impl.serialization.LongSerializer; +import org.junit.Assert; +import org.junit.Test; + +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; + +import static org.ehcache.clustered.common.internal.store.Util.createPayload; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.core.Is.is; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.mock; + +public class MultiThreadedStrongServerStoreProxyTest extends AbstractServerStoreProxyTest { + + private static final String ENTITY_NAME = "testConcurrentHashInvalidationWithAppend"; + private static final int MAX_WAIT_TIME_SECONDS = 30; + + private static ServerStoreConfiguration getServerStoreConfiguration() throws Exception { + ClusteredResourcePool resourcePool = ClusteredResourcePoolBuilder.clusteredDedicated(4L, MemoryUnit.MB); + + return new ServerStoreConfiguration(resourcePool.getPoolAllocation(), Long.class.getName(), + Long.class.getName(), LongSerializer.class.getName(), LongSerializer.class + .getName(), Consistency.STRONG, false); + } + + @Test + public void testConcurrentHashInvalidationListenerWithAppend() throws Exception { + final AtomicReference invalidatedHash = new AtomicReference<>(); + SimpleClusterTierClientEntity clientEntity1 = createClientEntity(ENTITY_NAME, getServerStoreConfiguration(), true, true); + StrongServerStoreProxy serverStoreProxy1 = new StrongServerStoreProxy(ENTITY_NAME, clientEntity1, mock(ServerCallback.class)); + + ExecutorService executor = Executors.newSingleThreadExecutor(); + CountDownLatch beforeValidationLatch = new CountDownLatch(1); + CountDownLatch afterValidationLatch = new CountDownLatch(1); + executor.submit(() -> { + try { + SimpleClusterTierClientEntity clientEntity2 = createClientEntity(ENTITY_NAME, getServerStoreConfiguration(), false, false); + StrongServerStoreProxy serverStoreProxy2 = new StrongServerStoreProxy(ENTITY_NAME, clientEntity2, new ServerCallback() { + @Override + public void onInvalidateHash(long hash) { + invalidatedHash.set(hash); + } + + @Override + public void onInvalidateAll() { + throw new AssertionError("Should not be called"); + } + + @Override + public Chain compact(Chain chain) { + throw new AssertionError(); + } + }); + // avoid a warning + assertNotNull(serverStoreProxy2); + assertTrue(beforeValidationLatch.await(MAX_WAIT_TIME_SECONDS, TimeUnit.SECONDS)); + clientEntity2.validate(getServerStoreConfiguration()); + afterValidationLatch.countDown(); + } catch (Exception e) { + Assert.fail("Unexpected Exception " + e.getMessage()); + } + }); + + serverStoreProxy1.append(1L, createPayload(1L)); + assertNull(invalidatedHash.get()); + beforeValidationLatch.countDown(); + 
assertTrue(afterValidationLatch.await(MAX_WAIT_TIME_SECONDS, TimeUnit.SECONDS)); + serverStoreProxy1.append(1L, createPayload(1L)); + assertThat(invalidatedHash.get(), is(1L)); + + executor.shutdownNow(); + } +} diff --git a/clustered/client/src/test/java/org/ehcache/clustered/client/internal/store/ReconnectingServerStoreProxyTest.java b/clustered/client/src/test/java/org/ehcache/clustered/client/internal/store/ReconnectingServerStoreProxyTest.java new file mode 100644 index 0000000000..16cfe1768e --- /dev/null +++ b/clustered/client/src/test/java/org/ehcache/clustered/client/internal/store/ReconnectingServerStoreProxyTest.java @@ -0,0 +1,79 @@ +/* + * Copyright Terracotta, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.ehcache.clustered.client.internal.store; + +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; +import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnit; +import org.mockito.junit.MockitoRule; +import org.terracotta.exception.ConnectionClosedException; + +import java.nio.ByteBuffer; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.Mockito.doThrow; + +public class ReconnectingServerStoreProxyTest { + + @Rule + public MockitoRule mockitoRule = MockitoJUnit.rule(); + + @Mock + ServerStoreProxy proxy; + + @Mock + Runnable runnable; + + @Rule + public ExpectedException exception = ExpectedException.none(); + + private final ServerStoreProxyException storeProxyException = new ServerStoreProxyException(new ConnectionClosedException("Connection Closed")); + + @InjectMocks + ReconnectingServerStoreProxy serverStoreProxy; + + @Test + public void testAppend() throws Exception { + + doThrow(storeProxyException).when(proxy).append(anyLong(), any(ByteBuffer.class)); + + exception.expect(ReconnectInProgressException.class); + serverStoreProxy.append(0, ByteBuffer.allocate(2)); + } + + @Test + public void testGetAndAppend() throws Exception { + + doThrow(storeProxyException).when(proxy).getAndAppend(anyLong(), any(ByteBuffer.class)); + + exception.expect(ReconnectInProgressException.class); + serverStoreProxy.getAndAppend(0, ByteBuffer.allocate(2)); + } + + @Test + public void testGet() throws Exception { + + doThrow(storeProxyException).when(proxy).get(anyLong()); + + exception.expect(ReconnectInProgressException.class); + serverStoreProxy.get(0); + } + +} \ No newline at end of file diff --git a/clustered/client/src/test/java/org/ehcache/clustered/client/internal/store/StrongServerStoreProxyTest.java b/clustered/client/src/test/java/org/ehcache/clustered/client/internal/store/StrongServerStoreProxyTest.java index a76b42c651..fab62756a8 100644 --- a/clustered/client/src/test/java/org/ehcache/clustered/client/internal/store/StrongServerStoreProxyTest.java +++ b/clustered/client/src/test/java/org/ehcache/clustered/client/internal/store/StrongServerStoreProxyTest.java @@ -26,12 +26,10 @@ import 
org.junit.Test; import java.util.List; -import java.util.concurrent.Callable; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; -import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; @@ -52,7 +50,7 @@ private static SimpleClusterTierClientEntity createClientEntity(String name, boo ServerStoreConfiguration serverStoreConfiguration = new ServerStoreConfiguration(resourcePool.getPoolAllocation(), Long.class.getName(), Long.class.getName(), LongSerializer.class.getName(), LongSerializer.class - .getName(), Consistency.STRONG); + .getName(), Consistency.STRONG, false); return createClientEntity(name, serverStoreConfiguration, create); } diff --git a/clustered/client/src/test/java/org/ehcache/clustered/client/internal/store/lock/LockManagerImplTest.java b/clustered/client/src/test/java/org/ehcache/clustered/client/internal/store/lock/LockManagerImplTest.java new file mode 100644 index 0000000000..d3c7ec4759 --- /dev/null +++ b/clustered/client/src/test/java/org/ehcache/clustered/client/internal/store/lock/LockManagerImplTest.java @@ -0,0 +1,115 @@ +/* + * Copyright Terracotta, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.ehcache.clustered.client.internal.store.lock; + +import org.ehcache.clustered.client.internal.store.ClusterTierClientEntity; +import org.ehcache.clustered.client.internal.store.ServerStoreProxyException; +import org.ehcache.clustered.common.internal.exceptions.UnknownClusterException; +import org.ehcache.clustered.common.internal.messages.EhcacheEntityResponse; +import org.ehcache.clustered.common.internal.messages.EhcacheEntityResponse.LockSuccess; +import org.ehcache.clustered.common.internal.messages.ServerStoreOpMessage.LockMessage; +import org.ehcache.clustered.common.internal.store.Chain; +import org.ehcache.clustered.common.internal.store.Util; +import org.junit.Test; + +import java.nio.ByteBuffer; +import java.util.concurrent.TimeoutException; + +import static org.ehcache.clustered.common.internal.messages.EhcacheEntityResponse.lockFailure; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; +import static org.junit.Assert.assertThat; +import static org.junit.Assert.fail; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyBoolean; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class LockManagerImplTest { + + @Test + public void testLock() throws Exception { + ClusterTierClientEntity clusterTierClientEntity = mock(ClusterTierClientEntity.class); + + LockManagerImpl lockManager = new LockManagerImpl(clusterTierClientEntity); + + LockSuccess lockSuccess = getLockSuccessResponse(); + + when(clusterTierClientEntity.invokeAndWaitForComplete(any(LockMessage.class), anyBoolean())) + .thenReturn(lockSuccess); + + Chain lock = lockManager.lock(2L); + + assertThat(lock, notNullValue()); + assertThat(lock.length(), is(3)); + + } + + @Test + public void testLockWhenException() throws Exception { + ClusterTierClientEntity clusterTierClientEntity = mock(ClusterTierClientEntity.class); + + LockManagerImpl lockManager = new LockManagerImpl(clusterTierClientEntity); + + when(clusterTierClientEntity.invokeAndWaitForComplete(any(LockMessage.class), anyBoolean())) + .thenThrow(new UnknownClusterException(""), new TimeoutException("timed out test")); + + try { + lockManager.lock(2L); + fail(); + } catch (ServerStoreProxyException sspe) { + assertThat(sspe.getCause(), instanceOf(UnknownClusterException.class)); + } + + try { + lockManager.lock(2L); + fail(); + } catch (TimeoutException e) { + assertThat(e.getMessage(), is("timed out test")); + } + + } + + @Test + public void testLockWhenFailure() throws Exception { + ClusterTierClientEntity clusterTierClientEntity = mock(ClusterTierClientEntity.class); + + LockManagerImpl lockManager = new LockManagerImpl(clusterTierClientEntity); + + LockSuccess lockSuccess = getLockSuccessResponse(); + + when(clusterTierClientEntity.invokeAndWaitForComplete(any(LockMessage.class), anyBoolean())) + .thenReturn(lockFailure(), lockFailure(), lockFailure(), lockSuccess); + + Chain lock = lockManager.lock(2L); + + assertThat(lock, notNullValue()); + assertThat(lock.length(), is(3)); + } + + private LockSuccess getLockSuccessResponse() { + ByteBuffer[] buffers = new ByteBuffer[3]; + for (int i = 1; i <= 3; i++) { + buffers[i-1] = Util.createPayload(i); + } + + Chain chain = Util.getChain(false, buffers); + + return EhcacheEntityResponse.lockSuccess(chain); + } + +} \ No newline at end of file diff --git 
a/clustered/client/src/test/java/org/ehcache/clustered/client/internal/store/lock/LockRetentionDuringFailoverTest.java b/clustered/client/src/test/java/org/ehcache/clustered/client/internal/store/lock/LockRetentionDuringFailoverTest.java new file mode 100644 index 0000000000..3f26f4d520 --- /dev/null +++ b/clustered/client/src/test/java/org/ehcache/clustered/client/internal/store/lock/LockRetentionDuringFailoverTest.java @@ -0,0 +1,154 @@ +/* + * Copyright Terracotta, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.ehcache.clustered.client.internal.store.lock; + +import org.ehcache.Cache; +import org.ehcache.CacheManager; +import org.ehcache.clustered.client.internal.ClusterTierManagerClientEntityService; +import org.ehcache.clustered.client.internal.UnitTestConnectionService; +import org.ehcache.clustered.client.internal.lock.VoltronReadWriteLockEntityClientService; +import org.ehcache.clustered.client.internal.store.ClusterTierClientEntityService; +import org.ehcache.clustered.lock.server.VoltronReadWriteLockServerEntityService; +import org.ehcache.clustered.server.ObservableEhcacheServerEntityService; +import org.ehcache.clustered.server.store.ObservableClusterTierServerEntityService; +import org.ehcache.config.CacheConfiguration; +import org.ehcache.config.builders.CacheConfigurationBuilder; +import org.ehcache.config.builders.CacheManagerBuilder; +import org.ehcache.impl.internal.concurrent.ConcurrentHashMap; +import org.ehcache.spi.loaderwriter.CacheLoaderWriter; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.terracotta.offheapresource.OffHeapResourcesProvider; +import org.terracotta.offheapresource.config.MemoryUnit; +import org.terracotta.passthrough.PassthroughClusterControl; +import org.terracotta.passthrough.PassthroughTestHelpers; + +import java.net.URI; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; + +import static org.ehcache.clustered.client.config.builders.ClusteredResourcePoolBuilder.clusteredDedicated; +import static org.ehcache.clustered.client.config.builders.ClusteringServiceConfigurationBuilder.cluster; +import static org.ehcache.clustered.client.internal.UnitTestConnectionService.getOffheapResourcesType; +import static org.ehcache.config.builders.ResourcePoolsBuilder.newResourcePoolsBuilder; +import static org.hamcrest.Matchers.is; +import static org.junit.Assert.assertThat; + +public class LockRetentionDuringFailoverTest { + + private static final String STRIPENAME = "stripe"; + private static final String STRIPE_URI = "passthrough://" + STRIPENAME; + + private PassthroughClusterControl clusterControl; + + private CountDownLatch latch; + private LatchedLoaderWriter loaderWriter; + private Cache<Long, String> cache; + + @Before + public void setUp() throws Exception { + this.clusterControl = PassthroughTestHelpers.createActivePassive(STRIPENAME, + server -> {
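// each server-side entity service is registered together with its client-side counterpart: cluster tier manager, cluster tier, and read-write lock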
server.registerServerEntityService(new ObservableEhcacheServerEntityService()); + server.registerClientEntityService(new ClusterTierManagerClientEntityService()); + server.registerServerEntityService(new ObservableClusterTierServerEntityService()); + server.registerClientEntityService(new ClusterTierClientEntityService()); + server.registerServerEntityService(new VoltronReadWriteLockServerEntityService()); + server.registerClientEntityService(new VoltronReadWriteLockEntityClientService()); + server.registerExtendedConfiguration(new OffHeapResourcesProvider(getOffheapResourcesType("test", 32, MemoryUnit.MB))); + + UnitTestConnectionService.addServerToStripe(STRIPENAME, server); + } + ); + + clusterControl.waitForActive(); + clusterControl.waitForRunningPassivesInStandby(); + + this.latch = new CountDownLatch(1); + this.loaderWriter = new LatchedLoaderWriter(latch); + + CacheConfiguration<Long, String> config = CacheConfigurationBuilder + .newCacheConfigurationBuilder(Long.class, String.class, + newResourcePoolsBuilder() + .with(clusteredDedicated("test", 2, org.ehcache.config.units.MemoryUnit.MB))) + .withLoaderWriter(loaderWriter) + .build(); + + CacheManager cacheManager = CacheManagerBuilder.newCacheManagerBuilder().with(cluster(URI.create(STRIPE_URI)).autoCreate()) + .withCache("cache-1", config) + .build(true); + + cache = cacheManager.getCache("cache-1", Long.class, String.class); + + } + + @After + public void tearDown() throws Exception { + UnitTestConnectionService.removeStripe(STRIPENAME); + clusterControl.tearDown(); + } + + @Test + public void testLockRetentionDuringFailover() throws Exception { + + ExecutorService executorService = Executors.newFixedThreadPool(1); + Future<?> putFuture = executorService.submit(() -> cache.put(1L, "one")); + + clusterControl.terminateActive(); + clusterControl.waitForActive(); + + assertThat(loaderWriter.backingMap.isEmpty(), is(true)); + + latch.countDown(); + + putFuture.get(); + + assertThat(loaderWriter.backingMap.get(1L), is("one")); + + } + + private static class LatchedLoaderWriter implements CacheLoaderWriter<Long, String> { + + ConcurrentHashMap<Long, String> backingMap = new ConcurrentHashMap<>(); + private final CountDownLatch latch; + + LatchedLoaderWriter(CountDownLatch latch) { + this.latch = latch; + } + + @Override + public String load(Long key) throws Exception { + latch.await(); + return backingMap.get(key); + } + + @Override + public void write(Long key, String value) throws Exception { + latch.await(); + backingMap.put(key, value); + } + + @Override + public void delete(Long key) throws Exception { + latch.await(); + backingMap.remove(key); + } + } + +} diff --git a/clustered/client/src/test/java/org/ehcache/clustered/client/replication/ReplicationUtil.java b/clustered/client/src/test/java/org/ehcache/clustered/client/replication/ReplicationUtil.java index 99ed6e65ab..c7f6ab26d0 100644 --- a/clustered/client/src/test/java/org/ehcache/clustered/client/replication/ReplicationUtil.java +++ b/clustered/client/src/test/java/org/ehcache/clustered/client/replication/ReplicationUtil.java @@ -52,6 +52,6 @@ public static ServerStoreConfiguration getServerStoreConfiguration(String resour ClusteredResourcePool resourcePool = ClusteredResourcePoolBuilder.clusteredDedicated(resourceName, 8, MB); return new ServerStoreConfiguration(resourcePool.getPoolAllocation(), String.class.getName(), String.class.getName(), CompactJavaSerializer.class.getName(), CompactJavaSerializer.class - .getName(), Consistency.STRONG); + .getName(), Consistency.STRONG, false); } } diff --git
a/clustered/client/src/test/java/org/ehcache/clustered/client/internal/store/operations/BaseKeyValueOperationTest.java b/clustered/client/src/test/java/org/ehcache/clustered/common/internal/store/operations/BaseKeyValueOperationTest.java similarity index 93% rename from clustered/client/src/test/java/org/ehcache/clustered/client/internal/store/operations/BaseKeyValueOperationTest.java rename to clustered/client/src/test/java/org/ehcache/clustered/common/internal/store/operations/BaseKeyValueOperationTest.java index f173613c4a..5d8d19ba98 100644 --- a/clustered/client/src/test/java/org/ehcache/clustered/client/internal/store/operations/BaseKeyValueOperationTest.java +++ b/clustered/client/src/test/java/org/ehcache/clustered/common/internal/store/operations/BaseKeyValueOperationTest.java @@ -14,7 +14,7 @@ * limitations under the License. */ -package org.ehcache.clustered.client.internal.store.operations; +package org.ehcache.clustered.common.internal.store.operations; import org.ehcache.clustered.client.TestTimeSource; import org.ehcache.impl.serialization.LongSerializer; @@ -24,9 +24,9 @@ import java.nio.ByteBuffer; -import static org.ehcache.clustered.client.internal.store.operations.Operation.BYTE_SIZE_BYTES; -import static org.ehcache.clustered.client.internal.store.operations.Operation.INT_SIZE_BYTES; -import static org.ehcache.clustered.client.internal.store.operations.Operation.LONG_SIZE_BYTES; +import static org.ehcache.clustered.common.internal.store.operations.Operation.BYTE_SIZE_BYTES; +import static org.ehcache.clustered.common.internal.store.operations.Operation.INT_SIZE_BYTES; +import static org.ehcache.clustered.common.internal.store.operations.Operation.LONG_SIZE_BYTES; import static org.junit.Assert.*; public abstract class BaseKeyValueOperationTest { @@ -98,4 +98,4 @@ public void testDecodeThrowsOnInvalidType() throws Exception { ByteBuffer buffer = ByteBuffer.wrap(new byte[] {2}); getNewOperation(buffer, keySerializer, valueSerializer); } -} \ No newline at end of file +} diff --git a/clustered/client/src/test/java/org/ehcache/clustered/client/internal/store/operations/ConditionalRemoveOperationTest.java b/clustered/client/src/test/java/org/ehcache/clustered/common/internal/store/operations/ConditionalRemoveOperationTest.java similarity index 96% rename from clustered/client/src/test/java/org/ehcache/clustered/client/internal/store/operations/ConditionalRemoveOperationTest.java rename to clustered/client/src/test/java/org/ehcache/clustered/common/internal/store/operations/ConditionalRemoveOperationTest.java index b28879bc94..40c97f376a 100644 --- a/clustered/client/src/test/java/org/ehcache/clustered/client/internal/store/operations/ConditionalRemoveOperationTest.java +++ b/clustered/client/src/test/java/org/ehcache/clustered/common/internal/store/operations/ConditionalRemoveOperationTest.java @@ -14,7 +14,7 @@ * limitations under the License. 
*/ -package org.ehcache.clustered.client.internal.store.operations; +package org.ehcache.clustered.common.internal.store.operations; import org.ehcache.spi.serialization.Serializer; import org.junit.Test; diff --git a/clustered/client/src/test/java/org/ehcache/clustered/client/internal/store/operations/ConditionalReplaceOperationTest.java b/clustered/client/src/test/java/org/ehcache/clustered/common/internal/store/operations/ConditionalReplaceOperationTest.java similarity index 95% rename from clustered/client/src/test/java/org/ehcache/clustered/client/internal/store/operations/ConditionalReplaceOperationTest.java rename to clustered/client/src/test/java/org/ehcache/clustered/common/internal/store/operations/ConditionalReplaceOperationTest.java index f536417a86..d512e4838f 100644 --- a/clustered/client/src/test/java/org/ehcache/clustered/client/internal/store/operations/ConditionalReplaceOperationTest.java +++ b/clustered/client/src/test/java/org/ehcache/clustered/common/internal/store/operations/ConditionalReplaceOperationTest.java @@ -14,7 +14,7 @@ * limitations under the License. */ -package org.ehcache.clustered.client.internal.store.operations; +package org.ehcache.clustered.common.internal.store.operations; import org.ehcache.clustered.client.TestTimeSource; import org.ehcache.impl.serialization.LongSerializer; @@ -24,9 +24,9 @@ import java.nio.ByteBuffer; -import static org.ehcache.clustered.client.internal.store.operations.Operation.BYTE_SIZE_BYTES; -import static org.ehcache.clustered.client.internal.store.operations.Operation.INT_SIZE_BYTES; -import static org.ehcache.clustered.client.internal.store.operations.Operation.LONG_SIZE_BYTES; +import static org.ehcache.clustered.common.internal.store.operations.Operation.BYTE_SIZE_BYTES; +import static org.ehcache.clustered.common.internal.store.operations.Operation.INT_SIZE_BYTES; +import static org.ehcache.clustered.common.internal.store.operations.Operation.LONG_SIZE_BYTES; import static org.hamcrest.core.Is.is; import static org.junit.Assert.*; diff --git a/clustered/client/src/test/java/org/ehcache/clustered/client/internal/store/operations/EternalChainResolverTest.java b/clustered/client/src/test/java/org/ehcache/clustered/common/internal/store/operations/EternalChainResolverTest.java similarity index 98% rename from clustered/client/src/test/java/org/ehcache/clustered/client/internal/store/operations/EternalChainResolverTest.java rename to clustered/client/src/test/java/org/ehcache/clustered/common/internal/store/operations/EternalChainResolverTest.java index 411dd815a3..a2b03aa605 100644 --- a/clustered/client/src/test/java/org/ehcache/clustered/client/internal/store/operations/EternalChainResolverTest.java +++ b/clustered/client/src/test/java/org/ehcache/clustered/common/internal/store/operations/EternalChainResolverTest.java @@ -14,11 +14,12 @@ * limitations under the License. 
*/ -package org.ehcache.clustered.client.internal.store.operations; +package org.ehcache.clustered.common.internal.store.operations; import org.ehcache.clustered.client.internal.store.ChainBuilder; import org.ehcache.clustered.client.internal.store.ResolvedChain; -import org.ehcache.clustered.client.internal.store.operations.codecs.OperationsCodec; +import org.ehcache.clustered.client.internal.store.operations.EternalChainResolver; +import org.ehcache.clustered.common.internal.store.operations.codecs.OperationsCodec; import org.ehcache.clustered.common.internal.store.Chain; import org.ehcache.clustered.common.internal.store.Element; import org.ehcache.impl.serialization.LongSerializer; diff --git a/clustered/client/src/test/java/org/ehcache/clustered/client/internal/store/operations/ExpiryChainResolverExpiryTest.java b/clustered/client/src/test/java/org/ehcache/clustered/common/internal/store/operations/ExpiryChainResolverExpiryTest.java similarity index 84% rename from clustered/client/src/test/java/org/ehcache/clustered/client/internal/store/operations/ExpiryChainResolverExpiryTest.java rename to clustered/client/src/test/java/org/ehcache/clustered/common/internal/store/operations/ExpiryChainResolverExpiryTest.java index ccf230c441..f1468cb6c4 100644 --- a/clustered/client/src/test/java/org/ehcache/clustered/client/internal/store/operations/ExpiryChainResolverExpiryTest.java +++ b/clustered/client/src/test/java/org/ehcache/clustered/common/internal/store/operations/ExpiryChainResolverExpiryTest.java @@ -14,26 +14,25 @@ * limitations under the License. */ -package org.ehcache.clustered.client.internal.store.operations; +package org.ehcache.clustered.common.internal.store.operations; -import org.ehcache.ValueSupplier; import org.ehcache.clustered.client.TestTimeSource; import org.ehcache.clustered.client.internal.store.ChainBuilder; import org.ehcache.clustered.client.internal.store.ResolvedChain; -import org.ehcache.clustered.client.internal.store.operations.codecs.OperationsCodec; +import org.ehcache.clustered.client.internal.store.operations.ExpiryChainResolver; +import org.ehcache.clustered.common.internal.store.operations.codecs.OperationsCodec; import org.ehcache.clustered.common.internal.store.Chain; import org.ehcache.clustered.common.internal.store.Element; -import org.ehcache.expiry.Duration; -import org.ehcache.expiry.Expiry; +import org.ehcache.expiry.ExpiryPolicy; import org.ehcache.impl.serialization.LongSerializer; import org.ehcache.impl.serialization.StringSerializer; import org.junit.Before; import org.junit.Test; import org.mockito.InOrder; +import java.time.Duration; import java.util.ArrayList; import java.util.List; -import java.util.concurrent.TimeUnit; import static org.hamcrest.Matchers.nullValue; import static org.junit.Assert.assertThat; @@ -63,10 +62,10 @@ public void initialSetup() { @Test @SuppressWarnings("unchecked") public void testGetExpiryForAccessIsIgnored() { - Expiry<Long, String> expiry = mock(Expiry.class); + ExpiryPolicy<Long, String> expiry = mock(ExpiryPolicy.class); ExpiryChainResolver<Long, String> chainResolver = new ExpiryChainResolver<>(codec, expiry); - when(expiry.getExpiryForCreation(anyLong(), anyString())).thenReturn(Duration.INFINITE); + when(expiry.getExpiryForCreation(anyLong(), anyString())).thenReturn(ExpiryPolicy.INFINITE); List<Operation<Long, String>> list = new ArrayList<>(); list.add(new PutOperation<>(1L, "One", timeSource.getTimeMillis())); @@ -76,9 +75,9 @@ public void testGetExpiryForAccessIsIgnored() { ResolvedChain<Long, String> resolvedChain = chainResolver.resolve(chain, 1L,
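/* the resolution timestamp comes from the test time source and drives all expiry decisions */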
timeSource.getTimeMillis()); - verify(expiry, times(0)).getExpiryForAccess(anyLong(), any(ValueSupplier.class)); + verify(expiry, times(0)).getExpiryForAccess(anyLong(), any()); verify(expiry, times(1)).getExpiryForCreation(anyLong(), anyString()); - verify(expiry, times(1)).getExpiryForUpdate(anyLong(), any(ValueSupplier.class), anyString()); + verify(expiry, times(1)).getExpiryForUpdate(anyLong(), any(), anyString()); assertThat(resolvedChain.isCompacted(), is(true)); } @@ -86,10 +85,10 @@ public void testGetExpiryForAccessIsIgnored() { @Test @SuppressWarnings("unchecked") public void testGetExpiryForCreationIsInvokedOnlyOnce() { - Expiry<Long, String> expiry = mock(Expiry.class); + ExpiryPolicy<Long, String> expiry = mock(ExpiryPolicy.class); ExpiryChainResolver<Long, String> chainResolver = new ExpiryChainResolver<>(codec, expiry); - when(expiry.getExpiryForCreation(anyLong(), anyString())).thenReturn(Duration.INFINITE); + when(expiry.getExpiryForCreation(anyLong(), anyString())).thenReturn(ExpiryPolicy.INFINITE); List<Operation<Long, String>> list = new ArrayList<>(); list.add(new PutOperation<>(1L, "One", timeSource.getTimeMillis())); @@ -104,7 +103,7 @@ public void testGetExpiryForCreationIsInvokedOnlyOnce() { InOrder inOrder = inOrder(expiry); inOrder.verify(expiry, times(1)).getExpiryForCreation(anyLong(), anyString()); - inOrder.verify(expiry, times(3)).getExpiryForUpdate(anyLong(), any(ValueSupplier.class), anyString()); + inOrder.verify(expiry, times(3)).getExpiryForUpdate(anyLong(), any(), anyString()); assertThat(resolvedChain.isCompacted(), is(true)); } @@ -112,10 +111,10 @@ public void testGetExpiryForCreationIsInvokedOnlyOnce() { @Test @SuppressWarnings("unchecked") public void testGetExpiryForCreationIsNotInvokedForReplacedChains() { - Expiry<Long, String> expiry = mock(Expiry.class); + ExpiryPolicy<Long, String> expiry = mock(ExpiryPolicy.class); ExpiryChainResolver<Long, String> chainResolver = new ExpiryChainResolver<>(codec, expiry); - when(expiry.getExpiryForCreation(anyLong(), anyString())).thenReturn(Duration.INFINITE); + when(expiry.getExpiryForCreation(anyLong(), anyString())).thenReturn(ExpiryPolicy.INFINITE); List<Operation<Long, String>> list = new ArrayList<>(); list.add(new PutOperation<>(1L, "Replaced", -10L)); @@ -127,7 +126,7 @@ public void testGetExpiryForCreationIsNotInvokedForReplacedChains() { ResolvedChain<Long, String> resolvedChain = chainResolver.resolve(chain, 1L, timeSource.getTimeMillis()); verify(expiry, times(0)).getExpiryForCreation(anyLong(), anyString()); - verify(expiry, times(3)).getExpiryForUpdate(anyLong(), any(ValueSupplier.class), anyString()); + verify(expiry, times(3)).getExpiryForUpdate(anyLong(), any(), anyString()); assertThat(resolvedChain.isCompacted(), is(true)); } @@ -136,10 +135,10 @@ public void testGetExpiryForCreationIsNotInvokedForReplacedChains() { @Test @SuppressWarnings("unchecked") public void testGetExpiryForCreationIsInvokedAfterRemoveOperations() { - Expiry<Long, String> expiry = mock(Expiry.class); + ExpiryPolicy<Long, String> expiry = mock(ExpiryPolicy.class); ExpiryChainResolver<Long, String> chainResolver = new ExpiryChainResolver<>(codec, expiry); - when(expiry.getExpiryForCreation(anyLong(), anyString())).thenReturn(Duration.INFINITE); + when(expiry.getExpiryForCreation(anyLong(), anyString())).thenReturn(ExpiryPolicy.INFINITE); List<Operation<Long, String>> list = new ArrayList<>(); list.add(new PutOperation<>(1L, "Replaced", 10L)); @@ -153,15 +152,15 @@ public void testGetExpiryForCreationIsInvokedAfterRemoveOperations() { InOrder inOrder = inOrder(expiry); - verify(expiry, times(0)).getExpiryForAccess(anyLong(), any(ValueSupplier.class)); - inOrder.verify(expiry, times(1)).getExpiryForUpdate(anyLong(),
any(ValueSupplier.class), anyString()); + verify(expiry, times(0)).getExpiryForAccess(anyLong(), any()); + inOrder.verify(expiry, times(1)).getExpiryForUpdate(anyLong(), any(), anyString()); inOrder.verify(expiry, times(1)).getExpiryForCreation(anyLong(), anyString()); assertThat(resolvedChain.isCompacted(), is(true)); reset(expiry); - when(expiry.getExpiryForCreation(anyLong(), anyString())).thenReturn(Duration.INFINITE); + when(expiry.getExpiryForCreation(anyLong(), anyString())).thenReturn(ExpiryPolicy.INFINITE); list = new ArrayList<>(); list.add(new PutOperation<>(1L, "One", timeSource.getTimeMillis())); @@ -175,9 +174,9 @@ public void testGetExpiryForCreationIsInvokedAfterRemoveOperations() { inOrder = inOrder(expiry); - verify(expiry, times(0)).getExpiryForAccess(anyLong(), any(ValueSupplier.class)); + verify(expiry, times(0)).getExpiryForAccess(anyLong(), any()); inOrder.verify(expiry, times(1)).getExpiryForCreation(anyLong(), anyString()); - inOrder.verify(expiry, times(1)).getExpiryForUpdate(anyLong(), any(ValueSupplier.class), anyString()); + inOrder.verify(expiry, times(1)).getExpiryForUpdate(anyLong(), any(), anyString()); inOrder.verify(expiry, times(1)).getExpiryForCreation(anyLong(), anyString()); assertThat(resolvedChain.isCompacted(), is(true)); @@ -186,7 +185,7 @@ public void testGetExpiryForCreationIsInvokedAfterRemoveOperations() { @Test @SuppressWarnings("unchecked") public void testNullGetExpiryForCreation() { - Expiry<Long, String> expiry = mock(Expiry.class); + ExpiryPolicy<Long, String> expiry = mock(ExpiryPolicy.class); ExpiryChainResolver<Long, String> chainResolver = new ExpiryChainResolver<>(codec, expiry); when(expiry.getExpiryForCreation(anyLong(), anyString())).thenReturn(null); @@ -196,7 +195,7 @@ public void testNullGetExpiryForCreation() { Chain chain = getChainFromOperations(list); - ResolvedChain resolvedChain = chainResolver.resolve(chain, 1L, timeSource.getTimeMillis()); + ResolvedChain<Long, String> resolvedChain = chainResolver.resolve(chain, 1L, timeSource.getTimeMillis()); assertTrue(resolvedChain.getCompactedChain().isEmpty()); assertThat(resolvedChain.isCompacted(), is(true)); @@ -205,10 +204,10 @@ public void testNullGetExpiryForCreation() { @Test @SuppressWarnings("unchecked") public void testNullGetExpiryForUpdate() { - Expiry<Long, String> expiry = mock(Expiry.class); + ExpiryPolicy<Long, String> expiry = mock(ExpiryPolicy.class); ExpiryChainResolver<Long, String> chainResolver = new ExpiryChainResolver<>(codec, expiry); - when(expiry.getExpiryForUpdate(anyLong(), any(ValueSupplier.class), anyString())).thenReturn(null); + when(expiry.getExpiryForUpdate(anyLong(), any(), anyString())).thenReturn(null); List<Operation<Long, String>> list = new ArrayList<>(); list.add(new PutOperation<>(1L, "Replaced", -10L)); @@ -226,10 +225,10 @@ public void testNullGetExpiryForUpdate() { @Test @SuppressWarnings("unchecked") public void testGetExpiryForUpdateUpdatesExpirationTimeStamp() { - Expiry<Long, String> expiry = mock(Expiry.class); + ExpiryPolicy<Long, String> expiry = mock(ExpiryPolicy.class); ExpiryChainResolver<Long, String> chainResolver = new ExpiryChainResolver<>(codec, expiry); - when(expiry.getExpiryForUpdate(anyLong(), any(ValueSupplier.class), anyString())).thenReturn(new Duration(2L, TimeUnit.MILLISECONDS)); + when(expiry.getExpiryForUpdate(anyLong(), any(), anyString())).thenReturn(Duration.ofMillis(2L)); List<Operation<Long, String>> list = new ArrayList<>(); list.add(new PutOperation<>(1L, "Replaced", -10L)); @@ -247,10 +246,10 @@ public void testGetExpiryForUpdateUpdatesExpirationTimeStamp() { @Test @SuppressWarnings("unchecked") public void testExpiryThrowsException() { - Expiry<Long, String> expiry = mock(Expiry.class); +
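// java.time-based ExpiryPolicy supersedes the deprecated Expiry and org.ehcache.expiry.Duration pair throughout these tests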
ExpiryPolicy<Long, String> expiry = mock(ExpiryPolicy.class); ExpiryChainResolver<Long, String> chainResolver = new ExpiryChainResolver<>(codec, expiry); - when(expiry.getExpiryForUpdate(anyLong(), any(ValueSupplier.class), anyString())).thenThrow(new RuntimeException("Test Update Expiry")); + when(expiry.getExpiryForUpdate(anyLong(), any(), anyString())).thenThrow(new RuntimeException("Test Update Expiry")); when(expiry.getExpiryForCreation(anyLong(), anyString())).thenThrow(new RuntimeException("Test Create Expiry")); List<Operation<Long, String>> list = new ArrayList<>(); diff --git a/clustered/client/src/test/java/org/ehcache/clustered/client/internal/store/operations/ExpiryChainResolverTest.java b/clustered/client/src/test/java/org/ehcache/clustered/common/internal/store/operations/ExpiryChainResolverTest.java similarity index 91% rename from clustered/client/src/test/java/org/ehcache/clustered/client/internal/store/operations/ExpiryChainResolverTest.java rename to clustered/client/src/test/java/org/ehcache/clustered/common/internal/store/operations/ExpiryChainResolverTest.java index d47ea2b61e..4bf85351b1 100644 --- a/clustered/client/src/test/java/org/ehcache/clustered/client/internal/store/operations/ExpiryChainResolverTest.java +++ b/clustered/client/src/test/java/org/ehcache/clustered/common/internal/store/operations/ExpiryChainResolverTest.java @@ -14,15 +14,15 @@ * limitations under the License. */ -package org.ehcache.clustered.client.internal.store.operations; +package org.ehcache.clustered.common.internal.store.operations; import org.ehcache.clustered.client.internal.store.ChainBuilder; import org.ehcache.clustered.client.internal.store.ResolvedChain; -import org.ehcache.clustered.client.internal.store.operations.codecs.OperationsCodec; +import org.ehcache.clustered.client.internal.store.operations.ExpiryChainResolver; +import org.ehcache.clustered.common.internal.store.operations.codecs.OperationsCodec; import org.ehcache.clustered.common.internal.store.Chain; import org.ehcache.clustered.common.internal.store.Element; -import org.ehcache.expiry.Duration; -import org.ehcache.expiry.Expirations; +import org.ehcache.config.builders.ExpiryPolicyBuilder; import org.ehcache.impl.serialization.LongSerializer; import org.ehcache.impl.serialization.StringSerializer; import org.hamcrest.Description; @@ -31,9 +31,9 @@ import org.junit.Test; import java.nio.ByteBuffer; +import java.time.Duration; import java.util.concurrent.TimeUnit; -import static org.ehcache.expiry.Expirations.timeToIdleExpiration; import static org.hamcrest.Matchers.emptyIterable; import static org.hamcrest.Matchers.is; import static org.hamcrest.collection.IsIterableContainingInAnyOrder.containsInAnyOrder; @@ -58,7 +58,7 @@ public void testResolveMaintainsOtherKeysInOrder() throws Exception { new PutOperation<>(2L, "Suresh", 0L), new PutOperation<>(2L, "Mathew", 0L)); - ExpiryChainResolver resolver = new ExpiryChainResolver<>(codec, Expirations.noExpiration()); + ExpiryChainResolver resolver = new ExpiryChainResolver<>(codec, ExpiryPolicyBuilder.noExpiration()); ResolvedChain resolvedChain = resolver.resolve(chain, 1L, 0L); Result result = resolvedChain.getResolvedResult(1L); assertEquals(expected, result); @@ -75,7 +75,7 @@ @Test public void testResolveEmptyChain() throws Exception { Chain chain = getChainFromOperations(); - ExpiryChainResolver resolver = new ExpiryChainResolver<>(codec, Expirations.noExpiration()); + ExpiryChainResolver resolver = new ExpiryChainResolver<>(codec,
ExpiryPolicyBuilder.noExpiration()); ResolvedChain resolvedChain = resolver.resolve(chain, 1L, 0L); Result result = resolvedChain.getResolvedResult(1L); assertNull(result); @@ -90,7 +90,7 @@ public void testResolveChainWithNonExistentKey() throws Exception { new PutOperation<>(2L, "Suresh", 0L), new PutOperation<>(2L, "Mathew", 0L)); - ExpiryChainResolver resolver = new ExpiryChainResolver<>(codec, Expirations.noExpiration()); + ExpiryChainResolver resolver = new ExpiryChainResolver<>(codec, ExpiryPolicyBuilder.noExpiration()); ResolvedChain resolvedChain = resolver.resolve(chain, 3L, 0L); Result result = resolvedChain.getResolvedResult(3L); assertNull(result); @@ -102,7 +102,7 @@ public void testResolveSinglePut() throws Exception { Operation expected = new PutOperation<>(1L, "Albin", 0L); Chain chain = getChainFromOperations(expected); - ExpiryChainResolver resolver = new ExpiryChainResolver<>(codec, Expirations.noExpiration()); + ExpiryChainResolver resolver = new ExpiryChainResolver<>(codec, ExpiryPolicyBuilder.noExpiration()); ResolvedChain resolvedChain = resolver.resolve(chain, 1L, 0L); Result result = resolvedChain.getResolvedResult(1L); assertEquals(expected, result); @@ -118,7 +118,7 @@ public void testResolvePutsOnly() throws Exception { new PutOperation<>(1L, "Suresh", 0L), new PutOperation<>(1L, "Mathew", 0L)); - ExpiryChainResolver resolver = new ExpiryChainResolver<>(codec, Expirations.noExpiration()); + ExpiryChainResolver resolver = new ExpiryChainResolver<>(codec, ExpiryPolicyBuilder.noExpiration()); ResolvedChain resolvedChain = resolver.resolve(chain, 1L, 0L); Result result = resolvedChain.getResolvedResult(1L); assertEquals(expected, result); @@ -130,7 +130,7 @@ public void testResolvePutsOnly() throws Exception { public void testResolveSingleRemove() throws Exception { Chain chain = getChainFromOperations(new RemoveOperation<>(1L, 0L)); - ExpiryChainResolver resolver = new ExpiryChainResolver<>(codec, Expirations.noExpiration()); + ExpiryChainResolver resolver = new ExpiryChainResolver<>(codec, ExpiryPolicyBuilder.noExpiration()); ResolvedChain resolvedChain = resolver.resolve(chain, 1L, 0L); Result result = resolvedChain.getResolvedResult(1L); assertNull(result); @@ -144,7 +144,7 @@ public void testResolveRemovesOnly() throws Exception { new RemoveOperation<>(1L, 0L), new RemoveOperation<>(1L, 0L)); - ExpiryChainResolver resolver = new ExpiryChainResolver<>(codec, Expirations.noExpiration()); + ExpiryChainResolver resolver = new ExpiryChainResolver<>(codec, ExpiryPolicyBuilder.noExpiration()); ResolvedChain resolvedChain = resolver.resolve(chain, 1L, 0L); Result result = resolvedChain.getResolvedResult(1L); assertNull(result); @@ -158,7 +158,7 @@ public void testPutAndRemove() throws Exception { new PutOperation<>(1L, "Albin", 0L), new RemoveOperation<>(1L, 0L)); - ExpiryChainResolver resolver = new ExpiryChainResolver<>(codec, Expirations.noExpiration()); + ExpiryChainResolver resolver = new ExpiryChainResolver<>(codec, ExpiryPolicyBuilder.noExpiration()); ResolvedChain resolvedChain = resolver.resolve(chain, 1L, 0L); Result result = resolvedChain.getResolvedResult(1L); assertNull(result); @@ -170,7 +170,7 @@ public void testResolvePutIfAbsentOnly() throws Exception { Operation expected = new PutOperation<>(1L, "Mathew", 0L); Chain chain = getChainFromOperations(new PutIfAbsentOperation<>(1L, "Mathew", 0L)); - ExpiryChainResolver resolver = new ExpiryChainResolver<>(codec, Expirations.noExpiration()); + ExpiryChainResolver resolver = new ExpiryChainResolver<>(codec, 
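/* ExpiryPolicyBuilder factories replace the removed Expirations helpers */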
ExpiryPolicyBuilder.noExpiration()); ResolvedChain resolvedChain = resolver.resolve(chain, 1L, 0L); Result result = resolvedChain.getResolvedResult(1L); assertEquals(expected, result); @@ -185,7 +185,7 @@ public void testResolvePutIfAbsentsOnly() throws Exception { new PutIfAbsentOperation<>(1L, "Suresh", 0L), new PutIfAbsentOperation<>(1L, "Mathew", 0L)); - ExpiryChainResolver resolver = new ExpiryChainResolver<>(codec, Expirations.noExpiration()); + ExpiryChainResolver resolver = new ExpiryChainResolver<>(codec, ExpiryPolicyBuilder.noExpiration()); ResolvedChain resolvedChain = resolver.resolve(chain, 1L, 0L); Result result = resolvedChain.getResolvedResult(1L); assertEquals(expected, result); @@ -200,7 +200,7 @@ public void testResolvePutIfAbsentSucceeds() throws Exception { new RemoveOperation<>(1L, 0L), new PutIfAbsentOperation<>(1L, "Mathew", 0L)); - ExpiryChainResolver resolver = new ExpiryChainResolver<>(codec, Expirations.noExpiration()); + ExpiryChainResolver resolver = new ExpiryChainResolver<>(codec, ExpiryPolicyBuilder.noExpiration()); ResolvedChain resolvedChain = resolver.resolve(chain, 1L, 0L); Result result = resolvedChain.getResolvedResult(1L); assertEquals(expected, result); @@ -211,7 +211,7 @@ public void testResolvePutIfAbsentSucceeds() throws Exception { public void testResolveForSingleOperationDoesNotCompact() { Chain chain = getChainFromOperations(new PutOperation<>(1L, "Albin", 0L)); - ExpiryChainResolver resolver = new ExpiryChainResolver<>(codec, Expirations.noExpiration()); + ExpiryChainResolver resolver = new ExpiryChainResolver<>(codec, ExpiryPolicyBuilder.noExpiration()); ResolvedChain resolvedChain = resolver.resolve(chain, 1L, 0L); assertThat(resolvedChain.isCompacted(), is(false)); assertThat(resolvedChain.getCompactionCount(), is(0)); @@ -233,7 +233,7 @@ public void testResolveForMultiplesOperationsAlwaysCompact() { new RemoveOperation<>(1L, 0L), new PutIfAbsentOperation<>(2L, "Albin", 0L)); - ExpiryChainResolver resolver = new ExpiryChainResolver<>(codec, Expirations.noExpiration()); + ExpiryChainResolver resolver = new ExpiryChainResolver<>(codec, ExpiryPolicyBuilder.noExpiration()); ResolvedChain resolvedChain = resolver.resolve(chain, 1L, 0L); assertThat(resolvedChain.isCompacted(), is(true)); assertThat(resolvedChain.getCompactionCount(), is(8)); @@ -247,7 +247,7 @@ public void testResolveForMultipleOperationHasCorrectIsFirstAndTimeStamp() { new RemoveOperation<>(1L, 2), new PutOperation<>(1L, "AlbinAfterRemove", 3)); - ExpiryChainResolver resolver = new ExpiryChainResolver<>(codec, Expirations.timeToLiveExpiration(Duration.of(1, TimeUnit.HOURS))); + ExpiryChainResolver resolver = new ExpiryChainResolver<>(codec, ExpiryPolicyBuilder.timeToLiveExpiration(Duration.ofHours(1))); ResolvedChain resolvedChain = resolver.resolve(chain, 1L, 3); Operation operation = codec.decode(resolvedChain.getCompactedChain().iterator().next().getPayload()); @@ -272,7 +272,7 @@ public void testResolveForMultipleOperationHasCorrectIsFirstAndTimeStampWithExpi new PutOperation<>(1L, "Albin4", 3L) ); - ExpiryChainResolver resolver = new ExpiryChainResolver<>(codec, Expirations.timeToLiveExpiration(new Duration(1l, TimeUnit.MILLISECONDS))); + ExpiryChainResolver resolver = new ExpiryChainResolver<>(codec, ExpiryPolicyBuilder.timeToLiveExpiration(Duration.ofMillis(1L))); ResolvedChain resolvedChain = resolver.resolve(chain, 1L, 3L); Operation operation = codec.decode(resolvedChain.getCompactedChain().iterator().next().getPayload()); @@ -299,7 +299,7 @@ public void 
testResolveDoesNotDecodeOtherKeyOperationValues() throws Exception { CountingLongSerializer keySerializer = new CountingLongSerializer(); CountingStringSerializer valueSerializer = new CountingStringSerializer(); OperationsCodec customCodec = new OperationsCodec<>(keySerializer, valueSerializer); - ExpiryChainResolver resolver = new ExpiryChainResolver<>(customCodec, timeToIdleExpiration(Duration.of(5, TimeUnit.SECONDS))); + ExpiryChainResolver resolver = new ExpiryChainResolver<>(customCodec, ExpiryPolicyBuilder.timeToIdleExpiration(Duration.ofSeconds(5))); resolver.resolve(chain, 1L, 0L); assertThat(keySerializer.decodeCount, is(3)); @@ -318,7 +318,7 @@ public void testResolveDecodesOperationValueOnlyOnDemand() throws Exception { CountingLongSerializer keySerializer = new CountingLongSerializer(); CountingStringSerializer valueSerializer = new CountingStringSerializer(); OperationsCodec customCodec = new OperationsCodec<>(keySerializer, valueSerializer); - ExpiryChainResolver resolver = new ExpiryChainResolver<>(customCodec, timeToIdleExpiration(Duration.of(5, TimeUnit.SECONDS))); + ExpiryChainResolver resolver = new ExpiryChainResolver<>(customCodec, ExpiryPolicyBuilder.timeToIdleExpiration(Duration.ofSeconds(5))); resolver.resolve(chain, 1L, 0L); assertThat(keySerializer.decodeCount, is(3)); @@ -337,7 +337,7 @@ public void testCompactingTwoKeys() throws Exception { new PutOperation<>(2L, "Suresh", 0L), new PutOperation<>(2L, "Mathew", 0L)); - ExpiryChainResolver resolver = new ExpiryChainResolver<>(codec, Expirations.noExpiration()); + ExpiryChainResolver resolver = new ExpiryChainResolver<>(codec, ExpiryPolicyBuilder.noExpiration()); Chain compactedChain = resolver.applyOperation(chain, 0L); @@ -350,7 +350,7 @@ public void testCompactingTwoKeys() throws Exception { @Test public void testCompactEmptyChain() throws Exception { Chain chain = (new ChainBuilder()).build(); - ExpiryChainResolver resolver = new ExpiryChainResolver<>(codec, Expirations.noExpiration()); + ExpiryChainResolver resolver = new ExpiryChainResolver<>(codec, ExpiryPolicyBuilder.noExpiration()); Chain compacted = resolver.applyOperation(chain, 0L); assertThat(compacted, emptyIterable()); } @@ -361,7 +361,7 @@ public void testCompactSinglePut() throws Exception { new PutOperation<>(1L, "Albin", 0L) ); - ExpiryChainResolver resolver = new ExpiryChainResolver<>(codec, Expirations.noExpiration()); + ExpiryChainResolver resolver = new ExpiryChainResolver<>(codec, ExpiryPolicyBuilder.noExpiration()); Chain compacted = resolver.applyOperation(chain, 0L); assertThat(compacted, contains(operation(new PutOperation<>(1L, "Albin", 0L)))); @@ -374,7 +374,7 @@ public void testCompactMultiplePuts() throws Exception { new PutOperation<>(1L, "Suresh", 0L), new PutOperation<>(1L, "Mathew", 0L)); - ExpiryChainResolver resolver = new ExpiryChainResolver<>(codec, Expirations.noExpiration()); + ExpiryChainResolver resolver = new ExpiryChainResolver<>(codec, ExpiryPolicyBuilder.noExpiration()); Chain compactedChain = resolver.applyOperation(chain, 0L); assertThat(compactedChain, contains(operation(new PutOperation<>(1L, "Mathew", 0L)))); } @@ -383,7 +383,7 @@ public void testCompactMultiplePuts() throws Exception { public void testCompactSingleRemove() throws Exception { Chain chain = getChainFromOperations(new RemoveOperation<>(1L, 0L)); - ExpiryChainResolver resolver = new ExpiryChainResolver<>(codec, Expirations.noExpiration()); + ExpiryChainResolver resolver = new ExpiryChainResolver<>(codec, ExpiryPolicyBuilder.noExpiration()); Chain 
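/* applyOperation compacts the whole chain rather than resolving a single key */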
compactedChain = resolver.applyOperation(chain, 0L); assertThat(compactedChain, emptyIterable()); } @@ -394,7 +394,7 @@ public void testCompactMultipleRemoves() throws Exception { new RemoveOperation<>(1L, 0L), new RemoveOperation<>(1L, 0L)); - ExpiryChainResolver resolver = new ExpiryChainResolver<>(codec, Expirations.noExpiration()); + ExpiryChainResolver resolver = new ExpiryChainResolver<>(codec, ExpiryPolicyBuilder.noExpiration()); Chain compactedChain = resolver.applyOperation(chain, 0L); assertThat(compactedChain, emptyIterable()); } @@ -405,7 +405,7 @@ public void testCompactPutAndRemove() throws Exception { new PutOperation<>(1L, "Albin", 0L), new RemoveOperation<>(1L, 0L)); - ExpiryChainResolver resolver = new ExpiryChainResolver<>(codec, Expirations.noExpiration()); + ExpiryChainResolver resolver = new ExpiryChainResolver<>(codec, ExpiryPolicyBuilder.noExpiration()); Chain compactedChain = resolver.applyOperation(chain, 0L); assertThat(compactedChain, emptyIterable()); } @@ -414,7 +414,7 @@ public void testCompactPutAndRemove() throws Exception { public void testCompactSinglePutIfAbsent() throws Exception { Chain chain = getChainFromOperations(new PutIfAbsentOperation<>(1L, "Mathew", 0L)); - ExpiryChainResolver resolver = new ExpiryChainResolver<>(codec, Expirations.noExpiration()); + ExpiryChainResolver resolver = new ExpiryChainResolver<>(codec, ExpiryPolicyBuilder.noExpiration()); Chain compactedChain = resolver.applyOperation(chain, 0L); assertThat(compactedChain, contains(operation(new PutOperation<>(1L, "Mathew", 0L)))); } @@ -426,7 +426,7 @@ public void testCompactMultiplePutIfAbsents() throws Exception { new PutIfAbsentOperation<>(1L, "Suresh", 0L), new PutIfAbsentOperation<>(1L, "Mathew", 0L)); - ExpiryChainResolver resolver = new ExpiryChainResolver<>(codec, Expirations.noExpiration()); + ExpiryChainResolver resolver = new ExpiryChainResolver<>(codec, ExpiryPolicyBuilder.noExpiration()); Chain compactedChain = resolver.applyOperation(chain, 0L); assertThat(compactedChain, contains(operation(new PutOperation<>(1L, "Albin", 0L)))); } @@ -438,7 +438,7 @@ public void testCompactPutIfAbsentAfterRemove() throws Exception { new RemoveOperation<>(1L, 0L), new PutIfAbsentOperation<>(1L, "Mathew", 0L)); - ExpiryChainResolver resolver = new ExpiryChainResolver<>(codec, Expirations.noExpiration()); + ExpiryChainResolver resolver = new ExpiryChainResolver<>(codec, ExpiryPolicyBuilder.noExpiration()); Chain compactedChain = resolver.applyOperation(chain, 0L); assertThat(compactedChain, contains(operation(new PutOperation<>(1L, "Mathew", 0L)))); } @@ -459,7 +459,7 @@ public void testCompactForMultipleKeysAndOperations() { new RemoveOperation<>(1L, 0L), new PutIfAbsentOperation<>(2L, "Albin", 0L)); - ExpiryChainResolver resolver = new ExpiryChainResolver<>(codec, Expirations.noExpiration()); + ExpiryChainResolver resolver = new ExpiryChainResolver<>(codec, ExpiryPolicyBuilder.noExpiration()); Chain compactedChain = resolver.applyOperation(chain, 0L); assertThat(compactedChain, contains(operation(new PutOperation<>(2L, "Albin", 0L)))); } @@ -472,7 +472,7 @@ public void testCompactHasCorrectTimeStamp() { new RemoveOperation<>(1L, 2), new PutOperation<>(1L, "Albin3", 3)); - ExpiryChainResolver resolver = new ExpiryChainResolver<>(codec, Expirations.noExpiration()); + ExpiryChainResolver resolver = new ExpiryChainResolver<>(codec, ExpiryPolicyBuilder.noExpiration()); Chain compactedChain = resolver.applyOperation(chain, 3); assertThat(compactedChain, contains(operation(new 
PutOperation<>(1L, "Albin3", 3)))); @@ -487,7 +487,7 @@ public void testCompactHasCorrectWithExpiry() { new PutOperation<>(1L, "Albin4", 3L) ); - ExpiryChainResolver resolver = new ExpiryChainResolver<>(codec, Expirations.timeToLiveExpiration(new Duration(1l, TimeUnit.MILLISECONDS))); + ExpiryChainResolver resolver = new ExpiryChainResolver<>(codec, ExpiryPolicyBuilder.timeToLiveExpiration(Duration.ofMillis(1L))); Chain compactedChain = resolver.applyOperation(chain, 3L); assertThat(compactedChain, contains(operation(new PutOperation<>(1L, "Albin4", 3L)))); @@ -503,7 +503,7 @@ public void testCompactDecodesOperationValueOnlyOnDemand() throws Exception { CountingLongSerializer keySerializer = new CountingLongSerializer(); CountingStringSerializer valueSerializer = new CountingStringSerializer(); OperationsCodec customCodec = new OperationsCodec<>(keySerializer, valueSerializer); - ExpiryChainResolver resolver = new ExpiryChainResolver<>(customCodec, Expirations.noExpiration()); + ExpiryChainResolver resolver = new ExpiryChainResolver<>(customCodec, ExpiryPolicyBuilder.noExpiration()); resolver.applyOperation(chain, 0L); assertThat(keySerializer.decodeCount, is(3)); diff --git a/clustered/client/src/test/java/org/ehcache/clustered/client/internal/store/operations/LazyValueHolderTest.java b/clustered/client/src/test/java/org/ehcache/clustered/common/internal/store/operations/LazyValueHolderTest.java similarity index 97% rename from clustered/client/src/test/java/org/ehcache/clustered/client/internal/store/operations/LazyValueHolderTest.java rename to clustered/client/src/test/java/org/ehcache/clustered/common/internal/store/operations/LazyValueHolderTest.java index a89f2bf8d3..00ae6f6122 100644 --- a/clustered/client/src/test/java/org/ehcache/clustered/client/internal/store/operations/LazyValueHolderTest.java +++ b/clustered/client/src/test/java/org/ehcache/clustered/common/internal/store/operations/LazyValueHolderTest.java @@ -14,7 +14,7 @@ * limitations under the License. */ -package org.ehcache.clustered.client.internal.store.operations; +package org.ehcache.clustered.common.internal.store.operations; import org.ehcache.spi.serialization.Serializer; import org.junit.Before; diff --git a/clustered/client/src/test/java/org/ehcache/clustered/client/internal/store/operations/PutIfAbsentOperationTest.java b/clustered/client/src/test/java/org/ehcache/clustered/common/internal/store/operations/PutIfAbsentOperationTest.java similarity index 96% rename from clustered/client/src/test/java/org/ehcache/clustered/client/internal/store/operations/PutIfAbsentOperationTest.java rename to clustered/client/src/test/java/org/ehcache/clustered/common/internal/store/operations/PutIfAbsentOperationTest.java index 7843d24a19..84cbc04566 100644 --- a/clustered/client/src/test/java/org/ehcache/clustered/client/internal/store/operations/PutIfAbsentOperationTest.java +++ b/clustered/client/src/test/java/org/ehcache/clustered/common/internal/store/operations/PutIfAbsentOperationTest.java @@ -14,7 +14,7 @@ * limitations under the License. 
*/ -package org.ehcache.clustered.client.internal.store.operations; +package org.ehcache.clustered.common.internal.store.operations; import org.ehcache.spi.serialization.Serializer; import org.junit.Test; diff --git a/clustered/client/src/test/java/org/ehcache/clustered/client/internal/store/operations/PutOperationTest.java b/clustered/client/src/test/java/org/ehcache/clustered/common/internal/store/operations/PutOperationTest.java similarity index 96% rename from clustered/client/src/test/java/org/ehcache/clustered/client/internal/store/operations/PutOperationTest.java rename to clustered/client/src/test/java/org/ehcache/clustered/common/internal/store/operations/PutOperationTest.java index 9d4deb8bf5..8eca0e0dc4 100644 --- a/clustered/client/src/test/java/org/ehcache/clustered/client/internal/store/operations/PutOperationTest.java +++ b/clustered/client/src/test/java/org/ehcache/clustered/common/internal/store/operations/PutOperationTest.java @@ -14,7 +14,7 @@ * limitations under the License. */ -package org.ehcache.clustered.client.internal.store.operations; +package org.ehcache.clustered.common.internal.store.operations; import org.ehcache.spi.serialization.Serializer; import org.junit.Test; diff --git a/clustered/client/src/test/java/org/ehcache/clustered/common/internal/store/operations/PutWithWriterOperationTest.java b/clustered/client/src/test/java/org/ehcache/clustered/common/internal/store/operations/PutWithWriterOperationTest.java new file mode 100644 index 0000000000..b4f332da79 --- /dev/null +++ b/clustered/client/src/test/java/org/ehcache/clustered/common/internal/store/operations/PutWithWriterOperationTest.java @@ -0,0 +1,51 @@ +/* + * Copyright Terracotta, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.ehcache.clustered.common.internal.store.operations; + +import org.ehcache.spi.serialization.Serializer; +import org.junit.Test; + +import java.nio.ByteBuffer; + +import static org.junit.Assert.*; + +public class PutWithWriterOperationTest extends BaseKeyValueOperationTest { + + @Override + protected <K, V> BaseKeyValueOperation<K, V> getNewOperation(K key, V value, long timestamp) { + return new PutWithWriterOperation<>(key, value, timestamp); + } + + @Override + protected <K, V> BaseKeyValueOperation<K, V> getNewOperation(ByteBuffer buffer, Serializer<K> keySerializer, Serializer<V> valueSerializer) { + return new PutWithWriterOperation<>(buffer, keySerializer, valueSerializer); + } + + @Override + protected OperationCode getOperationCode() { + return OperationCode.PUT_WITH_WRITER; + } + + @Test + public void testApply() { + PutWithWriterOperation<Long, String> putOperation = new PutWithWriterOperation<>(1L, "one", System.currentTimeMillis()); + Result<Long, String> result = putOperation.apply(null); + assertSame(putOperation, result); + PutWithWriterOperation<Long, String> anotherOperation = new PutWithWriterOperation<>(1L, "two", System.currentTimeMillis()); + result = anotherOperation.apply(putOperation); + assertSame(anotherOperation, result); + } +} diff --git a/clustered/client/src/test/java/org/ehcache/clustered/client/internal/store/operations/RemoveOperationTest.java b/clustered/client/src/test/java/org/ehcache/clustered/common/internal/store/operations/RemoveOperationTest.java similarity index 94% rename from clustered/client/src/test/java/org/ehcache/clustered/client/internal/store/operations/RemoveOperationTest.java rename to clustered/client/src/test/java/org/ehcache/clustered/common/internal/store/operations/RemoveOperationTest.java index b1d6b14c75..533cd0e4e8 100644 --- a/clustered/client/src/test/java/org/ehcache/clustered/client/internal/store/operations/RemoveOperationTest.java +++ b/clustered/client/src/test/java/org/ehcache/clustered/common/internal/store/operations/RemoveOperationTest.java @@ -14,7 +14,7 @@ * limitations under the License.
*/ -package org.ehcache.clustered.client.internal.store.operations; +package org.ehcache.clustered.common.internal.store.operations; import org.ehcache.clustered.client.TestTimeSource; import org.ehcache.impl.serialization.LongSerializer; @@ -24,8 +24,8 @@ import java.nio.ByteBuffer; -import static org.ehcache.clustered.client.internal.store.operations.Operation.BYTE_SIZE_BYTES; -import static org.ehcache.clustered.client.internal.store.operations.Operation.LONG_SIZE_BYTES; +import static org.ehcache.clustered.common.internal.store.operations.Operation.BYTE_SIZE_BYTES; +import static org.ehcache.clustered.common.internal.store.operations.Operation.LONG_SIZE_BYTES; import static org.junit.Assert.*; public class RemoveOperationTest { diff --git a/clustered/client/src/test/java/org/ehcache/clustered/client/internal/store/operations/ReplaceOperationTest.java b/clustered/client/src/test/java/org/ehcache/clustered/common/internal/store/operations/ReplaceOperationTest.java similarity index 96% rename from clustered/client/src/test/java/org/ehcache/clustered/client/internal/store/operations/ReplaceOperationTest.java rename to clustered/client/src/test/java/org/ehcache/clustered/common/internal/store/operations/ReplaceOperationTest.java index 94787b7948..621a3f43aa 100644 --- a/clustered/client/src/test/java/org/ehcache/clustered/client/internal/store/operations/ReplaceOperationTest.java +++ b/clustered/client/src/test/java/org/ehcache/clustered/common/internal/store/operations/ReplaceOperationTest.java @@ -14,7 +14,7 @@ * limitations under the License. */ -package org.ehcache.clustered.client.internal.store.operations; +package org.ehcache.clustered.common.internal.store.operations; import org.ehcache.spi.serialization.Serializer; import org.junit.Test; diff --git a/clustered/client/src/test/java/org/ehcache/clustered/loaderWriter/BasicClusteredLoaderWriterTest.java b/clustered/client/src/test/java/org/ehcache/clustered/loaderWriter/BasicClusteredLoaderWriterTest.java new file mode 100644 index 0000000000..1dd41248bf --- /dev/null +++ b/clustered/client/src/test/java/org/ehcache/clustered/loaderWriter/BasicClusteredLoaderWriterTest.java @@ -0,0 +1,286 @@ +/* + * Copyright Terracotta, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.ehcache.clustered.loaderWriter; + +import org.ehcache.Cache; +import org.ehcache.CacheManager; +import org.ehcache.CachePersistenceException; +import org.ehcache.clustered.client.config.builders.ClusteredResourcePoolBuilder; +import org.ehcache.clustered.client.config.builders.ClusteringServiceConfigurationBuilder; +import org.ehcache.clustered.client.internal.UnitTestConnectionService; +import org.ehcache.clustered.client.internal.service.ClusterTierValidationException; +import org.ehcache.clustered.util.ThrowingResilienceStrategy; +import org.ehcache.config.CacheConfiguration; +import org.ehcache.config.builders.CacheConfigurationBuilder; +import org.ehcache.config.builders.CacheManagerBuilder; +import org.ehcache.config.builders.ResourcePoolsBuilder; +import org.ehcache.config.units.EntryUnit; +import org.ehcache.config.units.MemoryUnit; +import org.ehcache.spi.loaderwriter.CacheLoaderWriter; +import org.hamcrest.Matchers; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +import java.net.URI; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ThreadLocalRandom; + +import static org.ehcache.clustered.client.config.builders.ClusteringServiceConfigurationBuilder.cluster; +import static org.ehcache.config.builders.CacheConfigurationBuilder.*; +import static org.hamcrest.Matchers.*; +import static org.junit.Assert.assertThat; + +public class BasicClusteredLoaderWriterTest { + + private static final URI CLUSTER_URI = URI.create("terracotta://example.com:9540/clustered-loader-writer"); + + @Before + public void definePassthroughServer() throws Exception { + UnitTestConnectionService.add(CLUSTER_URI, + new UnitTestConnectionService.PassthroughServerBuilder() + .resource("primary-server-resource", 4, MemoryUnit.MB) + .build()); + } + + @After + public void removePassthroughServer() throws Exception { + UnitTestConnectionService.remove(CLUSTER_URI); + } + + @Test + public void testAllClientsNeedToHaveLoaderWriterConfigured() { + TestCacheLoaderWriter loaderWriter = new TestCacheLoaderWriter(); + CacheConfiguration<Long, String> cacheConfiguration = getCacheConfiguration(loaderWriter); + + CacheManager cacheManager = CacheManagerBuilder + .newCacheManagerBuilder() + .with(cluster(CLUSTER_URI).autoCreate()) + .withCache("cache-1", cacheConfiguration) + .build(true); + + CacheConfiguration<Long, String> withoutLoaderWriter = newCacheConfigurationBuilder(Long.class, String.class, + ResourcePoolsBuilder + .newResourcePoolsBuilder().heap(10, EntryUnit.ENTRIES).offheap(1, MemoryUnit.MB) + .with(ClusteredResourcePoolBuilder.clusteredDedicated("primary-server-resource", 2, MemoryUnit.MB))) + .withResilienceStrategy(new ThrowingResilienceStrategy()) + .build(); + + try { + CacheManager anotherManager = CacheManagerBuilder + .newCacheManagerBuilder() + .with(cluster(CLUSTER_URI).autoCreate()) + .withCache("cache-1", withoutLoaderWriter) + .build(true); + } catch (RuntimeException e) { + assertThat(e.getCause().getCause().getCause().getCause(), instanceOf(CachePersistenceException.class)); + assertThat(e.getCause().getCause().getCause().getCause().getCause(), instanceOf(ClusterTierValidationException.class)); + } + } + + @Test + public void testBasicClusteredCacheLoaderWriter() { + + TestCacheLoaderWriter loaderWriter = new TestCacheLoaderWriter(); + CacheConfiguration<Long, String> cacheConfiguration = getCacheConfiguration(loaderWriter); + + CacheManager cacheManager = CacheManagerBuilder +
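// connects to the passthrough cluster defined in definePassthroughServer(); autoCreate() provisions the clustered tier on first use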
+ + @Test + public void testBasicClusteredCacheLoaderWriter() { + + TestCacheLoaderWriter loaderWriter = new TestCacheLoaderWriter(); + CacheConfiguration<Long, String> cacheConfiguration = getCacheConfiguration(loaderWriter); + + CacheManager cacheManager = CacheManagerBuilder + .newCacheManagerBuilder() + .with(cluster(CLUSTER_URI).autoCreate()) + .withCache("cache-1", cacheConfiguration) + .build(true); + + Cache<Long, String> cache = cacheManager.getCache("cache-1", Long.class, String.class); + + cache.put(1L, "1"); + + assertThat(cache.get(1L), is("1")); + + assertThat(loaderWriter.storeMap.get(1L), is("1")); + + } + + @Test + public void testLoaderWriterMultipleClients() { + + TestCacheLoaderWriter loaderWriter = new TestCacheLoaderWriter(); + + CacheConfiguration<Long, String> cacheConfiguration = getCacheConfiguration(loaderWriter); + + CacheManager cacheManager1 = CacheManagerBuilder + .newCacheManagerBuilder() + .with(cluster(CLUSTER_URI).autoCreate()) + .withCache("cache-1", cacheConfiguration) + .build(true); + + CacheManager cacheManager2 = CacheManagerBuilder + .newCacheManagerBuilder() + .with(cluster(CLUSTER_URI).autoCreate()) + .withCache("cache-1", cacheConfiguration) + .build(true); + + Cache<Long, String> client1 = cacheManager1.getCache("cache-1", Long.class, String.class); + Cache<Long, String> client2 = cacheManager2.getCache("cache-1", Long.class, String.class); + + client1.put(1L, "1"); + client2.put(1L, "2"); + + assertThat(client1.get(1L), is("2")); + assertThat(loaderWriter.storeMap.get(1L), is("2")); + + client1.remove(1L); + + assertThat(client2.get(1L), nullValue()); + assertThat(loaderWriter.storeMap.get(1L), nullValue()); + + } + + @Test + public void testCASOpsMultipleClients() { + TestCacheLoaderWriter loaderWriter = new TestCacheLoaderWriter(); + + CacheConfiguration<Long, String> cacheConfiguration = getCacheConfiguration(loaderWriter); + + CacheManager cacheManager1 = CacheManagerBuilder + .newCacheManagerBuilder() + .with(cluster(CLUSTER_URI).autoCreate()) + .withCache("cache-1", cacheConfiguration) + .build(true); + + CacheManager cacheManager2 = CacheManagerBuilder + .newCacheManagerBuilder() + .with(cluster(CLUSTER_URI).autoCreate()) + .withCache("cache-1", cacheConfiguration) + .build(true); + + Cache<Long, String> client1 = cacheManager1.getCache("cache-1", Long.class, String.class); + Cache<Long, String> client2 = cacheManager2.getCache("cache-1", Long.class, String.class); + + assertThat(client1.putIfAbsent(1L, "1"), nullValue()); + assertThat(client2.putIfAbsent(1L, "2"), is("1")); + + assertThat(client1.get(1L), is("1")); + assertThat(loaderWriter.storeMap.get(1L), is("1")); + + assertThat(client1.replace(1L, "2"), is("1")); + assertThat(client2.replace(1L, "3"), is("2")); + + assertThat(client1.get(1L), is("3")); + assertThat(loaderWriter.storeMap.get(1L), is("3")); + + assertThat(client1.replace(1L, "2", "4"), is(false)); + assertThat(client2.replace(1L, "3", "4"), is(true)); + + assertThat(client1.get(1L), is("4")); + assertThat(loaderWriter.storeMap.get(1L), is("4")); + + assertThat(client1.remove(1L, "5"), is(false)); + assertThat(client2.remove(1L, "4"), is(true)); + + } + + @Test + public void testBulkOps() { + TestCacheLoaderWriter loaderWriter = new TestCacheLoaderWriter(); + CacheConfiguration<Long, String> cacheConfiguration = getCacheConfiguration(loaderWriter); + + CacheManager cacheManager = CacheManagerBuilder + .newCacheManagerBuilder() + .with(cluster(CLUSTER_URI).autoCreate()) + .withCache("cache-1", cacheConfiguration) + .build(true); + + Cache<Long, String> cache = cacheManager.getCache("cache-1", Long.class, String.class); + + Map<Long, String> mappings = new HashMap<>(); + + for (int i = 1; i <= 5; i++) { + mappings.put((long) i, "" + i); + } + + cache.putAll(mappings); + + assertThat(loaderWriter.storeMap.keySet(), containsInAnyOrder(mappings.keySet().toArray())); + + cache.clear(); + + Map<Long, String> loadedData =
cache.getAll(mappings.keySet()); + + assertThat(mappings.keySet(), containsInAnyOrder(loadedData.keySet().toArray())); + + cache.removeAll(mappings.keySet()); + + assertThat(loaderWriter.storeMap.isEmpty(), is(true)); + } + + @Test + public void testCASOps() { + TestCacheLoaderWriter loaderWriter = new TestCacheLoaderWriter(); + + CacheConfiguration<Long, String> cacheConfiguration = getCacheConfiguration(loaderWriter); + + CacheManager cacheManager1 = CacheManagerBuilder + .newCacheManagerBuilder() + .with(cluster(CLUSTER_URI).autoCreate()) + .withCache("cache-1", cacheConfiguration) + .build(true); + + CacheManager cacheManager2 = CacheManagerBuilder + .newCacheManagerBuilder() + .with(cluster(CLUSTER_URI).autoCreate()) + .withCache("cache-1", cacheConfiguration) + .build(true); + + Cache<Long, String> client1 = cacheManager1.getCache("cache-1", Long.class, String.class); + Cache<Long, String> client2 = cacheManager2.getCache("cache-1", Long.class, String.class); + + assertThat(loaderWriter.storeMap.isEmpty(), is(true)); + + Set<Long> keys = new HashSet<>(); + ThreadLocalRandom.current().longs(10).forEach(x -> { + keys.add(x); + client1.put(x, Long.toString(x)); + }); + assertThat(loaderWriter.storeMap.size(), is(10)); + + + keys.forEach(x -> assertThat(client2.putIfAbsent(x, "Again" + x), is(Long.toString(x)))); + + keys.stream().limit(5).forEach(x -> + assertThat(client2.replace(x , "Replaced" + x), is(Long.toString(x)))); + + keys.forEach(x -> client1.remove(x, Long.toString(x))); + + assertThat(loaderWriter.storeMap.size(), is(5)); + + } + + private CacheConfiguration<Long, String> getCacheConfiguration(TestCacheLoaderWriter loaderWriter) { + return newCacheConfigurationBuilder(Long.class, String.class, + ResourcePoolsBuilder + .newResourcePoolsBuilder().heap(10, EntryUnit.ENTRIES).offheap(1, MemoryUnit.MB) + .with(ClusteredResourcePoolBuilder.clusteredDedicated("primary-server-resource", 2, MemoryUnit.MB))) + .withLoaderWriter(loaderWriter) + .withResilienceStrategy(new ThrowingResilienceStrategy()) + .build(); + } + +} diff --git a/clustered/client/src/test/java/org/ehcache/clustered/loaderWriter/TestCacheLoaderWriter.java b/clustered/client/src/test/java/org/ehcache/clustered/loaderWriter/TestCacheLoaderWriter.java new file mode 100644 index 0000000000..4dd229b209 --- /dev/null +++ b/clustered/client/src/test/java/org/ehcache/clustered/loaderWriter/TestCacheLoaderWriter.java @@ -0,0 +1,42 @@ +/* + * Copyright Terracotta, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package org.ehcache.clustered.loaderWriter; + +import org.ehcache.impl.internal.concurrent.ConcurrentHashMap; +import org.ehcache.spi.loaderwriter.CacheLoaderWriter; + +import java.util.Map; + +public class TestCacheLoaderWriter implements CacheLoaderWriter<Long, String> { + + public final Map<Long, String> storeMap = new ConcurrentHashMap<>(); + + @Override + public String load(Long key) throws Exception { + return storeMap.get(key); + } + + @Override + public void write(Long key, String value) throws Exception { + storeMap.put(key, value); + } + + @Override + public void delete(Long key) throws Exception { + storeMap.remove(key); + } +} diff --git a/clustered/client/src/test/java/org/ehcache/clustered/loaderWriter/writebehind/BasicClusteredWriteBehindPassthroughTest.java b/clustered/client/src/test/java/org/ehcache/clustered/loaderWriter/writebehind/BasicClusteredWriteBehindPassthroughTest.java new file mode 100644 index 0000000000..214fde795e --- /dev/null +++ b/clustered/client/src/test/java/org/ehcache/clustered/loaderWriter/writebehind/BasicClusteredWriteBehindPassthroughTest.java @@ -0,0 +1,280 @@ +/* + * Copyright Terracotta, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.ehcache.clustered.loaderWriter.writebehind; + +import org.ehcache.Cache; +import org.ehcache.CacheManager; +import org.ehcache.PersistentCacheManager; +import org.ehcache.clustered.client.config.ClusteredStoreConfiguration; +import org.ehcache.clustered.client.config.builders.ClusteredResourcePoolBuilder; +import org.ehcache.clustered.client.internal.UnitTestConnectionService; +import org.ehcache.clustered.common.Consistency; +import org.ehcache.config.CacheConfiguration; +import org.ehcache.config.builders.CacheManagerBuilder; +import org.ehcache.config.builders.ResourcePoolsBuilder; +import org.ehcache.config.builders.WriteBehindConfigurationBuilder; +import org.ehcache.config.units.EntryUnit; +import org.ehcache.config.units.MemoryUnit; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import java.net.URI; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.ehcache.clustered.client.config.builders.ClusteringServiceConfigurationBuilder.cluster; +import static org.ehcache.config.builders.CacheConfigurationBuilder.newCacheConfigurationBuilder; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; +import static org.junit.Assert.assertThat; + +public class BasicClusteredWriteBehindPassthroughTest { + + private static final URI CLUSTER_URI = URI.create("terracotta://example.com:9540/clustered-write-behind"); + + @Before + public void definePassthroughServer() { + UnitTestConnectionService.add(CLUSTER_URI, + new UnitTestConnectionService.PassthroughServerBuilder() + .resource("primary-server-resource", 4, MemoryUnit.MB) + .build()); + } + + @After + public void removePassthroughServer() { + UnitTestConnectionService.remove(CLUSTER_URI); + } + + private
RecordingLoaderWriter<Long, String> loaderWriter; + private final List<Record> cacheRecords = new ArrayList<>(); + + private static final String CACHE_NAME = "cache-1"; + private static final long KEY = 1L; + + @Before + public void setUp() { + loaderWriter = new RecordingLoaderWriter<>(); + } + + @Test + public void testBasicClusteredWriteBehind() { + try (PersistentCacheManager cacheManager = createCacheManager()) { + Cache<Long, String> cache = cacheManager.getCache(CACHE_NAME, Long.class, String.class); + + for (int i = 0; i < 10; i++) { + put(cache, String.valueOf(i)); + } + + assertValue(cache, String.valueOf(9)); + + verifyRecords(cache); + cache.clear(); + } + } + + @Test + public void testWriteBehindMultipleClients() { + try (PersistentCacheManager cacheManager1 = createCacheManager(); + PersistentCacheManager cacheManager2 = createCacheManager()) { + Cache<Long, String> client1 = cacheManager1.getCache(CACHE_NAME, Long.class, String.class); + Cache<Long, String> client2 = cacheManager2.getCache(CACHE_NAME, Long.class, String.class); + + put(client1, "The one from client1"); + put(client2, "The one one from client2"); + assertValue(client1, "The one one from client2"); + remove(client1); + put(client2, "The one from client2"); + put(client1, "The one one from client1"); + assertValue(client2, "The one one from client1"); + remove(client2); + assertValue(client1, null); + put(client1, "The one from client1"); + put(client1, "The one one from client1"); + remove(client2); + put(client2, "The one from client2"); + put(client2, "The one one from client2"); + remove(client1); + assertValue(client2, null); + + verifyRecords(client1); + client1.clear(); + } + } + + @Test + public void testClusteredWriteBehindCAS() { + try (PersistentCacheManager cacheManager = createCacheManager()) { + Cache<Long, String> cache = cacheManager.getCache(CACHE_NAME, Long.class, String.class); + putIfAbsent(cache, "First value", true); + assertValue(cache, "First value"); + putIfAbsent(cache, "Second value", false); + assertValue(cache, "First value"); + put(cache, "First value again"); + assertValue(cache, "First value again"); + replace(cache, "Replaced First value", true); + assertValue(cache, "Replaced First value"); + replace(cache, "Replaced First value", "Replaced First value again", true); + assertValue(cache, "Replaced First value again"); + replace(cache, "Replaced First", "Tried Replacing First value again", false); + assertValue(cache, "Replaced First value again"); + condRemove(cache, "Replaced First value again", true); + assertValue(cache, null); + replace(cache, "Trying to replace value", false); + assertValue(cache, null); + put(cache, "new value", true); + assertValue(cache, "new value"); + condRemove(cache, "new value", false); + + verifyRecords(cache); + cache.clear(); + } + } + + @Test + public void testClusteredWriteBehindLoading() { + try (CacheManager cacheManager = createCacheManager()) { + Cache<Long, String> cache = cacheManager.getCache(CACHE_NAME, Long.class, String.class); + + put(cache, "Some value"); + tryFlushingUpdatesToSOR(cache); + cache.clear(); + + assertThat(cache.get(KEY), notNullValue()); + + cache.clear(); + } + } + + private void assertValue(Cache<Long, String> cache, String value) { + assertThat(cache.get(KEY), is(value)); + } + + private void put(Cache<Long, String> cache, String value) { + put(cache, value, true); + } + + private void put(Cache<Long, String> cache, String value, boolean addToCacheRecords) { + cache.put(KEY, value); + if (addToCacheRecords) { + cacheRecords.add(new Record(KEY, cache.get(KEY))); + } + } + + private void putIfAbsent(Cache<Long, String> cache, String value, boolean
addToCacheRecords) { + cache.putIfAbsent(KEY, value); + if (addToCacheRecords) { + cacheRecords.add(new Record(KEY, cache.get(KEY))); + } + } + + private void replace(Cache<Long, String> cache, String value, boolean addToCacheRecords) { + cache.replace(KEY, value); + if (addToCacheRecords) { + cacheRecords.add(new Record(KEY, cache.get(KEY))); + } + } + + private void replace(Cache<Long, String> cache, String oldValue, String newValue, boolean addToCacheRecords) { + cache.replace(KEY, oldValue, newValue); + if (addToCacheRecords) { + cacheRecords.add(new Record(KEY, cache.get(KEY))); + } + } + + private void remove(Cache<Long, String> cache) { + cache.remove(KEY); + cacheRecords.add(new Record(KEY, null)); + } + + private void condRemove(Cache<Long, String> cache, String value, boolean addToCacheRecords) { + cache.remove(KEY, value); + if (addToCacheRecords) { + cacheRecords.add(new Record(KEY, null)); + } + } + + private void verifyRecords(Cache<Long, String> cache) { + tryFlushingUpdatesToSOR(cache); + + Map<Long, List<String>> loaderWriterRecords = loaderWriter.getRecords(); + + Map<Long, Integer> track = new HashMap<>(); + for (Record cacheRecord : cacheRecords) { + Long key = cacheRecord.getKey(); + int next = track.compute(key, (k, v) -> v == null ? 0 : v + 1); + assertThat(loaderWriterRecords.get(key).get(next), is(cacheRecord.getValue())); + } + } + + private void tryFlushingUpdatesToSOR(Cache<Long, String> cache) { + int retryCount = 1000; + int i = 0; + while (true) { + String value = "flush_queue_" + i; + put(cache, value, false); + try { + Thread.sleep(100); + } catch (InterruptedException e) { + e.printStackTrace(); + } + if (value.equals(loaderWriter.load(KEY))) break; + if (i > retryCount) { + throw new RuntimeException("Couldn't flush updates to SOR after " + retryCount + " tries"); + } + i++; + } + } + + private PersistentCacheManager createCacheManager() { + CacheConfiguration<Long, String> cacheConfiguration = + newCacheConfigurationBuilder(Long.class, String.class, ResourcePoolsBuilder.newResourcePoolsBuilder() + .heap(10, EntryUnit.ENTRIES) + .offheap(1, MemoryUnit.MB) + .with(ClusteredResourcePoolBuilder.clusteredDedicated("primary-server-resource", 2, MemoryUnit.MB))) + .withLoaderWriter(loaderWriter) + .add(WriteBehindConfigurationBuilder.newUnBatchedWriteBehindConfiguration()) + .add(new ClusteredStoreConfiguration(Consistency.STRONG)) + .build(); + + return CacheManagerBuilder + .newCacheManagerBuilder() + .with(cluster(CLUSTER_URI).autoCreate()) + .withCache(CACHE_NAME, cacheConfiguration) + .build(true); + } + + private static final class Record { + private final Long key; + private final String value; + + private Record(Long key, String value) { + this.key = key; + this.value = value; + } + + Long getKey() { + return key; + } + + String getValue() { + return value; + } + } +}
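(Aside: RecordingLoaderWriter, added just below, journals every write and delete per key; verifyRecords() above replays cacheRecords against that journal position by position. A self-contained illustration of the journaling contract, assuming the class exactly as defined below:)

RecordingLoaderWriter<Long, String> journal = new RecordingLoaderWriter<>();
journal.write(1L, "a");
journal.delete(1L); // recorded as a null entry
journal.write(1L, "b");
// getRecords().get(1L) now reads ["a", null, "b"]; load(1L) returns the latest entry
assert "b".equals(journal.load(1L));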
diff --git a/clustered/client/src/test/java/org/ehcache/clustered/loaderWriter/writebehind/RecordingLoaderWriter.java b/clustered/client/src/test/java/org/ehcache/clustered/loaderWriter/writebehind/RecordingLoaderWriter.java new file mode 100644 index 0000000000..c6a9334376 --- /dev/null +++ b/clustered/client/src/test/java/org/ehcache/clustered/loaderWriter/writebehind/RecordingLoaderWriter.java @@ -0,0 +1,68 @@ +/* + * Copyright Terracotta, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.ehcache.clustered.loaderWriter.writebehind; + +import org.ehcache.spi.loaderwriter.CacheLoaderWriter; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +public class RecordingLoaderWriter<K, V> implements CacheLoaderWriter<K, V> { + + private final Map<K, List<V>> records = new HashMap<>(); + + @Override + public synchronized V load(K key) { + List<V> list = records.get(key); + return list == null ? null : list.get(list.size() - 1); + } + + @Override + public synchronized void write(K key, V value) { + record(key, value); + } + + @Override + public synchronized void delete(K key) { + record(key, null); + } + + @Override + public synchronized Map<K, V> loadAll(Iterable<? extends K> keys) throws Exception { + return CacheLoaderWriter.super.loadAll(keys); + } + + @Override + public void writeAll(Iterable<? extends Map.Entry<? extends K, ? extends V>> entries) throws Exception { + CacheLoaderWriter.super.writeAll(entries); + } + + @Override + public void deleteAll(Iterable<? extends K> keys) throws Exception { + CacheLoaderWriter.super.deleteAll(keys); + } + + private void record(K key, V value) { + records.computeIfAbsent(key, k -> new ArrayList<>()).add(value); + } + + public synchronized Map<K, List<V>> getRecords() { + return Collections.unmodifiableMap(records); + } +} diff --git a/clustered/client/src/test/java/org/ehcache/clustered/server/ObservableEhcacheServerEntityService.java b/clustered/client/src/test/java/org/ehcache/clustered/server/ObservableEhcacheServerEntityService.java index f9349df1c7..2dba617610 100644 --- a/clustered/client/src/test/java/org/ehcache/clustered/server/ObservableEhcacheServerEntityService.java +++ b/clustered/client/src/test/java/org/ehcache/clustered/server/ObservableEhcacheServerEntityService.java @@ -20,13 +20,11 @@ import org.ehcache.clustered.common.internal.messages.EhcacheEntityResponse; import org.ehcache.impl.internal.concurrent.ConcurrentHashMap; import org.terracotta.entity.ActiveInvokeContext; -import org.terracotta.entity.ActiveServerEntity; import org.terracotta.entity.ClientDescriptor; import org.terracotta.entity.ClientSourceId; import org.terracotta.entity.ConfigurationException; import org.terracotta.entity.EntityServerService; import org.terracotta.entity.PassiveSynchronizationChannel; -import org.terracotta.entity.ReconnectRejectedException; import org.terracotta.entity.ServiceRegistry; import org.terracotta.entity.StateDumpCollector; @@ -50,7 +48,7 @@ public class ObservableEhcacheServerEntityService extends ClusterTierManagerServ * * @return an unmodifiable list of {@code ObservableEhcacheActiveEntity} instances */ - public List<ObservableEhcacheActiveEntity> getServedActiveEntities() throws NoSuchFieldException, IllegalAccessException { + public List<ObservableEhcacheActiveEntity> getServedActiveEntities() { return Collections.unmodifiableList(servedActiveEntities); } @@ -114,7 +112,7 @@ public void disconnected(ClientDescriptor clientDescriptor) { } @Override - public EhcacheEntityResponse invokeActive(ActiveInvokeContext invokeContext, EhcacheEntityMessage message) { + public EhcacheEntityResponse invokeActive(ActiveInvokeContext<EhcacheEntityResponse> invokeContext, EhcacheEntityMessage message) { return
activeEntity.invokeActive(invokeContext, message); } diff --git a/clustered/client/src/test/java/org/ehcache/clustered/util/ThrowingResilienceStrategy.java b/clustered/client/src/test/java/org/ehcache/clustered/util/ThrowingResilienceStrategy.java new file mode 100644 index 0000000000..0708ff3934 --- /dev/null +++ b/clustered/client/src/test/java/org/ehcache/clustered/util/ThrowingResilienceStrategy.java @@ -0,0 +1,89 @@ +/* + * Copyright Terracotta, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.ehcache.clustered.util; + +import org.ehcache.Cache; +import org.ehcache.spi.resilience.ResilienceStrategy; +import org.ehcache.spi.resilience.StoreAccessException; + +import java.util.Map; + +public class ThrowingResilienceStrategy implements ResilienceStrategy<Long, String> { + @Override + public String getFailure(Long key, StoreAccessException e) { + throw new AssertionError("Cache op failed for key " + key, e); + } + + @Override + public boolean containsKeyFailure(Long key, StoreAccessException e) { + throw new AssertionError("Cache op failed for key " + key, e); + } + + @Override + public void putFailure(Long key, String value, StoreAccessException e) { + throw new AssertionError("Cache op failed for key " + key, e); + } + + @Override + public void removeFailure(Long key, StoreAccessException e) { + throw new AssertionError("Cache op failed for key " + key, e); + } + + @Override + public void clearFailure(StoreAccessException e) { + throw new AssertionError("Cache op failed", e); + } + + @Override + public Cache.Entry<Long, String> iteratorFailure(StoreAccessException e) { + throw new AssertionError("Cache op failed", e); + } + + @Override + public String putIfAbsentFailure(Long key, String value, StoreAccessException e) { + throw new AssertionError("Cache op failed for key " + key, e); + } + + @Override + public boolean removeFailure(Long key, String value, StoreAccessException e) { + throw new AssertionError("Cache op failed for key " + key, e); + } + + @Override + public String replaceFailure(Long key, String value, StoreAccessException e) { + throw new AssertionError("Cache op failed for key " + key, e); + } + + @Override + public boolean replaceFailure(Long key, String value, String newValue, StoreAccessException e) { + throw new AssertionError("Cache op failed for key " + key, e); + } + + @Override + public Map<Long, String> getAllFailure(Iterable<? extends Long> keys, StoreAccessException e) { + throw new AssertionError("Cache op failed", e); + } + + @Override + public void putAllFailure(Map<? extends Long, ? extends String> entries, StoreAccessException e) { + throw new AssertionError("Cache op failed", e); + } + + @Override + public void removeAllFailure(Iterable<? extends Long> keys, StoreAccessException e) { + throw new AssertionError("Cache op failed", e); + } +} diff --git a/clustered/client/src/test/resources/configs/clustered-cache.xml b/clustered/client/src/test/resources/configs/clustered-cache.xml new file mode 100644 index 0000000000..4b065bd150 --- /dev/null +++ b/clustered/client/src/test/resources/configs/clustered-cache.xml @@ -0,0 +1,71 @@ + +
+ + + + + + + 5 + 5 + 150 + + + 8388608 + + + + + + java.lang.Long + java.lang.String + + + + + 12 + + + + + + java.lang.Long + java.lang.String + + + + + + + + + + + java.lang.Long + java.lang.String + + + + + 12 + + + + + + diff --git a/clustered/client/src/test/resources/configs/simple-cluster.xml b/clustered/client/src/test/resources/configs/simple-cluster.xml index d1bb120854..429e80b9fe 100644 --- a/clustered/client/src/test/resources/configs/simple-cluster.xml +++ b/clustered/client/src/test/resources/configs/simple-cluster.xml @@ -33,7 +33,7 @@ java.lang.Long java.lang.String - 16 + 8 diff --git a/clustered/clustered-dist/build.gradle b/clustered/clustered-dist/build.gradle index 26882348c6..f49a141127 100644 --- a/clustered/clustered-dist/build.gradle +++ b/clustered/clustered-dist/build.gradle @@ -45,6 +45,7 @@ configurations { dependencies { compileOnly "org.terracotta.internal:client-runtime:$terracottaCoreVersion" + compileOnly "org.terracotta:lease-api:$terracottaPlatformVersion" serverLibs(project(':clustered:server')) { exclude group: 'org.terracotta', module: 'entity-server-api' @@ -56,7 +57,9 @@ dependencies { // Needed because declared as provided in the different projects serverLibs "org.terracotta:runnel:$parent.terracottaPlatformVersion" - kit "org.terracotta.internal:terracotta-kit:$terracottaCoreVersion@zip" + serverLibs "org.terracotta:lease-entity-server:$parent.terracottaPlatformVersion" + + kit "org.terracotta.internal:terracotta-kit:$terracottaCoreVersion@tar.gz" shadowCompile "org.slf4j:slf4j-api:$parent.slf4jVersion" pomOnlyCompile "org.ehcache:ehcache:$parent.baseVersion" @@ -74,7 +77,7 @@ distributions { contents { //tc kit into ('') { - from configurations.kit.files.collect { zipTree(it) } + from configurations.kit.files.collect { tarTree(it) } eachFile { f -> // remove top level directory from the kit f.path = f.path.replace("terracotta-$terracottaCoreVersion/", "") diff --git a/clustered/common/build.gradle b/clustered/common/build.gradle index 22b22a1c69..7698facc9a 100644 --- a/clustered/common/build.gradle +++ b/clustered/common/build.gradle @@ -17,11 +17,7 @@ apply plugin: EhDeploy dependencies { - compile "org.slf4j:slf4j-api:$slf4jVersion" - provided "org.terracotta:entity-common-api:$terracottaApisVersion" - provided "org.terracotta:runnel:$terracottaPlatformVersion" -} - -tasks.withType(JavaCompile) { - options.compilerArgs += ['-Werror'] + providedImplementation project(':api') + providedImplementation "org.terracotta:entity-common-api:$terracottaApisVersion" + providedImplementation "org.terracotta:runnel:$terracottaPlatformVersion" } diff --git a/clustered/common/src/main/java/org/ehcache/clustered/common/PoolAllocation.java b/clustered/common/src/main/java/org/ehcache/clustered/common/PoolAllocation.java index 9a60d37aaa..6917c5b099 100644 --- a/clustered/common/src/main/java/org/ehcache/clustered/common/PoolAllocation.java +++ b/clustered/common/src/main/java/org/ehcache/clustered/common/PoolAllocation.java @@ -61,6 +61,7 @@ public Dedicated(String resourceName, long size) { * * @return the dedicated allocation size */ + @Override public long getSize() { return size; } @@ -71,6 +72,7 @@ public long getSize() { * * @return the server-side resource name */ + @Override public String getResourceName() { return resourceName; } @@ -117,6 +119,7 @@ public Shared(String resourcePoolName) { * * @return the server-side resource pool name */ + @Override public String getResourcePoolName() { return resourcePoolName; } diff --git 
a/clustered/common/src/main/java/org/ehcache/clustered/common/internal/ServerStoreConfiguration.java b/clustered/common/src/main/java/org/ehcache/clustered/common/internal/ServerStoreConfiguration.java index 2ce6ad708f..0779729877 100644 --- a/clustered/common/src/main/java/org/ehcache/clustered/common/internal/ServerStoreConfiguration.java +++ b/clustered/common/src/main/java/org/ehcache/clustered/common/internal/ServerStoreConfiguration.java @@ -37,20 +37,37 @@ public class ServerStoreConfiguration implements Serializable { private final String keySerializerType; private final String valueSerializerType; private final Consistency consistency; - // TODO: Loader/Writer configuration ... + private final boolean loaderWriterConfigured; + private final boolean writeBehindConfigured; public ServerStoreConfiguration(PoolAllocation poolAllocation, String storedKeyType, String storedValueType, String keySerializerType, String valueSerializerType, - Consistency consistency) { + Consistency consistency, + boolean loaderWriterConfigured) { + this(poolAllocation, storedKeyType, storedValueType, keySerializerType, valueSerializerType, consistency, + loaderWriterConfigured, false); + } + + + public ServerStoreConfiguration(PoolAllocation poolAllocation, + String storedKeyType, + String storedValueType, + String keySerializerType, + String valueSerializerType, + Consistency consistency, + boolean loaderWriterConfigured, + boolean writeBehindConfigured) { this.poolAllocation = poolAllocation; this.storedKeyType = storedKeyType; this.storedValueType = storedValueType; this.keySerializerType = keySerializerType; this.valueSerializerType = valueSerializerType; this.consistency = consistency; + this.loaderWriterConfigured = loaderWriterConfigured; + this.writeBehindConfigured = writeBehindConfigured; } public PoolAllocation getPoolAllocation() { @@ -77,6 +94,14 @@ public Consistency getConsistency() { return consistency; } + public boolean isLoaderWriterConfigured() { + return loaderWriterConfigured; + } + + public boolean isWriteBehindConfigured() { + return writeBehindConfigured; + } + public boolean isCompatible(ServerStoreConfiguration otherConfiguration, StringBuilder sb) { boolean isCompatible = true; @@ -86,6 +111,8 @@ public boolean isCompatible(ServerStoreConfiguration otherConfiguration, StringB isCompatible = isCompatible && compareField(sb, "valueSerializerType", valueSerializerType, otherConfiguration.getValueSerializerType()); isCompatible = isCompatible && compareConsistencyField(sb, consistency, otherConfiguration.getConsistency()); isCompatible = isCompatible && comparePoolAllocation(sb, otherConfiguration.getPoolAllocation()); + isCompatible = isCompatible && (otherConfiguration.isLoaderWriterConfigured() == loaderWriterConfigured); + isCompatible = isCompatible && (otherConfiguration.isWriteBehindConfigured() == writeBehindConfigured); return isCompatible; } diff --git a/clustered/common/src/main/java/org/ehcache/clustered/common/internal/exceptions/DestroyInProgressException.java b/clustered/common/src/main/java/org/ehcache/clustered/common/internal/exceptions/DestroyInProgressException.java index 8a183d81d5..afe0e5281a 100644 --- a/clustered/common/src/main/java/org/ehcache/clustered/common/internal/exceptions/DestroyInProgressException.java +++ b/clustered/common/src/main/java/org/ehcache/clustered/common/internal/exceptions/DestroyInProgressException.java @@ -21,6 +21,8 @@ */ public class DestroyInProgressException extends LifecycleException { + private static final long 
serialVersionUID = 1917543049279158303L; + public DestroyInProgressException(String message) { super(message); } diff --git a/clustered/common/src/main/java/org/ehcache/clustered/common/internal/exceptions/UnknownClusterException.java b/clustered/common/src/main/java/org/ehcache/clustered/common/internal/exceptions/UnknownClusterException.java index da21efeddb..14746b151f 100644 --- a/clustered/common/src/main/java/org/ehcache/clustered/common/internal/exceptions/UnknownClusterException.java +++ b/clustered/common/src/main/java/org/ehcache/clustered/common/internal/exceptions/UnknownClusterException.java @@ -16,7 +16,9 @@ package org.ehcache.clustered.common.internal.exceptions; -public class UnknownClusterException extends ClusterException{ +public class UnknownClusterException extends ClusterException { + + private static final long serialVersionUID = -2612856483315331382L; public UnknownClusterException(String message) { super(message); diff --git a/clustered/common/src/main/java/org/ehcache/clustered/common/internal/lock/LockMessaging.java b/clustered/common/src/main/java/org/ehcache/clustered/common/internal/lock/LockMessaging.java index 544edee519..8f5b23d4f3 100644 --- a/clustered/common/src/main/java/org/ehcache/clustered/common/internal/lock/LockMessaging.java +++ b/clustered/common/src/main/java/org/ehcache/clustered/common/internal/lock/LockMessaging.java @@ -25,7 +25,7 @@ public class LockMessaging { private static final MessageCodec<LockOperation, LockTransition> CODEC = new MessageCodec<LockOperation, LockTransition>() { @Override - public byte[] encodeMessage(LockOperation message) throws MessageCodecException { + public byte[] encodeMessage(LockOperation message) { return new byte[] { (byte) message.getOperation().ordinal(), (byte) message.getHoldType().ordinal() @@ -33,12 +33,12 @@ public byte[] encodeMessage(LockOperation message) throws MessageCodecException } @Override - public LockOperation decodeMessage(byte[] bytes) throws MessageCodecException { + public LockOperation decodeMessage(byte[] bytes) { return new LockOperation(Operation.values()[bytes[0]], HoldType.values()[bytes[1]]); } @Override - public byte[] encodeResponse(LockTransition response) throws MessageCodecException { + public byte[] encodeResponse(LockTransition response) { if (response.isAcquired()) { return new byte[] {0x00}; } else if (response.isReleased()) { @@ -126,10 +126,10 @@ public boolean isReleased() { } public enum HoldType { - WRITE, READ; + WRITE, READ } public enum Operation { - ACQUIRE, TRY_ACQUIRE, RELEASE; + ACQUIRE, TRY_ACQUIRE, RELEASE } } diff --git a/clustered/common/src/main/java/org/ehcache/clustered/common/internal/messages/ChainCodec.java b/clustered/common/src/main/java/org/ehcache/clustered/common/internal/messages/ChainCodec.java index d99391c882..77f6b62852 100644 --- a/clustered/common/src/main/java/org/ehcache/clustered/common/internal/messages/ChainCodec.java +++ b/clustered/common/src/main/java/org/ehcache/clustered/common/internal/messages/ChainCodec.java @@ -26,7 +26,6 @@ import org.terracotta.runnel.decoding.StructDecoder; import org.terracotta.runnel.encoding.StructArrayEncoder; import org.terracotta.runnel.encoding.StructEncoder; -import org.terracotta.runnel.encoding.StructEncoderFunction; import java.nio.ByteBuffer; import java.util.ArrayList; diff --git a/clustered/common/src/main/java/org/ehcache/clustered/common/internal/messages/ClusterTierReconnectMessage.java b/clustered/common/src/main/java/org/ehcache/clustered/common/internal/messages/ClusterTierReconnectMessage.java index d1445a4a0b..49315a99d4 100644 ---
a/clustered/common/src/main/java/org/ehcache/clustered/common/internal/messages/ClusterTierReconnectMessage.java +++ b/clustered/common/src/main/java/org/ehcache/clustered/common/internal/messages/ClusterTierReconnectMessage.java @@ -23,19 +23,27 @@ public class ClusterTierReconnectMessage { private final Set<Long> hashInvalidationsInProgress; private boolean clearInProgress = false; + private final Set<Long> locksHeld; public ClusterTierReconnectMessage() { hashInvalidationsInProgress = new HashSet<>(); + locksHeld = new HashSet<>(); } - public ClusterTierReconnectMessage(Set<Long> hashInvalidationsInProgress) { + public ClusterTierReconnectMessage(Set<Long> hashInvalidationsInProgress, Set<Long> locksHeld, boolean clearInProgress) { this.hashInvalidationsInProgress = hashInvalidationsInProgress; + this.locksHeld = locksHeld; + this.clearInProgress = clearInProgress; } public void addInvalidationsInProgress(Set<Long> hashInvalidationsInProgress) { this.hashInvalidationsInProgress.addAll(hashInvalidationsInProgress); } + public void addLocksHeld(Set<Long> locksHeld) { + this.locksHeld.addAll(locksHeld); + } + public Set<Long> getInvalidationsInProgress() { return hashInvalidationsInProgress; } @@ -48,4 +56,7 @@ public boolean isClearInProgress() { return clearInProgress; } + public Set<Long> getLocksHeld() { + return locksHeld; + } } diff --git a/clustered/common/src/main/java/org/ehcache/clustered/common/internal/messages/CommonConfigCodec.java b/clustered/common/src/main/java/org/ehcache/clustered/common/internal/messages/CommonConfigCodec.java index b03c732fc8..0edc019597 100.644 --- a/clustered/common/src/main/java/org/ehcache/clustered/common/internal/messages/CommonConfigCodec.java +++ b/clustered/common/src/main/java/org/ehcache/clustered/common/internal/messages/CommonConfigCodec.java @@ -49,11 +49,13 @@ public class CommonConfigCodec implements ConfigCodec { private static final String STORE_CONFIG_VALUE_TYPE_FIELD = "valueType"; private static final String STORE_CONFIG_VALUE_SERIALIZER_TYPE_FIELD = "valueSerializerType"; private static final String STORE_CONFIG_CONSISTENCY_FIELD = "consistency"; - private static final String POOL_SIZE_FIELD = "poolSize"; - private static final String POOL_RESOURCE_NAME_FIELD = "resourceName"; + public static final String POOL_SIZE_FIELD = "poolSize"; + public static final String POOL_RESOURCE_NAME_FIELD = "resourceName"; private static final String DEFAULT_RESOURCE_FIELD = "defaultResource"; private static final String POOLS_SUB_STRUCT = "pools"; private static final String POOL_NAME_FIELD = "poolName"; + private static final String LOADER_WRITER_CONFIGURED_FIELD = "loaderWriterConfigured"; + private static final String WRITE_BEHIND_CONFIGURED_FIELD = "writeBehindConfigured"; private static final EnumMapping<Consistency> CONSISTENCY_ENUM_MAPPING = newEnumMappingBuilder(Consistency.class) .mapping(Consistency.EVENTUAL, 1) @@ -67,18 +69,25 @@ public class CommonConfigCodec implements ConfigCodec { @Override public InjectTuple injectServerStoreConfiguration(StructBuilder baseBuilder, final int index) { + //this needs to be returned whenever the index for builder is changed, so that + //other injecting places get the correct last index for adding structs to codec + int lastIndexToReturn = index + 30; final StructBuilder structBuilder = baseBuilder.string(STORE_CONFIG_KEY_TYPE_FIELD, index) .string(STORE_CONFIG_KEY_SERIALIZER_TYPE_FIELD, index + 10) .string(STORE_CONFIG_VALUE_TYPE_FIELD, index + 11) .string(STORE_CONFIG_VALUE_SERIALIZER_TYPE_FIELD, index + 15) .enm(STORE_CONFIG_CONSISTENCY_FIELD, index + 16,
CONSISTENCY_ENUM_MAPPING) + .bool(LOADER_WRITER_CONFIGURED_FIELD, index + 17) + .bool(WRITE_BEHIND_CONFIGURED_FIELD, index + 18) + // keep poolsize and resource name last .int64(POOL_SIZE_FIELD, index + 20) - .string(POOL_RESOURCE_NAME_FIELD, index + 30); + .string(POOL_RESOURCE_NAME_FIELD, lastIndexToReturn); + return new InjectTuple() { @Override public int getLastIndex() { - return index + 30; + return lastIndexToReturn; } @Override @@ -116,6 +125,9 @@ public void encodeServerStoreConfiguration(PrimitiveEncodingSupport<?> encoder, encoder.enm(STORE_CONFIG_CONSISTENCY_FIELD, configuration.getConsistency()); } + encoder.bool(LOADER_WRITER_CONFIGURED_FIELD, configuration.isLoaderWriterConfigured()); + encoder.bool(WRITE_BEHIND_CONFIGURED_FIELD, configuration.isWriteBehindConfigured()); + PoolAllocation poolAllocation = configuration.getPoolAllocation(); if (poolAllocation instanceof PoolAllocation.Dedicated) { PoolAllocation.Dedicated dedicatedPool = (PoolAllocation.Dedicated) poolAllocation; @@ -139,6 +151,9 @@ public ServerStoreConfiguration decodeServerStoreConfiguration(PrimitiveDecoding if (consistencyEnm.isValid()) { consistency = consistencyEnm.get(); } + Boolean loaderWriterConfigured = decoder.bool(LOADER_WRITER_CONFIGURED_FIELD); + Boolean writeBehindConfigured = decoder.bool(WRITE_BEHIND_CONFIGURED_FIELD); + Long poolSize = decoder.int64(POOL_SIZE_FIELD); String poolResource = decoder.string(POOL_RESOURCE_NAME_FIELD); PoolAllocation poolAllocation = new PoolAllocation.Unknown(); @@ -147,7 +162,9 @@ public ServerStoreConfiguration decodeServerStoreConfiguration(PrimitiveDecoding } else if (poolResource != null) { poolAllocation = new PoolAllocation.Shared(poolResource); } - return new ServerStoreConfiguration(poolAllocation, keyType, valueType, keySerializer, valueSerializer, consistency); + + return new ServerStoreConfiguration(poolAllocation, keyType, valueType, keySerializer, valueSerializer, consistency, + loaderWriterConfigured, writeBehindConfigured); } @Override diff --git a/clustered/common/src/main/java/org/ehcache/clustered/common/internal/messages/EhcacheCodec.java b/clustered/common/src/main/java/org/ehcache/clustered/common/internal/messages/EhcacheCodec.java index 0bd585dcdc..69c2c7c879 100644 --- a/clustered/common/src/main/java/org/ehcache/clustered/common/internal/messages/EhcacheCodec.java +++ b/clustered/common/src/main/java/org/ehcache/clustered/common/internal/messages/EhcacheCodec.java @@ -70,7 +70,7 @@ public byte[] encodeMessage(EhcacheEntityMessage message) { } @Override - public EhcacheEntityMessage decodeMessage(byte[] payload) throws MessageCodecException { + public EhcacheEntityMessage decodeMessage(byte[] payload) { ByteBuffer byteBuffer = wrap(payload); Enm<EhcacheMessageType> opCodeEnm = OP_CODE_DECODER.decoder(byteBuffer).enm("opCode"); @@ -101,12 +101,12 @@ public EhcacheEntityMessage decodeMessage(ByteBuffer byteBuffer, EhcacheMessageT } @Override - public byte[] encodeResponse(EhcacheEntityResponse response) throws MessageCodecException { + public byte[] encodeResponse(EhcacheEntityResponse response) { return responseCodec.encode(response); } @Override - public EhcacheEntityResponse decodeResponse(byte[] payload) throws MessageCodecException { + public EhcacheEntityResponse decodeResponse(byte[] payload) { return responseCodec.decode(payload); } } diff --git a/clustered/common/src/main/java/org/ehcache/clustered/common/internal/messages/EhcacheEntityResponse.java b/clustered/common/src/main/java/org/ehcache/clustered/common/internal/messages/EhcacheEntityResponse.java index 9e9e20b892..99da5edee5 100644 --- a/clustered/common/src/main/java/org/ehcache/clustered/common/internal/messages/EhcacheEntityResponse.java +++ b/clustered/common/src/main/java/org/ehcache/clustered/common/internal/messages/EhcacheEntityResponse.java @@ -265,4 +265,42 @@ public Chain getChain() { return chain; } } + + public static LockSuccess lockSuccess(Chain chain) { + return new LockSuccess(chain); + } + + public static class LockSuccess extends EhcacheEntityResponse { + + private final Chain chain; + + LockSuccess(Chain chain) { + this.chain = chain; + } + + public Chain getChain() { + return chain; + } + + @Override + public EhcacheResponseType getResponseType() { + return EhcacheResponseType.LOCK_SUCCESS; + } + } + + public static LockFailure lockFailure() { + return new LockFailure(); + } + + public static class LockFailure extends EhcacheEntityResponse { + + private LockFailure() { + + } + + @Override + public EhcacheResponseType getResponseType() { + return EhcacheResponseType.LOCK_FAILURE; + } + } }
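(Illustrative sketch, not code from this changeset: a server-side handler for the new LOCK message would be expected to answer with one of these two responses. Only the lockSuccess/lockFailure factories come from this diff; the handler shape is assumed.)

EhcacheEntityResponse respondToLock(Chain chainAtHash, boolean acquired) {
  // on success the current chain is shipped back so the client can resolve it under the lock
  return acquired ? EhcacheEntityResponse.lockSuccess(chainAtHash) : EhcacheEntityResponse.lockFailure();
}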
diff --git a/clustered/common/src/main/java/org/ehcache/clustered/common/internal/messages/EhcacheMessageType.java b/clustered/common/src/main/java/org/ehcache/clustered/common/internal/messages/EhcacheMessageType.java index cc32993831..f05d542dc3 100644 --- a/clustered/common/src/main/java/org/ehcache/clustered/common/internal/messages/EhcacheMessageType.java +++ b/clustered/common/src/main/java/org/ehcache/clustered/common/internal/messages/EhcacheMessageType.java @@ -42,6 +42,8 @@ public enum EhcacheMessageType { CLIENT_INVALIDATION_ALL_ACK, CLEAR, GET_STORE, + LOCK, + UNLOCK, // StateRepository operation messages GET_STATE_REPO, @@ -67,6 +69,8 @@ public enum EhcacheMessageType { .mapping(CLIENT_INVALIDATION_ALL_ACK, 25) .mapping(CLEAR, 26) .mapping(GET_STORE, 27) + .mapping(LOCK, 28) + .mapping(UNLOCK, 29) .mapping(GET_STATE_REPO, 41) .mapping(PUT_IF_ABSENT, 42) @@ -82,7 +86,8 @@ public static boolean isLifecycleMessage(EhcacheMessageType value) { return LIFECYCLE_MESSAGES.contains(value); } - public static final EnumSet<EhcacheMessageType> STORE_OPERATION_MESSAGES = of(GET_AND_APPEND, APPEND, REPLACE, CLIENT_INVALIDATION_ACK, CLIENT_INVALIDATION_ALL_ACK, CLEAR, GET_STORE); + public static final EnumSet<EhcacheMessageType> STORE_OPERATION_MESSAGES = of(GET_AND_APPEND, APPEND, + REPLACE, CLIENT_INVALIDATION_ACK, CLIENT_INVALIDATION_ALL_ACK, CLEAR, GET_STORE, LOCK, UNLOCK); public static boolean isStoreOperationMessage(EhcacheMessageType value) { return STORE_OPERATION_MESSAGES.contains(value); } diff --git a/clustered/common/src/main/java/org/ehcache/clustered/common/internal/messages/EhcacheResponseType.java b/clustered/common/src/main/java/org/ehcache/clustered/common/internal/messages/EhcacheResponseType.java index e475702143..fe4804e3e8 100644 --- a/clustered/common/src/main/java/org/ehcache/clustered/common/internal/messages/EhcacheResponseType.java +++ b/clustered/common/src/main/java/org/ehcache/clustered/common/internal/messages/EhcacheResponseType.java @@ -34,7 +34,9 @@ public enum EhcacheResponseType { MAP_VALUE, ALL_INVALIDATION_DONE, PREPARE_FOR_DESTROY, - RESOLVE_REQUEST; + RESOLVE_REQUEST, + LOCK_SUCCESS, + LOCK_FAILURE; public static final String RESPONSE_TYPE_FIELD_NAME = "opCode"; @@ -51,5 +53,7 @@ public enum EhcacheResponseType { .mapping(EhcacheResponseType.MAP_VALUE, 88) .mapping(EhcacheResponseType.PREPARE_FOR_DESTROY, 89) .mapping(EhcacheResponseType.RESOLVE_REQUEST, 90) + .mapping(EhcacheResponseType.LOCK_SUCCESS, 91) + .mapping(EhcacheResponseType.LOCK_FAILURE, 92) .build(); } diff --git a/clustered/common/src/main/java/org/ehcache/clustered/common/internal/messages/EntityConfigurationCodec.java b/clustered/common/src/main/java/org/ehcache/clustered/common/internal/messages/EntityConfigurationCodec.java index 150457fbbc..b5e6f970d3 100644 --- a/clustered/common/src/main/java/org/ehcache/clustered/common/internal/messages/EntityConfigurationCodec.java +++ b/clustered/common/src/main/java/org/ehcache/clustered/common/internal/messages/EntityConfigurationCodec.java @@ -36,28 +36,31 @@ public class EntityConfigurationCodec { private static final String IDENTIFIER = "identifier"; - private final StructBuilder tierManagerConfigurationBaseStruct = newStructBuilder() - .string(IDENTIFIER, 10); - private final StructBuilder clusteredStoreConfigurationBaseStruct = newStructBuilder() - .string(IDENTIFIER, 10) - .string(SERVER_STORE_NAME_FIELD, 20); - private final ConfigCodec configCodec; private final Struct tierManagerConfigurationStruct; private final Struct clusteredStoreConfigurationStruct; public EntityConfigurationCodec(ConfigCodec configCodec) { this.configCodec = configCodec; + + StructBuilder tierManagerConfigurationBaseStruct = newStructBuilder() + .string(IDENTIFIER, 10); + tierManagerConfigurationStruct = configCodec.injectServerSideConfiguration(tierManagerConfigurationBaseStruct, 10) .getUpdatedBuilder() .build(); + + StructBuilder clusteredStoreConfigurationBaseStruct = newStructBuilder() + .string(IDENTIFIER, 10) + .string(SERVER_STORE_NAME_FIELD, 20); + clusteredStoreConfigurationStruct = configCodec.injectServerStoreConfiguration(clusteredStoreConfigurationBaseStruct, 30) .getUpdatedBuilder() .build(); } public byte[] encode(ClusterTierEntityConfiguration configuration) { - StructEncoder encoder = clusteredStoreConfigurationStruct.encoder(); + StructEncoder<Void> encoder = clusteredStoreConfigurationStruct.encoder(); encoder.string(IDENTIFIER, configuration.getManagerIdentifier()) .string(SERVER_STORE_NAME_FIELD, configuration.getStoreIdentifier()); configCodec.encodeServerStoreConfiguration(encoder, configuration.getConfiguration()); @@ -65,7 +68,7 @@ public byte[] encode(ClusterTierEntityConfiguration configuration) { } public ClusterTierEntityConfiguration decodeClusteredStoreConfiguration(byte[] configuration) { - StructDecoder decoder = clusteredStoreConfigurationStruct.decoder(wrap(configuration)); + StructDecoder<Void> decoder = clusteredStoreConfigurationStruct.decoder(wrap(configuration)); String managerIdentifier = decoder.string(IDENTIFIER); if (managerIdentifier == null) { throw new IllegalArgumentException("Payload is an invalid content"); @@ -75,14 +78,14 @@ public ClusterTierEntityConfiguration decodeClusteredStoreConfiguration(byte[] c return new ClusterTierEntityConfiguration(managerIdentifier, storeIdentifier, serverStoreConfiguration); } public byte[] encode(ClusterTierManagerConfiguration configuration) { - StructEncoder encoder = tierManagerConfigurationStruct.encoder(); + StructEncoder<Void> encoder = tierManagerConfigurationStruct.encoder(); encoder.string(IDENTIFIER, configuration.getIdentifier()); configCodec.encodeServerSideConfiguration(encoder, configuration.getConfiguration()); return encoder.encode().array(); } public ClusterTierManagerConfiguration decodeClusterTierManagerConfiguration(byte[] payload) { - StructDecoder decoder = tierManagerConfigurationStruct.decoder(wrap(payload)); + StructDecoder<Void> decoder =
tierManagerConfigurationStruct.decoder(wrap(payload)); String identifier = decoder.string(IDENTIFIER); if (identifier == null) { throw new IllegalArgumentException("Payload is an invalid content"); diff --git a/clustered/common/src/main/java/org/ehcache/clustered/common/internal/messages/ExceptionCodec.java b/clustered/common/src/main/java/org/ehcache/clustered/common/internal/messages/ExceptionCodec.java index 7499986ed7..8d4b3dc83d 100644 --- a/clustered/common/src/main/java/org/ehcache/clustered/common/internal/messages/ExceptionCodec.java +++ b/clustered/common/src/main/java/org/ehcache/clustered/common/internal/messages/ExceptionCodec.java @@ -26,7 +26,6 @@ import org.terracotta.runnel.decoding.StructDecoder; import org.terracotta.runnel.encoding.StructArrayEncoder; import org.terracotta.runnel.encoding.StructEncoder; -import org.terracotta.runnel.encoding.StructEncoderFunction; import java.lang.reflect.Constructor; import java.lang.reflect.InvocationTargetException; @@ -92,10 +91,10 @@ public static ClusterException decode(StructDecoder<? extends StructDecoder<?>> decoder element.end(); } arrayDecoder.end(); - Class clazz = null; - ClusterException exception = null; + Class<? extends ClusterException> clazz = null; + ClusterException exception; try { - clazz = Class.forName(exceptionClassName); + clazz = Class.forName(exceptionClassName).asSubclass(ClusterException.class); } catch (ClassNotFoundException e) { LOGGER.error("Exception type not found", e); } @@ -108,12 +107,12 @@ public static ClusterException decode(StructDecoder<? extends StructDecoder<?>> decoder } @SuppressWarnings("unchecked") - private static ClusterException getClusterException(String message, Class clazz) { + private static ClusterException getClusterException(String message, Class<? extends ClusterException> clazz) { ClusterException exception = null; if (clazz != null) { try { - Constructor declaredConstructor = clazz.getDeclaredConstructor(String.class); - exception = (ClusterException)declaredConstructor.newInstance(message); + Constructor<? extends ClusterException> declaredConstructor = clazz.getDeclaredConstructor(String.class); + exception = declaredConstructor.newInstance(message); } catch (NoSuchMethodException | InvocationTargetException | InstantiationException | IllegalAccessException e) { LOGGER.error("Failed to instantiate exception object.", e); } diff --git a/clustered/common/src/main/java/org/ehcache/clustered/common/internal/messages/LifeCycleMessageCodec.java b/clustered/common/src/main/java/org/ehcache/clustered/common/internal/messages/LifeCycleMessageCodec.java index f9204ff2f0..af1c5b5ba9 100644 --- a/clustered/common/src/main/java/org/ehcache/clustered/common/internal/messages/LifeCycleMessageCodec.java +++ b/clustered/common/src/main/java/org/ehcache/clustered/common/internal/messages/LifeCycleMessageCodec.java @@ -35,14 +35,7 @@ public class LifeCycleMessageCodec { private static final String CONFIG_PRESENT_FIELD = "configPresent"; - private final StructBuilder VALIDATE_MESSAGE_STRUCT_BUILDER_PREFIX = newStructBuilder() - .enm(MESSAGE_TYPE_FIELD_NAME, MESSAGE_TYPE_FIELD_INDEX, EHCACHE_MESSAGE_TYPES_ENUM_MAPPING) - .bool(CONFIG_PRESENT_FIELD, 30); private static final int CONFIGURE_MESSAGE_NEXT_INDEX = 40; - - private final StructBuilder VALIDATE_STORE_MESSAGE_STRUCT_BUILDER_PREFIX = newStructBuilder() - .enm(MESSAGE_TYPE_FIELD_NAME, MESSAGE_TYPE_FIELD_INDEX, EHCACHE_MESSAGE_TYPES_ENUM_MAPPING) - .string(SERVER_STORE_NAME_FIELD, 30); private static final int VALIDATE_STORE_NEXT_INDEX = 40; private final Struct PREPARE_FOR_DESTROY_STRUCT = newStructBuilder() @@ -58,11 +51,20 @@ public
LifeCycleMessageCodec(ConfigCodec configCodec) { this.messageCodecUtils = new MessageCodecUtils(); this.configCodec = configCodec; + + StructBuilder validateMessageStructBuilderPrefix = newStructBuilder() + .enm(MESSAGE_TYPE_FIELD_NAME, MESSAGE_TYPE_FIELD_INDEX, EHCACHE_MESSAGE_TYPES_ENUM_MAPPING) + .bool(CONFIG_PRESENT_FIELD, 30); + validateMessageStruct = this.configCodec.injectServerSideConfiguration( - VALIDATE_MESSAGE_STRUCT_BUILDER_PREFIX, CONFIGURE_MESSAGE_NEXT_INDEX).getUpdatedBuilder().build(); + validateMessageStructBuilderPrefix, CONFIGURE_MESSAGE_NEXT_INDEX).getUpdatedBuilder().build(); + + StructBuilder validateStoreMessageStructBuilderPrefix = newStructBuilder() + .enm(MESSAGE_TYPE_FIELD_NAME, MESSAGE_TYPE_FIELD_INDEX, EHCACHE_MESSAGE_TYPES_ENUM_MAPPING) + .string(SERVER_STORE_NAME_FIELD, 30); validateStoreMessageStruct = this.configCodec.injectServerStoreConfiguration( - VALIDATE_STORE_MESSAGE_STRUCT_BUILDER_PREFIX, VALIDATE_STORE_NEXT_INDEX).getUpdatedBuilder().build(); + validateStoreMessageStructBuilderPrefix, VALIDATE_STORE_NEXT_INDEX).getUpdatedBuilder().build(); } public byte[] encode(LifecycleMessage message) { @@ -115,8 +117,9 @@ public EhcacheEntityMessage decode(EhcacheMessageType messageType, ByteBuffer me return decodeValidateServerStoreMessage(messageBuffer); case PREPARE_FOR_DESTROY: return decodePrepareForDestroyMessage(); + default: + throw new IllegalArgumentException("LifeCycleMessage operation not defined for : " + messageType); } - throw new IllegalArgumentException("LifeCycleMessage operation not defined for : " + messageType); } private LifecycleMessage.PrepareForDestroy decodePrepareForDestroyMessage() { diff --git a/clustered/common/src/main/java/org/ehcache/clustered/common/internal/messages/LifecycleMessage.java b/clustered/common/src/main/java/org/ehcache/clustered/common/internal/messages/LifecycleMessage.java index 174800b8f1..b479a02cd8 100644 --- a/clustered/common/src/main/java/org/ehcache/clustered/common/internal/messages/LifecycleMessage.java +++ b/clustered/common/src/main/java/org/ehcache/clustered/common/internal/messages/LifecycleMessage.java @@ -23,8 +23,12 @@ public abstract class LifecycleMessage extends EhcacheOperationMessage implements Serializable { + private static final long serialVersionUID = -5877907682623164227L; + public static class ValidateStoreManager extends LifecycleMessage { + private static final long serialVersionUID = -7459333332357106170L; + private final ServerSideConfiguration configuration; ValidateStoreManager(ServerSideConfiguration config) { @@ -46,6 +50,8 @@ public ServerSideConfiguration getConfiguration() { */ public static class ValidateServerStore extends LifecycleMessage { + private static final long serialVersionUID = -7271460156539083757L; + private final String name; private final ServerStoreConfiguration storeConfiguration; @@ -69,6 +75,9 @@ public EhcacheMessageType getMessageType() { } public static class PrepareForDestroy extends LifecycleMessage { + + private static final long serialVersionUID = -680257947889507297L; + @Override public EhcacheMessageType getMessageType() { return EhcacheMessageType.PREPARE_FOR_DESTROY; diff --git a/clustered/common/src/main/java/org/ehcache/clustered/common/internal/messages/MessageCodecUtils.java b/clustered/common/src/main/java/org/ehcache/clustered/common/internal/messages/MessageCodecUtils.java index 13f0a821e0..0472f0f89c 100644 --- a/clustered/common/src/main/java/org/ehcache/clustered/common/internal/messages/MessageCodecUtils.java +++ 
b/clustered/common/src/main/java/org/ehcache/clustered/common/internal/messages/MessageCodecUtils.java @@ -18,8 +18,6 @@ import org.terracotta.runnel.encoding.StructEncoder; -import java.util.UUID; - /** * MessageCodecUtils */ diff --git a/clustered/common/src/main/java/org/ehcache/clustered/common/internal/messages/ReconnectMessageCodec.java b/clustered/common/src/main/java/org/ehcache/clustered/common/internal/messages/ReconnectMessageCodec.java index 01b2a73f2b..b68a4ef661 100644 --- a/clustered/common/src/main/java/org/ehcache/clustered/common/internal/messages/ReconnectMessageCodec.java +++ b/clustered/common/src/main/java/org/ehcache/clustered/common/internal/messages/ReconnectMessageCodec.java @@ -24,7 +24,6 @@ import java.util.HashSet; import java.util.Set; -import java.util.UUID; import static java.nio.ByteBuffer.wrap; import static org.terracotta.runnel.StructBuilder.newStructBuilder; @@ -33,19 +32,21 @@ public class ReconnectMessageCodec { private static final String HASH_INVALIDATION_IN_PROGRESS_FIELD = "hashInvalidationInProgress"; private static final String CLEAR_IN_PROGRESS_FIELD = "clearInProgress"; + private static final String LOCKS_HELD_FIELD = "locksHeld"; private static final Struct CLUSTER_TIER_RECONNECT_MESSAGE_STRUCT = newStructBuilder() .int64s(HASH_INVALIDATION_IN_PROGRESS_FIELD, 20) .bool(CLEAR_IN_PROGRESS_FIELD, 30) + .int64s(LOCKS_HELD_FIELD, 40) .build(); public byte[] encode(ClusterTierReconnectMessage reconnectMessage) { StructEncoder<Void> encoder = CLUSTER_TIER_RECONNECT_MESSAGE_STRUCT.encoder(); ArrayEncoder<Long, StructEncoder<Void>> arrayEncoder = encoder.int64s(HASH_INVALIDATION_IN_PROGRESS_FIELD); - for (Long hash : reconnectMessage.getInvalidationsInProgress()) { - arrayEncoder.value(hash); - } + reconnectMessage.getInvalidationsInProgress().forEach(arrayEncoder::value); encoder.bool(CLEAR_IN_PROGRESS_FIELD, reconnectMessage.isClearInProgress()); + ArrayEncoder<Long, StructEncoder<Void>> locksHeldEncoder = encoder.int64s(LOCKS_HELD_FIELD); + reconnectMessage.getLocksHeld().forEach(locksHeldEncoder::value); return encoder.encode().array(); }
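(Taken together with the decode hunk that follows, the codec round-trips the new locksHeld field; an illustrative check with hypothetical values, java.util imports assumed:)

ReconnectMessageCodec codec = new ReconnectMessageCodec();
Set<Long> invalidations = new HashSet<>(Arrays.asList(11L, 22L));
Set<Long> heldLocks = new HashSet<>(Collections.singleton(42L));
ClusterTierReconnectMessage decoded = codec.decode(codec.encode(new ClusterTierReconnectMessage(invalidations, heldLocks, true)));
assert decoded.getLocksHeld().contains(42L) && decoded.isClearInProgress();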
clearInProgress : false); + + + return message; } + + private static Set decodeLongs(ArrayDecoder> decoder) { + Set longs; + if (decoder != null) { + longs = new HashSet<>(decoder.length()); + for (int i = 0; i < decoder.length(); i++) { + longs.add(decoder.value()); + } + } else { + longs = new HashSet<>(0); + } + return longs; + } } diff --git a/clustered/common/src/main/java/org/ehcache/clustered/common/internal/messages/ResponseCodec.java b/clustered/common/src/main/java/org/ehcache/clustered/common/internal/messages/ResponseCodec.java index 4e3f69b20a..2230519a53 100644 --- a/clustered/common/src/main/java/org/ehcache/clustered/common/internal/messages/ResponseCodec.java +++ b/clustered/common/src/main/java/org/ehcache/clustered/common/internal/messages/ResponseCodec.java @@ -100,6 +100,10 @@ public class ResponseCodec { .int64(KEY_FIELD, 20) .struct(CHAIN_FIELD, 30, CHAIN_STRUCT) .build(); + private static final Struct LOCK_RESPONSE_STRUCT = newStructBuilder() + .enm(RESPONSE_TYPE_FIELD_NAME, RESPONSE_TYPE_FIELD_INDEX, EHCACHE_RESPONSE_TYPES_ENUM_MAPPING) + .struct(CHAIN_FIELD, 20, CHAIN_STRUCT) + .build(); public byte[] encode(EhcacheEntityResponse response) { switch (response.getResponseType()) { @@ -181,6 +185,19 @@ public byte[] encode(EhcacheEntityResponse response) { .struct(CHAIN_FIELD, resolve.getChain(), ChainCodec::encode) .encode().array(); } + case LOCK_SUCCESS: { + EhcacheEntityResponse.LockSuccess lockSuccess = (EhcacheEntityResponse.LockSuccess) response; + return LOCK_RESPONSE_STRUCT.encoder() + .enm(RESPONSE_TYPE_FIELD_NAME, lockSuccess.getResponseType()) + .struct(CHAIN_FIELD, lockSuccess.getChain(), ChainCodec::encode) + .encode().array(); + } + case LOCK_FAILURE: { + EhcacheEntityResponse.LockFailure lockFailure = (EhcacheEntityResponse.LockFailure) response; + return LOCK_RESPONSE_STRUCT.encoder() + .enm(RESPONSE_TYPE_FIELD_NAME, lockFailure.getResponseType()) + .encode().array(); + } default: throw new UnsupportedOperationException("The operation is not supported : " + response.getResponseType()); } @@ -255,6 +272,14 @@ public EhcacheEntityResponse decode(byte[] payload) { Chain chain = ChainCodec.decode(decoder.struct(CHAIN_FIELD)); return EhcacheEntityResponse.resolveRequest(key, chain); } + case LOCK_SUCCESS: { + decoder = LOCK_RESPONSE_STRUCT.decoder(buffer); + Chain chain = ChainCodec.decode(decoder.struct(CHAIN_FIELD)); + return new EhcacheEntityResponse.LockSuccess(chain); + } + case LOCK_FAILURE: { + return EhcacheEntityResponse.lockFailure(); + } default: throw new UnsupportedOperationException("The operation is not supported with opCode : " + opCode); } diff --git a/clustered/common/src/main/java/org/ehcache/clustered/common/internal/messages/ServerStoreOpCodec.java b/clustered/common/src/main/java/org/ehcache/clustered/common/internal/messages/ServerStoreOpCodec.java index 7b26ae7caf..89cdffb5fa 100644 --- a/clustered/common/src/main/java/org/ehcache/clustered/common/internal/messages/ServerStoreOpCodec.java +++ b/clustered/common/src/main/java/org/ehcache/clustered/common/internal/messages/ServerStoreOpCodec.java @@ -78,6 +78,11 @@ public class ServerStoreOpCodec { .int64(KEY_FIELD, 30) .build(); + private static final Struct LOCK_STRUCT = newStructBuilder() + .enm(MESSAGE_TYPE_FIELD_NAME, MESSAGE_TYPE_FIELD_INDEX, EHCACHE_MESSAGE_TYPES_ENUM_MAPPING) + .int64("hash", 30) + .build(); + private final MessageCodecUtils messageCodecUtils = new MessageCodecUtils(); public byte[] encode(ServerStoreOpMessage message) { @@ -142,6 +147,18 @@ public byte[] 
encode(ServerStoreOpMessage message) { return encoder .encode() .array(); + case LOCK: + encoder = LOCK_STRUCT.encoder(); + return encoder + .enm(MESSAGE_TYPE_FIELD_NAME, message.getMessageType()) + .int64("hash", ((ServerStoreOpMessage.LockMessage) message).getHash()) + .encode().array(); + case UNLOCK: + encoder = LOCK_STRUCT.encoder(); + return encoder + .enm(MESSAGE_TYPE_FIELD_NAME, message.getMessageType()) + .int64("hash", ((ServerStoreOpMessage.UnlockMessage) message).getHash()) + .encode().array(); default: throw new RuntimeException("Unhandled message operation : " + message.getMessageType()); } @@ -189,6 +206,16 @@ public EhcacheEntityMessage decode(EhcacheMessageType opCode, ByteBuffer message case CLEAR: { return new ClearMessage(); } + case LOCK: { + decoder = LOCK_STRUCT.decoder(messageBuffer); + long hash = decoder.int64("hash"); + return new ServerStoreOpMessage.LockMessage(hash); + } + case UNLOCK: { + decoder = LOCK_STRUCT.decoder(messageBuffer); + long hash = decoder.int64("hash"); + return new ServerStoreOpMessage.UnlockMessage(hash); + } default: throw new RuntimeException("Unhandled message operation : " + opCode); } diff --git a/clustered/common/src/main/java/org/ehcache/clustered/common/internal/messages/ServerStoreOpMessage.java b/clustered/common/src/main/java/org/ehcache/clustered/common/internal/messages/ServerStoreOpMessage.java index 6d3db2154f..50c52a8f20 100644 --- a/clustered/common/src/main/java/org/ehcache/clustered/common/internal/messages/ServerStoreOpMessage.java +++ b/clustered/common/src/main/java/org/ehcache/clustered/common/internal/messages/ServerStoreOpMessage.java @@ -166,5 +166,41 @@ public EhcacheMessageType getMessageType() { } } + public static class LockMessage extends ServerStoreOpMessage { + + private final long hash; + + public LockMessage(long hash) { + this.hash = hash; + } + + public long getHash() { + return hash; + } + + @Override + public EhcacheMessageType getMessageType() { + return EhcacheMessageType.LOCK; + } + } + + public static class UnlockMessage extends ServerStoreOpMessage { + + private final long hash; + + public UnlockMessage(long hash) { + this.hash = hash; + } + + public long getHash() { + return hash; + } + + @Override + public EhcacheMessageType getMessageType() { + return EhcacheMessageType.UNLOCK; + } + } + } diff --git a/clustered/common/src/main/java/org/ehcache/clustered/common/internal/messages/StateRepositoryOpMessage.java b/clustered/common/src/main/java/org/ehcache/clustered/common/internal/messages/StateRepositoryOpMessage.java index 203f874985..953ac5c7fa 100644 --- a/clustered/common/src/main/java/org/ehcache/clustered/common/internal/messages/StateRepositoryOpMessage.java +++ b/clustered/common/src/main/java/org/ehcache/clustered/common/internal/messages/StateRepositoryOpMessage.java @@ -20,6 +20,8 @@ public abstract class StateRepositoryOpMessage extends EhcacheOperationMessage implements Serializable { + private static final long serialVersionUID = -6701802926010996981L; + private final String cacheId; private final String mapId; @@ -38,6 +40,8 @@ public String getMapId() { private static abstract class KeyBasedMessage extends StateRepositoryOpMessage { + private static final long serialVersionUID = 2338704755924839309L; + private final Object key; private KeyBasedMessage(final String cacheId, final String mapId, final Object key) { @@ -53,6 +57,8 @@ public Object getKey() { public static class GetMessage extends KeyBasedMessage { + private static final long serialVersionUID = 7263513962868446470L; + 
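The new LOCK and UNLOCK operations round-trip through ServerStoreOpCodec like the other server-store messages: the message type enum is written into the struct, and decode() is handed the type separately along with the full encoded buffer. A minimal sketch of that round trip, mirroring the codec tests further below (the wrapping class is illustrative only, not part of the patch):

```java
import static java.nio.ByteBuffer.wrap;

import org.ehcache.clustered.common.internal.messages.EhcacheEntityMessage;
import org.ehcache.clustered.common.internal.messages.ServerStoreOpCodec;
import org.ehcache.clustered.common.internal.messages.ServerStoreOpMessage;

class LockMessageRoundTrip {
  static long roundTrip(long hash) {
    ServerStoreOpCodec codec = new ServerStoreOpCodec();
    ServerStoreOpMessage.LockMessage lock = new ServerStoreOpMessage.LockMessage(hash);
    // decode() receives the message type out of band and re-reads the struct
    // from the start of the buffer (the enum field is skipped by field index).
    EhcacheEntityMessage decoded = codec.decode(lock.getMessageType(), wrap(codec.encode(lock)));
    return ((ServerStoreOpMessage.LockMessage) decoded).getHash();
  }
}
```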
public GetMessage(final String cacheId, final String mapId, final Object key) { super(cacheId, mapId, key); } @@ -65,6 +71,8 @@ public EhcacheMessageType getMessageType() { public static class PutIfAbsentMessage extends KeyBasedMessage { + private static final long serialVersionUID = 2743653481411126124L; + private final Object value; public PutIfAbsentMessage(final String cacheId, final String mapId, final Object key, final Object value) { @@ -84,6 +92,8 @@ public EhcacheMessageType getMessageType() { public static class EntrySetMessage extends StateRepositoryOpMessage { + private static final long serialVersionUID = 5230634750732779978L; + public EntrySetMessage(final String cacheId, final String mapId) { super(cacheId, mapId); } diff --git a/clustered/common/src/main/java/org/ehcache/clustered/common/internal/store/Util.java b/clustered/common/src/main/java/org/ehcache/clustered/common/internal/store/Util.java index 9d177130ec..42c14ba6df 100644 --- a/clustered/common/src/main/java/org/ehcache/clustered/common/internal/store/Util.java +++ b/clustered/common/src/main/java/org/ehcache/clustered/common/internal/store/Util.java @@ -166,4 +166,26 @@ public static byte[] marshall(Object message) { } return out.toByteArray(); } + + public static final Chain EMPTY_CHAIN = new Chain() { + @Override + public Iterator reverseIterator() { + return Collections.emptyList().iterator(); + } + + @Override + public boolean isEmpty() { + return true; + } + + @Override + public int length() { + return 0; + } + + @Override + public Iterator iterator() { + return Collections.emptyList().iterator(); + } + }; } diff --git a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/operations/BaseKeyValueOperation.java b/clustered/common/src/main/java/org/ehcache/clustered/common/internal/store/operations/BaseKeyValueOperation.java similarity index 96% rename from clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/operations/BaseKeyValueOperation.java rename to clustered/common/src/main/java/org/ehcache/clustered/common/internal/store/operations/BaseKeyValueOperation.java index 0655372836..46db028c7f 100644 --- a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/operations/BaseKeyValueOperation.java +++ b/clustered/common/src/main/java/org/ehcache/clustered/common/internal/store/operations/BaseKeyValueOperation.java @@ -14,9 +14,9 @@ * limitations under the License. 
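Util.EMPTY_CHAIN above is a shared null object: an always-empty, immutable Chain that callers can hand out instead of null or a fresh allocation per response. A usage sketch under that reading (the helper below is hypothetical, not in the patch):

```java
import org.ehcache.clustered.common.internal.store.Chain;
import org.ehcache.clustered.common.internal.store.Util;

final class ChainDefaults {
  private ChainDefaults() {}

  // Normalise "no mapping" to the shared empty chain so downstream code can
  // iterate without null checks and without allocating a new empty Chain.
  static Chain orEmpty(Chain chain) {
    return chain == null ? Util.EMPTY_CHAIN : chain;
  }
}
```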
*/ -package org.ehcache.clustered.client.internal.store.operations; +package org.ehcache.clustered.common.internal.store.operations; -import org.ehcache.clustered.client.internal.store.operations.codecs.CodecException; +import org.ehcache.clustered.common.internal.store.operations.codecs.CodecException; import org.ehcache.spi.serialization.Serializer; import java.nio.ByteBuffer; @@ -123,7 +123,7 @@ public boolean equals(final Object obj) { return false; } - BaseKeyValueOperation other = (BaseKeyValueOperation) obj; + BaseKeyValueOperation other = (BaseKeyValueOperation) obj; if(this.getOpCode() != other.getOpCode()) { return false; } diff --git a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/operations/ConditionalRemoveOperation.java b/clustered/common/src/main/java/org/ehcache/clustered/common/internal/store/operations/ConditionalRemoveOperation.java similarity index 89% rename from clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/operations/ConditionalRemoveOperation.java rename to clustered/common/src/main/java/org/ehcache/clustered/common/internal/store/operations/ConditionalRemoveOperation.java index 21186246fa..780003584f 100644 --- a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/operations/ConditionalRemoveOperation.java +++ b/clustered/common/src/main/java/org/ehcache/clustered/common/internal/store/operations/ConditionalRemoveOperation.java @@ -14,7 +14,7 @@ * limitations under the License. */ -package org.ehcache.clustered.client.internal.store.operations; +package org.ehcache.clustered.common.internal.store.operations; import org.ehcache.spi.serialization.Serializer; @@ -37,10 +37,10 @@ public OperationCode getOpCode() { @Override public Result apply(final Result previousOperation) { - if(previousOperation == null) { + if (previousOperation == null) { return null; } else { - if(getValue().equals(previousOperation.getValue())) { + if (getValue().equals(previousOperation.getValue())) { return null; } else { return previousOperation; diff --git a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/operations/ConditionalReplaceOperation.java b/clustered/common/src/main/java/org/ehcache/clustered/common/internal/store/operations/ConditionalReplaceOperation.java similarity index 96% rename from clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/operations/ConditionalReplaceOperation.java rename to clustered/common/src/main/java/org/ehcache/clustered/common/internal/store/operations/ConditionalReplaceOperation.java index cd2a101fe7..e38cf833b0 100644 --- a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/operations/ConditionalReplaceOperation.java +++ b/clustered/common/src/main/java/org/ehcache/clustered/common/internal/store/operations/ConditionalReplaceOperation.java @@ -14,14 +14,14 @@ * limitations under the License. 
*/ -package org.ehcache.clustered.client.internal.store.operations; +package org.ehcache.clustered.common.internal.store.operations; -import org.ehcache.clustered.client.internal.store.operations.codecs.CodecException; +import org.ehcache.clustered.common.internal.store.operations.codecs.CodecException; import org.ehcache.spi.serialization.Serializer; import java.nio.ByteBuffer; -import static org.ehcache.clustered.client.internal.store.operations.OperationCode.REPLACE_CONDITIONAL; +import static org.ehcache.clustered.common.internal.store.operations.OperationCode.REPLACE_CONDITIONAL; public class ConditionalReplaceOperation implements Operation, Result { diff --git a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/operations/LazyValueHolder.java b/clustered/common/src/main/java/org/ehcache/clustered/common/internal/store/operations/LazyValueHolder.java similarity index 92% rename from clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/operations/LazyValueHolder.java rename to clustered/common/src/main/java/org/ehcache/clustered/common/internal/store/operations/LazyValueHolder.java index dd02977824..fb50329113 100644 --- a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/operations/LazyValueHolder.java +++ b/clustered/common/src/main/java/org/ehcache/clustered/common/internal/store/operations/LazyValueHolder.java @@ -14,9 +14,9 @@ * limitations under the License. */ -package org.ehcache.clustered.client.internal.store.operations; +package org.ehcache.clustered.common.internal.store.operations; -import org.ehcache.clustered.client.internal.store.operations.codecs.CodecException; +import org.ehcache.clustered.common.internal.store.operations.codecs.CodecException; import org.ehcache.spi.serialization.Serializer; import java.nio.ByteBuffer; diff --git a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/operations/Operation.java b/clustered/common/src/main/java/org/ehcache/clustered/common/internal/store/operations/Operation.java similarity index 95% rename from clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/operations/Operation.java rename to clustered/common/src/main/java/org/ehcache/clustered/common/internal/store/operations/Operation.java index d0211f9bd4..c72f9726f1 100644 --- a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/operations/Operation.java +++ b/clustered/common/src/main/java/org/ehcache/clustered/common/internal/store/operations/Operation.java @@ -14,7 +14,7 @@ * limitations under the License. 
*/ -package org.ehcache.clustered.client.internal.store.operations; +package org.ehcache.clustered.common.internal.store.operations; import org.ehcache.spi.serialization.Serializer; diff --git a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/operations/OperationCode.java b/clustered/common/src/main/java/org/ehcache/clustered/common/internal/store/operations/OperationCode.java similarity index 84% rename from clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/operations/OperationCode.java rename to clustered/common/src/main/java/org/ehcache/clustered/common/internal/store/operations/OperationCode.java index 691851f458..4ee3277644 100644 --- a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/operations/OperationCode.java +++ b/clustered/common/src/main/java/org/ehcache/clustered/common/internal/store/operations/OperationCode.java @@ -14,7 +14,7 @@ * limitations under the License. */ -package org.ehcache.clustered.client.internal.store.operations; +package org.ehcache.clustered.common.internal.store.operations; import org.ehcache.spi.serialization.Serializer; @@ -27,6 +27,11 @@ public enum OperationCode { public Operation decode(ByteBuffer buffer, final Serializer keySerializer, final Serializer valueSerializer) { return new PutOperation<>(buffer, keySerializer, valueSerializer); } + + @Override + public boolean shouldBePinned() { + return false; + } }, REMOVE((byte)2) { @Override @@ -57,9 +62,15 @@ public Operation decode(final ByteBuffer buffer, final Serializer Operation decode(final ByteBuffer buffer, final Serializer keySerializer, final Serializer valueSerializer) { return new ConditionalReplaceOperation<>(buffer, keySerializer, valueSerializer); } + }, + PUT_WITH_WRITER((byte)7) { + @Override + public Operation decode(ByteBuffer buffer, Serializer keySerializer, Serializer valueSerializer) { + return new PutWithWriterOperation<>(buffer, keySerializer, valueSerializer); + } }; - private byte value; + private final byte value; OperationCode(byte value) { this.value = value; @@ -69,6 +80,10 @@ public byte getValue() { return value; } + public boolean shouldBePinned() { + return true; + } + public abstract Operation decode(ByteBuffer buffer, Serializer keySerializer, Serializer valueSerializer); public static OperationCode valueOf(byte value) { @@ -85,6 +100,8 @@ public static OperationCode valueOf(byte value) { return REPLACE; case 6: return REPLACE_CONDITIONAL; + case 7: + return PUT_WITH_WRITER; default: throw new IllegalArgumentException("Operation undefined for the value " + value); } diff --git a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/operations/PutIfAbsentOperation.java b/clustered/common/src/main/java/org/ehcache/clustered/common/internal/store/operations/PutIfAbsentOperation.java similarity index 96% rename from clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/operations/PutIfAbsentOperation.java rename to clustered/common/src/main/java/org/ehcache/clustered/common/internal/store/operations/PutIfAbsentOperation.java index 22361e6de7..1684dcb0c4 100644 --- a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/operations/PutIfAbsentOperation.java +++ b/clustered/common/src/main/java/org/ehcache/clustered/common/internal/store/operations/PutIfAbsentOperation.java @@ -14,7 +14,7 @@ * limitations under the License. 
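The shouldBePinned() hook above inverts a default: only a plain PUT answers false, since it wholly replaces the mapping, while every other code, including the new PUT_WITH_WRITER, reports that the entry must stay pinned (that reading of the semantics is ours; the contract itself is what OperationCodeTest further below asserts). A quick way to print the table:

```java
import org.ehcache.clustered.common.internal.store.operations.OperationCode;

class PinningTable {
  public static void main(String[] args) {
    // Expected output: PUT -> pinned=false, every other code -> pinned=true.
    for (OperationCode code : OperationCode.values()) {
      System.out.println(code + " -> pinned=" + code.shouldBePinned());
    }
  }
}
```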
 */
-package org.ehcache.clustered.client.internal.store.operations;
+package org.ehcache.clustered.common.internal.store.operations;
 
 import org.ehcache.spi.serialization.Serializer;
 
diff --git a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/operations/PutOperation.java b/clustered/common/src/main/java/org/ehcache/clustered/common/internal/store/operations/PutOperation.java
similarity index 96%
rename from clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/operations/PutOperation.java
rename to clustered/common/src/main/java/org/ehcache/clustered/common/internal/store/operations/PutOperation.java
index 40ce92ab89..6da446f51e 100644
--- a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/operations/PutOperation.java
+++ b/clustered/common/src/main/java/org/ehcache/clustered/common/internal/store/operations/PutOperation.java
@@ -14,7 +14,7 @@
  * limitations under the License.
  */
-package org.ehcache.clustered.client.internal.store.operations;
+package org.ehcache.clustered.common.internal.store.operations;
 
 import org.ehcache.spi.serialization.Serializer;
 
diff --git a/clustered/common/src/main/java/org/ehcache/clustered/common/internal/store/operations/PutWithWriterOperation.java b/clustered/common/src/main/java/org/ehcache/clustered/common/internal/store/operations/PutWithWriterOperation.java
new file mode 100644
index 0000000000..3273bca77f
--- /dev/null
+++ b/clustered/common/src/main/java/org/ehcache/clustered/common/internal/store/operations/PutWithWriterOperation.java
@@ -0,0 +1,55 @@
+/*
+ * Copyright Terracotta, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.ehcache.clustered.common.internal.store.operations;
+
+import org.ehcache.spi.serialization.Serializer;
+
+import java.nio.ByteBuffer;
+
+/**
+ * @param <K> key type
+ * @param <V> value type
+ */
+public class PutWithWriterOperation<K, V> extends BaseKeyValueOperation<K, V> implements Result<K, V> {
+
+  public PutWithWriterOperation(final K key, final V value, final long timeStamp) {
+    super(key, value, timeStamp);
+  }
+
+  PutWithWriterOperation(final ByteBuffer buffer, final Serializer<K> keySerializer, final Serializer<V> valueSerializer) {
+    super(buffer, keySerializer, valueSerializer);
+  }
+
+  @Override
+  public OperationCode getOpCode() {
+    return OperationCode.PUT_WITH_WRITER;
+  }
+
+  /**
+   * A put operation applied on top of another {@link Operation} does not care
+   * what the other operation is. The result is simply {@code this} operation.
+   */
+  @Override
+  public Result<K, V> apply(final Result<K, V> previousOperation) {
+    return this;
+  }
+
+  @Override
+  public PutOperation<K, V> asOperationExpiringAt(long expirationTime) {
+    return new PutOperation<>(this, -expirationTime);
+  }
+}
diff --git a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/operations/RemoveOperation.java b/clustered/common/src/main/java/org/ehcache/clustered/common/internal/store/operations/RemoveOperation.java
similarity index 96%
rename from clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/operations/RemoveOperation.java
rename to clustered/common/src/main/java/org/ehcache/clustered/common/internal/store/operations/RemoveOperation.java
index db6da0619d..1eb9cda227 100644
--- a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/operations/RemoveOperation.java
+++ b/clustered/common/src/main/java/org/ehcache/clustered/common/internal/store/operations/RemoveOperation.java
@@ -14,9 +14,9 @@
  * limitations under the License.
  */
-package org.ehcache.clustered.client.internal.store.operations;
+package org.ehcache.clustered.common.internal.store.operations;
 
-import org.ehcache.clustered.client.internal.store.operations.codecs.CodecException;
+import org.ehcache.clustered.common.internal.store.operations.codecs.CodecException;
 import org.ehcache.spi.serialization.Serializer;
 
 import java.nio.ByteBuffer;
diff --git a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/operations/ReplaceOperation.java b/clustered/common/src/main/java/org/ehcache/clustered/common/internal/store/operations/ReplaceOperation.java
similarity index 95%
rename from clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/operations/ReplaceOperation.java
rename to clustered/common/src/main/java/org/ehcache/clustered/common/internal/store/operations/ReplaceOperation.java
index efc43229dd..ee10ca5a03 100644
--- a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/operations/ReplaceOperation.java
+++ b/clustered/common/src/main/java/org/ehcache/clustered/common/internal/store/operations/ReplaceOperation.java
@@ -14,7 +14,7 @@
  * limitations under the License.
  */
-package org.ehcache.clustered.client.internal.store.operations;
+package org.ehcache.clustered.common.internal.store.operations;
 
 import org.ehcache.spi.serialization.Serializer;
 
diff --git a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/operations/Result.java b/clustered/common/src/main/java/org/ehcache/clustered/common/internal/store/operations/Result.java
similarity index 91%
rename from clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/operations/Result.java
rename to clustered/common/src/main/java/org/ehcache/clustered/common/internal/store/operations/Result.java
index 63203da8b8..bfb2ecdd5b 100644
--- a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/operations/Result.java
+++ b/clustered/common/src/main/java/org/ehcache/clustered/common/internal/store/operations/Result.java
@@ -14,7 +14,7 @@
  * limitations under the License.
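apply() above is the fold step of chain resolution: each decoded operation is applied to the Result accumulated so far, and put-like operations simply return themselves. A simplified resolver under that contract (the signature Result<K, V> apply(Result<K, V>) is assumed from the classes in this patch; the real resolver also tracks expiry, which this sketch ignores):

```java
import java.util.List;

import org.ehcache.clustered.common.internal.store.operations.Operation;
import org.ehcache.clustered.common.internal.store.operations.Result;

final class ResolveSketch {
  private ResolveSketch() {}

  static <K, V> Result<K, V> resolve(List<Operation<K, V>> decodedChain) {
    Result<K, V> state = null; // null means "key absent"
    for (Operation<K, V> op : decodedChain) {
      state = op.apply(state); // PUT-like codes ignore state and return themselves
    }
    return state;
  }
}
```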
*/ -package org.ehcache.clustered.client.internal.store.operations; +package org.ehcache.clustered.common.internal.store.operations; public interface Result { diff --git a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/operations/codecs/CodecException.java b/clustered/common/src/main/java/org/ehcache/clustered/common/internal/store/operations/codecs/CodecException.java similarity index 82% rename from clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/operations/codecs/CodecException.java rename to clustered/common/src/main/java/org/ehcache/clustered/common/internal/store/operations/codecs/CodecException.java index 454235e94f..cca0af1341 100644 --- a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/operations/codecs/CodecException.java +++ b/clustered/common/src/main/java/org/ehcache/clustered/common/internal/store/operations/codecs/CodecException.java @@ -14,18 +14,19 @@ * limitations under the License. */ -package org.ehcache.clustered.client.internal.store.operations.codecs; +package org.ehcache.clustered.common.internal.store.operations.codecs; /** * Thrown when a payload can not be encoded or decoded */ public class CodecException extends RuntimeException { + private static final long serialVersionUID = -4879598222155854243L; + /** * Creates a {@code CodecException}. */ public CodecException() { - super(); } /** @@ -33,7 +34,7 @@ public CodecException() { * * @param message information about the exception */ - public CodecException(final String message) { + public CodecException(String message) { super(message); } @@ -43,7 +44,7 @@ public CodecException(final String message) { * @param message information about the exception * @param cause the cause of this exception */ - public CodecException(final String message, final Throwable cause) { + public CodecException(String message, Throwable cause) { super(message, cause); } @@ -52,7 +53,7 @@ public CodecException(final String message, final Throwable cause) { * * @param cause the cause of this exception */ - public CodecException(final Throwable cause) { + public CodecException(Throwable cause) { super(cause); } } diff --git a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/operations/codecs/OperationsCodec.java b/clustered/common/src/main/java/org/ehcache/clustered/common/internal/store/operations/codecs/OperationsCodec.java similarity index 82% rename from clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/operations/codecs/OperationsCodec.java rename to clustered/common/src/main/java/org/ehcache/clustered/common/internal/store/operations/codecs/OperationsCodec.java index 16bbf347f9..4ce754ae55 100644 --- a/clustered/client/src/main/java/org/ehcache/clustered/client/internal/store/operations/codecs/OperationsCodec.java +++ b/clustered/common/src/main/java/org/ehcache/clustered/common/internal/store/operations/codecs/OperationsCodec.java @@ -14,10 +14,10 @@ * limitations under the License. 
*/ -package org.ehcache.clustered.client.internal.store.operations.codecs; +package org.ehcache.clustered.common.internal.store.operations.codecs; -import org.ehcache.clustered.client.internal.store.operations.Operation; -import org.ehcache.clustered.client.internal.store.operations.OperationCode; +import org.ehcache.clustered.common.internal.store.operations.Operation; +import org.ehcache.clustered.common.internal.store.operations.OperationCode; import org.ehcache.spi.serialization.Serializer; import java.nio.ByteBuffer; @@ -37,6 +37,10 @@ public ByteBuffer encode(Operation operation) { return operation.encode(keySerializer, valueSerializer); } + public static OperationCode getOperationCode(ByteBuffer buffer) { + return OperationCode.valueOf(buffer.duplicate().get()); + } + public Operation decode(ByteBuffer buffer) { OperationCode opCode = OperationCode.valueOf(buffer.get()); buffer.rewind(); diff --git a/clustered/common/src/main/java/org/ehcache/clustered/common/internal/util/ByteBufferInputStream.java b/clustered/common/src/main/java/org/ehcache/clustered/common/internal/util/ByteBufferInputStream.java index 0b0c1e60b0..895291069b 100644 --- a/clustered/common/src/main/java/org/ehcache/clustered/common/internal/util/ByteBufferInputStream.java +++ b/clustered/common/src/main/java/org/ehcache/clustered/common/internal/util/ByteBufferInputStream.java @@ -33,7 +33,7 @@ public ByteBufferInputStream(ByteBuffer buffer) { } @Override - public int read() throws IOException { + public int read() { if (buffer.hasRemaining()) { return 0xff & buffer.get(); } else { diff --git a/clustered/common/src/test/java/org/ehcache/clustered/common/internal/messages/CommonConfigCodecTest.java b/clustered/common/src/test/java/org/ehcache/clustered/common/internal/messages/CommonConfigCodecTest.java index d8e1764146..879960b2af 100644 --- a/clustered/common/src/test/java/org/ehcache/clustered/common/internal/messages/CommonConfigCodecTest.java +++ b/clustered/common/src/test/java/org/ehcache/clustered/common/internal/messages/CommonConfigCodecTest.java @@ -16,9 +16,14 @@ package org.ehcache.clustered.common.internal.messages; +import org.ehcache.clustered.common.Consistency; +import org.ehcache.clustered.common.PoolAllocation; import org.ehcache.clustered.common.ServerSideConfiguration; +import org.ehcache.clustered.common.internal.ServerStoreConfiguration; import org.junit.Test; +import org.mockito.Mockito; import org.terracotta.runnel.Struct; +import org.terracotta.runnel.StructBuilder; import org.terracotta.runnel.encoding.StructEncoder; import java.nio.ByteBuffer; @@ -27,6 +32,7 @@ import static org.hamcrest.Matchers.hasKey; import static org.hamcrest.Matchers.is; import static org.junit.Assert.*; +import static org.mockito.Mockito.mock; import static org.terracotta.runnel.StructBuilder.newStructBuilder; public class CommonConfigCodecTest { @@ -47,4 +53,23 @@ public void testEncodeDecodeServerSideConfiguration() throws Exception { assertThat(decodedServerSideConfiguration.getDefaultServerResource(), is("foo")); assertThat(decodedServerSideConfiguration.getResourcePools(), hasKey("bar")); } + + @Test + public void testInjectServerStoreConfiguration() { + PoolAllocation poolAllocation = mock(PoolAllocation.class); + ServerStoreConfiguration serverStoreConfiguration = new ServerStoreConfiguration(poolAllocation, "Long.class", + "String.class", null, null, Consistency.EVENTUAL, false, false); + ConfigCodec.InjectTuple injectTuple = CODEC.injectServerStoreConfiguration(newStructBuilder(), 10); + + 
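The new static OperationsCodec.getOperationCode(ByteBuffer) above reads the opcode byte from a duplicate of the buffer, so the caller's position is untouched and a later full decode still works. Combined with OperationCode.shouldBePinned(), this lets a store decide pinning without decoding the whole payload; a sketch (the helper itself is hypothetical):

```java
import java.nio.ByteBuffer;

import org.ehcache.clustered.common.internal.store.operations.OperationCode;
import org.ehcache.clustered.common.internal.store.operations.codecs.OperationsCodec;

final class PinningCheck {
  private PinningCheck() {}

  static boolean needsPinning(ByteBuffer encodedOperation) {
    // duplicate() inside getOperationCode() keeps the buffer readable afterwards
    OperationCode code = OperationsCodec.getOperationCode(encodedOperation);
    return code.shouldBePinned();
  }
}
```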
assertThat(injectTuple.getLastIndex(), is(40)); + + Struct struct = injectTuple.getUpdatedBuilder().build(); + StructEncoder encoder = struct.encoder(); + + CODEC.encodeServerStoreConfiguration(encoder, serverStoreConfiguration); + + encoder.int64(CommonConfigCodec.POOL_SIZE_FIELD, 20); + + } + } diff --git a/clustered/common/src/test/java/org/ehcache/clustered/common/internal/messages/LifeCycleMessageCodecTest.java b/clustered/common/src/test/java/org/ehcache/clustered/common/internal/messages/LifeCycleMessageCodecTest.java index 27e6cd172c..f031e0a896 100644 --- a/clustered/common/src/test/java/org/ehcache/clustered/common/internal/messages/LifeCycleMessageCodecTest.java +++ b/clustered/common/src/test/java/org/ehcache/clustered/common/internal/messages/LifeCycleMessageCodecTest.java @@ -24,13 +24,11 @@ import org.junit.Test; import java.util.Collections; -import java.util.UUID; import static java.nio.ByteBuffer.wrap; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.nullValue; -import static org.junit.Assert.*; +import static org.junit.Assert.assertThat; /** * LifeCycleMessageCodecTest @@ -64,7 +62,7 @@ public void testValidateServerStoreDedicated() throws Exception { PoolAllocation.Dedicated dedicated = new PoolAllocation.Dedicated("dedicate", 420000L); ServerStoreConfiguration configuration = new ServerStoreConfiguration(dedicated, "java.lang.Long", "java.lang.String", "org.ehcache.impl.serialization.LongSerializer", "org.ehcache.impl.serialization.StringSerializer", - Consistency.STRONG); + Consistency.STRONG, false); LifecycleMessage message = factory.validateServerStore("store1", configuration); byte[] encoded = codec.encode(message); @@ -75,6 +73,7 @@ public void testValidateServerStoreDedicated() throws Exception { PoolAllocation.Dedicated decodedPoolAllocation = (PoolAllocation.Dedicated) decodedMessage.getStoreConfiguration().getPoolAllocation(); assertThat(decodedPoolAllocation.getResourceName(), is(dedicated.getResourceName())); assertThat(decodedPoolAllocation.getSize(), is(dedicated.getSize())); + assertThat(decodedMessage.getStoreConfiguration().isLoaderWriterConfigured(), is(false)); } @Test @@ -82,7 +81,7 @@ public void testValidateServerStoreShared() throws Exception { PoolAllocation.Shared shared = new PoolAllocation.Shared("shared"); ServerStoreConfiguration configuration = new ServerStoreConfiguration(shared, "java.lang.Long", "java.lang.String", "org.ehcache.impl.serialization.LongSerializer", "org.ehcache.impl.serialization.StringSerializer", - Consistency.STRONG); + Consistency.STRONG, false); LifecycleMessage message = factory.validateServerStore("store1", configuration); byte[] encoded = codec.encode(message); @@ -92,6 +91,7 @@ public void testValidateServerStoreShared() throws Exception { validateCommonServerStoreConfig(decodedMessage, configuration); PoolAllocation.Shared decodedPoolAllocation = (PoolAllocation.Shared) decodedMessage.getStoreConfiguration().getPoolAllocation(); assertThat(decodedPoolAllocation.getResourcePoolName(), is(shared.getResourcePoolName())); + assertThat(decodedMessage.getStoreConfiguration().isLoaderWriterConfigured(), is(false)); } @Test @@ -99,7 +99,7 @@ public void testValidateServerStoreUnknown() throws Exception { PoolAllocation.Unknown unknown = new PoolAllocation.Unknown(); ServerStoreConfiguration configuration = new ServerStoreConfiguration(unknown, "java.lang.Long", "java.lang.String", "org.ehcache.impl.serialization.LongSerializer", 
"org.ehcache.impl.serialization.StringSerializer", - Consistency.STRONG); + Consistency.STRONG, false); LifecycleMessage message = factory.validateServerStore("store1", configuration); byte[] encoded = codec.encode(message); @@ -108,6 +108,7 @@ public void testValidateServerStoreUnknown() throws Exception { assertThat(decodedMessage.getMessageType(), is(EhcacheMessageType.VALIDATE_SERVER_STORE)); validateCommonServerStoreConfig(decodedMessage, configuration); assertThat(decodedMessage.getStoreConfiguration().getPoolAllocation(), instanceOf(PoolAllocation.Unknown.class)); + assertThat(decodedMessage.getStoreConfiguration().isLoaderWriterConfigured(), is(false)); } private void validateCommonServerStoreConfig(LifecycleMessage.ValidateServerStore decodedMessage, ServerStoreConfiguration initialConfiguration) { diff --git a/clustered/common/src/test/java/org/ehcache/clustered/common/internal/messages/ReconnectMessageCodecTest.java b/clustered/common/src/test/java/org/ehcache/clustered/common/internal/messages/ReconnectMessageCodecTest.java index f2532d7232..d1b336fd53 100644 --- a/clustered/common/src/test/java/org/ehcache/clustered/common/internal/messages/ReconnectMessageCodecTest.java +++ b/clustered/common/src/test/java/org/ehcache/clustered/common/internal/messages/ReconnectMessageCodecTest.java @@ -21,7 +21,6 @@ import java.util.HashSet; import java.util.Set; -import java.util.UUID; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.is; @@ -47,13 +46,20 @@ public void testClusterTierReconnectCodec() { setToInvalidate.add(11L); setToInvalidate.add(111L); + Set locks = new HashSet<>(); + locks.add(20L); + locks.add(200L); + locks.add(2000L); + reconnectMessage.addInvalidationsInProgress(setToInvalidate); reconnectMessage.clearInProgress(); + reconnectMessage.addLocksHeld(locks); ClusterTierReconnectMessage decoded = reconnectMessageCodec.decode(reconnectMessageCodec.encode(reconnectMessage)); assertThat(decoded, notNullValue()); assertThat(decoded.getInvalidationsInProgress(), containsInAnyOrder(setToInvalidate.toArray())); assertThat(decoded.isClearInProgress(), is(true)); + assertThat(decoded.getLocksHeld(), containsInAnyOrder(locks.toArray())); } } diff --git a/clustered/common/src/test/java/org/ehcache/clustered/common/internal/messages/ResponseCodecTest.java b/clustered/common/src/test/java/org/ehcache/clustered/common/internal/messages/ResponseCodecTest.java index 3d09962a86..32182d3bb3 100644 --- a/clustered/common/src/test/java/org/ehcache/clustered/common/internal/messages/ResponseCodecTest.java +++ b/clustered/common/src/test/java/org/ehcache/clustered/common/internal/messages/ResponseCodecTest.java @@ -163,4 +163,21 @@ public void testResolveRequest() throws Exception { assertThat(decodedResponse.getKey(), is(42L)); Util.assertChainHas(decodedResponse.getChain(), 1L, 11L, 111L); } + + @Test + public void testLockResponse() { + EhcacheEntityResponse.LockSuccess lockSuccess = new EhcacheEntityResponse.LockSuccess(getChain(false, createPayload(1L), createPayload(10L))); + + byte[] sucessEncoded = RESPONSE_CODEC.encode(lockSuccess); + EhcacheEntityResponse.LockSuccess successDecoded = (EhcacheEntityResponse.LockSuccess) RESPONSE_CODEC.decode(sucessEncoded); + + assertThat(successDecoded.getResponseType(), is(EhcacheResponseType.LOCK_SUCCESS)); + Util.assertChainHas(successDecoded.getChain(), 1L, 10L); + + EhcacheEntityResponse.LockFailure lockFailure = EhcacheEntityResponse.lockFailure(); + byte[] failureEncoded = 
RESPONSE_CODEC.encode(lockFailure); + EhcacheEntityResponse.LockFailure failureDecoded = (EhcacheEntityResponse.LockFailure) RESPONSE_CODEC.decode(failureEncoded); + + assertThat(failureDecoded.getResponseType(), is(EhcacheResponseType.LOCK_FAILURE)); + } } diff --git a/clustered/common/src/test/java/org/ehcache/clustered/common/internal/messages/ServerStoreOpCodecTest.java b/clustered/common/src/test/java/org/ehcache/clustered/common/internal/messages/ServerStoreOpCodecTest.java index 07c350d5cd..e6c6c1d3bd 100644 --- a/clustered/common/src/test/java/org/ehcache/clustered/common/internal/messages/ServerStoreOpCodecTest.java +++ b/clustered/common/src/test/java/org/ehcache/clustered/common/internal/messages/ServerStoreOpCodecTest.java @@ -106,4 +106,31 @@ public void testClientInvalidationAckMessageCodec() throws Exception { assertThat(decodedInvalidationAckMessage.getInvalidationId(), is(123)); assertThat(decodedInvalidationAckMessage.getMessageType(), is(EhcacheMessageType.CLIENT_INVALIDATION_ACK)); } + + @Test + public void testLockMessage() throws Exception { + ServerStoreOpMessage lockMessage = new ServerStoreOpMessage.LockMessage(2L); + + byte[] encoded = STORE_OP_CODEC.encode(lockMessage); + EhcacheEntityMessage decoded = STORE_OP_CODEC.decode(lockMessage.getMessageType(), wrap(encoded)); + + ServerStoreOpMessage.LockMessage decodedLockMessage = (ServerStoreOpMessage.LockMessage) decoded; + + assertThat(decodedLockMessage.getHash(), is(2L)); + assertThat(decodedLockMessage.getMessageType(), is(EhcacheMessageType.LOCK)); + } + + @Test + public void testUnlockMessage() throws Exception { + ServerStoreOpMessage unlockMessage = new ServerStoreOpMessage.UnlockMessage(2L); + + byte[] encoded = STORE_OP_CODEC.encode(unlockMessage); + EhcacheEntityMessage decoded = STORE_OP_CODEC.decode(unlockMessage.getMessageType(), wrap(encoded)); + + ServerStoreOpMessage.UnlockMessage decodedLockMessage = (ServerStoreOpMessage.UnlockMessage) decoded; + + assertThat(decodedLockMessage.getHash(), is(2L)); + assertThat(decodedLockMessage.getMessageType(), is(EhcacheMessageType.UNLOCK)); + } + } diff --git a/clustered/common/src/test/java/org/ehcache/clustered/common/internal/store/operations/OperationCodeTest.java b/clustered/common/src/test/java/org/ehcache/clustered/common/internal/store/operations/OperationCodeTest.java new file mode 100644 index 0000000000..9ae6559a11 --- /dev/null +++ b/clustered/common/src/test/java/org/ehcache/clustered/common/internal/store/operations/OperationCodeTest.java @@ -0,0 +1,36 @@ +/* + * Copyright Terracotta, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.ehcache.clustered.common.internal.store.operations; + +import org.ehcache.clustered.common.internal.store.operations.OperationCode; +import org.junit.Test; + +import static org.hamcrest.Matchers.is; +import static org.junit.Assert.*; + +public class OperationCodeTest { + + @Test + public void testPinning() { + assertThat(OperationCode.PUT.shouldBePinned(), is(false)); + + for (OperationCode operationCode : OperationCode.values()) { + if (OperationCode.PUT != operationCode) { + assertThat(operationCode.shouldBePinned(), is(true)); + } + } + } +} diff --git a/clustered/integration-test/build.gradle b/clustered/integration-test/build.gradle index 100d653363..ae1c06ee55 100644 --- a/clustered/integration-test/build.gradle +++ b/clustered/integration-test/build.gradle @@ -26,23 +26,21 @@ dependencies { testCompileOnly project(':xml') testCompileOnly "org.terracotta.internal:client-runtime:$terracottaCoreVersion" testCompileOnly "org.terracotta:runnel:$terracottaPlatformVersion" + testCompileOnly "org.terracotta:lease-api:$terracottaPlatformVersion" - testCompile project(':management') - testCompile "org.terracotta.management.dist:mnm-nms:$terracottaPlatformVersion" - testCompile "org.terracotta.management.dist:mnm-nms-agent:$terracottaPlatformVersion" - testCompile "com.fasterxml.jackson.core:jackson-databind:2.8.0" - testRuntime project(':clustered:clustered-dist') - testRuntime project(':dist') + testImplementation project(':management') + testImplementation "org.terracotta.management.dist:mnm-nms:$terracottaPlatformVersion" + testImplementation "org.terracotta.management.dist:mnm-nms-agent:$terracottaPlatformVersion" + testImplementation "com.fasterxml.jackson.core:jackson-databind:2.8.0" + testRuntimeOnly project(':clustered:clustered-dist') + testRuntimeOnly project(':dist') - testCompile (group:'org.terracotta.internal', name:'galvan-support', version: terracottaCoreVersion) { - // galvan-support depends on junit 4.11 and version enforcement plugin won't allow that - exclude group:'junit', module:'junit' - } - testCompile (group:'com.google.code.tempus-fugit', name:'tempus-fugit', version:'1.1') { + testImplementation (group:'org.terracotta.internal', name:'galvan-support', version: terracottaCoreVersion) + testImplementation (group:'com.google.code.tempus-fugit', name:'tempus-fugit', version:'1.1') { exclude group:'junit', module:'junit' exclude group:'org.hamcrest', module:'hamcrest-core' } - testCompile group: 'javax.cache', name: 'cache-api', version: jcacheVersion + testImplementation group: 'javax.cache', name: 'cache-api', version: jcacheVersion serverLibs ("org.terracotta.management.dist:mnm-server:$terracottaPlatformVersion") { exclude group:'org.terracotta.management.dist', module:'mnm-common' @@ -74,6 +72,3 @@ test { // testLogging.showStandardStreams = true } -tasks.withType(JavaCompile) { - options.compilerArgs += ['-Werror'] -} diff --git a/clustered/integration-test/src/test/java/org/ehcache/clustered/BasicCacheOpsMultiThreadedTest.java b/clustered/integration-test/src/test/java/org/ehcache/clustered/BasicCacheOpsMultiThreadedTest.java new file mode 100644 index 0000000000..ba74b1872d --- /dev/null +++ b/clustered/integration-test/src/test/java/org/ehcache/clustered/BasicCacheOpsMultiThreadedTest.java @@ -0,0 +1,197 @@ +/* + * Copyright Terracotta, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.ehcache.clustered;
+
+import org.ehcache.Cache;
+import org.ehcache.PersistentCacheManager;
+import org.ehcache.clustered.client.config.ClusteredStoreConfiguration;
+import org.ehcache.clustered.client.config.builders.ClusteredResourcePoolBuilder;
+import org.ehcache.clustered.client.config.builders.ClusteringServiceConfigurationBuilder;
+import org.ehcache.clustered.client.config.builders.ServerSideConfigurationBuilder;
+import org.ehcache.clustered.client.config.builders.TimeoutsBuilder;
+import org.ehcache.clustered.common.Consistency;
+import org.ehcache.config.ResourcePool;
+import org.ehcache.config.builders.CacheConfigurationBuilder;
+import org.ehcache.config.builders.CacheManagerBuilder;
+import org.ehcache.config.builders.ResourcePoolsBuilder;
+import org.ehcache.config.units.MemoryUnit;
+import org.junit.BeforeClass;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.terracotta.testing.rules.Cluster;
+
+import com.tc.util.Assert;
+
+import java.io.File;
+import java.net.URI;
+import java.time.Duration;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.atomic.AtomicReference;
+
+import static org.hamcrest.Matchers.is;
+import static org.junit.Assert.assertThat;
+import static org.junit.Assert.assertTrue;
+import static org.terracotta.testing.rules.BasicExternalClusterBuilder.newCluster;
+
+/**
+ * Simulates multiple clients starting the same cache manager simultaneously, and ensures that puts and gets work
+ * correctly and that nothing is lost or hangs when several instances of the same cache manager come up at the
+ * same time.
+ */
+public class BasicCacheOpsMultiThreadedTest extends ClusteredTests {
+
+  private static final String RESOURCE_CONFIG =
+    "<config xmlns:ohr='http://www.terracotta.org/config/offheap-resource'>"
+    + "<ohr:offheap-resources>"
+    + "<ohr:resource name=\"primary-server-resource\" unit=\"MB\">64</ohr:resource>"
+    + "</ohr:offheap-resources>"
+    + "</config>\n";
+
+  @ClassRule
+  public static Cluster CLUSTER =
+    newCluster().in(new File("build/cluster")).withServiceFragment(RESOURCE_CONFIG).build();
+
+  @BeforeClass
+  public static void waitForActive() throws Exception {
+    CLUSTER.getClusterControl().waitForActive();
+  }
+
+  private static final String CLUSTERED_CACHE_NAME = "clustered-cache";
+  private static final String SYN_CACHE_NAME = "syn-cache";
+  private static final String PRIMARY_SERVER_RESOURCE_NAME = "primary-server-resource";
+  private static final String CACHE_MANAGER_NAME = "/crud-cm";
+  private static final int PRIMARY_SERVER_RESOURCE_SIZE = 4; //MB
+  private static final int NUM_THREADS = 8;
+  private static final int MAX_WAIT_TIME_SECONDS = 30;
+
+  private final AtomicReference<Throwable> exception = new AtomicReference<>();
+  private final AtomicLong idGenerator = new AtomicLong(2L);
+
+  @Test
+  public void testMultipleClients() throws Throwable {
+    CountDownLatch latch = new CountDownLatch(NUM_THREADS + 1);
+
+    List<Thread> threads = new ArrayList<>(NUM_THREADS);
+    for (int i = 0; i < NUM_THREADS; i++) {
+      Thread t1 = new Thread(content(latch));
+      t1.start();
+      threads.add(t1);
+    }
+
+    latch.countDown();
+    assertTrue(latch.await(MAX_WAIT_TIME_SECONDS, TimeUnit.SECONDS));
+
+    for (Thread t : threads) {
+      t.join();
+    }
+
+    Throwable throwable = exception.get();
+    if (throwable != null) {
+      throw throwable;
+    }
+  }
+
+  private Runnable content(CountDownLatch latch) {
+    return () -> {
+      try (PersistentCacheManager cacheManager = createCacheManager(CLUSTER.getConnectionURI())) {
+        latch.countDown();
+        try {
+          assertTrue(latch.await(MAX_WAIT_TIME_SECONDS, TimeUnit.SECONDS));
+        } catch (InterruptedException e) {
+          // continue
+        }
+
+        cacheManager.init();
+        doSyncAndPut(cacheManager);
+      } catch (Throwable t) {
+        if (!exception.compareAndSet(null, t)) {
+          exception.get().addSuppressed(t);
+        }
+      }
+    };
+  }
+
+  private void doSyncAndPut(PersistentCacheManager cacheManager) throws InterruptedException {
+    String customValue = "value";
+    Cache<String, Boolean> synCache = cacheManager.getCache(SYN_CACHE_NAME, String.class, Boolean.class);
+    Cache<Long, String> customValueCache = cacheManager.getCache(CLUSTERED_CACHE_NAME, Long.class, String.class);
+    parallelPuts(customValueCache);
+    String firstClientStartKey = "first_client_start", firstClientEndKey = "first_client_end";
+    if (synCache.putIfAbsent(firstClientStartKey, true) == null) {
+      customValueCache.put(1L, customValue);
+      assertThat(customValueCache.get(1L), is(customValue));
+      synCache.put(firstClientEndKey, true);
+    } else {
+      int retry = 0, maxRetryCount = 30;
+      while (++retry <= maxRetryCount && synCache.get(firstClientEndKey) == null) {
+        Thread.sleep(1000L);
+      }
+
+      if (retry > maxRetryCount) {
+        Assert.fail("Couldn't find " + firstClientEndKey + " in synCache after " + maxRetryCount + " retries!");
+      }
+
+      assertThat(customValueCache.get(1L), is(customValue));
+    }
+  }
+
+  private static PersistentCacheManager createCacheManager(URI clusterURI) {
+    ServerSideConfigurationBuilder serverSideConfigBuilder = ClusteringServiceConfigurationBuilder
+      .cluster(clusterURI.resolve(CACHE_MANAGER_NAME))
+      .timeouts(TimeoutsBuilder.timeouts().read(Duration.ofSeconds(20)).write(Duration.ofSeconds(30)))
+      .autoCreate()
+      .defaultServerResource(PRIMARY_SERVER_RESOURCE_NAME);
+
+    ResourcePool resourcePool = ClusteredResourcePoolBuilder
.clusteredDedicated(PRIMARY_SERVER_RESOURCE_NAME, PRIMARY_SERVER_RESOURCE_SIZE, MemoryUnit.MB); + + CacheManagerBuilder clusteredCacheManagerBuilder = CacheManagerBuilder + .newCacheManagerBuilder() + .with(serverSideConfigBuilder) + .withCache(CLUSTERED_CACHE_NAME, + CacheConfigurationBuilder + .newCacheConfigurationBuilder(Long.class, String.class, + ResourcePoolsBuilder.newResourcePoolsBuilder() + .with(resourcePool)) + .add(new ClusteredStoreConfiguration(Consistency.STRONG))) + .withCache(SYN_CACHE_NAME, + CacheConfigurationBuilder + .newCacheConfigurationBuilder(String.class, Boolean.class, + ResourcePoolsBuilder.newResourcePoolsBuilder() + .with(resourcePool)) + .add(new ClusteredStoreConfiguration(Consistency.STRONG))); + return clusteredCacheManagerBuilder.build(false); + } + + private void parallelPuts(Cache customValueCache) { + // make sure each thread gets its own id + long startingId = idGenerator.getAndAdd(10L); + customValueCache.put(startingId + 1, "value1"); + customValueCache.put(startingId + 1, "value11"); + customValueCache.put(startingId + 2, "value2"); + customValueCache.put(startingId + 3, "value3"); + customValueCache.put(startingId + 4, "value4"); + assertThat(customValueCache.get(startingId + 1), is("value11")); + assertThat(customValueCache.get(startingId + 2), is("value2")); + assertThat(customValueCache.get(startingId + 3), is("value3")); + assertThat(customValueCache.get(startingId + 4), is("value4")); + } +} diff --git a/clustered/integration-test/src/test/java/org/ehcache/clustered/BasicEntityInteractionTest.java b/clustered/integration-test/src/test/java/org/ehcache/clustered/BasicEntityInteractionTest.java index a6068f56cb..8f3227cee3 100644 --- a/clustered/integration-test/src/test/java/org/ehcache/clustered/BasicEntityInteractionTest.java +++ b/clustered/integration-test/src/test/java/org/ehcache/clustered/BasicEntityInteractionTest.java @@ -145,6 +145,7 @@ public void testPresentEntityDestroySucceeds() throws Throwable { @Test @Ignore + @SuppressWarnings("try") public void testPresentEntityDestroyBlockedByHeldReferenceSucceeds() throws Throwable { try (Connection client = CLUSTER.newConnection()) { EntityRef ref = getEntityRef(client); diff --git a/clustered/integration-test/src/test/java/org/ehcache/clustered/ClusterTierManagerClientEntityFactoryIntegrationTest.java b/clustered/integration-test/src/test/java/org/ehcache/clustered/ClusterTierManagerClientEntityFactoryIntegrationTest.java index 0912e00fa6..8678e0b8e0 100644 --- a/clustered/integration-test/src/test/java/org/ehcache/clustered/ClusterTierManagerClientEntityFactoryIntegrationTest.java +++ b/clustered/integration-test/src/test/java/org/ehcache/clustered/ClusterTierManagerClientEntityFactoryIntegrationTest.java @@ -36,7 +36,9 @@ import static org.hamcrest.core.Is.is; import static org.hamcrest.core.IsNull.notNullValue; +import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertThat; +import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import static org.terracotta.testing.rules.BasicExternalClusterBuilder.newCluster; @@ -154,12 +156,7 @@ public void testDestroyWhenNotExisting() throws Exception { @Test public void testAbandonLeadershipWhenNotOwning() throws Exception { ClusterTierManagerClientEntityFactory factory = new ClusterTierManagerClientEntityFactory(CONNECTION); - try { - factory.abandonLeadership("testAbandonLeadershipWhenNotOwning"); - fail("Expected IllegalMonitorStateException"); - } catch (IllegalMonitorStateException e) { - 
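doSyncAndPut() above elects a leader with putIfAbsent on a marker cache: exactly one client observes null and runs the first-client work, while the rest poll for the end marker. The pattern, distilled (the key names are the test's own; the helper class itself is not in the patch):

```java
import org.ehcache.Cache;

final class FirstClientBarrier {
  private FirstClientBarrier() {}

  static void runOnce(Cache<String, Boolean> synCache, Runnable firstClientWork) throws InterruptedException {
    if (synCache.putIfAbsent("first_client_start", true) == null) {
      firstClientWork.run();                  // only the putIfAbsent winner gets here
      synCache.put("first_client_end", true); // release the waiting clients
    } else {
      int retry = 0;
      while (++retry <= 30 && synCache.get("first_client_end") == null) {
        Thread.sleep(1000L);
      }
    }
  }
}
```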
//expected - } + assertFalse(factory.abandonLeadership("testAbandonLeadershipWhenNotOwning")); } @Test @@ -183,7 +180,7 @@ public void testAcquireLeadershipWhenTaken() throws Exception { public void testAcquireLeadershipAfterAbandoned() throws Exception { ClusterTierManagerClientEntityFactory factoryA = new ClusterTierManagerClientEntityFactory(CONNECTION); factoryA.acquireLeadership("testAcquireLeadershipAfterAbandoned"); - factoryA.abandonLeadership("testAcquireLeadershipAfterAbandoned"); + assertTrue(factoryA.abandonLeadership("testAcquireLeadershipAfterAbandoned")); try (Connection clientB = CLUSTER.newConnection()) { ClusterTierManagerClientEntityFactory factoryB = new ClusterTierManagerClientEntityFactory(clientB); diff --git a/clustered/integration-test/src/test/java/org/ehcache/clustered/ClusteredLoaderWriterTest.java b/clustered/integration-test/src/test/java/org/ehcache/clustered/ClusteredLoaderWriterTest.java new file mode 100644 index 0000000000..a59acfd07a --- /dev/null +++ b/clustered/integration-test/src/test/java/org/ehcache/clustered/ClusteredLoaderWriterTest.java @@ -0,0 +1,178 @@ +/* + * Copyright Terracotta, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.ehcache.clustered; + +import org.ehcache.Cache; +import org.ehcache.CacheManager; +import org.ehcache.PersistentCacheManager; +import org.ehcache.clustered.client.config.builders.ClusteredResourcePoolBuilder; +import org.ehcache.clustered.client.config.builders.ClusteredStoreConfigurationBuilder; +import org.ehcache.clustered.common.Consistency; +import org.ehcache.clustered.util.TestCacheLoaderWriter; +import org.ehcache.config.CacheConfiguration; +import org.ehcache.config.builders.CacheConfigurationBuilder; +import org.ehcache.config.builders.CacheManagerBuilder; +import org.ehcache.config.builders.ResourcePoolsBuilder; +import org.ehcache.config.units.MemoryUnit; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.terracotta.testing.rules.Cluster; + +import java.io.File; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.ThreadLocalRandom; +import java.util.concurrent.atomic.AtomicInteger; + +import static org.ehcache.clustered.client.config.builders.ClusteringServiceConfigurationBuilder.cluster; +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.is; +import static org.junit.Assert.assertThat; +import static org.terracotta.testing.rules.BasicExternalClusterBuilder.newCluster; + +@RunWith(Parameterized.class) +public class ClusteredLoaderWriterTest extends ClusteredTests { + + private static final String RESOURCE_CONFIG = + "" + + "" + + "64" + + "" + + "\n"; + + @Parameterized.Parameters(name = "consistency={0}") + public static Consistency[] data() { + return 
Consistency.values(); + } + + @Parameterized.Parameter + public Consistency cacheConsistency; + + private static CacheManager cacheManager; + private Cache<Long, String> client1; + private CacheConfiguration<Long, String> configuration; + + private ConcurrentMap<Long, String> sor; + + @ClassRule + public static Cluster CLUSTER = + newCluster().in(new File("build/cluster")).withServiceFragment(RESOURCE_CONFIG).build(); + + @BeforeClass + public static void waitForActive() throws Exception { + CLUSTER.getClusterControl().waitForActive(); + cacheManager = newCacheManager(); + } + + private static PersistentCacheManager newCacheManager() { + return CacheManagerBuilder.newCacheManagerBuilder() + .with(cluster(CLUSTER.getConnectionURI()) + .autoCreate() + .build()) + .build(true); + } + + @Before + public void setUp() throws Exception { + + sor = new ConcurrentHashMap<>(); + configuration = getCacheConfig(); + } + + private CacheConfiguration<Long, String> getCacheConfig() { + return CacheConfigurationBuilder + .newCacheConfigurationBuilder(Long.class, String.class, + ResourcePoolsBuilder + .heap(20) + .with(ClusteredResourcePoolBuilder.clusteredDedicated("primary-server-resource", 2, MemoryUnit.MB))) + .withLoaderWriter(new TestCacheLoaderWriter(sor)) + .add(ClusteredStoreConfigurationBuilder.withConsistency(cacheConsistency)) + .build(); + } + + @Test + public void testBasicOps() { + client1 = cacheManager.createCache("basicops" + cacheConsistency.name(), configuration); + assertThat(sor.isEmpty(), is(true)); + + Set<Long> keys = new HashSet<>(); + ThreadLocalRandom.current().longs(10).forEach(x -> { + keys.add(x); + client1.put(x, Long.toString(x)); + }); + + assertThat(sor.size(), is(10)); + + CacheManager anotherCacheManager = newCacheManager(); + Cache<Long, String> client2 = anotherCacheManager.createCache("basicops" + cacheConsistency.name(), + getCacheConfig()); + Map<Long, String> all = client2.getAll(keys); + assertThat(all.keySet(), containsInAnyOrder(keys.toArray())); + + keys.stream().limit(3).forEach(client2::remove); + + assertThat(sor.size(), is(7)); + } + + @Test + public void testCASOps() { + client1 = cacheManager.createCache("casops" + cacheConsistency.name(), configuration); + assertThat(sor.isEmpty(), is(true)); + + Set<Long> keys = new HashSet<>(); + ThreadLocalRandom.current().longs(10).forEach(x -> { + keys.add(x); + client1.put(x, Long.toString(x)); + }); + assertThat(sor.size(), is(10)); + + CacheManager anotherCacheManager = newCacheManager(); + Cache<Long, String> client2 = anotherCacheManager.createCache("casops" + cacheConsistency.name(), + getCacheConfig()); + + keys.forEach(x -> assertThat(client2.putIfAbsent(x, "Again" + x), is(Long.toString(x)))); + + assertThat(sor.size(), is(10)); + + keys.stream().limit(5).forEach(x -> + assertThat(client2.replace(x, "Replaced" + x), is(Long.toString(x)))); + + assertThat(sor.size(), is(10)); + + keys.forEach(x -> client1.remove(x, Long.toString(x))); + + assertThat(sor.size(), is(5)); + + AtomicInteger success = new AtomicInteger(0); + + keys.forEach(x -> { + if (client2.replace(x, "Replaced" + x, "Again")) { + success.incrementAndGet(); + } + }); + + assertThat(success.get(), is(5)); + + } + +} diff --git a/clustered/integration-test/src/test/java/org/ehcache/clustered/DestroyLoopTest.java b/clustered/integration-test/src/test/java/org/ehcache/clustered/DestroyLoopTest.java new file mode 100644 index 0000000000..444631d2d0 --- /dev/null +++ b/clustered/integration-test/src/test/java/org/ehcache/clustered/DestroyLoopTest.java @@ -0,0 +1,115 @@ +/* + * Copyright Terracotta, Inc.
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.ehcache.clustered; + +import org.ehcache.CachePersistenceException; +import org.ehcache.PersistentCacheManager; +import org.ehcache.StateTransitionException; +import org.ehcache.clustered.client.config.ClusteredStoreConfiguration; +import org.ehcache.clustered.client.config.builders.ClusteredResourcePoolBuilder; +import org.ehcache.clustered.client.config.builders.ClusteringServiceConfigurationBuilder; +import org.ehcache.clustered.common.Consistency; +import org.ehcache.config.builders.CacheManagerBuilder; +import org.ehcache.config.builders.ResourcePoolsBuilder; +import org.ehcache.config.units.EntryUnit; +import org.ehcache.config.units.MemoryUnit; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.terracotta.testing.rules.Cluster; + +import java.io.File; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.function.Supplier; + +import static org.ehcache.clustered.client.config.builders.ClusteringServiceConfigurationBuilder.cluster; +import static org.ehcache.config.builders.CacheConfigurationBuilder.newCacheConfigurationBuilder; +import static org.ehcache.config.builders.CacheManagerBuilder.newCacheManagerBuilder; +import static org.hamcrest.CoreMatchers.is; +import static org.junit.Assert.assertThat; +import static org.terracotta.testing.rules.BasicExternalClusterBuilder.newCluster; + +public class DestroyLoopTest extends ClusteredTests { + + private static final String RESOURCE_CONFIG = + "<config xmlns:ohr='http://www.terracotta.org/config/offheap-resource'>" + + "<ohr:offheap-resources>" + + "<ohr:resource name=\"primary-server-resource\" unit=\"MB\">64</ohr:resource>" + + "</ohr:offheap-resources>" + + "</config>\n"; + + private static final String CACHE_MANAGER_NAME = "/destroy-cm"; + private static final String CACHE_NAME = "clustered-cache"; + + @ClassRule + public static Cluster CLUSTER = newCluster().in(new File("build/cluster")).withServiceFragment(RESOURCE_CONFIG).build(); + + @BeforeClass + public static void waitForActive() throws Exception { + CLUSTER.getClusterControl().waitForActive(); + } + + @Test + public void testDestroyLoop() throws Exception { + for (int i = 0; i < 10; i++) { + try (CacheManagerContainer cmc = new CacheManagerContainer(10, this::createCacheManager)) { + // just put in one and get from another + cmc.cacheManagerList.get(0).getCache(CACHE_NAME, Long.class, String.class).put(1L, "value"); + assertThat(cmc.cacheManagerList.get(5).getCache(CACHE_NAME, Long.class, String.class).get(1L), + is("value")); + } + destroyCacheManager(); + } + } + + private void destroyCacheManager() throws CachePersistenceException { + PersistentCacheManager cacheManager = newCacheManagerBuilder().with( + ClusteringServiceConfigurationBuilder.cluster(CLUSTER.getConnectionURI().resolve(CACHE_MANAGER_NAME)) + .expecting()).build(false); + cacheManager.destroy(); + } + + private PersistentCacheManager createCacheManager() { + CacheManagerBuilder<PersistentCacheManager> clusteredCacheManagerBuilder = + newCacheManagerBuilder() + .with(cluster(CLUSTER.getConnectionURI().resolve(CACHE_MANAGER_NAME)).autoCreate()) +
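// autoCreate(): provision the server-side state for this cache manager if it does not exist yet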
.withCache(CACHE_NAME, newCacheConfigurationBuilder(Long.class, String.class, + ResourcePoolsBuilder.newResourcePoolsBuilder().heap(100, EntryUnit.ENTRIES) + .with(ClusteredResourcePoolBuilder.clusteredDedicated("primary-server-resource", 2, MemoryUnit.MB))) + .add(new ClusteredStoreConfiguration(Consistency.STRONG))); + return clusteredCacheManagerBuilder.build(true); + } + + private static class CacheManagerContainer implements AutoCloseable { + private final List<PersistentCacheManager> cacheManagerList; + + private CacheManagerContainer(int numCacheManagers, Supplier<PersistentCacheManager> cmSupplier) { + List<PersistentCacheManager> cm = new ArrayList<>(); + for (int i = 0; i < numCacheManagers; i++) { + cm.add(cmSupplier.get()); + } + cacheManagerList = Collections.unmodifiableList(cm); + } + + @Override + public void close() throws StateTransitionException { + cacheManagerList.forEach(PersistentCacheManager::close); + } + } +} diff --git a/clustered/integration-test/src/test/java/org/ehcache/clustered/LeaseTest.java b/clustered/integration-test/src/test/java/org/ehcache/clustered/LeaseTest.java new file mode 100644 index 0000000000..c5025a2b74 --- /dev/null +++ b/clustered/integration-test/src/test/java/org/ehcache/clustered/LeaseTest.java @@ -0,0 +1,156 @@ +/* + * Copyright Terracotta, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ +package org.ehcache.clustered; + +import com.tc.net.proxy.TCPProxy; +import org.ehcache.Cache; +import org.ehcache.PersistentCacheManager; +import org.ehcache.clustered.client.config.builders.ClusteringServiceConfigurationBuilder; +import org.ehcache.clustered.client.config.builders.TimeoutsBuilder; +import org.ehcache.clustered.util.TCPProxyUtil; +import org.ehcache.config.CacheConfiguration; +import org.ehcache.config.builders.CacheConfigurationBuilder; +import org.ehcache.config.builders.CacheManagerBuilder; +import org.ehcache.config.builders.ResourcePoolsBuilder; +import org.ehcache.config.units.EntryUnit; +import org.ehcache.config.units.MemoryUnit; +import org.junit.After; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.terracotta.testing.rules.Cluster; + +import java.io.File; +import java.net.URI; +import java.time.Duration; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; + +import static org.ehcache.clustered.client.config.builders.ClusteredResourcePoolBuilder.clusteredDedicated; +import static org.ehcache.clustered.util.TCPProxyUtil.setDelay; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; +import static org.junit.Assert.assertThat; +import static org.terracotta.testing.rules.BasicExternalClusterBuilder.newCluster; + +@RunWith(Parameterized.class) +public class LeaseTest extends ClusteredTests { + + private static final String RESOURCE_CONFIG = + "<config xmlns:ohr='http://www.terracotta.org/config/offheap-resource'>" + + "<ohr:offheap-resources>" + + "<ohr:resource name=\"primary-server-resource\" unit=\"MB\">64</ohr:resource>" + + "</ohr:offheap-resources>" + + "</config>\n" + + "<service xmlns:lease='http://www.terracotta.org/service/lease'>" + + "<lease:connection-leasing>" + + "<lease:lease-length unit='seconds'>5</lease:lease-length>" + + "</lease:connection-leasing>" + + "</service>"; + + @ClassRule + public static Cluster CLUSTER = + newCluster().in(new File("build/cluster")).withServiceFragment(RESOURCE_CONFIG).build(); + + private final List<TCPProxy> proxies = new ArrayList<>(); + + @BeforeClass + public static void waitForActive() throws Exception { + CLUSTER.getClusterControl().waitForActive(); + } + + @After + public void after() { + proxies.forEach(TCPProxy::stop); + } + + @Parameterized.Parameters + public static ResourcePoolsBuilder[] data() { + return new ResourcePoolsBuilder[]{ + ResourcePoolsBuilder.newResourcePoolsBuilder() + .with(clusteredDedicated("primary-server-resource", 1, MemoryUnit.MB)), + ResourcePoolsBuilder.newResourcePoolsBuilder() + .heap(10, EntryUnit.ENTRIES) + .with(clusteredDedicated("primary-server-resource", 1, MemoryUnit.MB)) + }; + } + + @Parameterized.Parameter + public ResourcePoolsBuilder resourcePoolsBuilder; + + @Test + public void leaseExpiry() throws Exception { + URI connectionURI = TCPProxyUtil.getProxyURI(CLUSTER.getConnectionURI(), proxies); + + CacheManagerBuilder<PersistentCacheManager> clusteredCacheManagerBuilder + = CacheManagerBuilder.newCacheManagerBuilder() + .with(ClusteringServiceConfigurationBuilder.cluster(connectionURI.resolve("/crud-cm")) + .timeouts(TimeoutsBuilder.timeouts() + .connection(Duration.ofSeconds(20))) + .autoCreate() + .defaultServerResource("primary-server-resource")); + PersistentCacheManager cacheManager = clusteredCacheManagerBuilder.build(false); + cacheManager.init(); + + CacheConfiguration<Long, String> config = CacheConfigurationBuilder.newCacheConfigurationBuilder(Long.class, String.class, + resourcePoolsBuilder).build(); + + Cache<Long, String> cache = cacheManager.createCache("clustered-cache", config); + cache.put(1L, "The one"); + cache.put(2L, "The two"); + cache.put(3L, "The three"); + assertThat(cache.get(1L),
equalTo("The one")); + assertThat(cache.get(2L), equalTo("The two")); + assertThat(cache.get(3L), equalTo("The three")); + + setDelay(6000, proxies); + Thread.sleep(6000); + // We will now have lost the lease + + setDelay(0L, proxies); + + AtomicBoolean timedout = new AtomicBoolean(false); + + CompletableFuture future = CompletableFuture.supplyAsync(() -> { + while (!timedout.get()) { + try { + Thread.sleep(200); + } catch (InterruptedException e) { + throw new AssertionError(e); + } + String result = cache.get(1L); + if (result != null) { + return result; + } + } + return null; + }); + + assertThat(future.get(30, TimeUnit.SECONDS), is("The one")); + + timedout.set(true); + + assertThat(cache.get(2L), equalTo("The two")); + assertThat(cache.get(3L), equalTo("The three")); + + } + +} diff --git a/clustered/integration-test/src/test/java/org/ehcache/clustered/OversizedCacheOpsTest.java b/clustered/integration-test/src/test/java/org/ehcache/clustered/OversizedCacheOpsTest.java new file mode 100644 index 0000000000..210a781176 --- /dev/null +++ b/clustered/integration-test/src/test/java/org/ehcache/clustered/OversizedCacheOpsTest.java @@ -0,0 +1,89 @@ +/* + * Copyright Terracotta, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.ehcache.clustered; + +import org.ehcache.Cache; +import org.ehcache.PersistentCacheManager; +import org.ehcache.clustered.client.config.builders.ClusteredResourcePoolBuilder; +import org.ehcache.clustered.client.config.builders.ClusteringServiceConfigurationBuilder; +import org.ehcache.config.CacheConfiguration; +import org.ehcache.config.builders.CacheConfigurationBuilder; +import org.ehcache.config.builders.CacheManagerBuilder; +import org.ehcache.config.builders.ResourcePoolsBuilder; +import org.ehcache.config.units.MemoryUnit; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.terracotta.testing.rules.Cluster; + +import java.io.File; +import java.util.Arrays; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; +import static org.junit.Assert.assertThat; +import static org.terracotta.testing.rules.BasicExternalClusterBuilder.newCluster; + +public class OversizedCacheOpsTest extends ClusteredTests { + + private static final String RESOURCE_CONFIG = + "" + + "" + + "2" + + "" + + "\n"; + + @ClassRule + public static Cluster CLUSTER = + newCluster().in(new File("build/cluster")).withServiceFragment(RESOURCE_CONFIG).build(); + + @Test + public void overSizedCacheOps() throws Exception { + CacheManagerBuilder clusteredCacheManagerBuilder + = CacheManagerBuilder.newCacheManagerBuilder() + .with(ClusteringServiceConfigurationBuilder.cluster(CLUSTER.getConnectionURI().resolve("/crud-cm")) + .autoCreate() + .defaultServerResource("primary-server-resource")); + + try (PersistentCacheManager cacheManager = clusteredCacheManagerBuilder.build(true)) { + CacheConfiguration config = 
CacheConfigurationBuilder.newCacheConfigurationBuilder(Long.class, String.class, + ResourcePoolsBuilder.newResourcePoolsBuilder() + .with(ClusteredResourcePoolBuilder.clusteredDedicated("primary-server-resource", 1, MemoryUnit.MB))).build(); + + Cache<Long, String> cache = cacheManager.createCache("clustered-cache", config); + cache.put(1L, "The one"); + cache.put(2L, "The two"); + cache.put(1L, "Another one"); + cache.put(3L, "The three"); + assertThat(cache.get(1L), equalTo("Another one")); + assertThat(cache.get(2L), equalTo("The two")); + assertThat(cache.get(3L), equalTo("The three")); + cache.put(1L, buildLargeString(2)); + assertThat(cache.get(1L), is(nullValue())); + // ensure others are not evicted + assertThat(cache.get(2L), equalTo("The two")); + assertThat(cache.get(3L), equalTo("The three")); + } + } + + private String buildLargeString(int sizeInMB) { + char[] filler = new char[sizeInMB * 1024 * 1024]; + Arrays.fill(filler, '0'); + return new String(filler); + } +} diff --git a/clustered/integration-test/src/test/java/org/ehcache/clustered/TerminatedServerTest.java b/clustered/integration-test/src/test/java/org/ehcache/clustered/TerminatedServerTest.java index d5a23b64a2..d3856aade0 100644 --- a/clustered/integration-test/src/test/java/org/ehcache/clustered/TerminatedServerTest.java +++ b/clustered/integration-test/src/test/java/org/ehcache/clustered/TerminatedServerTest.java @@ -17,6 +17,8 @@ package org.ehcache.clustered; import com.google.code.tempusfugit.concurrency.ConcurrentTestRunner; + +import org.assertj.core.api.ThrowableAssertAlternative; import org.ehcache.Cache; import org.ehcache.CacheManager; import org.ehcache.CachePersistenceException; @@ -30,8 +32,8 @@ import org.ehcache.config.builders.CacheManagerBuilder; import org.ehcache.config.builders.ResourcePoolsBuilder; import org.ehcache.config.units.MemoryUnit; -import org.ehcache.core.spi.store.StoreAccessTimeoutException; -import org.hamcrest.Matchers; +import org.ehcache.core.spi.service.StatisticsService; +import org.ehcache.impl.internal.statistics.DefaultStatisticsService; import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; @@ -46,7 +48,6 @@ import org.junit.runner.Description; import org.junit.runner.RunWith; import org.junit.runners.model.Statement; -import org.terracotta.connection.ConnectionException; import org.terracotta.testing.rules.Cluster; import com.tc.net.protocol.transport.ClientMessageTransport; @@ -57,10 +58,8 @@ import java.io.File; import java.time.Duration; import java.time.temporal.ChronoUnit; -import java.util.ArrayList; import java.util.HashMap; import java.util.Iterator; -import java.util.List; import java.util.Map; import java.util.concurrent.Future; import java.util.concurrent.FutureTask; @@ -68,14 +67,8 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.hasItem; -import static org.hamcrest.Matchers.instanceOf; -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.not; -import static org.hamcrest.Matchers.nullValue; -import static org.junit.Assert.assertThat; -import static org.junit.Assert.fail; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatExceptionOfType; import static org.junit.Assume.assumeNoException; import static org.terracotta.testing.rules.BasicExternalClusterBuilder.newCluster; @@ -94,6 +87,8 @@ @RunWith(ConcurrentTestRunner.class) public
class TerminatedServerTest extends ClusteredTests { + private static final int CLIENT_MAX_PENDING_REQUESTS = 5; + /** * Determines the level of test concurrency. The number of allowed concurrent tests * is set in {@link #setConcurrency()}. @@ -125,10 +120,15 @@ public static void setConcurrency() { private static final String RESOURCE_CONFIG = "<config xmlns:ohr='http://www.terracotta.org/config/offheap-resource'>" - + "<ohr:offheap-resources>" + + "<ohr:offheap-resources>" + "<ohr:resource name=\"primary-server-resource\" unit=\"MB\">64</ohr:resource>" - + "</ohr:offheap-resources>" + - "</config>\n"; + + "</ohr:offheap-resources>" + + "</config>" + + "<service xmlns:lease='http://www.terracotta.org/service/lease'>" + + "<lease:connection-leasing>" + + "<lease:lease-length unit='seconds'>5</lease:lease-length>" + + "</lease:connection-leasing>" + + "</service>\n"; private static Map<String, String> OLD_PROPERTIES; @@ -148,6 +148,9 @@ public static void setProperties() { overrideProperty(oldProperties, TCPropertiesConsts.L1_SHUTDOWN_THREADGROUP_GRACETIME, "1000"); overrideProperty(oldProperties, TCPropertiesConsts.TC_TRANSPORT_HANDSHAKE_TIMEOUT, "1000"); + // Used only by testTerminationFreezesTheClient to be able to fill the inflight queue + overrideProperty(oldProperties, TCPropertiesConsts.CLIENT_MAX_PENDING_REQUESTS, Integer.toString(CLIENT_MAX_PENDING_REQUESTS)); + OLD_PROPERTIES = oldProperties; } @@ -161,6 +164,11 @@ public static void restoreProperties() { } } + private <T extends Throwable> ThrowableAssertAlternative<T> assertExceptionOccurred(Class<T> exception, TimeLimitedTask<?> task) { + return assertThatExceptionOfType(exception) + .isThrownBy(() -> task.run()); + } + private static Cluster createCluster() { try { return newCluster().in(new File("build/cluster")).withServiceFragment(RESOURCE_CONFIG).build(); @@ -201,12 +209,12 @@ public void testTerminationBeforeCacheManagerClose() throws Exception { .with(ClusteringServiceConfigurationBuilder.cluster(cluster.getConnectionURI().resolve("/MyCacheManagerName")) .autoCreate() .defaultServerResource("primary-server-resource")); - final PersistentCacheManager cacheManager = clusteredCacheManagerBuilder.build(false); + PersistentCacheManager cacheManager = clusteredCacheManagerBuilder.build(false); cacheManager.init(); cluster.getClusterControl().terminateAllServers(); - new TimeLimitedTask<Void>(2, TimeUnit.SECONDS) { + new TimeLimitedTask<Void>(10, TimeUnit.SECONDS) { @Override Void runTask() throws Exception { cacheManager.close(); @@ -218,7 +226,6 @@ Void runTask() throws Exception { } @Test - @Ignore("Need to decide if we close cache entity in a daemon thread") public void testTerminationBeforeCacheManagerCloseWithCaches() throws Exception { CacheManagerBuilder<PersistentCacheManager> clusteredCacheManagerBuilder = CacheManagerBuilder.newCacheManagerBuilder() @@ -229,18 +236,13 @@ public void testTerminationBeforeCacheManagerCloseWithCaches() throws Exception CacheConfigurationBuilder.newCacheConfigurationBuilder(Long.class, String.class, ResourcePoolsBuilder.newResourcePoolsBuilder() .with(ClusteredResourcePoolBuilder.clusteredDedicated(4, MemoryUnit.MB)))); - final PersistentCacheManager cacheManager = clusteredCacheManagerBuilder.build(false); + PersistentCacheManager cacheManager = clusteredCacheManagerBuilder.build(false); cacheManager.init(); cluster.getClusterControl().terminateAllServers(); - new TimeLimitedTask<Void>(5, TimeUnit.SECONDS) { - @Override - Void runTask() throws Exception { - cacheManager.close(); - return null; - } - }.run(); + cacheManager.close(); + } @Test @@ -255,26 +257,24 @@ public void testTerminationBeforeCacheManagerRetrieve() throws Exception { .timeouts(TimeoutsBuilder.timeouts().connection(Duration.ofSeconds(1))) // Need a connection timeout shorter than the TimeLimitedTask timeout .expecting() .defaultServerResource("primary-server-resource")); - final PersistentCacheManager cacheManagerExisting = clusteredCacheManagerBuilder.build(false); + PersistentCacheManager cacheManagerExisting =
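/* build(false) only constructs the manager; the init() below is the call that is expected to time out */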
clusteredCacheManagerBuilder.build(false); // Base test time limit on observed TRANSPORT_HANDSHAKE_SYNACK_TIMEOUT; might not have been set in time to be effective long synackTimeout = TimeUnit.MILLISECONDS.toSeconds(ClientMessageTransport.TRANSPORT_HANDSHAKE_SYNACK_TIMEOUT); - try { + + assertExceptionOccurred(StateTransitionException.class, new TimeLimitedTask<Void>(3 + synackTimeout, TimeUnit.SECONDS) { @Override - Void runTask() throws Exception { + Void runTask() { cacheManagerExisting.init(); return null; } - }.run(); - fail("Expecting StateTransitionException"); - } catch (StateTransitionException e) { - assertThat(getCausalChain(e), hasItem(Matchers.instanceOf(ConnectionException.class))); - } + }) + .withRootCauseInstanceOf(TimeoutException.class); } @Test - @Ignore("In multi entity, destroy cache is a blocking operation") + @Ignore("Works, but by sending a really low-level exception. Needs to be fixed to get the expected CachePersistenceException") public void testTerminationBeforeCacheManagerDestroyCache() throws Exception { CacheManagerBuilder<PersistentCacheManager> clusteredCacheManagerBuilder = CacheManagerBuilder.newCacheManagerBuilder() @@ -285,10 +285,10 @@ public void testTerminationBeforeCacheManagerDestroyCache() throws Exception { CacheConfigurationBuilder.newCacheConfigurationBuilder(Long.class, String.class, ResourcePoolsBuilder.newResourcePoolsBuilder() .with(ClusteredResourcePoolBuilder.clusteredDedicated(4, MemoryUnit.MB)))); - final PersistentCacheManager cacheManager = clusteredCacheManagerBuilder.build(false); + PersistentCacheManager cacheManager = clusteredCacheManagerBuilder.build(false); cacheManager.init(); - final Cache<Long, String> cache = cacheManager.getCache("simple-cache", Long.class, String.class); + Cache<Long, String> cache = cacheManager.getCache("simple-cache", Long.class, String.class); cache.put(1L, "un"); cache.put(2L, "deux"); cache.put(3L, "trois"); @@ -297,35 +297,31 @@ cluster.getClusterControl().terminateAllServers(); - try { - new TimeLimitedTask<Void>(5, TimeUnit.SECONDS) { + assertExceptionOccurred(CachePersistenceException.class, + new TimeLimitedTask<Void>(10, TimeUnit.SECONDS) { @Override Void runTask() throws Exception { cacheManager.destroyCache("simple-cache"); return null; } - }.run(); - fail("Expecting CachePersistenceException"); - } catch (CachePersistenceException e) { - assertThat(getUltimateCause(e), is(instanceOf(TimeoutException.class))); - } + }); } @Test - @Ignore("Multi entity means this is now a blocking operation") + @Ignore("There is no timeout on the create cache right now.
It waits until the server comes back") public void testTerminationBeforeCacheCreate() throws Exception { CacheManagerBuilder<PersistentCacheManager> clusteredCacheManagerBuilder = CacheManagerBuilder.newCacheManagerBuilder() .with(ClusteringServiceConfigurationBuilder.cluster(cluster.getConnectionURI().resolve("/MyCacheManagerName")) .autoCreate() .defaultServerResource("primary-server-resource")); - final PersistentCacheManager cacheManager = clusteredCacheManagerBuilder.build(false); + PersistentCacheManager cacheManager = clusteredCacheManagerBuilder.build(false); cacheManager.init(); cluster.getClusterControl().terminateAllServers(); - try { - new TimeLimitedTask<Cache<Long, String>>(5, TimeUnit.SECONDS) { + assertExceptionOccurred(IllegalStateException.class, + new TimeLimitedTask<Cache<Long, String>>(10, TimeUnit.SECONDS) { @Override Cache<Long, String> runTask() throws Exception { return cacheManager.createCache("simple-cache", @@ -333,15 +329,11 @@ Cache<Long, String> runTask() throws Exception { ResourcePoolsBuilder.newResourcePoolsBuilder() .with(ClusteredResourcePoolBuilder.clusteredDedicated(4, MemoryUnit.MB)))); } - }.run(); - fail("Expecting IllegalStateException"); - } catch (IllegalStateException e) { - assertThat(getUltimateCause(e), is(instanceOf(TimeoutException.class))); - } + }) + .withRootCauseInstanceOf(TimeoutException.class); } @Test - @Ignore("Need to decide if we close cache entity in a daemon thread") public void testTerminationBeforeCacheRemove() throws Exception { CacheManagerBuilder<PersistentCacheManager> clusteredCacheManagerBuilder = CacheManagerBuilder.newCacheManagerBuilder() @@ -352,19 +344,12 @@ public void testTerminationBeforeCacheRemove() throws Exception { CacheConfigurationBuilder.newCacheConfigurationBuilder(Long.class, String.class, ResourcePoolsBuilder.newResourcePoolsBuilder() .with(ClusteredResourcePoolBuilder.clusteredDedicated(4, MemoryUnit.MB)))); - final PersistentCacheManager cacheManager = clusteredCacheManagerBuilder.build(false); + PersistentCacheManager cacheManager = clusteredCacheManagerBuilder.build(false); cacheManager.init(); cluster.getClusterControl().terminateAllServers(); - new TimeLimitedTask<Void>(5, TimeUnit.SECONDS) { - @Override - Void runTask() throws Exception { - // CacheManager.removeCache silently "fails" when a timeout is recognized - cacheManager.removeCache("simple-cache"); - return null; - } - }.run(); + cacheManager.removeCache("simple-cache"); } @Test @@ -382,12 +367,12 @@ public void testTerminationThenGet() throws Exception { PersistentCacheManager cacheManager = clusteredCacheManagerBuilder.build(false); cacheManager.init(); - final Cache<Long, String> cache = cacheManager.getCache("simple-cache", Long.class, String.class); + Cache<Long, String> cache = cacheManager.getCache("simple-cache", Long.class, String.class); cache.put(1L, "un"); cache.put(2L, "deux"); cache.put(3L, "trois"); - assertThat(cache.get(2L), is(not(nullValue()))); + assertThat(cache.get(2L)).isNotNull(); cluster.getClusterControl().terminateAllServers(); @@ -398,7 +383,7 @@ String runTask() throws Exception { } }.run(); - assertThat(value, is(nullValue())); + assertThat(value).isNull(); } @Test @@ -416,12 +401,12 @@ public void testTerminationThenContainsKey() throws Exception { PersistentCacheManager cacheManager = clusteredCacheManagerBuilder.build(false); cacheManager.init(); - final Cache<Long, String> cache = cacheManager.getCache("simple-cache", Long.class, String.class); + Cache<Long, String> cache = cacheManager.getCache("simple-cache", Long.class, String.class); cache.put(1L, "un"); cache.put(2L, "deux"); cache.put(3L, "trois"); - assertThat(cache.containsKey(2L), is(true)); +
assertThat(cache.containsKey(2L)).isTrue(); cluster.getClusterControl().terminateAllServers(); @@ -432,7 +417,7 @@ Boolean runTask() throws Exception { } }.run(); - assertThat(value, is(false)); + assertThat(value).isFalse(); } @Ignore("ClusteredStore.iterator() is not implemented") @@ -451,7 +436,7 @@ public void testTerminationThenIterator() throws Exception { PersistentCacheManager cacheManager = clusteredCacheManagerBuilder.build(false); cacheManager.init(); - final Cache<Long, String> cache = cacheManager.getCache("simple-cache", Long.class, String.class); + Cache<Long, String> cache = cacheManager.getCache("simple-cache", Long.class, String.class); cache.put(1L, "un"); cache.put(2L, "deux"); cache.put(3L, "trois"); @@ -465,7 +450,7 @@ Iterator<Cache.Entry<Long, String>> runTask() throws Exception { } }.run(); - assertThat(value.hasNext(), is(false)); + assertThat(value.hasNext()).isFalse(); } @Test @@ -483,26 +468,21 @@ public void testTerminationThenPut() throws Exception { PersistentCacheManager cacheManager = clusteredCacheManagerBuilder.build(false); cacheManager.init(); - final Cache<Long, String> cache = cacheManager.getCache("simple-cache", Long.class, String.class); + Cache<Long, String> cache = cacheManager.getCache("simple-cache", Long.class, String.class); cache.put(1L, "un"); cache.put(2L, "deux"); cache.put(3L, "trois"); cluster.getClusterControl().terminateAllServers(); - try { - new TimeLimitedTask<Void>(5, TimeUnit.SECONDS) { - @Override - Void runTask() throws Exception { - cache.put(2L, "dos"); - return null; - } - }.run(); - fail("Expecting StoreAccessTimeoutException"); - } catch (StoreAccessTimeoutException e) { - //Final timeout occurs on the cleanup which is a remove (hence a GET_AND_APPEND) - assertThat(e.getMessage(), containsString("Timeout exceeded for GET_AND_APPEND")); - } + // The resilience strategy will pick it up and no exception is thrown + new TimeLimitedTask<Void>(10, TimeUnit.SECONDS) { + @Override + Void runTask() throws Exception { + cache.put(2L, "dos"); + return null; + } + }.run(); } @Test @@ -520,24 +500,20 @@ public void testTerminationThenPutIfAbsent() throws Exception { PersistentCacheManager cacheManager = clusteredCacheManagerBuilder.build(false); cacheManager.init(); - final Cache<Long, String> cache = cacheManager.getCache("simple-cache", Long.class, String.class); + Cache<Long, String> cache = cacheManager.getCache("simple-cache", Long.class, String.class); cache.put(1L, "un"); cache.put(2L, "deux"); cache.put(3L, "trois"); cluster.getClusterControl().terminateAllServers(); - try { - new TimeLimitedTask<String>(5, TimeUnit.SECONDS) { - @Override - String runTask() throws Exception { - return cache.putIfAbsent(2L, "dos"); - } - }.run(); - fail("Expecting StoreAccessTimeoutException"); - } catch (StoreAccessTimeoutException e) { - assertThat(e.getMessage(), containsString("Timeout exceeded for GET_AND_APPEND")); - } + // The resilience strategy will pick it up and no exception is thrown + new TimeLimitedTask<String>(10, TimeUnit.SECONDS) { + @Override + String runTask() throws Exception { + return cache.putIfAbsent(2L, "dos"); + } + }.run(); } @Test @@ -555,31 +531,28 @@ public void testTerminationThenRemove() throws Exception { PersistentCacheManager cacheManager = clusteredCacheManagerBuilder.build(false); cacheManager.init(); - final Cache<Long, String> cache = cacheManager.getCache("simple-cache", Long.class, String.class); + Cache<Long, String> cache = cacheManager.getCache("simple-cache", Long.class, String.class); cache.put(1L, "un"); cache.put(2L, "deux"); cache.put(3L, "trois"); cluster.getClusterControl().terminateAllServers(); - try { - new TimeLimitedTask<Void>(5, TimeUnit.SECONDS) { -
@Override - Void runTask() throws Exception { - cache.remove(2L); - return null; - } - }.run(); - fail("Expecting StoreAccessTimeoutException"); - } catch (StoreAccessTimeoutException e) { - assertThat(e.getMessage(), containsString("Timeout exceeded for GET_AND_APPEND")); - } + new TimeLimitedTask<Void>(10, TimeUnit.SECONDS) { + @Override + Void runTask() throws Exception { + cache.remove(2L); + return null; + } + }.run(); } @Test public void testTerminationThenClear() throws Exception { + StatisticsService statisticsService = new DefaultStatisticsService(); CacheManagerBuilder<PersistentCacheManager> clusteredCacheManagerBuilder = CacheManagerBuilder.newCacheManagerBuilder() + .using(statisticsService) .with(ClusteringServiceConfigurationBuilder.cluster(cluster.getConnectionURI().resolve("/MyCacheManagerName")) .timeouts(TimeoutsBuilder.timeouts().write(Duration.of(1, ChronoUnit.SECONDS)).build()) .autoCreate() @@ -591,46 +564,73 @@ PersistentCacheManager cacheManager = clusteredCacheManagerBuilder.build(false); cacheManager.init(); - final Cache<Long, String> cache = cacheManager.getCache("simple-cache", Long.class, String.class); + Cache<Long, String> cache = cacheManager.getCache("simple-cache", Long.class, String.class); cache.put(1L, "un"); cache.put(2L, "deux"); cache.put(3L, "trois"); cluster.getClusterControl().terminateAllServers(); - try { - new TimeLimitedTask<Void>(5, TimeUnit.SECONDS) { + // The resilience strategy will pick it up and no exception is thrown + new TimeLimitedTask<Void>(10, TimeUnit.SECONDS) { @Override - Void runTask() throws Exception { + Void runTask() { cache.clear(); return null; } }.run(); - fail("Expecting StoreAccessTimeoutException"); - } catch (StoreAccessTimeoutException e) { - assertThat(e.getMessage(), containsString("Timeout exceeded for CLEAR")); - } } - private Throwable getUltimateCause(Throwable t) { - Throwable ultimateCause = t; - while (ultimateCause.getCause() != null) { - ultimateCause = ultimateCause.getCause(); - } - return ultimateCause; - } + /** + * If the server goes down, the client should not freeze on a server call. It should time out and answer using + * the resilience strategy, whatever number of calls is made afterwards.
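+ * <p> + * Conceptually, the behaviour exercised here is (a rough sketch, not the exact test code below): + * <pre>{@code + * cache.put(1L, "un"); // server still up + * cluster.getClusterControl().terminateAllServers(); + * String v = cache.get(1L); // blocks at most the configured read timeout, + * // then the resilience strategy answers null + * }</pre>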
+ * + * @throws Exception + */ + @Test + public void testTerminationFreezesTheClient() throws Exception { + Duration readOperationTimeout = Duration.ofMillis(100); + + try (PersistentCacheManager cacheManager = + CacheManagerBuilder.newCacheManagerBuilder() + .with(ClusteringServiceConfigurationBuilder.cluster(cluster.getConnectionURI().resolve("/MyCacheManagerName")) + .timeouts(TimeoutsBuilder.timeouts() + .read(readOperationTimeout)) + .autoCreate() + .defaultServerResource("primary-server-resource")) + .withCache("simple-cache", + CacheConfigurationBuilder.newCacheConfigurationBuilder(Long.class, String.class, + ResourcePoolsBuilder.newResourcePoolsBuilder() + .with(ClusteredResourcePoolBuilder.clusteredDedicated(4, MemoryUnit.MB)))) + .build(true)) { + + Cache<Long, String> cache = cacheManager.getCache("simple-cache", Long.class, String.class); + cache.put(1L, "un"); + + cluster.getClusterControl().terminateAllServers(); + + // Fill the inflight queue and check that we wait no longer than the read timeout + for (int i = 0; i < CLIENT_MAX_PENDING_REQUESTS; i++) { + cache.get(1L); + } + + // The resilience strategy will pick it up and no exception is thrown + new TimeLimitedTask<Void>(readOperationTimeout.toMillis() * 2, TimeUnit.MILLISECONDS) { // multiplied by 2 to leave some room after the expected timeout + @Override + Void runTask() { + cache.get(1L); // the call that could block + return null; + } + }.run(); - private List<Throwable> getCausalChain(Throwable t) { - ArrayList<Throwable> causalChain = new ArrayList<>(); - for (Throwable cause = t; cause != null; cause = cause.getCause()) { - causalChain.add(cause); + } catch (StateTransitionException e) { + // On cacheManager.close(), the client waits for the lease to expire and then throws this exception } - return causalChain; } private static void overrideProperty(Map<String, String> oldProperties, String propertyName, String propertyValue) { TCProperties tcProperties = TCPropertiesImpl.getProperties(); - oldProperties.put(propertyName, tcProperties.getProperty(propertyName)); + oldProperties.put(propertyName, tcProperties.getProperty(propertyName, true)); tcProperties.setProperty(propertyName, propertyValue); } @@ -735,7 +735,7 @@ V run() throws Exception { future.cancel(true); Thread.interrupted(); // Reset interrupted status } - assertThat(testName.getMethodName() + " test thread exceeded its time limit of " + timeLimit + " " + unit, isExpired, is(false)); + assertThat(isExpired).describedAs("%s test thread exceeded its time limit of %d %s", testName.getMethodName(), timeLimit, unit).isFalse(); } return result; diff --git a/clustered/integration-test/src/test/java/org/ehcache/clustered/management/AbstractClusteringManagementTest.java b/clustered/integration-test/src/test/java/org/ehcache/clustered/management/AbstractClusteringManagementTest.java index 634bb4e0b6..b320f523ca 100644 --- a/clustered/integration-test/src/test/java/org/ehcache/clustered/management/AbstractClusteringManagementTest.java +++ b/clustered/integration-test/src/test/java/org/ehcache/clustered/management/AbstractClusteringManagementTest.java @@ -15,46 +15,49 @@ */ package org.ehcache.clustered.management; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.SerializationFeature; import org.ehcache.CacheManager; import org.ehcache.Status; import org.ehcache.clustered.ClusteredTests; +import org.ehcache.clustered.util.BeforeAll; +import org.ehcache.clustered.util.BeforeAllRule; import org.ehcache.config.units.EntryUnit; import org.ehcache.config.units.MemoryUnit; import
org.ehcache.management.registry.DefaultManagementRegistryConfiguration; import org.junit.AfterClass; import org.junit.Before; -import org.junit.BeforeClass; import org.junit.ClassRule; import org.junit.Rule; +import org.junit.rules.RuleChain; import org.junit.rules.Timeout; import org.terracotta.connection.Connection; +import org.terracotta.connection.ConnectionException; +import org.terracotta.exception.EntityConfigurationException; import org.terracotta.management.entity.nms.NmsConfig; import org.terracotta.management.entity.nms.client.DefaultNmsService; import org.terracotta.management.entity.nms.client.NmsEntity; import org.terracotta.management.entity.nms.client.NmsEntityFactory; import org.terracotta.management.entity.nms.client.NmsService; +import org.terracotta.management.model.cluster.AbstractManageableNode; import org.terracotta.management.model.cluster.Client; import org.terracotta.management.model.cluster.ClientIdentifier; +import org.terracotta.management.model.cluster.ServerEntity; import org.terracotta.management.model.cluster.ServerEntityIdentifier; import org.terracotta.management.model.context.Context; -import org.terracotta.management.model.message.Message; import org.terracotta.management.model.notification.ContextualNotification; import org.terracotta.management.model.stats.ContextualStatistics; import org.terracotta.testing.rules.Cluster; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.databind.SerializationFeature; - import java.io.File; -import java.io.FileNotFoundException; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.Scanner; import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; import java.util.stream.Collectors; +import static java.lang.Thread.sleep; import static org.ehcache.clustered.client.config.builders.ClusteredResourcePoolBuilder.clusteredDedicated; import static org.ehcache.clustered.client.config.builders.ClusteredResourcePoolBuilder.clusteredShared; import static org.ehcache.clustered.client.config.builders.ClusteringServiceConfigurationBuilder.cluster; @@ -66,6 +69,7 @@ import static org.junit.Assert.assertTrue; import static org.terracotta.testing.rules.BasicExternalClusterBuilder.newCluster; +@SuppressWarnings("rawtypes") // Need to suppress because of a Javac bug giving a rawtype on AbstractManageableNode::isManageable. 
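/* BeforeAllRule comes from the project's own org.ehcache.clustered.util package. Judging from its use in the RuleChain below, it presumably invokes the test instance's @BeforeAll-annotated methods once, before the first test of the class; a rough sketch under that assumption (names hypothetical, not the actual implementation): @Override public Statement apply(Statement base, Description description) { return new Statement() { @Override public void evaluate() throws Throwable { if (FIRST_RUN.compareAndSet(true, false)) { invokeBeforeAllMethods(target); } // static flag: run once per class base.evaluate(); } }; } */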
public abstract class AbstractClusteringManagementTest extends ClusteredTests { private static final String RESOURCE_CONFIG = @@ -81,41 +85,61 @@ public abstract class AbstractClusteringManagementTest extends ClusteredTests { protected static ServerEntityIdentifier clusterTierManagerEntityIdentifier; protected static ObjectMapper mapper = new ObjectMapper(); - protected static NmsService nmsService; + static NmsService nmsService; protected static ServerEntityIdentifier tmsServerEntityIdentifier; protected static Connection managementConnection; + static { + mapper.configure(SerializationFeature.INDENT_OUTPUT, true); + } + @ClassRule - public static Cluster CLUSTER = newCluster().in(new File("build/cluster")) - .withServiceFragment(RESOURCE_CONFIG).build(); + public static Cluster CLUSTER = newCluster(2) + .in(new File("build/cluster")) + .withServiceFragment(RESOURCE_CONFIG) + .build(); - @BeforeClass - public static void beforeClass() throws Exception { - mapper.configure(SerializationFeature.INDENT_OUTPUT, true); + @Rule + public final RuleChain rules = RuleChain.emptyRuleChain() + .around(Timeout.seconds(90)) + .around(new BeforeAllRule(this)); + @BeforeAll + public void beforeAllTests() throws Exception { CLUSTER.getClusterControl().waitForActive(); + CLUSTER.getClusterControl().waitForRunningPassivesInStandby(); // simulate a TMS client - managementConnection = CLUSTER.newConnection(); - NmsEntityFactory entityFactory = new NmsEntityFactory(managementConnection, AbstractClusteringManagementTest.class.getName()); - NmsEntity tmsAgentEntity = entityFactory.retrieveOrCreate(new NmsConfig()); - nmsService = new DefaultNmsService(tmsAgentEntity); - nmsService.setOperationTimeout(5, TimeUnit.SECONDS); + createNmsService(); + + initCM(); - tmsServerEntityIdentifier = readTopology() - .activeServerEntityStream() - .filter(serverEntity -> serverEntity.getType().equals(NmsConfig.ENTITY_TYPE)) - .findFirst() - .get() // throws if not found - .getServerEntityIdentifier(); + initIdentifiers(); + sendManagementCallOnEntityToCollectStats(); + } + + @Before + public void init() { + if (nmsService != null) { + // this call clears the currently arrived messages, but be aware that some other messages can arrive just after the drain + nmsService.readMessages(); + } + } + + @AfterClass + public static void afterClass() throws Exception { + tearDownCacheManagerAndStatsCollector(); + } + + protected void initCM() throws InterruptedException { cacheManager = newCacheManagerBuilder() // cluster config .with(cluster(CLUSTER.getConnectionURI().resolve("/my-server-entity-1")) .autoCreate() .defaultServerResource("primary-server-resource") - .resourcePool("resource-pool-a", 28, MemoryUnit.MB, "secondary-server-resource") // <2> - .resourcePool("resource-pool-b", 16, MemoryUnit.MB)) // will take from primary-server-resource + .resourcePool("resource-pool-a", 10, MemoryUnit.MB, "secondary-server-resource") // <2> + .resourcePool("resource-pool-b", 8, MemoryUnit.MB)) // will take from primary-server-resource // management config .using(new DefaultManagementRegistryConfiguration() .addTags("webapp-1", "server-node-1") @@ -146,19 +170,6 @@ public static void beforeClass() throws Exception { // ensure the CM is running and get its client id assertThat(cacheManager.getStatus(), equalTo(Status.AVAILABLE)); - ehcacheClientIdentifier = readTopology().getClients().values() - .stream() - .filter(client -> client.getName().equals("Ehcache:my-server-entity-1")) - .findFirst() - .map(Client::getClientIdentifier) - .get(); - -
clusterTierManagerEntityIdentifier = readTopology() - .activeServerEntityStream() - .filter(serverEntity -> serverEntity.getName().equals("my-server-entity-1")) - .findFirst() - .get() // throws if not found - .getServerEntityIdentifier(); // test_notifs_sent_at_CM_init waitForAllNotifications( @@ -170,56 +181,119 @@ public static void beforeClass() throws Exception { "ENTITY_REGISTRY_AVAILABLE", "ENTITY_REGISTRY_AVAILABLE", "ENTITY_REGISTRY_AVAILABLE", "ENTITY_REGISTRY_AVAILABLE", "SERVER_ENTITY_CREATED", "SERVER_ENTITY_CREATED", "SERVER_ENTITY_CREATED", "SERVER_ENTITY_CREATED", "SERVER_ENTITY_CREATED", "SERVER_ENTITY_CREATED", "SERVER_ENTITY_DESTROYED", - "SERVER_ENTITY_FETCHED", "SERVER_ENTITY_FETCHED", "SERVER_ENTITY_FETCHED", "SERVER_ENTITY_FETCHED", "SERVER_ENTITY_FETCHED", "SERVER_ENTITY_FETCHED", "SERVER_ENTITY_FETCHED", - "SERVER_ENTITY_UNFETCHED" + "SERVER_ENTITY_FETCHED", "SERVER_ENTITY_FETCHED", "SERVER_ENTITY_FETCHED", "SERVER_ENTITY_FETCHED", "SERVER_ENTITY_FETCHED", "SERVER_ENTITY_FETCHED", "SERVER_ENTITY_FETCHED", "SERVER_ENTITY_FETCHED", + "SERVER_ENTITY_UNFETCHED", + "EHCACHE_RESOURCE_POOLS_CONFIGURED", + + "SERVER_ENTITY_DESTROYED", + "SERVER_ENTITY_CREATED", + "SERVER_ENTITY_CREATED", "SERVER_ENTITY_CREATED", "SERVER_ENTITY_CREATED", "SERVER_ENTITY_CREATED", "SERVER_ENTITY_CREATED", + "ENTITY_REGISTRY_AVAILABLE", "ENTITY_REGISTRY_AVAILABLE", "ENTITY_REGISTRY_AVAILABLE", "ENTITY_REGISTRY_AVAILABLE", + "EHCACHE_SERVER_STORE_CREATED", "EHCACHE_SERVER_STORE_CREATED", "EHCACHE_SERVER_STORE_CREATED" + ); + } - sendManagementCallOnEntityToCollectStats(); + public static void initIdentifiers() throws Exception { + tmsServerEntityIdentifier = null; + ehcacheClientIdentifier = null; + clusterTierManagerEntityIdentifier = null; + + do { + tmsServerEntityIdentifier = readTopology() + .activeServerEntityStream() + .filter(serverEntity -> serverEntity.getType().equals(NmsConfig.ENTITY_TYPE)) + .filter(AbstractManageableNode::isManageable) + .map(ServerEntity::getServerEntityIdentifier) + .findFirst() + .orElse(null); + sleep(500); + } while (tmsServerEntityIdentifier == null && !Thread.currentThread().isInterrupted()); + + do { + ehcacheClientIdentifier = readTopology().getClients().values() + .stream() + .filter(client -> client.getName().equals("Ehcache:my-server-entity-1")) + .filter(AbstractManageableNode::isManageable) + .findFirst() + .map(Client::getClientIdentifier) + .orElse(null); + sleep(500); + } while (ehcacheClientIdentifier == null && !Thread.currentThread().isInterrupted()); + + do { + clusterTierManagerEntityIdentifier = readTopology() + .activeServerEntityStream() + .filter(serverEntity -> serverEntity.getName().equals("my-server-entity-1")) + .filter(AbstractManageableNode::isManageable) + .map(ServerEntity::getServerEntityIdentifier) + .findFirst() + .orElse(null); + sleep(500); + } while (clusterTierManagerEntityIdentifier == null && !Thread.currentThread().isInterrupted()); } - @AfterClass - public static void afterClass() throws Exception { + public static void tearDownCacheManagerAndStatsCollector() throws Exception { if (cacheManager != null && cacheManager.getStatus() == Status.AVAILABLE) { if (nmsService != null) { - Context ehcacheClient = readTopology().getClient(ehcacheClientIdentifier).get().getContext().with("cacheManagerName", "my-super-cache-manager"); - nmsService.stopStatisticCollector(ehcacheClient).waitForReturn(); + readTopology().getClient(ehcacheClientIdentifier) + .ifPresent(client -> { + try { + 
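// stop the statistic collector that was started for this cache manager before closing everything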
nmsService.stopStatisticCollector(client.getContext().with("cacheManagerName", "my-super-cache-manager")).waitForReturn(); + } catch (Exception e) { + throw new RuntimeException(e); + } + }); } cacheManager.close(); } if (nmsService != null) { - Context context = readTopology().getSingleStripe().getActiveServerEntity(tmsServerEntityIdentifier).get().getContext(); - nmsService.stopStatisticCollector(context); + readTopology().getSingleStripe().getActiveServerEntity(tmsServerEntityIdentifier) + .ifPresent(client -> { + try { + nmsService.stopStatisticCollector(client.getContext()); + } catch (Exception e) { + throw new RuntimeException(e); + } + }); managementConnection.close(); } } - @Rule - public final Timeout globalTimeout = Timeout.seconds(60); + public static void createNmsService() throws ConnectionException, EntityConfigurationException { + createNmsService(CLUSTER); + } - @Before - public void init() throws Exception { - if (nmsService != null) { - // this call clear the CURRRENT arrived messages, but be aware that some other messages can arrive just after the drain - nmsService.readMessages(); - } + public static void createNmsService(Cluster cluster) throws ConnectionException, EntityConfigurationException { + managementConnection = cluster.newConnection(); + + NmsEntityFactory entityFactory = new NmsEntityFactory(managementConnection, AbstractClusteringManagementTest.class.getName()); + NmsEntity tmsAgentEntity = entityFactory.retrieveOrCreate(new NmsConfig()); + + nmsService = new DefaultNmsService(tmsAgentEntity); + nmsService.setOperationTimeout(5, TimeUnit.SECONDS); } - protected static org.terracotta.management.model.cluster.Cluster readTopology() throws Exception { - return nmsService.readTopology(); + public static org.terracotta.management.model.cluster.Cluster readTopology() throws Exception { + org.terracotta.management.model.cluster.Cluster cluster = nmsService.readTopology(); + //System.out.println(mapper.writeValueAsString(cluster.toMap())); + return cluster; } - protected static void sendManagementCallOnClientToCollectStats() throws Exception { - Context ehcacheClient = readTopology().getClient(ehcacheClientIdentifier).get().getContext() + public static void sendManagementCallOnClientToCollectStats() throws Exception { + org.terracotta.management.model.cluster.Cluster topology = readTopology(); + Client manageableClient = topology.getClient(ehcacheClientIdentifier).filter(AbstractManageableNode::isManageable).get(); + Context cmContext = manageableClient.getContext() .with("cacheManagerName", "my-super-cache-manager"); - nmsService.startStatisticCollector(ehcacheClient, 1, TimeUnit.SECONDS).waitForReturn(); + nmsService.startStatisticCollector(cmContext, 1, TimeUnit.SECONDS).waitForReturn(); } - protected static List waitForNextStats() throws Exception { - // uses the monitoring consumre entity to get the content of the stat buffer when some stats are collected + public static List waitForNextStats() throws Exception { + // uses the monitoring to get the content of the stat buffer when some stats are collected return nmsService.waitForMessage(message -> message.getType().equals("STATISTICS")) .stream() .filter(message -> message.getType().equals("STATISTICS")) @@ -227,16 +301,7 @@ protected static List waitForNextStats() throws Exception .collect(Collectors.toList()); } - protected static List notificationTypes(List messages) { - return messages - .stream() - .filter(message -> "NOTIFICATION".equals(message.getType())) - .flatMap(message -> 
message.unwrap(ContextualNotification.class).stream()) - .map(ContextualNotification::getType) - .collect(Collectors.toList()); - } - - protected static String read(String path) throws FileNotFoundException { + protected static String read(String path) { try (Scanner scanner = new Scanner(AbstractClusteringManagementTest.class.getResourceAsStream(path), "UTF-8")) { return scanner.useDelimiter("\\A").next(); } @@ -246,13 +311,17 @@ protected static String normalizeForLineEndings(String stringToNormalize) { return stringToNormalize.replace("\r\n", "\n").replace("\r", "\n"); } - private static void sendManagementCallOnEntityToCollectStats() throws Exception { - Context context = readTopology().getSingleStripe().getActiveServerEntity(tmsServerEntityIdentifier).get().getContext(); + public static void sendManagementCallOnEntityToCollectStats() throws Exception { + org.terracotta.management.model.cluster.Cluster topology = readTopology(); + ServerEntity manageableEntity = topology.getSingleStripe().getActiveServerEntity(tmsServerEntityIdentifier).filter(AbstractManageableNode::isManageable).get(); + Context context = manageableEntity.getContext(); nmsService.startStatisticCollector(context, 1, TimeUnit.SECONDS).waitForReturn(); } - protected static void waitForAllNotifications(String... notificationTypes) throws InterruptedException, TimeoutException { + public static void waitForAllNotifications(String... notificationTypes) throws InterruptedException { List<String> waitingFor = new ArrayList<>(Arrays.asList(notificationTypes)); + List<ContextualNotification> missingOnes = new ArrayList<>(); + // please keep these System.out.println calls: it is really hard to troubleshoot blocking tests in the beforeClass method when we do not receive all notifications. // System.out.println("waitForAllNotifications: " + waitingFor); @@ -264,6 +333,8 @@ protected static void waitForAllNotifications(String... notificationTypes) throw if (waitingFor.remove(notification.getType())) { // System.out.println("Remove " + notification.getType()); // System.out.println("Still waiting for: " + waitingFor); + } else { + missingOnes.add(notification); } } } @@ -278,5 +349,6 @@ protected static void waitForAllNotifications(String... notificationTypes) throw t.interrupt(); // we interrupt the thread that is waiting on the message queue assertTrue("Still waiting for: " + waitingFor, waitingFor.isEmpty()); + assertTrue("Unexpected notification: " + missingOnes, missingOnes.isEmpty()); } } diff --git a/clustered/integration-test/src/test/java/org/ehcache/clustered/management/AfterFailoverManagementServiceTest.java b/clustered/integration-test/src/test/java/org/ehcache/clustered/management/AfterFailoverManagementServiceTest.java new file mode 100644 index 0000000000..159eaace68 --- /dev/null +++ b/clustered/integration-test/src/test/java/org/ehcache/clustered/management/AfterFailoverManagementServiceTest.java @@ -0,0 +1,40 @@ +/* + * Copyright Terracotta, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ +package org.ehcache.clustered.management; + +import org.ehcache.clustered.util.BeforeAll; +import org.junit.FixMethodOrder; +import org.junit.runners.MethodSorters; + +@FixMethodOrder(MethodSorters.NAME_ASCENDING) +public class AfterFailoverManagementServiceTest extends ClusteringManagementServiceTest { + + @BeforeAll + @Override + public void beforeAllTests() throws Exception { + super.beforeAllTests(); + + CLUSTER.getClusterControl().terminateActive(); + CLUSTER.getClusterControl().waitForActive(); + + createNmsService(); + + initIdentifiers(); + + sendManagementCallOnEntityToCollectStats(); + } + +} diff --git a/clustered/integration-test/src/test/java/org/ehcache/clustered/management/CMClosedEventSentTest.java b/clustered/integration-test/src/test/java/org/ehcache/clustered/management/CMClosedEventSentTest.java new file mode 100644 index 0000000000..c444348b80 --- /dev/null +++ b/clustered/integration-test/src/test/java/org/ehcache/clustered/management/CMClosedEventSentTest.java @@ -0,0 +1,104 @@ +/* + * Copyright Terracotta, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.ehcache.clustered.management; + +import org.ehcache.CacheManager; +import org.ehcache.Status; +import org.ehcache.config.units.EntryUnit; +import org.ehcache.config.units.MemoryUnit; +import org.ehcache.management.registry.DefaultManagementRegistryConfiguration; +import org.junit.ClassRule; +import org.junit.Test; +import org.terracotta.management.model.message.Message; +import org.terracotta.management.model.notification.ContextualNotification; +import org.terracotta.testing.rules.Cluster; + +import java.io.File; + +import static org.ehcache.clustered.client.config.builders.ClusteredResourcePoolBuilder.clusteredDedicated; +import static org.ehcache.clustered.client.config.builders.ClusteringServiceConfigurationBuilder.cluster; +import static org.ehcache.clustered.management.AbstractClusteringManagementTest.createNmsService; +import static org.ehcache.clustered.management.AbstractClusteringManagementTest.nmsService; +import static org.ehcache.config.builders.CacheConfigurationBuilder.newCacheConfigurationBuilder; +import static org.ehcache.config.builders.CacheManagerBuilder.newCacheManagerBuilder; +import static org.ehcache.config.builders.ResourcePoolsBuilder.newResourcePoolsBuilder; +import static org.hamcrest.CoreMatchers.equalTo; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertThat; +import static org.terracotta.testing.rules.BasicExternalClusterBuilder.newCluster; + +public class CMClosedEventSentTest { + + private static final String RESOURCE_CONFIG = + "<config xmlns:ohr='http://www.terracotta.org/config/offheap-resource'>" + + "<ohr:offheap-resources>" + + "<ohr:resource name=\"primary-server-resource\" unit=\"MB\">64</ohr:resource>" + + "<ohr:resource name=\"secondary-server-resource\" unit=\"MB\">64</ohr:resource>" + + "</ohr:offheap-resources>" + + "</config>\n" + + "<service xmlns:lease='http://www.terracotta.org/service/lease'>" + + "<lease:connection-leasing>" + + "<lease:lease-length unit='seconds'>5</lease:lease-length>" + + "</lease:connection-leasing>" + + "</service>"; + + @ClassRule + public static Cluster CLUSTER = newCluster().in(new File("build/cluster")).withServiceFragment(RESOURCE_CONFIG).build(); + + @Test(timeout = 60_000) + public void test_CACHE_MANAGER_CLOSED() throws Exception { + createNmsService(CLUSTER); + + try (CacheManager cacheManager =
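/* closing this manager when the try block exits is what should trigger the CACHE_MANAGER_CLOSED notification awaited below */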
newCacheManagerBuilder().with(cluster(CLUSTER.getConnectionURI().resolve("/my-server-entity-1")) + .autoCreate() + .defaultServerResource("primary-server-resource") + .resourcePool("resource-pool-a", 10, MemoryUnit.MB, "secondary-server-resource") // <2> + .resourcePool("resource-pool-b", 10, MemoryUnit.MB)) // will take from primary-server-resource + // management config + .using(new DefaultManagementRegistryConfiguration() + .addTags("webapp-1", "server-node-1") + .setCacheManagerAlias("my-super-cache-manager")) + // cache config + .withCache("dedicated-cache-1", newCacheConfigurationBuilder( + String.class, String.class, + newResourcePoolsBuilder() + .heap(10, EntryUnit.ENTRIES) + .offheap(1, MemoryUnit.MB) + .with(clusteredDedicated("primary-server-resource", 4, MemoryUnit.MB))) + .build()) + .build(true)) { + + assertThat(cacheManager.getStatus(), equalTo(Status.AVAILABLE)); + waitFor("CACHE_MANAGER_AVAILABLE"); + + } + waitFor("CACHE_MANAGER_CLOSED"); + } + + private void waitFor(String notifType) throws InterruptedException { + while (!Thread.currentThread().isInterrupted()) { + Message message = nmsService.waitForMessage(); + if (message.getType().equals("NOTIFICATION")) { + ContextualNotification notification = message.unwrap(ContextualNotification.class).get(0); + if (notification.getType().equals(notifType)) { + break; + } + } + } + assertFalse(Thread.currentThread().isInterrupted()); + } + +} diff --git a/clustered/integration-test/src/test/java/org/ehcache/clustered/management/ClusteredStatisticsCountTest.java b/clustered/integration-test/src/test/java/org/ehcache/clustered/management/ClusteredStatisticsCountTest.java index 560fed6df2..f27143201e 100755 --- a/clustered/integration-test/src/test/java/org/ehcache/clustered/management/ClusteredStatisticsCountTest.java +++ b/clustered/integration-test/src/test/java/org/ehcache/clustered/management/ClusteredStatisticsCountTest.java @@ -66,10 +66,10 @@ public void countTest() throws Exception { System.out.println(" - " + entry.getKey() + " : " + entry.getValue()); }*/ - cacheHitCount = stat.getStatistic("Cache:HitCount").longValue(); - clusteredHitCount = stat.getStatistic("Clustered:HitCount").longValue(); - clusteredMissCount = stat.getStatistic("Clustered:MissCount").longValue(); - cacheMissCount = stat.getStatistic("Cache:MissCount").longValue(); + cacheHitCount = stat.getLatestSampleValue("Cache:HitCount").get(); + clusteredHitCount = stat.getLatestSampleValue("Clustered:HitCount").get(); + clusteredMissCount = stat.getLatestSampleValue("Clustered:MissCount").get(); + cacheMissCount = stat.getLatestSampleValue("Cache:MissCount").get(); } } } while(!Thread.currentThread().isInterrupted() && diff --git a/clustered/integration-test/src/test/java/org/ehcache/clustered/management/ClusteringManagementServiceTest.java b/clustered/integration-test/src/test/java/org/ehcache/clustered/management/ClusteringManagementServiceTest.java index 521eaefcc1..0a9420d5d2 100644 --- a/clustered/integration-test/src/test/java/org/ehcache/clustered/management/ClusteringManagementServiceTest.java +++ b/clustered/integration-test/src/test/java/org/ehcache/clustered/management/ClusteringManagementServiceTest.java @@ -18,7 +18,6 @@ import org.ehcache.Cache; import org.ehcache.config.units.EntryUnit; import org.ehcache.config.units.MemoryUnit; -import org.junit.BeforeClass; import org.junit.FixMethodOrder; import org.junit.Ignore; import org.junit.Test; @@ -48,14 +47,88 @@ @FixMethodOrder(MethodSorters.NAME_ASCENDING) public class 
ClusteringManagementServiceTest extends AbstractClusteringManagementTest { - private static final Collection<StatisticDescriptor> ONHEAP_DESCRIPTORS = new ArrayList<>(); - private static final Collection<StatisticDescriptor> OFFHEAP_DESCRIPTORS = new ArrayList<>(); - private static final Collection<StatisticDescriptor> DISK_DESCRIPTORS = new ArrayList<>(); - private static final Collection<StatisticDescriptor> CLUSTERED_DESCRIPTORS = new ArrayList<>(); - private static final Collection<StatisticDescriptor> CACHE_DESCRIPTORS = new ArrayList<>(); - private static final Collection<StatisticDescriptor> POOL_DESCRIPTORS = new ArrayList<>(); - private static final Collection<StatisticDescriptor> SERVER_STORE_DESCRIPTORS = new ArrayList<>(); - private static final Collection<StatisticDescriptor> OFFHEAP_RES_DESCRIPTORS = new ArrayList<>(); + private static final Collection<StatisticDescriptor> ONHEAP_DESCRIPTORS = Arrays.asList( + new StatisticDescriptor("OnHeap:EvictionCount" , "COUNTER"), + new StatisticDescriptor("OnHeap:ExpirationCount" , "COUNTER"), + new StatisticDescriptor("OnHeap:MissCount" , "COUNTER"), + new StatisticDescriptor("OnHeap:MappingCount" , "GAUGE"), + new StatisticDescriptor("OnHeap:HitCount" , "COUNTER"), + new StatisticDescriptor("OnHeap:PutCount" , "COUNTER"), + new StatisticDescriptor("OnHeap:RemovalCount" , "COUNTER") + ); + private static final Collection<StatisticDescriptor> OFFHEAP_DESCRIPTORS = Arrays.asList( + new StatisticDescriptor("OffHeap:MissCount", "COUNTER"), + new StatisticDescriptor("OffHeap:OccupiedByteSize", "GAUGE"), + new StatisticDescriptor("OffHeap:AllocatedByteSize", "GAUGE"), + new StatisticDescriptor("OffHeap:MappingCount", "GAUGE"), + new StatisticDescriptor("OffHeap:EvictionCount", "COUNTER"), + new StatisticDescriptor("OffHeap:ExpirationCount", "COUNTER"), + new StatisticDescriptor("OffHeap:HitCount", "COUNTER"), + new StatisticDescriptor("OffHeap:PutCount", "COUNTER"), + new StatisticDescriptor("OffHeap:RemovalCount", "COUNTER") + ); + private static final Collection<StatisticDescriptor> DISK_DESCRIPTORS = Arrays.asList( + new StatisticDescriptor("Disk:OccupiedByteSize", "GAUGE"), + new StatisticDescriptor("Disk:AllocatedByteSize", "GAUGE"), + new StatisticDescriptor("Disk:HitCount", "COUNTER"), + new StatisticDescriptor("Disk:EvictionCount", "COUNTER"), + new StatisticDescriptor("Disk:ExpirationCount", "COUNTER"), + new StatisticDescriptor("Disk:MissCount", "COUNTER"), + new StatisticDescriptor("Disk:MappingCount", "GAUGE"), + new StatisticDescriptor("Disk:PutCount", "COUNTER"), + new StatisticDescriptor("Disk:RemovalCount", "COUNTER") + ); + private static final Collection<StatisticDescriptor> CLUSTERED_DESCRIPTORS = Arrays.asList( + new StatisticDescriptor("Clustered:MissCount", "COUNTER"), + new StatisticDescriptor("Clustered:HitCount", "COUNTER"), + new StatisticDescriptor("Clustered:PutCount", "COUNTER"), + new StatisticDescriptor("Clustered:RemovalCount", "COUNTER"), + new StatisticDescriptor("Clustered:EvictionCount", "COUNTER"), + new StatisticDescriptor("Clustered:ExpirationCount", "COUNTER") + ); + private static final Collection<StatisticDescriptor> CACHE_DESCRIPTORS = Arrays.asList( + new StatisticDescriptor("Cache:HitCount", "COUNTER"), + new StatisticDescriptor("Cache:MissCount", "COUNTER"), + new StatisticDescriptor("Cache:PutCount", "COUNTER"), + new StatisticDescriptor("Cache:RemovalCount", "COUNTER"), + new StatisticDescriptor("Cache:EvictionCount", "COUNTER"), + new StatisticDescriptor("Cache:ExpirationCount", "COUNTER"), + new StatisticDescriptor("Cache:GetHitLatency#100", "GAUGE"), + new StatisticDescriptor("Cache:GetHitLatency#50", "GAUGE"), + new StatisticDescriptor("Cache:GetHitLatency#95", "GAUGE"), + new StatisticDescriptor("Cache:GetHitLatency#99", "GAUGE"), + new
StatisticDescriptor("Cache:GetMissLatency#100", "GAUGE"), + new StatisticDescriptor("Cache:GetMissLatency#50", "GAUGE"), + new StatisticDescriptor("Cache:GetMissLatency#95", "GAUGE"), + new StatisticDescriptor("Cache:GetMissLatency#99", "GAUGE"), + new StatisticDescriptor("Cache:PutLatency#100", "GAUGE"), + new StatisticDescriptor("Cache:PutLatency#50", "GAUGE"), + new StatisticDescriptor("Cache:PutLatency#95", "GAUGE"), + new StatisticDescriptor("Cache:PutLatency#99", "GAUGE"), + new StatisticDescriptor("Cache:RemoveLatency#100", "GAUGE"), + new StatisticDescriptor("Cache:RemoveLatency#50", "GAUGE"), + new StatisticDescriptor("Cache:RemoveLatency#95", "GAUGE"), + new StatisticDescriptor("Cache:RemoveLatency#99", "GAUGE") + ); + private static final Collection POOL_DESCRIPTORS = Arrays.asList( + new StatisticDescriptor("Pool:AllocatedSize", "GAUGE") + ); + private static final Collection SERVER_STORE_DESCRIPTORS = Arrays.asList( + new StatisticDescriptor("Store:AllocatedMemory", "GAUGE"), + new StatisticDescriptor("Store:DataAllocatedMemory", "GAUGE"), + new StatisticDescriptor("Store:OccupiedMemory", "GAUGE"), + new StatisticDescriptor("Store:DataOccupiedMemory", "GAUGE"), + new StatisticDescriptor("Store:Entries", "COUNTER"), + new StatisticDescriptor("Store:UsedSlotCount", "COUNTER"), + new StatisticDescriptor("Store:DataVitalMemory", "GAUGE"), + new StatisticDescriptor("Store:VitalMemory", "GAUGE"), + new StatisticDescriptor("Store:RemovedSlotCount", "COUNTER"), + new StatisticDescriptor("Store:DataSize", "GAUGE"), + new StatisticDescriptor("Store:TableCapacity", "GAUGE") + ); + private static final Collection OFFHEAP_RES_DESCRIPTORS = Arrays.asList( + new StatisticDescriptor("OffHeapResource:AllocatedMemory", "GAUGE") + ); @Test @Ignore("This is not a test, but something useful to show a json print of a cluster topology with all management metadata inside") @@ -150,14 +223,14 @@ public void test_D_server_capabilities_exposed() throws Exception { assertThat(settings.get("alias")).isEqualTo("resource-pool-b"); assertThat(settings.get("type")).isEqualTo("Pool"); assertThat(settings.get("serverResource")).isEqualTo("primary-server-resource"); - assertThat(settings.get("size")).isEqualTo(16 * 1024 * 1024L); + assertThat(settings.get("size")).isEqualTo(8 * 1024 * 1024L); assertThat(settings.get("allocationType")).isEqualTo("shared"); settings = (Settings) descriptors.get(1); assertThat(settings.get("alias")).isEqualTo("resource-pool-a"); assertThat(settings.get("type")).isEqualTo("Pool"); assertThat(settings.get("serverResource")).isEqualTo("secondary-server-resource"); - assertThat(settings.get("size")).isEqualTo(28 * 1024 * 1024L); + assertThat(settings.get("size")).isEqualTo(10 * 1024 * 1024L); assertThat(settings.get("allocationType")).isEqualTo("shared"); // Dedicated PoolSettings @@ -211,13 +284,21 @@ public void test_E_notifs_on_add_cache() throws Exception { .with(clusteredDedicated("primary-server-resource", 2, MemoryUnit.MB))) .build()); - ContextContainer contextContainer = readTopology().getClient(ehcacheClientIdentifier).get().getManagementRegistry().get().getContextContainer(); + Cluster cluster = readTopology(); + ContextContainer contextContainer = cluster.getClient(ehcacheClientIdentifier).get().getManagementRegistry().get().getContextContainer(); assertThat(contextContainer.getSubContexts()).hasSize(4); TreeSet cNames = contextContainer.getSubContexts().stream().map(ContextContainer::getValue).collect(Collectors.toCollection(TreeSet::new)); assertThat(cNames).isEqualTo(new 
TreeSet<>(Arrays.asList("cache-2", "dedicated-cache-1", "shared-cache-2", "shared-cache-3"))); - waitForAllNotifications("SERVER_ENTITY_CREATED", "ENTITY_REGISTRY_AVAILABLE", "EHCACHE_SERVER_STORE_CREATED", "SERVER_ENTITY_FETCHED", "CACHE_ADDED"); + if (cluster.serverStream().count() == 2) { + waitForAllNotifications( + "SERVER_ENTITY_CREATED", "ENTITY_REGISTRY_AVAILABLE", "EHCACHE_SERVER_STORE_CREATED", "SERVER_ENTITY_FETCHED", "CACHE_ADDED", + "SERVER_ENTITY_CREATED", "ENTITY_REGISTRY_AVAILABLE", "EHCACHE_SERVER_STORE_CREATED"); // passive server + } else { + waitForAllNotifications( + "SERVER_ENTITY_CREATED", "ENTITY_REGISTRY_AVAILABLE", "EHCACHE_SERVER_STORE_CREATED", "SERVER_ENTITY_FETCHED", "CACHE_ADDED"); + } } @Test @@ -256,7 +337,7 @@ public void test_G_stats_collection() throws Exception { .collect(Collectors.toList()); for (ContextualStatistics stat : stats) { - val = stat.getStatistic("Cache:HitCount").longValue(); + val = stat.getLatestSampleValue("Cache:HitCount").get(); } } while(!Thread.currentThread().isInterrupted() && val != 2); @@ -274,16 +355,18 @@ public void test_G_stats_collection() throws Exception { .collect(Collectors.toList()); for (ContextualStatistics stat : stats) { - val = stat.getStatistic("Cache:HitCount").longValue(); + val = stat.getLatestSampleValue("Cache:HitCount").get(); } } while(!Thread.currentThread().isInterrupted() && val != 4); // wait until we have some stats coming from the server entity - while (!Thread.currentThread().isInterrupted() && !allStats.stream().filter(statistics -> statistics.getContext().contains("consumerId")).findFirst().isPresent()) { + while (!Thread.currentThread().isInterrupted() && !allStats.stream().anyMatch(statistics -> statistics.getContext().contains("consumerId"))) { allStats.addAll(waitForNextStats()); } - List serverStats = allStats.stream().filter(statistics -> statistics.getContext().contains("consumerId")).collect(Collectors.toList()); + List serverStats = allStats.stream() + .filter(statistics -> statistics.getContext().contains("consumerId")) + .collect(Collectors.toList()); // server-side stats TreeSet capabilities = serverStats.stream() @@ -333,72 +416,4 @@ public void test_G_stats_collection() throws Exception { assertThat(offHeapResourceDescriptors).isEqualTo(OFFHEAP_RES_DESCRIPTORS.stream().map(StatisticDescriptor::getName).collect(Collectors.toSet())); } - @BeforeClass - public static void initDescriptors() throws ClassNotFoundException { - ONHEAP_DESCRIPTORS.add(new StatisticDescriptor("OnHeap:EvictionCount" , "COUNTER")); - ONHEAP_DESCRIPTORS.add(new StatisticDescriptor("OnHeap:ExpirationCount" , "COUNTER")); - ONHEAP_DESCRIPTORS.add(new StatisticDescriptor("OnHeap:MissCount" , "COUNTER")); - ONHEAP_DESCRIPTORS.add(new StatisticDescriptor("OnHeap:MappingCount" , "COUNTER")); - ONHEAP_DESCRIPTORS.add(new StatisticDescriptor("OnHeap:OccupiedByteSize", "SIZE")); - ONHEAP_DESCRIPTORS.add(new StatisticDescriptor("OnHeap:HitCount" , "COUNTER")); - ONHEAP_DESCRIPTORS.add(new StatisticDescriptor("OnHeap:PutCount" , "COUNTER")); - ONHEAP_DESCRIPTORS.add(new StatisticDescriptor("OnHeap:RemovalCount" , "COUNTER")); - - OFFHEAP_DESCRIPTORS.add(new StatisticDescriptor("OffHeap:MissCount", "COUNTER")); - OFFHEAP_DESCRIPTORS.add(new StatisticDescriptor("OffHeap:OccupiedByteSize", "SIZE")); - OFFHEAP_DESCRIPTORS.add(new StatisticDescriptor("OffHeap:AllocatedByteSize", "SIZE")); - OFFHEAP_DESCRIPTORS.add(new StatisticDescriptor("OffHeap:MappingCount", "COUNTER")); - OFFHEAP_DESCRIPTORS.add(new 
StatisticDescriptor("OffHeap:EvictionCount", "COUNTER")); - OFFHEAP_DESCRIPTORS.add(new StatisticDescriptor("OffHeap:ExpirationCount", "COUNTER")); - OFFHEAP_DESCRIPTORS.add(new StatisticDescriptor("OffHeap:MaxMappingCount", "COUNTER")); - OFFHEAP_DESCRIPTORS.add(new StatisticDescriptor("OffHeap:HitCount", "COUNTER")); - OFFHEAP_DESCRIPTORS.add(new StatisticDescriptor("OffHeap:PutCount", "COUNTER")); - OFFHEAP_DESCRIPTORS.add(new StatisticDescriptor("OffHeap:RemovalCount", "COUNTER")); - - DISK_DESCRIPTORS.add(new StatisticDescriptor("Disk:MaxMappingCount", "COUNTER")); - DISK_DESCRIPTORS.add(new StatisticDescriptor("Disk:OccupiedByteSize", "SIZE")); - DISK_DESCRIPTORS.add(new StatisticDescriptor("Disk:AllocatedByteSize", "SIZE")); - DISK_DESCRIPTORS.add(new StatisticDescriptor("Disk:HitCount", "COUNTER")); - DISK_DESCRIPTORS.add(new StatisticDescriptor("Disk:EvictionCount", "COUNTER")); - DISK_DESCRIPTORS.add(new StatisticDescriptor("Disk:ExpirationCount", "COUNTER")); - DISK_DESCRIPTORS.add(new StatisticDescriptor("Disk:MissCount", "COUNTER")); - DISK_DESCRIPTORS.add(new StatisticDescriptor("Disk:MappingCount", "COUNTER")); - DISK_DESCRIPTORS.add(new StatisticDescriptor("Disk:PutCount", "COUNTER")); - DISK_DESCRIPTORS.add(new StatisticDescriptor("Disk:RemovalCount", "COUNTER")); - - CLUSTERED_DESCRIPTORS.add(new StatisticDescriptor("Clustered:MissCount", "COUNTER")); - CLUSTERED_DESCRIPTORS.add(new StatisticDescriptor("Clustered:HitCount", "COUNTER")); - CLUSTERED_DESCRIPTORS.add(new StatisticDescriptor("Clustered:PutCount", "COUNTER")); - CLUSTERED_DESCRIPTORS.add(new StatisticDescriptor("Clustered:RemovalCount", "COUNTER")); - CLUSTERED_DESCRIPTORS.add(new StatisticDescriptor("Clustered:MaxMappingCount", "COUNTER")); - CLUSTERED_DESCRIPTORS.add(new StatisticDescriptor("Clustered:EvictionCount", "COUNTER")); - CLUSTERED_DESCRIPTORS.add(new StatisticDescriptor("Clustered:ExpirationCount", "COUNTER")); - CLUSTERED_DESCRIPTORS.add(new StatisticDescriptor("Clustered:OccupiedByteSize", "SIZE")); - CLUSTERED_DESCRIPTORS.add(new StatisticDescriptor("Clustered:AllocatedByteSize", "SIZE")); - CLUSTERED_DESCRIPTORS.add(new StatisticDescriptor("Clustered:MappingCount", "COUNTER")); - - CACHE_DESCRIPTORS.add(new StatisticDescriptor("Cache:HitCount", "COUNTER")); - CACHE_DESCRIPTORS.add(new StatisticDescriptor("Cache:MissCount", "COUNTER")); - CACHE_DESCRIPTORS.add(new StatisticDescriptor("Cache:PutCount", "COUNTER")); - CACHE_DESCRIPTORS.add(new StatisticDescriptor("Cache:RemovalCount", "COUNTER")); - CACHE_DESCRIPTORS.add(new StatisticDescriptor("Cache:EvictionCount", "COUNTER")); - CACHE_DESCRIPTORS.add(new StatisticDescriptor("Cache:ExpirationCount", "COUNTER")); - - POOL_DESCRIPTORS.add(new StatisticDescriptor("Pool:AllocatedSize", "SIZE")); - - SERVER_STORE_DESCRIPTORS.add(new StatisticDescriptor("Store:AllocatedMemory", "SIZE")); - SERVER_STORE_DESCRIPTORS.add(new StatisticDescriptor("Store:DataAllocatedMemory", "SIZE")); - SERVER_STORE_DESCRIPTORS.add(new StatisticDescriptor("Store:OccupiedMemory", "SIZE")); - SERVER_STORE_DESCRIPTORS.add(new StatisticDescriptor("Store:DataOccupiedMemory", "SIZE")); - SERVER_STORE_DESCRIPTORS.add(new StatisticDescriptor("Store:Entries", "COUNTER")); - SERVER_STORE_DESCRIPTORS.add(new StatisticDescriptor("Store:UsedSlotCount", "COUNTER")); - SERVER_STORE_DESCRIPTORS.add(new StatisticDescriptor("Store:DataVitalMemory", "SIZE")); - SERVER_STORE_DESCRIPTORS.add(new StatisticDescriptor("Store:VitalMemory", "SIZE")); - SERVER_STORE_DESCRIPTORS.add(new 
StatisticDescriptor("Store:RemovedSlotCount", "COUNTER")); - SERVER_STORE_DESCRIPTORS.add(new StatisticDescriptor("Store:DataSize", "SIZE")); - SERVER_STORE_DESCRIPTORS.add(new StatisticDescriptor("Store:TableCapacity", "SIZE")); - - OFFHEAP_RES_DESCRIPTORS.add(new StatisticDescriptor("OffHeapResource:AllocatedMemory", "SIZE")); - } - } diff --git a/clustered/integration-test/src/test/java/org/ehcache/clustered/management/DiagnosticTest.java b/clustered/integration-test/src/test/java/org/ehcache/clustered/management/DiagnosticTest.java index b456b113cd..fb1fdd6114 100644 --- a/clustered/integration-test/src/test/java/org/ehcache/clustered/management/DiagnosticTest.java +++ b/clustered/integration-test/src/test/java/org/ehcache/clustered/management/DiagnosticTest.java @@ -23,6 +23,7 @@ import org.terracotta.connection.ConnectionFactory; import org.terracotta.connection.ConnectionPropertyNames; import org.terracotta.connection.entity.EntityRef; +import org.terracotta.management.model.cluster.Server; import java.net.URI; import java.util.Properties; @@ -38,7 +39,7 @@ public class DiagnosticTest extends AbstractClusteringManagementTest { private static final String PROP_REQUEST_TIMEOUTMESSAGE = "request.timeoutMessage"; @Test - public void test_state_dump() throws Exception { + public void test_CACHE_MANAGER_CLOSED() throws Exception { cacheManager.createCache("cache-2", newCacheConfigurationBuilder( String.class, String.class, newResourcePoolsBuilder() @@ -47,12 +48,14 @@ public void test_state_dump() throws Exception { .with(clusteredDedicated("primary-server-resource", 2, MemoryUnit.MB))) .build()); + int activePort = readTopology().serverStream().filter(Server::isActive).findFirst().get().getBindPort(); + Properties properties = new Properties(); properties.setProperty(ConnectionPropertyNames.CONNECTION_TIMEOUT, String.valueOf("5000")); properties.setProperty(ConnectionPropertyNames.CONNECTION_NAME, "diagnostic"); properties.setProperty(PROP_REQUEST_TIMEOUT, "5000"); properties.setProperty(PROP_REQUEST_TIMEOUTMESSAGE, "timed out"); - URI uri = URI.create("diagnostic://" + CLUSTER.getConnectionURI().getAuthority()); + URI uri = URI.create("diagnostic://localhost:" + activePort); Connection connection = ConnectionFactory.connect(uri, properties); EntityRef ref = connection.getEntityRef(Diagnostics.class, 1, "root"); diff --git a/clustered/integration-test/src/test/java/org/ehcache/clustered/management/EhcacheConfigWithManagementTest.java b/clustered/integration-test/src/test/java/org/ehcache/clustered/management/EhcacheConfigWithManagementTest.java index fd941deb85..44d1dca2d0 100644 --- a/clustered/integration-test/src/test/java/org/ehcache/clustered/management/EhcacheConfigWithManagementTest.java +++ b/clustered/integration-test/src/test/java/org/ehcache/clustered/management/EhcacheConfigWithManagementTest.java @@ -61,8 +61,8 @@ public void create_cache_manager() throws Exception { .with(cluster(CLUSTER.getConnectionURI().resolve("/my-server-entity-3")) .autoCreate() .defaultServerResource("primary-server-resource") - .resourcePool("resource-pool-a", 28, MemoryUnit.MB, "secondary-server-resource") // <2> - .resourcePool("resource-pool-b", 16, MemoryUnit.MB)) // will take from primary-server-resource + .resourcePool("resource-pool-a", 10, MemoryUnit.MB, "secondary-server-resource") // <2> + .resourcePool("resource-pool-b", 8, MemoryUnit.MB)) // will take from primary-server-resource // management config .using(new DefaultManagementRegistryConfiguration() .addTags("webapp-1", 
"server-node-1") diff --git a/clustered/integration-test/src/test/java/org/ehcache/clustered/management/EhcacheManagerToStringTest.java b/clustered/integration-test/src/test/java/org/ehcache/clustered/management/EhcacheManagerToStringTest.java index 270ca16f23..dd5dbe36be 100644 --- a/clustered/integration-test/src/test/java/org/ehcache/clustered/management/EhcacheManagerToStringTest.java +++ b/clustered/integration-test/src/test/java/org/ehcache/clustered/management/EhcacheManagerToStringTest.java @@ -89,7 +89,7 @@ public void clusteredToString() throws Exception { .with(ClusteringServiceConfigurationBuilder.cluster(uri) .autoCreate() .defaultServerResource("primary-server-resource") - .resourcePool("resource-pool-a", 32, MemoryUnit.MB)) + .resourcePool("resource-pool-a", 10, MemoryUnit.MB)) // management config .using(new DefaultManagementRegistryConfiguration() .addTags("webapp-1", "server-node-1") @@ -133,32 +133,32 @@ public void clusteredToString() throws Exception { public static class SampleLoaderWriter implements CacheLoaderWriter { @Override - public V load(K key) throws Exception { + public V load(K key) { throw new UnsupportedOperationException("Implement Me"); } @Override - public Map loadAll(Iterable keys) throws Exception { + public Map loadAll(Iterable keys) { throw new UnsupportedOperationException("Implement me!"); } @Override - public void write(K key, V value) throws Exception { + public void write(K key, V value) { throw new UnsupportedOperationException("Implement me!"); } @Override - public void writeAll(Iterable> entries) throws Exception { + public void writeAll(Iterable> entries) { throw new UnsupportedOperationException("Implement me!"); } @Override - public void delete(K key) throws Exception { + public void delete(K key) { throw new UnsupportedOperationException("Implement me!"); } @Override - public void deleteAll(Iterable keys) throws Exception { + public void deleteAll(Iterable keys) { throw new UnsupportedOperationException("Implement me!"); } } diff --git a/clustered/integration-test/src/test/java/org/ehcache/clustered/management/ManagementClusterConnectionTest.java b/clustered/integration-test/src/test/java/org/ehcache/clustered/management/ManagementClusterConnectionTest.java new file mode 100644 index 0000000000..6d70c9dc93 --- /dev/null +++ b/clustered/integration-test/src/test/java/org/ehcache/clustered/management/ManagementClusterConnectionTest.java @@ -0,0 +1,199 @@ +/* + * Copyright Terracotta, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.ehcache.clustered.management; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.SerializationFeature; +import com.tc.net.proxy.TCPProxy; +import org.ehcache.Cache; +import org.ehcache.CacheManager; +import org.ehcache.Status; +import org.ehcache.clustered.util.TCPProxyUtil; +import org.ehcache.config.units.EntryUnit; +import org.ehcache.config.units.MemoryUnit; +import org.ehcache.management.registry.DefaultManagementRegistryConfiguration; +import org.hamcrest.Matchers; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.terracotta.management.model.capabilities.descriptors.Settings; +import org.terracotta.testing.rules.Cluster; + +import java.io.File; +import java.net.URI; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +import static org.ehcache.clustered.client.config.builders.ClusteredResourcePoolBuilder.clusteredDedicated; +import static org.ehcache.clustered.client.config.builders.ClusteringServiceConfigurationBuilder.cluster; +import static org.ehcache.clustered.management.AbstractClusteringManagementTest.createNmsService; +import static org.ehcache.clustered.management.AbstractClusteringManagementTest.initIdentifiers; +import static org.ehcache.clustered.management.AbstractClusteringManagementTest.readTopology; +import static org.ehcache.clustered.management.AbstractClusteringManagementTest.sendManagementCallOnEntityToCollectStats; +import static org.ehcache.clustered.management.AbstractClusteringManagementTest.tearDownCacheManagerAndStatsCollector; +import static org.ehcache.clustered.util.TCPProxyUtil.setDelay; +import static org.ehcache.config.builders.CacheConfigurationBuilder.newCacheConfigurationBuilder; +import static org.ehcache.config.builders.CacheManagerBuilder.newCacheManagerBuilder; +import static org.ehcache.config.builders.ResourcePoolsBuilder.newResourcePoolsBuilder; +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.is; +import static org.junit.Assert.assertThat; +import static org.terracotta.testing.rules.BasicExternalClusterBuilder.newCluster; + +public class ManagementClusterConnectionTest { + + private static final String RESOURCE_CONFIG = + "<config xmlns:ohr='http://www.terracotta.org/config/offheap-resource'>" + + "<ohr:offheap-resources>" + + "<ohr:resource name=\"primary-server-resource\" unit=\"MB\">64</ohr:resource>" + + "<ohr:resource name=\"secondary-server-resource\" unit=\"MB\">64</ohr:resource>" + + "</ohr:offheap-resources>" + + "</config>\n" + + "<service xmlns:lease='http://www.terracotta.org/service/lease'>" + + "<lease:connection-leasing>" + + "<lease:lease-length unit='seconds'>5</lease:lease-length>" + + "</lease:connection-leasing>" + + "</service>"; + + protected static CacheManager cacheManager; + protected static ObjectMapper mapper = new ObjectMapper(); + + private static final List<TCPProxy> proxies = new ArrayList<>(); + + @ClassRule + public static Cluster CLUSTER = newCluster() + .in(new File("build/cluster")) + .withServiceFragment(RESOURCE_CONFIG).build(); + + + @BeforeClass + public static void beforeClass() throws Exception { + + mapper.configure(SerializationFeature.INDENT_OUTPUT, true); + + CLUSTER.getClusterControl().waitForActive(); + + // simulate a TMS client + createNmsService(CLUSTER); + + URI connectionURI = TCPProxyUtil.getProxyURI(CLUSTER.getConnectionURI(), proxies); + + cacheManager = newCacheManagerBuilder() + // cluster config + .with(cluster(connectionURI.resolve("/my-server-entity-1")) + .autoCreate() + .defaultServerResource("primary-server-resource") + .resourcePool("resource-pool-a", 10, MemoryUnit.MB, "secondary-server-resource") // <2> + .resourcePool("resource-pool-b", 10, MemoryUnit.MB)) // will take from primary-server-resource + // management config + .using(new DefaultManagementRegistryConfiguration() + .addTags("webapp-1",
"server-node-1") + .setCacheManagerAlias("my-super-cache-manager")) + // cache config + .withCache("dedicated-cache-1", newCacheConfigurationBuilder( + String.class, String.class, + newResourcePoolsBuilder() + .heap(10, EntryUnit.ENTRIES) + .offheap(1, MemoryUnit.MB) + .with(clusteredDedicated("primary-server-resource", 4, MemoryUnit.MB))) + .build()) + .build(true); + + // ensure the CM is running and get its client id + assertThat(cacheManager.getStatus(), equalTo(Status.AVAILABLE)); + + // test_notifs_sent_at_CM_init + AbstractClusteringManagementTest.waitForAllNotifications( + "CLIENT_CONNECTED", + "CLIENT_REGISTRY_AVAILABLE", + "CLIENT_TAGS_UPDATED", + "EHCACHE_RESOURCE_POOLS_CONFIGURED", + "EHCACHE_SERVER_STORE_CREATED", + "ENTITY_REGISTRY_AVAILABLE", "ENTITY_REGISTRY_AVAILABLE", + "SERVER_ENTITY_CREATED", "SERVER_ENTITY_CREATED", "SERVER_ENTITY_CREATED", "SERVER_ENTITY_CREATED", + "SERVER_ENTITY_DESTROYED", + "SERVER_ENTITY_FETCHED", "SERVER_ENTITY_FETCHED", "SERVER_ENTITY_FETCHED", "SERVER_ENTITY_FETCHED", "SERVER_ENTITY_FETCHED", "SERVER_ENTITY_FETCHED", + "SERVER_ENTITY_UNFETCHED" + ); + + initIdentifiers(); + + sendManagementCallOnEntityToCollectStats(); + } + + @Test + public void test_reconnection() throws Exception { + long count = readTopology().clientStream() + .filter(client -> client.getName() + .startsWith("Ehcache:") && client.isManageable() && client.getTags() + .containsAll(Arrays.asList("webapp-1", "server-node-1"))) + .count(); + + Assert.assertThat(count, Matchers.equalTo(1L)); + + String instanceId = getInstanceId(); + + setDelay(6000, proxies); + Thread.sleep(6000); + + setDelay(0L, proxies); + + Cache cache = cacheManager.getCache("dedicated-cache-1", String.class, String.class); + String initiate_reconnect = cache.get("initiate reconnect"); + + assertThat(initiate_reconnect, Matchers.nullValue()); + + while (!Thread.currentThread().isInterrupted()) { +// System.out.println(mapper.writeValueAsString(readTopology().toMap())); + + count = readTopology().clientStream() + .filter(client -> client.getName() + .startsWith("Ehcache:") && client.isManageable() && client.getTags() + .containsAll(Arrays.asList("webapp-1", "server-node-1"))) + .count(); + + if (count == 1) { + break; + } else { + Thread.sleep(1_000); + } + } + + assertThat(Thread.currentThread().isInterrupted(), is(false)); + assertThat(getInstanceId(), equalTo(instanceId)); + } + + private String getInstanceId() throws Exception { + return readTopology().clientStream() + .filter(client -> client.getName().startsWith("Ehcache:") && client.isManageable()) + .findFirst().get() + .getManagementRegistry().get() + .getCapability("SettingsCapability").get() + .getDescriptors(Settings.class).stream() + .filter(settings -> settings.containsKey("instanceId")) + .map(settings -> settings.getString("instanceId")) + .findFirst().get(); + } + + @AfterClass + public static void afterClass() throws Exception { + tearDownCacheManagerAndStatsCollector(); + } + +} diff --git a/clustered/integration-test/src/test/java/org/ehcache/clustered/reconnect/BasicCacheReconnectTest.java b/clustered/integration-test/src/test/java/org/ehcache/clustered/reconnect/BasicCacheReconnectTest.java new file mode 100644 index 0000000000..162baf2b26 --- /dev/null +++ b/clustered/integration-test/src/test/java/org/ehcache/clustered/reconnect/BasicCacheReconnectTest.java @@ -0,0 +1,173 @@ +/* + * Copyright Terracotta, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.ehcache.clustered.reconnect; + +import com.tc.net.proxy.TCPProxy; +import org.ehcache.Cache; +import org.ehcache.CacheManager; +import org.ehcache.PersistentCacheManager; +import org.ehcache.StateTransitionException; +import org.ehcache.clustered.ClusteredTests; +import org.ehcache.clustered.client.config.builders.ClusteredResourcePoolBuilder; +import org.ehcache.clustered.client.config.builders.ClusteringServiceConfigurationBuilder; +import org.ehcache.clustered.client.internal.store.ReconnectInProgressException; +import org.ehcache.clustered.util.TCPProxyUtil; +import org.ehcache.config.CacheConfiguration; +import org.ehcache.config.builders.CacheConfigurationBuilder; +import org.ehcache.config.builders.CacheManagerBuilder; +import org.ehcache.config.builders.ResourcePoolsBuilder; +import org.ehcache.config.units.MemoryUnit; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Ignore; +import org.junit.Test; +import org.terracotta.testing.rules.Cluster; + +import java.io.File; +import java.net.URI; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Executors; +import java.util.concurrent.ThreadLocalRandom; +import java.util.concurrent.TimeUnit; + +import static org.ehcache.clustered.util.TCPProxyUtil.setDelay; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; +import static org.junit.Assert.assertThat; +import static org.junit.Assert.fail; +import static org.terracotta.testing.rules.BasicExternalClusterBuilder.newCluster; + +public class BasicCacheReconnectTest extends ClusteredTests { + public static final String RESOURCE_CONFIG = + "<config xmlns:ohr='http://www.terracotta.org/config/offheap-resource'>" + + "<ohr:offheap-resources>" + + "<ohr:resource name=\"primary-server-resource\" unit=\"MB\">64</ohr:resource>" + + "</ohr:offheap-resources>" + + "</config>\n" + + "<service xmlns:lease='http://www.terracotta.org/service/lease'>" + + "<lease:connection-leasing>" + + "<lease:lease-length unit='seconds'>5</lease:lease-length>" + + "</lease:connection-leasing>" + + "</service>"; + + private static PersistentCacheManager cacheManager; + + private static CacheConfiguration<Long, String> config = CacheConfigurationBuilder.newCacheConfigurationBuilder(Long.class, String.class, + ResourcePoolsBuilder.newResourcePoolsBuilder() + .with(ClusteredResourcePoolBuilder.clusteredDedicated("primary-server-resource", 1, MemoryUnit.MB))) + .withResilienceStrategy(new ThrowingResiliencyStrategy<>()) + .build(); + + private static final List<TCPProxy> proxies = new ArrayList<>(); + + @ClassRule + public static Cluster CLUSTER = + newCluster().in(new File("build/cluster")).withServiceFragment(RESOURCE_CONFIG).build(); + + @BeforeClass + public static void waitForActive() throws Exception { + CLUSTER.getClusterControl().waitForActive(); + + URI connectionURI = TCPProxyUtil.getProxyURI(CLUSTER.getConnectionURI(), proxies); + + CacheManagerBuilder<PersistentCacheManager> clusteredCacheManagerBuilder + = CacheManagerBuilder.newCacheManagerBuilder() + .with(ClusteringServiceConfigurationBuilder.cluster(connectionURI.resolve("/crud-cm")) + .autoCreate()
.defaultServerResource("primary-server-resource")); + cacheManager = clusteredCacheManagerBuilder.build(false); + cacheManager.init(); + } + + @Test + public void cacheOpsDuringReconnection() throws Exception { + + try { + + Cache<Long, String> cache = cacheManager.createCache("clustered-cache", config); + + CompletableFuture<Void> future = CompletableFuture.runAsync(() -> + ThreadLocalRandom.current() + .longs() + .forEach(value -> + cache.put(value, Long.toString(value)))); + + expireLease(); + + try { + future.get(5000, TimeUnit.MILLISECONDS); + fail(); + } catch (ExecutionException e) { + assertThat(e.getCause().getCause().getCause(), instanceOf(ReconnectInProgressException.class)); + } + + CompletableFuture<Void> getSucceededFuture = CompletableFuture.runAsync(() -> { + while (true) { + try { + cache.get(1L); + break; + } catch (RuntimeException e) { + + } + } + }); + + getSucceededFuture.get(20000, TimeUnit.MILLISECONDS); + } finally { + cacheManager.destroyCache("clustered-cache"); + } + + } + + @Test + public void reconnectDuringCacheCreation() throws Exception { + + expireLease(); + + Cache<Long, String> cache = cacheManager.createCache("clustered-cache", config); + + assertThat(cache, notNullValue()); + + cacheManager.destroyCache("clustered-cache"); + + } + + @Test + public void reconnectDuringCacheDestroy() throws Exception { + + Cache<Long, String> cache = cacheManager.createCache("clustered-cache", config); + + assertThat(cache, notNullValue()); + + expireLease(); + + cacheManager.destroyCache("clustered-cache"); + assertThat(cacheManager.getCache("clustered-cache", Long.class, String.class), nullValue()); + + } + + private static void expireLease() throws InterruptedException { + setDelay(6000, proxies); + Thread.sleep(6000); + + setDelay(0L, proxies); + } +} \ No newline at end of file diff --git a/clustered/integration-test/src/test/java/org/ehcache/clustered/reconnect/CacheManagerDestroyReconnectTest.java b/clustered/integration-test/src/test/java/org/ehcache/clustered/reconnect/CacheManagerDestroyReconnectTest.java new file mode 100644 index 0000000000..bbb3dc92ca --- /dev/null +++ b/clustered/integration-test/src/test/java/org/ehcache/clustered/reconnect/CacheManagerDestroyReconnectTest.java @@ -0,0 +1,80 @@ +/* + * Copyright Terracotta, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
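The reconnect tests above all follow the same recipe: route the client connection through a TCP proxy, stall the proxy for longer than the 5-second lease granted by the server, then let traffic flow again so the client is forced to reconnect. A condensed sketch of that setup, using the TCPProxyUtil helpers these tests already import (the 6-second figures simply exceed the configured lease):

    // Sketch: force a lease expiry by stalling the proxied connection.
    List<TCPProxy> proxies = new ArrayList<>();
    URI proxyUri = TCPProxyUtil.getProxyURI(CLUSTER.getConnectionURI(), proxies);
    // ... build the cache manager against proxyUri instead of the real URI ...
    setDelay(6000, proxies); // stall longer than the 5s lease
    Thread.sleep(6000);      // lease expires while traffic is stalled
    setDelay(0L, proxies);   // resume traffic; the client must now reconnect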
+ */ +package org.ehcache.clustered.reconnect; + +import com.tc.net.proxy.TCPProxy; +import org.ehcache.PersistentCacheManager; +import org.ehcache.clustered.ClusteredTests; +import org.ehcache.clustered.client.config.builders.ClusteringServiceConfigurationBuilder; +import org.ehcache.clustered.util.TCPProxyUtil; +import org.ehcache.config.builders.CacheManagerBuilder; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.terracotta.testing.rules.Cluster; + +import java.io.File; +import java.net.URI; +import java.util.ArrayList; +import java.util.List; + +import static org.ehcache.clustered.reconnect.BasicCacheReconnectTest.RESOURCE_CONFIG; +import static org.ehcache.clustered.util.TCPProxyUtil.setDelay; +import static org.terracotta.testing.rules.BasicExternalClusterBuilder.newCluster; + +public class CacheManagerDestroyReconnectTest extends ClusteredTests { + + + private static PersistentCacheManager cacheManager; + + private static final List proxies = new ArrayList<>(); + + @ClassRule + public static Cluster CLUSTER = + newCluster().in(new File("build/cluster")).withServiceFragment(RESOURCE_CONFIG).build(); + + @BeforeClass + public static void waitForActive() throws Exception { + CLUSTER.getClusterControl().waitForActive(); + + URI connectionURI = TCPProxyUtil.getProxyURI(CLUSTER.getConnectionURI(), proxies); + + CacheManagerBuilder clusteredCacheManagerBuilder + = CacheManagerBuilder.newCacheManagerBuilder() + .with(ClusteringServiceConfigurationBuilder.cluster(connectionURI.resolve("/crud-cm")) + .autoCreate() + .defaultServerResource("primary-server-resource")); + cacheManager = clusteredCacheManagerBuilder.build(false); + cacheManager.init(); + } + + @Test + public void testDestroyCacheManagerReconnects() throws Exception { + + setDelay(6000, proxies); + Thread.sleep(6000); + + setDelay(0L, proxies); + + cacheManager.close(); + + cacheManager.destroy(); + + System.out.println(cacheManager.getStatus()); + + } + +} diff --git a/clustered/integration-test/src/test/java/org/ehcache/clustered/reconnect/ThrowingResiliencyStrategy.java b/clustered/integration-test/src/test/java/org/ehcache/clustered/reconnect/ThrowingResiliencyStrategy.java new file mode 100644 index 0000000000..eddd4192a6 --- /dev/null +++ b/clustered/integration-test/src/test/java/org/ehcache/clustered/reconnect/ThrowingResiliencyStrategy.java @@ -0,0 +1,95 @@ +/* + * Copyright Terracotta, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
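While a reconnect is still in flight, cache operations surface ReconnectInProgressException through the resilience strategy defined next, so the tests recover by retrying a cheap read until it succeeds, bounded by a future timeout. A condensed form of the cacheOpsDuringReconnection recovery loop above, with illustrative names, inside a test method that declares throws Exception:

    // Sketch: poll a get() until the client has reconnected; fail the test if
    // recovery takes more than 20 seconds.
    CompletableFuture<Void> recovered = CompletableFuture.runAsync(() -> {
      while (true) {
        try {
          cache.get(1L); // throws (via the resilience strategy) until reconnected
          return;
        } catch (RuntimeException e) {
          // still reconnecting; retry
        }
      }
    });
    recovered.get(20_000, TimeUnit.MILLISECONDS);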
+ */ +package org.ehcache.clustered.reconnect; + +import org.ehcache.Cache; +import org.ehcache.clustered.client.internal.store.ReconnectInProgressException; +import org.ehcache.spi.resilience.ResilienceStrategy; +import org.ehcache.spi.resilience.StoreAccessException; + +import java.util.Map; + +//For tests +public class ThrowingResiliencyStrategy<K, V> implements ResilienceStrategy<K, V> { + @Override + public V getFailure(K key, StoreAccessException e) { + throw new RuntimeException(e); + } + + @Override + public boolean containsKeyFailure(K key, StoreAccessException e) { + throw new RuntimeException(e); + } + + // This is specifically for the test so that it only throws due to a ReconnectInProgress, + // sometimes puts might even timeout in build systems + @Override + public void putFailure(K key, V value, StoreAccessException e) { + if (e.getCause() instanceof ReconnectInProgressException) { + throw new RuntimeException(e); + } + } + + @Override + public void removeFailure(K key, StoreAccessException e) { + throw new RuntimeException(e); + } + + @Override + public void clearFailure(StoreAccessException e) { + throw new RuntimeException(e); + } + + @Override + public Cache.Entry<K, V> iteratorFailure(StoreAccessException e) { + throw new RuntimeException(e); + } + + @Override + public V putIfAbsentFailure(K key, V value, StoreAccessException e) { + throw new RuntimeException(e); + } + + @Override + public boolean removeFailure(K key, V value, StoreAccessException e) { + throw new RuntimeException(e); + } + + @Override + public V replaceFailure(K key, V value, StoreAccessException e) { + throw new RuntimeException(e); + } + + @Override + public boolean replaceFailure(K key, V value, V newValue, StoreAccessException e) { + throw new RuntimeException(e); + } + + @Override + public Map<K, V> getAllFailure(Iterable<? extends K> keys, StoreAccessException e) { + throw new RuntimeException(e); + } + + @Override + public void putAllFailure(Map<? extends K, ? extends V> entries, StoreAccessException e) { + throw new RuntimeException(e); + } + + @Override + public void removeAllFailure(Iterable<? extends K> keys, StoreAccessException e) { + throw new RuntimeException(e); + } +} diff --git a/clustered/integration-test/src/test/java/org/ehcache/clustered/replication/BasicClusteredCacheOpsReplicationMultiThreadedTest.java b/clustered/integration-test/src/test/java/org/ehcache/clustered/replication/BasicClusteredCacheOpsReplicationMultiThreadedTest.java index 04faa8a683..a7617cba6a 100644 --- a/clustered/integration-test/src/test/java/org/ehcache/clustered/replication/BasicClusteredCacheOpsReplicationMultiThreadedTest.java +++ b/clustered/integration-test/src/test/java/org/ehcache/clustered/replication/BasicClusteredCacheOpsReplicationMultiThreadedTest.java @@ -158,7 +158,7 @@ public void tearDown() throws Exception { @Test(timeout=180000) public void testCRUD() throws Exception { Set<Long> universalSet = ConcurrentHashMap.newKeySet(); - List<Future> futures = new ArrayList<>(); + List<Future<?>> futures = new ArrayList<>(); caches.forEach(cache -> { for (int i = 0; i < NUM_OF_THREADS; i++) { @@ -199,7 +199,7 @@ public void testCRUD() throws Exception { @Test(timeout=180000) public void testBulkOps() throws Exception { Set<Long> universalSet = ConcurrentHashMap.newKeySet(); - List<Future> futures = new ArrayList<>(); + List<Future<?>> futures = new ArrayList<>(); caches.forEach(cache -> { for (int i = 0; i < NUM_OF_THREADS; i++) { @@ -245,7 +245,7 @@ public void testBulkOps() throws Exception { "dealing with in-flight invalidation reconstructed from reconnect data") @Test(timeout=180000) public void testClear() throws
Exception { - List<Future> futures = new ArrayList<>(); + List<Future<?>> futures = new ArrayList<>(); Set<Long> universalSet = ConcurrentHashMap.newKeySet(); caches.forEach(cache -> { @@ -265,7 +265,7 @@ public void testClear() throws Exception { CACHE2.get(x); }); - Future clearFuture = executorService.submit(() -> CACHE1.clear()); + Future<?> clearFuture = executorService.submit(() -> CACHE1.clear()); CLUSTER.getClusterControl().terminateActive(); @@ -275,10 +275,10 @@ } - private void drainTasks(List<Future> futures) throws InterruptedException, java.util.concurrent.ExecutionException { + private void drainTasks(List<Future<?>> futures) throws InterruptedException, java.util.concurrent.ExecutionException { for (int i = 0; i < futures.size(); i++) { try { - futures.get(i).get(10, TimeUnit.SECONDS); + futures.get(i).get(60, TimeUnit.SECONDS); } catch (TimeoutException e) { fail("Stuck on number " + i); } @@ -286,6 +289,9 @@ private void drainTasks(List<Future> futures) throws InterruptedException, java. } private static class BlobValue implements Serializable { + + private static final long serialVersionUID = 1L; + private final byte[] data = new byte[10 * 1024]; } diff --git a/clustered/integration-test/src/test/java/org/ehcache/clustered/replication/BasicClusteredCacheOpsReplicationWithMultipleClientsTest.java b/clustered/integration-test/src/test/java/org/ehcache/clustered/replication/BasicClusteredCacheOpsReplicationWithMultipleClientsTest.java index b755d1d046..3860a9eb77 100644 --- a/clustered/integration-test/src/test/java/org/ehcache/clustered/replication/BasicClusteredCacheOpsReplicationWithMultipleClientsTest.java +++ b/clustered/integration-test/src/test/java/org/ehcache/clustered/replication/BasicClusteredCacheOpsReplicationWithMultipleClientsTest.java @@ -22,6 +22,7 @@ import org.ehcache.clustered.client.config.builders.ClusteredResourcePoolBuilder; import org.ehcache.clustered.client.config.builders.ClusteredStoreConfigurationBuilder; import org.ehcache.clustered.client.config.builders.ClusteringServiceConfigurationBuilder; +import org.ehcache.clustered.client.config.builders.TimeoutsBuilder; import org.ehcache.clustered.common.Consistency; import org.ehcache.config.CacheConfiguration; import org.ehcache.config.builders.CacheConfigurationBuilder; @@ -42,6 +43,7 @@ import java.io.File; import java.io.Serializable; +import java.time.Duration; import java.util.ArrayList; import java.util.HashMap; import java.util.HashSet; @@ -95,6 +97,7 @@ public void startServers() throws Exception { final CacheManagerBuilder<PersistentCacheManager> clusteredCacheManagerBuilder = CacheManagerBuilder.newCacheManagerBuilder() .with(ClusteringServiceConfigurationBuilder.cluster(CLUSTER.getConnectionURI().resolve("/crud-cm-replication")) + .timeouts(TimeoutsBuilder.timeouts().read(Duration.ofSeconds(20)).write(Duration.ofSeconds(20))) .autoCreate() .defaultServerResource("primary-server-resource")); CACHE_MANAGER1 = clusteredCacheManagerBuilder.build(true); @@ -214,11 +217,18 @@ public void testClear() throws Exception { CLUSTER.getClusterControl().terminateActive(); - readKeysByCache2BeforeFailOver.forEach(x -> assertThat(CACHE2.get(x), nullValue())); + if (cacheConsistency == Consistency.STRONG) { + readKeysByCache2BeforeFailOver.forEach(x -> assertThat(CACHE2.get(x), nullValue())); + } else { + readKeysByCache2BeforeFailOver.forEach(x -> assertThat(CACHE1.get(x), nullValue())); + } } private static class BlobValue implements Serializable { + + private static final long serialVersionUID = 1L; + private final byte[] data = new
byte[10 * 1024]; } } diff --git a/clustered/integration-test/src/test/java/org/ehcache/clustered/replication/BasicClusteredCacheOpsReplicationWithServersApiTest.java b/clustered/integration-test/src/test/java/org/ehcache/clustered/replication/BasicClusteredCacheOpsReplicationWithServersApiTest.java new file mode 100644 index 0000000000..d2d1889d3c --- /dev/null +++ b/clustered/integration-test/src/test/java/org/ehcache/clustered/replication/BasicClusteredCacheOpsReplicationWithServersApiTest.java @@ -0,0 +1,129 @@ +/* + * Copyright Terracotta, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.ehcache.clustered.replication; + +import org.ehcache.Cache; +import org.ehcache.PersistentCacheManager; +import org.ehcache.clustered.ClusteredTests; +import org.ehcache.clustered.client.config.builders.ClusteredResourcePoolBuilder; +import org.ehcache.clustered.client.config.builders.ClusteringServiceConfigurationBuilder; +import org.ehcache.clustered.client.config.builders.TimeoutsBuilder; +import org.ehcache.config.CacheConfiguration; +import org.ehcache.config.builders.CacheConfigurationBuilder; +import org.ehcache.config.builders.CacheManagerBuilder; +import org.ehcache.config.builders.ResourcePoolsBuilder; +import org.ehcache.config.units.EntryUnit; +import org.ehcache.config.units.MemoryUnit; +import org.junit.After; +import org.junit.Before; +import org.junit.ClassRule; +import org.junit.Test; +import org.terracotta.testing.rules.Cluster; + +import java.io.File; +import java.net.InetSocketAddress; +import java.time.Duration; +import java.util.ArrayList; +import java.util.List; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.nullValue; +import static org.junit.Assert.assertThat; +import static org.terracotta.testing.rules.BasicExternalClusterBuilder.newCluster; + +public class BasicClusteredCacheOpsReplicationWithServersApiTest extends ClusteredTests { + private static final String CONFIG = + "<config xmlns:ohr='http://www.terracotta.org/config/offheap-resource'>" + + "<ohr:offheap-resources>" + + "<ohr:resource name=\"primary-server-resource\" unit=\"MB\">16</ohr:resource>" + + "</ohr:offheap-resources>" + + "</config>\n"; + + private static PersistentCacheManager CACHE_MANAGER; + private static Cache<Long, String> CACHE1; + private static Cache<Long, String> CACHE2; + + @ClassRule + public static Cluster CLUSTER = newCluster(2).in(new File("build/cluster")).withServiceFragment(CONFIG).build(); + + @Before + public void setUp() throws Exception { + CLUSTER.getClusterControl().startAllServers(); + CLUSTER.getClusterControl().waitForActive(); + CLUSTER.getClusterControl().waitForRunningPassivesInStandby(); + + final CacheManagerBuilder<PersistentCacheManager> clusteredCacheManagerBuilder + = CacheManagerBuilder.newCacheManagerBuilder() + .with(getConfigBuilder() + .timeouts(TimeoutsBuilder.timeouts() // we need to give some time for the failover to occur + .read(Duration.ofMinutes(1)) + .write(Duration.ofMinutes(1))) + .autoCreate() + .defaultServerResource("primary-server-resource")); + CACHE_MANAGER = clusteredCacheManagerBuilder.build(true); + CacheConfiguration<Long, String> config = CacheConfigurationBuilder.newCacheConfigurationBuilder(Long.class, String.class,
ResourcePoolsBuilder.newResourcePoolsBuilder().heap(100, EntryUnit.ENTRIES) + .with(ClusteredResourcePoolBuilder.clusteredDedicated("primary-server-resource", 4, MemoryUnit.MB))) + .build(); + + CACHE1 = CACHE_MANAGER.createCache("clustered-cache", config); + CACHE2 = CACHE_MANAGER.createCache("another-cache", config); + } + + private ClusteringServiceConfigurationBuilder getConfigBuilder() { + String cacheManagerName = "cm-replication"; + List<InetSocketAddress> addresses = new ArrayList<>(); + for (String server : CLUSTER.getClusterHostPorts()) { + String[] hostPort = server.split(":"); + addresses.add(InetSocketAddress.createUnresolved(hostPort[0], Integer.parseInt(hostPort[1]))); + } + return ClusteringServiceConfigurationBuilder.cluster(addresses, cacheManagerName); + } + + @After + public void tearDown() throws Exception { + CACHE_MANAGER.close(); + CACHE_MANAGER.destroy(); + } + + @Test + public void testCRUD() throws Exception { + List<Cache<Long, String>> caches = new ArrayList<>(); + caches.add(CACHE1); + caches.add(CACHE2); + caches.forEach(x -> { + x.put(1L, "The one"); + x.put(2L, "The two"); + x.put(1L, "Another one"); + x.put(3L, "The three"); + x.put(4L, "The four"); + assertThat(x.get(1L), equalTo("Another one")); + assertThat(x.get(2L), equalTo("The two")); + assertThat(x.get(3L), equalTo("The three")); + x.remove(4L); + }); + + CLUSTER.getClusterControl().terminateActive(); + + caches.forEach(x -> { + assertThat(x.get(1L), equalTo("Another one")); + assertThat(x.get(2L), equalTo("The two")); + assertThat(x.get(3L), equalTo("The three")); + assertThat(x.get(4L), nullValue()); + }); + } +} diff --git a/clustered/integration-test/src/test/java/org/ehcache/clustered/replication/DuplicateTest.java b/clustered/integration-test/src/test/java/org/ehcache/clustered/replication/DuplicateTest.java index 47b6c43122..dfd16ecab9 100644 --- a/clustered/integration-test/src/test/java/org/ehcache/clustered/replication/DuplicateTest.java +++ b/clustered/integration-test/src/test/java/org/ehcache/clustered/replication/DuplicateTest.java @@ -27,9 +27,8 @@ import org.ehcache.config.builders.CacheManagerBuilder; import org.ehcache.config.builders.ResourcePoolsBuilder; import org.ehcache.config.units.MemoryUnit; -import org.ehcache.core.Ehcache; -import org.ehcache.core.internal.resilience.ResilienceStrategy; -import org.ehcache.core.spi.store.StoreAccessException; +import org.ehcache.spi.resilience.ResilienceStrategy; +import org.ehcache.spi.resilience.StoreAccessException; import org.junit.After; import org.junit.Before; import org.junit.ClassRule; @@ -37,7 +36,6 @@ import org.terracotta.testing.rules.Cluster; import java.io.File; -import java.lang.reflect.Field; import java.lang.reflect.Proxy; import java.time.Duration; import java.util.Arrays; @@ -85,18 +83,18 @@ public void tearDown() throws Exception { public void duplicateAfterFailoverAreReturningTheCorrectResponse() throws Exception { CacheManagerBuilder<PersistentCacheManager> builder = CacheManagerBuilder.newCacheManagerBuilder() .with(ClusteringServiceConfigurationBuilder.cluster(CLUSTER.getConnectionURI()) - .timeouts(TimeoutsBuilder.timeouts().write(Duration.ofSeconds(20))) + .timeouts(TimeoutsBuilder.timeouts().write(Duration.ofSeconds(30))) .autoCreate() .defaultServerResource("primary-server-resource")) .withCache("cache", CacheConfigurationBuilder.newCacheConfigurationBuilder(Integer.class, String.class, ResourcePoolsBuilder.newResourcePoolsBuilder() - .with(ClusteredResourcePoolBuilder.clusteredDedicated("primary-server-resource", 128, MemoryUnit.MB))) +
.with(ClusteredResourcePoolBuilder.clusteredDedicated("primary-server-resource", 10, MemoryUnit.MB))) + .withResilienceStrategy(failingResilienceStrategy()) .add(ClusteredStoreConfigurationBuilder.withConsistency(Consistency.STRONG))); cacheManager = builder.build(true); Cache<Integer, String> cache = cacheManager.getCache("cache", Integer.class, String.class); - switchResilienceStrategy(cache); int numEntries = 3000; AtomicInteger currentEntry = new AtomicInteger(); @@ -104,7 +102,7 @@ public void duplicateAfterFailoverAreReturningTheCorrectResponse() throws Except //Perform put operations in another thread ExecutorService executorService = Executors.newSingleThreadExecutor(); try { - Future<?> puts = executorService.submit((Runnable) () -> { + Future<?> puts = executorService.submit(() -> { while (true) { int i = currentEntry.getAndIncrement(); if (i >= numEntries) { @@ -131,19 +129,27 @@ public void duplicateAfterFailoverAreReturningTheCorrectResponse() throws Except } - private void switchResilienceStrategy(Cache<Integer, String> cache) throws Exception { - Field field = Ehcache.class.getDeclaredField("resilienceStrategy"); - field.setAccessible(true); - ResilienceStrategy<Integer, String> newResilienceStrategy = (ResilienceStrategy<Integer, String>) - Proxy.newProxyInstance(cache.getClass().getClassLoader(), + @SuppressWarnings("unchecked") + private ResilienceStrategy<Integer, String> failingResilienceStrategy() throws Exception { + return (ResilienceStrategy<Integer, String>) + Proxy.newProxyInstance(getClass().getClassLoader(), new Class<?>[] { ResilienceStrategy.class}, (proxy, method, args) -> { - System.out.println("In there!!!!!!!!!!!!!!!!!!!!!!!!!"); - fail("Failure on " + method.getName(), findStoreAccessException(args)); // 1 is always the exception - return null; - }); + if(method.getName().endsWith("Failure")) { + fail("Failure on " + method.getName(), findStoreAccessException(args)); // one param is always a SAE + return null; + } - field.set(cache, newResilienceStrategy); + switch(method.getName()) { + case "hashCode": + return 0; + case "equals": + return proxy == args[0]; + default: + fail("Unexpected method call: " + method.getName()); + return null; + } + }); } private StoreAccessException findStoreAccessException(Object[] objects) { diff --git a/clustered/integration-test/src/test/java/org/ehcache/clustered/replication/OversizedCacheOpsPassiveTest.java b/clustered/integration-test/src/test/java/org/ehcache/clustered/replication/OversizedCacheOpsPassiveTest.java new file mode 100644 index 0000000000..68f1b7639e --- /dev/null +++ b/clustered/integration-test/src/test/java/org/ehcache/clustered/replication/OversizedCacheOpsPassiveTest.java @@ -0,0 +1,124 @@ +/* + * Copyright Terracotta, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
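The failingResilienceStrategy() above builds the strategy reflectively, so that any *Failure callback fails the test while plain Object methods still behave. A standalone sketch of that dynamic-proxy idiom, with the generic parameters assumed to be Integer and String as in the test and AssertionError standing in for the test's fail() helper:

    // Sketch: a ResilienceStrategy proxy that turns every *Failure callback
    // into a test failure, while keeping hashCode/equals sane.
    @SuppressWarnings("unchecked")
    static ResilienceStrategy<Integer, String> failFast() {
      return (ResilienceStrategy<Integer, String>) Proxy.newProxyInstance(
          ResilienceStrategy.class.getClassLoader(),
          new Class<?>[] { ResilienceStrategy.class },
          (proxy, method, args) -> {
            if (method.getName().endsWith("Failure")) {
              throw new AssertionError("unexpected " + method.getName());
            }
            switch (method.getName()) {
              case "hashCode": return 0;
              case "equals":   return proxy == args[0];
              default:         throw new AssertionError("unexpected call: " + method.getName());
            }
          });
    }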
diff --git a/clustered/integration-test/src/test/java/org/ehcache/clustered/replication/OversizedCacheOpsPassiveTest.java b/clustered/integration-test/src/test/java/org/ehcache/clustered/replication/OversizedCacheOpsPassiveTest.java new file mode 100644 index 0000000000..68f1b7639e --- /dev/null +++ b/clustered/integration-test/src/test/java/org/ehcache/clustered/replication/OversizedCacheOpsPassiveTest.java @@ -0,0 +1,124 @@ +/* + * Copyright Terracotta, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.ehcache.clustered.replication; + +import org.ehcache.Cache; +import org.ehcache.PersistentCacheManager; +import org.ehcache.clustered.ClusteredTests; +import org.ehcache.clustered.client.config.builders.ClusteredResourcePoolBuilder; +import org.ehcache.clustered.client.config.builders.ClusteringServiceConfigurationBuilder; +import org.ehcache.config.CacheConfiguration; +import org.ehcache.config.builders.CacheConfigurationBuilder; +import org.ehcache.config.builders.CacheManagerBuilder; +import org.ehcache.config.builders.ResourcePoolsBuilder; +import org.ehcache.config.units.MemoryUnit; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.terracotta.testing.rules.Cluster; + +import java.nio.file.Paths; +import java.util.Arrays; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CountDownLatch; + +import static org.terracotta.testing.rules.BasicExternalClusterBuilder.newCluster; + +/** + * Test the effect of cache eviction during passive sync. + */ +public class OversizedCacheOpsPassiveTest extends ClusteredTests { + private static final int MAX_PUTS = 3000; + private static final int MAX_SWITCH_OVER = 3; + private static final int PER_ELEMENT_SIZE = 256 * 1024; + private static final int CACHE_SIZE_IN_MB = 2; + private static final String LARGE_VALUE = buildLargeString(); + + private static final String RESOURCE_CONFIG = + "<config xmlns:ohr='http://www.terracotta.org/config/offheap-resource'>" + + "<ohr:offheap-resources>" + + "<ohr:resource name=\"primary-server-resource\" unit=\"MB\">2</ohr:resource>" + + "</ohr:offheap-resources>" + + "</config>\n"; + + @ClassRule + public static Cluster CLUSTER = + newCluster(2).in(Paths.get("build", "cluster").toFile()) + .withSystemProperty("ehcache.sync.data.gets.threshold", "2") + .withServiceFragment(RESOURCE_CONFIG) + .build(); + + @BeforeClass + public static void waitForServers() throws Exception { + CLUSTER.getClusterControl().waitForActive(); + CLUSTER.getClusterControl().waitForRunningPassivesInStandby(); + } + + @Test + public void oversizedPuts() throws Exception { + CacheManagerBuilder<PersistentCacheManager> clusteredCacheManagerBuilder + = CacheManagerBuilder.newCacheManagerBuilder() + .with(ClusteringServiceConfigurationBuilder.cluster(CLUSTER.getConnectionURI().resolve("/crud-cm")) + .autoCreate() + .defaultServerResource("primary-server-resource")); + CountDownLatch syncLatch = new CountDownLatch(2); + + CompletableFuture<Void> f1 = CompletableFuture.runAsync(() -> doPuts(clusteredCacheManagerBuilder, syncLatch)); + CompletableFuture<Void> f2 = CompletableFuture.runAsync(() -> doPuts(clusteredCacheManagerBuilder, syncLatch)); + + syncLatch.await(); + for (int i = 0; i < MAX_SWITCH_OVER; i++) { + CLUSTER.getClusterControl().terminateActive(); + CLUSTER.getClusterControl().waitForActive(); + CLUSTER.getClusterControl().startOneServer(); + CLUSTER.getClusterControl().waitForRunningPassivesInStandby(); + Thread.sleep(2000); + } + + f1.get(); + f2.get(); + } + + private void doPuts(CacheManagerBuilder<PersistentCacheManager> clusteredCacheManagerBuilder, + CountDownLatch syncLatch) { + try (PersistentCacheManager cacheManager = clusteredCacheManagerBuilder.build(true)) { + CacheConfiguration<Long, String> config = CacheConfigurationBuilder.newCacheConfigurationBuilder(Long.class, String.class, + ResourcePoolsBuilder.newResourcePoolsBuilder() + .with(ClusteredResourcePoolBuilder.clusteredDedicated("primary-server-resource", CACHE_SIZE_IN_MB, MemoryUnit.MB))) + .build(); + + syncLatch.countDown(); + Cache<Long, String> cache = cacheManager.createCache("clustered-cache", config); + for (long i = 0; i < MAX_PUTS; i++) { + if (i % 1000 == 0) { + // a small pause + try { + Thread.sleep(10); + } catch (InterruptedException ignored) { + } + } + cache.put(i, LARGE_VALUE); + } + } + } + + private static String buildLargeString() { + char[] filler = new char[PER_ELEMENT_SIZE]; + Arrays.fill(filler, '0'); + return new String(filler); + } +} diff --git a/clustered/integration-test/src/test/java/org/ehcache/clustered/util/BeforeAll.java b/clustered/integration-test/src/test/java/org/ehcache/clustered/util/BeforeAll.java new file mode 100644 index 0000000000..5b261dbc3c --- /dev/null +++ b/clustered/integration-test/src/test/java/org/ehcache/clustered/util/BeforeAll.java @@ -0,0 +1,32 @@ +/* + * Copyright Terracotta, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.ehcache.clustered.util; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Inherited; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * Port of Junit 5 @BeforeAll + */ +@Retention(RetentionPolicy.RUNTIME) +@Target(ElementType.METHOD) +@Inherited +public @interface BeforeAll { +} +
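OversizedCacheOpsPassiveTest above drives two writers whose 256 KB values overflow a 2 MB clustered pool, forcing evictions while actives are repeatedly terminated. A condensed sketch of that orchestration, assuming the writer Runnable counts the latch down itself once its cache manager is built (as doPuts does) and failOverOnce wraps the ClusterControl calls:

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CountDownLatch;

public final class FailoverHarness {
  public static void runUnderFailover(Runnable writer, CountDownLatch ready,
                                      Runnable failOverOnce, int switchOvers) throws Exception {
    CompletableFuture<Void> w1 = CompletableFuture.runAsync(writer);
    CompletableFuture<Void> w2 = CompletableFuture.runAsync(writer);
    ready.await();                        // both writers are up and putting
    for (int i = 0; i < switchOvers; i++) {
      failOverOnce.run();                 // terminate active, wait for promotion, restart peer
    }
    w1.get();                             // get() rethrows anything a writer threw
    w2.get();
  }
}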
diff --git a/clustered/integration-test/src/test/java/org/ehcache/clustered/util/BeforeAllRule.java b/clustered/integration-test/src/test/java/org/ehcache/clustered/util/BeforeAllRule.java new file mode 100644 index 0000000000..9267fe7c5d --- /dev/null +++ b/clustered/integration-test/src/test/java/org/ehcache/clustered/util/BeforeAllRule.java @@ -0,0 +1,61 @@ +/* + * Copyright Terracotta, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.ehcache.clustered.util; + +import org.junit.rules.ExternalResource; +import org.junit.runner.Description; +import org.junit.runners.model.Statement; + +import java.lang.reflect.Method; +import java.util.Comparator; +import java.util.List; +import java.util.WeakHashMap; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +/** + * @author Mathieu Carbou + */ +public class BeforeAllRule extends ExternalResource { + + private static WeakHashMap<Class<?>, Boolean> ran = new WeakHashMap<>(); + + private final Object test; + + public BeforeAllRule(Object test) { + this.test = test; + } + + @Override + public Statement apply(Statement base, Description description) { + ran.putIfAbsent(description.getTestClass(), Boolean.FALSE); + return super.apply(base, description); + } + + @Override + protected void before() throws Throwable { + if (ran.replace(test.getClass(), Boolean.FALSE, Boolean.TRUE)) { + List<Method> list = Stream.of(test.getClass().getMethods()) + .filter(m -> m.isAnnotationPresent(BeforeAll.class)) + .sorted(Comparator.comparing(Method::getName)) + .collect(Collectors.toList()); + for (Method method : list) { + method.invoke(test); + } + } + } + +}
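BeforeAllRule above tracks one flag per test class, so methods annotated with @BeforeAll run once before the first test of the class; they must be public and execute in method-name order. A hypothetical test class wiring the rule up:

import org.junit.Rule;
import org.junit.Test;

public class SomeClusteredTest {

  @Rule
  public final BeforeAllRule beforeAllRule = new BeforeAllRule(this);

  @BeforeAll
  public void startClusterOnce() {
    // runs before the first test method of this class only
  }

  @Test
  public void firstTest() { /* cluster already prepared */ }

  @Test
  public void secondTest() { /* startClusterOnce() is not re-run */ }
}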
diff --git a/clustered/integration-test/src/test/java/org/ehcache/clustered/util/TCPProxyUtil.java b/clustered/integration-test/src/test/java/org/ehcache/clustered/util/TCPProxyUtil.java new file mode 100644 index 0000000000..8ba910117f --- /dev/null +++ b/clustered/integration-test/src/test/java/org/ehcache/clustered/util/TCPProxyUtil.java @@ -0,0 +1,92 @@ +/* + * Copyright Terracotta, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.ehcache.clustered.util; + +import com.tc.net.proxy.TCPProxy; +import org.terracotta.testing.common.PortChooser; + +import java.net.InetAddress; +import java.net.URI; +import java.util.Arrays; +import java.util.List; +import java.util.Optional; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +public final class TCPProxyUtil { + + private static final String STRIPE_SEPARATOR = ","; + + private TCPProxyUtil() { + + } + + public static URI getProxyURI(URI connectionURI, List<TCPProxy> proxies) throws Exception { + + List<Integer> ports = parsePorts(connectionURI); + List<Integer> proxyPorts = createProxyPorts(ports.size()); + + for (int i = 0; i < ports.size(); i++) { + int port = ports.get(i); + int proxyPort = proxyPorts.get(i); + + InetAddress host = InetAddress.getByName("localhost"); + TCPProxy proxy = new TCPProxy(proxyPort, host, port, 0L, false, null); + proxies.add(proxy); + proxy.start(); + } + + return createURI(proxyPorts); + } + + private static List<Integer> parsePorts(URI connectionURI) { + String uriString = connectionURI.toString(); + String withoutProtocol = uriString.substring(13); + String[] stripes = withoutProtocol.split(STRIPE_SEPARATOR); + + return Arrays.stream(stripes) + .map(stripe -> stripe.substring(stripe.indexOf(":") + 1)) + .mapToInt(Integer::parseInt) + .boxed() + .collect(Collectors.toList()); + } + + private static List<Integer> createProxyPorts(int portCount) { + PortChooser portChooser = new PortChooser(); + int firstProxyPort = portChooser.chooseRandomPorts(portCount); + + return IntStream + .range(0, portCount) + .map(i -> firstProxyPort + i) + .boxed() + .collect(Collectors.toList()); + } + + private static URI createURI(List<Integer> proxyPorts) { + + String uri = proxyPorts.stream() + .map(port -> "localhost:" + port) + .collect(Collectors.joining(",", "terracotta://", "")); + + return URI.create(uri); + } + + public static void setDelay(long delay, List<TCPProxy> proxies) { + for (TCPProxy proxy : proxies) { + proxy.setDelay(delay); + } + } +}
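TCPProxyUtil interposes one local TCPProxy per stripe so a test can inject latency between client and cluster; the substring(13) in parsePorts drops the 13-character "terracotta://" scheme before splitting stripes on commas. A sketch of intended use, where connectionUri stands in for the usual CLUSTER.getConnectionURI():

import com.tc.net.proxy.TCPProxy;

import org.ehcache.PersistentCacheManager;
import org.ehcache.clustered.client.config.builders.ClusteringServiceConfigurationBuilder;
import org.ehcache.config.builders.CacheManagerBuilder;

import java.net.URI;
import java.util.List;

public final class ProxiedClusterExample {
  // Builds a cache manager that reaches the cluster through the proxies, so the
  // test can dial latency up and down while the manager is in use.
  public static PersistentCacheManager connectThroughProxies(URI connectionUri, List<TCPProxy> proxies) throws Exception {
    URI proxyUri = TCPProxyUtil.getProxyURI(connectionUri, proxies);
    PersistentCacheManager manager = CacheManagerBuilder.newCacheManagerBuilder()
        .with(ClusteringServiceConfigurationBuilder.cluster(proxyUri).autoCreate())
        .build(true);
    TCPProxyUtil.setDelay(2000, proxies); // two seconds of extra latency per connection
    return manager;
  }
}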
diff --git a/clustered/integration-test/src/test/java/org/ehcache/clustered/util/TestCacheLoaderWriter.java b/clustered/integration-test/src/test/java/org/ehcache/clustered/util/TestCacheLoaderWriter.java new file mode 100644 index 0000000000..26d7f691ff --- /dev/null +++ b/clustered/integration-test/src/test/java/org/ehcache/clustered/util/TestCacheLoaderWriter.java @@ -0,0 +1,44 @@ +/* + * Copyright Terracotta, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.ehcache.clustered.util; + +import org.ehcache.spi.loaderwriter.CacheLoaderWriter; + +import java.util.concurrent.ConcurrentMap; + +public class TestCacheLoaderWriter implements CacheLoaderWriter<Long, String> { + + private final ConcurrentMap<Long, String> sor; + + public TestCacheLoaderWriter(ConcurrentMap<Long, String> sor) { + this.sor = sor; + } + + @Override + public String load(Long key) throws Exception { + return sor.get(key); + } + + @Override + public void write(Long key, String value) throws Exception { + sor.put(key, value); + } + + @Override + public void delete(Long key) throws Exception { + sor.remove(key); + } +} diff --git a/clustered/integration-test/src/test/java/org/ehcache/clustered/writebehind/BasicClusteredWriteBehindTest.java b/clustered/integration-test/src/test/java/org/ehcache/clustered/writebehind/BasicClusteredWriteBehindTest.java new file mode 100644 index 0000000000..6e47b882a5 --- /dev/null +++ b/clustered/integration-test/src/test/java/org/ehcache/clustered/writebehind/BasicClusteredWriteBehindTest.java @@ -0,0 +1,281 @@ +/* + * Copyright Terracotta, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.ehcache.clustered.writebehind; + +import org.ehcache.Cache; +import org.ehcache.CacheManager; +import org.ehcache.PersistentCacheManager; +import org.ehcache.clustered.ClusteredTests; +import org.ehcache.clustered.client.config.ClusteredStoreConfiguration; +import org.ehcache.clustered.client.config.builders.ClusteredResourcePoolBuilder; +import org.ehcache.clustered.common.Consistency; +import org.ehcache.config.CacheConfiguration; +import org.ehcache.config.builders.CacheManagerBuilder; +import org.ehcache.config.builders.ResourcePoolsBuilder; +import org.ehcache.config.builders.WriteBehindConfigurationBuilder; +import org.ehcache.config.units.EntryUnit; +import org.ehcache.config.units.MemoryUnit; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.terracotta.testing.rules.Cluster; + +import java.io.File; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.ehcache.clustered.client.config.builders.ClusteringServiceConfigurationBuilder.cluster; +import static org.ehcache.config.builders.CacheConfigurationBuilder.newCacheConfigurationBuilder; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; +import static org.junit.Assert.assertThat; +import static org.terracotta.testing.rules.BasicExternalClusterBuilder.newCluster; + +public class BasicClusteredWriteBehindTest extends ClusteredTests { + + private static final String RESOURCE_CONFIG = + "<config xmlns:ohr='http://www.terracotta.org/config/offheap-resource'>" + + "<ohr:offheap-resources>" + + "<ohr:resource name=\"primary-server-resource\" unit=\"MB\">64</ohr:resource>" + + "</ohr:offheap-resources>" + + "</config>\n"; + + @ClassRule + public static Cluster CLUSTER = + newCluster().in(new File("build/cluster")).withServiceFragment(RESOURCE_CONFIG).build(); + + @BeforeClass + public static void waitForActive() throws Exception { + CLUSTER.getClusterControl().waitForActive(); + } + + private final List<Record> cacheRecords = new ArrayList<>(); + + private static final String CACHE_NAME = "cache-1"; + private static final long KEY = 1L; + + private RecordingLoaderWriter<Long, String> loaderWriter; + + @Before + public void setUp() { + loaderWriter = new RecordingLoaderWriter<>(); + } + + @Test + public void testBasicClusteredWriteBehind() { + PersistentCacheManager cacheManager = createCacheManager(); + Cache<Long, String> cache = cacheManager.getCache(CACHE_NAME, Long.class, String.class); + + for (int i = 0; i < 10; i++) { + put(cache, String.valueOf(i)); + } + + assertValue(cache, String.valueOf(9)); + + verifyRecords(cache); + cache.clear(); + } + + @Test + public void testWriteBehindMultipleClients() { + PersistentCacheManager cacheManager1 = createCacheManager(); + PersistentCacheManager cacheManager2 = createCacheManager(); + Cache<Long, String> client1 = cacheManager1.getCache(CACHE_NAME, Long.class, String.class); + Cache<Long, String> client2 = cacheManager2.getCache(CACHE_NAME, Long.class, String.class); + + put(client1, "The one from client1"); + put(client2, "The one one from client2"); + assertValue(client1, "The one one from client2"); + remove(client1); + put(client2, "The one from client2"); + put(client1, "The one one from client1"); + assertValue(client2, "The one one from client1"); + remove(client2); + assertValue(client1, null); + put(client1, "The one from client1"); + put(client1, "The one one from client1"); + remove(client2); + put(client2, "The one from client2"); + put(client2, "The one one from client2"); + remove(client1); + assertValue(client2, null); + + verifyRecords(client1); + client1.clear(); + } + + @Test + public void testClusteredWriteBehindCAS() { + PersistentCacheManager cacheManager = createCacheManager(); + Cache<Long, String> cache = cacheManager.getCache(CACHE_NAME, Long.class, String.class); + putIfAbsent(cache, "First value", true); + assertValue(cache,"First value"); + putIfAbsent(cache, "Second value", false); + assertValue(cache, "First value"); + put(cache, "First value again"); + assertValue(cache, "First value again"); + replace(cache, "Replaced First value", true); + assertValue(cache, "Replaced First value"); + replace(cache, "Replaced First value", "Replaced First value again", true); + assertValue(cache, "Replaced First value again"); + replace(cache, "Replaced First", "Tried Replacing First value again", false); + assertValue(cache, "Replaced First value again"); + condRemove(cache, "Replaced First value again", true); + assertValue(cache, null); + replace(cache, "Trying to replace value", false); + assertValue(cache, null); + put(cache, "new value", true); + assertValue(cache, "new value"); + condRemove(cache, "new value", false); + + verifyRecords(cache); + cache.clear(); + } + + @Test + public void testClusteredWriteBehindLoading() { + CacheManager cacheManager = createCacheManager(); + Cache<Long, String> cache = cacheManager.getCache(CACHE_NAME, Long.class, String.class); + + put(cache,"Some value"); + tryFlushingUpdatesToSOR(cache); + cache.clear(); + + assertThat(cache.get(KEY), notNullValue()); + + cache.clear(); + } + + private void assertValue(Cache<Long, String> cache, String value) { + assertThat(cache.get(KEY), is(value)); + } + + private void put(Cache<Long, String> cache, String value) { + put(cache, value, true); + } + + private void put(Cache<Long, String> cache, String value, boolean addToCacheRecords) { + cache.put(KEY, value); + if (addToCacheRecords) { + cacheRecords.add(new Record(KEY, cache.get(KEY))); + } + } + + private void putIfAbsent(Cache<Long, String> cache, String value, boolean addToCacheRecords) { + cache.putIfAbsent(KEY, value); + if (addToCacheRecords) { + cacheRecords.add(new Record(KEY, cache.get(KEY))); + } + } + + private void replace(Cache<Long, String> cache, String value, boolean addToCacheRecords) { + cache.replace(KEY, value); + if (addToCacheRecords) { + cacheRecords.add(new Record(KEY, cache.get(KEY))); + } + } + + private void replace(Cache<Long, String> cache, String oldValue, String newValue, boolean addToCacheRecords) { + cache.replace(KEY, oldValue, newValue); + if (addToCacheRecords) { + cacheRecords.add(new Record(KEY, cache.get(KEY))); + } + } + + private void remove(Cache<Long, String> cache) { + cache.remove(KEY); + cacheRecords.add(new Record(KEY, null)); + } + + private void condRemove(Cache<Long, String> cache, String value, boolean addToCacheRecords) { + cache.remove(KEY, value); + if (addToCacheRecords) { + cacheRecords.add(new Record(KEY, null)); + } + } + + private void verifyRecords(Cache<Long, String> cache) { + tryFlushingUpdatesToSOR(cache); + + Map<Long, List<String>> loaderWriterRecords = loaderWriter.getRecords(); + + Map<Long, Integer> track = new HashMap<>(); + for (Record cacheRecord : cacheRecords) { + Long key = cacheRecord.getKey(); + int next = track.compute(key, (k, v) -> v == null ? 0 : v + 1); + assertThat(loaderWriterRecords.get(key).get(next), is(cacheRecord.getValue())); + } + } + + private void tryFlushingUpdatesToSOR(Cache<Long, String> cache) { + int retryCount = 1000; + int i = 0; + while (true) { + String value = "flush_queue_" + i; + put(cache, value, false); + try { + Thread.sleep(100); + } catch (InterruptedException e) { + e.printStackTrace(); + } + if (value.equals(loaderWriter.load(KEY))) break; + if (i > retryCount) { + throw new RuntimeException("Couldn't flush updates to SOR after " + retryCount + " tries"); + } + i++; + } + } + + private PersistentCacheManager createCacheManager() { + CacheConfiguration<Long, String> cacheConfiguration = + newCacheConfigurationBuilder(Long.class, String.class, ResourcePoolsBuilder.newResourcePoolsBuilder() + .heap(10, EntryUnit.ENTRIES) + .offheap(1, MemoryUnit.MB) + .with(ClusteredResourcePoolBuilder.clusteredDedicated("primary-server-resource", 2, MemoryUnit.MB))) + .withLoaderWriter(loaderWriter) + .add(WriteBehindConfigurationBuilder.newUnBatchedWriteBehindConfiguration()) + .add(new ClusteredStoreConfiguration(Consistency.STRONG)) + .build(); + + return CacheManagerBuilder + .newCacheManagerBuilder() + .with(cluster(CLUSTER.getConnectionURI().resolve("/cm-wb")).autoCreate()) + .withCache(CACHE_NAME, cacheConfiguration) + .build(true); + } + + private static final class Record { + private final Long key; + private final String value; + + private Record(Long key, String value) { + this.key = key; + this.value = value; + } + + Long getKey() { + return key; + } + + String getValue() { + return value; + } + } +}
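Both write-behind tests rely on the same flush-detection idiom (tryFlushingUpdatesToSOR): write a sentinel through the cache, then poll the recording loader-writer until the write-behind queue has drained far enough for the sentinel to reach the system of record. The same idea distilled into a hypothetical helper, assuming RecordingLoaderWriter from this change is on the classpath:

import org.ehcache.Cache;

public final class WriteBehindFlushProbe {
  public static void awaitFlush(Cache<Long, String> cache,
                                RecordingLoaderWriter<Long, String> sor,
                                long key) throws InterruptedException {
    for (int i = 0; ; i++) {
      String marker = "flush_queue_" + i;
      cache.put(key, marker);
      Thread.sleep(100);                  // give the write-behind thread a chance
      if (marker.equals(sor.load(key))) {
        return;                           // everything queued before the marker is in the SOR
      }
      if (i > 1000) {
        throw new AssertionError("write-behind queue never drained");
      }
    }
  }
}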
diff --git a/clustered/integration-test/src/test/java/org/ehcache/clustered/writebehind/BasicClusteredWriteBehindWithPassiveTest.java b/clustered/integration-test/src/test/java/org/ehcache/clustered/writebehind/BasicClusteredWriteBehindWithPassiveTest.java new file mode 100644 index 0000000000..8dceaca796 --- /dev/null +++ b/clustered/integration-test/src/test/java/org/ehcache/clustered/writebehind/BasicClusteredWriteBehindWithPassiveTest.java @@ -0,0 +1,218 @@ +/* + * Copyright Terracotta, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.ehcache.clustered.writebehind; + +import org.ehcache.Cache; +import org.ehcache.PersistentCacheManager; +import org.ehcache.clustered.ClusteredTests; +import org.ehcache.clustered.client.config.ClusteredStoreConfiguration; +import org.ehcache.clustered.client.config.builders.ClusteredResourcePoolBuilder; +import org.ehcache.clustered.common.Consistency; +import org.ehcache.config.CacheConfiguration; +import org.ehcache.config.builders.CacheManagerBuilder; +import org.ehcache.config.builders.ResourcePoolsBuilder; +import org.ehcache.config.builders.WriteBehindConfigurationBuilder; +import org.ehcache.config.units.EntryUnit; +import org.ehcache.config.units.MemoryUnit; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.terracotta.testing.rules.Cluster; + +import java.io.File; +import java.util.List; +import java.util.Map; + +import static org.ehcache.clustered.client.config.builders.ClusteringServiceConfigurationBuilder.cluster; +import static org.ehcache.config.builders.CacheConfigurationBuilder.newCacheConfigurationBuilder; +import static org.hamcrest.Matchers.is; +import static org.junit.Assert.assertThat; +import static org.terracotta.testing.rules.BasicExternalClusterBuilder.newCluster; + +public class BasicClusteredWriteBehindWithPassiveTest extends ClusteredTests { + + private static final String RESOURCE_CONFIG = + "<config xmlns:ohr='http://www.terracotta.org/config/offheap-resource'>" + + "<ohr:offheap-resources>" + + "<ohr:resource name=\"primary-server-resource\" unit=\"MB\">64</ohr:resource>" + + "</ohr:offheap-resources>" + + "</config>\n"; + + @ClassRule + public static Cluster CLUSTER = + newCluster(2).in(new File("build/cluster")).withServiceFragment(RESOURCE_CONFIG).build(); + + @BeforeClass + public static void waitForActive() throws Exception { + CLUSTER.getClusterControl().waitForActive(); + CLUSTER.getClusterControl().waitForRunningPassivesInStandby(); + } + + private static final String CACHE_NAME = "cache-1"; + private static final long KEY = 1L; + + private RecordingLoaderWriter<Long, String> loaderWriter; + + @Before + public void setUp() { + loaderWriter = new RecordingLoaderWriter<>(); + } + + @Test + public void testBasicClusteredWriteBehind() throws Exception { + PersistentCacheManager cacheManager = createCacheManager(); + Cache<Long, String> cache = cacheManager.getCache(CACHE_NAME, Long.class, String.class); + + for (int i = 0; i < 10; i++) { + cache.put(KEY, String.valueOf(i)); + } + + assertValue(cache, String.valueOf(9)); + + CLUSTER.getClusterControl().terminateActive(); + CLUSTER.getClusterControl().waitForActive(); + CLUSTER.getClusterControl().startOneServer(); + + assertValue(cache, String.valueOf(9)); + checkValueFromLoaderWriter(cache, String.valueOf(9)); + + cache.clear(); + } + + @Test + public void testWriteBehindMultipleClients() throws Exception { + PersistentCacheManager cacheManager1 = createCacheManager(); + PersistentCacheManager cacheManager2 = createCacheManager(); + Cache<Long, String> client1 = cacheManager1.getCache(CACHE_NAME, Long.class, String.class); + Cache<Long, String> client2 = cacheManager2.getCache(CACHE_NAME, Long.class, String.class); + + client1.put(KEY, "The one from client1"); + client2.put(KEY, "The one one from client2"); + assertValue(client1, "The one one from client2"); + client1.remove(KEY); + client2.put(KEY, "The one from client2"); + client1.put(KEY, "The one one from client1"); + assertValue(client2, "The one one from client1"); + client2.remove(KEY); + assertValue(client1, null); + client1.put(KEY, "The one from client1"); + client1.put(KEY, "The one one from client1"); + client2.remove(KEY); + client2.put(KEY, "The one from client2"); + client2.put(KEY, "The one one from client2"); + client1.remove(KEY); + assertValue(client2, null); + + CLUSTER.getClusterControl().terminateActive(); + CLUSTER.getClusterControl().waitForActive(); + CLUSTER.getClusterControl().startOneServer(); + + assertValue(client1, null); + assertValue(client2, null); + checkValueFromLoaderWriter(client1, null); + + client1.clear(); + } + + private void checkValueFromLoaderWriter(Cache<Long, String> cache, String expected) { + + tryFlushingUpdatesToSOR(cache); + + Map<Long, List<String>> records = loaderWriter.getRecords(); + List<String> keyRecords = records.get(KEY); + + int index = keyRecords.size() - 1; + while (index >= 0 && keyRecords.get(index) != null && keyRecords.get(index).startsWith("flush_")) { + index--; + } + + assertThat(keyRecords.get(index), is(expected)); + + } + + @Test + public void testClusteredWriteBehindCAS() throws Exception { + PersistentCacheManager cacheManager = createCacheManager(); + Cache<Long, String> cache = cacheManager.getCache(CACHE_NAME, Long.class, String.class); + cache.putIfAbsent(KEY, "First value"); + assertValue(cache,"First value"); + cache.putIfAbsent(KEY, "Second value"); + assertValue(cache, "First value"); + cache.put(KEY, "First value again"); + assertValue(cache, "First value again"); + cache.replace(KEY, "Replaced First value"); + assertValue(cache, "Replaced First value"); + cache.replace(KEY, "Replaced First value", "Replaced First value again"); + assertValue(cache, "Replaced First value again"); + cache.replace(KEY, "Replaced First", "Tried Replacing First value again"); + assertValue(cache, "Replaced First value again"); + cache.remove(KEY, "Replaced First value again"); + assertValue(cache, null); + cache.replace(KEY, "Trying to replace value"); + assertValue(cache, null); + cache.put(KEY, "new value"); + assertValue(cache, "new value"); + + CLUSTER.getClusterControl().terminateActive(); + CLUSTER.getClusterControl().waitForActive(); + CLUSTER.getClusterControl().startOneServer(); + + cache.clear(); + } + + private void assertValue(Cache<Long, String> cache, String value) { + assertThat(cache.get(KEY), is(value)); + } + + private void tryFlushingUpdatesToSOR(Cache<Long, String> cache) { + int retryCount = 1000; + int i = 0; + while (true) { + String value = "flush_queue_" + i; + cache.put(KEY, value); + try { + Thread.sleep(100); + } catch (InterruptedException e) { + e.printStackTrace(); + } + if (value.equals(loaderWriter.load(KEY))) break; + if (i > retryCount) { + throw new RuntimeException("Couldn't flush updates to SOR after " + retryCount + " tries"); + } + i++; + } + } + + private PersistentCacheManager createCacheManager() { + CacheConfiguration<Long, String> cacheConfiguration = + newCacheConfigurationBuilder(Long.class, String.class, ResourcePoolsBuilder.newResourcePoolsBuilder() + .heap(10, EntryUnit.ENTRIES) + .offheap(1, MemoryUnit.MB) + .with(ClusteredResourcePoolBuilder.clusteredDedicated("primary-server-resource", 2, MemoryUnit.MB))) + .withLoaderWriter(loaderWriter) + .add(WriteBehindConfigurationBuilder.newUnBatchedWriteBehindConfiguration()) + .add(new ClusteredStoreConfiguration(Consistency.STRONG)) + .build(); + + return CacheManagerBuilder + .newCacheManagerBuilder() + .with(cluster(CLUSTER.getConnectionURI().resolve("/cm-wb")).autoCreate()) + .withCache(CACHE_NAME, cacheConfiguration) + .build(true); + } +} diff --git a/clustered/integration-test/src/test/java/org/ehcache/clustered/writebehind/RecordingLoaderWriter.java b/clustered/integration-test/src/test/java/org/ehcache/clustered/writebehind/RecordingLoaderWriter.java new file mode 100644 index 0000000000..e37557687c --- /dev/null +++ b/clustered/integration-test/src/test/java/org/ehcache/clustered/writebehind/RecordingLoaderWriter.java @@ -0,0 +1,72 @@ +/* + * Copyright Terracotta, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.ehcache.clustered.writebehind; + +import org.ehcache.spi.loaderwriter.CacheLoaderWriter; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +public class RecordingLoaderWriter<K, V> implements CacheLoaderWriter<K, V> { + + private final Map<K, List<V>> records = new HashMap<>(); + + @Override + public synchronized V load(K key) { + List<V> list = records.get(key); + return list == null ? null : list.get(list.size() - 1); + } + + @Override + public synchronized void write(K key, V value) { + record(key, value); + } + + @Override + public synchronized void delete(K key) { + record(key, null); + } + + @Override + public synchronized Map<K, V> loadAll(Iterable<? extends K> keys) throws Exception { + return CacheLoaderWriter.super.loadAll(keys); + } + + @Override + public void writeAll(Iterable<? extends Map.Entry<? extends K, ? extends V>> entries) throws Exception { + CacheLoaderWriter.super.writeAll(entries); + } + + @Override + public void deleteAll(Iterable<? extends K> keys) throws Exception { + CacheLoaderWriter.super.deleteAll(keys); + } + + private void record(K key, V value) { + records.computeIfAbsent(key, k -> new ArrayList<>()).add(value); + } + + public synchronized Map<K, List<V>> getRecords() { + return Collections.unmodifiableMap(records); + } + + public void clear() { + records.clear(); + } +} diff --git a/clustered/integration-test/src/test/resources/clusteredConfiguration.txt b/clustered/integration-test/src/test/resources/clusteredConfiguration.txt index 9a601bca24..b72691f038 100644 --- a/clustered/integration-test/src/test/resources/clusteredConfiguration.txt +++ b/clustered/integration-test/src/test/resources/clusteredConfiguration.txt @@ -4,7 +4,7 @@ caches: valueType: java.lang.String serviceConfigurations: None evictionAdvisor: None - expiry: NoExpiry + expiry: NoExpiryPolicy resourcePools: pools: heap: @@ -18,7 +18,7 @@ caches: valueType: java.lang.String serviceConfigurations: None evictionAdvisor: None - expiry: NoExpiry + expiry: NoExpiryPolicy resourcePools: pools: heap: @@ -33,9 +33,9 @@ caches: services: - org.ehcache.clustered.client.config.ClusteringServiceConfiguration: clusterUri: terracotta://server-1/my-server-entity-2 - timeouts: Timeouts{readOperation=PT5S,writeOperation=PT5S,connection=PT2562047H47M16.854775807S} + timeouts: Timeouts{readOperation=PT5S,writeOperation=PT5S,connection=PT2M30S} autoCreate: true defaultServerResource: 
primary-server-resource resourcePools: - resource-pool-a: [33554432 bytes from ''] + resource-pool-a: [10485760 bytes from ''] - org.ehcache.management.registry.DefaultManagementRegistryConfiguration diff --git a/clustered/integration-test/src/test/resources/simpleConfiguration.txt b/clustered/integration-test/src/test/resources/simpleConfiguration.txt index 09765c1a48..2767f10986 100644 --- a/clustered/integration-test/src/test/resources/simpleConfiguration.txt +++ b/clustered/integration-test/src/test/resources/simpleConfiguration.txt @@ -6,7 +6,7 @@ caches: - org.ehcache.impl.config.loaderwriter.DefaultCacheLoaderWriterConfiguration - org.ehcache.impl.config.loaderwriter.writebehind.DefaultWriteBehindConfiguration evictionAdvisor: org.ehcache.clustered.management.EhcacheManagerToStringTest$1 - expiry: NoExpiry + expiry: NoExpiryPolicy resourcePools: pools: heap: diff --git a/clustered/ops-tool/build.gradle b/clustered/ops-tool/build.gradle index 4f676ee524..7499269244 100644 --- a/clustered/ops-tool/build.gradle +++ b/clustered/ops-tool/build.gradle @@ -15,5 +15,5 @@ */ dependencies { - compile 'com.beust:jcommander:1.47' + implementation 'com.beust:jcommander:1.47' } diff --git a/clustered/ops-tool/src/main/java/org/ehcache/clustered/operations/OperationsTool.java b/clustered/ops-tool/src/main/java/org/ehcache/clustered/operations/OperationsTool.java index a38da11064..cdffbc3d67 100644 --- a/clustered/ops-tool/src/main/java/org/ehcache/clustered/operations/OperationsTool.java +++ b/clustered/ops-tool/src/main/java/org/ehcache/clustered/operations/OperationsTool.java @@ -33,7 +33,7 @@ public class OperationsTool { } }; - public static void main(String[] args) throws IOException { + public static void main(String[] args) { System.exit(innerMain(args)); } diff --git a/clustered/server/build.gradle b/clustered/server/build.gradle index eb7bd1f0ff..8dfb3d93d9 100644 --- a/clustered/server/build.gradle +++ b/clustered/server/build.gradle @@ -17,22 +17,18 @@ apply plugin: EhDeploy dependencies { - compile project(':clustered:common'), "org.slf4j:slf4j-api:$slf4jVersion" - compile("org.terracotta:offheap-resource:$terracottaPlatformVersion") { + api "org.terracotta:client-message-tracker:$terracottaPlatformVersion" + api("org.terracotta:offheap-resource:$terracottaPlatformVersion") { transitive = false } - compile group: 'org.terracotta', name: 'offheap-store', version: offheapVersion - compile group: 'org.slf4j', name: 'slf4j-api', version: slf4jVersion - compile("org.terracotta.management:monitoring-service-api:$terracottaPlatformVersion") { + implementation project(':clustered:common') + implementation group: 'org.terracotta', name: 'offheap-store', version: offheapVersion + implementation group: 'org.slf4j', name: 'slf4j-api', version: slf4jVersion + implementation ("org.terracotta.management:monitoring-service-api:$terracottaPlatformVersion") { transitive = false } - compile"org.terracotta.management.dist:mnm-common:$terracottaPlatformVersion" - compile("org.terracotta:client-message-tracker:$terracottaPlatformVersion") - provided "org.terracotta:entity-server-api:$terracottaApisVersion" - provided "org.terracotta:standard-cluster-services:$terracottaApisVersion" - provided "org.terracotta:runnel:$terracottaPlatformVersion" -} - -tasks.withType(JavaCompile) { - options.compilerArgs += ['-Werror'] + implementation "org.terracotta.management.dist:mnm-common:$terracottaPlatformVersion" + providedImplementation "org.terracotta:entity-server-api:$terracottaApisVersion" + providedImplementation 
"org.terracotta:standard-cluster-services:$terracottaApisVersion" + providedImplementation "org.terracotta:runnel:$terracottaPlatformVersion" } diff --git a/clustered/server/src/main/java/org/ehcache/clustered/lock/server/VoltronReadWriteLockActiveEntity.java b/clustered/server/src/main/java/org/ehcache/clustered/lock/server/VoltronReadWriteLockActiveEntity.java index 868dd231de..b989bb1dfa 100644 --- a/clustered/server/src/main/java/org/ehcache/clustered/lock/server/VoltronReadWriteLockActiveEntity.java +++ b/clustered/server/src/main/java/org/ehcache/clustered/lock/server/VoltronReadWriteLockActiveEntity.java @@ -24,12 +24,12 @@ import org.ehcache.clustered.common.internal.lock.LockMessaging.LockOperation; import org.ehcache.clustered.common.internal.lock.LockMessaging.LockTransition; +import org.terracotta.entity.ActiveInvokeContext; import org.terracotta.entity.ActiveServerEntity; import org.terracotta.entity.ClientCommunicator; import org.terracotta.entity.ClientDescriptor; import org.terracotta.entity.MessageCodecException; import org.terracotta.entity.PassiveSynchronizationChannel; -import org.terracotta.entity.ReconnectRejectedException; import org.terracotta.entity.StateDumpCollector; /** @@ -50,11 +50,17 @@ public VoltronReadWriteLockActiveEntity(ClientCommunicator communicator) { } @Override - public LockTransition invoke(ClientDescriptor client, LockOperation message) { + public LockTransition invokeActive(ActiveInvokeContext context, LockOperation message) { + ClientDescriptor clientDescriptor = context.getClientDescriptor(); + return invokeActive(clientDescriptor, message); + } + + private LockTransition invokeActive(ClientDescriptor clientDescriptor, LockOperation message) { switch (message.getOperation()) { - case TRY_ACQUIRE: return tryAcquire(client, message.getHoldType()); - case ACQUIRE: return acquire(client, message.getHoldType()); - case RELEASE: return release(client, message.getHoldType()); + case TRY_ACQUIRE: + return tryAcquire(clientDescriptor, message.getHoldType()); + case ACQUIRE: return acquire(clientDescriptor, message.getHoldType()); + case RELEASE: return release(clientDescriptor, message.getHoldType()); default: throw new AssertionError(); } } @@ -105,7 +111,8 @@ public ReconnectHandler startReconnect() { releaseListeners.add(clientDescriptor); } else { try { - if (!invoke(clientDescriptor, LockMessaging.codec().decodeMessage(bytes)).isAcquired()) { + LockOperation message = LockMessaging.codec().decodeMessage(bytes); + if (!invokeActive(clientDescriptor, message).isAcquired()) { throw new IllegalStateException("Unexpected lock acquisition failure during reconnect"); } } catch (MessageCodecException ex) { diff --git a/clustered/server/src/main/java/org/ehcache/clustered/lock/server/VoltronReadWriteLockPassiveEntity.java b/clustered/server/src/main/java/org/ehcache/clustered/lock/server/VoltronReadWriteLockPassiveEntity.java index 98451e0ad1..2ac4a54513 100644 --- a/clustered/server/src/main/java/org/ehcache/clustered/lock/server/VoltronReadWriteLockPassiveEntity.java +++ b/clustered/server/src/main/java/org/ehcache/clustered/lock/server/VoltronReadWriteLockPassiveEntity.java @@ -17,9 +17,10 @@ import org.ehcache.clustered.common.internal.lock.LockMessaging.LockOperation; import org.ehcache.clustered.common.internal.lock.LockMessaging.LockTransition; +import org.terracotta.entity.InvokeContext; import org.terracotta.entity.PassiveServerEntity; -class VoltronReadWriteLockPassiveEntity implements PassiveServerEntity { +final class 
VoltronReadWriteLockPassiveEntity implements PassiveServerEntity { static final VoltronReadWriteLockPassiveEntity INSTANCE = new VoltronReadWriteLockPassiveEntity(); @@ -28,7 +29,7 @@ private VoltronReadWriteLockPassiveEntity() { } @Override - public void invoke(LockOperation message) { + public void invokePassive(InvokeContext context, LockOperation message) { throw new AssertionError("Unexpected message at passive " + message); } diff --git a/clustered/server/src/main/java/org/ehcache/clustered/lock/server/VoltronReadWriteLockServerEntityService.java b/clustered/server/src/main/java/org/ehcache/clustered/lock/server/VoltronReadWriteLockServerEntityService.java index 76f111eb65..b73b4930b8 100644 --- a/clustered/server/src/main/java/org/ehcache/clustered/lock/server/VoltronReadWriteLockServerEntityService.java +++ b/clustered/server/src/main/java/org/ehcache/clustered/lock/server/VoltronReadWriteLockServerEntityService.java @@ -90,7 +90,7 @@ public SyncMessageCodec getSyncMessageCodec() { return LockSyncMessaging.syncCodec(); } - private static final ServiceConfiguration config(final Class klazz) { + private static ServiceConfiguration config(Class klazz) { return () -> klazz; } } diff --git a/clustered/server/src/main/java/org/ehcache/clustered/lock/server/messages/LockSyncMessaging.java b/clustered/server/src/main/java/org/ehcache/clustered/lock/server/messages/LockSyncMessaging.java index 354d7b2e1d..792f2d05b7 100644 --- a/clustered/server/src/main/java/org/ehcache/clustered/lock/server/messages/LockSyncMessaging.java +++ b/clustered/server/src/main/java/org/ehcache/clustered/lock/server/messages/LockSyncMessaging.java @@ -31,12 +31,12 @@ public static SyncMessageCodec syncCodec() { private static final SyncMessageCodec SYNC_CODEC = new SyncMessageCodec() { @Override - public byte[] encode(int i, LockMessaging.LockOperation message) throws MessageCodecException { + public byte[] encode(int i, LockMessaging.LockOperation message) { throw new AssertionError(); } @Override - public LockMessaging.LockOperation decode(int i, byte[] bytes) throws MessageCodecException { + public LockMessaging.LockOperation decode(int i, byte[] bytes) { throw new AssertionError(); } }; diff --git a/clustered/server/src/main/java/org/ehcache/clustered/server/ClusterTierManagerActiveEntity.java b/clustered/server/src/main/java/org/ehcache/clustered/server/ClusterTierManagerActiveEntity.java index c4c578d6fe..4987d693dc 100644 --- a/clustered/server/src/main/java/org/ehcache/clustered/server/ClusterTierManagerActiveEntity.java +++ b/clustered/server/src/main/java/org/ehcache/clustered/server/ClusterTierManagerActiveEntity.java @@ -33,7 +33,6 @@ import org.terracotta.entity.ClientDescriptor; import org.terracotta.entity.ConfigurationException; import org.terracotta.entity.PassiveSynchronizationChannel; -import org.terracotta.entity.ReconnectRejectedException; import org.terracotta.entity.StateDumpCollector; import java.util.concurrent.atomic.AtomicBoolean; @@ -99,7 +98,7 @@ public void disconnected(ClientDescriptor clientDescriptor) { } @Override - public EhcacheEntityResponse invokeActive(ActiveInvokeContext invokeContext, EhcacheEntityMessage message) { + public EhcacheEntityResponse invokeActive(ActiveInvokeContext invokeContext, EhcacheEntityMessage message) { try { if (message instanceof EhcacheOperationMessage) { EhcacheOperationMessage operationMessage = (EhcacheOperationMessage) message; @@ -129,7 +128,7 @@ public void synchronizeKeyToPassive(PassiveSynchronizationChannel getMessageCodec public 
SyncMessageCodec getSyncMessageCodec() { return new SyncMessageCodec() { @Override - public byte[] encode(int concurrencyKey, EhcacheEntityMessage response) throws MessageCodecException { + public byte[] encode(int concurrencyKey, EhcacheEntityMessage response) { throw new UnsupportedOperationException("This entity does not have sync messages"); } @Override - public EhcacheEntityMessage decode(int concurrencyKey, byte[] payload) throws MessageCodecException { + public EhcacheEntityMessage decode(int concurrencyKey, byte[] payload) { throw new UnsupportedOperationException("This entity does not have sync messages"); } }; diff --git a/clustered/server/src/main/java/org/ehcache/clustered/server/EhcacheStateServiceImpl.java b/clustered/server/src/main/java/org/ehcache/clustered/server/EhcacheStateServiceImpl.java index ae0a02a052..a31a00289f 100644 --- a/clustered/server/src/main/java/org/ehcache/clustered/server/EhcacheStateServiceImpl.java +++ b/clustered/server/src/main/java/org/ehcache/clustered/server/EhcacheStateServiceImpl.java @@ -25,7 +25,9 @@ import org.ehcache.clustered.common.internal.exceptions.InvalidServerSideConfigurationException; import org.ehcache.clustered.common.internal.exceptions.InvalidStoreException; import org.ehcache.clustered.common.internal.exceptions.LifecycleException; +import org.ehcache.clustered.common.internal.messages.EhcacheOperationMessage; import org.ehcache.clustered.server.repo.StateRepositoryManager; +import org.ehcache.clustered.server.state.EhcacheStateContext; import org.ehcache.clustered.server.state.EhcacheStateService; import org.ehcache.clustered.server.state.EhcacheStateServiceProvider; import org.ehcache.clustered.server.state.InvalidationTracker; @@ -39,21 +41,23 @@ import org.terracotta.offheapresource.OffHeapResources; import org.terracotta.offheapstore.paging.PageSource; import org.terracotta.statistics.StatisticsManager; +import org.terracotta.statistics.ValueStatistic; -import java.util.Arrays; import java.util.Collections; import java.util.HashMap; -import java.util.HashSet; import java.util.Iterator; import java.util.Map; import java.util.Set; -import java.util.concurrent.Callable; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.function.Function; import static java.util.stream.Collectors.toMap; import static org.terracotta.offheapresource.OffHeapResourceIdentifier.identifier; +import static org.terracotta.statistics.StatisticsManager.tags; +import static org.terracotta.statistics.ValueStatistics.supply; +import static org.terracotta.statistics.StatisticType.COUNTER; +import static org.terracotta.statistics.StatisticType.GAUGE; public class EhcacheStateServiceImpl implements EhcacheStateService { @@ -65,23 +69,23 @@ public class EhcacheStateServiceImpl implements EhcacheStateService { private static final String PROPERTY_STORE_KEY = "storeName"; private static final String PROPERTY_POOL_KEY = "poolName"; - private static final Map> STAT_STORE_METHOD_REFERENCES = new HashMap<>(); - private static final Map> STAT_POOL_METHOD_REFERENCES = new HashMap<>(); + private static final Map>> STAT_STORE_METHOD_REFERENCES = new HashMap<>(11); + private static final Map>> STAT_POOL_METHOD_REFERENCES = new HashMap<>(1); static { - STAT_STORE_METHOD_REFERENCES.put("allocatedMemory", ServerStoreImpl::getAllocatedMemory); - STAT_STORE_METHOD_REFERENCES.put("dataAllocatedMemory", ServerStoreImpl::getDataAllocatedMemory); - STAT_STORE_METHOD_REFERENCES.put("occupiedMemory", 
ServerStoreImpl::getOccupiedMemory); - STAT_STORE_METHOD_REFERENCES.put("dataOccupiedMemory", ServerStoreImpl::getDataOccupiedMemory); - STAT_STORE_METHOD_REFERENCES.put("entries", ServerStoreImpl::getSize); - STAT_STORE_METHOD_REFERENCES.put("usedSlotCount", ServerStoreImpl::getUsedSlotCount); - STAT_STORE_METHOD_REFERENCES.put("dataVitalMemory", ServerStoreImpl::getDataVitalMemory); - STAT_STORE_METHOD_REFERENCES.put("vitalMemory", ServerStoreImpl::getVitalMemory); - STAT_STORE_METHOD_REFERENCES.put("removedSlotCount", ServerStoreImpl::getRemovedSlotCount); - STAT_STORE_METHOD_REFERENCES.put("dataSize", ServerStoreImpl::getDataSize); - STAT_STORE_METHOD_REFERENCES.put("tableCapacity", ServerStoreImpl::getTableCapacity); + STAT_STORE_METHOD_REFERENCES.put("allocatedMemory", store -> supply(GAUGE, store::getAllocatedMemory)); + STAT_STORE_METHOD_REFERENCES.put("dataAllocatedMemory", store -> supply(GAUGE, store::getDataAllocatedMemory)); + STAT_STORE_METHOD_REFERENCES.put("occupiedMemory", store -> supply(GAUGE, store::getOccupiedMemory)); + STAT_STORE_METHOD_REFERENCES.put("dataOccupiedMemory", store -> supply(GAUGE, store::getDataOccupiedMemory)); + STAT_STORE_METHOD_REFERENCES.put("entries", store -> supply(COUNTER, store::getSize)); + STAT_STORE_METHOD_REFERENCES.put("usedSlotCount", store -> supply(COUNTER, store::getUsedSlotCount)); + STAT_STORE_METHOD_REFERENCES.put("dataVitalMemory", store -> supply(GAUGE, store::getDataVitalMemory)); + STAT_STORE_METHOD_REFERENCES.put("vitalMemory", store -> supply(GAUGE, store::getVitalMemory)); + STAT_STORE_METHOD_REFERENCES.put("removedSlotCount", store -> supply(COUNTER, store::getRemovedSlotCount)); + STAT_STORE_METHOD_REFERENCES.put("dataSize", store -> supply(GAUGE, store::getDataSize)); + STAT_STORE_METHOD_REFERENCES.put("tableCapacity", store -> supply(GAUGE, store::getTableCapacity)); - STAT_POOL_METHOD_REFERENCES.put("allocatedSize", ResourcePageSource::getAllocatedSize); + STAT_POOL_METHOD_REFERENCES.put("allocatedSize", pool -> supply(GAUGE, pool::getAllocatedSize)); } private final OffHeapResources offHeapResources; @@ -303,14 +307,13 @@ private ResourcePageSource createPageSource(String poolName, ServerSideConfigura } private void registerStoreStatistics(ServerStoreImpl store, String storeName) { - STAT_STORE_METHOD_REFERENCES.entrySet().stream().forEach((entry)-> - registerStatistic(store, storeName, entry.getKey(), STATISTICS_STORE_TAG, PROPERTY_STORE_KEY, () -> entry.getValue().apply(store) )); + STAT_STORE_METHOD_REFERENCES.forEach((key, value) -> + registerStatistic(store, storeName, key, STATISTICS_STORE_TAG, PROPERTY_STORE_KEY, value.apply(store))); } private void registerPoolStatistics(String poolName, ResourcePageSource pageSource) { - STAT_POOL_METHOD_REFERENCES.entrySet().stream().forEach((entry)-> - registerStatistic(pageSource, poolName, entry.getKey(), STATISTICS_POOL_TAG, PROPERTY_POOL_KEY, () -> entry.getValue().apply(pageSource)) - ); + STAT_POOL_METHOD_REFERENCES.forEach((key, value) -> + registerStatistic(pageSource, poolName, key, STATISTICS_POOL_TAG, PROPERTY_POOL_KEY, value.apply(pageSource))); } private void unRegisterStoreStatistics(ServerStoreImpl store) { @@ -329,13 +332,12 @@ private void unRegisterPoolStatistics(ResourcePageSource pageSource) { } } - private void registerStatistic(Object context, String name, String observerName, String tag, String propertyKey, Callable callable) { - Set tags = new HashSet<>(Arrays.asList(tag, "tier")); + private void registerStatistic(Object context, String name, String 
observerName, String tag, String propertyKey, ValueStatistic source) { Map properties = new HashMap<>(); properties.put("discriminator", tag); properties.put(propertyKey, name); - StatisticsManager.createPassThroughStatistic(context, observerName, tags, properties, callable); + StatisticsManager.createPassThroughStatistic(context, observerName, tags(tag, "tier"), properties, source); } private void releaseDedicatedPool(String name, PageSource pageSource) { @@ -407,7 +409,7 @@ public ServerStoreImpl createStore(String name, ServerStoreConfiguration serverS ServerStoreImpl serverStore; ResourcePageSource resourcePageSource = getPageSource(name, serverStoreConfiguration.getPoolAllocation()); try { - serverStore = new ServerStoreImpl(serverStoreConfiguration, resourcePageSource, mapper); + serverStore = new ServerStoreImpl(serverStoreConfiguration, resourcePageSource, mapper, serverStoreConfiguration.isWriteBehindConfigured()); } catch (RuntimeException rte) { releaseDedicatedPool(name, resourcePageSource); throw new ConfigurationException("Failed to create ServerStore.", rte); @@ -490,6 +492,11 @@ public void loadExisting(ServerSideConfiguration configuration) { } } + @Override + public EhcacheStateContext beginProcessing(EhcacheOperationMessage message, String name) { + return () -> {}; + } + public boolean isConfigured() { return configured; } diff --git a/clustered/server/src/main/java/org/ehcache/clustered/server/ServerSideServerStore.java b/clustered/server/src/main/java/org/ehcache/clustered/server/ServerSideServerStore.java index 09424093df..be5a576ebc 100644 --- a/clustered/server/src/main/java/org/ehcache/clustered/server/ServerSideServerStore.java +++ b/clustered/server/src/main/java/org/ehcache/clustered/server/ServerSideServerStore.java @@ -31,4 +31,5 @@ public interface ServerSideServerStore extends ServerStore, MapInternals { ServerStoreConfiguration getStoreConfiguration(); List> getSegmentKeySets(); void put(long key, Chain chain); + void remove(long key); } diff --git a/clustered/server/src/main/java/org/ehcache/clustered/server/ServerStoreImpl.java b/clustered/server/src/main/java/org/ehcache/clustered/server/ServerStoreImpl.java index 724bd30289..72bee490e2 100644 --- a/clustered/server/src/main/java/org/ehcache/clustered/server/ServerStoreImpl.java +++ b/clustered/server/src/main/java/org/ehcache/clustered/server/ServerStoreImpl.java @@ -18,8 +18,10 @@ import org.ehcache.clustered.common.internal.ServerStoreConfiguration; import org.ehcache.clustered.common.internal.store.Chain; +import org.ehcache.clustered.server.offheap.OffHeapChainMap; import org.ehcache.clustered.server.offheap.OffHeapServerStore; import org.ehcache.clustered.server.state.ResourcePageSource; +import org.terracotta.offheapstore.exceptions.OversizeMappingException; import org.terracotta.offheapstore.paging.PageSource; import com.tc.classloader.CommonComponent; @@ -36,10 +38,17 @@ public class ServerStoreImpl implements ServerSideServerStore { private final ResourcePageSource pageSource; private final OffHeapServerStore store; - public ServerStoreImpl(ServerStoreConfiguration storeConfiguration, ResourcePageSource pageSource, KeySegmentMapper mapper) { + public ServerStoreImpl(ServerStoreConfiguration configuration, ResourcePageSource source, KeySegmentMapper mapper, + List> recoveredMaps) { + this.storeConfiguration = configuration; + this.pageSource = source; + this.store = new OffHeapServerStore(recoveredMaps, mapper); + } + + public ServerStoreImpl(ServerStoreConfiguration storeConfiguration, 
ResourcePageSource pageSource, KeySegmentMapper mapper, boolean writeBehindConfigured) { this.storeConfiguration = storeConfiguration; this.pageSource = pageSource; - this.store = new OffHeapServerStore(pageSource, mapper); + this.store = new OffHeapServerStore(pageSource, mapper, writeBehindConfigured); } public void setEvictionListener(ServerStoreEvictionListener listener) { @@ -66,11 +75,13 @@ public Chain get(long key) { @Override public void append(long key, ByteBuffer payLoad) { + checkPayLoadSize(payLoad); store.append(key, payLoad); } @Override public Chain getAndAppend(long key, ByteBuffer payLoad) { + checkPayLoadSize(payLoad); return store.getAndAppend(key, payLoad); } @@ -83,6 +94,11 @@ public void put(long key, Chain chain) { store.put(key, chain); } + @Override + public void remove(long key) { + store.remove(key); + } + @Override public void clear() { store.clear(); @@ -175,4 +191,11 @@ public int getReprobeLength() { throw new UnsupportedOperationException("Not supported yet."); } + + private void checkPayLoadSize(ByteBuffer payLoad) { + if (payLoad.remaining() > pageSource.getPool().getSize()) { + throw new OversizeMappingException("Payload (" + payLoad.remaining() + + ") bigger than pool size (" + pageSource.getPool().getSize() + ")"); + } + } } diff --git a/clustered/server/src/main/java/org/ehcache/clustered/server/internal/messages/EhcacheDataSyncMessage.java b/clustered/server/src/main/java/org/ehcache/clustered/server/internal/messages/EhcacheDataSyncMessage.java index 9099b341f2..1937f9389f 100644 --- a/clustered/server/src/main/java/org/ehcache/clustered/server/internal/messages/EhcacheDataSyncMessage.java +++ b/clustered/server/src/main/java/org/ehcache/clustered/server/internal/messages/EhcacheDataSyncMessage.java @@ -21,7 +21,6 @@ import com.tc.classloader.CommonComponent; import java.util.Collections; -import java.util.HashMap; import java.util.Map; @CommonComponent diff --git a/clustered/server/src/main/java/org/ehcache/clustered/server/internal/messages/EhcacheServerCodec.java b/clustered/server/src/main/java/org/ehcache/clustered/server/internal/messages/EhcacheServerCodec.java index 743089b0b9..46ce38330b 100644 --- a/clustered/server/src/main/java/org/ehcache/clustered/server/internal/messages/EhcacheServerCodec.java +++ b/clustered/server/src/main/java/org/ehcache/clustered/server/internal/messages/EhcacheServerCodec.java @@ -47,7 +47,7 @@ public EhcacheServerCodec(EhcacheCodec clientCodec, PassiveReplicationMessageCod } @Override - public byte[] encodeMessage(EhcacheEntityMessage message) throws MessageCodecException { + public byte[] encodeMessage(EhcacheEntityMessage message) { if (message instanceof PassiveReplicationMessage) { return replicationCodec.encode((PassiveReplicationMessage) message); } @@ -55,7 +55,7 @@ public byte[] encodeMessage(EhcacheEntityMessage message) throws MessageCodecExc } @Override - public EhcacheEntityMessage decodeMessage(byte[] payload) throws MessageCodecException { + public EhcacheEntityMessage decodeMessage(byte[] payload) { ByteBuffer byteBuffer = wrap(payload); Enm opCodeEnm = EhcacheCodec.OP_CODE_DECODER.decoder(byteBuffer).enm("opCode"); if (!opCodeEnm.isFound()) { diff --git a/clustered/server/src/main/java/org/ehcache/clustered/server/internal/messages/EhcacheSyncMessage.java b/clustered/server/src/main/java/org/ehcache/clustered/server/internal/messages/EhcacheSyncMessage.java index ba23e4b23a..79dd79f908 100644 --- 
a/clustered/server/src/main/java/org/ehcache/clustered/server/internal/messages/EhcacheSyncMessage.java +++ b/clustered/server/src/main/java/org/ehcache/clustered/server/internal/messages/EhcacheSyncMessage.java @@ -20,8 +20,6 @@ import com.tc.classloader.CommonComponent; -import java.util.UUID; - @CommonComponent public abstract class EhcacheSyncMessage extends EhcacheEntityMessage { diff --git a/clustered/server/src/main/java/org/ehcache/clustered/server/internal/messages/EhcacheSyncMessageCodec.java b/clustered/server/src/main/java/org/ehcache/clustered/server/internal/messages/EhcacheSyncMessageCodec.java index 84aa466394..be536b5df5 100644 --- a/clustered/server/src/main/java/org/ehcache/clustered/server/internal/messages/EhcacheSyncMessageCodec.java +++ b/clustered/server/src/main/java/org/ehcache/clustered/server/internal/messages/EhcacheSyncMessageCodec.java @@ -20,7 +20,6 @@ import org.ehcache.clustered.common.internal.messages.EhcacheEntityMessage; import org.ehcache.clustered.common.internal.messages.EhcacheEntityResponse; import org.ehcache.clustered.common.internal.messages.ResponseCodec; -import org.ehcache.clustered.common.internal.messages.StateRepositoryOpCodec; import org.ehcache.clustered.common.internal.store.Chain; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -32,6 +31,7 @@ import org.terracotta.runnel.encoding.StructEncoder; import java.nio.ByteBuffer; +import java.util.Collections; import java.util.HashMap; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; @@ -105,7 +105,7 @@ public class EhcacheSyncMessageCodec implements SyncMessageCodec decodeChainMapEntries(StructDecoder decoder) { - Map chainMap = new HashMap<>(); - StructArrayDecoder> entriesDecoder = decoder.structs(CHAIN_MAP_ENTRIES_SUB_STRUCT); + if (entriesDecoder != null) { - for (int i = 0; i < entriesDecoder.length(); i++) { + int len = entriesDecoder.length(); + Map chainMap = new HashMap<>((int) ((float) len / 0.75f + 1.0f)); + for (int i = 0; i < len; i++) { StructDecoder entryDecoder = entriesDecoder.next(); Long key = entryDecoder.int64(KEY_FIELD); StructDecoder chainDecoder = entryDecoder.struct(CHAIN_FIELD); @@ -270,8 +271,9 @@ private Map decodeChainMapEntries(StructDecoder decoder) { chainMap.put(key, chain); entryDecoder.end(); } + return chainMap; + } else { + return Collections.emptyMap(); } - return chainMap; } - } diff --git a/clustered/server/src/main/java/org/ehcache/clustered/server/internal/messages/PassiveReplicationMessage.java b/clustered/server/src/main/java/org/ehcache/clustered/server/internal/messages/PassiveReplicationMessage.java index c716ad734d..6baace78ca 100644 --- a/clustered/server/src/main/java/org/ehcache/clustered/server/internal/messages/PassiveReplicationMessage.java +++ b/clustered/server/src/main/java/org/ehcache/clustered/server/internal/messages/PassiveReplicationMessage.java @@ -49,10 +49,14 @@ public ChainReplicationMessage(long key, Chain chain, long transactionId, long o } private Chain dropLastElement(Chain chain) { - List elements = StreamSupport.stream(chain.spliterator(), false) - .collect(Collectors.toList()); - elements.remove(elements.size() -1); // remove last - return Util.getChain(elements); + if (!chain.isEmpty()) { + List elements = StreamSupport.stream(chain.spliterator(), false) + .collect(Collectors.toList()); + elements.remove(elements.size() - 1); // remove last + return Util.getChain(elements); + } else { + return chain; + } } public long getClientId() { diff --git 
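/*
 * Aside, not part of the diff: the capacity arithmetic used by
 * decodeChainMapEntries() above. A HashMap rehashes once its size exceeds
 * capacity * loadFactor (0.75 by default), so sizing it to len / 0.75 + 1
 * buckets up front lets the codec decode all len entries without a single resize.
 */
import java.util.HashMap;
import java.util.Map;

class PresizedMapSketch {
  static <K, V> Map<K, V> mapFor(int expectedEntries) {
    return new HashMap<>((int) ((float) expectedEntries / 0.75f + 1.0f));
  }
}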
a/clustered/server/src/main/java/org/ehcache/clustered/server/internal/messages/PassiveReplicationMessageCodec.java b/clustered/server/src/main/java/org/ehcache/clustered/server/internal/messages/PassiveReplicationMessageCodec.java index 5de3839962..3d902a6610 100644 --- a/clustered/server/src/main/java/org/ehcache/clustered/server/internal/messages/PassiveReplicationMessageCodec.java +++ b/clustered/server/src/main/java/org/ehcache/clustered/server/internal/messages/PassiveReplicationMessageCodec.java @@ -26,7 +26,6 @@ import org.terracotta.runnel.encoding.StructEncoder; import java.nio.ByteBuffer; -import java.util.UUID; import static org.ehcache.clustered.common.internal.messages.EhcacheMessageType.EHCACHE_MESSAGE_TYPES_ENUM_MAPPING; import static org.ehcache.clustered.common.internal.messages.EhcacheMessageType.MESSAGE_TYPE_FIELD_INDEX; diff --git a/clustered/server/src/main/java/org/ehcache/clustered/server/management/ClusterTierManagement.java b/clustered/server/src/main/java/org/ehcache/clustered/server/management/ClusterTierManagement.java index b7f7dc945a..4d75e11b77 100644 --- a/clustered/server/src/main/java/org/ehcache/clustered/server/management/ClusterTierManagement.java +++ b/clustered/server/src/main/java/org/ehcache/clustered/server/management/ClusterTierManagement.java @@ -71,32 +71,38 @@ public void close() { } } - public void reload() { + public void entityCreated() { if (managementRegistry != null) { + LOGGER.trace("entityCreated({})", storeIdentifier); + managementRegistry.entityCreated(); + init(); + } + } + + public void entityPromotionCompleted() { + if (managementRegistry != null) { + LOGGER.trace("entityPromotionCompleted({})", storeIdentifier); managementRegistry.entityPromotionCompleted(); init(); } } // the goal of the following code is to send the management metadata from the entity into the monitoring tree AFTER the entity creation - public void init() { - if (managementRegistry != null) { - LOGGER.trace("init({})", storeIdentifier); - ServerSideServerStore serverStore = ehcacheStateService.getStore(storeIdentifier); - ServerStoreBinding serverStoreBinding = new ServerStoreBinding(storeIdentifier, serverStore); - CompletableFuture r1 = managementRegistry.register(serverStoreBinding); - ServerSideConfiguration.Pool pool = ehcacheStateService.getDedicatedResourcePool(storeIdentifier); - CompletableFuture allOf; - if (pool != null) { - allOf = CompletableFuture.allOf(r1, managementRegistry.register(new PoolBinding(storeIdentifier, pool, PoolBinding.AllocationType.DEDICATED))); - } else { - allOf = r1; - } - allOf.thenRun(() -> { - managementRegistry.refresh(); - managementRegistry.pushServerEntityNotification(serverStoreBinding, EHCACHE_SERVER_STORE_CREATED.name()); - }); + private void init() { + ServerSideServerStore serverStore = ehcacheStateService.getStore(storeIdentifier); + ServerStoreBinding serverStoreBinding = new ServerStoreBinding(storeIdentifier, serverStore); + CompletableFuture r1 = managementRegistry.register(serverStoreBinding); + ServerSideConfiguration.Pool pool = ehcacheStateService.getDedicatedResourcePool(storeIdentifier); + CompletableFuture allOf; + if (pool != null) { + allOf = CompletableFuture.allOf(r1, managementRegistry.register(new PoolBinding(storeIdentifier, pool, PoolBinding.AllocationType.DEDICATED))); + } else { + allOf = r1; } + allOf.thenRun(() -> { + managementRegistry.refresh(); + managementRegistry.pushServerEntityNotification(serverStoreBinding, EHCACHE_SERVER_STORE_CREATED.name()); + }); } } diff --git 
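/*
 * Aside, not part of the diff: the reload() split above separates two distinct
 * life-cycle events. The call sites sketched here are assumptions drawn from
 * the surrounding change, not quoted from it: a freshly created active entity
 * reports entityCreated(), a passive promoted on fail-over reports
 * entityPromotionCompleted(), and both paths end by re-publishing the
 * management metadata through the private init().
 */
import org.ehcache.clustered.server.management.ClusterTierManagement;

class LifecycleSketch {
  private final ClusterTierManagement management;

  LifecycleSketch(ClusterTierManagement management) {
    this.management = management;
  }

  void onCreateNew() {                        // hypothetical creation hook
    management.entityCreated();
  }

  void onPromotionCompleted() {               // hypothetical fail-over hook
    management.entityPromotionCompleted();
  }
}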
a/clustered/server/src/main/java/org/ehcache/clustered/server/management/Management.java b/clustered/server/src/main/java/org/ehcache/clustered/server/management/Management.java index e1afd1b8f7..90441496af 100644 --- a/clustered/server/src/main/java/org/ehcache/clustered/server/management/Management.java +++ b/clustered/server/src/main/java/org/ehcache/clustered/server/management/Management.java @@ -82,24 +82,19 @@ protected void registerClusterTierManagerSettingsProvider() { getManagementRegistry().addManagementProvider(new ClusterTierManagerSettingsManagementProvider()); } - public void reload() { + public void entityCreated() { if (managementRegistry != null) { - managementRegistry.entityPromotionCompleted(); + LOGGER.trace("entityCreated()"); + managementRegistry.entityCreated(); init(); } } - // the goal of the following code is to send the management metadata from the entity into the monitoring tre AFTER the entity creation - public void init() { + public void entityPromotionCompleted() { if (managementRegistry != null) { - LOGGER.trace("init()"); - - CompletableFuture.allOf( - managementRegistry.register(generateClusterTierManagerBinding()), - // PoolBinding.ALL_SHARED is a marker so that we can send events not specifically related to 1 pool - // this object is ignored from the stats and descriptors - managementRegistry.register(PoolBinding.ALL_SHARED) - ).thenRun(managementRegistry::refresh); + LOGGER.trace("entityPromotionCompleted()"); + managementRegistry.entityPromotionCompleted(); + init(); } } @@ -118,4 +113,14 @@ public void sharedPoolsConfigured() { } } + // the goal of the following code is to send the management metadata from the entity into the monitoring tree AFTER the entity creation + private void init() { + CompletableFuture.allOf( + managementRegistry.register(generateClusterTierManagerBinding()), + // PoolBinding.ALL_SHARED is a marker so that we can send events not specifically related to 1 pool + // this object is ignored from the stats and descriptors + managementRegistry.register(PoolBinding.ALL_SHARED) + ).thenRun(managementRegistry::refresh); + } + } diff --git a/clustered/server/src/main/java/org/ehcache/clustered/server/management/PoolStatisticsManagementProvider.java b/clustered/server/src/main/java/org/ehcache/clustered/server/management/PoolStatisticsManagementProvider.java index bcbc015abf..47dbf004b8 100644 --- a/clustered/server/src/main/java/org/ehcache/clustered/server/management/PoolStatisticsManagementProvider.java +++ b/clustered/server/src/main/java/org/ehcache/clustered/server/management/PoolStatisticsManagementProvider.java @@ -31,7 +31,7 @@ import java.util.stream.Collectors; import static java.util.Arrays.asList; -import static org.terracotta.context.extended.ValueStatisticDescriptor.descriptor; +import static org.terracotta.statistics.registry.ValueStatisticDescriptor.descriptor; @Named("PoolStatistics") @RequiredContext({@Named("consumerId"), @Named("type"), @Named("alias")}) @@ -53,17 +53,17 @@ public Collection> getExposedObjects() { @Override protected StatisticRegistry getStatisticRegistry(PoolBinding managedObject) { if (managedObject == PoolBinding.ALL_SHARED) { - return StatisticRegistry.noop(); + return new StatisticRegistry(null, () -> getTimeSource().getTimestamp()); } String poolName = managedObject.getAlias(); PoolBinding.AllocationType allocationType = managedObject.getAllocationType(); if (allocationType == PoolBinding.AllocationType.DEDICATED) { - return new
StatisticRegistry(ehcacheStateService.getDedicatedResourcePageSource(poolName)); + return new StatisticRegistry(ehcacheStateService.getDedicatedResourcePageSource(poolName), () -> getTimeSource().getTimestamp()); } else { - return new StatisticRegistry(ehcacheStateService.getSharedResourcePageSource(poolName)); + return new StatisticRegistry(ehcacheStateService.getSharedResourcePageSource(poolName), () -> getTimeSource().getTimestamp()); } } @@ -76,7 +76,7 @@ private static class PoolExposedStatistics extends AbstractExposedStatistics extends StorageEngine { + InternalChain newChain(ByteBuffer element); + InternalChain newChain(Chain chain); +} diff --git a/clustered/server/src/main/java/org/ehcache/clustered/server/offheap/InternalChain.java b/clustered/server/src/main/java/org/ehcache/clustered/server/offheap/InternalChain.java index 0e7e209986..18af113114 100644 --- a/clustered/server/src/main/java/org/ehcache/clustered/server/offheap/InternalChain.java +++ b/clustered/server/src/main/java/org/ehcache/clustered/server/offheap/InternalChain.java @@ -20,7 +20,7 @@ import org.ehcache.clustered.common.internal.store.Chain; -interface InternalChain extends Closeable { +public interface InternalChain extends Closeable { Chain detach(); diff --git a/clustered/server/src/main/java/org/ehcache/clustered/server/offheap/LongPortability.java b/clustered/server/src/main/java/org/ehcache/clustered/server/offheap/LongPortability.java index 54a172414f..f2db3299bb 100644 --- a/clustered/server/src/main/java/org/ehcache/clustered/server/offheap/LongPortability.java +++ b/clustered/server/src/main/java/org/ehcache/clustered/server/offheap/LongPortability.java @@ -18,9 +18,9 @@ import java.nio.ByteBuffer; import org.terracotta.offheapstore.storage.portability.Portability; -class LongPortability implements Portability { +public class LongPortability implements Portability { - static final Portability INSTANCE = new LongPortability(); + public static final Portability INSTANCE = new LongPortability(); private LongPortability() {} diff --git a/clustered/server/src/main/java/org/ehcache/clustered/server/offheap/OffHeapChainMap.java b/clustered/server/src/main/java/org/ehcache/clustered/server/offheap/OffHeapChainMap.java index b11880f35e..68fd8eb964 100644 --- a/clustered/server/src/main/java/org/ehcache/clustered/server/offheap/OffHeapChainMap.java +++ b/clustered/server/src/main/java/org/ehcache/clustered/server/offheap/OffHeapChainMap.java @@ -22,7 +22,6 @@ import java.util.List; import java.util.Map; import java.util.Set; -import java.util.concurrent.Callable; import java.util.concurrent.locks.Lock; import org.ehcache.clustered.common.internal.store.Chain; @@ -36,6 +35,7 @@ import org.terracotta.offheapstore.exceptions.OversizeMappingException; import org.terracotta.offheapstore.paging.PageSource; import org.terracotta.offheapstore.storage.portability.Portability; +import org.terracotta.offheapstore.util.Factory; public class OffHeapChainMap implements MapInternals { @@ -43,12 +43,12 @@ interface ChainMapEvictionListener { void onEviction(K key); } - private final ReadWriteLockedOffHeapClockCache heads; - private final OffHeapChainStorageEngine chainStorage; + protected final ReadWriteLockedOffHeapClockCache heads; + private final ChainStorageEngine chainStorage; private volatile ChainMapEvictionListener evictionListener; - public OffHeapChainMap(PageSource source, Portability keyPortability, int minPageSize, int maxPageSize, boolean shareByThieving) { - this.chainStorage = new 
OffHeapChainStorageEngine<>(source, keyPortability, minPageSize, maxPageSize, shareByThieving, shareByThieving); + private OffHeapChainMap(PageSource source, ChainStorageEngine storageEngine) { + this.chainStorage = storageEngine; EvictionListener listener = callable -> { try { Map.Entry entry = callable.call(); @@ -69,6 +69,14 @@ public OffHeapChainMap(PageSource source, Portability keyPortability, this.heads = new EvictionListeningReadWriteLockedOffHeapClockCache<>(listener, source, chainStorage); } + public OffHeapChainMap(PageSource source, Factory> storageEngineFactory) { + this(source, storageEngineFactory.newInstance()); + } + + public OffHeapChainMap(PageSource source, Portability keyPortability, int minPageSize, int maxPageSize, boolean shareByThieving) { + this(source, new OffHeapChainStorageEngine<>(source, keyPortability, minPageSize, maxPageSize, shareByThieving, shareByThieving)); + } + //For tests OffHeapChainMap(ReadWriteLockedOffHeapClockCache heads, OffHeapChainStorageEngine chainStorage) { this.chainStorage = chainStorage; @@ -79,6 +87,10 @@ void setEvictionListener(ChainMapEvictionListener listener) { evictionListener = listener; } + public ChainStorageEngine getStorageEngine() { + return chainStorage; + } + public Chain get(K key) { final Lock lock = heads.readLock(); lock.lock(); @@ -193,8 +205,8 @@ public void put(K key, Chain chain) { current.close(); } } else { - for (Element x : chain) { - append(key, x.getPayload()); + if (!chain.isEmpty()) { + heads.put(key, chainStorage.newChain(chain)); } } } finally { @@ -202,6 +214,16 @@ public void put(K key, Chain chain) { } } + void remove(K key) { + Lock lock = heads.writeLock(); + lock.lock(); + try { + heads.removeNoReturn(key); + } finally { + lock.unlock(); + } + } + public void clear() { heads.writeLock().lock(); try { @@ -223,9 +245,7 @@ public Set keySet() { private void evict() { int evictionIndex = heads.getEvictionIndex(); if (evictionIndex < 0) { - StringBuilder sb = new StringBuilder("Storage Engine and Eviction Failed - Everything Pinned ("); - sb.append(getSize()).append(" mappings) \n").append("Storage Engine : ").append(chainStorage); - throw new OversizeMappingException(sb.toString()); + throw new OversizeMappingException("Storage Engine and Eviction Failed - Everything Pinned (" + getSize() + " mappings) \n" + "Storage Engine : " + chainStorage); } else { heads.evict(evictionIndex, false); } @@ -349,11 +369,11 @@ public long getDataSize() { return heads.getDataSize(); } - boolean shrink() { + public boolean shrink() { return heads.shrink(); } - Lock writeLock() { + public Lock writeLock() { return heads.writeLock(); } diff --git a/clustered/server/src/main/java/org/ehcache/clustered/server/offheap/OffHeapChainStorageEngine.java b/clustered/server/src/main/java/org/ehcache/clustered/server/offheap/OffHeapChainStorageEngine.java index 3688ae746a..8601790d64 100644 --- a/clustered/server/src/main/java/org/ehcache/clustered/server/offheap/OffHeapChainStorageEngine.java +++ b/clustered/server/src/main/java/org/ehcache/clustered/server/offheap/OffHeapChainStorageEngine.java @@ -19,7 +19,6 @@ import java.util.ArrayList; import java.util.Collections; import java.util.Iterator; -import java.util.HashSet; import java.util.List; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; @@ -30,14 +29,16 @@ import org.ehcache.clustered.common.internal.store.Util; import org.terracotta.offheapstore.paging.OffHeapStorageArea; import org.terracotta.offheapstore.paging.PageSource; +import 
org.terracotta.offheapstore.storage.BinaryStorageEngine; import org.terracotta.offheapstore.storage.PointerSize; import org.terracotta.offheapstore.storage.StorageEngine; import org.terracotta.offheapstore.storage.portability.Portability; +import org.terracotta.offheapstore.storage.portability.WriteContext; +import org.terracotta.offheapstore.util.Factory; import static java.util.Collections.unmodifiableList; -class OffHeapChainStorageEngine implements StorageEngine { - +public class OffHeapChainStorageEngine implements ChainStorageEngine, BinaryStorageEngine { private static final int ELEMENT_HEADER_SEQUENCE_OFFSET = 0; private static final int ELEMENT_HEADER_LENGTH_OFFSET = 8; private static final int ELEMENT_HEADER_NEXT_OFFSET = 12; @@ -48,16 +49,40 @@ class OffHeapChainStorageEngine implements StorageEngine { private static final int CHAIN_HEADER_TAIL_OFFSET = 8; private static final int CHAIN_HEADER_SIZE = 16; + private static final int DETACHED_CONTIGUOUS_CHAIN_ADDRESS_OFFSET = 0; + private static final int DETACHED_CONTIGUOUS_CHAIN_HEADER_SIZE = 8; + private final OffHeapStorageArea storage; private final Portability keyPortability; private final Set activeChains = Collections.newSetFromMap(new ConcurrentHashMap()); + private final int extendedChainHeaderSize; + private final ByteBuffer emptyExtendedChainHeader; + private final int totalChainHeaderSize; - private StorageEngine.Owner owner; + protected StorageEngine.Owner owner; private long nextSequenceNumber = 0; + private volatile boolean hasContiguousChains = false; + + public static Factory> + createFactory(final PageSource source, + final Portability keyPortability, + final int minPageSize, final int maxPageSize, + final boolean thief, final boolean victim) { + return (Factory>)() -> new OffHeapChainStorageEngine<>(source, keyPortability, + minPageSize, maxPageSize, thief, victim); + } - public OffHeapChainStorageEngine(PageSource source, Portability keyPortability, int minPageSize, int maxPageSize, boolean thief, boolean victim) { + OffHeapChainStorageEngine(PageSource source, Portability keyPortability, int minPageSize, int maxPageSize, boolean thief, boolean victim) { + this(source, keyPortability, minPageSize, maxPageSize, thief, victim, ByteBuffer.allocate(0)); + } + + protected OffHeapChainStorageEngine(PageSource source, Portability keyPortability, int minPageSize, int maxPageSize, boolean thief, boolean victim, + final ByteBuffer emptyExtendedChainHeader) { this.storage = new OffHeapStorageArea(PointerSize.LONG, new StorageOwner(), source, minPageSize, maxPageSize, thief, victim); this.keyPortability = keyPortability; + this.extendedChainHeaderSize = emptyExtendedChainHeader.remaining(); + this.emptyExtendedChainHeader = emptyExtendedChainHeader; + this.totalChainHeaderSize = CHAIN_HEADER_SIZE + this.extendedChainHeaderSize; } //For tests @@ -65,14 +90,20 @@ Set getActiveChains() { return this.activeChains; } - InternalChain newChain(ByteBuffer element) { - return new PrimordialChain(element); + @Override + public InternalChain newChain(ByteBuffer element) { + return new GenesisLink(element); + } + + @Override + public InternalChain newChain(Chain chain) { + return new GenesisLinks(chain); } @Override public Long writeMapping(K key, InternalChain value, int hash, int metadata) { - if (value instanceof PrimordialChain) { - return createAttachedChain(key, hash, (PrimordialChain) value); + if (value instanceof GenesisChain) { + return createAttachedChain(key, hash, (GenesisChain) value); } else { throw new 
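/*
 * Aside, not part of the diff: how the new factory indirection is meant to be
 * consumed. OffHeapChainMap gained a constructor taking a storage-engine
 * factory, and createFactory(...) above builds one for the default engine, so
 * alternative ChainStorageEngine implementations can be swapped in without
 * touching the map. This sketch assumes it lives in
 * org.ehcache.clustered.server.offheap; the generic signatures are
 * reconstructed (the flattened diff strips them) and the page sizes are
 * illustrative.
 */
import org.terracotta.offheapstore.paging.PageSource;
import org.terracotta.offheapstore.util.Factory;

class ChainMapFactorySketch {
  static OffHeapChainMap<Long> defaultChainMap(PageSource source) {
    Factory<? extends ChainStorageEngine<Long>> engines =
        OffHeapChainStorageEngine.createFactory(source, LongPortability.INSTANCE,
                                                4 * 1024, 8 * 1024 * 1024, false, false);
    return new OffHeapChainMap<>(source, engines);
  }
}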
AssertionError("only detached internal chains should be initially written"); } @@ -80,7 +111,7 @@ public Long writeMapping(K key, InternalChain value, int hash, int metadata) { @Override public void attachedMapping(long encoding, int hash, int metadata) { - //nothing + chainAttached(encoding); } @Override @@ -115,14 +146,150 @@ public boolean equalsKey(Object key, long encoding) { private ByteBuffer readKeyBuffer(long encoding) { int keyLength = readKeySize(encoding); - int elemLength = storage.readInt(encoding + CHAIN_HEADER_SIZE + ELEMENT_HEADER_LENGTH_OFFSET); - return storage.readBuffer(encoding + CHAIN_HEADER_SIZE + ELEMENT_HEADER_SIZE + elemLength, keyLength); + int elemLength = readElementLength(encoding + this.totalChainHeaderSize); + return storage.readBuffer(encoding + this.totalChainHeaderSize + ELEMENT_HEADER_SIZE + elemLength, keyLength); } - private int readKeyHash(long encoding) { + @Override + public int readKeyHash(long encoding) { return storage.readInt(encoding + CHAIN_HEADER_KEY_HASH_OFFSET); } + private int readElementLength(long element) { + // The most significant bit (MSB) of element length is used to signify whether an element is explicitly allocated + // (msb clear) or part of a contiguous chain (msb set). Clear the msb when returning length. + return Integer.MAX_VALUE & storage.readInt(element + ELEMENT_HEADER_LENGTH_OFFSET); + } + + @Override + public ByteBuffer readBinaryKey(long encoding) { + return readKeyBuffer(encoding); + } + + @Override + public ByteBuffer readBinaryValue(long chain) { + // first get total element size and allocate buffer + long element = chain + this.totalChainHeaderSize; + int totalLength = DETACHED_CONTIGUOUS_CHAIN_HEADER_SIZE; + do { + totalLength += ELEMENT_HEADER_SIZE + readElementLength(element); + element = storage.readLong(element + ELEMENT_HEADER_NEXT_OFFSET); + } while (element != chain); + + final ByteBuffer detachedContiguousBuffer = ByteBuffer.allocate(totalLength); + // one way for layers above to extract encoding is to put the encoding of the chain address in the value + detachedContiguousBuffer.putLong(chain); + + // now add the elements to the buffer + element = chain + this.totalChainHeaderSize; + do { + final int startPosition = detachedContiguousBuffer.position(); + detachedContiguousBuffer.put(storage.readBuffer(element, ELEMENT_HEADER_SIZE + readElementLength(element))); + detachedContiguousBuffer.mark(); + detachedContiguousBuffer.putLong(startPosition + ELEMENT_HEADER_NEXT_OFFSET, -1L); + detachedContiguousBuffer.reset(); + element = storage.readLong(element + ELEMENT_HEADER_NEXT_OFFSET); + } while (element != chain); + return (ByteBuffer)detachedContiguousBuffer.flip(); + } + + @Override + public boolean equalsBinaryKey(ByteBuffer binaryKey, long chain) { + return binaryKey.equals(readKeyBuffer(chain)); + } + + @Override + public Long writeBinaryMapping(ByteBuffer binaryKey, ByteBuffer binaryValue, int hash, int metadata) { + final int totalSize = binaryKey.remaining() + + (binaryValue.remaining() - DETACHED_CONTIGUOUS_CHAIN_HEADER_SIZE) + + this.totalChainHeaderSize; + long chain = storage.allocate(totalSize); + if (chain < 0) { + return null; + } + if (binaryValue.remaining() < DETACHED_CONTIGUOUS_CHAIN_HEADER_SIZE + ELEMENT_HEADER_SIZE) { + // a chain must have at least one element. Something is wrong + throw new AssertionError("Invalid chain data detected. 
Empty links"); + } + binaryValue.mark(); + binaryKey.mark(); + try { + // extract first element + binaryValue.position(DETACHED_CONTIGUOUS_CHAIN_HEADER_SIZE); + final ByteBuffer firstElementWithHeader = binaryValue.slice(); + final int firstElementWithHeaderSize = ELEMENT_HEADER_SIZE + + (Integer.MAX_VALUE & firstElementWithHeader.getInt(ELEMENT_HEADER_LENGTH_OFFSET)); + firstElementWithHeader.limit(firstElementWithHeaderSize); + binaryValue.position(binaryValue.position() + firstElementWithHeaderSize); + + // mark relevant locations + final int keySize = binaryKey.remaining(); + final long firstElementLocation = chain + this.totalChainHeaderSize; + final long keyLocation = firstElementLocation + firstElementWithHeaderSize; + final long restOfElementsLocation = keyLocation + keySize; + + // build element length list + final ByteBuffer restOfElementsBuffer = binaryValue.slice(); + final List restOfElementLengthsWithHeader = new ArrayList<>(); + while (restOfElementsBuffer.hasRemaining()) { + final int skipLength = ELEMENT_HEADER_SIZE + (Integer.MAX_VALUE & restOfElementsBuffer.getInt( + restOfElementsBuffer.position() + ELEMENT_HEADER_LENGTH_OFFSET)); + restOfElementLengthsWithHeader.add(skipLength); + restOfElementsBuffer.position(restOfElementsBuffer.position() + skipLength); + } + restOfElementsBuffer.rewind(); + + // now write all the data + storage.writeInt(chain + CHAIN_HEADER_KEY_HASH_OFFSET, hash); + storage.writeInt(chain + CHAIN_HEADER_KEY_LENGTH_OFFSET, Integer.MIN_VALUE | keySize); + storage.writeBuffer(keyLocation, binaryKey); + storage.writeBuffer(firstElementLocation, firstElementWithHeader); + storage.writeBuffer(chain + CHAIN_HEADER_SIZE, emptyExtendedChainHeader.duplicate()); + if (restOfElementsBuffer.hasRemaining()) { + storage.writeBuffer(restOfElementsLocation, restOfElementsBuffer); + } + + // now adjust offsets + if (restOfElementLengthsWithHeader.size() <= 0) { + // we have only one element + storage.writeLong(chain + CHAIN_HEADER_TAIL_OFFSET, firstElementLocation); + storage.writeLong(firstElementLocation + ELEMENT_HEADER_NEXT_OFFSET, chain); + } else { + // recovering the buffer into a contiguous chain..denote this.. 
+ this.hasContiguousChains = true; + storage.writeLong(firstElementLocation + ELEMENT_HEADER_NEXT_OFFSET, restOfElementsLocation); + long currentLocation = restOfElementsLocation; + int i = 0; + for (; i < restOfElementLengthsWithHeader.size() - 1; i++) { + final int elemLength = restOfElementLengthsWithHeader.get(i) - ELEMENT_HEADER_SIZE; + final int adjustedLength = Integer.MIN_VALUE | elemLength; + long nextLocation = currentLocation + elemLength + ELEMENT_HEADER_SIZE; + storage.writeLong(currentLocation + ELEMENT_HEADER_NEXT_OFFSET, nextLocation); + // denote that this is not an allocated chunk + storage.writeInt(currentLocation + ELEMENT_HEADER_LENGTH_OFFSET, adjustedLength); + currentLocation = nextLocation; + } + final int adjustedLength = Integer.MIN_VALUE | (restOfElementLengthsWithHeader.get(i) - ELEMENT_HEADER_SIZE); + storage.writeLong(currentLocation + ELEMENT_HEADER_NEXT_OFFSET, chain); + storage.writeInt(currentLocation + ELEMENT_HEADER_LENGTH_OFFSET, adjustedLength); + storage.writeLong(chain + CHAIN_HEADER_TAIL_OFFSET, currentLocation); + } + return chain; + } finally { + binaryKey.reset(); + binaryValue.reset(); + } + } + + public static long extractChainAddressFromValue(ByteBuffer valueBuffer) { + return valueBuffer.getLong(DETACHED_CONTIGUOUS_CHAIN_ADDRESS_OFFSET); + } + + @Override + public Long writeBinaryMapping(ByteBuffer[] byteBuffers, ByteBuffer[] byteBuffers1, int i, int i1) { + throw new AssertionError("Operation Not supported"); + } + private int readKeySize(long encoding) { return Integer.MAX_VALUE & storage.readInt(encoding + CHAIN_HEADER_KEY_LENGTH_OFFSET); } @@ -172,6 +339,53 @@ public boolean shrink() { return storage.shrink(); } + protected ByteBuffer getExtensionHeader(long chainAddress) { + checkExtensionHeaderExists(); + return storage.readBuffer(toExtensionAddress(chainAddress), extendedChainHeaderSize); + } + + protected WriteContext getExtensionWriteContext(long chainAddress) { + checkExtensionHeaderExists(); + return new WriteContext() { + + @Override + public void setLong(int offset, long value) { + if (offset < 0 || offset >= extendedChainHeaderSize) { + throw new IllegalArgumentException("Offset not within bounds 0 >= " + offset + " < " + extendedChainHeaderSize); + } else { + storage.writeLong(toExtensionAddress(chainAddress) + offset, value); + } + } + + @Override + public void flush() { + //no-op + } + }; + } + + protected void chainAttached(long chainAddress) { + } + + protected void chainFreed(long chainAddress) { + } + + protected void chainModified(long chainAddress) { + } + + protected void chainMoved(long fromChainAddress, long toChainAddress) { + } + + private void checkExtensionHeaderExists() { + if (extendedChainHeaderSize <= 0) { + throw new AssertionError("No extended header support for this storage engine"); + } + } + + private long toExtensionAddress(long chainAddress) { + return chainAddress + CHAIN_HEADER_SIZE; + } + private static class DetachedChain implements Chain { private final List elements; @@ -202,33 +416,65 @@ public Iterator iterator() { } - private static class PrimordialChain implements InternalChain { - - private final ByteBuffer element; - - public PrimordialChain(ByteBuffer element) { - this.element = element; - } - + /** + * Represents the initial form of a chain before the storage engine writes the chain mapping + * to the underlying map against the key. 
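/*
 * Aside, not part of the diff: what the new protected surface is for. A subclass
 * can reserve a per-chain extension area by passing a non-empty header template
 * to the protected constructor, then react to the chainAttached / chainFreed /
 * chainModified / chainMoved callbacks. Everything in this sketch (the class,
 * its 8-byte header, the timestamp it stores) is hypothetical; it is assumed to
 * live in the same package, and the generic bound on Portability is reconstructed.
 */
import java.nio.ByteBuffer;

import org.terracotta.offheapstore.paging.PageSource;
import org.terracotta.offheapstore.storage.portability.Portability;

class TimestampingChainStorageEngine<K> extends OffHeapChainStorageEngine<K> {

  TimestampingChainStorageEngine(PageSource source, Portability<? super K> keyPortability,
                                 int minPageSize, int maxPageSize) {
    // reserve 8 bytes per chain for a last-modified timestamp
    super(source, keyPortability, minPageSize, maxPageSize, false, false, ByteBuffer.allocate(8));
  }

  @Override
  protected void chainModified(long chainAddress) {
    // the write context bounds-checks offsets against the reserved header
    getExtensionWriteContext(chainAddress).setLong(0, System.currentTimeMillis());
  }

  long lastModified(long chainAddress) {
    return getExtensionHeader(chainAddress).getLong(0);
  }
}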
+ */ + private static abstract class GenesisChain implements InternalChain { @Override public Chain detach() { - throw new AssertionError("primordial chains cannot be detached"); + throw new AssertionError("Chain not in storage yet. Cannot be detached"); } @Override public boolean append(ByteBuffer element) { - throw new AssertionError("primordial chains cannot be appended"); + throw new AssertionError("Chain not in storage yet. Cannot be appended"); } @Override public boolean replace(Chain expected, Chain replacement) { - throw new AssertionError("primordial chains cannot be mutated"); + throw new AssertionError("Chain not in storage yet. Cannot be mutated"); } @Override public void close() { //no-op } + + protected abstract Iterator iterator(); + } + + /** + * Represents a simple {@link GenesisChain} that contains a single link. + */ + private static class GenesisLink extends GenesisChain { + private final Element element; + + public GenesisLink(ByteBuffer buffer) { + element = () -> buffer; + } + + @Override + protected Iterator iterator() { + return Collections.singleton(element).iterator(); + } + } + + /** + * Represents a more complex {@link GenesisChain} that contains multiple links represented itself + * as a {@link Chain}. + */ + private static class GenesisLinks extends GenesisChain { + private final Chain chain; + + public GenesisLinks(Chain chain) { + this.chain = chain; + } + + @Override + protected Iterator iterator() { + return chain.iterator(); + } } private final class AttachedInternalChain implements InternalChain { @@ -237,6 +483,10 @@ private final class AttachedInternalChain implements InternalChain { * Location of the chain structure, not of the first element. */ private long chain; + /** + * track if this chain is modified so that we can signal on close + */ + private boolean chainModified = false; AttachedInternalChain(long address) { this.chain = address; @@ -247,7 +497,7 @@ private final class AttachedInternalChain implements InternalChain { public Chain detach() { List buffers = new ArrayList<>(); - long element = chain + CHAIN_HEADER_SIZE; + long element = chain + OffHeapChainStorageEngine.this.totalChainHeaderSize; do { buffers.add(element(readElementBuffer(element), readElementSequenceNumber(element))); element = storage.readLong(element + ELEMENT_HEADER_NEXT_OFFSET); @@ -262,13 +512,10 @@ public boolean append(ByteBuffer element) { if (newTail < 0) { return false; } else { + this.chainModified = true; long oldTail = storage.readLong(chain + CHAIN_HEADER_TAIL_OFFSET); storage.writeLong(newTail + ELEMENT_HEADER_NEXT_OFFSET, chain); - try { - storage.writeLong(oldTail + ELEMENT_HEADER_NEXT_OFFSET, newTail); - } catch (NullPointerException e) { - throw e; - } + storage.writeLong(oldTail + ELEMENT_HEADER_NEXT_OFFSET, newTail); storage.writeLong(chain + CHAIN_HEADER_TAIL_OFFSET, newTail); return true; } @@ -286,7 +533,7 @@ public boolean replace(Chain expected, Chain replacement) { } public boolean removeHeader(Chain header) { - long suffixHead = chain + CHAIN_HEADER_SIZE; + long suffixHead = chain + OffHeapChainStorageEngine.this.totalChainHeaderSize; long prefixTail; Iterator iterator = header.iterator(); @@ -307,13 +554,14 @@ public boolean removeHeader(Chain header) { return true; } else { int hash = readKeyHash(chain); - int elemSize = storage.readInt(suffixHead + ELEMENT_HEADER_LENGTH_OFFSET); + int elemSize = readElementLength(suffixHead); ByteBuffer elemBuffer = storage.readBuffer(suffixHead + ELEMENT_HEADER_SIZE, elemSize); Long newChainAddress = 
createAttachedChain(readKeyBuffer(chain), hash, elemBuffer); if (newChainAddress == null) { return false; } else { try (AttachedInternalChain newChain = new AttachedInternalChain(newChainAddress)) { + newChain.chainModified = true; //copy remaining elements from old chain (by reference) long next = storage.readLong(suffixHead + ELEMENT_HEADER_NEXT_OFFSET); long tail = storage.readLong(chain + CHAIN_HEADER_TAIL_OFFSET); @@ -323,6 +571,7 @@ public boolean removeHeader(Chain header) { if (owner.updateEncoding(hash, chain, newChainAddress, ~0)) { storage.writeLong(prefixTail + ELEMENT_HEADER_NEXT_OFFSET, chain); + chainMoved(chain, newChainAddress); free(); return true; } else { @@ -335,7 +584,7 @@ public boolean removeHeader(Chain header) { } public boolean replaceHeader(Chain expected, Chain replacement) { - long suffixHead = chain + CHAIN_HEADER_SIZE; + long suffixHead = chain + OffHeapChainStorageEngine.this.totalChainHeaderSize; long prefixTail; Iterator expectedIt = expected.iterator(); @@ -348,11 +597,12 @@ public boolean replaceHeader(Chain expected, Chain replacement) { } while (expectedIt.hasNext()); int hash = readKeyHash(chain); - Long newChainAddress = createAttachedChain(readKeyBuffer(chain), hash, replacement); + Long newChainAddress = createAttachedChain(readKeyBuffer(chain), hash, replacement.iterator()); if (newChainAddress == null) { return false; } else { try (AttachedInternalChain newChain = new AttachedInternalChain(newChainAddress)) { + newChain.chainModified = true; //copy remaining elements from old chain (by reference) if (suffixHead != chain) { newChain.append(suffixHead, storage.readLong(chain + CHAIN_HEADER_TAIL_OFFSET)); @@ -360,6 +610,7 @@ public boolean replaceHeader(Chain expected, Chain replacement) { if (owner.updateEncoding(hash, chain, newChainAddress, ~0)) { storage.writeLong(prefixTail + ELEMENT_HEADER_NEXT_OFFSET, chain); + chainMoved(chain, newChainAddress); free(); return true; } else { @@ -371,14 +622,21 @@ public boolean replaceHeader(Chain expected, Chain replacement) { } private void free() { - long element = storage.readLong(chain + CHAIN_HEADER_SIZE + ELEMENT_HEADER_NEXT_OFFSET); - storage.free(chain); + // signal dependent engines to act on this free before freeing the storage + chainFreed(chain); + chainModified = false; + long element = storage.readLong(chain + OffHeapChainStorageEngine.this.totalChainHeaderSize + ELEMENT_HEADER_NEXT_OFFSET); while (element != chain) { long next = storage.readLong(element + ELEMENT_HEADER_NEXT_OFFSET); - storage.free(element); + if (storage.readInt(element + ELEMENT_HEADER_LENGTH_OFFSET) >= 0) { + // do not free blocks contiguous to chain + storage.free(element); + } element = next; } + + storage.free(chain); } private long createElement(ByteBuffer element) { @@ -405,6 +663,28 @@ private void append(long head, long tail) { storage.writeLong(oldTail + ELEMENT_HEADER_NEXT_OFFSET, head); storage.writeLong(tail + ELEMENT_HEADER_NEXT_OFFSET, chain); storage.writeLong(chain + CHAIN_HEADER_TAIL_OFFSET, tail); + + if (OffHeapChainStorageEngine.this.hasContiguousChains) { + // we will have to move out any contiguous elements in the old chain as it is going to be freed soon + long current = head; + long prev = oldTail; + while (current != chain) { + final long next = storage.readLong(current + ELEMENT_HEADER_NEXT_OFFSET); + final int elemLength = storage.readInt(current + ELEMENT_HEADER_LENGTH_OFFSET); + if (elemLength < 0) { + final int elemLengthWithHeader = (Integer.MAX_VALUE & elemLength) + ELEMENT_HEADER_SIZE; + 
final long element = storage.allocate(elemLengthWithHeader); + storage.writeBuffer(element, storage.readBuffer(current, elemLengthWithHeader)); + storage.writeInt(element + ELEMENT_HEADER_LENGTH_OFFSET, elemLengthWithHeader - ELEMENT_HEADER_SIZE); + storage.writeLong(prev + ELEMENT_HEADER_NEXT_OFFSET, element); + prev = element; + } else { + prev = current; + } + current = next; + } + storage.writeLong(chain + CHAIN_HEADER_TAIL_OFFSET, prev); + } } private Element element(ByteBuffer attachedBuffer, final long sequence) { @@ -425,7 +705,7 @@ public long getSequenceNumber() { } private ByteBuffer readElementBuffer(long address) { - int elemLength = storage.readInt(address + ELEMENT_HEADER_LENGTH_OFFSET); + int elemLength = readElementLength(address); return storage.readBuffer(address + ELEMENT_HEADER_SIZE, elemLength); } @@ -436,12 +716,23 @@ private long readElementSequenceNumber(long address) { public void moved(long from, long to) { if (from == chain) { chain = to; + if (from != to) { + chainMoved(from, to); + } } } @Override public void close() { - OffHeapChainStorageEngine.this.activeChains.remove(this); + try { + if (this.chainModified) { + this.chainModified = false; + chainModified(chain); + } + } finally { + // must remove even if chain modified threw an unexpected exception + OffHeapChainStorageEngine.this.activeChains.remove(this); + } } } @@ -452,41 +743,44 @@ private long writeElement(long address, ByteBuffer element) { return address; } - private Long createAttachedChain(K key, int hash, PrimordialChain value) { + private Long createAttachedChain(K key, int hash, GenesisChain value) { ByteBuffer keyBuffer = keyPortability.encode(key); - ByteBuffer elemBuffer = value.element; - return createAttachedChain(keyBuffer, hash, elemBuffer); + return createAttachedChain(keyBuffer, hash, value.iterator()); } private Long createAttachedChain(ByteBuffer keyBuffer, int hash, ByteBuffer elemBuffer) { - long chain = storage.allocate(keyBuffer.remaining() + elemBuffer.remaining() + CHAIN_HEADER_SIZE + ELEMENT_HEADER_SIZE); + long chain = storage.allocate(keyBuffer.remaining() + elemBuffer.remaining() + this.totalChainHeaderSize + ELEMENT_HEADER_SIZE); if (chain < 0) { return null; } int keySize = keyBuffer.remaining(); storage.writeInt(chain + CHAIN_HEADER_KEY_HASH_OFFSET, hash); storage.writeInt(chain + CHAIN_HEADER_KEY_LENGTH_OFFSET, Integer.MIN_VALUE | keySize); - storage.writeBuffer(chain + CHAIN_HEADER_SIZE + ELEMENT_HEADER_SIZE + elemBuffer.remaining(), keyBuffer); - long element = chain + CHAIN_HEADER_SIZE; + storage.writeBuffer(chain + this.totalChainHeaderSize + ELEMENT_HEADER_SIZE + elemBuffer.remaining(), keyBuffer); + if (extendedChainHeaderSize > 0) { + storage.writeBuffer(chain + CHAIN_HEADER_SIZE, emptyExtendedChainHeader.duplicate()); + } + long element = chain + this.totalChainHeaderSize; writeElement(element, elemBuffer); storage.writeLong(element + ELEMENT_HEADER_NEXT_OFFSET, chain); storage.writeLong(chain + CHAIN_HEADER_TAIL_OFFSET, element); return chain; } - private Long createAttachedChain(ByteBuffer readKeyBuffer, int hash, Chain from) { - Iterator iterator = from.iterator(); + private Long createAttachedChain(ByteBuffer readKeyBuffer, int hash, Iterator iterator) { Long address = createAttachedChain(readKeyBuffer, hash, iterator.next().getPayload()); if (address == null) { return null; } - try (AttachedInternalChain chain = new AttachedInternalChain(address)) { - while (iterator.hasNext()) { - if (!chain.append(iterator.next().getPayload())) { - chain.free(); - return 
null; - } + if (iterator.hasNext()) { + try (AttachedInternalChain chain = new AttachedInternalChain(address)) { + do { + if (!chain.append(iterator.next().getPayload())) { + chain.free(); + return null; + } + } while (iterator.hasNext()); } } return address; @@ -536,8 +830,8 @@ public boolean moved(long from, long to) { return false; } else { long tail = storage.readLong(to + CHAIN_HEADER_TAIL_OFFSET); - if (tail == from + CHAIN_HEADER_SIZE) { - tail = to + CHAIN_HEADER_SIZE; + if (tail == from + OffHeapChainStorageEngine.this.totalChainHeaderSize) { + tail = to + OffHeapChainStorageEngine.this.totalChainHeaderSize; storage.writeLong(to + CHAIN_HEADER_TAIL_OFFSET, tail); } storage.writeLong(tail + ELEMENT_HEADER_NEXT_OFFSET, to); @@ -554,7 +848,7 @@ public boolean moved(long from, long to) { storage.writeLong(chain + CHAIN_HEADER_TAIL_OFFSET, to); } - long element = chain + CHAIN_HEADER_SIZE; + long element = chain + OffHeapChainStorageEngine.this.totalChainHeaderSize; while (element != chain) { long next = storage.readLong(element + ELEMENT_HEADER_NEXT_OFFSET); if (next == from) { @@ -572,9 +866,9 @@ public boolean moved(long from, long to) { public int sizeOf(long address) { if (isHead(address)) { int keySize = readKeySize(address); - return CHAIN_HEADER_SIZE + keySize + sizeOf(address + CHAIN_HEADER_SIZE); + return keySize + OffHeapChainStorageEngine.this.totalChainHeaderSize + sizeOf(address + OffHeapChainStorageEngine.this.totalChainHeaderSize); } else { - int elementSize = storage.readInt(address + ELEMENT_HEADER_LENGTH_OFFSET); + int elementSize = readElementLength(address); return ELEMENT_HEADER_SIZE + elementSize; } } diff --git a/clustered/server/src/main/java/org/ehcache/clustered/server/offheap/OffHeapServerStore.java b/clustered/server/src/main/java/org/ehcache/clustered/server/offheap/OffHeapServerStore.java index b1e7d84709..3547c6137a 100644 --- a/clustered/server/src/main/java/org/ehcache/clustered/server/offheap/OffHeapServerStore.java +++ b/clustered/server/src/main/java/org/ehcache/clustered/server/offheap/OffHeapServerStore.java @@ -18,6 +18,8 @@ import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.List; +import java.util.function.LongConsumer; +import java.util.function.LongFunction; import org.ehcache.clustered.common.internal.store.Chain; import org.ehcache.clustered.common.internal.store.ServerStore; @@ -39,20 +41,33 @@ public class OffHeapServerStore implements ServerStore, MapInternals { private final List> segments; private final KeySegmentMapper mapper; - OffHeapServerStore(PageSource source, KeySegmentMapper mapper) { + public OffHeapServerStore(List> segments, KeySegmentMapper mapper) { + this.mapper = mapper; + this.segments = segments; + } + + OffHeapServerStore(PageSource source, KeySegmentMapper mapper, boolean writeBehindConfigured) { this.mapper = mapper; segments = new ArrayList<>(mapper.getSegments()); for (int i = 0; i < mapper.getSegments(); i++) { - segments.add(new OffHeapChainMap<>(source, LongPortability.INSTANCE, KILOBYTES.toBytes(4), MEGABYTES.toBytes(8), false)); + if (writeBehindConfigured) { + segments.add(new PinningOffHeapChainMap<>(source, LongPortability.INSTANCE, KILOBYTES.toBytes(4), MEGABYTES.toBytes(8), false)); + } else { + segments.add(new OffHeapChainMap<>(source, LongPortability.INSTANCE, KILOBYTES.toBytes(4), MEGABYTES.toBytes(8), false)); + } } } - public OffHeapServerStore(ResourcePageSource source, KeySegmentMapper mapper) { + public OffHeapServerStore(ResourcePageSource source, KeySegmentMapper mapper, 
boolean writeBehindConfigured) { this.mapper = mapper; segments = new ArrayList<>(mapper.getSegments()); long maxSize = getMaxSize(source.getPool().getSize()); for (int i = 0; i < mapper.getSegments(); i++) { - segments.add(new OffHeapChainMap<>(source, LongPortability.INSTANCE, KILOBYTES.toBytes(4), (int) KILOBYTES.toBytes(maxSize), false)); + if (writeBehindConfigured) { + segments.add(new PinningOffHeapChainMap<>(source, LongPortability.INSTANCE, KILOBYTES.toBytes(4), (int) KILOBYTES.toBytes(maxSize), false)); + } else { + segments.add(new OffHeapChainMap<>(source, LongPortability.INSTANCE, KILOBYTES.toBytes(4), (int)KILOBYTES.toBytes(maxSize), false)); + } } } @@ -88,29 +103,7 @@ public void append(long key, ByteBuffer payLoad) { try { segmentFor(key).append(key, payLoad); } catch (OversizeMappingException e) { - if (handleOversizeMappingException(key)) { - try { - segmentFor(key).append(key, payLoad); - return; - } catch (OversizeMappingException ex) { - //ignore - } - } - - writeLockAll(); - try { - do { - try { - segmentFor(key).append(key, payLoad); - return; - } catch (OversizeMappingException ex) { - e = ex; - } - } while (handleOversizeMappingException(key)); - throw e; - } finally { - writeUnlockAll(); - } + consumeOversizeMappingException(key, (long k) -> segmentFor(k).append(k, payLoad)); } } @@ -119,27 +112,7 @@ public Chain getAndAppend(long key, ByteBuffer payLoad) { try { return segmentFor(key).getAndAppend(key, payLoad); } catch (OversizeMappingException e) { - if (handleOversizeMappingException(key)) { - try { - return segmentFor(key).getAndAppend(key, payLoad); - } catch (OversizeMappingException ex) { - //ignore - } - } - - writeLockAll(); - try { - do { - try { - return segmentFor(key).getAndAppend(key, payLoad); - } catch (OversizeMappingException ex) { - e = ex; - } - } while (handleOversizeMappingException(key)); - throw e; - } finally { - writeUnlockAll(); - } + return handleOversizeMappingException(key, (long k) -> segmentFor(k).getAndAppend(k, payLoad)); } } @@ -148,60 +121,24 @@ public void replaceAtHead(long key, Chain expect, Chain update) { try { segmentFor(key).replaceAtHead(key, expect, update); } catch (OversizeMappingException e) { - if (handleOversizeMappingException(key)) { - try { - segmentFor(key).replaceAtHead(key, expect, update); - return; - } catch (OversizeMappingException ex) { - //ignore - } - } - - writeLockAll(); - try { - do { - try { - segmentFor(key).replaceAtHead(key, expect, update); - return; - } catch (OversizeMappingException ex) { - e = ex; - } - } while (handleOversizeMappingException(key)); - throw e; - } finally { - writeUnlockAll(); - } + consumeOversizeMappingException(key, (long k) -> segmentFor(k).replaceAtHead(k, expect, update)); } } public void put(long key, Chain chain) { try { - segmentFor(key).put(key, chain); + try {segmentFor(key).put(key, chain); } catch (OversizeMappingException e) { - if (handleOversizeMappingException(key)) { - try { - segmentFor(key).put(key, chain); - } catch (OversizeMappingException ex) { - //ignore - } - } - - writeLockAll(); - try { - do { - try { - segmentFor(key).put(key, chain); - } catch (OversizeMappingException ex) { - e = ex; - } - } while (handleOversizeMappingException(key)); - throw e; - } finally { - writeUnlockAll(); - } + consumeOversizeMappingException(key, (long k) -> segmentFor(k).put(k, chain));} + } catch (Throwable t) { + segmentFor(key).remove(key); + throw t; } } + public void remove(long key) { + segmentFor(key).remove(key); + } @Override public void clear() { @@ 
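/*
 * Aside, not part of the diff: the effect of the new writeBehindConfigured flag
 * above. When true, every segment becomes a PinningOffHeapChainMap (introduced
 * later in this change) so unflushed write-behind operations cannot be evicted;
 * when false, the plain OffHeapChainMap is used as before. The KeySegmentMapper
 * import path and the 16-segment count are assumptions for illustration.
 */
import org.ehcache.clustered.server.KeySegmentMapper;
import org.ehcache.clustered.server.offheap.OffHeapServerStore;
import org.ehcache.clustered.server.state.ResourcePageSource;

class WriteBehindStoreSketch {
  static OffHeapServerStore storeFor(ResourcePageSource source, boolean writeBehindConfigured) {
    return new OffHeapServerStore(source, new KeySegmentMapper(16), writeBehindConfigured);
  }
}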
-226,10 +163,51 @@ private void writeUnlockAll() { } } - boolean handleOversizeMappingException(long hash) { + private void consumeOversizeMappingException(long key, LongConsumer operation) { + handleOversizeMappingException(key, k -> { + operation.accept(k); + return null; + }); + } + + /** + * Force eviction from other segments until {@code operation} succeeds or no further eviction is possible. + * + * @param key the target key + * @param operation the previously failed operation + * @param operation result type + * @return the operation result + * @throws OversizeMappingException if the operation cannot be made to succeed + */ + private R handleOversizeMappingException(long key, LongFunction operation) throws OversizeMappingException { + if (tryShrinkOthers(key)) { + try { + return operation.apply(key); + } catch (OversizeMappingException ex) { + //ignore + } + } + + writeLockAll(); + try { + OversizeMappingException e; + do { + try { + return operation.apply(key); + } catch (OversizeMappingException ex) { + e = ex; + } + } while (tryShrinkOthers(key)); + throw e; + } finally { + writeUnlockAll(); + } + } + + boolean tryShrinkOthers(long key) { boolean evicted = false; - OffHeapChainMap target = segmentFor(hash); + OffHeapChainMap target = segmentFor(key); for (OffHeapChainMap s : segments) { if (s != target) { evicted |= s.shrink(); diff --git a/clustered/server/src/main/java/org/ehcache/clustered/server/offheap/PinningOffHeapChainMap.java b/clustered/server/src/main/java/org/ehcache/clustered/server/offheap/PinningOffHeapChainMap.java new file mode 100644 index 0000000000..caa6d1154c --- /dev/null +++ b/clustered/server/src/main/java/org/ehcache/clustered/server/offheap/PinningOffHeapChainMap.java @@ -0,0 +1,93 @@ +/* + * Copyright Terracotta, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.ehcache.clustered.server.offheap; + +import org.ehcache.clustered.common.internal.store.Chain; +import org.ehcache.clustered.common.internal.store.Element; +import org.ehcache.clustered.common.internal.store.operations.codecs.OperationsCodec; +import org.terracotta.offheapstore.paging.PageSource; +import org.terracotta.offheapstore.storage.portability.Portability; + +import java.nio.ByteBuffer; +import java.util.concurrent.locks.Lock; +import java.util.function.Supplier; + +/** + * This class is used in WriteBehind implementation + */ +public class PinningOffHeapChainMap extends OffHeapChainMap { + + public PinningOffHeapChainMap(PageSource source, Portability keyPortability, int minPageSize, int maxPageSize, boolean shareByThieving) { + super(source, keyPortability, minPageSize, maxPageSize, shareByThieving); + } + + @Override + public Chain getAndAppend(K key, ByteBuffer element) { + return execute(key, () -> super.getAndAppend(key, element)); + } + + @Override + public void append(K key, ByteBuffer element) { + execute(key, () -> { + super.append(key, element); + return null; + }); + } + + @Override + public void put(K key, Chain chain) { + execute(key, () -> { + super.put(key, chain); + return null; + }); + } + + @Override + public void replaceAtHead(K key, Chain expected, Chain replacement) { + execute(key, () -> { + heads.setPinning(key, false); + super.replaceAtHead(key, expected, replacement); + return null; + }); + } + + private Chain execute(K key, Supplier supplier) { + final Lock lock = heads.writeLock(); + lock.lock(); + try { + return supplier.get(); + } finally { + pinIfNeeded(key); + lock.unlock(); + } + } + + private void pinIfNeeded(K key) { + InternalChain internalChain = heads.get(key); + if (internalChain != null && shouldBePinned(internalChain.detach())) { + heads.setPinning(key, true); + } + } + + private boolean shouldBePinned(Chain chain) { + for (Element element : chain) { + if (OperationsCodec.getOperationCode(element.getPayload()).shouldBePinned()) { + return true; + } + } + return false; + } +} diff --git a/clustered/server/src/main/java/org/ehcache/clustered/server/repo/ServerStateRepository.java b/clustered/server/src/main/java/org/ehcache/clustered/server/repo/ServerStateRepository.java index e6bc05876e..9942c16e5d 100644 --- a/clustered/server/src/main/java/org/ehcache/clustered/server/repo/ServerStateRepository.java +++ b/clustered/server/src/main/java/org/ehcache/clustered/server/repo/ServerStateRepository.java @@ -33,7 +33,7 @@ class ServerStateRepository { private final ConcurrentMap> concurrentMapRepo = new ConcurrentHashMap<>(); - EhcacheEntityResponse invoke(StateRepositoryOpMessage message) throws ClusterException { + EhcacheEntityResponse invoke(StateRepositoryOpMessage message) { String mapId = message.getMapId(); ConcurrentMap map = getStateMap(mapId); diff --git a/clustered/server/src/main/java/org/ehcache/clustered/server/repo/StateRepositoryManager.java b/clustered/server/src/main/java/org/ehcache/clustered/server/repo/StateRepositoryManager.java index 2f0255f0c4..cdad28f650 100644 --- a/clustered/server/src/main/java/org/ehcache/clustered/server/repo/StateRepositoryManager.java +++ b/clustered/server/src/main/java/org/ehcache/clustered/server/repo/StateRepositoryManager.java @@ -34,11 +34,11 @@ public class StateRepositoryManager { private final ConcurrentMap mapRepositoryMap = new ConcurrentHashMap<>(); - public void destroyStateRepository(String cacheId) throws ClusterException { + public void destroyStateRepository(String 
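/*
 * Aside, not part of the diff: the observable behaviour of PinningOffHeapChainMap
 * above. Every mutation runs under the segment write lock and ends in
 * pinIfNeeded(): if any element of the resulting chain carries an operation code
 * whose shouldBePinned() is true (a write-behind operation not yet flushed), the
 * key is pinned against eviction. replaceAtHead() unpins first and re-evaluates,
 * so a fully flushed chain becomes evictable again. This sketch is assumed to
 * live in the same package; the sizes and payload are illustrative.
 */
import java.nio.ByteBuffer;

import org.terracotta.offheapstore.paging.PageSource;

class PinningSketch {
  static void exercise(PageSource source, ByteBuffer payload) {
    PinningOffHeapChainMap<Long> map =
        new PinningOffHeapChainMap<>(source, LongPortability.INSTANCE,
                                     4 * 1024, 8 * 1024 * 1024, false);
    map.append(42L, payload);  // pins key 42 if the payload's op-code requires it
  }
}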
cacheId) { mapRepositoryMap.remove(cacheId); } - public EhcacheEntityResponse invoke(StateRepositoryOpMessage message) throws ClusterException { + public EhcacheEntityResponse invoke(StateRepositoryOpMessage message) { String cacheId = message.getCacheId(); ServerStateRepository currentRepo = getServerStateRepository(cacheId); return currentRepo.invoke(message); diff --git a/core/src/main/java/org/ehcache/core/internal/resilience/RecoveryCache.java b/clustered/server/src/main/java/org/ehcache/clustered/server/state/EhcacheStateContext.java similarity index 63% rename from core/src/main/java/org/ehcache/core/internal/resilience/RecoveryCache.java rename to clustered/server/src/main/java/org/ehcache/clustered/server/state/EhcacheStateContext.java index 5ebcac1c71..045ff8a7e7 100644 --- a/core/src/main/java/org/ehcache/core/internal/resilience/RecoveryCache.java +++ b/clustered/server/src/main/java/org/ehcache/clustered/server/state/EhcacheStateContext.java @@ -13,20 +13,15 @@ * See the License for the specific language governing permissions and * limitations under the License. */ +package org.ehcache.clustered.server.state; -package org.ehcache.core.internal.resilience; - -import org.ehcache.core.spi.store.StoreAccessException; +import com.tc.classloader.CommonComponent; /** - * - * @author Chris Dennis + * Marker interface to pass context between begin and end message processing. */ -public interface RecoveryCache { - - void obliterate() throws StoreAccessException; - - void obliterate(K key) throws StoreAccessException; - - void obliterate(Iterable keys) throws StoreAccessException; +@CommonComponent +@FunctionalInterface +public interface EhcacheStateContext extends AutoCloseable { + void close(); } diff --git a/clustered/server/src/main/java/org/ehcache/clustered/server/state/EhcacheStateService.java b/clustered/server/src/main/java/org/ehcache/clustered/server/state/EhcacheStateService.java index 3ae9833793..8309166839 100644 --- a/clustered/server/src/main/java/org/ehcache/clustered/server/state/EhcacheStateService.java +++ b/clustered/server/src/main/java/org/ehcache/clustered/server/state/EhcacheStateService.java @@ -19,7 +19,7 @@ import org.ehcache.clustered.common.ServerSideConfiguration; import org.ehcache.clustered.common.internal.ServerStoreConfiguration; import org.ehcache.clustered.common.internal.exceptions.ClusterException; -import org.ehcache.clustered.common.internal.exceptions.InvalidStoreException; +import org.ehcache.clustered.common.internal.messages.EhcacheOperationMessage; import org.ehcache.clustered.server.ServerSideServerStore; import org.ehcache.clustered.server.repo.StateRepositoryManager; import org.terracotta.entity.ConfigurationException; @@ -68,4 +68,5 @@ public interface EhcacheStateService { void loadExisting(ServerSideConfiguration configuration); + EhcacheStateContext beginProcessing(EhcacheOperationMessage message, String name); } diff --git a/clustered/server/src/main/java/org/ehcache/clustered/server/state/EhcacheStateServiceProvider.java b/clustered/server/src/main/java/org/ehcache/clustered/server/state/EhcacheStateServiceProvider.java index ab1c5ac533..e0ec5d9bfd 100644 --- a/clustered/server/src/main/java/org/ehcache/clustered/server/state/EhcacheStateServiceProvider.java +++ b/clustered/server/src/main/java/org/ehcache/clustered/server/state/EhcacheStateServiceProvider.java @@ -47,7 +47,7 @@ public class EhcacheStateServiceProvider implements ServiceProvider { private static final Logger LOGGER = 
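/*
 * Aside, not part of the diff: the intended consumption pattern for
 * EhcacheStateContext above. It extends AutoCloseable and its close() declares
 * no checked exception, so a caller can hold whatever guard beginProcessing()
 * returns for exactly the span of one message via try-with-resources. The
 * service, message and store name here are assumed for illustration.
 */
import org.ehcache.clustered.common.internal.messages.EhcacheOperationMessage;
import org.ehcache.clustered.server.state.EhcacheStateContext;
import org.ehcache.clustered.server.state.EhcacheStateService;

class MessageScopeSketch {
  static void process(EhcacheStateService service, EhcacheOperationMessage message, String storeName) {
    try (EhcacheStateContext guard = service.beginProcessing(message, storeName)) {
      // perform the store operation while the guard (e.g. a lock) is held
    }
  }
}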
LoggerFactory.getLogger(EhcacheStateServiceProvider.class); - private ConcurrentMap serviceMap = new ConcurrentHashMap<>(); + private final ConcurrentMap serviceMap = new ConcurrentHashMap<>(); private OffHeapResources offHeapResourcesProvider; @Override @@ -112,7 +112,7 @@ public Collection> getProvidedServiceTypes() { } @Override - public void prepareForSynchronization() throws ServiceProviderCleanupException { + public void prepareForSynchronization() { serviceMap.clear(); } diff --git a/clustered/server/src/main/java/org/ehcache/clustered/server/state/InvalidationTracker.java b/clustered/server/src/main/java/org/ehcache/clustered/server/state/InvalidationTracker.java index 30c5e75e4e..d3c95d3ba2 100644 --- a/clustered/server/src/main/java/org/ehcache/clustered/server/state/InvalidationTracker.java +++ b/clustered/server/src/main/java/org/ehcache/clustered/server/state/InvalidationTracker.java @@ -19,7 +19,6 @@ import com.tc.classloader.CommonComponent; import java.util.Set; -import java.util.concurrent.ConcurrentMap; @CommonComponent public interface InvalidationTracker { diff --git a/clustered/server/src/main/java/org/ehcache/clustered/server/state/ResourcePageSource.java b/clustered/server/src/main/java/org/ehcache/clustered/server/state/ResourcePageSource.java index 9827e45474..ceb6e968dd 100644 --- a/clustered/server/src/main/java/org/ehcache/clustered/server/state/ResourcePageSource.java +++ b/clustered/server/src/main/java/org/ehcache/clustered/server/state/ResourcePageSource.java @@ -63,10 +63,9 @@ public void free(Page page) { @Override public String toString() { - final StringBuilder sb = new StringBuilder("ResourcePageSource{"); - sb.append("pool=").append(pool); - sb.append(", delegatePageSource=").append(delegatePageSource); - sb.append('}'); - return sb.toString(); + String sb = "ResourcePageSource{" + "pool=" + pool + + ", delegatePageSource=" + delegatePageSource + + '}'; + return sb; } } diff --git a/clustered/server/src/main/java/org/ehcache/clustered/server/store/ClusterTierActiveEntity.java b/clustered/server/src/main/java/org/ehcache/clustered/server/store/ClusterTierActiveEntity.java index 56f4307cb6..8b1db23bc3 100644 --- a/clustered/server/src/main/java/org/ehcache/clustered/server/store/ClusterTierActiveEntity.java +++ b/clustered/server/src/main/java/org/ehcache/clustered/server/store/ClusterTierActiveEntity.java @@ -34,10 +34,11 @@ import org.ehcache.clustered.common.internal.messages.ServerStoreOpMessage.AppendMessage; import org.ehcache.clustered.common.internal.messages.ServerStoreOpMessage.ClientInvalidationAck; import org.ehcache.clustered.common.internal.messages.ServerStoreOpMessage.ClientInvalidationAllAck; -import org.ehcache.clustered.common.internal.messages.ServerStoreOpMessage.GetAndAppendMessage; import org.ehcache.clustered.common.internal.messages.ServerStoreOpMessage.GetMessage; import org.ehcache.clustered.common.internal.messages.ServerStoreOpMessage.KeyBasedServerStoreOpMessage; +import org.ehcache.clustered.common.internal.messages.ServerStoreOpMessage.LockMessage; import org.ehcache.clustered.common.internal.messages.ServerStoreOpMessage.ReplaceAtHeadMessage; +import org.ehcache.clustered.common.internal.messages.ServerStoreOpMessage.UnlockMessage; import org.ehcache.clustered.common.internal.messages.StateRepositoryOpMessage; import org.ehcache.clustered.common.internal.store.Chain; import org.ehcache.clustered.common.internal.store.ClusterTierEntityConfiguration; @@ -52,6 +53,7 @@ import 
org.ehcache.clustered.server.internal.messages.PassiveReplicationMessage.ClearInvalidationCompleteMessage; import org.ehcache.clustered.server.internal.messages.PassiveReplicationMessage.InvalidationCompleteMessage; import org.ehcache.clustered.server.management.ClusterTierManagement; +import org.ehcache.clustered.server.state.EhcacheStateContext; import org.ehcache.clustered.server.state.EhcacheStateService; import org.ehcache.clustered.server.state.InvalidationTracker; import org.ehcache.clustered.server.state.config.EhcacheStoreStateServiceConfig; @@ -71,7 +73,6 @@ import org.terracotta.entity.InvokeContext; import org.terracotta.entity.MessageCodecException; import org.terracotta.entity.PassiveSynchronizationChannel; -import org.terracotta.entity.ReconnectRejectedException; import org.terracotta.entity.ServiceException; import org.terracotta.entity.ServiceRegistry; import org.terracotta.entity.StateDumpCollector; @@ -83,22 +84,30 @@ import java.util.List; import java.util.Map; import java.util.Set; -import java.util.UUID; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.CompletableFuture; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Executor; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.SynchronousQueue; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicLong; -import java.util.concurrent.atomic.AtomicReference; import static java.util.stream.Collectors.toMap; +import static java.util.stream.Collectors.toSet; import static org.ehcache.clustered.common.internal.messages.EhcacheEntityResponse.allInvalidationDone; import static org.ehcache.clustered.common.internal.messages.EhcacheEntityResponse.clientInvalidateAll; import static org.ehcache.clustered.common.internal.messages.EhcacheEntityResponse.clientInvalidateHash; import static org.ehcache.clustered.common.internal.messages.EhcacheEntityResponse.failure; import static org.ehcache.clustered.common.internal.messages.EhcacheEntityResponse.getResponse; import static org.ehcache.clustered.common.internal.messages.EhcacheEntityResponse.hashInvalidationDone; +import static org.ehcache.clustered.common.internal.messages.EhcacheEntityResponse.lockFailure; +import static org.ehcache.clustered.common.internal.messages.EhcacheEntityResponse.lockSuccess; import static org.ehcache.clustered.common.internal.messages.EhcacheEntityResponse.resolveRequest; import static org.ehcache.clustered.common.internal.messages.EhcacheEntityResponse.serverInvalidateHash; import static org.ehcache.clustered.common.internal.messages.EhcacheEntityResponse.success; @@ -115,9 +124,19 @@ public class ClusterTierActiveEntity implements ActiveServerEntity()); private final String storeIdentifier; private final ServerStoreConfiguration configuration; @@ -134,8 +153,13 @@ public class ClusterTierActiveEntity implements ActiveServerEntity inflightInvalidations; - private final Set connectedClients = ConcurrentHashMap.newKeySet(); + private final Map connectedClients = new ConcurrentHashMap<>(); private final int chainCompactionLimit; + private final ServerLockManager lockManager; + + private final long dataSizeThreshold = Long.getLong(SYNC_DATA_SIZE_PROP, 
DEFAULT_SYNC_DATA_SIZE_THRESHOLD); + private final int dataGetsThreshold = Integer.getInteger(SYNC_DATA_GETS_PROP, DEFAULT_SYNC_DATA_GETS_THRESHOLD); + private volatile Integer dataMapInitialCapacity = null; @SuppressWarnings("unchecked") public ClusterTierActiveEntity(ServiceRegistry registry, ClusterTierEntityConfiguration entityConfiguration, KeySegmentMapper defaultMapper) throws ConfigurationException { @@ -149,7 +173,7 @@ public ClusterTierActiveEntity(ServiceRegistry registry, ClusterTierEntityConfig clientCommunicator = registry.getService(new CommunicatorServiceConfiguration()); stateService = registry.getService(new EhcacheStoreStateServiceConfig(entityConfiguration.getManagerIdentifier(), defaultMapper)); entityMessenger = registry.getService(new BasicServiceConfiguration<>(IEntityMessenger.class)); - messageHandler = registry.getService(new OOOMessageHandlerConfiguration(managerIdentifier + "###" + storeIdentifier, + messageHandler = registry.getService(new OOOMessageHandlerConfiguration<>(managerIdentifier + "###" + storeIdentifier, ClusterTierActiveEntity::isTrackedMessage, defaultMapper.getSegments() + 1, new MessageToTrackerSegmentFunction(clusterTierConcurrency(defaultMapper)))); } catch (ServiceException e) { throw new ConfigurationException("Unable to retrieve service: " + e.getMessage()); @@ -159,6 +183,11 @@ public ClusterTierActiveEntity(ServiceRegistry registry, ClusterTierEntityConfig } management = new ClusterTierManagement(registry, stateService, true, storeIdentifier, entityConfiguration.getManagerIdentifier()); chainCompactionLimit = Integer.getInteger(CHAIN_COMPACTION_THRESHOLD_PROP, DEFAULT_CHAIN_COMPACTION_THRESHOLD); + if (configuration.isLoaderWriterConfigured()) { + lockManager = new LockManagerImpl(); + } else { + lockManager = new NoopLockManager(); + } } static boolean isTrackedMessage(EhcacheEntityMessage msg) { @@ -174,7 +203,7 @@ public void addStateTo(StateDumpCollector dump) { ClusterTierDump.dump(dump, managerIdentifier, storeIdentifier, configuration); Set clients = new HashSet<>(getConnectedClients()); - List allClients = new ArrayList<>(clients.size()); + List> allClients = new ArrayList<>(clients.size()); for (ClientDescriptor entry : clients) { Map clientMap = new HashMap<>(1); clientMap.put("clientDescriptor", entry.toString()); @@ -188,7 +217,7 @@ public void addStateTo(StateDumpCollector dump) { public void createNew() throws ConfigurationException { ServerSideServerStore store = stateService.createStore(storeIdentifier, configuration, true); store.setEvictionListener(this::invalidateHashAfterEviction); - management.init(); + management.entityCreated(); } List getInflightInvalidations() { @@ -204,11 +233,11 @@ public void loadExisting() { } stateService.loadStore(storeIdentifier, configuration).setEvictionListener(this::invalidateHashAfterEviction); reconnectComplete.set(false); - management.reload(); + management.entityPromotionCompleted(); } private void invalidateHashAfterEviction(long key) { - Set clientsToInvalidate = new HashSet<>(getConnectedClients()); + Set clientsToInvalidate = new HashSet<>(getValidatedClients()); for (ClientDescriptor clientDescriptorThatHasToInvalidate : clientsToInvalidate) { LOGGER.debug("SERVER: eviction happened; asking client {} to invalidate hash {} from cache {}", clientDescriptorThatHasToInvalidate, key, storeIdentifier); try { @@ -221,7 +250,7 @@ private void invalidateHashAfterEviction(long key) { @Override public void connected(ClientDescriptor clientDescriptor) { - 
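Note on the client-tracking change just below: connectedClients becomes a map whose value records whether the client has validated the cluster tier. All three transitions appear in this diff:

  connectedClients.put(clientDescriptor, Boolean.FALSE); // connected(): attached but not yet validated
  connectedClients.put(clientDescriptor, Boolean.TRUE);  // validateServerStore() and reconnect: fully validated
  // getValidatedClients() keeps only the TRUE entries; only those clients receive invalidation requests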
connectedClients.add(clientDescriptor); + connectedClients.put(clientDescriptor, Boolean.FALSE); } @Override @@ -241,26 +270,32 @@ public void disconnected(ClientDescriptor clientDescriptor) { } } + lockManager.sweepLocksForClient(clientDescriptor, + configuration.isWriteBehindConfigured() ? null : heldKeys -> heldKeys.forEach(stateService.getStore(storeIdentifier)::remove)); + connectedClients.remove(clientDescriptor); } @Override - public EhcacheEntityResponse invokeActive(ActiveInvokeContext context, EhcacheEntityMessage message) throws EntityUserException { + public EhcacheEntityResponse invokeActive(ActiveInvokeContext context, EhcacheEntityMessage message) throws EntityUserException { return messageHandler.invoke(context, message, this::invokeActiveInternal); } + @SuppressWarnings("try") private EhcacheEntityResponse invokeActiveInternal(InvokeContext context, EhcacheEntityMessage message) { try { if (message instanceof EhcacheOperationMessage) { EhcacheOperationMessage operationMessage = (EhcacheOperationMessage) message; - EhcacheMessageType messageType = operationMessage.getMessageType(); - if (isStoreOperationMessage(messageType)) { - return invokeServerStoreOperation(context, (ServerStoreOpMessage) message); - } else if (isLifecycleMessage(messageType)) { - return invokeLifeCycleOperation(context, (LifecycleMessage) message); - } else if (isStateRepoOperationMessage(messageType)) { - return invokeStateRepositoryOperation((StateRepositoryOpMessage) message); + try (EhcacheStateContext ignored = stateService.beginProcessing(operationMessage, storeIdentifier)) { + EhcacheMessageType messageType = operationMessage.getMessageType(); + if (isStoreOperationMessage(messageType)) { + return invokeServerStoreOperation(context, (ServerStoreOpMessage) message); + } else if (isLifecycleMessage(messageType)) { + return invokeLifeCycleOperation(context, (LifecycleMessage) message); + } else if (isStateRepoOperationMessage(messageType)) { + return invokeStateRepositoryOperation((StateRepositoryOpMessage) message); + } } } throw new AssertionError("Unsupported message : " + message.getClass()); @@ -272,12 +307,13 @@ private EhcacheEntityResponse invokeActiveInternal(InvokeContext context, Ehcach } } - private EhcacheEntityResponse invokeStateRepositoryOperation(StateRepositoryOpMessage message) throws ClusterException { + private EhcacheEntityResponse invokeStateRepositoryOperation(StateRepositoryOpMessage message) { return stateService.getStateRepositoryManager().invoke(message); } private EhcacheEntityResponse invokeLifeCycleOperation(InvokeContext context, LifecycleMessage message) throws ClusterException { - ActiveInvokeContext activeInvokeContext = (ActiveInvokeContext)context; + @SuppressWarnings("unchecked") + ActiveInvokeContext activeInvokeContext = (ActiveInvokeContext) context; switch (message.getMessageType()) { case VALIDATE_SERVER_STORE: validateServerStore(activeInvokeContext.getClientDescriptor(), (ValidateServerStore) message); @@ -294,13 +330,15 @@ private void validateServerStore(ClientDescriptor clientDescriptor, ValidateServ ServerSideServerStore store = stateService.getStore(storeIdentifier); if (store != null) { storeCompatibility.verify(store.getStoreConfiguration(), clientConfiguration); + connectedClients.put(clientDescriptor, Boolean.TRUE); } else { throw new InvalidStoreException("cluster tier '" + storeIdentifier + "' does not exist"); } } private EhcacheEntityResponse invokeServerStoreOperation(InvokeContext context, ServerStoreOpMessage message) throws 
ClusterException { - ActiveInvokeContext activeInvokeContext = (ActiveInvokeContext) context; + @SuppressWarnings("unchecked") + ActiveInvokeContext activeInvokeContext = (ActiveInvokeContext) context; ClientDescriptor clientDescriptor = activeInvokeContext.getClientDescriptor(); ServerSideServerStore cacheStore = stateService.getStore(storeIdentifier); @@ -358,6 +396,9 @@ private EhcacheEntityResponse invokeServerStoreOperation(InvokeContext context, if (newChain.length() > chainCompactionLimit) { requestChainResolution(clientDescriptor, key, newChain); } + if (!configuration.isWriteBehindConfigured()) { + lockManager.unlock(key); + } return success(); } case GET_AND_APPEND: { @@ -416,6 +457,24 @@ private EhcacheEntityResponse invokeServerStoreOperation(InvokeContext context, invalidateAll(clientDescriptor); return success(); } + case LOCK: { + LockMessage lockMessage = (LockMessage) message; + if (lockManager.lock(lockMessage.getHash(), activeInvokeContext.getClientDescriptor())) { + try { + Chain chain = cacheStore.get(lockMessage.getHash()); + return lockSuccess(chain); + } catch (TimeoutException e) { + throw new AssertionError("Server side store is not expected to throw timeout exception"); + } + } else { + return lockFailure(); + } + } + case UNLOCK: { + UnlockMessage unlockMessage = (UnlockMessage) message; + lockManager.unlock(unlockMessage.getHash()); + return success(); + } default: throw new AssertionError("Unsupported ServerStore operation : " + message); } @@ -423,7 +482,7 @@ private EhcacheEntityResponse invokeServerStoreOperation(InvokeContext context, private void invalidateAll(ClientDescriptor originatingClientDescriptor) { int invalidationId = invalidationIdGenerator.getAndIncrement(); - Set clientsToInvalidate = new HashSet<>(getConnectedClients()); + Set clientsToInvalidate = new HashSet<>(getValidatedClients()); if (originatingClientDescriptor != null) { clientsToInvalidate.remove(originatingClientDescriptor); } @@ -494,7 +553,7 @@ private void clientInvalidated(ClientDescriptor clientDescriptor, int invalidati private void invalidateHashForClient(ClientDescriptor originatingClientDescriptor, long key) { int invalidationId = invalidationIdGenerator.getAndIncrement(); - Set clientsToInvalidate = new HashSet<>(getConnectedClients()); + Set clientsToInvalidate = new HashSet<>(getValidatedClients()); if (originatingClientDescriptor != null) { clientsToInvalidate.remove(originatingClientDescriptor); } @@ -534,7 +593,7 @@ private void requestChainResolution(ClientDescriptor clientDescriptor, long key, * @param message message to be forwarded * @param newChain resulting chain to send */ - private void sendMessageToSelfAndDeferRetirement(ActiveInvokeContext context, KeyBasedServerStoreOpMessage message, Chain newChain) { + private void sendMessageToSelfAndDeferRetirement(ActiveInvokeContext context, KeyBasedServerStoreOpMessage message, Chain newChain) { try { long clientId = context.getClientSource().toLong(); entityMessenger.messageSelfAndDeferRetirement(message, new PassiveReplicationMessage.ChainReplicationMessage(message.getKey(), newChain, @@ -567,10 +626,11 @@ public ReconnectHandler startReconnect() { ClusterTierReconnectMessage reconnectMessage = reconnectMessageCodec.decode(bytes); ServerSideServerStore serverStore = stateService.getStore(storeIdentifier); addInflightInvalidationsForStrongCache(clientDescriptor, reconnectMessage, serverStore); + lockManager.createLockStateAfterFailover(clientDescriptor, reconnectMessage.getLocksHeld()); LOGGER.info("Client '{}' 
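A condensed restatement of the new lock protocol handled above (no new behavior; TimeoutException handling elided, and the disconnect path comes from earlier in this file): a successful LOCK both acquires the entry and ships its current chain back, APPEND releases the lock when write-behind is not configured, and a disconnect sweeps whatever the client still holds.

  if (lockManager.lock(lockMessage.getHash(), clientDescriptor)) {
    return lockSuccess(cacheStore.get(lockMessage.getHash())); // lock granted; return the chain with the response
  } else {
    return lockFailure();                                      // key already locked by another client
  }
  // APPEND (no write-behind configured): lockManager.unlock(key);
  // disconnect: lockManager.sweepLocksForClient(client, heldKeys -> heldKeys.forEach(store::remove));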
successfully reconnected to newly promoted ACTIVE after failover.", clientDescriptor); - connectedClients.add(clientDescriptor); + connectedClients.put(clientDescriptor, Boolean.TRUE); }; } @@ -589,38 +649,171 @@ public void synchronizeKeyToPassive(PassiveSynchronizationChannel messageQ = new SynchronousQueue<>(); int segmentId = concurrencyKey - DEFAULT_KEY - 1; - Long dataSizeThreshold = Long.getLong(SYNC_DATA_SIZE_PROP, DEFAULT_SYNC_DATA_SIZE_THRESHOLD); - AtomicLong size = new AtomicLong(0); + Thread thisThread = Thread.currentThread(); + CompletableFuture asyncGet = CompletableFuture.runAsync( + () -> doGetsForSync(segmentId, messageQ, syncChannel, thisThread), SYNC_GETS_EXECUTOR); + try { + try { + while (messageQ.take().execute()) ; + } catch (InterruptedException e) { + interrupted = true; + } + if (interrupted) { + // here we may have been interrupted due to a genuine exception on the async get thread + // let us try not to lose that exception, as it takes precedence over the interrupt + asyncGet.get(10, TimeUnit.SECONDS); + // we received a genuine interrupt + throw new InterruptedException(); + } else { + asyncGet.get(); + } + } catch (InterruptedException | ExecutionException | TimeoutException e) { + throw new RuntimeException(e); + } + } + sendMessageTrackerReplication(syncChannel, concurrencyKey - 1); + + LOGGER.info("Sync complete for concurrency key {}.", concurrencyKey); + } + + private void doGetsForSync(int segmentId, BlockingQueue messageQ, + PassiveSynchronizationChannel syncChannel, Thread waitingThread) { + int numKeyGets = 0; + long dataSize = 0; + try { ServerSideServerStore store = stateService.getStore(storeIdentifier); - final AtomicReference> mappingsToSend = new AtomicReference<>(new HashMap<>()); - store.getSegmentKeySets().get(segmentId) - .forEach(key -> { - final Chain chain; - try { - chain = store.get(key); - } catch (TimeoutException e) { - throw new AssertionError("Server side store is not expected to throw timeout exception"); - } - for (Element element : chain) { - size.addAndGet(element.getPayload().remaining()); + Set keys = store.getSegmentKeySets().get(segmentId); + int remainingKeys = keys.size(); + Map mappingsToSend = new HashMap<>(computeInitialMapCapacity(remainingKeys)); + boolean capacityAdjusted = false; + for (Long key : keys) { + final Chain chain; + try { + chain = store.get(key); + if (chain.isEmpty()) { + // evicted; just continue with the next key + remainingKeys--; + continue; } - mappingsToSend.get().put(key, chain); - if (size.get() > dataSizeThreshold) { - syncChannel.synchronizeToPassive(new EhcacheDataSyncMessage(mappingsToSend.get())); - mappingsToSend.set(new HashMap<>()); - size.set(0); + numKeyGets++; + } catch (TimeoutException e) { + throw new AssertionError("Server side store is not expected to throw timeout exception"); + } + for (Element element : chain) { + dataSize += element.getPayload().remaining(); + } + mappingsToSend.put(key, chain); + if (dataSize > dataSizeThreshold || numKeyGets >= dataGetsThreshold) { + putMessage(messageQ, syncChannel, mappingsToSend); + if (!capacityAdjusted && segmentId == 0) { + capacityAdjusted = true; + adjustInitialCapacity(numKeyGets); + } + remainingKeys -= numKeyGets; + mappingsToSend = new HashMap<>(computeMapCapacity(remainingKeys, numKeyGets)); + dataSize = 0; + numKeyGets = 0; + } + } + if (!mappingsToSend.isEmpty()) { + putMessage(messageQ, syncChannel, mappingsToSend); + } + // put the last message indicator into the queue + putMessage(messageQ, null, null); + } catch (Throwable e) { 
+ // ensure the waiting peer thread is interrupted if we run into trouble + waitingThread.interrupt(); + throw e; + } + } + + private void putMessage(BlockingQueue messageQ, + PassiveSynchronizationChannel syncChannel, + Map mappingsToSend) { + try { + if (syncChannel != null) { + final EhcacheDataSyncMessage msg = new EhcacheDataSyncMessage(mappingsToSend); + messageQ.put(() -> { + syncChannel.synchronizeToPassive(msg); + return true; }); - if (!mappingsToSend.get().isEmpty()) { - syncChannel.synchronizeToPassive(new EhcacheDataSyncMessage(mappingsToSend.get())); - mappingsToSend.set(new HashMap<>()); - size.set(0); + } else { + // we are done + messageQ.put(() -> false); } + } catch (InterruptedException e) { + throw new RuntimeException(e); } - sendMessageTrackerReplication(syncChannel, concurrencyKey - 1); + } - LOGGER.info("Sync complete for concurrency key {}.", concurrencyKey); + /** + * Compute map capacity based on {@code remainingSize} and {@code expectedGets}. Both vary depending on the size of + * the chains and the number of keys in the map. + *
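+ * For example (illustrative numbers): remainingSize = 300 and expectedGets = 500 yields
+ * (int) (300 / 0.75f + 1.0f) = 401, a capacity the map can fill completely without
+ * resizing at the default 0.75 load factor.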

+ * NOTE: if expected gets dips below 32, keep the capacity at 32: a low value indicates a large segment with a possibly small number of keys, + * which means the next iteration may put more keys in the map. + * + * @param remainingSize is the number of keys left in the segment yet to be sent + * @param expectedGets is the maximum number of keys expected to fit in the map before it fills up + * @return required capacity for the map. + */ + private int computeMapCapacity(int remainingSize, int expectedGets) { + if (remainingSize < 16) { + return 16; + } else if (expectedGets < 32) { + return 32; + } else if (remainingSize < expectedGets) { + return (int) ((float) remainingSize / 0.75f + 1.0f); + } else { + return (int) ((float) expectedGets / 0.75f + 1.0f); + } + } + + /** + * Adjust {@link #dataMapInitialCapacity} based on what we learned about the cache during the iteration of segment 0. + * + * NOTE: The required capacity calculation and the initial capacity adjustment assume some symmetry across + * multiple segments; in a given segment some keys may have chains with large payloads while others have small + * chains with smaller data sizes, but over a larger sweep that should even out. Even if it does not, this should + * still perform better: for a large cache the initial size is reset to a minimum of 32 rather than 16, while a + * very small cache starts with an initial size of 16 because {@link #computeInitialMapCapacity(int)} + * takes the total number of keys in the segment into account. + * + * @param actualKeyGets the actual number of keys we got when the map got full + */ + private void adjustInitialCapacity(int actualKeyGets) { + // even when there are large data chains with few keys, keep the lower bound at 32 + dataMapInitialCapacity = (actualKeyGets < 32) ? 32 : (int) ((float) actualKeyGets / 0.75f + 1.0f); + } + + /** + * Starts with an initial size derived from the configured {@link #dataGetsThreshold}, or from the adjusted + * initial size, unless the cache segment holds fewer keys than that. + * + * @param totalKeys is the total number of keys in this segment + * @return the initial capacity for the data map + */ + private int computeInitialMapCapacity(int totalKeys) { + if (dataMapInitialCapacity == null) { + dataMapInitialCapacity = (int) ((float) dataGetsThreshold / 0.75f + 1.0f); + } + if (totalKeys < 16) { + return 16; + } else if (totalKeys < dataMapInitialCapacity) { + return (int) ((float) totalKeys / 0.75f + 1.0f); + } else { + return dataMapInitialCapacity; + } + } + + /** + * Executes message sending asynchronously with respect to message preparation. 
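+ * A handler returns {@code true} after it has sent a data message and {@code false} to signal the end of the
+ * stream: putMessage enqueues {@code () -> false} as the last-message indicator, which terminates the
+ * {@code while (messageQ.take().execute());} loop on the entity thread.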
+ */ + @FunctionalInterface + private interface DataSyncMessageHandler { + boolean execute(); } private void sendMessageTrackerReplication(PassiveSynchronizationChannel syncChannel, int concurrencyKey) { @@ -643,7 +836,11 @@ public void destroy() { } Set getConnectedClients() { - return connectedClients; + return connectedClients.keySet(); + } + + Set getValidatedClients() { + return connectedClients.entrySet().stream().filter(Map.Entry::getValue).map(Map.Entry::getKey).collect(toSet()); } ConcurrentMap getClientsWaitingForInvalidation() { diff --git a/clustered/server/src/main/java/org/ehcache/clustered/server/store/ClusterTierPassiveEntity.java b/clustered/server/src/main/java/org/ehcache/clustered/server/store/ClusterTierPassiveEntity.java index 8ee9348c4f..50a7428a39 100644 --- a/clustered/server/src/main/java/org/ehcache/clustered/server/store/ClusterTierPassiveEntity.java +++ b/clustered/server/src/main/java/org/ehcache/clustered/server/store/ClusterTierPassiveEntity.java @@ -37,6 +37,7 @@ import org.ehcache.clustered.server.internal.messages.PassiveReplicationMessage.InvalidationCompleteMessage; import org.ehcache.clustered.server.internal.messages.PassiveReplicationMessage.ChainReplicationMessage; import org.ehcache.clustered.server.management.ClusterTierManagement; +import org.ehcache.clustered.server.state.EhcacheStateContext; import org.ehcache.clustered.server.state.EhcacheStateService; import org.ehcache.clustered.server.state.config.EhcacheStoreStateServiceConfig; import org.slf4j.Logger; @@ -51,6 +52,7 @@ import org.terracotta.entity.ServiceException; import org.terracotta.entity.ServiceRegistry; import org.terracotta.entity.StateDumpCollector; +import org.terracotta.offheapstore.exceptions.OversizeMappingException; import java.util.concurrent.TimeoutException; @@ -111,7 +113,7 @@ public void addStateTo(StateDumpCollector dump) { @Override public void createNew() throws ConfigurationException { stateService.createStore(storeIdentifier, configuration, false); - management.init(); + management.entityCreated(); } private boolean isEventual() { @@ -160,11 +162,12 @@ public int getConcurrencyKey() { messageHandler.invoke(realContext, message, this::invokePassiveInternal); } + @SuppressWarnings("try") private EhcacheEntityResponse invokePassiveInternal(InvokeContext context, EhcacheEntityMessage message) { if (message instanceof EhcacheOperationMessage) { EhcacheOperationMessage operationMessage = (EhcacheOperationMessage) message; - EhcacheMessageType messageType = operationMessage.getMessageType(); - try { + try (EhcacheStateContext ignored = stateService.beginProcessing(operationMessage, storeIdentifier)) { + EhcacheMessageType messageType = operationMessage.getMessageType(); if (isStoreOperationMessage(messageType)) { invokeServerStoreOperation((ServerStoreOpMessage) message); } else if (isStateRepoOperationMessage(messageType)) { @@ -174,16 +177,12 @@ private EhcacheEntityResponse invokePassiveInternal(InvokeContext context, Ehcac } else { throw new AssertionError("Unsupported EhcacheOperationMessage: " + operationMessage.getMessageType()); } - } catch (ClusterException e) { + } catch (ClusterException | OversizeMappingException e) { // The above operations are not critical enough to fail a passive, so just log the exception LOGGER.error("Unexpected exception raised during operation: " + message, e); } } else if (message instanceof EhcacheSyncMessage) { - try { - invokeSyncOperation(context, (EhcacheSyncMessage) message); - } catch (ClusterException e) { - throw new 
IllegalStateException("Sync operation failed", e); - } + invokeSyncOperation(context, (EhcacheSyncMessage) message); } else { throw new AssertionError("Unsupported EhcacheEntityMessage: " + message.getClass()); } @@ -192,7 +191,7 @@ private EhcacheEntityResponse invokePassiveInternal(InvokeContext context, Ehcac return success(); } - private void invokeSyncOperation(InvokeContext context, EhcacheSyncMessage message) throws ClusterException { + private void invokeSyncOperation(InvokeContext context, EhcacheSyncMessage message) { switch (message.getMessageType()) { case DATA: EhcacheDataSyncMessage dataSyncMessage = (EhcacheDataSyncMessage) message; diff --git a/clustered/server/src/main/java/org/ehcache/clustered/server/store/LockManagerImpl.java b/clustered/server/src/main/java/org/ehcache/clustered/server/store/LockManagerImpl.java new file mode 100644 index 0000000000..cbaf00ee2a --- /dev/null +++ b/clustered/server/src/main/java/org/ehcache/clustered/server/store/LockManagerImpl.java @@ -0,0 +1,72 @@ +/* + * Copyright Terracotta, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.ehcache.clustered.server.store; + +import org.terracotta.entity.ClientDescriptor; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.function.Consumer; + +public class LockManagerImpl implements ServerLockManager { + + private final Map blockedKeys = new ConcurrentHashMap<>(); + + @Override + public boolean lock(long key, ClientDescriptor client) { + if (blockedKeys.containsKey(key)) { + return false; + } + blockedKeys.put(key, client); + return true; + } + + @Override + public void unlock(long key) { + blockedKeys.remove(key); + } + + @Override + public void createLockStateAfterFailover(ClientDescriptor client, Set locksHeld) { + locksHeld.forEach(key -> { + ClientDescriptor absent = blockedKeys.putIfAbsent(key, client); + if (absent != null) { + throw new IllegalStateException("Key is already locked"); + } + }); + } + + @Override + public void sweepLocksForClient(ClientDescriptor client, Consumer> removeHeldKeys) { + Set> entries = new HashSet<>(Collections.unmodifiableSet(blockedKeys.entrySet())); + List heldKeys = new ArrayList<>(); + entries.forEach(entry -> { + if (entry.getValue().equals(client)) { + heldKeys.add(entry.getKey()); + blockedKeys.remove(entry.getKey()); + } + }); + + if (removeHeldKeys != null) { + removeHeldKeys.accept(heldKeys); + } + } +} diff --git a/clustered/server/src/main/java/org/ehcache/clustered/server/store/MessageToTrackerSegmentFunction.java b/clustered/server/src/main/java/org/ehcache/clustered/server/store/MessageToTrackerSegmentFunction.java index 79f73c4157..3cc048688f 100644 --- a/clustered/server/src/main/java/org/ehcache/clustered/server/store/MessageToTrackerSegmentFunction.java +++ 
b/clustered/server/src/main/java/org/ehcache/clustered/server/store/MessageToTrackerSegmentFunction.java @@ -26,12 +26,12 @@ */ class MessageToTrackerSegmentFunction implements ToIntFunction { - private ConcurrencyStrategy concurrencyStrategy; + private final ConcurrencyStrategy concurrencyStrategy; MessageToTrackerSegmentFunction(ConcurrencyStrategy concurrencyStrategy) { this.concurrencyStrategy = concurrencyStrategy; } + @Override public int applyAsInt(EhcacheEntityMessage value) { // Concurrency is 1 based, segments are 0 based diff --git a/clustered/server/src/main/java/org/ehcache/clustered/server/store/NoopLockManager.java b/clustered/server/src/main/java/org/ehcache/clustered/server/store/NoopLockManager.java new file mode 100644 index 0000000000..89be57a019 --- /dev/null +++ b/clustered/server/src/main/java/org/ehcache/clustered/server/store/NoopLockManager.java @@ -0,0 +1,43 @@ +/* + * Copyright Terracotta, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.ehcache.clustered.server.store; + +import org.terracotta.entity.ClientDescriptor; + +import java.util.List; +import java.util.Set; +import java.util.function.Consumer; + +public class NoopLockManager implements ServerLockManager { + @Override + public boolean lock(long key, ClientDescriptor client) { + return false; + } + + @Override + public void unlock(long key) { + } + + @Override + public void createLockStateAfterFailover(ClientDescriptor client, Set locksHeld) { + + } + + @Override + public void sweepLocksForClient(ClientDescriptor client, Consumer> removeHeldKeys) { + + } } diff --git a/clustered/server/src/main/java/org/ehcache/clustered/server/store/ServerLockManager.java b/clustered/server/src/main/java/org/ehcache/clustered/server/store/ServerLockManager.java new file mode 100644 index 0000000000..2a22de6df2 --- /dev/null +++ b/clustered/server/src/main/java/org/ehcache/clustered/server/store/ServerLockManager.java @@ -0,0 +1,34 @@ +/* + * Copyright Terracotta, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.ehcache.clustered.server.store; + +import org.terracotta.entity.ClientDescriptor; + +import java.util.List; +import java.util.Set; +import java.util.function.Consumer; + +public interface ServerLockManager { + + boolean lock(long key, ClientDescriptor client); + + void unlock(long key); + + void createLockStateAfterFailover(ClientDescriptor client, Set locksHeld); + + void sweepLocksForClient(ClientDescriptor client, Consumer> removeHeldKeys); + +} diff --git a/clustered/server/src/test/java/org/ehcache/clustered/lock/server/VoltronReadWriteLockActiveEntityTest.java b/clustered/server/src/test/java/org/ehcache/clustered/lock/server/VoltronReadWriteLockActiveEntityTest.java index 9f6178071a..3dd2518779 100644 --- a/clustered/server/src/test/java/org/ehcache/clustered/lock/server/VoltronReadWriteLockActiveEntityTest.java +++ b/clustered/server/src/test/java/org/ehcache/clustered/lock/server/VoltronReadWriteLockActiveEntityTest.java @@ -18,7 +18,13 @@ import org.ehcache.clustered.common.internal.lock.LockMessaging; import org.ehcache.clustered.common.internal.lock.LockMessaging.LockTransition; import org.hamcrest.beans.HasPropertyWithValue; +import org.junit.Rule; import org.junit.Test; +import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnit; +import org.mockito.junit.MockitoRule; +import org.terracotta.entity.ActiveInvokeContext; import org.terracotta.entity.ClientCommunicator; import org.terracotta.entity.ClientDescriptor; import org.terracotta.entity.EntityResponse; @@ -28,140 +34,129 @@ import static org.ehcache.clustered.common.internal.lock.LockMessaging.HoldType.WRITE; import static org.hamcrest.core.Is.is; import static org.junit.Assert.assertThat; -import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.ArgumentMatchers.same; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; import static org.mockito.hamcrest.MockitoHamcrest.argThat; public class VoltronReadWriteLockActiveEntityTest { - @Test - public void testWriteLock() { - ClientCommunicator communicator = mock(ClientCommunicator.class); - VoltronReadWriteLockActiveEntity entity = new VoltronReadWriteLockActiveEntity(communicator); + @Rule + public MockitoRule rule = MockitoJUnit.rule(); - ClientDescriptor client = mock(ClientDescriptor.class); + @Mock + private ClientCommunicator communicator; - LockTransition transition = entity.invoke(client, LockMessaging.lock(WRITE)); + @InjectMocks + VoltronReadWriteLockActiveEntity entity; - assertThat(transition.isAcquired(), is(true)); + private ActiveInvokeContext context = newContext(); + + private static ActiveInvokeContext newContext() { + @SuppressWarnings("unchecked") + ActiveInvokeContext context = mock(ActiveInvokeContext.class); + when(context.getClientDescriptor()).thenReturn(mock(ClientDescriptor.class)); + return context; } @Test - public void testReadLock() { - ClientCommunicator communicator = mock(ClientCommunicator.class); - VoltronReadWriteLockActiveEntity entity = new VoltronReadWriteLockActiveEntity(communicator); + public void testWriteLock() { + LockTransition transition = entity.invokeActive(context, LockMessaging.lock(WRITE)); - ClientDescriptor client = mock(ClientDescriptor.class); + assertThat(transition.isAcquired(), is(true)); + } - LockTransition transition = entity.invoke(client, LockMessaging.lock(READ)); + @Test + public void testReadLock() { + LockTransition transition = 
entity.invokeActive(context, LockMessaging.lock(READ)); assertThat(transition.isAcquired(), is(true)); } @Test public void testWriteUnlock() { - ClientCommunicator communicator = mock(ClientCommunicator.class); - VoltronReadWriteLockActiveEntity entity = new VoltronReadWriteLockActiveEntity(communicator); + entity.invokeActive(context, LockMessaging.lock(WRITE)); - ClientDescriptor client = mock(ClientDescriptor.class); - entity.invoke(client, LockMessaging.lock(WRITE)); - - LockTransition transition = entity.invoke(client, LockMessaging.unlock(WRITE)); + LockTransition transition = entity.invokeActive(context, LockMessaging.unlock(WRITE)); assertThat(transition.isReleased(), is(true)); } @Test public void testReadUnlock() { - ClientCommunicator communicator = mock(ClientCommunicator.class); - VoltronReadWriteLockActiveEntity entity = new VoltronReadWriteLockActiveEntity(communicator); - - ClientDescriptor client = mock(ClientDescriptor.class); - entity.invoke(client, LockMessaging.lock(READ)); + entity.invokeActive(context, LockMessaging.lock(READ)); - LockTransition transition = entity.invoke(client, LockMessaging.unlock(READ)); + LockTransition transition = entity.invokeActive(context, LockMessaging.unlock(READ)); assertThat(transition.isReleased(), is(true)); } @Test public void testTryWriteLockWhenWriteLocked() { - ClientCommunicator communicator = mock(ClientCommunicator.class); - VoltronReadWriteLockActiveEntity entity = new VoltronReadWriteLockActiveEntity(communicator); + entity.invokeActive(context, LockMessaging.lock(WRITE)); - entity.invoke(mock(ClientDescriptor.class), LockMessaging.lock(WRITE)); - - LockTransition transition = entity.invoke(mock(ClientDescriptor.class), LockMessaging.tryLock(WRITE)); + LockTransition transition = entity.invokeActive(newContext(), LockMessaging.tryLock(WRITE)); assertThat(transition.isAcquired(), is(false)); } @Test public void testTryReadLockWhenWriteLocked() { - ClientCommunicator communicator = mock(ClientCommunicator.class); - VoltronReadWriteLockActiveEntity entity = new VoltronReadWriteLockActiveEntity(communicator); - - entity.invoke(mock(ClientDescriptor.class), LockMessaging.lock(WRITE)); + entity.invokeActive(context, LockMessaging.lock(WRITE)); - LockTransition transition = entity.invoke(mock(ClientDescriptor.class), LockMessaging.tryLock(READ)); + LockTransition transition = entity.invokeActive(newContext(), LockMessaging.tryLock(READ)); assertThat(transition.isAcquired(), is(false)); } @Test public void testTryWriteLockWhenReadLocked() { - ClientCommunicator communicator = mock(ClientCommunicator.class); - VoltronReadWriteLockActiveEntity entity = new VoltronReadWriteLockActiveEntity(communicator); + entity.invokeActive(context, LockMessaging.lock(READ)); - entity.invoke(mock(ClientDescriptor.class), LockMessaging.lock(READ)); - - LockTransition transition = entity.invoke(mock(ClientDescriptor.class), LockMessaging.tryLock(WRITE)); + LockTransition transition = entity.invokeActive(newContext(), LockMessaging.tryLock(WRITE)); assertThat(transition.isAcquired(), is(false)); } @Test public void testTryReadLockWhenReadLocked() { - ClientCommunicator communicator = mock(ClientCommunicator.class); - VoltronReadWriteLockActiveEntity entity = new VoltronReadWriteLockActiveEntity(communicator); - - entity.invoke(mock(ClientDescriptor.class), LockMessaging.lock(READ)); + entity.invokeActive(context, LockMessaging.lock(READ)); - LockTransition transition = entity.invoke(mock(ClientDescriptor.class), LockMessaging.tryLock(READ)); + 
LockTransition transition = entity.invokeActive(newContext(), LockMessaging.tryLock(READ)); assertThat(transition.isAcquired(), is(true)); } @Test public void testWriteUnlockNotifiesListeners() throws MessageCodecException { - ClientCommunicator communicator = mock(ClientCommunicator.class); - VoltronReadWriteLockActiveEntity entity = new VoltronReadWriteLockActiveEntity(communicator); + ActiveInvokeContext locker = newContext(); + ActiveInvokeContext waiter = newContext(); - ClientDescriptor locker = mock(ClientDescriptor.class); - ClientDescriptor waiter = mock(ClientDescriptor.class); + ClientDescriptor waiterDescriptor = () -> null; + when(waiter.getClientDescriptor()).thenReturn(waiterDescriptor); - entity.invoke(locker, LockMessaging.lock(WRITE)); - entity.invoke(waiter, LockMessaging.lock(WRITE)); - entity.invoke(locker, LockMessaging.unlock(WRITE)); + entity.invokeActive(locker, LockMessaging.lock(WRITE)); + entity.invokeActive(waiter, LockMessaging.lock(WRITE)); + entity.invokeActive(locker, LockMessaging.unlock(WRITE)); - verify(communicator).sendNoResponse(eq(waiter), argThat( + verify(communicator).sendNoResponse(same(waiterDescriptor), argThat( HasPropertyWithValue.hasProperty("released", is(true)))); } @Test public void testReadUnlockNotifiesListeners() throws MessageCodecException { - ClientCommunicator communicator = mock(ClientCommunicator.class); - VoltronReadWriteLockActiveEntity entity = new VoltronReadWriteLockActiveEntity(communicator); + ActiveInvokeContext locker = newContext(); + ActiveInvokeContext waiter = newContext(); - ClientDescriptor locker = mock(ClientDescriptor.class); - ClientDescriptor waiter = mock(ClientDescriptor.class); + ClientDescriptor waiterDescriptor = () -> null; + when(waiter.getClientDescriptor()).thenReturn(waiterDescriptor); - entity.invoke(locker, LockMessaging.lock(READ)); - entity.invoke(waiter, LockMessaging.lock(WRITE)); - entity.invoke(locker, LockMessaging.unlock(READ)); + entity.invokeActive(locker, LockMessaging.lock(READ)); + entity.invokeActive(waiter, LockMessaging.lock(WRITE)); + entity.invokeActive(locker, LockMessaging.unlock(READ)); - verify(communicator).sendNoResponse(eq(waiter), argThat( + verify(communicator).sendNoResponse(same(waiterDescriptor), argThat( HasPropertyWithValue.hasProperty("released", is(true)))); } diff --git a/clustered/server/src/test/java/org/ehcache/clustered/server/ClusterTierManagerActiveEntityTest.java b/clustered/server/src/test/java/org/ehcache/clustered/server/ClusterTierManagerActiveEntityTest.java index 9fcf2a967e..89856b2326 100644 --- a/clustered/server/src/test/java/org/ehcache/clustered/server/ClusterTierManagerActiveEntityTest.java +++ b/clustered/server/src/test/java/org/ehcache/clustered/server/ClusterTierManagerActiveEntityTest.java @@ -46,7 +46,6 @@ import java.util.HashSet; import java.util.Map; import java.util.Set; -import java.util.UUID; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; @@ -183,14 +182,14 @@ public void testConfigureMissingDefaultResource() throws Exception { public void testConfigureLargeSharedPool() throws Exception { final OffHeapIdentifierRegistry registry = new OffHeapIdentifierRegistry(); registry.addResource("defaultServerResource", 64, MemoryUnit.MEGABYTES); - registry.addResource("serverResource1", 32, MemoryUnit.MEGABYTES); - registry.addResource("serverResource2", 32, MemoryUnit.MEGABYTES); + registry.addResource("serverResource1", 8, MemoryUnit.MEGABYTES); + 
registry.addResource("serverResource2", 8, MemoryUnit.MEGABYTES); ServerSideConfiguration serverSideConfiguration = new ServerSideConfigBuilder() .defaultResource("defaultServerResource") .sharedPool("primary", "serverResource1", 4, MemoryUnit.MEGABYTES) - .sharedPool("secondary", "serverResource2", 8, MemoryUnit.MEGABYTES) - .sharedPool("tooBig", "serverResource2", 64, MemoryUnit.MEGABYTES) + .sharedPool("secondary", "serverResource2", 4, MemoryUnit.MEGABYTES) + .sharedPool("tooBig", "serverResource2", 16, MemoryUnit.MEGABYTES) .build(); ClusterTierManagerConfiguration configuration = new ClusterTierManagerConfiguration("identifier", serverSideConfiguration); EhcacheStateService ehcacheStateService = registry.getService(new EhcacheStateServiceConfig(configuration, registry, DEFAULT_MAPPER)); @@ -231,7 +230,6 @@ public void testValidate2Clients() throws Exception { assertSuccess(activeEntity.invokeActive(context, MESSAGE_FACTORY.validateStoreManager(serverSideConfig))); - UUID client2Id = UUID.randomUUID(); TestInvokeContext context2 = new TestInvokeContext(); activeEntity.connected(context2.getClientDescriptor()); @@ -410,7 +408,7 @@ public void testValidateClientSharedPoolSizeTooBig() throws Exception { ServerSideConfiguration configure = new ServerSideConfigBuilder() .defaultResource("defaultServerResource1") .sharedPool("primary", "serverResource1", 4, MemoryUnit.MEGABYTES) - .sharedPool("secondary", "serverResource2", 32, MemoryUnit.MEGABYTES) + .sharedPool("secondary", "serverResource2", 8, MemoryUnit.MEGABYTES) .build(); ClusterTierManagerConfiguration configuration = new ClusterTierManagerConfiguration("identifier", configure); EhcacheStateService ehcacheStateService = registry.getService(new EhcacheStateServiceConfig(configuration, registry, DEFAULT_MAPPER)); diff --git a/clustered/server/src/test/java/org/ehcache/clustered/server/ClusterTierManagerPassiveEntityTest.java b/clustered/server/src/test/java/org/ehcache/clustered/server/ClusterTierManagerPassiveEntityTest.java index 4dfdecef80..25a340b5a9 100644 --- a/clustered/server/src/test/java/org/ehcache/clustered/server/ClusterTierManagerPassiveEntityTest.java +++ b/clustered/server/src/test/java/org/ehcache/clustered/server/ClusterTierManagerPassiveEntityTest.java @@ -167,14 +167,14 @@ public void testConfigureMissingDefaultResource() throws Exception { public void testConfigureLargeSharedPool() throws Exception { final OffHeapIdentifierRegistry registry = new OffHeapIdentifierRegistry(); registry.addResource("defaultServerResource", 64, MemoryUnit.MEGABYTES); - registry.addResource("serverResource1", 32, MemoryUnit.MEGABYTES); - registry.addResource("serverResource2", 32, MemoryUnit.MEGABYTES); + registry.addResource("serverResource1", 8, MemoryUnit.MEGABYTES); + registry.addResource("serverResource2", 8, MemoryUnit.MEGABYTES); ServerSideConfiguration serverSideConfiguration = new ServerSideConfigBuilder() .defaultResource("defaultServerResource") .sharedPool("primary", "serverResource1", 4, MemoryUnit.MEGABYTES) - .sharedPool("secondary", "serverResource2", 8, MemoryUnit.MEGABYTES) - .sharedPool("tooBig", "serverResource2", 64, MemoryUnit.MEGABYTES) + .sharedPool("secondary", "serverResource2", 4, MemoryUnit.MEGABYTES) + .sharedPool("tooBig", "serverResource2", 16, MemoryUnit.MEGABYTES) .build(); ClusterTierManagerConfiguration configuration = new ClusterTierManagerConfiguration("identifier", serverSideConfiguration); EhcacheStateService ehcacheStateService = registry.getService(new EhcacheStateServiceConfig(configuration, 
registry, DEFAULT_MAPPER)); diff --git a/clustered/server/src/test/java/org/ehcache/clustered/server/ServerStoreCompatibilityTest.java b/clustered/server/src/test/java/org/ehcache/clustered/server/ServerStoreCompatibilityTest.java index e8661cbb8c..53fbdfd04a 100644 --- a/clustered/server/src/test/java/org/ehcache/clustered/server/ServerStoreCompatibilityTest.java +++ b/clustered/server/src/test/java/org/ehcache/clustered/server/ServerStoreCompatibilityTest.java @@ -52,14 +52,14 @@ public void testStoredKeyTypeMismatch() { STORED_VALUE_TYPE, KEY_SERIALIZER_TYPE, VALUE_SERIALIZER_TYPE, - Consistency.EVENTUAL); + Consistency.EVENTUAL, false); ServerStoreConfiguration clientConfiguration = new ServerStoreConfiguration(DEDICATED_POOL_ALLOCATION, String.class.getName(), STORED_VALUE_TYPE, KEY_SERIALIZER_TYPE, VALUE_SERIALIZER_TYPE, - Consistency.EVENTUAL); + Consistency.EVENTUAL, false); ServerStoreCompatibility serverStoreCompatibility = new ServerStoreCompatibility(); @@ -78,14 +78,14 @@ public void testStoredValueTypeMismatch() { STORED_VALUE_TYPE, KEY_SERIALIZER_TYPE, VALUE_SERIALIZER_TYPE, - Consistency.EVENTUAL); + Consistency.EVENTUAL, false); ServerStoreConfiguration clientConfiguration = new ServerStoreConfiguration(DEDICATED_POOL_ALLOCATION, STORED_KEY_TYPE, Long.class.getName(), KEY_SERIALIZER_TYPE, VALUE_SERIALIZER_TYPE, - Consistency.EVENTUAL); + Consistency.EVENTUAL, false); ServerStoreCompatibility serverStoreCompatibility = new ServerStoreCompatibility(); @@ -104,14 +104,14 @@ public void testKeySerializerTypeMismatch() { STORED_VALUE_TYPE, KEY_SERIALIZER_TYPE, VALUE_SERIALIZER_TYPE, - Consistency.EVENTUAL); + Consistency.EVENTUAL, false); ServerStoreConfiguration clientConfiguration = new ServerStoreConfiguration(DEDICATED_POOL_ALLOCATION, STORED_KEY_TYPE, STORED_VALUE_TYPE, Double.class.getName(), VALUE_SERIALIZER_TYPE, - Consistency.EVENTUAL); + Consistency.EVENTUAL, false); ServerStoreCompatibility serverStoreCompatibility = new ServerStoreCompatibility(); @@ -130,14 +130,14 @@ public void testValueSerializerTypeMismatch() { STORED_VALUE_TYPE, KEY_SERIALIZER_TYPE, VALUE_SERIALIZER_TYPE, - Consistency.EVENTUAL); + Consistency.EVENTUAL, false); ServerStoreConfiguration clientConfiguration = new ServerStoreConfiguration(DEDICATED_POOL_ALLOCATION, STORED_KEY_TYPE, STORED_VALUE_TYPE, KEY_SERIALIZER_TYPE, Double.class.getName(), - Consistency.EVENTUAL); + Consistency.EVENTUAL, false); ServerStoreCompatibility serverStoreCompatibility = new ServerStoreCompatibility(); @@ -156,14 +156,14 @@ public void testConsitencyMismatch() { STORED_VALUE_TYPE, KEY_SERIALIZER_TYPE, VALUE_SERIALIZER_TYPE, - Consistency.EVENTUAL); + Consistency.EVENTUAL, false); ServerStoreConfiguration clientConfiguration = new ServerStoreConfiguration(DEDICATED_POOL_ALLOCATION, STORED_KEY_TYPE, STORED_VALUE_TYPE, KEY_SERIALIZER_TYPE, VALUE_SERIALIZER_TYPE, - Consistency.STRONG); + Consistency.STRONG, false); ServerStoreCompatibility serverStoreCompatibility = new ServerStoreCompatibility(); @@ -182,14 +182,14 @@ public void testDedicatedPoolResourceTooBig() { STORED_VALUE_TYPE, KEY_SERIALIZER_TYPE, VALUE_SERIALIZER_TYPE, - Consistency.STRONG); + Consistency.STRONG, false); ServerStoreConfiguration clientConfiguration = new ServerStoreConfiguration(new Dedicated("primary",8), STORED_KEY_TYPE, STORED_VALUE_TYPE, KEY_SERIALIZER_TYPE, VALUE_SERIALIZER_TYPE, - Consistency.STRONG); + Consistency.STRONG, false); ServerStoreCompatibility serverStoreCompatibility = new ServerStoreCompatibility(); @@ -208,14 +208,14 @@ 
public void testDedicatedPoolResourceTooSmall() { STORED_VALUE_TYPE, KEY_SERIALIZER_TYPE, VALUE_SERIALIZER_TYPE, - Consistency.STRONG); + Consistency.STRONG, false); ServerStoreConfiguration clientConfiguration = new ServerStoreConfiguration(new Dedicated("primary",2), STORED_KEY_TYPE, STORED_VALUE_TYPE, KEY_SERIALIZER_TYPE, VALUE_SERIALIZER_TYPE, - Consistency.STRONG); + Consistency.STRONG, false); ServerStoreCompatibility serverStoreCompatibility = new ServerStoreCompatibility(); @@ -234,14 +234,14 @@ public void testDedicatedPoolResourceNameMismatch() { STORED_VALUE_TYPE, KEY_SERIALIZER_TYPE, VALUE_SERIALIZER_TYPE, - Consistency.STRONG); + Consistency.STRONG, false); ServerStoreConfiguration clientConfiguration = new ServerStoreConfiguration(new Dedicated("primaryBad",4), STORED_KEY_TYPE, STORED_VALUE_TYPE, KEY_SERIALIZER_TYPE, VALUE_SERIALIZER_TYPE, - Consistency.STRONG); + Consistency.STRONG, false); ServerStoreCompatibility serverStoreCompatibility = new ServerStoreCompatibility(); @@ -260,14 +260,14 @@ public void testSharedPoolResourceNameMismatch() { STORED_VALUE_TYPE, KEY_SERIALIZER_TYPE, VALUE_SERIALIZER_TYPE, - Consistency.STRONG); + Consistency.STRONG, false); ServerStoreConfiguration clientConfiguration = new ServerStoreConfiguration(new Shared("sharedPoolBad"), STORED_KEY_TYPE, STORED_VALUE_TYPE, KEY_SERIALIZER_TYPE, VALUE_SERIALIZER_TYPE, - Consistency.STRONG); + Consistency.STRONG, false); ServerStoreCompatibility serverStoreCompatibility = new ServerStoreCompatibility(); @@ -287,14 +287,14 @@ public void testAllResourceParametersMatch() throws Exception STORED_VALUE_TYPE, KEY_SERIALIZER_TYPE, VALUE_SERIALIZER_TYPE, - Consistency.EVENTUAL); + Consistency.EVENTUAL, false); ServerStoreConfiguration clientConfiguration = new ServerStoreConfiguration(DEDICATED_POOL_ALLOCATION, STORED_KEY_TYPE, STORED_VALUE_TYPE, KEY_SERIALIZER_TYPE, VALUE_SERIALIZER_TYPE, - Consistency.EVENTUAL); + Consistency.EVENTUAL, false); ServerStoreCompatibility serverStoreCompatibility = new ServerStoreCompatibility(); @@ -308,14 +308,14 @@ public void testPoolResourceTypeMismatch() { STORED_VALUE_TYPE, KEY_SERIALIZER_TYPE, VALUE_SERIALIZER_TYPE, - Consistency.STRONG); + Consistency.STRONG, false); ServerStoreConfiguration clientConfiguration = new ServerStoreConfiguration(SHARED_POOL_ALLOCATION, STORED_KEY_TYPE, STORED_VALUE_TYPE, KEY_SERIALIZER_TYPE, VALUE_SERIALIZER_TYPE, - Consistency.STRONG); + Consistency.STRONG, false); ServerStoreCompatibility serverStoreCompatibility = new ServerStoreCompatibility(); @@ -334,14 +334,14 @@ public void testClientStoreConfigurationUnknownPoolResource() throws InvalidServ STORED_VALUE_TYPE, KEY_SERIALIZER_TYPE, VALUE_SERIALIZER_TYPE, - Consistency.STRONG); + Consistency.STRONG, false); ServerStoreConfiguration clientConfiguration = new ServerStoreConfiguration(UNKNOWN_POOL_ALLOCATION, STORED_KEY_TYPE, STORED_VALUE_TYPE, KEY_SERIALIZER_TYPE, VALUE_SERIALIZER_TYPE, - Consistency.STRONG); + Consistency.STRONG, false); ServerStoreCompatibility serverStoreCompatibility = new ServerStoreCompatibility(); @@ -356,14 +356,14 @@ public void testServerStoreConfigurationUnknownPoolResourceInvalidKeyType() { STORED_VALUE_TYPE, KEY_SERIALIZER_TYPE, VALUE_SERIALIZER_TYPE, - Consistency.STRONG); + Consistency.STRONG, false); ServerStoreConfiguration clientConfiguration = new ServerStoreConfiguration(UNKNOWN_POOL_ALLOCATION, String.class.getName(), STORED_VALUE_TYPE, KEY_SERIALIZER_TYPE, VALUE_SERIALIZER_TYPE, - Consistency.STRONG); + Consistency.STRONG, false); 
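A note on the repeated trailing `false` these tests now pass: it is the new last constructor argument on ServerStoreConfiguration. Judging from ClusterTierActiveEntity's use of configuration.isLoaderWriterConfigured() earlier in this diff, it appears to be the loader-writer flag (an assumption, stated here for orientation only):

  ServerStoreConfiguration clientConfiguration = new ServerStoreConfiguration(DEDICATED_POOL_ALLOCATION,
      STORED_KEY_TYPE, STORED_VALUE_TYPE,
      KEY_SERIALIZER_TYPE, VALUE_SERIALIZER_TYPE,
      Consistency.EVENTUAL,
      false); // assumed: no loader-writer configured, so the server installs a NoopLockManager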
ServerStoreCompatibility serverStoreCompatibility = new ServerStoreCompatibility(); @@ -383,9 +383,12 @@ public void testServerStoreConfigurationExtendedPoolAllocationType() { STORED_VALUE_TYPE, KEY_SERIALIZER_TYPE, VALUE_SERIALIZER_TYPE, - Consistency.STRONG); + Consistency.STRONG, false); PoolAllocation extendedPoolAllocation = new PoolAllocation.DedicatedPoolAllocation() { + + private static final long serialVersionUID = 1L; + @Override public long getSize() { return 4; @@ -407,7 +410,7 @@ public boolean isCompatible(final PoolAllocation other) { STORED_VALUE_TYPE, KEY_SERIALIZER_TYPE, VALUE_SERIALIZER_TYPE, - Consistency.STRONG); + Consistency.STRONG, false); ServerStoreCompatibility serverStoreCompatibility = new ServerStoreCompatibility(); diff --git a/clustered/server/src/test/java/org/ehcache/clustered/server/TestInvokeContext.java b/clustered/server/src/test/java/org/ehcache/clustered/server/TestInvokeContext.java index e6da1dac22..91f54ff049 100644 --- a/clustered/server/src/test/java/org/ehcache/clustered/server/TestInvokeContext.java +++ b/clustered/server/src/test/java/org/ehcache/clustered/server/TestInvokeContext.java @@ -16,6 +16,8 @@ package org.ehcache.clustered.server; +import java.util.Properties; +import org.ehcache.clustered.common.internal.messages.EhcacheEntityResponse; import org.terracotta.entity.ActiveInvokeChannel; import org.terracotta.entity.ActiveInvokeContext; import org.terracotta.entity.ClientDescriptor; @@ -23,7 +25,7 @@ import java.util.concurrent.atomic.AtomicLong; -public final class TestInvokeContext implements ActiveInvokeContext { +public final class TestInvokeContext implements ActiveInvokeContext { private final AtomicLong currentTransactionId = new AtomicLong(); @@ -39,7 +41,7 @@ public ClientDescriptor getClientDescriptor() { } @Override - public ActiveInvokeChannel openInvokeChannel() { + public ActiveInvokeChannel openInvokeChannel() { return null; } @@ -72,4 +74,9 @@ public ClientSourceId makeClientSourceId(long l) { public int getConcurrencyKey() { return 1; } + + @Override + public Properties getClientSourceProperties() { + return new Properties(); + } } diff --git a/clustered/server/src/test/java/org/ehcache/clustered/server/internal/messages/EhcacheServerCodecTest.java b/clustered/server/src/test/java/org/ehcache/clustered/server/internal/messages/EhcacheServerCodecTest.java index 06b0d98fe6..9f0bb360f5 100644 --- a/clustered/server/src/test/java/org/ehcache/clustered/server/internal/messages/EhcacheServerCodecTest.java +++ b/clustered/server/src/test/java/org/ehcache/clustered/server/internal/messages/EhcacheServerCodecTest.java @@ -56,6 +56,8 @@ public void setUp() { public void testDelegatesToEhcacheCodeForEncoding() throws Exception { LifecycleMessage lifecycleMessage = new LifecycleMessage() { + private static final long serialVersionUID = 1L; + @Override public EhcacheMessageType getMessageType() { return EhcacheMessageType.APPEND; diff --git a/clustered/server/src/test/java/org/ehcache/clustered/server/offheap/ChainMapExtensionTest.java b/clustered/server/src/test/java/org/ehcache/clustered/server/offheap/ChainMapExtensionTest.java new file mode 100644 index 0000000000..ea0690a3a2 --- /dev/null +++ b/clustered/server/src/test/java/org/ehcache/clustered/server/offheap/ChainMapExtensionTest.java @@ -0,0 +1,368 @@ +/* + * Copyright Terracotta, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.ehcache.clustered.server.offheap; + +import org.ehcache.clustered.common.internal.store.Element; +import org.hamcrest.Description; +import org.hamcrest.Matcher; +import org.hamcrest.TypeSafeMatcher; +import org.junit.Test; +import org.terracotta.offheapstore.ReadWriteLockedOffHeapClockCache; +import org.terracotta.offheapstore.buffersource.OffHeapBufferSource; +import org.terracotta.offheapstore.paging.OffHeapStorageArea; +import org.terracotta.offheapstore.paging.PageSource; +import org.terracotta.offheapstore.paging.UnlimitedPageSource; +import org.terracotta.offheapstore.storage.PointerSize; +import org.terracotta.offheapstore.storage.portability.Portability; +import org.terracotta.offheapstore.storage.portability.StringPortability; +import org.terracotta.offheapstore.storage.portability.WriteContext; +import org.terracotta.offheapstore.util.Factory; + +import java.nio.ByteBuffer; +import java.util.HashSet; +import java.util.Set; +import java.util.concurrent.locks.Lock; + +import static org.ehcache.clustered.server.offheap.OffHeapChainMap.chain; +import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.Matchers.emptyIterable; +import static org.hamcrest.collection.IsIterableContainingInOrder.contains; +import static org.junit.Assert.assertThat; + +/** + * Test extensibility of chain map storage engine, including binary engine capabilities. 
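+ * <p>
+ * What the extension below does, roughly: every chain mutation is mirrored through the
+ * engine's extension hooks (chainAttached / chainModified / chainMoved / chainFreed) into a
+ * side OffHeapStorageArea keyed by chain address, and the mirrored copies are then replayed
+ * into a brand-new map; the replayed map can only be complete if the hooks fired for every
+ * mutation.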
+ */
+public class ChainMapExtensionTest {
+  private static final int ADDRESS_OFFSET = 0;
+  private static final int HASH_OFFSET = 8;
+  private static final int EXTENDED_HEADER_LENGTH = 16;
+  private static final long NULL_ENCODING = Long.MAX_VALUE;
+
+  private static final int STORAGE_KEY_LENGTH_OFFSET = 0;
+  private static final int STORAGE_VALUE_LENGTH_OFFSET = 4;
+  private static final int STORAGE_HEADER_OFFSET = 8;
+
+  static final ByteBuffer EMPTY_HEADER_NODE;
+  static {
+    ByteBuffer emptyHeader = ByteBuffer.allocateDirect(EXTENDED_HEADER_LENGTH);
+    emptyHeader.putLong(ADDRESS_OFFSET, NULL_ENCODING);
+    emptyHeader.putLong(HASH_OFFSET, -1);
+    EMPTY_HEADER_NODE = emptyHeader.asReadOnlyBuffer();
+  }
+
+  @Test
+  public void testAppend() {
+    OffHeapChainMap<String> map = getChainMapWithExtendedStorageEngine();
+    map.append("foo", buffer(1));
+    assertThat(map.get("foo"), contains(element(1)));
+    ChainStorageEngine<String> se = map.getStorageEngine();
+    assertThat(se, is(instanceOf(ExtendedOffHeapChainStorageEngine.class)));
+    ExtendedOffHeapChainStorageEngine<String> ese = (ExtendedOffHeapChainStorageEngine<String>) se;
+    map = getNewMap(ese);
+    assertThat(map.get("foo"), contains(element(1)));
+  }
+
+  @Test
+  public void testAppendAndReplace() {
+    OffHeapChainMap<String> map = getChainMapWithExtendedStorageEngine();
+    map.append("foo", buffer(1));
+    assertThat(map.get("foo"), contains(element(1)));
+    map.replaceAtHead("foo", chain(buffer(1)), chain());
+    ChainStorageEngine<String> se = map.getStorageEngine();
+    assertThat(se, is(instanceOf(ExtendedOffHeapChainStorageEngine.class)));
+    @SuppressWarnings("unchecked")
+    ExtendedOffHeapChainStorageEngine<String> ese = (ExtendedOffHeapChainStorageEngine<String>) se;
+    map = getNewMap(ese);
+    assertThat(map.get("foo"), emptyIterable());
+  }
+
+  @SuppressWarnings("unchecked")
+  @Test
+  public void testMultipleAppendAndReplace() {
+    OffHeapChainMap<String> map = getChainMapWithExtendedStorageEngine();
+    for (int i = 1; i <= 20; i++) {
+      map.append("foo" + i, buffer(i));
+      assertThat(map.get("foo" + i), contains(element(i)));
+    }
+    for (int i = 1; i <= 20; i++) {
+      assertThat(map.getAndAppend("foo" + i, buffer(1)), contains(element(i)));
+    }
+    for (int i = 10; i < 15; i++) {
+      map.replaceAtHead("foo" + i, chain(buffer(i), buffer(1)), chain());
+    }
+
+    ChainStorageEngine<String> se = map.getStorageEngine();
+    assertThat(se, is(instanceOf(ExtendedOffHeapChainStorageEngine.class)));
+    ExtendedOffHeapChainStorageEngine<String> ese = (ExtendedOffHeapChainStorageEngine<String>) se;
+    map = getNewMap(ese);
+    for (int i = 1; i <= 20; i++) {
+      if (i < 10 || i >= 15) {
+        assertThat(map.get("foo" + i), contains(element(i), element(1)));
+      } else {
+        assertThat(map.get("foo" + i), emptyIterable());
+      }
+    }
+  }
+
+  private OffHeapChainMap<String> getChainMapWithExtendedStorageEngine() {
+    PageSource chainSource = new UnlimitedPageSource(new OffHeapBufferSource());
+    PageSource extendedSource = new UnlimitedPageSource(new OffHeapBufferSource());
+    Factory<? extends ChainStorageEngine<String>> factory = ExtendedOffHeapChainStorageEngine.createFactory(chainSource,
+      StringPortability.INSTANCE, 4096, 4096, false, false, extendedSource);
+    return new OffHeapChainMap<>(chainSource, factory);
+  }
+
+  private OffHeapChainMap<String> getNewMap(ExtendedOffHeapChainStorageEngine<String> ese) {
+    PageSource chainSource = new UnlimitedPageSource(new OffHeapBufferSource());
+    Factory<? extends ChainStorageEngine<String>> factory = OffHeapChainStorageEngine.createFactory(chainSource,
+      StringPortability.INSTANCE, 4096, 4096, false, false);
+    OffHeapChainStorageEngine<String> storageEngine = (OffHeapChainStorageEngine<String>) factory.newInstance();
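+    // rebuild a completely fresh map/engine pair, then replay the mirrored key/value copies
+    // captured by the extended engine into it; nothing is read from the original chain storage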
+    ReadWriteLockedOffHeapClockCache<String, InternalChain> newMap =
+      new ReadWriteLockedOffHeapClockCache<>(chainSource, storageEngine);
+    ese.replayIntoMap(newMap);
+    return new OffHeapChainMap<>(newMap, storageEngine);
+  }
+
+  private static ByteBuffer buffer(int i) {
+    ByteBuffer buffer = ByteBuffer.allocate(i);
+    while (buffer.hasRemaining()) {
+      buffer.put((byte) i);
+    }
+    return (ByteBuffer) buffer.flip();
+  }
+
+  private static Matcher<Element> element(int i) {
+    return new TypeSafeMatcher<Element>() {
+      @Override
+      protected boolean matchesSafely(Element item) {
+        return item.getPayload().equals(buffer(i));
+      }
+
+      @Override
+      public void describeTo(Description description) {
+        description.appendText("element containing buffer[" + i +"]");
+      }
+    };
+  }
+
+  private static final class ExtendedHeaderForTest {
+    private final ByteBuffer data;
+    private final WriteContext writeContext;
+
+    ExtendedHeaderForTest(ByteBuffer buffer, WriteContext writeContext) {
+      this.data = buffer;
+      this.writeContext = writeContext;
+    }
+
+    long getAddress() {
+      return getLong(ADDRESS_OFFSET);
+    }
+
+    void setAddress(long val) {
+      writeContext.setLong(ADDRESS_OFFSET, val);
+    }
+
+    int getHash() {
+      long hashAndSize = getLong(HASH_OFFSET) >> 32;
+      return (int) hashAndSize;
+    }
+
+    int getSize() {
+      long hashAndSize = getLong(HASH_OFFSET);
+      return (int) hashAndSize;
+    }
+
+    void setHashAndSize(int hash, int size) {
+      long val = ((long) hash << 32) | size;
+      writeContext.setLong(HASH_OFFSET, val);
+    }
+
+    private long getLong(int address) {
+      return data.getLong(address);
+    }
+  }
+
+  public static class ExtendedOffHeapChainStorageEngine<K> extends OffHeapChainStorageEngine<K> {
+    private final OffHeapStorageArea extendedArea;
+    private final Set<Long> chainAddresses;
+    private volatile boolean bypassEngineCommands = false;
+
+    public static <K> Factory<? extends ChainStorageEngine<K>>
+    createFactory(PageSource source,
+                  Portability<? super K> keyPortability,
+                  int minPageSize, int maxPageSize,
+                  boolean thief, boolean victim, PageSource cachePageSource) {
+      return (Factory<? extends ChainStorageEngine<K>>)() ->
+        new ExtendedOffHeapChainStorageEngine<>(source, keyPortability,
+          minPageSize, maxPageSize, thief, victim, cachePageSource);
+    }
+
+    private ExtendedOffHeapChainStorageEngine(PageSource source, Portability<? super K> keyPortability, int minPageSize,
+                                              int maxPageSize, boolean thief, boolean victim,
+                                              PageSource cachePageSource) {
+      super(source, keyPortability, minPageSize, maxPageSize, thief, victim, EMPTY_HEADER_NODE);
+      this.extendedArea = new OffHeapStorageArea(PointerSize.LONG, new ExtendedEngineOwner(), cachePageSource, minPageSize, maxPageSize, thief, victim);
+      this.chainAddresses = new HashSet<>();
+    }
+
+    @Override
+    public Long writeMapping(K key, InternalChain value, int hash, int metadata) {
+      bypassEngineCommands = true;
+      try {
+        return super.writeMapping(key, value, hash, metadata);
+      } finally {
+        bypassEngineCommands = false;
+      }
+    }
+
+    @Override
+    public void freeMapping(long encoding, int hash, boolean removal) {
+      if (removal) {
+        // free the chain here if we are removing; otherwise chainFreed will be invoked from within
+        chainFreed(encoding);
+      }
+      super.freeMapping(encoding, hash, removal);
+    }
+
+    @Override
+    public void chainAttached(long chainAddress) {
+      localPut(chainAddress);
+    }
+
+    @Override
+    public void chainFreed(long chainAddress) {
+      if (bypassEngineCommands) {
+        // do not do anything when in write mapping
+        return;
+      }
+      localRemove(chainAddress);
+    }
+
+    @Override
+    public void chainModified(long chainAddress) {
+      if (bypassEngineCommands) {
+        return;
+      }
+      localPut(chainAddress);
+    }
+
+    @Override
+    public void chainMoved(long fromChainAddress, long toChainAddress) {
+      if (bypassEngineCommands) {
+        return;
+      }
+      localMove(fromChainAddress, toChainAddress);
+    }
+
+    private ExtendedHeaderForTest createAtExtensionAddress(long chainAddress) {
+      return new ExtendedHeaderForTest(getExtensionHeader(chainAddress),
+        getExtensionWriteContext(chainAddress));
+    }
+
+    void replayIntoMap(ReadWriteLockedOffHeapClockCache<K, InternalChain> newMap) {
+      Lock l = newMap.writeLock();
+      l.lock();
+      try {
+        chainAddresses.forEach((a) -> {
+          ExtendedHeaderForTest hdr = createAtExtensionAddress(a);
+          long address = hdr.getAddress();
+          int keyLength = extendedArea.readInt(address + STORAGE_KEY_LENGTH_OFFSET);
+          int valueLength = extendedArea.readInt(address + STORAGE_VALUE_LENGTH_OFFSET);
+          ByteBuffer keyBuffer = extendedArea.readBuffer(address + STORAGE_HEADER_OFFSET, keyLength);
+          ByteBuffer valueBuffer = extendedArea.readBuffer(address + STORAGE_HEADER_OFFSET + keyLength, valueLength);
+          newMap.installMappingForHashAndEncoding(hdr.getHash(), keyBuffer, valueBuffer, 0);
+        });
+      } finally {
+        l.unlock();
+      }
+    }
+
+    private void localPut(long chainAddress) {
+      ByteBuffer keyBuffer = super.readBinaryKey(chainAddress);
+      int hash = super.readKeyHash(chainAddress);
+      ByteBuffer valueBuffer = super.readBinaryValue(chainAddress);
+      writeToExtendedArea(chainAddress, hash, keyBuffer, valueBuffer);
+    }
+
+    private void writeToExtendedArea(long chainAddress, int hash, ByteBuffer keyBuffer, ByteBuffer valueBuffer) {
+      ExtendedHeaderForTest hdr = createAtExtensionAddress(chainAddress);
+      long address = hdr.getAddress();
+      if (address != NULL_ENCODING) {
+        // free previous
+        extendedArea.free(address);
+      } else {
+        chainAddresses.add(chainAddress);
+      }
+      int size = (2 * Integer.BYTES) + keyBuffer.remaining() + valueBuffer.remaining();
+      address = extendedArea.allocate(size);
+      hdr.setAddress(address);
+      hdr.setHashAndSize(hash, size);
+      extendedArea.writeInt(address + STORAGE_KEY_LENGTH_OFFSET, keyBuffer.remaining());
+      extendedArea.writeInt(address + STORAGE_VALUE_LENGTH_OFFSET, valueBuffer.remaining());
+      extendedArea.writeBuffer(address + STORAGE_HEADER_OFFSET, keyBuffer.duplicate());
+      extendedArea.writeBuffer(address + STORAGE_HEADER_OFFSET + keyBuffer.remaining(), valueBuffer.duplicate());
+    }
+
+    private void localRemove(long chainAddress) {
+      ExtendedHeaderForTest node = createAtExtensionAddress(chainAddress);
+      long address = node.getAddress();
+      if (address != NULL_ENCODING) {
+        extendedArea.free(node.getAddress());
+        chainAddresses.remove(chainAddress);
+      }
+      node.setAddress(NULL_ENCODING);
+    }
+
+    private void localMove(long fromChainAddress, long toChainAddress) {
+      ExtendedHeaderForTest fromHeader = createAtExtensionAddress(fromChainAddress);
+      ExtendedHeaderForTest toHeader = createAtExtensionAddress(toChainAddress);
+      chainAddresses.remove(fromChainAddress);
+      chainAddresses.add(toChainAddress);
+      toHeader.setAddress(fromHeader.getAddress());
+      toHeader.setHashAndSize(fromHeader.getHash(), fromHeader.getSize());
+    }
+
+    private class ExtendedEngineOwner implements OffHeapStorageArea.Owner {
+      @Override
+      public boolean evictAtAddress(long address, boolean shrink) {
+        return false;
+      }
+
+      @Override
+      public Lock writeLock() {
+        return owner.writeLock();
+      }
+
+      @Override
+      public boolean isThief() {
+        return owner.isThiefForTableAllocations();
+      }
+
+      @Override
+      public boolean moved(long from, long to) {
+        // for now not supported
+        return false;
+      }
+
+      @Override
+      public int sizeOf(long address) {
+        return extendedArea.readInt(address + STORAGE_KEY_LENGTH_OFFSET) +
+          extendedArea.readInt(address + STORAGE_VALUE_LENGTH_OFFSET) + STORAGE_HEADER_OFFSET;
+      }
+    }
+  }
+}
diff --git a/clustered/server/src/test/java/org/ehcache/clustered/server/offheap/ChainMapTest.java b/clustered/server/src/test/java/org/ehcache/clustered/server/offheap/ChainMapTest.java
index 533ec3b007..5e2cf5e82b 100644
--- a/clustered/server/src/test/java/org/ehcache/clustered/server/offheap/ChainMapTest.java
+++ b/clustered/server/src/test/java/org/ehcache/clustered/server/offheap/ChainMapTest.java
@@ -23,6 +23,7 @@
 import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
+import org.ehcache.clustered.common.internal.store.Chain;
 import org.ehcache.clustered.common.internal.store.Element;
 import org.hamcrest.Description;
@@ -362,13 +363,13 @@ public void testActiveChainsThreadSafety() throws ExecutionException, Interrupte
     int nThreads = 10;
     ExecutorService executorService = Executors.newFixedThreadPool(nThreads);
-    List futures = new ArrayList<>();
+    List<Future<Chain>> futures = new ArrayList<>();
     for (int i = 0; i < nThreads ; i++) {
       futures.add(executorService.submit(() -> map.get("key")));
     }
-    for (Future f : futures) {
+    for (Future<Chain> f : futures) {
       f.get();
     }
@@ -392,6 +393,37 @@ public void testPutDoesNotLeakWhenMappingIsNotNull() {
   }
+  @Test
+  public void testRemoveMissingKey() {
+    OffHeapChainMap<String> map = new OffHeapChainMap<>(new UnlimitedPageSource(new OffHeapBufferSource()), StringPortability.INSTANCE, minPageSize, maxPageSize, steal);
+    map.remove("foo");
+    assertThat(map.get("foo").isEmpty(), is(true));
+  }
+
+  @Test
+  public void testRemoveSingleChain() {
+    OffHeapChainMap<String> map = new OffHeapChainMap<>(new UnlimitedPageSource(new OffHeapBufferSource()), StringPortability.INSTANCE, minPageSize, maxPageSize, steal);
+    map.append("foo", buffer(1));
+    map.append("bar", buffer(2));
+    assertThat(map.get("foo"), contains(element(1)));
+    assertThat(map.get("bar"), contains(element(2)));
+
+    map.remove("foo");
+    assertThat(map.get("foo").isEmpty(), is(true));
+    assertThat(map.get("bar"), contains(element(2)));
+  }
+
+  @Test
+  public void testRemoveDoubleChain() {
+    OffHeapChainMap<String> map = new OffHeapChainMap<>(new UnlimitedPageSource(new OffHeapBufferSource()), StringPortability.INSTANCE, minPageSize, maxPageSize, steal);
+    map.append("foo", buffer(1));
+    map.append("foo", buffer(2));
+    assertThat(map.get("foo"), contains(element(1), element(2)));
+
+    map.remove("foo");
+    assertThat(map.get("foo").isEmpty(), is(true));
+  }
+
 private static ByteBuffer buffer(int i) {
   ByteBuffer buffer = ByteBuffer.allocate(i);
   while (buffer.hasRemaining()) {
diff --git a/clustered/server/src/test/java/org/ehcache/clustered/server/offheap/OffHeapServerStoreTest.java b/clustered/server/src/test/java/org/ehcache/clustered/server/offheap/OffHeapServerStoreTest.java
index 5f85554248..a06f0d7fc5 100644
--- a/clustered/server/src/test/java/org/ehcache/clustered/server/offheap/OffHeapServerStoreTest.java
+++ b/clustered/server/src/test/java/org/ehcache/clustered/server/offheap/OffHeapServerStoreTest.java
@@ -17,16 +17,15 @@
 import java.nio.ByteBuffer;
 import java.util.Random;
+import java.util.concurrent.locks.ReentrantLock;
 import org.ehcache.clustered.common.internal.store.Chain;
-import org.ehcache.clustered.common.internal.store.Element;
 import org.ehcache.clustered.server.KeySegmentMapper;
 import org.ehcache.clustered.server.store.ChainBuilder;
 import org.ehcache.clustered.server.store.ElementBuilder;
 import org.ehcache.clustered.common.internal.store.ServerStore;
 import org.ehcache.clustered.server.store.ServerStoreTest;
 import org.junit.Test;
-import org.mockito.ArgumentMatchers;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
 import org.terracotta.offheapstore.buffersource.OffHeapBufferSource;
@@ -34,6 +33,8 @@
 import org.terracotta.offheapstore.paging.UnlimitedPageSource;
 import org.terracotta.offheapstore.paging.UpfrontAllocatingPageSource;
+import static java.util.Arrays.asList;
+import static java.util.Collections.singletonList;
 import static org.hamcrest.MatcherAssert.assertThat;
 import static org.hamcrest.Matchers.greaterThanOrEqualTo;
 import static org.hamcrest.Matchers.lessThanOrEqualTo;
@@ -41,12 +42,14 @@
 import org.junit.Assert;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.ArgumentMatchers.anyLong;
+import static org.mockito.Mockito.doNothing;
 import static org.mockito.Mockito.doThrow;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
 import static org.terracotta.offheapstore.util.MemoryUnit.GIGABYTES;
-import static org.terracotta.offheapstore.util.MemoryUnit.KILOBYTES;
 import static org.terracotta.offheapstore.util.MemoryUnit.MEGABYTES;
 public class OffHeapServerStoreTest extends ServerStoreTest {
@@ -58,9 +61,19 @@ private OffHeapChainMap getOffHeapChainMapMock() {
     return mock(OffHeapChainMap.class);
   }
+  @SuppressWarnings("unchecked")
+  private OffHeapChainMap<Long> getOffHeapChainMapLongMock() {
+    return mock(OffHeapChainMap.class);
+  }
+
+  @SuppressWarnings("unchecked")
+  private ChainStorageEngine<Long> getChainStorageEngineLongMock() {
+    return mock(ChainStorageEngine.class);
+  }
+
   @Override
   public ServerStore newStore() {
-    return new OffHeapServerStore(new UnlimitedPageSource(new OffHeapBufferSource()), DEFAULT_MAPPER);
+    return new OffHeapServerStore(new UnlimitedPageSource(new OffHeapBufferSource()), DEFAULT_MAPPER, false);
   }
   @Override
@@ -92,6 +105,77 @@ public void testGetMaxSize() {
     assertThat(OffHeapServerStore.getMaxSize(GIGABYTES.toBytes(2)), is(8192L));
   }
+  @Test
+  public void put_worked_the_first_time_test() throws Exception {
+    OffHeapChainMap<Long> offheapChainMap = getOffHeapChainMapLongMock();
+    ChainStorageEngine<Long> storageEngine = getChainStorageEngineLongMock();
+    when(offheapChainMap.getStorageEngine()).thenReturn(storageEngine);
+
+    doNothing()
+      .when(offheapChainMap).put(anyLong(), any(Chain.class));
+
+    OffHeapServerStore offHeapServerStore = new OffHeapServerStore(singletonList(offheapChainMap), mock(KeySegmentMapper.class));
+    offHeapServerStore.put(43L, mock(Chain.class));
+  }
+
+
+  @Test(expected = OversizeMappingException.class)
+  public void put_should_throw_when_underlying_put_always_throw_test() throws Exception {
+    OffHeapChainMap<Long> offheapChainMap = getOffHeapChainMapLongMock();
+    ChainStorageEngine<Long> storageEngine = getChainStorageEngineLongMock();
+    when(offheapChainMap.getStorageEngine()).thenReturn(storageEngine);
+    when(offheapChainMap.writeLock()).thenReturn(new ReentrantLock());
+
+    doThrow(new OversizeMappingException())
+      .when(offheapChainMap).put(anyLong(), any(Chain.class));
+
+    OffHeapServerStore offHeapServerStore = new OffHeapServerStore(singletonList(offheapChainMap), mock(KeySegmentMapper.class));
+    offHeapServerStore.put(43L, mock(Chain.class));
+  }
+
+  @Test
+  public void put_should_return_when_underlying_put_does_not_throw_test() throws Exception {
+    OffHeapChainMap<Long> offheapChainMap = getOffHeapChainMapLongMock();
+    ChainStorageEngine<Long> storageEngine = getChainStorageEngineLongMock();
+    when(offheapChainMap.getStorageEngine()).thenReturn(storageEngine);
+    when(offheapChainMap.writeLock()).thenReturn(new ReentrantLock());
+
+    // throw once, then ok
+    doThrow(new OversizeMappingException())
+      .doNothing()
+      .when(offheapChainMap).put(anyLong(), any(Chain.class));
+
+    OffHeapServerStore offHeapServerStore = new OffHeapServerStore(singletonList(offheapChainMap), mock(KeySegmentMapper.class));
+    offHeapServerStore.put(43L, mock(Chain.class));
+  }
+
+  @Test
+  public void put_should_return_when_underlying_put_does_not_throw_with_keymapper_test() throws Exception {
+    long theKey = 43L;
+    ChainStorageEngine<Long> storageEngine = getChainStorageEngineLongMock();
+    OffHeapChainMap<Long> offheapChainMap = getOffHeapChainMapLongMock();
+    OffHeapChainMap<Long> otherOffheapChainMap = getOffHeapChainMapLongMock();
+    when(offheapChainMap.shrink()).thenReturn(true);
+    when(offheapChainMap.getStorageEngine()).thenReturn(storageEngine);
+    when(offheapChainMap.writeLock()).thenReturn(new ReentrantLock());
+    when(otherOffheapChainMap.writeLock()).thenReturn(new ReentrantLock());
+
+    // throw twice, then OK
+    doThrow(new OversizeMappingException())
+      .doThrow(new OversizeMappingException())
+      .doNothing()
+      .when(otherOffheapChainMap).put(anyLong(), any(Chain.class));
+
+    KeySegmentMapper keySegmentMapper = mock(KeySegmentMapper.class);
+    when(keySegmentMapper.getSegmentForKey(theKey)).thenReturn(1);
+    OffHeapServerStore offHeapServerStore = new OffHeapServerStore(asList(offheapChainMap, otherOffheapChainMap), keySegmentMapper);
+    offHeapServerStore.put(theKey, mock(Chain.class));
+
+    // getSegmentForKey is called 4 times: segmentFor, handleOversizeMappingException, segmentFor, segmentFor
+    verify(keySegmentMapper, times(4)).getSegmentForKey(theKey);
+  }
+
 @Test
 public void test_append_doesNotConsumeBuffer_evenWhenOversizeMappingException() throws Exception {
   OffHeapServerStore store = (OffHeapServerStore) spy(newStore());
@@ -109,7 +193,7 @@ public Object answer(InvocationOnMock invocation) throws Throwable {
       }
     }
   });
-    when(store.handleOversizeMappingException(anyLong())).thenReturn(true);
+    when(store.tryShrinkOthers(anyLong())).thenReturn(true);
     ByteBuffer payload = createPayload(1L);
@@ -134,7 +218,7 @@ public Object answer(InvocationOnMock invocation) throws Throwable {
       }
     }
   });
-    when(store.handleOversizeMappingException(anyLong())).thenReturn(true);
+    when(store.tryShrinkOthers(anyLong())).thenReturn(true);
     ByteBuffer payload = createPayload(1L);
@@ -165,7 +249,7 @@ public Object answer(InvocationOnMock invocation) throws Throwable {
       }
     }
   });
-    when(store.handleOversizeMappingException(anyLong())).thenReturn(true);
+    when(store.tryShrinkOthers(anyLong())).thenReturn(true);
     ByteBuffer payload = createPayload(1L);
@@ -181,7 +265,7 @@ public void testCrossSegmentShrinking() {
     long seed = System.nanoTime();
     Random random = new Random(seed);
    try {
-      OffHeapServerStore store = new OffHeapServerStore(new UpfrontAllocatingPageSource(new OffHeapBufferSource(), MEGABYTES.toBytes(1L), MEGABYTES.toBytes(1)), DEFAULT_MAPPER);
+      OffHeapServerStore store = new OffHeapServerStore(new UpfrontAllocatingPageSource(new OffHeapBufferSource(), MEGABYTES.toBytes(1L), MEGABYTES.toBytes(1)), DEFAULT_MAPPER, false);
       ByteBuffer smallValue = ByteBuffer.allocate(1024);
       for (int i = 0; i < 10000; i++) {
@@ -209,7 +293,7 @@ public void testCrossSegmentShrinking() {
 public void
testServerSideUsageStats() { long maxBytes = MEGABYTES.toBytes(1); - OffHeapServerStore store = new OffHeapServerStore(new UpfrontAllocatingPageSource(new OffHeapBufferSource(), maxBytes, MEGABYTES.toBytes(1)), new KeySegmentMapper(16)); + OffHeapServerStore store = new OffHeapServerStore(new UpfrontAllocatingPageSource(new OffHeapBufferSource(), maxBytes, MEGABYTES.toBytes(1)), new KeySegmentMapper(16), false); int oneKb = 1024; long smallLoopCount = 5; diff --git a/clustered/server/src/test/java/org/ehcache/clustered/server/offheap/PinningOffHeapChainMapTest.java b/clustered/server/src/test/java/org/ehcache/clustered/server/offheap/PinningOffHeapChainMapTest.java new file mode 100644 index 0000000000..c1a36790ba --- /dev/null +++ b/clustered/server/src/test/java/org/ehcache/clustered/server/offheap/PinningOffHeapChainMapTest.java @@ -0,0 +1,176 @@ +/* + * Copyright Terracotta, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.ehcache.clustered.server.offheap; + +import org.ehcache.clustered.common.internal.store.Chain; +import org.ehcache.clustered.common.internal.store.Element; +import org.ehcache.clustered.common.internal.store.Util; +import org.ehcache.clustered.common.internal.store.operations.OperationCode; +import org.junit.Test; +import org.terracotta.offheapstore.buffersource.OffHeapBufferSource; +import org.terracotta.offheapstore.paging.UnlimitedPageSource; + +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; + +import static org.ehcache.clustered.common.internal.store.operations.OperationCode.PUT; +import static org.ehcache.clustered.common.internal.store.operations.OperationCode.PUT_IF_ABSENT; +import static org.ehcache.clustered.common.internal.store.operations.OperationCode.PUT_WITH_WRITER; +import static org.ehcache.clustered.common.internal.store.operations.OperationCode.REMOVE; +import static org.ehcache.clustered.common.internal.store.operations.OperationCode.REMOVE_CONDITIONAL; +import static org.ehcache.clustered.common.internal.store.operations.OperationCode.REPLACE; +import static org.ehcache.clustered.common.internal.store.operations.OperationCode.REPLACE_CONDITIONAL; +import static org.hamcrest.Matchers.is; +import static org.junit.Assert.assertThat; + +public class PinningOffHeapChainMapTest { + @Test + public void testAppendWithPinningOperation() { + PinningOffHeapChainMap pinningOffHeapChainMap = getPinningOffHeapChainMap(); + + pinningOffHeapChainMap.append(1L, buffer(PUT_WITH_WRITER)); + assertThat(pinningOffHeapChainMap.heads.isPinned(1L), is(true)); + } + + @Test + public void testAppendWithNormalOperation() { + PinningOffHeapChainMap pinningOffHeapChainMap = getPinningOffHeapChainMap(); + + pinningOffHeapChainMap.append(1L, buffer(PUT)); + assertThat(pinningOffHeapChainMap.heads.isPinned(1L), is(false)); + } + + @Test + public void testGetAndAppendWithPinningOperation() { + PinningOffHeapChainMap pinningOffHeapChainMap = 
getPinningOffHeapChainMap(); + + pinningOffHeapChainMap.getAndAppend(1L, buffer(REMOVE_CONDITIONAL)); + assertThat(pinningOffHeapChainMap.heads.isPinned(1L), is(true)); + } + + @Test + public void testGetAndAppendWithNormalOperation() { + PinningOffHeapChainMap pinningOffHeapChainMap = getPinningOffHeapChainMap(); + + pinningOffHeapChainMap.getAndAppend(1L, buffer(PUT)); + assertThat(pinningOffHeapChainMap.heads.isPinned(1L), is(false)); + } + + @Test + public void testPutWithPinningChain() { + PinningOffHeapChainMap pinningOffHeapChainMap = getPinningOffHeapChainMap(); + + pinningOffHeapChainMap.put(1L, chain(buffer(PUT), buffer(REMOVE))); + assertThat(pinningOffHeapChainMap.heads.isPinned(1L), is(true)); + } + + @Test + public void testPutWithNormalChain() { + PinningOffHeapChainMap pinningOffHeapChainMap = getPinningOffHeapChainMap(); + + pinningOffHeapChainMap.put(1L, chain(buffer(PUT), buffer(PUT))); + assertThat(pinningOffHeapChainMap.heads.isPinned(1L), is(false)); + } + + @Test + public void testReplaceAtHeadWithUnpinningChain() { + PinningOffHeapChainMap pinningOffHeapChainMap = getPinningOffHeapChainMap(); + + ByteBuffer buffer = buffer(PUT_IF_ABSENT); + Chain pinningChain = chain(buffer); + Chain unpinningChain = chain(buffer(PUT)); + + pinningOffHeapChainMap.append(1L, buffer); + assertThat(pinningOffHeapChainMap.heads.isPinned(1L), is(true)); + + pinningOffHeapChainMap.replaceAtHead(1L, pinningChain, unpinningChain); + assertThat(pinningOffHeapChainMap.heads.isPinned(1L), is(false)); + } + + @Test + public void testReplaceAtHeadWithPinningChain() { + PinningOffHeapChainMap pinningOffHeapChainMap = getPinningOffHeapChainMap(); + + ByteBuffer buffer = buffer(REPLACE); + Chain pinningChain = chain(buffer); + Chain unpinningChain = chain(buffer(REPLACE_CONDITIONAL)); + + pinningOffHeapChainMap.append(1L, buffer); + assertThat(pinningOffHeapChainMap.heads.isPinned(1L), is(true)); + + pinningOffHeapChainMap.replaceAtHead(1L, pinningChain, unpinningChain); + assertThat(pinningOffHeapChainMap.heads.isPinned(1L), is(true)); + } + + @Test + public void testReplaceAtHeadWithEmptyChain() { + PinningOffHeapChainMap pinningOffHeapChainMap = getPinningOffHeapChainMap(); + + ByteBuffer buffer = buffer(PUT_WITH_WRITER); + Chain pinningChain = chain(buffer); + Chain unpinningChain = chain(); + + pinningOffHeapChainMap.append(1L, buffer); + assertThat(pinningOffHeapChainMap.heads.isPinned(1L), is(true)); + + pinningOffHeapChainMap.replaceAtHead(1L, pinningChain, unpinningChain); + assertThat(pinningOffHeapChainMap.heads.isPinned(1L), is(false)); + } + + private ByteBuffer buffer(OperationCode first) { + return ByteBuffer.wrap(new byte[] { first.getValue() }); + } + + private PinningOffHeapChainMap getPinningOffHeapChainMap() { + return new PinningOffHeapChainMap<>(new UnlimitedPageSource(new OffHeapBufferSource()), LongPortability.INSTANCE, + 4096, 4096, false); + } + + public static Chain chain(ByteBuffer... 
buffers) { + final List list = new ArrayList<>(); + for (ByteBuffer b : buffers) { + list.add(b::asReadOnlyBuffer); + } + + return new Chain() { + + final List elements = Collections.unmodifiableList(list); + + @Override + public Iterator iterator() { + return elements.iterator(); + } + + @Override + public Iterator reverseIterator() { + return Util.reverseIterator(elements); + } + + @Override + public boolean isEmpty() { + return elements.isEmpty(); + } + + @Override + public int length() { + return elements.size(); + } + }; + } +} diff --git a/clustered/server/src/test/java/org/ehcache/clustered/server/repo/ServerStateRepositoryTest.java b/clustered/server/src/test/java/org/ehcache/clustered/server/repo/ServerStateRepositoryTest.java index 806b7d6e51..c2201f369d 100644 --- a/clustered/server/src/test/java/org/ehcache/clustered/server/repo/ServerStateRepositoryTest.java +++ b/clustered/server/src/test/java/org/ehcache/clustered/server/repo/ServerStateRepositoryTest.java @@ -24,7 +24,6 @@ import java.util.AbstractMap; import java.util.Map; import java.util.Set; -import java.util.UUID; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.is; diff --git a/clustered/server/src/test/java/org/ehcache/clustered/server/state/EhcacheStateServiceProviderTest.java b/clustered/server/src/test/java/org/ehcache/clustered/server/state/EhcacheStateServiceProviderTest.java index 311b6d8319..00fd836a27 100644 --- a/clustered/server/src/test/java/org/ehcache/clustered/server/state/EhcacheStateServiceProviderTest.java +++ b/clustered/server/src/test/java/org/ehcache/clustered/server/state/EhcacheStateServiceProviderTest.java @@ -44,6 +44,7 @@ import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; public class EhcacheStateServiceProviderTest { @@ -63,21 +64,8 @@ public void setUp() { configuration.getResource().add(resource); OffHeapResources offheapResources = new OffHeapResourcesProvider(configuration); - platformConfiguration = new PlatformConfiguration() { - @Override - public String getServerName() { - return "Server1"; - } - - @Override - public Collection getExtendedConfiguration(Class type) { - if (OffHeapResources.class.isAssignableFrom(type)) { - return Collections.singletonList(type.cast(offheapResources)); - } - throw new UnsupportedOperationException("TODO Implement me!"); - } - }; - + platformConfiguration = mock(PlatformConfiguration.class); + when(platformConfiguration.getExtendedConfiguration(OffHeapResources.class)).thenReturn(Collections.singletonList(offheapResources)); serviceProviderConfiguration = mock(ServiceProviderConfiguration.class); tierManagerConfiguration = new ClusterTierManagerConfiguration("identifier", new ServerSideConfiguration(emptyMap())); diff --git a/clustered/server/src/test/java/org/ehcache/clustered/server/store/ClusterTierActiveEntityTest.java b/clustered/server/src/test/java/org/ehcache/clustered/server/store/ClusterTierActiveEntityTest.java index 6fc35d6037..dfb28860d6 100644 --- a/clustered/server/src/test/java/org/ehcache/clustered/server/store/ClusterTierActiveEntityTest.java +++ b/clustered/server/src/test/java/org/ehcache/clustered/server/store/ClusterTierActiveEntityTest.java @@ -27,7 +27,6 @@ import org.ehcache.clustered.common.internal.messages.EhcacheEntityMessage; import org.ehcache.clustered.common.internal.messages.EhcacheEntityResponse; import 
org.ehcache.clustered.common.internal.messages.EhcacheResponseType; -import org.ehcache.clustered.common.internal.messages.LifeCycleMessageFactory; import org.ehcache.clustered.common.internal.messages.LifecycleMessage; import org.ehcache.clustered.common.internal.messages.ServerStoreOpMessage; import org.ehcache.clustered.common.internal.store.ClusterTierEntityConfiguration; @@ -46,12 +45,15 @@ import org.junit.Before; import org.junit.Test; import org.mockito.ArgumentCaptor; +import org.mockito.Mockito; import org.terracotta.client.message.tracker.OOOMessageHandler; import org.terracotta.client.message.tracker.OOOMessageHandlerConfiguration; import org.terracotta.client.message.tracker.OOOMessageHandlerImpl; import org.terracotta.entity.ClientCommunicator; import org.terracotta.entity.ClientDescriptor; import org.terracotta.entity.ConfigurationException; +import org.terracotta.entity.EntityMessage; +import org.terracotta.entity.EntityResponse; import org.terracotta.entity.IEntityMessenger; import org.terracotta.entity.PassiveSynchronizationChannel; import org.terracotta.entity.ServiceConfiguration; @@ -72,7 +74,8 @@ import java.util.Map; import java.util.Random; import java.util.Set; -import java.util.UUID; +import java.util.concurrent.ExecutionException; +import java.util.function.BiConsumer; import static org.ehcache.clustered.common.internal.store.Util.createPayload; import static org.hamcrest.Matchers.contains; @@ -99,7 +102,6 @@ public class ClusterTierActiveEntityTest { - private static final LifeCycleMessageFactory MESSAGE_FACTORY = new LifeCycleMessageFactory(); private static final KeySegmentMapper DEFAULT_MAPPER = new KeySegmentMapper(16); private String defaultStoreName = "store"; @@ -216,7 +218,7 @@ public void testLoadExistingRegistersEvictionListener() throws Exception { ServerSideServerStore store = mock(ServerSideServerStore.class); when(stateService.loadStore(eq(defaultStoreName), any())).thenReturn(store); - IEntityMessenger entityMessenger = mock(IEntityMessenger.class); + IEntityMessenger entityMessenger = mock(IEntityMessenger.class); ServiceRegistry registry = getCustomMockedServiceRegistry(stateService, null, entityMessenger, null, null); ClusterTierActiveEntity activeEntity = new ClusterTierActiveEntity(registry, defaultConfiguration, DEFAULT_MAPPER); activeEntity.loadExisting(); @@ -292,9 +294,6 @@ public void testClearInvalidationAcksTakenIntoAccount() throws Exception { activeEntity.connected(context2.getClientDescriptor()); activeEntity.connected(context3.getClientDescriptor()); - UUID client2Id = UUID.randomUUID(); - UUID client3Id = UUID.randomUUID(); - // attach to the store assertSuccess( activeEntity.invokeActive(context1, new LifecycleMessage.ValidateServerStore(defaultStoreName, defaultStoreConfiguration)) @@ -396,9 +395,6 @@ public void testClearInvalidationDisconnectionOfInvalidatingClientsTakenIntoAcco activeEntity.connected(context2.getClientDescriptor()); activeEntity.connected(context3.getClientDescriptor()); - UUID client2Id = UUID.randomUUID(); - UUID client3Id = UUID.randomUUID(); - // attach to the store assertSuccess( activeEntity.invokeActive(context1, new LifecycleMessage.ValidateServerStore(defaultStoreName, defaultStoreConfiguration)) @@ -449,9 +445,6 @@ public void testAppendInvalidationDisconnectionOfBlockingClientTakenIntoAccount( activeEntity.connected(context2.getClientDescriptor()); activeEntity.connected(context3.getClientDescriptor()); - UUID client2Id = UUID.randomUUID(); - UUID client3Id = UUID.randomUUID(); - // attach to the 
store assertSuccess( activeEntity.invokeActive(context1, new LifecycleMessage.ValidateServerStore(defaultStoreName, serverStoreConfiguration)) @@ -492,9 +485,6 @@ public void testClearInvalidationDisconnectionOfBlockingClientTakenIntoAccount() activeEntity.connected(context2.getClientDescriptor()); activeEntity.connected(context3.getClientDescriptor()); - UUID client2Id = UUID.randomUUID(); - UUID client3Id = UUID.randomUUID(); - // attach to the store assertSuccess( activeEntity.invokeActive(context1, new LifecycleMessage.ValidateServerStore(defaultStoreName, serverStoreConfiguration)) @@ -877,37 +867,54 @@ public void testSyncToPassiveBatchedByDefault() throws Exception { @Test public void testDataSyncToPassiveCustomBatchSize() throws Exception { - ClusterTierActiveEntity activeEntity = new ClusterTierActiveEntity(defaultRegistry, defaultConfiguration, DEFAULT_MAPPER); - activeEntity.createNew(); - - TestInvokeContext context = new TestInvokeContext(); - activeEntity.connected(context.getClientDescriptor()); - - - assertSuccess(activeEntity.invokeActive(context, new LifecycleMessage.ValidateServerStore(defaultStoreName, defaultStoreConfiguration))); - - ByteBuffer payload = ByteBuffer.allocate(512); - // Put keys that maps to the same concurrency key - ServerStoreOpMessage.AppendMessage testMessage = new ServerStoreOpMessage.AppendMessage(1L, payload); - activeEntity.invokeActive(context, testMessage); - activeEntity.invokeActive(context, new ServerStoreOpMessage.AppendMessage(-2L, payload)); - activeEntity.invokeActive(context, new ServerStoreOpMessage.AppendMessage(17L, payload)); - activeEntity.invokeActive(context, new ServerStoreOpMessage.AppendMessage(33L, payload)); - System.setProperty(ClusterTierActiveEntity.SYNC_DATA_SIZE_PROP, "512"); - ConcurrencyStrategies.DefaultConcurrencyStrategy concurrencyStrategy = new ConcurrencyStrategies.DefaultConcurrencyStrategy(DEFAULT_MAPPER); - int concurrencyKey = concurrencyStrategy.concurrencyKey(testMessage); try { - @SuppressWarnings("unchecked") - PassiveSynchronizationChannel syncChannel = mock(PassiveSynchronizationChannel.class); - activeEntity.synchronizeKeyToPassive(syncChannel, concurrencyKey); - - verify(syncChannel, atLeast(2)).synchronizeToPassive(any(EhcacheDataSyncMessage.class)); + prepareAndRunActiveEntityForPassiveSync((activeEntity, concurrencyKey) -> { + @SuppressWarnings("unchecked") + PassiveSynchronizationChannel syncChannel = mock(PassiveSynchronizationChannel.class); + activeEntity.synchronizeKeyToPassive(syncChannel, concurrencyKey); + verify(syncChannel, atLeast(2)).synchronizeToPassive(any(EhcacheDataSyncMessage.class)); + }); } finally { System.clearProperty(ClusterTierActiveEntity.SYNC_DATA_SIZE_PROP); } } + @Test + public void testDataSyncToPassiveCustomGets() throws Exception { + System.setProperty(ClusterTierActiveEntity.SYNC_DATA_GETS_PROP, "2"); + try { + prepareAndRunActiveEntityForPassiveSync((activeEntity, concurrencyKey) -> { + @SuppressWarnings("unchecked") + PassiveSynchronizationChannel syncChannel = mock(PassiveSynchronizationChannel.class); + activeEntity.synchronizeKeyToPassive(syncChannel, concurrencyKey); + verify(syncChannel, atLeast(2)).synchronizeToPassive(any(EhcacheDataSyncMessage.class)); + }); + } finally { + System.clearProperty(ClusterTierActiveEntity.SYNC_DATA_GETS_PROP); + } + } + + @Test + public void testDataSyncToPassiveException() throws Exception { + System.setProperty(ClusterTierActiveEntity.SYNC_DATA_GETS_PROP, "1"); + try { + 
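+      // SYNC_DATA_GETS_PROP appears to control how many gets the data sync issues at a time;
+      // it is pinned to 1 here, and destroying the entity first should make those gets fail,
+      // surfacing as a RuntimeException wrapping an ExecutionException (asserted below)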
prepareAndRunActiveEntityForPassiveSync((activeEntity, concurrencyKey) -> { + @SuppressWarnings("unchecked") + PassiveSynchronizationChannel syncChannel = mock(PassiveSynchronizationChannel.class); + activeEntity.destroy(); + try { + activeEntity.synchronizeKeyToPassive(syncChannel, concurrencyKey); + fail("Destroyed entity not expected to sync"); + } catch (RuntimeException e) { + assertThat(e.getCause(), instanceOf(ExecutionException.class)); + } + }); + } finally { + System.clearProperty(ClusterTierActiveEntity.SYNC_DATA_GETS_PROP); + } + } + @Test public void testLoadExistingRecoversInflightInvalidationsForEventualCache() throws Exception { ClusterTierActiveEntity activeEntity = new ClusterTierActiveEntity(defaultRegistry, defaultConfiguration, DEFAULT_MAPPER); @@ -1009,6 +1016,28 @@ public void testActiveMessageTracking() throws Exception { assertThat(actual, sameInstance(expected)); } + private void prepareAndRunActiveEntityForPassiveSync(BiConsumer testConsumer) throws Exception { + ClusterTierActiveEntity activeEntity = new ClusterTierActiveEntity(defaultRegistry, defaultConfiguration, DEFAULT_MAPPER); + activeEntity.createNew(); + + TestInvokeContext context = new TestInvokeContext(); + activeEntity.connected(context.getClientDescriptor()); + + assertSuccess(activeEntity.invokeActive(context, new LifecycleMessage.ValidateServerStore(defaultStoreName, defaultStoreConfiguration))); + + ByteBuffer payload = ByteBuffer.allocate(512); + // Put keys that maps to the same concurrency key + ServerStoreOpMessage.AppendMessage testMessage = new ServerStoreOpMessage.AppendMessage(1L, payload); + activeEntity.invokeActive(context, testMessage); + activeEntity.invokeActive(context, new ServerStoreOpMessage.AppendMessage(-2L, payload)); + activeEntity.invokeActive(context, new ServerStoreOpMessage.AppendMessage(17L, payload)); + activeEntity.invokeActive(context, new ServerStoreOpMessage.AppendMessage(33L, payload)); + + ConcurrencyStrategies.DefaultConcurrencyStrategy concurrencyStrategy = new ConcurrencyStrategies.DefaultConcurrencyStrategy(DEFAULT_MAPPER); + int concurrencyKey = concurrencyStrategy.concurrencyKey(testMessage); + testConsumer.accept(activeEntity, concurrencyKey); + } + private void assertSuccess(EhcacheEntityResponse response) throws Exception { if (!EhcacheResponseType.SUCCESS.equals(response.getResponseType())) { throw ((EhcacheEntityResponse.Failure) response).getCause(); @@ -1029,7 +1058,7 @@ private void assertFailure(EhcacheEntityResponse response, Class entityMessenger, EntityMonitoringService entityMonitoringService, EntityManagementRegistry entityManagementRegistry) { return new ServiceRegistry() { @Override @@ -1046,7 +1075,7 @@ public T getService(final ServiceConfiguration configuration) { } else if (serviceType.isAssignableFrom(EntityManagementRegistry.class)) { return (T) entityManagementRegistry; } else if (serviceType.isAssignableFrom(OOOMessageHandler.class)) { - return (T) new OOOMessageHandlerImpl(message -> true, 1, message -> 0); + return (T) new OOOMessageHandlerImpl<>(message -> true, 1, message -> 0); } throw new AssertionError("Unknown service configuration of type: " + serviceType); } @@ -1112,7 +1141,7 @@ ServerStoreConfigBuilder setValueSerializerType(Class valueSerializerType) { ServerStoreConfiguration build() { return new ServerStoreConfiguration(poolAllocation, storedKeyType, storedValueType, - keySerializerType, valueSerializerType, consistency); + keySerializerType, valueSerializerType, consistency, false, false); } } @@ -1233,8 +1262,8 @@ 
public OffHeapResource getOffHeapResource(OffHeapResourceIdentifier identifier) } else if(serviceConfiguration instanceof EntityManagementRegistryConfiguration) { return null; } else if(serviceConfiguration instanceof OOOMessageHandlerConfiguration) { - OOOMessageHandlerConfiguration oooMessageHandlerConfiguration = (OOOMessageHandlerConfiguration) serviceConfiguration; - return (T) new OOOMessageHandlerImpl(oooMessageHandlerConfiguration.getTrackerPolicy(), + OOOMessageHandlerConfiguration oooMessageHandlerConfiguration = (OOOMessageHandlerConfiguration) serviceConfiguration; + return (T) new OOOMessageHandlerImpl<>(oooMessageHandlerConfiguration.getTrackerPolicy(), oooMessageHandlerConfiguration.getSegments(), oooMessageHandlerConfiguration.getSegmentationStrategy()); } @@ -1294,4 +1323,9 @@ private long getUsed() { return used; } } + + @SuppressWarnings("unchecked") + public static T mock(Class clazz) { + return Mockito.mock((Class) clazz); + } } diff --git a/clustered/server/src/test/java/org/ehcache/clustered/server/store/ClusterTierPassiveEntityTest.java b/clustered/server/src/test/java/org/ehcache/clustered/server/store/ClusterTierPassiveEntityTest.java index 78d685c733..abac774f35 100644 --- a/clustered/server/src/test/java/org/ehcache/clustered/server/store/ClusterTierPassiveEntityTest.java +++ b/clustered/server/src/test/java/org/ehcache/clustered/server/store/ClusterTierPassiveEntityTest.java @@ -20,6 +20,7 @@ import org.ehcache.clustered.common.PoolAllocation; import org.ehcache.clustered.common.ServerSideConfiguration; import org.ehcache.clustered.common.internal.ServerStoreConfiguration; +import org.ehcache.clustered.common.internal.messages.ServerStoreOpMessage; import org.ehcache.clustered.common.internal.store.Chain; import org.ehcache.clustered.common.internal.store.ClusterTierEntityConfiguration; import org.ehcache.clustered.common.internal.store.Util; @@ -34,6 +35,8 @@ import org.terracotta.client.message.tracker.OOOMessageHandlerImpl; import org.terracotta.entity.BasicServiceConfiguration; import org.terracotta.entity.ConfigurationException; +import org.terracotta.entity.EntityMessage; +import org.terracotta.entity.EntityResponse; import org.terracotta.entity.IEntityMessenger; import org.terracotta.entity.ServiceConfiguration; import org.terracotta.entity.ServiceRegistry; @@ -166,6 +169,39 @@ public void testPassiveTracksMessageDuplication() throws Exception { assertThat(passiveEntity.getStateService().getStore(passiveEntity.getStoreIdentifier()).get(2).isEmpty(), is(false)); } + @Test + public void testOversizeReplaceAtHeadMessage() throws Exception { + ClusterTierPassiveEntity passiveEntity = new ClusterTierPassiveEntity(defaultRegistry, defaultConfiguration, DEFAULT_MAPPER); + passiveEntity.createNew(); + TestInvokeContext context = new TestInvokeContext(); + + int key = 2; + + Chain chain = Util.getChain(true, createPayload(1L)); + PassiveReplicationMessage message = new PassiveReplicationMessage.ChainReplicationMessage(key, chain, 2L, 1L, 3L); + passiveEntity.invokePassive(context, message); + + Chain oversizeChain = Util.getChain(true, createPayload(2L, 1024 * 1024)); + ServerStoreOpMessage.ReplaceAtHeadMessage oversizeMsg = new ServerStoreOpMessage.ReplaceAtHeadMessage(key, chain, oversizeChain); + passiveEntity.invokePassive(context, oversizeMsg); + // Should be evicted, the value is oversize. 
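+    // (the 1MB payload presumably exceeds what the passive's store can hold, so replication
+    // recovers by dropping the whole mapping rather than failing; hence the empty-chain check)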
+ assertThat(passiveEntity.getStateService().getStore(passiveEntity.getStoreIdentifier()).get(key).isEmpty(), is(true)); + } + + @Test + public void testOversizeChainReplicationMessage() throws Exception { + ClusterTierPassiveEntity passiveEntity = new ClusterTierPassiveEntity(defaultRegistry, defaultConfiguration, DEFAULT_MAPPER); + passiveEntity.createNew(); + TestInvokeContext context = new TestInvokeContext(); + + long key = 2L; + Chain oversizeChain = Util.getChain(true, createPayload(key, 1024 * 1024)); + PassiveReplicationMessage oversizeMsg = new PassiveReplicationMessage.ChainReplicationMessage(key, oversizeChain, 2L, 1L, (long) 3); + passiveEntity.invokePassive(context, oversizeMsg); + // Should be cleared, the value is oversize. + assertThat(passiveEntity.getStateService().getStore(passiveEntity.getStoreIdentifier()).get(key).isEmpty(), is(true)); + } + /** * Builder for {@link ServerStoreConfiguration} instances. */ @@ -220,7 +256,7 @@ ServerStoreConfigBuilder setValueSerializerType(Class valueSerializerType) { ServerStoreConfiguration build() { return new ServerStoreConfiguration(poolAllocation, storedKeyType, storedValueType, - keySerializerType, valueSerializerType, consistency); + keySerializerType, valueSerializerType, consistency, false, false); } } @@ -320,8 +356,8 @@ public OffHeapResource getOffHeapResource(OffHeapResourceIdentifier identifier) } else if(serviceConfiguration instanceof BasicServiceConfiguration && serviceConfiguration.getServiceType() == IMonitoringProducer.class) { return null; } else if(serviceConfiguration instanceof OOOMessageHandlerConfiguration) { - OOOMessageHandlerConfiguration oooMessageHandlerConfiguration = (OOOMessageHandlerConfiguration) serviceConfiguration; - return (T) new OOOMessageHandlerImpl(oooMessageHandlerConfiguration.getTrackerPolicy(), + OOOMessageHandlerConfiguration oooMessageHandlerConfiguration = (OOOMessageHandlerConfiguration) serviceConfiguration; + return (T) new OOOMessageHandlerImpl<>(oooMessageHandlerConfiguration.getTrackerPolicy(), oooMessageHandlerConfiguration.getSegments(), oooMessageHandlerConfiguration.getSegmentationStrategy()); } diff --git a/clustered/server/src/test/java/org/ehcache/clustered/server/store/InvalidMessage.java b/clustered/server/src/test/java/org/ehcache/clustered/server/store/InvalidMessage.java index 5752ab2eb3..c7e96a02a5 100644 --- a/clustered/server/src/test/java/org/ehcache/clustered/server/store/InvalidMessage.java +++ b/clustered/server/src/test/java/org/ehcache/clustered/server/store/InvalidMessage.java @@ -17,7 +17,5 @@ import org.ehcache.clustered.common.internal.messages.EhcacheEntityMessage; -import java.util.UUID; - public class InvalidMessage extends EhcacheEntityMessage { } diff --git a/clustered/server/src/test/java/org/ehcache/clustered/server/store/LockManagerImplTest.java b/clustered/server/src/test/java/org/ehcache/clustered/server/store/LockManagerImplTest.java new file mode 100644 index 0000000000..6ca4982330 --- /dev/null +++ b/clustered/server/src/test/java/org/ehcache/clustered/server/store/LockManagerImplTest.java @@ -0,0 +1,118 @@ +/* + * Copyright Terracotta, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.ehcache.clustered.server.store; + +import org.ehcache.clustered.server.TestClientDescriptor; +import org.junit.Test; +import org.mockito.ArgumentCaptor; +import org.mockito.Mockito; +import org.terracotta.entity.ClientDescriptor; + +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Consumer; + +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.is; +import static org.junit.Assert.assertThat; +import static org.mockito.ArgumentMatchers.anyList; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; + +public class LockManagerImplTest { + + @Test + public void testLock() { + LockManagerImpl lockManager = new LockManagerImpl(); + ClientDescriptor clientDescriptor = new TestClientDescriptor(); + assertThat(lockManager.lock(1L, clientDescriptor), is(true)); + assertThat(lockManager.lock(1L, clientDescriptor), is(false)); + assertThat(lockManager.lock(2L, clientDescriptor), is(true)); + } + + @Test + public void testUnlock() { + LockManagerImpl lockManager = new LockManagerImpl(); + ClientDescriptor clientDescriptor = new TestClientDescriptor(); + assertThat(lockManager.lock(1L, clientDescriptor), is(true)); + lockManager.unlock(1L); + assertThat(lockManager.lock(1L, clientDescriptor), is(true)); + } + + @Test + @SuppressWarnings("unchecked") + public void testSweepLocksForClient() { + LockManagerImpl lockManager = new LockManagerImpl(); + ClientDescriptor clientDescriptor1 = new TestClientDescriptor(); + ClientDescriptor clientDescriptor2 = new TestClientDescriptor(); + + assertThat(lockManager.lock(1L, clientDescriptor1), is(true)); + assertThat(lockManager.lock(2L, clientDescriptor1), is(true)); + assertThat(lockManager.lock(3L, clientDescriptor1), is(true)); + assertThat(lockManager.lock(4L, clientDescriptor1), is(true)); + assertThat(lockManager.lock(5L, clientDescriptor2), is(true)); + assertThat(lockManager.lock(6L, clientDescriptor2), is(true)); + + AtomicInteger counter = new AtomicInteger(); + + Consumer> consumer = mock(Consumer.class); + + ArgumentCaptor> argumentCaptor = ArgumentCaptor.forClass(List.class); + + doAnswer(invocation -> counter.incrementAndGet()).when(consumer).accept(argumentCaptor.capture()); + + lockManager.sweepLocksForClient(clientDescriptor2, consumer); + + assertThat(counter.get(), is(1)); + + assertThat(argumentCaptor.getValue().size(), is(2)); + assertThat(argumentCaptor.getValue(), containsInAnyOrder(5L, 6L)); + + assertThat(lockManager.lock(5L, clientDescriptor2), is(true)); + assertThat(lockManager.lock(6L, clientDescriptor2), is(true)); + assertThat(lockManager.lock(1L, clientDescriptor1), is(false)); + assertThat(lockManager.lock(2L, clientDescriptor1), is(false)); + assertThat(lockManager.lock(3L, clientDescriptor1), is(false)); + assertThat(lockManager.lock(4L, clientDescriptor1), is(false)); + + } + + @Test + public void testCreateLockStateAfterFailover() { + 
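+    // createLockStateAfterFailover rebuilds the server-side lock table from the set of lock IDs
+    // a client held before failover; afterwards no other client should be able to take those locks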
LockManagerImpl lockManager = new LockManagerImpl(); + + ClientDescriptor clientDescriptor = new TestClientDescriptor(); + + Set locks = new HashSet<>(); + locks.add(1L); + locks.add(100L); + locks.add(1000L); + + lockManager.createLockStateAfterFailover(clientDescriptor, locks); + + ClientDescriptor clientDescriptor1 = new TestClientDescriptor(); + + assertThat(lockManager.lock(100L, clientDescriptor1), is(false)); + assertThat(lockManager.lock(1000L, clientDescriptor1), is(false)); + assertThat(lockManager.lock(1L, clientDescriptor1), is(false)); + + } + +} \ No newline at end of file diff --git a/core-spi-test/build.gradle b/core-spi-test/build.gradle index 60fe9e0e59..5f7ff037f7 100644 --- a/core-spi-test/build.gradle +++ b/core-spi-test/build.gradle @@ -15,12 +15,11 @@ */ dependencies { - compile project(':spi-tester'), project(':core'), "org.hamcrest:hamcrest-library:$hamcrestVersion", "junit:junit:$junitVersion" - compile ("org.mockito:mockito-core:$mockitoVersion") { - exclude group:'org.hamcrest', module:'hamcrest-core' - } + api project(':spi-tester') + implementation project(':core') + implementation project(':impl') + implementation "junit:junit:$junitVersion" + implementation "org.mockito:mockito-core:$mockitoVersion" + implementation "org.hamcrest:hamcrest-library:$hamcrestVersion" } -tasks.withType(JavaCompile) { - options.compilerArgs += ['-Werror'] -} diff --git a/core-spi-test/src/main/java/org/ehcache/internal/store/StoreBulkComputeIfAbsentTest.java b/core-spi-test/src/main/java/org/ehcache/internal/store/StoreBulkComputeIfAbsentTest.java index 7e9b6182d3..1d030950e6 100644 --- a/core-spi-test/src/main/java/org/ehcache/internal/store/StoreBulkComputeIfAbsentTest.java +++ b/core-spi-test/src/main/java/org/ehcache/internal/store/StoreBulkComputeIfAbsentTest.java @@ -16,7 +16,8 @@ package org.ehcache.internal.store; -import org.ehcache.core.spi.store.StoreAccessException; +import org.ehcache.core.exceptions.StorePassThroughException; +import org.ehcache.spi.resilience.StoreAccessException; import org.ehcache.core.spi.store.Store; import org.ehcache.spi.test.After; import org.ehcache.spi.test.Before; @@ -104,7 +105,7 @@ public void missingIterableEntriesAreIgnoredByTheStore() throws Exception { kvStore.bulkComputeIfAbsent(inputKeys, entries -> emptySet()); for (Map.Entry mappedEntry : mappedEntries.entrySet()) { - assertThat(kvStore.get(mappedEntry.getKey()).value(), is(mappedEntry.getValue())); + assertThat(kvStore.get(mappedEntry.getKey()).get(), is(mappedEntry.getValue())); } } catch (StoreAccessException e) { throw new LegalSPITesterException("Warning, an exception is thrown due to the SPI test"); @@ -138,7 +139,7 @@ public void mappingIsSameInTheStoreForEntriesReturnedWithDifferentValueFromMappi }); for (Map.Entry mappedEntry : mappedEntries.entrySet()) { - assertThat(kvStore.get(mappedEntry.getKey()).value(), is(mappedEntry.getValue())); + assertThat(kvStore.get(mappedEntry.getKey()).get(), is(mappedEntry.getValue())); } } catch (StoreAccessException e) { throw new LegalSPITesterException("Warning, an exception is thrown due to the SPI test"); @@ -193,7 +194,7 @@ public void computeValuesForEveryKeyUsingAMappingFunction() throws Exception { }); for (Map.Entry entry : computedEntries.entrySet()) { - assertThat(kvStore.get(entry.getKey()).value(), is(entry.getValue())); + assertThat(kvStore.get(entry.getKey()).get(), is(entry.getValue())); } } catch (StoreAccessException e) { throw new LegalSPITesterException("Warning, an exception is thrown due to the SPI test"); @@ 
-267,4 +268,31 @@ public void testMappingFunctionProducesWrongValueType() throws Exception { throw new LegalSPITesterException("Warning, an exception is thrown due to the SPI test"); } } + + @SPITest + public void exception() throws Exception { + Set<K> inputKeys = Collections.singleton(factory.createKey(0)); + + RuntimeException exception = new RuntimeException("error"); + + try { + kvStore.bulkComputeIfAbsent(inputKeys, entries -> { throw exception; }); + } catch (StoreAccessException e) { + assertThat(e.getCause(), is(exception)); + } + } + + @SPITest + public void passThroughException() throws Exception { + Set<K> inputKeys = Collections.singleton(factory.createKey(0)); + + RuntimeException exception = new RuntimeException("error"); + StorePassThroughException ste = new StorePassThroughException(exception); + + try { + kvStore.bulkComputeIfAbsent(inputKeys, entries -> { throw ste; }); + } catch (RuntimeException e) { + assertThat(e, is(exception)); + } + } } diff --git a/core-spi-test/src/main/java/org/ehcache/internal/store/StoreBulkComputeTest.java b/core-spi-test/src/main/java/org/ehcache/internal/store/StoreBulkComputeTest.java index 4156a33b8e..1ffdf221ff 100644 --- a/core-spi-test/src/main/java/org/ehcache/internal/store/StoreBulkComputeTest.java +++ b/core-spi-test/src/main/java/org/ehcache/internal/store/StoreBulkComputeTest.java @@ -16,7 +16,8 @@ package org.ehcache.internal.store; -import org.ehcache.core.spi.store.StoreAccessException; +import org.ehcache.core.exceptions.StorePassThroughException; +import org.ehcache.spi.resilience.StoreAccessException; import org.ehcache.core.spi.store.Store; import org.ehcache.spi.test.After; import org.ehcache.spi.test.Before; @@ -24,6 +25,7 @@ import org.ehcache.spi.test.SPITest; + +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.Map; @@ -204,7 +206,7 @@ public void computeValuesForEveryKeyUsingARemappingFunction() throws Exception { }); for (K inputKey : inputKeys) { - assertThat(kvStore.get(inputKey).value(), is(computedEntries.get(inputKey))); + assertThat(kvStore.get(inputKey).get(), is(computedEntries.get(inputKey))); } } catch (StoreAccessException e) { throw new LegalSPITesterException("Warning, an exception is thrown due to the SPI test"); @@ -278,4 +280,31 @@ public void remappingFunctionProducesWrongValueType() throws Exception { throw new LegalSPITesterException("Warning, an exception is thrown due to the SPI test"); } } + + @SPITest + public void exception() throws Exception { + Set<K> inputKeys = Collections.singleton(factory.createKey(0)); + + RuntimeException exception = new RuntimeException("error"); + + try { + kvStore.bulkCompute(inputKeys, entries -> { throw exception; }); + } catch (StoreAccessException e) { + assertThat(e.getCause(), is(exception)); + } + } + + @SPITest + public void passThroughException() throws Exception { + Set<K> inputKeys = Collections.singleton(factory.createKey(0)); + + RuntimeException exception = new RuntimeException("error"); + StorePassThroughException ste = new StorePassThroughException(exception); + + try { + kvStore.bulkCompute(inputKeys, entries -> { throw ste; }); + } catch (RuntimeException e) { + assertThat(e, is(exception)); + } + } } diff --git a/core-spi-test/src/main/java/org/ehcache/internal/store/StoreClearTest.java b/core-spi-test/src/main/java/org/ehcache/internal/store/StoreClearTest.java index 1edb4dc6f4..b4f82c43d2 100644 --- a/core-spi-test/src/main/java/org/ehcache/internal/store/StoreClearTest.java +++ 
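// The file below, and most files from here on, carry one mechanical migration:
// StoreAccessException moved out of the internal core SPI into the public resilience SPI.
// Only the import changes; the catch blocks stay the same.
import org.ehcache.spi.resilience.StoreAccessException; // new package
// was: import org.ehcache.core.spi.store.StoreAccessException;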
b/core-spi-test/src/main/java/org/ehcache/internal/store/StoreClearTest.java @@ -16,7 +16,7 @@ package org.ehcache.internal.store; -import org.ehcache.core.spi.store.StoreAccessException; +import org.ehcache.spi.resilience.StoreAccessException; import org.ehcache.core.spi.store.Store; import org.ehcache.spi.test.After; import org.ehcache.spi.test.Before; diff --git a/core-spi-test/src/main/java/org/ehcache/internal/store/StoreCloseTest.java b/core-spi-test/src/main/java/org/ehcache/internal/store/StoreCloseTest.java index b260b712a7..f68e75e377 100644 --- a/core-spi-test/src/main/java/org/ehcache/internal/store/StoreCloseTest.java +++ b/core-spi-test/src/main/java/org/ehcache/internal/store/StoreCloseTest.java @@ -17,7 +17,7 @@ package org.ehcache.internal.store; import org.ehcache.core.spi.store.Store; -import org.ehcache.core.spi.store.StoreAccessException; +import org.ehcache.spi.resilience.StoreAccessException; import org.ehcache.spi.test.After; import org.ehcache.spi.test.Before; import org.ehcache.spi.test.SPITest; diff --git a/core-spi-test/src/main/java/org/ehcache/internal/store/StoreComputeIfAbsentTest.java b/core-spi-test/src/main/java/org/ehcache/internal/store/StoreComputeIfAbsentTest.java index 12de90867b..3017f59bcd 100644 --- a/core-spi-test/src/main/java/org/ehcache/internal/store/StoreComputeIfAbsentTest.java +++ b/core-spi-test/src/main/java/org/ehcache/internal/store/StoreComputeIfAbsentTest.java @@ -15,18 +15,19 @@ */ package org.ehcache.internal.store; -import org.ehcache.core.spi.store.StoreAccessException; +import org.ehcache.config.builders.ExpiryPolicyBuilder; +import org.ehcache.core.exceptions.StorePassThroughException; +import org.ehcache.spi.resilience.StoreAccessException; import org.ehcache.core.spi.store.Store; -import org.ehcache.expiry.Duration; -import org.ehcache.expiry.Expirations; +import org.ehcache.expiry.ExpiryPolicy; import org.ehcache.internal.TestTimeSource; import org.ehcache.spi.test.After; import org.ehcache.spi.test.LegalSPITesterException; import org.ehcache.spi.test.SPITest; +import java.time.Duration; import java.util.concurrent.atomic.AtomicBoolean; -import java.util.function.Function; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.is; @@ -40,7 +41,6 @@ public StoreComputeIfAbsentTest(StoreFactory factory) { } protected Store kvStore; - protected Store kvStore2; @After public void tearDown() { @@ -48,12 +48,6 @@ public void tearDown() { factory.close(kvStore); kvStore = null; } - if (kvStore2 != null) { - @SuppressWarnings("unchecked") - Store kvStore2 = (Store) this.kvStore2; - factory.close(kvStore2); - this.kvStore2 = null; - } } @SPITest @@ -91,7 +85,7 @@ public void testWrongReturnValueType() throws Exception { @SPITest @SuppressWarnings("unchecked") public void testWrongKeyType() throws Exception { - kvStore2 = factory.newStore(); + kvStore = factory.newStore(); if (factory.getKeyType() == Object.class) { System.err.println("Warning, store uses Object as key type, cannot verify in this configuration"); @@ -107,7 +101,7 @@ public void testWrongKeyType() throws Exception { try { // wrong key type - kvStore2.computeIfAbsent(badKey, key -> { + kvStore.computeIfAbsent((K) badKey, key -> { throw new AssertionError(); }); throw new AssertionError(); @@ -128,7 +122,7 @@ public void testComputePutsValueInStoreWhenKeyIsAbsent() throws Exception { assertThat(kvStore.get(key), nullValue()); try { kvStore.computeIfAbsent(key, keyParam -> value); - assertThat(kvStore.get(key).value(), is(value)); + 
assertThat(kvStore.get(key).get(), is(value)); } catch (StoreAccessException e) { throw new LegalSPITesterException("Warning, an exception is thrown due to the SPI test"); } @@ -151,7 +145,7 @@ public void testFunctionNotInvokedWhenPresent() throws Exception { } catch (StoreAccessException e) { throw new LegalSPITesterException("Warning, an exception is thrown due to the SPI test"); } - assertThat(kvStore.get(key).value(), is(value)); + assertThat(kvStore.get(key).get(), is(value)); } @SPITest @@ -179,28 +173,42 @@ public void testFunctionReturnsNull() throws Exception { public void testException() throws Exception { kvStore = factory.newStore(); - final K key = factory.createKey(1L); - - assertThat(kvStore.get(key), nullValue()); + K key = factory.createKey(1L); - final RuntimeException re = new RuntimeException(); + RuntimeException re = new RuntimeException(); try { kvStore.computeIfAbsent(key, keyParam -> { throw re; }); - } catch (RuntimeException e) { - assertThat(e, is(re)); } catch (StoreAccessException e) { - throw new LegalSPITesterException("Warning, an exception is thrown due to the SPI test"); + assertThat(e.getCause(), is(re)); } assertThat(kvStore.get(key), nullValue()); } + @SPITest + public void testStorePassThroughException() throws Exception { + kvStore = factory.newStore(); + + K key = factory.createKey(1L); + + RuntimeException exception = new RuntimeException("error"); + StorePassThroughException re = new StorePassThroughException(exception); + + try { + kvStore.computeIfAbsent(key, keyParam -> { + throw re; + }); + } catch (RuntimeException e) { + assertThat(e, is(exception)); + } + } + @SPITest public void testComputeIfAbsentValuePresentExpiresOnAccess() throws LegalSPITesterException { TestTimeSource timeSource = new TestTimeSource(10043L); - kvStore = factory.newStoreWithExpiry(Expirations.builder().setAccess(Duration.ZERO).build(), timeSource); + kvStore = factory.newStoreWithExpiry(ExpiryPolicyBuilder.expiry().access(Duration.ZERO).build(), timeSource); K key = factory.createKey(250928L); V value = factory.createValue(2059820L); @@ -212,7 +220,7 @@ public void testComputeIfAbsentValuePresentExpiresOnAccess() throws LegalSPITest fail("Should not be invoked"); return newValue; }); - assertThat(result.value(), is(value)); + assertThat(result.get(), is(value)); } catch (StoreAccessException e) { throw new LegalSPITesterException("Warning, an exception is thrown due to the SPI test"); } diff --git a/core-spi-test/src/main/java/org/ehcache/internal/store/StoreContainsKeyTest.java b/core-spi-test/src/main/java/org/ehcache/internal/store/StoreContainsKeyTest.java index a55f4109a1..695843a5c0 100644 --- a/core-spi-test/src/main/java/org/ehcache/internal/store/StoreContainsKeyTest.java +++ b/core-spi-test/src/main/java/org/ehcache/internal/store/StoreContainsKeyTest.java @@ -17,7 +17,7 @@ package org.ehcache.internal.store; import org.ehcache.core.spi.store.Store; -import org.ehcache.core.spi.store.StoreAccessException; +import org.ehcache.spi.resilience.StoreAccessException; import org.ehcache.spi.test.After; import org.ehcache.spi.test.LegalSPITesterException; import org.ehcache.spi.test.SPITest; @@ -39,7 +39,6 @@ public StoreContainsKeyTest(final StoreFactory factory) { } protected Store kvStore; - protected Store kvStore2; @After public void tearDown() { @@ -47,12 +46,6 @@ public void tearDown() { factory.close(kvStore); kvStore = null; } - if (kvStore2 != null) { - @SuppressWarnings("unchecked") - Store kvStore2 = this.kvStore2; - factory.close(kvStore2); - 
this.kvStore2 = null; - } } @SPITest @@ -93,13 +86,13 @@ public void nullKeyThrowsException() @SuppressWarnings("unchecked") public void wrongKeyTypeThrowsException() throws IllegalAccessException, InstantiationException, LegalSPITesterException { - kvStore2 = factory.newStore(); + kvStore = factory.newStore(); try { if (this.factory.getKeyType() == String.class) { - kvStore2.containsKey(1.0f); + kvStore.containsKey((K) (Float) 1.0f); } else { - kvStore2.containsKey("key"); + kvStore.containsKey((K) "key"); } throw new AssertionError("Expected ClassCastException because the key is of the wrong type"); } catch (ClassCastException e) { diff --git a/core-spi-test/src/main/java/org/ehcache/internal/store/StoreCreationEventListenerTest.java b/core-spi-test/src/main/java/org/ehcache/internal/store/StoreCreationEventListenerTest.java index b3f9c79a0c..e8bd272ac7 100644 --- a/core-spi-test/src/main/java/org/ehcache/internal/store/StoreCreationEventListenerTest.java +++ b/core-spi-test/src/main/java/org/ehcache/internal/store/StoreCreationEventListenerTest.java @@ -17,7 +17,7 @@ package org.ehcache.internal.store; import org.ehcache.event.EventType; -import org.ehcache.core.spi.store.StoreAccessException; +import org.ehcache.spi.resilience.StoreAccessException; import org.ehcache.core.spi.store.Store; import org.ehcache.core.spi.store.events.StoreEvent; import org.ehcache.core.spi.store.events.StoreEventListener; @@ -29,9 +29,6 @@ import org.hamcrest.Matcher; import org.hamcrest.TypeSafeMatcher; -import java.util.function.BiFunction; -import java.util.function.Function; - import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; import static org.mockito.hamcrest.MockitoHamcrest.argThat; @@ -78,7 +75,7 @@ public void testPutIfAbsentCreates() throws LegalSPITesterException { StoreEventListener listener = addListener(store); try { - store.putIfAbsent(factory.createKey(42L), factory.createValue(42L)); + store.putIfAbsent(factory.createKey(42L), factory.createValue(42L), b -> {}); verifyListenerInteractions(listener); } catch (StoreAccessException e) { throw new LegalSPITesterException("Warning, an exception is thrown due to the SPI test"); @@ -90,7 +87,7 @@ public void testComputeCreates() throws LegalSPITesterException { StoreEventListener listener = addListener(store); try { - store.compute(factory.createKey(125L), (k, v) -> factory.createValue(215L)); + store.getAndCompute(factory.createKey(125L), (k, v) -> factory.createValue(215L)); verifyListenerInteractions(listener); } catch (StoreAccessException e) { throw new LegalSPITesterException("Warning, an exception is thrown due to the SPI test"); diff --git a/core-spi-test/src/main/java/org/ehcache/internal/store/StoreEvictionEventListenerTest.java b/core-spi-test/src/main/java/org/ehcache/internal/store/StoreEvictionEventListenerTest.java index 07c29dde4d..bdfcd0bf98 100644 --- a/core-spi-test/src/main/java/org/ehcache/internal/store/StoreEvictionEventListenerTest.java +++ b/core-spi-test/src/main/java/org/ehcache/internal/store/StoreEvictionEventListenerTest.java @@ -19,7 +19,7 @@ import org.ehcache.Cache; import org.ehcache.core.spi.store.Store; import org.ehcache.event.EventType; -import org.ehcache.core.spi.store.StoreAccessException; +import org.ehcache.spi.resilience.StoreAccessException; import org.ehcache.core.spi.store.events.StoreEvent; import org.ehcache.core.spi.store.events.StoreEventListener; import org.ehcache.spi.test.After; @@ -27,9 +27,6 @@ import org.ehcache.spi.test.SPITest; import org.hamcrest.Matcher; 
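// Assumed shape of the two Store API changes the event-listener tests here and below switch
// to, paraphrased from how the updated tests call them (hypothetical snippet, not the actual
// interface source): putIfAbsent gains a Consumer<Boolean> told whether the put actually
// happened (the tests pass a no-op `b -> {}`), and compute(key, fn) becomes
// getAndCompute(key, fn), returning the holder of the previous value (null when the mapping
// was absent) rather than the new one.
interface StoreSketch<K, V> {
  Store.ValueHolder<V> putIfAbsent(K key, V value, java.util.function.Consumer<Boolean> put)
      throws StoreAccessException;
  Store.ValueHolder<V> getAndCompute(K key, java.util.function.BiFunction<? super K, ? super V, ? extends V> fn)
      throws StoreAccessException;
}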
-import java.util.function.BiFunction; -import java.util.function.Function; - import static org.ehcache.internal.store.StoreCreationEventListenerTest.eventType; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.is; @@ -48,10 +45,10 @@ public StoreEvictionEventListenerTest(StoreFactory factory) { } final K k = factory.createKey(1L); - final V v = factory.createValue(1l); + final V v = factory.createValue(1L); final K k2 = factory.createKey(2L); - final V v2 = factory.createValue(2l); - final V v3 = factory.createValue(3l); + final V v2 = factory.createValue(2L); + final V v3 = factory.createValue(3L); protected Store kvStore; @@ -77,7 +74,7 @@ public void testPutIfAbsentOnEviction() throws Exception { kvStore = factory.newStoreWithCapacity(1L); kvStore.put(k, v); StoreEventListener listener = addListener(kvStore); - kvStore.putIfAbsent(k2, v2); + kvStore.putIfAbsent(k2, v2, b -> {}); verifyListenerInteractions(listener); } @@ -92,7 +89,7 @@ public void testReplaceTwoArgsOnEviction() throws Exception { kvStore.put(k2, v2); verifyListenerInteractions(listener); kvStore.replace(getOnlyKey(kvStore.iterator()), v3); - assertThat(kvStore.get(getOnlyKey(kvStore.iterator())).value(), is(v3)); + assertThat(kvStore.get(getOnlyKey(kvStore.iterator())).get(), is(v3)); } @SPITest @@ -100,7 +97,7 @@ public void testComputeOnEviction() throws Exception { kvStore = factory.newStoreWithCapacity(1L); kvStore.put(k, v); StoreEventListener listener = addListener(kvStore); - kvStore.compute(k2, (mappedKey, mappedValue) -> v2); + kvStore.getAndCompute(k2, (mappedKey, mappedValue) -> v2); verifyListenerInteractions(listener); } diff --git a/core-spi-test/src/main/java/org/ehcache/internal/store/StoreExpiryEventListenerTest.java b/core-spi-test/src/main/java/org/ehcache/internal/store/StoreExpiryEventListenerTest.java index b2605a02df..c450f2e439 100644 --- a/core-spi-test/src/main/java/org/ehcache/internal/store/StoreExpiryEventListenerTest.java +++ b/core-spi-test/src/main/java/org/ehcache/internal/store/StoreExpiryEventListenerTest.java @@ -16,23 +16,20 @@ package org.ehcache.internal.store; +import org.ehcache.config.builders.ExpiryPolicyBuilder; import org.ehcache.core.spi.store.Store; import org.ehcache.core.spi.store.Store.RemoveStatus; import org.ehcache.core.spi.store.Store.ReplaceStatus; import org.ehcache.event.EventType; import org.ehcache.core.spi.store.events.StoreEvent; import org.ehcache.core.spi.store.events.StoreEventListener; -import org.ehcache.expiry.Duration; -import org.ehcache.expiry.Expirations; import org.ehcache.internal.TestTimeSource; import org.ehcache.spi.test.After; import org.ehcache.spi.test.Before; import org.ehcache.spi.test.SPITest; import org.hamcrest.Matcher; -import java.util.concurrent.TimeUnit; -import java.util.function.BiFunction; -import java.util.function.Function; +import java.time.Duration; import static org.ehcache.internal.store.StoreCreationEventListenerTest.eventType; import static org.hamcrest.Matchers.*; @@ -54,15 +51,15 @@ public StoreExpiryEventListenerTest(StoreFactory factory) { } final K k = factory.createKey(1L); - final V v = factory.createValue(1l); - final V v2 = factory.createValue(2l); + final V v = factory.createValue(1L); + final V v2 = factory.createValue(2L); protected Store kvStore; @Before public void setUp() { timeSource = new TestTimeSource(); - kvStore = factory.newStoreWithExpiry(Expirations.timeToLiveExpiration(new Duration(1, TimeUnit.MILLISECONDS)), timeSource); + kvStore = 
factory.newStoreWithExpiry(ExpiryPolicyBuilder.timeToLiveExpiration(Duration.ofMillis(1)), timeSource); } @After @@ -95,7 +92,7 @@ public void testPutIfAbsentOnExpiration() throws Exception { kvStore.put(k, v); StoreEventListener listener = addListener(kvStore); timeSource.advanceTime(1); - assertThat(kvStore.putIfAbsent(k, v), is(nullValue())); + assertThat(kvStore.putIfAbsent(k, v, b -> {}), is(nullValue())); verifyListenerInteractions(listener); } @@ -136,11 +133,12 @@ public void testReplaceThreeArgsOnExpiration() throws Exception { } @SPITest - public void testComputeOnExpiration() throws Exception { + public void testGetAndComputeOnExpiration() throws Exception { kvStore.put(k, v); StoreEventListener listener = addListener(kvStore); timeSource.advanceTime(1); - assertThat(kvStore.compute(k, (mappedKey, mappedValue) -> v2).value(), is(v2)); + assertThat(kvStore.getAndCompute(k, (mappedKey, mappedValue) -> v2), nullValue()); + assertThat(kvStore.get(k).get(), is(v2)); verifyListenerInteractions(listener); } @@ -150,7 +148,7 @@ public void testComputeIfAbsentOnExpiration() throws Exception { StoreEventListener listener = addListener(kvStore); timeSource.advanceTime(1); - assertThat(kvStore.computeIfAbsent(k, mappedKey -> v2).value(), is(v2)); + assertThat(kvStore.computeIfAbsent(k, mappedKey -> v2).get(), is(v2)); verifyListenerInteractions(listener); } diff --git a/core-spi-test/src/main/java/org/ehcache/internal/store/StoreFactory.java b/core-spi-test/src/main/java/org/ehcache/internal/store/StoreFactory.java index 1cf75f0100..c2133a97ed 100644 --- a/core-spi-test/src/main/java/org/ehcache/internal/store/StoreFactory.java +++ b/core-spi-test/src/main/java/org/ehcache/internal/store/StoreFactory.java @@ -17,8 +17,8 @@ package org.ehcache.internal.store; import org.ehcache.config.EvictionAdvisor; -import org.ehcache.expiry.Expiry; import org.ehcache.core.spi.time.TimeSource; +import org.ehcache.expiry.ExpiryPolicy; import org.ehcache.spi.service.ServiceProvider; import org.ehcache.core.spi.store.Store; import org.ehcache.spi.service.Service; @@ -35,7 +35,7 @@ public interface StoreFactory { Store newStoreWithEvictionAdvisor(EvictionAdvisor evictionAdvisor); - Store newStoreWithExpiry(Expiry expiry, TimeSource timeSource); + Store newStoreWithExpiry(ExpiryPolicy expiry, TimeSource timeSource); Store.ValueHolder newValueHolder(V value); diff --git a/core-spi-test/src/main/java/org/ehcache/internal/store/StoreComputeTest.java b/core-spi-test/src/main/java/org/ehcache/internal/store/StoreGetAndComputeTest.java similarity index 58% rename from core-spi-test/src/main/java/org/ehcache/internal/store/StoreComputeTest.java rename to core-spi-test/src/main/java/org/ehcache/internal/store/StoreGetAndComputeTest.java index 5b2fba1e72..5c65c5153b 100644 --- a/core-spi-test/src/main/java/org/ehcache/internal/store/StoreComputeTest.java +++ b/core-spi-test/src/main/java/org/ehcache/internal/store/StoreGetAndComputeTest.java @@ -15,12 +15,14 @@ */ package org.ehcache.internal.store; -import org.ehcache.core.spi.store.StoreAccessException; +import org.ehcache.config.builders.ExpiryPolicyBuilder; +import org.ehcache.core.exceptions.StorePassThroughException; +import org.ehcache.spi.resilience.StoreAccessException; import org.ehcache.core.spi.store.Store; -import org.ehcache.expiry.Duration; -import org.ehcache.expiry.Expirations; +import org.ehcache.expiry.ExpiryPolicy; import org.ehcache.internal.TestTimeSource; import org.ehcache.spi.test.After; +import org.ehcache.spi.test.Ignore; import 
org.ehcache.spi.test.LegalSPITesterException; import org.ehcache.spi.test.SPITest; @@ -31,17 +33,16 @@ import static org.hamcrest.Matchers.nullValue; import org.junit.Assert; +import java.time.Duration; import java.util.function.BiFunction; -import java.util.function.Supplier; -public class StoreComputeTest extends SPIStoreTester { +public class StoreGetAndComputeTest extends SPIStoreTester { - public StoreComputeTest(StoreFactory factory) { + public StoreGetAndComputeTest(StoreFactory factory) { super(factory); } protected Store kvStore; - protected Store kvStore2; @After public void tearDown() { @@ -49,12 +50,6 @@ public void tearDown() { factory.close(kvStore); kvStore = null; } - if (kvStore2 != null) { - @SuppressWarnings("unchecked") - Store kvStore2 = this.kvStore2; - factory.close(kvStore2); - this.kvStore2 = null; - } } @SuppressWarnings("unchecked") @@ -75,7 +70,7 @@ public void testWrongReturnValueType() throws Exception { final K key = factory.createKey(13); try { - kvStore.compute(key, (BiFunction) (key1, oldValue) -> { + kvStore.getAndCompute(key, (BiFunction) (key1, oldValue) -> { return value; // returning wrong value type from function }); throw new AssertionError(); @@ -89,7 +84,7 @@ public void testWrongReturnValueType() throws Exception { @SuppressWarnings("unchecked") @SPITest public void testWrongKeyType() throws Exception { - kvStore2 = factory.newStore(); + kvStore = factory.newStore(); if (factory.getKeyType() == Object.class) { System.err.println("Warning, store uses Object as key type, cannot verify in this configuration"); @@ -105,7 +100,7 @@ public void testWrongKeyType() throws Exception { try { // wrong key type - kvStore2.compute(key, (key1, oldValue) -> { + kvStore.getAndCompute((K) key, (key1, oldValue) -> { throw new AssertionError(); }); throw new AssertionError(); @@ -124,15 +119,16 @@ public void testComputePutsValueInStore() throws Exception { final V value = factory.createValue(153); try { - kvStore.compute(key, (keyParam, oldValue) -> value); - assertThat(kvStore.get(key).value(), is(value)); + Store.ValueHolder compute = kvStore.getAndCompute(key, (keyParam, oldValue) -> value); + assertThat(kvStore.get(key).get(), is(value)); + assertThat(compute, nullValue()); } catch (StoreAccessException e) { throw new LegalSPITesterException("Warning, an exception is thrown due to the SPI test"); } } @SPITest - public void testOverwriteExitingValue() throws Exception { + public void testOverwriteExistingValue() throws Exception { kvStore = factory.newStore(); final K key = factory.createKey(151); @@ -143,8 +139,9 @@ public void testOverwriteExitingValue() throws Exception { try { kvStore.put(key, value); - kvStore.compute(key, (keyParam, oldValue) -> value2); - assertThat(kvStore.get(key).value(), is(value2)); + Store.ValueHolder compute = kvStore.getAndCompute(key, (keyParam, oldValue) -> value2); + assertThat(kvStore.get(key).get(), is(value2)); + assertThat(compute.get(), is(value)); } catch (StoreAccessException e) { throw new LegalSPITesterException("Warning, an exception is thrown due to the SPI test"); } @@ -159,8 +156,9 @@ public void testNullReturnRemovesEntry() throws Exception { try { kvStore.put(key, value); - kvStore.compute(key, (keyParam, oldValue) -> null); + Store.ValueHolder compute = kvStore.getAndCompute(key, (keyParam, oldValue) -> null); assertThat(kvStore.get(key), nullValue()); + assertThat(compute.get(), is(value)); } catch (StoreAccessException e) { throw new LegalSPITesterException("Warning, an exception is thrown due to the SPI 
test"); } @@ -177,9 +175,9 @@ public void testException() throws Exception { try { kvStore.put(key, value); - assertThat(kvStore.get(key).value(), is(value)); + assertThat(kvStore.get(key).get(), is(value)); - kvStore.compute(key, (keyParam, oldValue) -> { + kvStore.getAndCompute(key, (keyParam, oldValue) -> { throw re; }); } catch (RuntimeException e) { @@ -188,13 +186,81 @@ public void testException() throws Exception { throw new LegalSPITesterException("Warning, an exception is thrown due to the SPI test"); } - assertThat(kvStore.get(key).value(), is(value)); + assertThat(kvStore.get(key).get(), is(value)); } + @SPITest + public void testStorePassThroughException() throws Exception { + kvStore = factory.newStore(); + + K key = factory.createKey(520928098); + V value = factory.createValue(15098209865L); + + RuntimeException exception = new RuntimeException("error"); + StorePassThroughException re = new StorePassThroughException(exception); + + try { + kvStore.put(key, value); + assertThat(kvStore.get(key).get(), is(value)); + + kvStore.getAndCompute(key, (keyParam, oldValue) -> { + throw re; + }); + } catch (RuntimeException e) { + assertThat(e, is(exception)); + } + + assertThat(kvStore.get(key).get(), is(value)); + } + + @SPITest + public void testExceptionOnSupplier() throws Exception { + kvStore = factory.newStore(); + + K key = factory.createKey(520928098); + V value = factory.createValue(15098209865L); + + RuntimeException re = new RuntimeException(); + + try { + kvStore.put(key, value); + assertThat(kvStore.get(key).get(), is(value)); + + kvStore.computeAndGet(key, (keyParam, oldValue) -> oldValue, () -> { throw re; }, () -> false); + } catch (StoreAccessException e) { + assertThat(e.getCause(), is(re)); + } + + assertThat(kvStore.get(key).get(), is(value)); + } + + @SPITest + public void testPassThroughExceptionOnSupplier() throws Exception { + kvStore = factory.newStore(); + + K key = factory.createKey(520928098); + V value = factory.createValue(15098209865L); + + RuntimeException exception = new RuntimeException("error"); + StorePassThroughException re = new StorePassThroughException(exception); + + try { + kvStore.put(key, value); + assertThat(kvStore.get(key).get(), is(value)); + + kvStore.computeAndGet(key, (keyParam, oldValue) -> oldValue, () -> { throw re; }, () -> false); + } catch (RuntimeException e) { + assertThat(e, is(exception)); + } + + assertThat(kvStore.get(key).get(), is(value)); + } + + @Ignore @SPITest public void testComputeExpiresOnAccess() throws Exception { TestTimeSource timeSource = new TestTimeSource(10042L); - kvStore = factory.newStoreWithExpiry(Expirations.builder().setAccess(Duration.ZERO).build(), timeSource); + kvStore = factory.newStoreWithExpiry(ExpiryPolicyBuilder.expiry().access(Duration.ZERO).build(), timeSource); final K key = factory.createKey(1042L); final V value = factory.createValue(1340142L); @@ -202,8 +268,9 @@ public void testComputeExpiresOnAccess() throws Exception { try { kvStore.put(key, value); - Store.ValueHolder result = kvStore.compute(key, (k, v) -> v, () -> false); - assertThat(result.value(), is(value)); + Store.ValueHolder result = kvStore.getAndCompute(key, (k, v) -> v); + assertThat(result.get(), is(value)); + assertThat(kvStore.get(key), nullValue()); } catch (StoreAccessException e) { throw new LegalSPITesterException("Warning, an exception is thrown due to the SPI test"); } @@ -212,7 +279,7 @@ public void testComputeExpiresOnAccess() throws Exception { @SPITest public void testComputeExpiresOnUpdate() throws 
Exception { TestTimeSource timeSource = new TestTimeSource(10042L); - kvStore = factory.newStoreWithExpiry(Expirations.builder().setUpdate(Duration.ZERO).build(), timeSource); + kvStore = factory.newStoreWithExpiry(ExpiryPolicyBuilder.expiry().update(Duration.ZERO).build(), timeSource); final K key = factory.createKey(1042L); final V value = factory.createValue(1340142L); @@ -221,8 +288,9 @@ public void testComputeExpiresOnUpdate() throws Exception { try { kvStore.put(key, value); - Store.ValueHolder result = kvStore.compute(key, (k, v) -> newValue, () -> false); - assertThat(result.value(), is(newValue)); + Store.ValueHolder result = kvStore.getAndCompute(key, (k, v) -> newValue); + assertThat(result.get(), is(value)); + assertThat(kvStore.get(key), nullValue()); } catch (StoreAccessException e) { throw new LegalSPITesterException("Warning, an exception is thrown due to the SPI test"); } diff --git a/core-spi-test/src/main/java/org/ehcache/internal/store/StoreGetTest.java b/core-spi-test/src/main/java/org/ehcache/internal/store/StoreGetTest.java index 198cb048a6..abdc1f1a38 100644 --- a/core-spi-test/src/main/java/org/ehcache/internal/store/StoreGetTest.java +++ b/core-spi-test/src/main/java/org/ehcache/internal/store/StoreGetTest.java @@ -16,18 +16,18 @@ package org.ehcache.internal.store; -import org.ehcache.ValueSupplier; +import org.ehcache.config.builders.ExpiryPolicyBuilder; import org.ehcache.core.spi.store.Store; -import org.ehcache.core.spi.store.StoreAccessException; -import org.ehcache.expiry.Duration; -import org.ehcache.expiry.Expirations; -import org.ehcache.expiry.Expiry; +import org.ehcache.expiry.ExpiryPolicy; +import org.ehcache.spi.resilience.StoreAccessException; import org.ehcache.internal.TestTimeSource; import org.ehcache.spi.test.After; import org.ehcache.spi.test.LegalSPITesterException; import org.ehcache.spi.test.SPITest; +import java.time.Duration; + import static org.ehcache.core.spi.store.Store.ValueHolder; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.equalTo; @@ -50,7 +50,6 @@ public StoreGetTest(final StoreFactory factory) { } protected Store kvStore; - protected Store kvStore2; @After public void tearDown() { @@ -58,12 +57,6 @@ public void tearDown() { factory.close(kvStore); kvStore = null; } - if (kvStore2 != null) { - @SuppressWarnings("unchecked") - Store kvStore2 = this.kvStore2; - factory.close(kvStore2); - this.kvStore2 = null; - } } @SPITest @@ -108,7 +101,7 @@ public void existingKeyMappedInStoreReturnsCorrectValueHolder() kvStore.put(key, value); try { - assertThat(kvStore.get(key).value(), is(equalTo(value))); + assertThat(kvStore.get(key).get(), is(equalTo(value))); } catch (StoreAccessException e) { throw new LegalSPITesterException("Warning, an exception is thrown due to the SPI test"); } @@ -135,13 +128,13 @@ public void nullKeyThrowsException() @SuppressWarnings({ "unchecked", "rawtypes" }) public void wrongKeyTypeThrowsException() throws IllegalAccessException, InstantiationException, LegalSPITesterException { - kvStore2 = factory.newStore(); + kvStore = factory.newStore(); try { if (this.factory.getKeyType() == String.class) { - kvStore2.get(1.0f); + kvStore.get((K) (Float) 1.0f); } else { - kvStore2.get("key"); + kvStore.get((K) "key"); } throw new AssertionError("Expected ClassCastException because the key is of the wrong type"); } catch (ClassCastException e) { @@ -154,14 +147,15 @@ public void wrongKeyTypeThrowsException() @SPITest public void testGetExpiresOnAccess() throws 
LegalSPITesterException { TestTimeSource timeSource = new TestTimeSource(10043L); - kvStore = factory.newStoreWithExpiry(Expirations.builder().setAccess(Duration.ZERO).build(), timeSource); + kvStore = factory.newStoreWithExpiry(ExpiryPolicyBuilder.expiry() + .access(Duration.ZERO).build(), timeSource); K key = factory.createKey(250928L); V value = factory.createValue(2059820L); try { kvStore.put(key, value); - assertThat(kvStore.get(key).value(), is(value)); + assertThat(kvStore.get(key).get(), is(value)); } catch (StoreAccessException e) { throw new LegalSPITesterException("Warning, an exception is thrown due to the SPI test"); } diff --git a/core-spi-test/src/main/java/org/ehcache/internal/store/StoreIteratorHasNextTest.java b/core-spi-test/src/main/java/org/ehcache/internal/store/StoreIteratorHasNextTest.java index b319599d12..0116bd3bd2 100644 --- a/core-spi-test/src/main/java/org/ehcache/internal/store/StoreIteratorHasNextTest.java +++ b/core-spi-test/src/main/java/org/ehcache/internal/store/StoreIteratorHasNextTest.java @@ -18,7 +18,7 @@ import org.ehcache.Cache; import org.ehcache.core.spi.store.Store; -import org.ehcache.core.spi.store.StoreAccessException; +import org.ehcache.spi.resilience.StoreAccessException; import org.ehcache.spi.test.After; import org.ehcache.spi.test.LegalSPITesterException; import org.ehcache.spi.test.SPITest; diff --git a/core-spi-test/src/main/java/org/ehcache/internal/store/StoreIteratorNextTest.java b/core-spi-test/src/main/java/org/ehcache/internal/store/StoreIteratorNextTest.java index 322da98667..2cbcbbe1c4 100644 --- a/core-spi-test/src/main/java/org/ehcache/internal/store/StoreIteratorNextTest.java +++ b/core-spi-test/src/main/java/org/ehcache/internal/store/StoreIteratorNextTest.java @@ -17,7 +17,7 @@ package org.ehcache.internal.store; import org.ehcache.Cache; -import org.ehcache.core.spi.store.StoreAccessException; +import org.ehcache.spi.resilience.StoreAccessException; import org.ehcache.core.spi.store.Store; import org.ehcache.spi.test.After; import org.ehcache.spi.test.LegalSPITesterException; @@ -66,7 +66,7 @@ public void nextReturnsNextElement() try { Cache.Entry> entry = iterator.next(); assertThat(entry.getKey(), is(equalTo(key))); - assertThat(entry.getValue().value(), is(equalTo(value))); + assertThat(entry.getValue().get(), is(equalTo(value))); } catch (StoreAccessException e) { throw new LegalSPITesterException("Warning, an exception is thrown due to the SPI test"); } diff --git a/core-spi-test/src/main/java/org/ehcache/internal/store/StoreIteratorTest.java b/core-spi-test/src/main/java/org/ehcache/internal/store/StoreIteratorTest.java index 3fabd91235..5843989e53 100644 --- a/core-spi-test/src/main/java/org/ehcache/internal/store/StoreIteratorTest.java +++ b/core-spi-test/src/main/java/org/ehcache/internal/store/StoreIteratorTest.java @@ -17,7 +17,7 @@ package org.ehcache.internal.store; import org.ehcache.Cache; -import org.ehcache.core.spi.store.StoreAccessException; +import org.ehcache.spi.resilience.StoreAccessException; import org.ehcache.core.spi.store.Store; import org.ehcache.spi.test.After; import org.ehcache.spi.test.Before; @@ -80,7 +80,7 @@ public void iterableContainsValuesInAnyOrder() while (iterator.hasNext()) { Cache.Entry> nextEntry = iterator.next(); keys.add(nextEntry.getKey()); - values.add(nextEntry.getValue().value()); + values.add(nextEntry.getValue().get()); } assertThat(keys, containsInAnyOrder(equalTo(key1), equalTo(key2), equalTo(key3))); assertThat(values, containsInAnyOrder(equalTo(value1), 
equalTo(value2), equalTo(value3))); diff --git a/core-spi-test/src/main/java/org/ehcache/internal/store/StorePutIfAbsentTest.java b/core-spi-test/src/main/java/org/ehcache/internal/store/StorePutIfAbsentTest.java index 6e2e4a5b36..bb5abe5102 100644 --- a/core-spi-test/src/main/java/org/ehcache/internal/store/StorePutIfAbsentTest.java +++ b/core-spi-test/src/main/java/org/ehcache/internal/store/StorePutIfAbsentTest.java @@ -16,25 +16,25 @@ package org.ehcache.internal.store; -import org.ehcache.ValueSupplier; +import org.ehcache.config.builders.ExpiryPolicyBuilder; import org.ehcache.core.spi.store.Store; -import org.ehcache.core.spi.store.StoreAccessException; -import org.ehcache.expiry.Duration; -import org.ehcache.expiry.Expirations; -import org.ehcache.expiry.Expiry; +import org.ehcache.expiry.ExpiryPolicy; +import org.ehcache.spi.resilience.StoreAccessException; import org.ehcache.internal.TestTimeSource; import org.ehcache.spi.test.After; import org.ehcache.spi.test.LegalSPITesterException; import org.ehcache.spi.test.SPITest; +import java.time.Duration; + import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.both; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.nullValue; - /** - * Test the {@link Store#putIfAbsent(Object, Object)} contract of the + * Test the {@link Store#putIfAbsent(Object, Object, java.util.function.Consumer)} contract of the * {@link Store Store} interface. * * @author Aurelien Broszniowski @@ -47,7 +47,6 @@ public StorePutIfAbsentTest(final StoreFactory factory) { } protected Store kvStore; - protected Store kvStore2; @After public void tearDown() { @@ -55,12 +54,6 @@ public void tearDown() { factory.close(kvStore); kvStore = null; } - if (kvStore2 != null) { - @SuppressWarnings("unchecked") - Store kvStore2 = this.kvStore2; - factory.close(kvStore2); - this.kvStore2 = null; - } } @SPITest @@ -72,7 +65,7 @@ public void mapsKeyToValueWhenMappingDoesntExist() V value = factory.createValue(1); try { - assertThat(kvStore.putIfAbsent(key, value), is(nullValue())); + assertThat(kvStore.putIfAbsent(key, value, b -> {}), is(nullValue())); } catch (StoreAccessException e) { throw new LegalSPITesterException("Warning, an exception is thrown due to the SPI test"); } @@ -95,7 +88,7 @@ public void doesntMapKeyToValueWhenMappingExists() V updatedValue = factory.createValue(2); try { - assertThat(kvStore.putIfAbsent(key, updatedValue).value(), is(equalTo(value))); + assertThat(kvStore.putIfAbsent(key, updatedValue, b -> {}).get(), is(equalTo(value))); } catch (StoreAccessException e) { throw new LegalSPITesterException("Warning, an exception is thrown due to the SPI test"); } @@ -110,7 +103,7 @@ public void nullKeyThrowsException() V value = factory.createValue(1); try { - kvStore.putIfAbsent(key, value); + kvStore.putIfAbsent(key, value, b -> {}); throw new AssertionError("Expected NullPointerException because the key is null"); } catch (NullPointerException e) { // expected @@ -126,7 +119,7 @@ public void nullValueThrowsException() V value = null; try { - kvStore.putIfAbsent(key, value); + kvStore.putIfAbsent(key, value, b -> {}); throw new AssertionError("Expected NullPointerException because the value is null"); } catch (NullPointerException e) { // expected @@ -137,15 +130,15 @@ public void nullValueThrowsException() @SuppressWarnings({ "unchecked", "rawtypes" }) public void wrongKeyTypeThrowsException() throws IllegalAccessException, InstantiationException, 
LegalSPITesterException { - kvStore2 = factory.newStore(); + kvStore = factory.newStore(); V value = factory.createValue(1); try { if (this.factory.getKeyType() == String.class) { - kvStore2.putIfAbsent(1.0f, value); + kvStore.putIfAbsent((K) (Float) 1.0f, value, b -> {}); } else { - kvStore2.putIfAbsent("key", value); + kvStore.putIfAbsent((K) "key", value, b -> {}); } throw new AssertionError("Expected ClassCastException because the key is of the wrong type"); } catch (ClassCastException e) { @@ -159,15 +152,15 @@ public void wrongKeyTypeThrowsException() @SuppressWarnings({ "unchecked", "rawtypes" }) public void wrongValueTypeThrowsException() throws IllegalAccessException, InstantiationException, LegalSPITesterException { - kvStore2 = factory.newStore(); + kvStore = factory.newStore(); K key = factory.createKey(1); try { if (this.factory.getValueType() == String.class) { - kvStore2.putIfAbsent(key, 1.0f); + kvStore.putIfAbsent(key, (V) (Float) 1.0f, b -> {}); } else { - kvStore2.putIfAbsent(key, "value"); + kvStore.putIfAbsent(key, (V) "value", b -> {}); } throw new AssertionError("Expected ClassCastException because the value is of the wrong type"); } catch (ClassCastException e) { @@ -180,7 +173,7 @@ public void wrongValueTypeThrowsException() @SPITest public void testPutIfAbsentValuePresentExpiresOnAccess() throws LegalSPITesterException { TestTimeSource timeSource = new TestTimeSource(10043L); - kvStore = factory.newStoreWithExpiry(Expirations.builder().setAccess(Duration.ZERO).build(), timeSource); + kvStore = factory.newStoreWithExpiry(ExpiryPolicyBuilder.expiry().access(Duration.ZERO).build(), timeSource); K key = factory.createKey(250928L); V value = factory.createValue(2059820L); @@ -188,7 +181,7 @@ public void testPutIfAbsentValuePresentExpiresOnAccess() throws LegalSPITesterEx try { kvStore.put(key, value); - assertThat(kvStore.putIfAbsent(key, newValue).value(), is(value)); + assertThat(kvStore.putIfAbsent(key, newValue, b -> {}).get(), is(value)); } catch (StoreAccessException e) { throw new LegalSPITesterException("Warning, an exception is thrown due to the SPI test"); } diff --git a/core-spi-test/src/main/java/org/ehcache/internal/store/StorePutTest.java b/core-spi-test/src/main/java/org/ehcache/internal/store/StorePutTest.java index 9bb6977197..9c9c293403 100644 --- a/core-spi-test/src/main/java/org/ehcache/internal/store/StorePutTest.java +++ b/core-spi-test/src/main/java/org/ehcache/internal/store/StorePutTest.java @@ -16,23 +16,22 @@ package org.ehcache.internal.store; -import org.ehcache.ValueSupplier; +import org.ehcache.config.builders.ExpiryPolicyBuilder; import org.ehcache.core.spi.store.Store; -import org.ehcache.core.spi.store.StoreAccessException; -import org.ehcache.expiry.Duration; -import org.ehcache.expiry.Expirations; -import org.ehcache.expiry.Expiry; +import org.ehcache.spi.resilience.StoreAccessException; +import org.ehcache.expiry.ExpiryPolicy; import org.ehcache.internal.TestTimeSource; import org.ehcache.spi.test.After; import org.ehcache.spi.test.LegalSPITesterException; import org.ehcache.spi.test.SPITest; +import java.time.Duration; + import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; - /** * Test the {@link Store#put(Object, Object)} contract of the * {@link Store Store} interface. 
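// The recurring expiry migration, shown once for reference (equivalences taken from the
// before/after lines of this patch): the deprecated Expirations factory and Ehcache's own
// Duration give way to ExpiryPolicyBuilder over java.time.Duration.
ExpiryPolicy<Object, Object> ttl = ExpiryPolicyBuilder.timeToLiveExpiration(Duration.ofMillis(1));
// was: Expirations.timeToLiveExpiration(new Duration(1, TimeUnit.MILLISECONDS))
ExpiryPolicy<Object, Object> accessZero = ExpiryPolicyBuilder.expiry().access(Duration.ZERO).build();
// was: Expirations.builder().setAccess(Duration.ZERO).build()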
@@ -47,7 +46,6 @@ public StorePutTest(final StoreFactory factory) { } protected Store kvStore; - protected Store kvStore2; @After public void tearDown() { @@ -55,12 +53,6 @@ public void tearDown() { factory.close(kvStore); kvStore = null; } - if (kvStore2 != null) { - @SuppressWarnings("unchecked") - Store kvStore2 = this.kvStore2; - factory.close(kvStore2); - this.kvStore2 = null; - } } @SPITest @@ -118,18 +110,18 @@ public void indicatesValuePutAndCanBeRetrievedWithEqualKey() } @SPITest - @SuppressWarnings({ "unchecked", "rawtypes" }) + @SuppressWarnings("unchecked") public void wrongKeyTypeThrowsException() throws IllegalAccessException, InstantiationException, LegalSPITesterException { - kvStore2 = factory.newStore(); + kvStore = factory.newStore(); V value = factory.createValue(1); try { if (this.factory.getKeyType() == String.class) { - kvStore2.put(1.0f, value); + kvStore.put((K) (Float) 1.0f, value); } else { - kvStore2.put("key", value); + kvStore.put((K) "key", value); } throw new AssertionError("Expected ClassCastException because the key is of the wrong type"); } catch (ClassCastException e) { @@ -143,15 +135,15 @@ public void wrongKeyTypeThrowsException() @SuppressWarnings({ "unchecked", "rawtypes" }) public void wrongValueTypeThrowsException() throws IllegalAccessException, InstantiationException, LegalSPITesterException { - kvStore2 = factory.newStore(); + kvStore = factory.newStore(); K key = factory.createKey(1); try { if (this.factory.getValueType() == String.class) { - kvStore2.put(key, 1.0f); + kvStore.put(key, (V) (Float) 1.0f); } else { - kvStore2.put(key, "value"); + kvStore.put(key, (V) "value"); } throw new AssertionError("Expected ClassCastException because the value is of the wrong type"); } catch (ClassCastException e) { @@ -183,7 +175,7 @@ public void indicatesValueReplaced() throws LegalSPITesterException { public void indicatesValueReplacedWhenUpdateExpires() throws LegalSPITesterException { TestTimeSource timeSource = new TestTimeSource(1000L); - kvStore = factory.newStoreWithExpiry(Expirations.builder().setUpdate(Duration.ZERO).build(), timeSource); + kvStore = factory.newStoreWithExpiry(ExpiryPolicyBuilder.expiry().update(Duration.ZERO).build(), timeSource); K key = factory.createKey(42L); V value = factory.createValue(42L); @@ -204,7 +196,7 @@ public void indicatesValueReplacedWhenUpdateExpires() throws LegalSPITesterExcep public void indicatesOperationNoOp() throws LegalSPITesterException { TestTimeSource timeSource = new TestTimeSource(1000L); - kvStore = factory.newStoreWithExpiry(Expirations.builder().setCreate(Duration.ZERO).build(), timeSource); + kvStore = factory.newStoreWithExpiry(ExpiryPolicyBuilder.expiry().create(Duration.ZERO).build(), timeSource); K key = factory.createKey(42L); try { diff --git a/core-spi-test/src/main/java/org/ehcache/internal/store/StoreRemovalEventListenerTest.java b/core-spi-test/src/main/java/org/ehcache/internal/store/StoreRemovalEventListenerTest.java index 2185254092..5b16be71f3 100644 --- a/core-spi-test/src/main/java/org/ehcache/internal/store/StoreRemovalEventListenerTest.java +++ b/core-spi-test/src/main/java/org/ehcache/internal/store/StoreRemovalEventListenerTest.java @@ -19,7 +19,7 @@ import org.ehcache.event.EventType; import org.ehcache.core.spi.store.events.StoreEvent; import org.ehcache.core.spi.store.events.StoreEventListener; -import org.ehcache.core.spi.store.StoreAccessException; +import org.ehcache.spi.resilience.StoreAccessException; import org.ehcache.core.spi.store.Store; import 
org.ehcache.spi.test.After; import org.ehcache.spi.test.Before; @@ -27,8 +27,6 @@ import org.ehcache.spi.test.SPITest; import org.hamcrest.Matcher; -import java.util.function.BiFunction; - import static org.ehcache.internal.store.StoreCreationEventListenerTest.eventType; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; @@ -93,7 +91,7 @@ public void testComputeRemoves() throws LegalSPITesterException { K key = factory.createKey(125L); store.put(key, factory.createValue(125L)); StoreEventListener listener = addListener(store); - store.compute(key, (k, v) -> null); + store.getAndCompute(key, (k, v) -> null); verifyListenerInteractions(listener); } catch (StoreAccessException e) { throw new LegalSPITesterException("Warning, an exception is thrown due to the SPI test"); diff --git a/core-spi-test/src/main/java/org/ehcache/internal/store/StoreRemoveKeyTest.java b/core-spi-test/src/main/java/org/ehcache/internal/store/StoreRemoveKeyTest.java index 87423b5b32..e84623830a 100644 --- a/core-spi-test/src/main/java/org/ehcache/internal/store/StoreRemoveKeyTest.java +++ b/core-spi-test/src/main/java/org/ehcache/internal/store/StoreRemoveKeyTest.java @@ -17,7 +17,7 @@ package org.ehcache.internal.store; import org.ehcache.core.spi.store.Store; -import org.ehcache.core.spi.store.StoreAccessException; +import org.ehcache.spi.resilience.StoreAccessException; import org.ehcache.spi.test.After; import org.ehcache.spi.test.LegalSPITesterException; import org.ehcache.spi.test.SPITest; @@ -39,7 +39,6 @@ public StoreRemoveKeyTest(final StoreFactory factory) { } protected Store kvStore; - protected Store kvStore2; @After public void tearDown() { @@ -47,12 +46,6 @@ public void tearDown() { factory.close(kvStore); kvStore = null; } - if (kvStore2 != null) { - @SuppressWarnings("unchecked") - Store kvStore2 = this.kvStore2; - factory.close(kvStore2); - this.kvStore2 = null; - } } @SPITest @@ -109,13 +102,13 @@ public void nullKeyThrowsException() @SuppressWarnings({ "unchecked", "rawtypes" }) public void wrongKeyTypeThrowsException() throws IllegalAccessException, InstantiationException, LegalSPITesterException { - kvStore2 = factory.newStore(); + kvStore = factory.newStore(); try { if (this.factory.getKeyType() == String.class) { - kvStore2.remove(1.0f); + kvStore.remove((K) (Float) 1.0f); } else { - kvStore2.remove("key"); + kvStore.remove((K) "key"); } throw new AssertionError("Expected ClassCastException because the key is of the wrong type"); } catch (ClassCastException e) { diff --git a/core-spi-test/src/main/java/org/ehcache/internal/store/StoreRemoveKeyValueTest.java b/core-spi-test/src/main/java/org/ehcache/internal/store/StoreRemoveKeyValueTest.java index 76db98d3c9..31baf55fda 100644 --- a/core-spi-test/src/main/java/org/ehcache/internal/store/StoreRemoveKeyValueTest.java +++ b/core-spi-test/src/main/java/org/ehcache/internal/store/StoreRemoveKeyValueTest.java @@ -18,7 +18,7 @@ import org.ehcache.core.spi.store.Store; import org.ehcache.core.spi.store.Store.RemoveStatus; -import org.ehcache.core.spi.store.StoreAccessException; +import org.ehcache.spi.resilience.StoreAccessException; import org.ehcache.spi.test.After; import org.ehcache.spi.test.LegalSPITesterException; import org.ehcache.spi.test.SPITest; @@ -203,7 +203,7 @@ public void wrongKeyTypeThrowsException() if (this.factory.getKeyType() == String.class) { kvStore2.remove((K) (Object) 1.0f, value); } else { - kvStore2.remove((K) (Object) "key", value); + kvStore2.remove((K) "key", value); } throw new 
AssertionError("Expected ClassCastException because the key is of the wrong type"); } catch (ClassCastException e) { @@ -225,7 +225,7 @@ public void wrongValueTypeThrowsException() if (this.factory.getValueType() == String.class) { kvStore2.remove(key, (V) (Object) 1.0f); } else { - kvStore2.remove(key, (V) (Object) "value"); + kvStore2.remove(key, (V) "value"); } throw new AssertionError("Expected ClassCastException because the value is of the wrong type"); } catch (ClassCastException e) { diff --git a/core-spi-test/src/main/java/org/ehcache/internal/store/StoreReplaceKeyValueTest.java b/core-spi-test/src/main/java/org/ehcache/internal/store/StoreReplaceKeyValueTest.java index 524c086a1f..e1df421889 100644 --- a/core-spi-test/src/main/java/org/ehcache/internal/store/StoreReplaceKeyValueTest.java +++ b/core-spi-test/src/main/java/org/ehcache/internal/store/StoreReplaceKeyValueTest.java @@ -17,7 +17,7 @@ package org.ehcache.internal.store; import org.ehcache.core.spi.store.Store; -import org.ehcache.core.spi.store.StoreAccessException; +import org.ehcache.spi.resilience.StoreAccessException; import org.ehcache.spi.test.After; import org.ehcache.spi.test.LegalSPITesterException; import org.ehcache.spi.test.SPITest; @@ -41,7 +41,6 @@ public StoreReplaceKeyValueTest(final StoreFactory factory) { } protected Store kvStore; - protected Store kvStore2; @After public void tearDown() { @@ -49,12 +48,6 @@ public void tearDown() { factory.close(kvStore); kvStore = null; } - if (kvStore2 != null) { - @SuppressWarnings("unchecked") - Store kvStore2 = this.kvStore2; - factory.close(kvStore2); - this.kvStore2 = null; - } } @SPITest @@ -75,7 +68,7 @@ public void replaceKeyAndValue() throw new LegalSPITesterException("Warning, an exception is thrown due to the SPI test"); } - assertThat(kvStore.get(key).value(), is(equalTo(newValue))); + assertThat(kvStore.get(key).get(), is(equalTo(newValue))); } @SPITest @@ -91,7 +84,7 @@ public void replaceReturnsOldValue() V newValue = factory.createValue(2); try { - assertThat(kvStore.replace(key, newValue).value(), is(equalTo(originalValue))); + assertThat(kvStore.replace(key, newValue).get(), is(equalTo(originalValue))); } catch (StoreAccessException e) { throw new LegalSPITesterException("Warning, an exception is thrown due to the SPI test"); } @@ -154,15 +147,15 @@ public void nullValueThrowsException() @SuppressWarnings("unchecked") public void wrongKeyTypeThrowsException() throws IllegalAccessException, InstantiationException, LegalSPITesterException { - kvStore2 = factory.newStore(); + kvStore = factory.newStore(); V value = factory.createValue(1); try { if (this.factory.getKeyType() == String.class) { - kvStore2.replace(1.0f, value); + kvStore.replace((K) (Float) 1.0f, value); } else { - kvStore2.replace("key", value); + kvStore.replace((K) "key", value); } throw new AssertionError("Expected ClassCastException because the key is of the wrong type"); } catch (ClassCastException e) { @@ -176,15 +169,15 @@ public void wrongKeyTypeThrowsException() @SuppressWarnings("unchecked") public void wrongValueTypeThrowsException() throws IllegalAccessException, InstantiationException, LegalSPITesterException { - kvStore2 = factory.newStore(); + kvStore = factory.newStore(); K key = factory.createKey(1); try { if (this.factory.getValueType() == String.class) { - kvStore2.replace(key, 1.0f); + kvStore.replace(key, (V) (Float) 1.0f); } else { - kvStore2.replace(key, "value"); + kvStore.replace(key, (V) "value"); } throw new AssertionError("Expected ClassCastException because 
the value is of the wrong type"); } catch (ClassCastException e) { diff --git a/core-spi-test/src/main/java/org/ehcache/internal/store/StoreReplaceKeyValueValueTest.java b/core-spi-test/src/main/java/org/ehcache/internal/store/StoreReplaceKeyValueValueTest.java index 1ba79839f9..298ad03196 100644 --- a/core-spi-test/src/main/java/org/ehcache/internal/store/StoreReplaceKeyValueValueTest.java +++ b/core-spi-test/src/main/java/org/ehcache/internal/store/StoreReplaceKeyValueValueTest.java @@ -18,7 +18,7 @@ import org.ehcache.core.spi.store.Store; import org.ehcache.core.spi.store.Store.ReplaceStatus; -import org.ehcache.core.spi.store.StoreAccessException; +import org.ehcache.spi.resilience.StoreAccessException; import org.ehcache.spi.test.After; import org.ehcache.spi.test.LegalSPITesterException; import org.ehcache.spi.test.SPITest; @@ -43,7 +43,6 @@ public StoreReplaceKeyValueValueTest(final StoreFactory factory) { } protected Store kvStore; - protected Store kvStore2; @After public void tearDown() { @@ -51,12 +50,6 @@ public void tearDown() { factory.close(kvStore); kvStore = null; } - if (kvStore2 != null) { - @SuppressWarnings("unchecked") - Store kvStore2 = this.kvStore2; - factory.close(kvStore2); - this.kvStore2 = null; - } } @SPITest @@ -77,7 +70,7 @@ public void replaceCorrectKeyAndValue() throw new LegalSPITesterException("Warning, an exception is thrown due to the SPI test"); } - assertThat(kvStore.get(key).value(), is(equalTo(newValue))); + assertThat(kvStore.get(key).get(), is(equalTo(newValue))); } @SPITest @@ -99,7 +92,7 @@ public void replaceCorrectKeyAndWrongValue() throw new LegalSPITesterException("Warning, an exception is thrown due to the SPI test"); } - assertThat(kvStore.get(key).value(), is(not(equalTo(wrongValue)))); + assertThat(kvStore.get(key).get(), is(not(equalTo(wrongValue)))); } @SPITest @@ -145,16 +138,16 @@ public void unsuccessfulReplaceReturnsMiss() @SuppressWarnings({ "unchecked", "rawtypes" }) public void wrongKeyTypeThrowsException() throws IllegalAccessException, InstantiationException, LegalSPITesterException { - kvStore2 = factory.newStore(); + kvStore = factory.newStore(); V originalValue = factory.createValue(1); V newValue = factory.createValue(2); try { if (this.factory.getKeyType() == String.class) { - kvStore2.replace(1.0f, originalValue); + kvStore.replace((K) (Float) 1.0f, originalValue); } else { - kvStore2.replace("key", originalValue, newValue); + kvStore.replace((K) "key", originalValue, newValue); } throw new AssertionError("Expected ClassCastException because the key is of the wrong type"); } catch (ClassCastException e) { @@ -168,16 +161,16 @@ public void wrongKeyTypeThrowsException() @SuppressWarnings({ "unchecked", "rawtypes" }) public void wrongOriginalValueTypeThrowsException() throws IllegalAccessException, InstantiationException, LegalSPITesterException { - kvStore2 = factory.newStore(); + kvStore = factory.newStore(); K key = factory.createKey(1); V newValue = factory.createValue(1); try { if (this.factory.getValueType() == String.class) { - kvStore2.replace(key, 1.0f, newValue); + kvStore.replace(key, (V) (Float) 1.0f, newValue); } else { - kvStore2.replace(key, "value", newValue); + kvStore.replace(key, (V) "value", newValue); } throw new AssertionError("Expected ClassCastException because the value is of the wrong type"); } catch (ClassCastException e) { @@ -191,16 +184,16 @@ public void wrongOriginalValueTypeThrowsException() @SuppressWarnings({ "unchecked", "rawtypes" }) public void wrongNewValueTypeThrowsException() throws 
IllegalAccessException, InstantiationException, LegalSPITesterException { - kvStore2 = factory.newStore(); + kvStore = factory.newStore(); K key = factory.createKey(1); V originalValue = factory.createValue(1); try { if (this.factory.getValueType() == String.class) { - kvStore2.replace(key, originalValue, 1.0f); + kvStore.replace(key, originalValue, (V) (Float) 1.0f); } else { - kvStore2.replace(key, originalValue, "value"); + kvStore.replace(key, originalValue, (V) "value"); } throw new AssertionError("Expected ClassCastException because the value is of the wrong type"); } catch (ClassCastException e) { diff --git a/core-spi-test/src/main/java/org/ehcache/internal/store/StoreSPITest.java b/core-spi-test/src/main/java/org/ehcache/internal/store/StoreSPITest.java index 8b4e9abe32..ed60355ef7 100644 --- a/core-spi-test/src/main/java/org/ehcache/internal/store/StoreSPITest.java +++ b/core-spi-test/src/main/java/org/ehcache/internal/store/StoreSPITest.java @@ -26,8 +26,8 @@ public abstract class StoreSPITest { protected abstract StoreFactory getStoreFactory(); @Test - public void testCompute() throws Exception { - StoreComputeTest testSuite = new StoreComputeTest<>(getStoreFactory()); + public void testGetAndCompute() throws Exception { + StoreGetAndComputeTest testSuite = new StoreGetAndComputeTest<>(getStoreFactory()); testSuite.runTestSuite().reportAndThrow(); } @@ -124,13 +124,6 @@ public void testValueHolderLastAccessTime() throws Exception { testSuite.runTestSuite().reportAndThrow(); } - @Test - public void testValueHolderHitRate() throws Exception { - StoreValueHolderHitRateTest testSuite = - new StoreValueHolderHitRateTest<>(getStoreFactory()); - testSuite.runTestSuite().reportAndThrow(); - } - @Test public void testIteratorHasNext() throws Exception { StoreIteratorHasNextTest testSuite = diff --git a/core-spi-test/src/main/java/org/ehcache/internal/store/StoreUpdateEventListenerTest.java b/core-spi-test/src/main/java/org/ehcache/internal/store/StoreUpdateEventListenerTest.java index 61657cfdd0..d31466391a 100644 --- a/core-spi-test/src/main/java/org/ehcache/internal/store/StoreUpdateEventListenerTest.java +++ b/core-spi-test/src/main/java/org/ehcache/internal/store/StoreUpdateEventListenerTest.java @@ -19,7 +19,7 @@ import org.ehcache.event.EventType; import org.ehcache.core.spi.store.events.StoreEvent; import org.ehcache.core.spi.store.events.StoreEventListener; -import org.ehcache.core.spi.store.StoreAccessException; +import org.ehcache.spi.resilience.StoreAccessException; import org.ehcache.core.spi.store.Store; import org.ehcache.spi.test.After; import org.ehcache.spi.test.Before; @@ -27,8 +27,6 @@ import org.ehcache.spi.test.SPITest; import org.hamcrest.Matcher; -import java.util.function.BiFunction; - import static org.ehcache.internal.store.StoreCreationEventListenerTest.eventType; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; @@ -109,7 +107,7 @@ public void testComputeUpdates() throws LegalSPITesterException { K key = factory.createKey(125L); store.put(key, factory.createValue(125L)); StoreEventListener listener = addListener(store); - store.compute(key, (k, v) -> factory.createValue(215L)); + store.getAndCompute(key, (k, v) -> factory.createValue(215L)); verifyListenerInteractions(listener); } catch (StoreAccessException e) { throw new LegalSPITesterException("Warning, an exception is thrown due to the SPI test"); diff --git a/core-spi-test/src/main/java/org/ehcache/internal/store/StoreValueHolderHitRateTest.java 
b/core-spi-test/src/main/java/org/ehcache/internal/store/StoreValueHolderHitRateTest.java deleted file mode 100644 index 7a990766ec..0000000000 --- a/core-spi-test/src/main/java/org/ehcache/internal/store/StoreValueHolderHitRateTest.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Copyright Terracotta, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.ehcache.internal.store; - -import org.ehcache.core.spi.store.Store; -import org.ehcache.spi.test.SPITest; - -import java.util.concurrent.TimeUnit; - -import static org.hamcrest.MatcherAssert.assertThat; -import static org.hamcrest.Matchers.anyOf; -import static org.hamcrest.core.Is.is; - -/** - * Test the {@link Store.ValueHolder#hitRate(long, TimeUnit)} contract of the - * {@link Store.ValueHolder Store.ValueHolder} interface. - * - * @author Aurelien Broszniowski - */ - -public class StoreValueHolderHitRateTest extends SPIStoreTester { - - public StoreValueHolderHitRateTest(final StoreFactory factory) { - super(factory); - } - - @SPITest - public void hitRateCanBeReturned() - throws IllegalAccessException, InstantiationException { - Store.ValueHolder valueHolder = factory.newValueHolder(factory.createValue(1)); - - assertThat(valueHolder.hitRate(TimeUnit.MILLISECONDS.convert(System.currentTimeMillis(), TimeUnit.MILLISECONDS), TimeUnit.MILLISECONDS), anyOf(is(Float.NaN), is(0.0f))); - } -} diff --git a/core-spi-test/src/main/java/org/ehcache/internal/store/StoreValueHolderValueTest.java b/core-spi-test/src/main/java/org/ehcache/internal/store/StoreValueHolderValueTest.java index be69354a3f..ee0c59289d 100644 --- a/core-spi-test/src/main/java/org/ehcache/internal/store/StoreValueHolderValueTest.java +++ b/core-spi-test/src/main/java/org/ehcache/internal/store/StoreValueHolderValueTest.java @@ -24,7 +24,7 @@ import static org.hamcrest.core.Is.is; /** - * Test the {@link Store.ValueHolder#value()} contract of the + * Test the {@link Store.ValueHolder#get()} contract of the * {@link Store.ValueHolder Store.ValueHolder} interface.
* * @author Aurelien Broszniowski @@ -43,7 +43,7 @@ public void valueIsHeldByValueHolder() Store.ValueHolder valueHolder = factory.newValueHolder(value); try { - assertThat(valueHolder.value(), is(equalTo(value))); + assertThat(valueHolder.get(), is(equalTo(value))); } catch (Exception e) { System.err.println("Warning, an exception is thrown due to the SPI test"); e.printStackTrace(); diff --git a/core-spi-test/src/main/java/org/ehcache/internal/tier/AuthoritativeTierComputeIfAbsentAndFault.java b/core-spi-test/src/main/java/org/ehcache/internal/tier/AuthoritativeTierComputeIfAbsentAndFault.java index d747cdc834..8654c3a12b 100644 --- a/core-spi-test/src/main/java/org/ehcache/internal/tier/AuthoritativeTierComputeIfAbsentAndFault.java +++ b/core-spi-test/src/main/java/org/ehcache/internal/tier/AuthoritativeTierComputeIfAbsentAndFault.java @@ -16,7 +16,7 @@ package org.ehcache.internal.tier; -import org.ehcache.core.spi.store.StoreAccessException; +import org.ehcache.spi.resilience.StoreAccessException; import org.ehcache.core.spi.store.tiering.AuthoritativeTier; import org.ehcache.spi.test.After; import org.ehcache.spi.test.LegalSPITesterException; @@ -81,10 +81,10 @@ public void marksTheMappingAsNotEvictableAndComputeValue() throws LegalSPITester try { assertThat(tier.get(key), is(nullValue())); - assertThat(tier.computeIfAbsentAndFault(key, k -> factory.createValue(1L)).value(), is(equalTo(value))); + assertThat(tier.computeIfAbsentAndFault(key, k -> factory.createValue(1L)).get(), is(equalTo(value))); fillTierOverCapacity(tier, factory); - assertThat(tier.get(key).value(), is(equalTo(value))); + assertThat(tier.get(key).get(), is(equalTo(value))); } catch (StoreAccessException e) { throw new LegalSPITesterException("Warning, an exception is thrown due to the SPI test"); diff --git a/core-spi-test/src/main/java/org/ehcache/internal/tier/AuthoritativeTierFactory.java b/core-spi-test/src/main/java/org/ehcache/internal/tier/AuthoritativeTierFactory.java index a79da797bc..dd55ba2413 100644 --- a/core-spi-test/src/main/java/org/ehcache/internal/tier/AuthoritativeTierFactory.java +++ b/core-spi-test/src/main/java/org/ehcache/internal/tier/AuthoritativeTierFactory.java @@ -17,8 +17,8 @@ package org.ehcache.internal.tier; import org.ehcache.config.EvictionAdvisor; -import org.ehcache.expiry.Expiry; import org.ehcache.core.spi.time.TimeSource; +import org.ehcache.expiry.ExpiryPolicy; import org.ehcache.internal.store.StoreFactory; import org.ehcache.core.spi.store.tiering.AuthoritativeTier; @@ -34,7 +34,7 @@ public interface AuthoritativeTierFactory extends StoreFactory { AuthoritativeTier newStoreWithCapacity(long capacity); @Override - AuthoritativeTier newStoreWithExpiry(Expiry expiry, TimeSource timeSource); + AuthoritativeTier newStoreWithExpiry(ExpiryPolicy expiry, TimeSource timeSource); @Override AuthoritativeTier newStoreWithEvictionAdvisor(EvictionAdvisor evictionAdvisor); diff --git a/core-spi-test/src/main/java/org/ehcache/internal/tier/AuthoritativeTierFlush.java b/core-spi-test/src/main/java/org/ehcache/internal/tier/AuthoritativeTierFlush.java index 16c7356535..0b1c16e036 100644 --- a/core-spi-test/src/main/java/org/ehcache/internal/tier/AuthoritativeTierFlush.java +++ b/core-spi-test/src/main/java/org/ehcache/internal/tier/AuthoritativeTierFlush.java @@ -16,11 +16,10 @@ package org.ehcache.internal.tier; -import org.ehcache.core.spi.store.StoreAccessException; +import org.ehcache.spi.resilience.StoreAccessException; import org.ehcache.core.spi.store.Store; import 
org.ehcache.core.spi.store.tiering.AuthoritativeTier; import org.ehcache.spi.test.After; -import org.ehcache.spi.test.Before; import org.ehcache.spi.test.LegalSPITesterException; import org.ehcache.spi.test.SPITest; diff --git a/core-spi-test/src/main/java/org/ehcache/internal/tier/AuthoritativeTierGetAndFault.java b/core-spi-test/src/main/java/org/ehcache/internal/tier/AuthoritativeTierGetAndFault.java index 0f19d97245..a37f5cd922 100644 --- a/core-spi-test/src/main/java/org/ehcache/internal/tier/AuthoritativeTierGetAndFault.java +++ b/core-spi-test/src/main/java/org/ehcache/internal/tier/AuthoritativeTierGetAndFault.java @@ -16,18 +16,16 @@ package org.ehcache.internal.tier; -import org.ehcache.core.spi.store.StoreAccessException; -import org.ehcache.expiry.Duration; -import org.ehcache.expiry.Expirations; +import org.ehcache.config.builders.ExpiryPolicyBuilder; +import org.ehcache.spi.resilience.StoreAccessException; import org.ehcache.internal.TestTimeSource; import org.ehcache.core.spi.store.tiering.AuthoritativeTier; -import org.ehcache.spi.test.After; -import org.ehcache.spi.test.Before; +import org.ehcache.spi.test.After; import org.ehcache.spi.test.Ignore; import org.ehcache.spi.test.LegalSPITesterException; import org.ehcache.spi.test.SPITest; -import java.util.concurrent.TimeUnit; +import java.time.Duration; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.not; @@ -88,11 +86,11 @@ public void marksTheMappingAsNotEvictableAndReturnsValue() throws LegalSPITester try { tier.put(key, value); - assertThat(tier.getAndFault(key).value(), is(equalTo(value))); + assertThat(tier.getAndFault(key).get(), is(equalTo(value))); fillTierOverCapacity(tier, factory); - assertThat(tier.get(key).value(), is(equalTo(value))); + assertThat(tier.get(key).get(), is(equalTo(value))); } catch (StoreAccessException e) { throw new LegalSPITesterException("Warning, an exception is thrown due to the SPI test"); @@ -103,7 +101,7 @@ public void marksTheMappingAsNotEvictableAndReturnsValue() throws LegalSPITester @Ignore public void marksTheMappingAsNotExpirable() throws LegalSPITesterException { TestTimeSource timeSource = new TestTimeSource(); - tier = factory.newStoreWithExpiry(Expirations.timeToIdleExpiration(new Duration(1, TimeUnit.MILLISECONDS)), timeSource); + tier = factory.newStoreWithExpiry(ExpiryPolicyBuilder.timeToIdleExpiration(Duration.ofMillis(1L)), timeSource); K key = factory.createKey(1); V value = factory.createValue(1); diff --git a/core-spi-test/src/main/java/org/ehcache/internal/tier/CachingTierClear.java b/core-spi-test/src/main/java/org/ehcache/internal/tier/CachingTierClear.java index 6a9cde5b7e..fd59728d72 100644 --- a/core-spi-test/src/main/java/org/ehcache/internal/tier/CachingTierClear.java +++ b/core-spi-test/src/main/java/org/ehcache/internal/tier/CachingTierClear.java @@ -16,17 +16,15 @@ package org.ehcache.internal.tier; -import org.ehcache.core.spi.store.StoreAccessException; +import org.ehcache.spi.resilience.StoreAccessException; import org.ehcache.core.spi.store.Store; import org.ehcache.core.spi.store.tiering.CachingTier; import org.ehcache.spi.test.After; -import org.ehcache.spi.test.Before; import org.ehcache.spi.test.LegalSPITesterException; import org.ehcache.spi.test.SPITest; import java.util.ArrayList; import java.util.List; -import java.util.function.Function; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.MatcherAssert.assertThat; @@ -48,10 +46,6 @@ public CachingTierClear(final
CachingTierFactory factory) { super(factory); } - @Before - public void setUp() { - } - @After public void tearDown() { if (tier != null) { @@ -71,7 +65,7 @@ public void removeMapping() throws LegalSPITesterException { V newValue= factory.createValue(2); final Store.ValueHolder originalValueHolder = mock(Store.ValueHolder.class); - when(originalValueHolder.value()).thenReturn(originalValue); + when(originalValueHolder.get()).thenReturn(originalValue); try { List keys = new ArrayList<>(); @@ -85,13 +79,13 @@ public void removeMapping() throws LegalSPITesterException { tier.clear(); final Store.ValueHolder newValueHolder = mock(Store.ValueHolder.class); - when(newValueHolder.value()).thenReturn(newValue); + when(newValueHolder.get()).thenReturn(newValue); for (K key : keys) { tier.invalidate(key); Store.ValueHolder newReturnedValueHolder = tier.getOrComputeIfAbsent(key, o -> newValueHolder); - assertThat(newReturnedValueHolder.value(), is(equalTo(newValueHolder.value()))); + assertThat(newReturnedValueHolder.get(), is(equalTo(newValueHolder.get()))); } } catch (StoreAccessException e) { throw new LegalSPITesterException("Warning, an exception is thrown due to the SPI test"); diff --git a/core-spi-test/src/main/java/org/ehcache/internal/tier/CachingTierGetOrComputeIfAbsent.java b/core-spi-test/src/main/java/org/ehcache/internal/tier/CachingTierGetOrComputeIfAbsent.java index 2ae60634f6..8799027d56 100644 --- a/core-spi-test/src/main/java/org/ehcache/internal/tier/CachingTierGetOrComputeIfAbsent.java +++ b/core-spi-test/src/main/java/org/ehcache/internal/tier/CachingTierGetOrComputeIfAbsent.java @@ -16,11 +16,10 @@ package org.ehcache.internal.tier; -import org.ehcache.core.spi.store.StoreAccessException; +import org.ehcache.spi.resilience.StoreAccessException; import org.ehcache.core.spi.store.Store; import org.ehcache.core.spi.store.tiering.CachingTier; import org.ehcache.spi.test.After; -import org.ehcache.spi.test.Before; import org.ehcache.spi.test.LegalSPITesterException; import org.ehcache.spi.test.SPITest; @@ -49,10 +48,6 @@ public CachingTierGetOrComputeIfAbsent(final CachingTierFactory factory) { super(factory); } - @Before - public void setUp() { - } - @After public void tearDown() { if (tier != null) { @@ -68,14 +63,14 @@ public void returnTheValueHolderNotInTheCachingTier() throws LegalSPITesterExcep V value = factory.createValue(1); final Store.ValueHolder computedValueHolder = mock(Store.ValueHolder.class); - when(computedValueHolder.value()).thenReturn(value); + when(computedValueHolder.get()).thenReturn(value); tier = factory.newCachingTier(1L); try { Store.ValueHolder valueHolder = tier.getOrComputeIfAbsent(key, k -> computedValueHolder); - assertThat(valueHolder.value(), is(equalTo(value))); + assertThat(valueHolder.get(), is(equalTo(value))); } catch (StoreAccessException e) { throw new LegalSPITesterException("Warning, an exception is thrown due to the SPI test"); } @@ -87,7 +82,7 @@ public void returnTheValueHolderCurrentlyInTheCachingTier() throws LegalSPITeste K key = factory.createKey(1); V value = factory.createValue(1); final Store.ValueHolder computedValueHolder = mock(Store.ValueHolder.class); - when(computedValueHolder.value()).thenReturn(value); + when(computedValueHolder.get()).thenReturn(value); when(computedValueHolder.expirationTime(any(TimeUnit.class))).thenReturn(Store.ValueHolder.NO_EXPIRE); tier = factory.newCachingTier(); @@ -98,7 +93,7 @@ public void returnTheValueHolderCurrentlyInTheCachingTier() throws LegalSPITeste Store.ValueHolder valueHolder = 
tier.getOrComputeIfAbsent(key, k -> null); - assertThat(valueHolder.value(), is(equalTo(value))); + assertThat(valueHolder.get(), is(equalTo(value))); } catch (StoreAccessException e) { throw new LegalSPITesterException("Warning, an exception is thrown due to the SPI test"); } diff --git a/core-spi-test/src/main/java/org/ehcache/internal/tier/CachingTierInvalidate.java b/core-spi-test/src/main/java/org/ehcache/internal/tier/CachingTierInvalidate.java index 518178a994..1e3f1f3b12 100644 --- a/core-spi-test/src/main/java/org/ehcache/internal/tier/CachingTierInvalidate.java +++ b/core-spi-test/src/main/java/org/ehcache/internal/tier/CachingTierInvalidate.java @@ -17,7 +17,7 @@ package org.ehcache.internal.tier; import org.ehcache.core.spi.store.Store; -import org.ehcache.core.spi.store.StoreAccessException; +import org.ehcache.spi.resilience.StoreAccessException; import org.ehcache.core.spi.store.tiering.CachingTier; import org.ehcache.spi.test.After; import org.ehcache.spi.test.LegalSPITesterException; @@ -27,7 +27,6 @@ import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; -import java.util.function.Function; import static org.hamcrest.Matchers.is; import static org.junit.Assert.assertThat; @@ -65,7 +64,7 @@ public void invalidateKey() throws LegalSPITesterException { // register invalidation listener final AtomicBoolean invalidated = new AtomicBoolean(false); tier.setInvalidationListener((key1, valueHolder) -> { - assertThat(valueHolder.value(), is(value)); + assertThat(valueHolder.get(), is(value)); invalidated.set(true); }); @@ -133,7 +132,7 @@ public void invalidateAll() throws LegalSPITesterException { private Store.ValueHolder wrap(final V value) { return new Store.ValueHolder() { @Override - public V value() { + public V get() { return value; } @@ -157,16 +156,6 @@ public long lastAccessTime(TimeUnit unit) { return 0L; } - @Override - public float hitRate(long now, TimeUnit unit) { - return 0L; - } - - @Override - public long hits() { - return 0L; - } - @Override public long getId() { return -1L; diff --git a/core-spi-test/src/main/java/org/ehcache/internal/tier/CachingTierRemove.java b/core-spi-test/src/main/java/org/ehcache/internal/tier/CachingTierRemove.java index a9b1c4db93..a7916c4906 100644 --- a/core-spi-test/src/main/java/org/ehcache/internal/tier/CachingTierRemove.java +++ b/core-spi-test/src/main/java/org/ehcache/internal/tier/CachingTierRemove.java @@ -16,16 +16,13 @@ package org.ehcache.internal.tier; -import org.ehcache.core.spi.store.StoreAccessException; +import org.ehcache.spi.resilience.StoreAccessException; import org.ehcache.core.spi.store.Store; import org.ehcache.core.spi.store.tiering.CachingTier; import org.ehcache.spi.test.After; -import org.ehcache.spi.test.Before; import org.ehcache.spi.test.LegalSPITesterException; import org.ehcache.spi.test.SPITest; -import java.util.function.Function; - import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.is; @@ -46,10 +43,6 @@ public CachingTierRemove(final CachingTierFactory factory) { super(factory); } - @Before - public void setUp() { - } - @After public void tearDown() { if (tier != null) { @@ -67,7 +60,7 @@ public void removeMapping() throws LegalSPITesterException { V newValue = factory.createValue(2); final Store.ValueHolder valueHolder = mock(Store.ValueHolder.class); - when(valueHolder.value()).thenReturn(originalValue); + when(valueHolder.get()).thenReturn(originalValue); 
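Editor's aside on the value() to get() rename that recurs through these test diffs: the refactored Store.ValueHolder behaves like a java.util.function.Supplier, so callers and mocks switch from valueHolder.value() to valueHolder.get(), and the hit-accounting methods (hits(), hitRate(...)) drop out of the SPI entirely. A minimal sketch of that shape, using a hypothetical stand-in type rather than the real SPI interface:

import java.util.function.Supplier;

// Hypothetical stand-in for the refactored Store.ValueHolder: the holder is
// now just a Supplier of its value plus metadata, so tests call get() where
// they previously called value().
interface ValueHolderSketch<V> extends Supplier<V> {
  long creationTime();
}

class ValueHolderSketchDemo {
  static <V> ValueHolderSketch<V> holderOf(V value) {
    long created = System.currentTimeMillis();
    return new ValueHolderSketch<V>() {
      @Override public V get() { return value; }               // was value()
      @Override public long creationTime() { return created; }
    };
  }

  public static void main(String[] args) {
    ValueHolderSketch<String> holder = holderOf("cached");
    System.out.println("cached".equals(holder.get()));         // prints true
  }
}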
tier = factory.newCachingTier(1L); @@ -77,10 +70,10 @@ public void removeMapping() throws LegalSPITesterException { tier.invalidate(key); final Store.ValueHolder newValueHolder = mock(Store.ValueHolder.class); - when(newValueHolder.value()).thenReturn(newValue); + when(newValueHolder.get()).thenReturn(newValue); Store.ValueHolder newReturnedValueHolder = tier.getOrComputeIfAbsent(key, o -> newValueHolder); - assertThat(newReturnedValueHolder.value(), is(equalTo(newValueHolder.value()))); + assertThat(newReturnedValueHolder.get(), is(equalTo(newValueHolder.get()))); } catch (StoreAccessException e) { throw new LegalSPITesterException("Warning, an exception is thrown due to the SPI test"); } diff --git a/core/build.gradle b/core/build.gradle index 84ce9d2858..ec8eea368d 100644 --- a/core/build.gradle +++ b/core/build.gradle @@ -17,13 +17,7 @@ apply plugin: EhDeploy dependencies { - compile project(':api'), "org.slf4j:slf4j-api:$parent.slf4jVersion" - compile ("org.terracotta:statistics:$parent.statisticVersion") { - exclude group:'org.slf4j', module:'slf4j-api' - } - testCompile project(':spi-tester') -} - -tasks.withType(JavaCompile) { - options.compilerArgs += ['-Werror'] + api project(':api') + api "org.terracotta:statistics:$parent.statisticVersion" + testImplementation project(':spi-tester') } diff --git a/core/src/main/java/org/ehcache/core/Ehcache.java b/core/src/main/java/org/ehcache/core/Ehcache.java index 8e73c9cb40..06a56408fc 100644 --- a/core/src/main/java/org/ehcache/core/Ehcache.java +++ b/core/src/main/java/org/ehcache/core/Ehcache.java @@ -16,63 +16,34 @@ package org.ehcache.core; -import java.util.AbstractMap; import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.EnumMap; import java.util.HashMap; -import java.util.Iterator; import java.util.LinkedHashMap; -import java.util.LinkedHashSet; +import java.util.List; import java.util.Map; -import java.util.NoSuchElementException; import java.util.Set; import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicReference; -import java.util.concurrent.atomic.LongAdder; import java.util.function.BiFunction; +import java.util.function.Consumer; import java.util.function.Function; import java.util.function.Supplier; import org.ehcache.Cache; -import org.ehcache.Status; import org.ehcache.config.CacheConfiguration; -import org.ehcache.config.CacheRuntimeConfiguration; import org.ehcache.core.events.CacheEventDispatcher; -import org.ehcache.core.internal.resilience.LoggingRobustResilienceStrategy; -import org.ehcache.core.internal.resilience.RecoveryCache; -import org.ehcache.core.internal.resilience.ResilienceStrategy; -import org.ehcache.core.spi.LifeCycled; +import org.ehcache.core.internal.util.CollectionUtil; import org.ehcache.core.spi.store.Store; -import org.ehcache.core.spi.store.Store.PutStatus; -import org.ehcache.core.spi.store.Store.RemoveStatus; -import org.ehcache.core.spi.store.Store.ReplaceStatus; import org.ehcache.core.spi.store.Store.ValueHolder; -import org.ehcache.core.spi.store.StoreAccessException; +import org.ehcache.spi.resilience.ResilienceStrategy; +import org.ehcache.spi.resilience.StoreAccessException; import org.ehcache.core.statistics.BulkOps; -import org.ehcache.core.statistics.CacheOperationOutcomes.ClearOutcome; -import org.ehcache.core.statistics.CacheOperationOutcomes.ConditionalRemoveOutcome; -import org.ehcache.core.statistics.CacheOperationOutcomes.GetAllOutcome; import 
org.ehcache.core.statistics.CacheOperationOutcomes.GetOutcome; -import org.ehcache.core.statistics.CacheOperationOutcomes.PutAllOutcome; -import org.ehcache.core.statistics.CacheOperationOutcomes.PutIfAbsentOutcome; import org.ehcache.core.statistics.CacheOperationOutcomes.PutOutcome; -import org.ehcache.core.statistics.CacheOperationOutcomes.RemoveAllOutcome; import org.ehcache.core.statistics.CacheOperationOutcomes.RemoveOutcome; -import org.ehcache.core.statistics.CacheOperationOutcomes.ReplaceOutcome; -import org.ehcache.expiry.Duration; -import org.ehcache.expiry.Expiry; -import org.ehcache.spi.loaderwriter.BulkCacheLoadingException; +import org.ehcache.expiry.ExpiryPolicy; import org.ehcache.spi.loaderwriter.BulkCacheWritingException; import org.ehcache.spi.loaderwriter.CacheLoaderWriter; import org.slf4j.Logger; -import org.terracotta.statistics.StatisticsManager; -import org.terracotta.statistics.observer.OperationObserver; - -import static org.ehcache.core.exceptions.ExceptionFactory.newCacheLoadingException; -import static org.ehcache.core.internal.util.ValueSuppliers.supplierOf; -import static org.terracotta.statistics.StatisticBuilder.operation; /** * Implementation of the {@link Cache} interface when no {@link CacheLoaderWriter} is involved. @@ -80,29 +51,10 @@ * {@code Ehcache} users should not have to depend on this type but rely exclusively on the api types in package * {@code org.ehcache}. * - * @see EhcacheWithLoaderWriter */ -public class Ehcache implements InternalCache { - - private final StatusTransitioner statusTransitioner; - - private final Store store; - private final ResilienceStrategy resilienceStrategy; - private final EhcacheRuntimeConfiguration runtimeConfiguration; - private final Jsr107CacheImpl jsr107Cache; - protected final Logger logger; +public class Ehcache extends EhcacheBase { - private final OperationObserver getObserver = operation(GetOutcome.class).named("get").of(this).tag("cache").build(); - private final OperationObserver getAllObserver = operation(GetAllOutcome.class).named("getAll").of(this).tag("cache").build(); - private final OperationObserver putObserver = operation(PutOutcome.class).named("put").of(this).tag("cache").build(); - private final OperationObserver putAllObserver = operation(PutAllOutcome.class).named("putAll").of(this).tag("cache").build(); - private final OperationObserver removeObserver = operation(RemoveOutcome.class).named("remove").of(this).tag("cache").build(); - private final OperationObserver removeAllObserver = operation(RemoveAllOutcome.class).named("removeAll").of(this).tag("cache").build(); - private final OperationObserver conditionalRemoveObserver = operation(ConditionalRemoveOutcome.class).named("conditionalRemove").of(this).tag("cache").build(); - private final OperationObserver putIfAbsentObserver = operation(PutIfAbsentOutcome.class).named("putIfAbsent").of(this).tag("cache").build(); - private final OperationObserver replaceObserver = operation(ReplaceOutcome.class).named("replace").of(this).tag("cache").build(); - private final Map bulkMethodEntries = new EnumMap<>(BulkOps.class); - private final OperationObserver clearObserver = operation(ClearOutcome.class).named("clear").of(this).tag("cache").build(); + private final CacheLoaderWriter cacheLoaderWriter; /** * Creates a new {@code Ehcache} based on the provided parameters. 
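The hunks that follow strip Ehcache down to a thin subclass: the statistics observers, status checks and resilience handling move into an EhcacheBase template, and Ehcache keeps only the raw store calls (doGet, doPut, and so on). A rough sketch of that template-method split, with simplified stand-in types in place of Store, ResilienceStrategy and the operation observers:

// Rough template-method sketch: the base class owns the observer and
// resilience plumbing around every operation; the subclass supplies only
// the raw store access. All types here are simplified stand-ins.
abstract class CacheBaseSketch<K, V> {
  final V get(K key) {
    try {
      V value = doGet(key);                    // raw store access
      record(value != null ? "HIT" : "MISS");  // stands in for observer.end(...)
      return value;
    } catch (Exception e) {                    // stands in for StoreAccessException
      record("FAILURE");
      return recover(key, e);                  // stands in for ResilienceStrategy.getFailure
    }
  }

  protected abstract V doGet(K key) throws Exception;

  protected V recover(K key, Exception e) { return null; }

  private void record(String outcome) { System.out.println("get: " + outcome); }
}

// Concrete subclass in the spirit of the refactored Ehcache: nothing but doXxx overrides.
class MapCacheSketch<K, V> extends CacheBaseSketch<K, V> {
  private final java.util.Map<K, V> store = new java.util.HashMap<>();
  @Override protected V doGet(K key) { return store.get(key); }
}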
@@ -112,212 +64,49 @@ public class Ehcache implements InternalCache { * @param eventDispatcher the event dispatcher * @param logger the logger */ - public Ehcache(CacheConfiguration configuration, final Store store, CacheEventDispatcher eventDispatcher, Logger logger) { - this(new EhcacheRuntimeConfiguration<>(configuration), store, eventDispatcher, logger, new StatusTransitioner(logger)); - } - - Ehcache(EhcacheRuntimeConfiguration runtimeConfiguration, Store store, - CacheEventDispatcher eventDispatcher, Logger logger, StatusTransitioner statusTransitioner) { - this.store = store; - runtimeConfiguration.addCacheConfigurationListener(store.getConfigurationChangeListeners()); - StatisticsManager.associate(store).withParent(this); - - if (store instanceof RecoveryCache) { - this.resilienceStrategy = new LoggingRobustResilienceStrategy<>(castToRecoveryCache(store)); - } else { - this.resilienceStrategy = new LoggingRobustResilienceStrategy<>(recoveryCache(store)); - } - - this.runtimeConfiguration = runtimeConfiguration; - runtimeConfiguration.addCacheConfigurationListener(eventDispatcher.getConfigurationChangeListeners()); - this.jsr107Cache = new Jsr107CacheImpl(); - - this.logger=logger; - this.statusTransitioner = statusTransitioner; - for (BulkOps bulkOp : BulkOps.values()) { - bulkMethodEntries.put(bulkOp, new LongAdder()); - } - } - - /** - * {@inheritDoc} - */ - @Override - public Map getBulkMethodEntries() { - return bulkMethodEntries; - } - - @SuppressWarnings("unchecked") - private RecoveryCache castToRecoveryCache(Store store) { - return (RecoveryCache) store; - } - - private V getNoLoader(K key) { - return get(key); - } - - /** - * {@inheritDoc} - */ - @Override - public V get(final K key) { - getObserver.begin(); - statusTransitioner.checkAvailable(); - checkNonNull(key); - - try { - final Store.ValueHolder valueHolder = store.get(key); - - // Check for expiry first - if (valueHolder == null) { - getObserver.end(GetOutcome.MISS); - return null; - } else { - getObserver.end(GetOutcome.HIT); - return valueHolder.value(); - } - } catch (StoreAccessException e) { - try { - return resilienceStrategy.getFailure(key, e); - } finally { - getObserver.end(GetOutcome.FAILURE); - } - } - } - - /** - * {@inheritDoc} - */ - @Override - public void put(final K key, final V value) { - putObserver.begin(); - statusTransitioner.checkAvailable(); - checkNonNull(key, value); - - try { - PutStatus status = store.put(key, value); - switch (status) { - case PUT: - putObserver.end(PutOutcome.PUT); - break; - case NOOP: - putObserver.end(PutOutcome.NOOP); - break; - default: - throw new AssertionError("Invalid Status."); - } - } catch (StoreAccessException e) { - try { - resilienceStrategy.putFailure(key, value, e); - } finally { - putObserver.end(PutOutcome.FAILURE); - } - } + public Ehcache(CacheConfiguration configuration, final Store store, ResilienceStrategy resilienceStrategy, + CacheEventDispatcher eventDispatcher, Logger logger) { + this(new EhcacheRuntimeConfiguration<>(configuration), store, resilienceStrategy, eventDispatcher, logger, new StatusTransitioner(logger), null); } - private boolean newValueAlreadyExpired(K key, V oldValue, V newValue) { - return newValueAlreadyExpired(logger, runtimeConfiguration.getExpiry(), key, oldValue, newValue); + Ehcache(EhcacheRuntimeConfiguration runtimeConfiguration, Store store, ResilienceStrategy resilienceStrategy, + CacheEventDispatcher eventDispatcher, Logger logger, StatusTransitioner statusTransitioner, CacheLoaderWriter cacheLoaderWriter) { + 
super(runtimeConfiguration, store, resilienceStrategy, eventDispatcher, logger, statusTransitioner); + this.cacheLoaderWriter = cacheLoaderWriter; } - /** - * {@inheritDoc} - */ - @Override - public boolean containsKey(final K key) { - statusTransitioner.checkAvailable(); - checkNonNull(key); - try { - return store.containsKey(key); - } catch (StoreAccessException e) { - return resilienceStrategy.containsKeyFailure(key, e); - } + public Ehcache(CacheConfiguration configuration, final Store store, ResilienceStrategy resilienceStrategy, + CacheEventDispatcher eventDispatcher, Logger logger, CacheLoaderWriter cacheLoaderWriter) { + super(new EhcacheRuntimeConfiguration<>(configuration), store, resilienceStrategy, eventDispatcher, logger, new StatusTransitioner(logger)); + this.cacheLoaderWriter = cacheLoaderWriter; } /** * {@inheritDoc} */ @Override - public void remove(K key) { - removeInternal(key); // ignore return value; + protected Store.ValueHolder doGet(K key) throws StoreAccessException { + return store.get(key); } - - private boolean removeInternal(final K key) { - removeObserver.begin(); - statusTransitioner.checkAvailable(); - checkNonNull(key); - - boolean removed = false; - try { - removed = store.remove(key); - if (removed) { - removeObserver.end(RemoveOutcome.SUCCESS); - } else { - removeObserver.end(RemoveOutcome.NOOP); - } - } catch (StoreAccessException e) { - try { - resilienceStrategy.removeFailure(key, e); - } finally { - removeObserver.end(RemoveOutcome.FAILURE); - } - } - - return removed; - } - - /** - * {@inheritDoc} - */ - @Override - public void clear() { - this.clearObserver.begin(); - statusTransitioner.checkAvailable(); - try { - store.clear(); - this.clearObserver.end(ClearOutcome.SUCCESS); - } catch (StoreAccessException e) { - this.clearObserver.end(ClearOutcome.FAILURE); - resilienceStrategy.clearFailure(e); - } + protected Store.PutStatus doPut(K key, V value) throws StoreAccessException { + return store.put(key, value); } - /** - * {@inheritDoc} - */ - @Override - public Iterator> iterator() { - statusTransitioner.checkAvailable(); - return new CacheEntryIterator(false); - } - - /** - * {@inheritDoc} - */ - @Override - public Map getAll(Set keys) throws BulkCacheLoadingException { - return getAllInternal(keys, true); + protected boolean doRemoveInternal(final K key) throws StoreAccessException { + return store.remove(key); } - private Map getAllInternal(Set keys, boolean includeNulls) throws BulkCacheLoadingException { - getAllObserver.begin(); - statusTransitioner.checkAvailable(); - checkNonNullContent(keys); - if(keys.isEmpty()) { - getAllObserver.end(GetAllOutcome.SUCCESS); - return Collections.emptyMap(); - } - - Map result = new HashMap<>(); - try { + protected Map doGetAllInternal(Set keys, boolean includeNulls) throws StoreAccessException { Map> computedMap = store.bulkComputeIfAbsent(keys, new GetAllFunction<>()); + Map result = new HashMap<>(computedMap.size()); int hits = 0; int keyCount = 0; for (Map.Entry> entry : computedMap.entrySet()) { keyCount++; if (entry.getValue() != null) { - result.put(entry.getKey(), entry.getValue().value()); + result.put(entry.getKey(), entry.getValue().get()); hits++; } else if (includeNulls) { result.put(entry.getKey(), null); @@ -326,301 +115,54 @@ private Map getAllInternal(Set keys, boolean includeNulls) th addBulkMethodEntriesCount(BulkOps.GET_ALL_HITS, hits); addBulkMethodEntriesCount(BulkOps.GET_ALL_MISS, keyCount - hits); - getAllObserver.end(GetAllOutcome.SUCCESS); return result; - } catch 
(StoreAccessException e) { - try { - return resilienceStrategy.getAllFailure(keys, e); - } finally { - getAllObserver.end(GetAllOutcome.FAILURE); - } - } } - LinkedHashSet> nullValuesForKeys(final Iterable keys) { - final LinkedHashSet> entries = new LinkedHashSet<>(); - for (K key : keys) { - entries.add(new AbstractMap.SimpleEntry<>(key, null)); - } - return entries; - } - - /** - * {@inheritDoc} - */ @Override - public void putAll(final Map entries) throws BulkCacheWritingException { - putAllObserver.begin(); - statusTransitioner.checkAvailable(); - checkNonNull(entries); - if(entries.isEmpty()) { - putAllObserver.end(PutAllOutcome.SUCCESS); - return; - } - + public void doPutAll(final Map entries) throws StoreAccessException { // Copy all entries to write into a Map - final Map entriesToRemap = new HashMap<>(); - for (Map.Entry entry: entries.entrySet()) { - // If a key/value is null, throw NPE, nothing gets mutated - if (entry.getKey() == null || entry.getValue() == null) { - throw new NullPointerException(); - } - entriesToRemap.put(entry.getKey(), entry.getValue()); - } + Map entriesToRemap = CollectionUtil.copyMapButFailOnNull(entries); - try { - PutAllFunction putAllFunction = new PutAllFunction<>(logger, entriesToRemap, runtimeConfiguration.getExpiry()); - store.bulkCompute(entries.keySet(), putAllFunction); - addBulkMethodEntriesCount(BulkOps.PUT_ALL, putAllFunction.getActualPutCount().get()); - addBulkMethodEntriesCount(BulkOps.UPDATE_ALL, putAllFunction.getActualUpdateCount().get()); - putAllObserver.end(PutAllOutcome.SUCCESS); - } catch (StoreAccessException e) { - try { - resilienceStrategy.putAllFailure(entries, e); - } finally { - putAllObserver.end(PutAllOutcome.FAILURE); - } - } + PutAllFunction putAllFunction = new PutAllFunction<>(logger, entriesToRemap, runtimeConfiguration.getExpiryPolicy()); + store.bulkCompute(entries.keySet(), putAllFunction); + addBulkMethodEntriesCount(BulkOps.PUT_ALL, putAllFunction.getActualPutCount().get()); + addBulkMethodEntriesCount(BulkOps.UPDATE_ALL, putAllFunction.getActualUpdateCount().get()); } - /** - * {@inheritDoc} - */ - @Override - public void removeAll(final Set keys) throws BulkCacheWritingException { - removeAllObserver.begin(); - statusTransitioner.checkAvailable(); - checkNonNull(keys); - if(keys.isEmpty()) { - removeAllObserver.end(RemoveAllOutcome.SUCCESS); - return; - } - - for (K key: keys) { - if (key == null) { - throw new NullPointerException(); - } - } - - - try { - RemoveAllFunction removeAllFunction = new RemoveAllFunction<>(); - store.bulkCompute(keys, removeAllFunction); - addBulkMethodEntriesCount(BulkOps.REMOVE_ALL, removeAllFunction.getActualRemoveCount().get()); - removeAllObserver.end(RemoveAllOutcome.SUCCESS); - } catch (StoreAccessException e) { - try { - resilienceStrategy.removeAllFailure(keys, e); - } finally { - removeAllObserver.end(RemoveAllOutcome.FAILURE); - } - } + protected void doRemoveAll(final Set keys) throws BulkCacheWritingException, StoreAccessException { + RemoveAllFunction removeAllFunction = new RemoveAllFunction<>(); + store.bulkCompute(keys, removeAllFunction); + addBulkMethodEntriesCount(BulkOps.REMOVE_ALL, removeAllFunction.getActualRemoveCount().get()); } - /** - * {@inheritDoc} - */ @Override - public V putIfAbsent(final K key, final V value) { - putIfAbsentObserver.begin(); - statusTransitioner.checkAvailable(); - checkNonNull(key, value); - - boolean absent = false; - try { - ValueHolder inCache = store.putIfAbsent(key, value); - absent = (inCache == null); - if (absent) { - 
putIfAbsentObserver.end(PutIfAbsentOutcome.PUT); - return null; - } else { - putIfAbsentObserver.end(PutIfAbsentOutcome.HIT); - return inCache.value(); - } - } catch (StoreAccessException e) { - try { - return resilienceStrategy.putIfAbsentFailure(key, value, null, e, absent); - } finally { - putIfAbsentObserver.end(PutIfAbsentOutcome.FAILURE); - } + public ValueHolder doPutIfAbsent(final K key, final V value, Consumer put) throws StoreAccessException { + ValueHolder result = store.putIfAbsent(key, value, put); + if(result == null) { + put.accept(true); } + return result; } - /** - * {@inheritDoc} - */ @Override - public boolean remove(final K key, final V value) { - conditionalRemoveObserver.begin(); - statusTransitioner.checkAvailable(); - checkNonNull(key, value); - RemoveStatus status = null; - boolean removed = false; - - try { - status = store.remove(key, value); - switch (status) { - case REMOVED: - removed = true; - conditionalRemoveObserver.end(ConditionalRemoveOutcome.SUCCESS); - break; - case KEY_MISSING: - conditionalRemoveObserver.end(ConditionalRemoveOutcome.FAILURE_KEY_MISSING); - break; - case KEY_PRESENT: - conditionalRemoveObserver.end(ConditionalRemoveOutcome.FAILURE_KEY_PRESENT); - break; - default: - throw new AssertionError("Invalid Status."); - } - } catch (StoreAccessException e) { - try { - return resilienceStrategy.removeFailure(key, value, e, removed); - } finally { - conditionalRemoveObserver.end(ConditionalRemoveOutcome.FAILURE); - } - } - return removed; + protected Store.RemoveStatus doRemove(K key, V value) throws StoreAccessException { + return store.remove(key, value); } - /** - * {@inheritDoc} - */ @Override - public V replace(final K key, final V value) { - replaceObserver.begin(); - statusTransitioner.checkAvailable(); - checkNonNull(key, value); - - try { - ValueHolder old = store.replace(key, value); - if (old != null) { - replaceObserver.end(ReplaceOutcome.HIT); - } else { - replaceObserver.end(ReplaceOutcome.MISS_NOT_PRESENT); - } - return old == null ? null : old.value(); - } catch (StoreAccessException e) { - try { - return resilienceStrategy.replaceFailure(key, value, e); - } finally { - replaceObserver.end(ReplaceOutcome.FAILURE); - } - } + protected V doReplace(K key, V value) throws StoreAccessException { + ValueHolder old = store.replace(key, value); + return old == null ? 
null : old.get(); } - /** - * {@inheritDoc} - */ @Override - public boolean replace(final K key, final V oldValue, final V newValue) { - replaceObserver.begin(); - statusTransitioner.checkAvailable(); - checkNonNull(key, oldValue, newValue); - - ReplaceStatus status = null; - boolean success = false; - - try { - status = store.replace(key, oldValue, newValue); - switch (status) { - case HIT: - success = true; - replaceObserver.end(ReplaceOutcome.HIT); - break; - case MISS_PRESENT: - replaceObserver.end(ReplaceOutcome.MISS_PRESENT); - break; - case MISS_NOT_PRESENT: - replaceObserver.end(ReplaceOutcome.MISS_NOT_PRESENT); - break; - default: - throw new AssertionError("Invalid Status."); - } - - return success; - } catch (StoreAccessException e) { - try { - return resilienceStrategy.replaceFailure(key, oldValue, newValue, e, success); - } finally { - replaceObserver.end(ReplaceOutcome.FAILURE); - } - } + protected Store.ReplaceStatus doReplace(final K key, final V oldValue, final V newValue) throws StoreAccessException { + return store.replace(key, oldValue, newValue); } - /** - * {@inheritDoc} - */ @Override - public CacheRuntimeConfiguration getRuntimeConfiguration() { - return runtimeConfiguration; - } - - /** - * {@inheritDoc} - */ - @Override - public void init() { - statusTransitioner.init().succeeded(); - } - - /** - * {@inheritDoc} - */ - @Override - public void close() { - statusTransitioner.close().succeeded(); - } - - /** - * {@inheritDoc} - */ - @Override - public Status getStatus() { - return statusTransitioner.currentStatus(); - } - - /** - * {@inheritDoc} - */ - @Override - public void addHook(LifeCycled hook) { - statusTransitioner.addHook(hook); - } - - void removeHook(LifeCycled hook) { - statusTransitioner.removeHook(hook); - } - - private static void checkNonNull(Object thing) { - if(thing == null) { - throw new NullPointerException(); - } - } - - private static void checkNonNull(Object... 
things) { - for (Object thing : things) { - checkNonNull(thing); - } - } - - private void checkNonNullContent(Collection collectionOfThings) { - checkNonNull(collectionOfThings); - for (Object thing : collectionOfThings) { - checkNonNull(thing); - } - } - - private void addBulkMethodEntriesCount(BulkOps op, long count) { - bulkMethodEntries.get(op).add(count); - } - - /** - * {@inheritDoc} - */ - @Override - public Jsr107Cache getJsr107Cache() { - return jsr107Cache; + public Jsr107Cache createJsr107Cache() { + return new Jsr107CacheImpl(); } /** @@ -628,73 +170,10 @@ public Jsr107Cache getJsr107Cache() { */ @Override public CacheLoaderWriter getCacheLoaderWriter() { - return null; + return this.cacheLoaderWriter; } - private final class Jsr107CacheImpl implements Jsr107Cache { - @Override - public void loadAll(Set keys, boolean replaceExistingValues, Function, Map> loadFunction) { - if(keys.isEmpty()) { - return ; - } - if (replaceExistingValues) { - loadAllReplace(keys, loadFunction); - } else { - loadAllAbsent(keys, loadFunction); - } - } - - @Override - public Iterator> specIterator() { - return new SpecIterator<>(this, store); - } - - @Override - public V getNoLoader(K key) { - return Ehcache.this.getNoLoader(key); - } - - @Override - public Map getAll(Set keys) { - return Ehcache.this.getAllInternal(keys, false); - } - - private void loadAllAbsent(Set keys, final Function, Map> loadFunction) { - try { - store.bulkComputeIfAbsent(keys, absentKeys -> cacheLoaderWriterLoadAllForKeys(absentKeys, loadFunction).entrySet()); - } catch (StoreAccessException e) { - throw newCacheLoadingException(e); - } - } - - Map cacheLoaderWriterLoadAllForKeys(Iterable keys, Function, Map> loadFunction) { - try { - Map loaded = loadFunction.apply(keys); - - // put into a new map since we can't assume the 107 cache loader returns things ordered, or necessarily with all the desired keys - Map rv = new LinkedHashMap<>(); - for (K key : keys) { - rv.put(key, loaded.get(key)); - } - return rv; - } catch (Exception e) { - throw newCacheLoadingException(e); - } - } - - private void loadAllReplace(Set keys, final Function, Map> loadFunction) { - try { - store.bulkCompute(keys, entries -> { - Collection keys1 = new ArrayList<>(); - for (Map.Entry entry : entries) { - keys1.add(entry.getKey()); - } - return cacheLoaderWriterLoadAllForKeys(keys1, loadFunction).entrySet(); - }); - } catch (StoreAccessException e) { - throw newCacheLoadingException(e); - } - } + private final class Jsr107CacheImpl extends Jsr107CacheBase { @Override public void compute(K key, final BiFunction computeFunction, @@ -711,30 +190,19 @@ public void compute(K key, final BiFunction c getObserver.end(GetOutcome.HIT); } - V newValue = computeFunction.apply(mappedKey, mappedValue); - - if (newValue == mappedValue) { - if (! replaceEqual.get()) { - return mappedValue; - } - } + return computeFunction.apply(mappedKey, mappedValue); - if (newValueAlreadyExpired(mappedKey, mappedValue, newValue)) { - return null; - } - - if (withStatsAndEvents.get()) { - if (newValue == null) { - removeObserver.end(RemoveOutcome.SUCCESS); - } else { - putObserver.end(PutOutcome.PUT); - } - } - - return newValue; }; - store.compute(key, fn, replaceEqual); + ValueHolder compute = store.computeAndGet(key, fn, replaceEqual, invokeWriter); + V newValue = compute == null ? 
null : compute.get(); + if (withStatsAndEvents.get()) { + if (newValue == null) { + removeObserver.end(RemoveOutcome.SUCCESS); + } else { + putObserver.end(PutOutcome.PUT); + } + } } catch (StoreAccessException e) { throw new RuntimeException(e); } @@ -745,20 +213,16 @@ public V getAndRemove(K key) { getObserver.begin(); removeObserver.begin(); - final AtomicReference existingValue = new AtomicReference<>(); + ValueHolder existingValue; try { - store.compute(key, (mappedKey, mappedValue) -> { - existingValue.set(mappedValue); - - return null; - }); + existingValue = store.getAndCompute(key, (mappedKey, mappedValue) -> null); } catch (StoreAccessException e) { getObserver.end(org.ehcache.core.statistics.CacheOperationOutcomes.GetOutcome.FAILURE); removeObserver.end(RemoveOutcome.FAILURE); throw new RuntimeException(e); } - V returnValue = existingValue.get(); + V returnValue = existingValue == null ? null : existingValue.get(); if (returnValue != null) { getObserver.end(org.ehcache.core.statistics.CacheOperationOutcomes.GetOutcome.HIT); removeObserver.end(RemoveOutcome.SUCCESS); @@ -773,24 +237,16 @@ public V getAndPut(K key, final V value) { getObserver.begin(); putObserver.begin(); - final AtomicReference existingValue = new AtomicReference<>(); + ValueHolder existingValue; try { - store.compute(key, (mappedKey, mappedValue) -> { - existingValue.set(mappedValue); - - if (newValueAlreadyExpired(mappedKey, mappedValue, value)) { - return null; - } - - return value; - }); + existingValue = store.getAndCompute(key, (mappedKey, mappedValue) -> value); } catch (StoreAccessException e) { getObserver.end(org.ehcache.core.statistics.CacheOperationOutcomes.GetOutcome.FAILURE); putObserver.end(PutOutcome.FAILURE); throw new RuntimeException(e); } - V returnValue = existingValue.get(); + V returnValue = existingValue == null ? 
null : existingValue.get(); if (returnValue != null) { getObserver.end(org.ehcache.core.statistics.CacheOperationOutcomes.GetOutcome.HIT); } else { @@ -799,154 +255,6 @@ public V getAndPut(K key, final V value) { putObserver.end(PutOutcome.PUT); return returnValue; } - - @Override - public boolean remove(K key) { - return removeInternal(key); - } - - @Override - public void removeAll() { - Store.Iterator>> iterator = store.iterator(); - while (iterator.hasNext()) { - try { - Entry> next = iterator.next(); - remove(next.getKey()); - } catch (StoreAccessException cae) { - // skip - } - } - } - } - - private class CacheEntryIterator implements Iterator> { - - private final Store.Iterator>> iterator; - private final boolean quiet; - private Cache.Entry> current; - private Cache.Entry> next; - private StoreAccessException nextException; - - public CacheEntryIterator(boolean quiet) { - this.quiet = quiet; - this.iterator = store.iterator(); - advance(); - } - - private void advance() { - try { - while (iterator.hasNext()) { - next = iterator.next(); - if (getNoLoader(next.getKey()) != null) { - return; - } - } - next = null; - } catch (RuntimeException re) { - nextException = new StoreAccessException(re); - next = null; - } catch (StoreAccessException cae) { - nextException = cae; - next = null; - } - } - - @Override - public boolean hasNext() { - statusTransitioner.checkAvailable(); - return nextException != null || next != null; - } - - @Override - public Entry next() { - if (!hasNext()) { - throw new NoSuchElementException(); - } - - if (!quiet) getObserver.begin(); - if (nextException == null) { - if (!quiet) getObserver.end(org.ehcache.core.statistics.CacheOperationOutcomes.GetOutcome.HIT); - current = next; - advance(); - return new ValueHolderBasedEntry<>(current); - } else { - if (!quiet) getObserver.end(org.ehcache.core.statistics.CacheOperationOutcomes.GetOutcome.FAILURE); - StoreAccessException cae = nextException; - nextException = null; - return resilienceStrategy.iteratorFailure(cae); - } - } - - @Override - public void remove() { - statusTransitioner.checkAvailable(); - if (current == null) { - throw new IllegalStateException("No current element"); - } - Ehcache.this.remove(current.getKey(), current.getValue().value()); - current = null; - } - } - - private static RecoveryCache recoveryCache(final Store store) { - return new RecoveryCache() { - - @Override - public void obliterate() throws StoreAccessException { - store.clear(); - } - - @Override - public void obliterate(K key) throws StoreAccessException { - store.remove(key); - } - - @Override - public void obliterate(Iterable keys) throws StoreAccessException { - for (K key : keys) { - obliterate(key); - } - } - }; - } - - private static boolean newValueAlreadyExpired(Logger logger, Expiry expiry, K key, V oldValue, V newValue) { - if (newValue == null) { - return false; - } - - Duration duration; - try { - if (oldValue == null) { - duration = expiry.getExpiryForCreation(key, newValue); - } else { - duration = expiry.getExpiryForUpdate(key, supplierOf(oldValue), newValue); - } - } catch (RuntimeException re) { - logger.error("Expiry computation caused an exception - Expiry duration will be 0 ", re); - return true; - } - - return Duration.ZERO.equals(duration); - } - - private static class ValueHolderBasedEntry implements Cache.Entry { - private final Cache.Entry> storeEntry; - - ValueHolderBasedEntry(Cache.Entry> storeEntry) { - this.storeEntry = storeEntry; - } - - @Override - public K getKey() { - return storeEntry.getKey(); 
- } - - @Override - public V getValue() { - return storeEntry.getValue().value(); - } - } // The compute function that will return the keys to their NEW values, taking the keys to their old values as input; @@ -955,11 +263,11 @@ public static class PutAllFunction implements Function entriesToRemap; - private final Expiry expiry; + private final ExpiryPolicy expiry; private final AtomicInteger actualPutCount = new AtomicInteger(); private final AtomicInteger actualUpdateCount = new AtomicInteger(); - public PutAllFunction(Logger logger, Map entriesToRemap, Expiry expiry) { + public PutAllFunction(Logger logger, Map entriesToRemap, ExpiryPolicy expiry) { this.logger = logger; this.entriesToRemap = entriesToRemap; this.expiry = expiry; @@ -995,7 +303,7 @@ public Map getEntriesToRemap() { } private boolean newValueAlreadyExpired(K key, V oldValue, V newValue) { - return Ehcache.newValueAlreadyExpired(logger, expiry, key, oldValue, newValue); + return EhcacheBase.newValueAlreadyExpired(logger, expiry, key, oldValue, newValue); } public AtomicInteger getActualPutCount() { @@ -1037,14 +345,15 @@ public static class GetAllFunction implements Function> apply(final Iterable keys) { - Map computeResult = new LinkedHashMap<>(); + int size = CollectionUtil.findBestCollectionSize(keys, 1); // in our current implementation, we have one entry all the time + + List> computeResult = new ArrayList<>(size); - // put all the entries to get ordering correct for (K key : keys) { - computeResult.put(key, null); + computeResult.add(CollectionUtil.entry(key, null)); } - return computeResult.entrySet(); + return computeResult; } } diff --git a/core/src/main/java/org/ehcache/core/EhcacheBase.java b/core/src/main/java/org/ehcache/core/EhcacheBase.java new file mode 100644 index 0000000000..aca14f72c7 --- /dev/null +++ b/core/src/main/java/org/ehcache/core/EhcacheBase.java @@ -0,0 +1,806 @@ +/* + * Copyright Terracotta, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.ehcache.core; + +import org.ehcache.Cache; +import org.ehcache.Status; +import org.ehcache.config.CacheRuntimeConfiguration; +import org.ehcache.core.events.CacheEventDispatcher; +import org.ehcache.core.spi.LifeCycled; +import org.ehcache.core.spi.store.Store; +import org.ehcache.core.spi.store.Store.ValueHolder; +import org.ehcache.core.statistics.BulkOps; +import org.ehcache.core.statistics.CacheOperationOutcomes.ClearOutcome; +import org.ehcache.core.statistics.CacheOperationOutcomes.ConditionalRemoveOutcome; +import org.ehcache.core.statistics.CacheOperationOutcomes.GetAllOutcome; +import org.ehcache.core.statistics.CacheOperationOutcomes.GetOutcome; +import org.ehcache.core.statistics.CacheOperationOutcomes.PutAllOutcome; +import org.ehcache.core.statistics.CacheOperationOutcomes.PutIfAbsentOutcome; +import org.ehcache.core.statistics.CacheOperationOutcomes.PutOutcome; +import org.ehcache.core.statistics.CacheOperationOutcomes.RemoveAllOutcome; +import org.ehcache.core.statistics.CacheOperationOutcomes.RemoveOutcome; +import org.ehcache.core.statistics.CacheOperationOutcomes.ReplaceOutcome; +import org.ehcache.expiry.ExpiryPolicy; +import org.ehcache.spi.loaderwriter.BulkCacheLoadingException; +import org.ehcache.spi.loaderwriter.BulkCacheWritingException; +import org.ehcache.spi.loaderwriter.CacheWritingException; +import org.ehcache.spi.resilience.ResilienceStrategy; +import org.ehcache.spi.resilience.StoreAccessException; +import org.slf4j.Logger; +import org.terracotta.statistics.StatisticsManager; +import org.terracotta.statistics.observer.OperationObserver; + +import java.time.Duration; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.EnumMap; +import java.util.Iterator; +import java.util.LinkedHashMap; +import java.util.Map; +import java.util.NoSuchElementException; +import java.util.Objects; +import java.util.Set; +import java.util.concurrent.atomic.LongAdder; +import java.util.function.Consumer; +import java.util.function.Function; + +import static org.ehcache.core.exceptions.ExceptionFactory.newCacheLoadingException; +import static org.terracotta.statistics.StatisticBuilder.operation; + +/** + * Base implementation of the {@link Cache} interface that is common to all Ehcache implementation + */ +public abstract class EhcacheBase implements InternalCache { + + protected final Logger logger; + + protected final StatusTransitioner statusTransitioner; + + protected final Store store; + protected final ResilienceStrategy resilienceStrategy; + protected final EhcacheRuntimeConfiguration runtimeConfiguration; + + protected final OperationObserver getObserver = operation(GetOutcome.class).named("get").of(this).tag("cache").build(); + protected final OperationObserver getAllObserver = operation(GetAllOutcome.class).named("getAll").of(this).tag("cache").build(); + protected final OperationObserver putObserver = operation(PutOutcome.class).named("put").of(this).tag("cache").build(); + protected final OperationObserver putAllObserver = operation(PutAllOutcome.class).named("putAll").of(this).tag("cache").build(); + protected final OperationObserver removeObserver = operation(RemoveOutcome.class).named("remove").of(this).tag("cache").build(); + protected final OperationObserver removeAllObserver = operation(RemoveAllOutcome.class).named("removeAll").of(this).tag("cache").build(); + protected final OperationObserver conditionalRemoveObserver = 
operation(ConditionalRemoveOutcome.class).named("conditionalRemove").of(this).tag("cache").build(); + protected final OperationObserver putIfAbsentObserver = operation(PutIfAbsentOutcome.class).named("putIfAbsent").of(this).tag("cache").build(); + protected final OperationObserver replaceObserver = operation(ReplaceOutcome.class).named("replace").of(this).tag("cache").build(); + protected final OperationObserver clearObserver = operation(ClearOutcome.class).named("clear").of(this).tag("cache").build(); + + protected final Map bulkMethodEntries = new EnumMap<>(BulkOps.class); + + /** + * Creates a new {@code EhcacheBase} based on the provided parameters. + * + * @param runtimeConfiguration the cache configuration + * @param store the store to use + * @param eventDispatcher the event dispatcher + * @param logger the logger + */ + EhcacheBase(EhcacheRuntimeConfiguration runtimeConfiguration, Store store, ResilienceStrategy resilienceStrategy, + CacheEventDispatcher eventDispatcher, Logger logger, StatusTransitioner statusTransitioner) { + this.store = store; + runtimeConfiguration.addCacheConfigurationListener(store.getConfigurationChangeListeners()); + StatisticsManager.associate(store).withParent(this); + + this.resilienceStrategy = resilienceStrategy; + + this.runtimeConfiguration = runtimeConfiguration; + runtimeConfiguration.addCacheConfigurationListener(eventDispatcher.getConfigurationChangeListeners()); + + this.logger = logger; + this.statusTransitioner = statusTransitioner; + for (BulkOps bulkOp : BulkOps.values()) { + bulkMethodEntries.put(bulkOp, new LongAdder()); + } + } + + /** + * {@inheritDoc} + */ + @Override + public V get(K key) { + getObserver.begin(); + try { + statusTransitioner.checkAvailable(); + checkNonNull(key); + + try { + Store.ValueHolder valueHolder = doGet(key); + + // Check for expiry first + if (valueHolder == null) { + getObserver.end(GetOutcome.MISS); + return null; + } else { + getObserver.end(GetOutcome.HIT); + return valueHolder.get(); + } + } catch (StoreAccessException e) { + V value = resilienceStrategy.getFailure(key, e); + getObserver.end(GetOutcome.FAILURE); + return value; + } + } catch (Throwable e) { + getObserver.end(GetOutcome.FAILURE); + throw e; + } + } + + protected abstract Store.ValueHolder doGet(K key) throws StoreAccessException; + + protected V getNoLoader(K key) { + getObserver.begin(); + try { + statusTransitioner.checkAvailable(); + checkNonNull(key); + + try { + Store.ValueHolder valueHolder = store.get(key); + + // Check for expiry first + if (valueHolder == null) { + getObserver.end(GetOutcome.MISS); + return null; + } else { + getObserver.end(GetOutcome.HIT); + return valueHolder.get(); + } + } catch (StoreAccessException e) { + V value = resilienceStrategy.getFailure(key, e); + getObserver.end(GetOutcome.FAILURE); + return value; + } + } catch (Throwable e) { + getObserver.end(GetOutcome.FAILURE); + throw e; + } + } + + /** + * {@inheritDoc} + */ + @Override + public void put(K key, V value) { + putObserver.begin(); + try { + statusTransitioner.checkAvailable(); + checkNonNull(key, value); + + try { + Store.PutStatus status = doPut(key, value); + switch (status) { + case PUT: + putObserver.end(PutOutcome.PUT); + break; + case NOOP: + putObserver.end(PutOutcome.NOOP); + break; + default: + throw new AssertionError("Invalid Status."); + } + } catch (StoreAccessException e) { + resilienceStrategy.putFailure(key, value, e); + putObserver.end(PutOutcome.FAILURE); + } + } catch (Throwable e) { + putObserver.end(PutOutcome.FAILURE); + 
throw e; + } + } + + protected abstract Store.PutStatus doPut(K key, V value) throws StoreAccessException; + + /** + * {@inheritDoc} + */ + @Override + public boolean containsKey(final K key) { + statusTransitioner.checkAvailable(); + checkNonNull(key); + try { + return store.containsKey(key); + } catch (StoreAccessException e) { + return resilienceStrategy.containsKeyFailure(key, e); + } + } + + /** + * {@inheritDoc} + */ + @Override + public void remove(K key) throws CacheWritingException { + removeInternal(key); // ignore return value; + } + + protected boolean removeInternal(final K key) { + removeObserver.begin(); + try { + statusTransitioner.checkAvailable(); + checkNonNull(key); + + boolean removed = false; + try { + removed = doRemoveInternal(key); + if (removed) { + removeObserver.end(RemoveOutcome.SUCCESS); + } else { + removeObserver.end(RemoveOutcome.NOOP); + } + } catch (StoreAccessException e) { + resilienceStrategy.removeFailure(key, e); + removeObserver.end(RemoveOutcome.FAILURE); + } + + return removed; + } catch (Throwable e) { + removeObserver.end(RemoveOutcome.FAILURE); + throw e; + } + } + + protected abstract boolean doRemoveInternal(final K key) throws StoreAccessException; + + /** + * {@inheritDoc} + */ + @Override + public void clear() { + clearObserver.begin(); + try { + statusTransitioner.checkAvailable(); + try { + store.clear(); + clearObserver.end(ClearOutcome.SUCCESS); + } catch (StoreAccessException e) { + resilienceStrategy.clearFailure(e); + clearObserver.end(ClearOutcome.FAILURE); + } + } catch (Throwable e) { + clearObserver.end(ClearOutcome.FAILURE); + throw e; + } + } + + /** + * {@inheritDoc} + */ + @Override + public V putIfAbsent(final K key, final V value) { + putIfAbsentObserver.begin(); + try { + statusTransitioner.checkAvailable(); + checkNonNull(key, value); + + boolean[] put = { false }; + + try { + ValueHolder inCache = doPutIfAbsent(key, value, b -> put[0] = b); + if (put[0]) { + putIfAbsentObserver.end(PutIfAbsentOutcome.PUT); + return null; + } else if (inCache == null) { + putIfAbsentObserver.end(PutIfAbsentOutcome.HIT); + return null; + } else { + putIfAbsentObserver.end(PutIfAbsentOutcome.HIT); + return inCache.get(); + } + } catch (StoreAccessException e) { + V newValue = resilienceStrategy.putIfAbsentFailure(key, value, e); + putIfAbsentObserver.end(PutIfAbsentOutcome.FAILURE); + return newValue; + } + } catch (Throwable e) { + putIfAbsentObserver.end(PutIfAbsentOutcome.FAILURE); + throw e; + } + } + + protected abstract ValueHolder doPutIfAbsent(K key, V value, Consumer put) throws StoreAccessException; + + /** + * {@inheritDoc} + */ + @Override + public Iterator> iterator() { + statusTransitioner.checkAvailable(); + return new CacheEntryIterator(false); + } + + /** + * {@inheritDoc} + */ + @Override + public Map getAll(Set keys) throws BulkCacheLoadingException { + return getAllInternal(keys, true); + } + + protected Map getAllInternal(Set keys, boolean includeNulls) { + getAllObserver.begin(); + try { + statusTransitioner.checkAvailable(); + checkNonNullContent(keys); + if (keys.isEmpty()) { + getAllObserver.end(GetAllOutcome.SUCCESS); + return Collections.emptyMap(); + } + + try { + Map result = doGetAllInternal(keys, includeNulls); + getAllObserver.end(GetAllOutcome.SUCCESS); + return result; + } catch (StoreAccessException e) { + Map result = resilienceStrategy.getAllFailure(keys, e); + getAllObserver.end(GetAllOutcome.FAILURE); + return result; + } + } catch (Throwable e) { + getAllObserver.end(GetAllOutcome.FAILURE); + throw 
e; + } + } + + protected abstract Map doGetAllInternal(Set keys, boolean includeNulls) throws StoreAccessException; + + /** + * {@inheritDoc} + */ + @Override + public void putAll(Map entries) throws BulkCacheWritingException { + putAllObserver.begin(); + try { + statusTransitioner.checkAvailable(); + checkNonNull(entries); + if(entries.isEmpty()) { + putAllObserver.end(PutAllOutcome.SUCCESS); + return; + } + + try { + doPutAll(entries); + putAllObserver.end(PutAllOutcome.SUCCESS); + } catch (StoreAccessException e) { + resilienceStrategy.putAllFailure(entries, e); + putAllObserver.end(PutAllOutcome.FAILURE); + } + } catch (Exception e) { + putAllObserver.end(PutAllOutcome.FAILURE); + throw e; + } + } + + protected abstract void doPutAll(Map entries) throws StoreAccessException, BulkCacheWritingException; + + protected boolean newValueAlreadyExpired(K key, V oldValue, V newValue) { + return newValueAlreadyExpired(logger, runtimeConfiguration.getExpiryPolicy(), key, oldValue, newValue); + } + + /** + * {@inheritDoc} + */ + @Override + public void removeAll(Set keys) throws BulkCacheWritingException { + removeAllObserver.begin(); + try { + statusTransitioner.checkAvailable(); + checkNonNull(keys); + if (keys.isEmpty()) { + removeAllObserver.end(RemoveAllOutcome.SUCCESS); + return; + } + + for (K key : keys) { + if (key == null) { + throw new NullPointerException(); + } + } + + try { + doRemoveAll(keys); + removeAllObserver.end(RemoveAllOutcome.SUCCESS); + } catch (StoreAccessException e) { + resilienceStrategy.removeAllFailure(keys, e); + removeAllObserver.end(RemoveAllOutcome.FAILURE); + } + } catch (Throwable e) { + removeAllObserver.end(RemoveAllOutcome.FAILURE); + throw e; + } + } + + protected abstract void doRemoveAll(Set keys) throws BulkCacheWritingException, StoreAccessException; + + protected static boolean newValueAlreadyExpired(Logger logger, ExpiryPolicy expiry, K key, V oldValue, V newValue) { + if (newValue == null) { + return false; + } + + Duration duration; + try { + if (oldValue == null) { + duration = expiry.getExpiryForCreation(key, newValue); + } else { + duration = expiry.getExpiryForUpdate(key, () -> oldValue, newValue); + } + } catch (RuntimeException re) { + logger.error("Expiry computation caused an exception - Expiry duration will be 0 ", re); + return true; + } + + return Duration.ZERO.equals(duration); + } + + /** + * {@inheritDoc} + */ + @Override + public boolean remove(K key, V value) { + conditionalRemoveObserver.begin(); + try { + statusTransitioner.checkAvailable(); + checkNonNull(key, value); + + try { + Store.RemoveStatus status = doRemove(key, value); + switch (status) { + case REMOVED: + conditionalRemoveObserver.end(ConditionalRemoveOutcome.SUCCESS); + return true; + case KEY_MISSING: + conditionalRemoveObserver.end(ConditionalRemoveOutcome.FAILURE_KEY_MISSING); + return false; + case KEY_PRESENT: + conditionalRemoveObserver.end(ConditionalRemoveOutcome.FAILURE_KEY_PRESENT); + return false; + default: + throw new AssertionError("Invalid Status: " + status); + } + } catch (StoreAccessException e) { + boolean removed = resilienceStrategy.removeFailure(key, value, e); + conditionalRemoveObserver.end(ConditionalRemoveOutcome.FAILURE); + return removed; + } + } catch (Throwable e) { + conditionalRemoveObserver.end(ConditionalRemoveOutcome.FAILURE); + throw e; + } + } + + protected abstract Store.RemoveStatus doRemove(final K key, final V value) throws StoreAccessException; + + /** + * {@inheritDoc} + */ + @Override + public V replace(K key, V value) { + 
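The outcomes recorded by the two replace() variants below correspond directly to what a caller of the public org.ehcache.Cache API observes. A hypothetical usage, assuming a cache manager with a Long-to-String cache named "numbers":

    Cache<Long, String> cache = cacheManager.getCache("numbers", Long.class, String.class);

    String previous = cache.replace(42L, "updated");
    // previous == null -> key was absent (MISS_NOT_PRESENT); otherwise HIT

    boolean swapped = cache.replace(42L, "updated", "updated-again");
    // false -> key absent (MISS_NOT_PRESENT) or current value mismatch (MISS_PRESENT)
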
replaceObserver.begin(); + try { + statusTransitioner.checkAvailable(); + checkNonNull(key, value); + + try { + V result = doReplace(key, value); + if(result == null) { + replaceObserver.end(ReplaceOutcome.MISS_NOT_PRESENT); + } else { + replaceObserver.end(ReplaceOutcome.HIT); + } + return result; + } catch (StoreAccessException e) { + V result = resilienceStrategy.replaceFailure(key, value, e); + replaceObserver.end(ReplaceOutcome.FAILURE); + return result; + } + } catch (Throwable e) { + replaceObserver.end(ReplaceOutcome.FAILURE); + throw e; + } + } + + protected abstract V doReplace(final K key, final V value) throws StoreAccessException; + + /** + * {@inheritDoc} + */ + @Override + public boolean replace(final K key, final V oldValue, final V newValue) { + replaceObserver.begin(); + try { + statusTransitioner.checkAvailable(); + checkNonNull(key, oldValue, newValue); + + try { + Store.ReplaceStatus status = doReplace(key, oldValue, newValue); + switch (status) { + case HIT: + replaceObserver.end(ReplaceOutcome.HIT); + return true; + case MISS_PRESENT: + replaceObserver.end(ReplaceOutcome.MISS_PRESENT); + return false; + case MISS_NOT_PRESENT: + replaceObserver.end(ReplaceOutcome.MISS_NOT_PRESENT); + return false; + default: + throw new AssertionError("Invalid Status:" + status); + } + } catch (StoreAccessException e) { + boolean success = resilienceStrategy.replaceFailure(key, oldValue, newValue, e); + replaceObserver.end(ReplaceOutcome.FAILURE); + return success; + } + } catch (Throwable e) { + replaceObserver.end(ReplaceOutcome.FAILURE); + throw e; + } + } + + protected abstract Store.ReplaceStatus doReplace(K key, V oldValue, V newValue) throws StoreAccessException; + + /** + * {@inheritDoc} + */ + @Override + public CacheRuntimeConfiguration getRuntimeConfiguration() { + return runtimeConfiguration; + } + + /** + * {@inheritDoc} + */ + @Override + public void init() { + statusTransitioner.init().succeeded(); + } + + /** + * {@inheritDoc} + */ + @Override + public void close() { + statusTransitioner.close().succeeded(); + } + + /** + * {@inheritDoc} + */ + @Override + public Status getStatus() { + return statusTransitioner.currentStatus(); + } + + /** + * {@inheritDoc} + */ + @Override + public void addHook(LifeCycled hook) { + statusTransitioner.addHook(hook); + } + + void removeHook(LifeCycled hook) { + statusTransitioner.removeHook(hook); + } + + protected void addBulkMethodEntriesCount(BulkOps op, long count) { + bulkMethodEntries.get(op).add(count); + } + + /** + * {@inheritDoc} + */ + @Override + public Map getBulkMethodEntries() { + return bulkMethodEntries; + } + + protected static void checkNonNull(Object thing) { + Objects.requireNonNull(thing); + } + + protected static void checkNonNull(Object... 
things) { + for (Object thing : things) { + checkNonNull(thing); + } + } + + protected void checkNonNullContent(Collection collectionOfThings) { + checkNonNull(collectionOfThings); + for (Object thing : collectionOfThings) { + checkNonNull(thing); + } + } + + protected abstract class Jsr107CacheBase implements Jsr107Cache { + + @Override + public void loadAll(Set keys, boolean replaceExistingValues, Function, Map> loadFunction) { + if(keys.isEmpty()) { + return ; + } + if (replaceExistingValues) { + loadAllReplace(keys, loadFunction); + } else { + loadAllAbsent(keys, loadFunction); + } + } + + @Override + public Iterator> specIterator() { + return new SpecIterator<>(this, store); + } + + @Override + public V getNoLoader(K key) { + return EhcacheBase.this.getNoLoader(key); + } + + @Override + public Map getAll(Set keys) { + return getAllInternal(keys, false); + } + + private void loadAllAbsent(Set keys, final Function, Map> loadFunction) { + try { + store.bulkComputeIfAbsent(keys, absentKeys -> cacheLoaderWriterLoadAllForKeys(absentKeys, loadFunction).entrySet()); + } catch (StoreAccessException e) { + throw newCacheLoadingException(e); + } + } + + Map cacheLoaderWriterLoadAllForKeys(Iterable keys, Function, Map> loadFunction) { + try { + Map loaded = loadFunction.apply(keys); + + // put into a new map since we can't assume the 107 cache loader returns things ordered, or necessarily with all the desired keys + Map rv = new LinkedHashMap<>(); + for (K key : keys) { + rv.put(key, loaded.get(key)); + } + return rv; + } catch (Exception e) { + throw newCacheLoadingException(e); + } + } + + private void loadAllReplace(Set keys, final Function, Map> loadFunction) { + try { + store.bulkCompute(keys, entries -> { + Collection keys1 = new ArrayList<>(); + for (Map.Entry entry : entries) { + keys1.add(entry.getKey()); + } + return cacheLoaderWriterLoadAllForKeys(keys1, loadFunction).entrySet(); + }); + } catch (StoreAccessException e) { + throw newCacheLoadingException(e); + } + } + + @Override + public boolean remove(K key) { + return EhcacheBase.this.removeInternal(key); + } + + @Override + public void removeAll() { + Store.Iterator>> iterator = store.iterator(); + while (iterator.hasNext()) { + try { + Entry> next = iterator.next(); + remove(next.getKey()); + } catch (StoreAccessException cae) { + // skip + } + } + } + + } + + private class CacheEntryIterator implements Iterator> { + + private final Store.Iterator>> iterator; + private final boolean quiet; + private Cache.Entry> current; + private Cache.Entry> next; + private StoreAccessException nextException; + + public CacheEntryIterator(boolean quiet) { + this.quiet = quiet; + this.iterator = store.iterator(); + advance(); + } + + private void advance() { + try { + while (iterator.hasNext()) { + next = iterator.next(); + if (getNoLoader(next.getKey()) != null) { + return; + } + } + next = null; + } catch (RuntimeException re) { + nextException = new StoreAccessException(re); + next = null; + } catch (StoreAccessException cae) { + nextException = cae; + next = null; + } + } + + @Override + public boolean hasNext() { + statusTransitioner.checkAvailable(); + return nextException != null || next != null; + } + + @Override + public Entry next() { + if (!hasNext()) { + throw new NoSuchElementException(); + } + + if (!quiet) getObserver.begin(); + if (nextException == null) { + if (!quiet) getObserver.end(GetOutcome.HIT); + current = next; + advance(); + return new ValueHolderBasedEntry<>(current); + } else { + if (!quiet) 
getObserver.end(GetOutcome.FAILURE); + StoreAccessException cae = nextException; + nextException = null; + return resilienceStrategy.iteratorFailure(cae); + } + } + + @Override + public void remove() { + statusTransitioner.checkAvailable(); + if (current == null) { + throw new IllegalStateException("No current element"); + } + EhcacheBase.this.remove(current.getKey(), current.getValue().get()); + current = null; + } + } + + private static class ValueHolderBasedEntry implements Cache.Entry { + private final Cache.Entry> storeEntry; + + ValueHolderBasedEntry(Cache.Entry> storeEntry) { + this.storeEntry = storeEntry; + } + + @Override + public K getKey() { + return storeEntry.getKey(); + } + + @Override + public V getValue() { + return storeEntry.getValue().get(); + } + + } +} + diff --git a/core/src/main/java/org/ehcache/core/EhcacheManager.java b/core/src/main/java/org/ehcache/core/EhcacheManager.java index 8288a34bf3..5c066c77cc 100644 --- a/core/src/main/java/org/ehcache/core/EhcacheManager.java +++ b/core/src/main/java/org/ehcache/core/EhcacheManager.java @@ -28,6 +28,7 @@ import org.ehcache.core.config.BaseCacheConfiguration; import org.ehcache.core.config.DefaultConfiguration; import org.ehcache.core.config.store.StoreEventSourceConfiguration; +import org.ehcache.core.config.store.StoreStatisticsConfiguration; import org.ehcache.core.events.CacheEventDispatcher; import org.ehcache.core.events.CacheEventDispatcherFactory; import org.ehcache.core.events.CacheEventListenerConfiguration; @@ -37,6 +38,7 @@ import org.ehcache.core.internal.store.StoreConfigurationImpl; import org.ehcache.core.internal.store.StoreSupport; import org.ehcache.core.internal.util.ClassLoading; +import org.ehcache.core.resilience.DefaultRecoveryStore; import org.ehcache.core.spi.LifeCycled; import org.ehcache.core.spi.LifeCycledAdapter; import org.ehcache.core.spi.service.CacheManagerProviderService; @@ -45,10 +47,13 @@ import org.ehcache.core.spi.store.Store; import org.ehcache.event.CacheEventListener; import org.ehcache.spi.loaderwriter.CacheLoaderWriter; +import org.ehcache.spi.loaderwriter.CacheLoaderWriterConfiguration; import org.ehcache.spi.loaderwriter.CacheLoaderWriterProvider; import org.ehcache.spi.loaderwriter.WriteBehindConfiguration; import org.ehcache.spi.loaderwriter.WriteBehindProvider; import org.ehcache.spi.persistence.PersistableResourceService; +import org.ehcache.spi.resilience.ResilienceStrategy; +import org.ehcache.spi.resilience.ResilienceStrategyProvider; import org.ehcache.spi.serialization.SerializationProvider; import org.ehcache.spi.serialization.Serializer; import org.ehcache.spi.serialization.UnsupportedTypeException; @@ -72,8 +77,11 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.atomic.AtomicReference; import static org.ehcache.core.internal.service.ServiceLocator.dependencySet; +import static org.ehcache.core.spi.service.ServiceUtils.findOptionalAmongst; +import static org.ehcache.core.spi.service.ServiceUtils.findSingletonAmongst; /** * Implementation class for the {@link org.ehcache.CacheManager} and {@link PersistentCacheManager} @@ -97,7 +105,7 @@ public class EhcacheManager implements PersistentCacheManager, InternalCacheMana protected final ServiceLocator serviceLocator; public EhcacheManager(Configuration config) { - this(config, Collections.emptyList(), true); + this(config, Collections.emptyList(), true); } public EhcacheManager(Configuration 
config, Collection services) { @@ -115,7 +123,7 @@ public EhcacheManager(Configuration config, Collection services, boolea } private void validateServicesConfigs() { - HashSet classes = new HashSet<>(); + Set> classes = new HashSet<>(); for (ServiceCreationConfiguration service : configuration.getServiceCreationConfigurations()) { if (!classes.add(service.getServiceType())) { throw new IllegalStateException("Duplicate creation configuration for service " + service.getServiceType()); @@ -130,6 +138,7 @@ private ServiceLocator resolveServices(Collection services) { .with(WriteBehindProvider.class) .with(CacheEventDispatcherFactory.class) .with(CacheEventListenerProvider.class) + .with(ResilienceStrategyProvider.class) .with(services); if (!builder.contains(CacheManagerProviderService.class)) { builder = builder.with(new DefaultCacheManagerProviderService(this)); @@ -238,16 +247,16 @@ public Cache createCache(final String alias, CacheConfiguration Cache createCache(final String alias, CacheConfiguration originalConfig, boolean addToConfig) throws IllegalArgumentException { + private Cache createCache(String alias, CacheConfiguration originalConfig, boolean addToConfig) throws IllegalArgumentException { statusTransitioner.checkAvailable(); LOGGER.debug("Creating Cache '{}' in {}.", alias, simpleName); - CacheConfiguration config = adjustConfigurationWithCacheManagerDefaults(originalConfig); + CacheConfiguration config = adjustConfigurationWithCacheManagerDefaults(alias, originalConfig); Class keyType = config.getKeyType(); Class valueType = config.getValueType(); - final CacheHolder value = new CacheHolder(keyType, valueType, null); + CacheHolder value = new CacheHolder(keyType, valueType); if (caches.putIfAbsent(alias, value) != null) { throw new IllegalArgumentException("Cache '" + alias +"' already exists"); } @@ -292,12 +301,12 @@ private Cache createCache(final String alias, CacheConfiguration InternalCache createNewEhcache(final String alias, final CacheConfiguration config, - final Class keyType, final Class valueType) { + InternalCache createNewEhcache(String alias, CacheConfiguration config, + Class keyType, Class valueType) { Collection> adjustedServiceConfigs = new ArrayList<>(config.getServiceConfigurations()); - List unknownServiceConfigs = new ArrayList<>(); - for (ServiceConfiguration serviceConfig : adjustedServiceConfigs) { + List> unknownServiceConfigs = new ArrayList<>(); + for (ServiceConfiguration serviceConfig : adjustedServiceConfigs) { if (!serviceLocator.knowsServiceFor(serviceConfig)) { unknownServiceConfigs.add(serviceConfig); } @@ -308,19 +317,17 @@ InternalCache createNewEhcache(final String alias, final CacheConfi List lifeCycledList = new ArrayList<>(); - final Store store = getStore(alias, config, keyType, valueType, adjustedServiceConfigs, lifeCycledList); - - final CacheLoaderWriterProvider cacheLoaderWriterProvider = serviceLocator.getService(CacheLoaderWriterProvider.class); - final CacheLoaderWriter decorator ; + CacheLoaderWriterProvider cacheLoaderWriterProvider = serviceLocator.getService(CacheLoaderWriterProvider.class); + CacheLoaderWriter decorator ; if(cacheLoaderWriterProvider != null) { - final CacheLoaderWriter loaderWriter; + CacheLoaderWriter loaderWriter; loaderWriter = cacheLoaderWriterProvider.createCacheLoaderWriter(alias, config); WriteBehindConfiguration writeBehindConfiguration = - ServiceUtils.findSingletonAmongst(WriteBehindConfiguration.class, config.getServiceConfigurations().toArray()); + 
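The decoration below only happens when the cache declares both a loader-writer and a write-behind configuration. A hedged sketch of such a configuration using the org.ehcache.config.builders API of the 3.x line (MyLoaderWriter is an assumed user-supplied CacheLoaderWriter; exact builder signatures may vary between minor versions):

    CacheConfiguration<Long, String> config = CacheConfigurationBuilder
        .newCacheConfigurationBuilder(Long.class, String.class, ResourcePoolsBuilder.heap(100))
        .withLoaderWriter(new MyLoaderWriter())              // assumed CacheLoaderWriter<Long, String>
        .add(WriteBehindConfigurationBuilder
            .newBatchedWriteBehindConfiguration(5, TimeUnit.SECONDS, 20)
            .build())
        .build();
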
ServiceUtils.findSingletonAmongst(WriteBehindConfiguration.class, config.getServiceConfigurations()); if(writeBehindConfiguration == null) { decorator = loaderWriter; } else { - final WriteBehindProvider factory = serviceLocator.getService(WriteBehindProvider.class); + WriteBehindProvider factory = serviceLocator.getService(WriteBehindProvider.class); decorator = factory.createWriteBehindLoaderWriter(loaderWriter, writeBehindConfiguration); if(decorator != null) { lifeCycledList.add(new LifeCycledAdapter() { @@ -336,7 +343,7 @@ public void close() { lifeCycledList.add(new LifeCycledAdapter() { @Override public void close() throws Exception { - cacheLoaderWriterProvider.releaseCacheLoaderWriter(loaderWriter); + cacheLoaderWriterProvider.releaseCacheLoaderWriter(alias, loaderWriter); } }); } @@ -344,9 +351,12 @@ public void close() throws Exception { decorator = null; } - final CacheEventDispatcherFactory cenlProvider = serviceLocator.getService(CacheEventDispatcherFactory.class); - final CacheEventDispatcher evtService = - cenlProvider.createCacheEventDispatcher(store, adjustedServiceConfigs.toArray(new ServiceConfiguration[adjustedServiceConfigs.size()])); + Store store = getStore(alias, config, keyType, valueType, adjustedServiceConfigs, lifeCycledList, decorator); + + + CacheEventDispatcherFactory cenlProvider = serviceLocator.getService(CacheEventDispatcherFactory.class); + CacheEventDispatcher evtService = + cenlProvider.createCacheEventDispatcher(store, adjustedServiceConfigs.toArray(new ServiceConfiguration[adjustedServiceConfigs.size()])); lifeCycledList.add(new LifeCycledAdapter() { @Override public void close() { @@ -355,26 +365,27 @@ public void close() { }); evtService.setStoreEventSource(store.getStoreEventSource()); - final InternalCache cache; + ResilienceStrategyProvider resilienceProvider = serviceLocator.getService(ResilienceStrategyProvider.class); + ResilienceStrategy resilienceStrategy; if (decorator == null) { - cache = new Ehcache<>(config, store, evtService, LoggerFactory.getLogger(Ehcache.class + "-" + alias)); + resilienceStrategy = resilienceProvider.createResilienceStrategy(alias, config, new DefaultRecoveryStore<>(store)); } else { - cache = new EhcacheWithLoaderWriter<>(config, store, decorator, evtService, - useLoaderInAtomics, LoggerFactory.getLogger(EhcacheWithLoaderWriter.class + "-" + alias)); + resilienceStrategy = resilienceProvider.createResilienceStrategy(alias, config, new DefaultRecoveryStore<>(store), decorator); } + InternalCache cache = new Ehcache<>(config, store, resilienceStrategy, evtService, LoggerFactory.getLogger(Ehcache.class + "-" + alias), decorator); - final CacheEventListenerProvider evntLsnrFactory = serviceLocator.getService(CacheEventListenerProvider.class); + CacheEventListenerProvider evntLsnrFactory = serviceLocator.getService(CacheEventListenerProvider.class); if (evntLsnrFactory != null) { Collection evtLsnrConfigs = ServiceUtils.findAmongst(CacheEventListenerConfiguration.class, config.getServiceConfigurations()); for (CacheEventListenerConfiguration lsnrConfig: evtLsnrConfigs) { - final CacheEventListener lsnr = evntLsnrFactory.createEventListener(alias, lsnrConfig); + CacheEventListener lsnr = evntLsnrFactory.createEventListener(alias, lsnrConfig); if (lsnr != null) { cache.getRuntimeConfiguration().registerCacheEventListener(lsnr, lsnrConfig.orderingMode(), lsnrConfig.firingMode(), lsnrConfig.fireOn()); lifeCycledList.add(new LifeCycled() { @Override - public void init() throws Exception { + public void init() { // no-op 
for now } @@ -406,15 +417,16 @@ public void close() throws Exception { * this list may be augmented by the implementation of this method * @param lifeCycledList the {@code List} of {@code LifeCycled} instances used to manage components of the * cache; this list may be augmented by the implementation of this method + * @param loaderWriter the {@code CacheLoaderWriter} to be used by the {@code Cache} * @param the cache key type * @param the cache value type * * @return the {@code Store} instance used to create the cache */ - protected Store getStore(final String alias, final CacheConfiguration config, - final Class keyType, final Class valueType, - final Collection> serviceConfigs, - final List lifeCycledList) { + protected Store getStore(String alias, CacheConfiguration config, + Class keyType, Class valueType, + Collection> serviceConfigs, + List lifeCycledList, CacheLoaderWriter loaderWriter) { final Set> resourceTypes = config.getResourcePools().getResourceTypeSet(); for (ResourceType resourceType : resourceTypes) { @@ -437,12 +449,10 @@ public void close() throws Exception { } } - final Store.Provider storeProvider = StoreSupport.selectStoreProvider(serviceLocator, resourceTypes, serviceConfigs); - Serializer keySerializer = null; Serializer valueSerializer = null; final SerializationProvider serialization = serviceLocator.getService(SerializationProvider.class); - ServiceConfiguration[] serviceConfigArray = serviceConfigs.toArray(new ServiceConfiguration[serviceConfigs.size()]); + ServiceConfiguration[] serviceConfigArray = serviceConfigs.toArray(new ServiceConfiguration[serviceConfigs.size()]); if (serialization != null) { try { final Serializer keySer = serialization.createKeySerializer(keyType, config.getClassLoader(), serviceConfigArray); @@ -480,28 +490,38 @@ public void close() throws Exception { } } - int dispatcherConcurrency; - StoreEventSourceConfiguration eventSourceConfiguration = ServiceUtils.findSingletonAmongst(StoreEventSourceConfiguration.class, config - .getServiceConfigurations() - .toArray()); - if (eventSourceConfiguration != null) { - dispatcherConcurrency = eventSourceConfiguration.getDispatcherConcurrency(); - } else { - dispatcherConcurrency = StoreEventSourceConfiguration.DEFAULT_DISPATCHER_CONCURRENCY; + Collection> serviceConfigurations = config.getServiceConfigurations(); + + int dispatcherConcurrency = findOptionalAmongst(StoreEventSourceConfiguration.class, serviceConfigurations) + .map(StoreEventSourceConfiguration::getDispatcherConcurrency) + .orElse(StoreEventSourceConfiguration.DEFAULT_DISPATCHER_CONCURRENCY); + + boolean operationStatisticsEnabled = findOptionalAmongst(StoreStatisticsConfiguration.class, serviceConfigurations) + .map(StoreStatisticsConfiguration::isOperationStatisticsEnabled) + // By default, we enable statistics only in a tiered environment + .orElseGet(() -> config.getResourcePools().getResourceTypeSet().size() > 1); + + Store.Configuration storeConfiguration = new StoreConfigurationImpl<>(config, dispatcherConcurrency, + operationStatisticsEnabled, keySerializer, valueSerializer, loaderWriter, useLoaderInAtomics); + + Store.Provider storeProvider = StoreSupport.selectWrapperStoreProvider(serviceLocator, serviceConfigs); + if (storeProvider == null) { + storeProvider = StoreSupport.selectStoreProvider(serviceLocator, resourceTypes, serviceConfigs); } - Store.Configuration storeConfiguration = new StoreConfigurationImpl<>(config, dispatcherConcurrency, keySerializer, valueSerializer); - final Store store = 
storeProvider.createStore(storeConfiguration, serviceConfigArray); + Store store = storeProvider.createStore(storeConfiguration, serviceConfigArray); + + AtomicReference storeProviderRef = new AtomicReference<>(storeProvider); lifeCycledList.add(new LifeCycled() { @Override - public void init() throws Exception { - storeProvider.initStore(store); + public void init() { + storeProviderRef.get().initStore(store); } @Override public void close() { - storeProvider.releaseStore(store); + storeProviderRef.get().releaseStore(store); } }); @@ -521,16 +541,39 @@ private PersistableResourceService getPersistableResourceService(ResourceType /** * adjusts the config to reflect new classloader & serialization provider */ - private CacheConfiguration adjustConfigurationWithCacheManagerDefaults(CacheConfiguration config) { + private CacheConfiguration adjustConfigurationWithCacheManagerDefaults(String alias, CacheConfiguration config) { ClassLoader cacheClassLoader = config.getClassLoader(); + + List> configurationList = new ArrayList<>(); + configurationList.addAll(config.getServiceConfigurations()); + + CacheLoaderWriterConfiguration loaderWriterConfiguration = findSingletonAmongst(CacheLoaderWriterConfiguration.class, config.getServiceConfigurations()); + if (loaderWriterConfiguration == null) { + CacheLoaderWriterProvider loaderWriterProvider = serviceLocator.getService(CacheLoaderWriterProvider.class); + ServiceConfiguration preConfiguredCacheLoaderWriterConfig = loaderWriterProvider.getPreConfiguredCacheLoaderWriterConfig(alias); + if (preConfiguredCacheLoaderWriterConfig != null) { + configurationList.add(preConfiguredCacheLoaderWriterConfig); + } + if (loaderWriterProvider.isLoaderJsrProvided(alias)) { + configurationList.add(new CacheLoaderWriterConfiguration() { + }); + } + } + + ServiceConfiguration[] serviceConfigurations = new ServiceConfiguration[configurationList.size()]; + configurationList.toArray(serviceConfigurations); + if (cacheClassLoader == null) { cacheClassLoader = cacheManagerClassLoader; } if (cacheClassLoader != config.getClassLoader() ) { config = new BaseCacheConfiguration<>(config.getKeyType(), config.getValueType(), - config.getEvictionAdvisor(), cacheClassLoader, config.getExpiry(), - config.getResourcePools(), config.getServiceConfigurations().toArray( - new ServiceConfiguration[config.getServiceConfigurations().size()])); + config.getEvictionAdvisor(), cacheClassLoader, config.getExpiryPolicy(), + config.getResourcePools(), serviceConfigurations); + } else { + config = new BaseCacheConfiguration<>(config.getKeyType(), config.getValueType(), + config.getEvictionAdvisor(), config.getClassLoader(), config.getExpiryPolicy(), + config.getResourcePools(), serviceConfigurations); } return config; } @@ -756,10 +799,9 @@ private static final class CacheHolder { private volatile InternalCache cache; private volatile boolean isValueSet = false; - CacheHolder(Class keyType, Class valueType, InternalCache cache) { + CacheHolder(Class keyType, Class valueType) { this.keyType = keyType; this.valueType = valueType; - this.cache = cache; } InternalCache retrieve(Class refKeyType, Class refValueType) { diff --git a/core/src/main/java/org/ehcache/core/EhcacheRuntimeConfiguration.java b/core/src/main/java/org/ehcache/core/EhcacheRuntimeConfiguration.java index a2af56fbdd..8417844d90 100644 --- a/core/src/main/java/org/ehcache/core/EhcacheRuntimeConfiguration.java +++ b/core/src/main/java/org/ehcache/core/EhcacheRuntimeConfiguration.java @@ -20,12 +20,13 @@ import 
org.ehcache.config.CacheRuntimeConfiguration; import org.ehcache.config.EvictionAdvisor; import org.ehcache.config.ResourcePools; +import org.ehcache.core.config.ExpiryUtils; import org.ehcache.core.internal.events.EventListenerWrapper; import org.ehcache.event.CacheEventListener; import org.ehcache.event.EventFiring; import org.ehcache.event.EventOrdering; import org.ehcache.event.EventType; -import org.ehcache.expiry.Expiry; +import org.ehcache.expiry.ExpiryPolicy; import org.ehcache.spi.service.ServiceConfiguration; import java.util.ArrayList; @@ -44,7 +45,7 @@ class EhcacheRuntimeConfiguration implements CacheRuntimeConfiguration valueType; private final EvictionAdvisor evictionAdvisor; private final ClassLoader classLoader; - private final Expiry expiry; + private final ExpiryPolicy expiry; private volatile ResourcePools resourcePools; private final List cacheConfigurationListenerList @@ -57,7 +58,7 @@ class EhcacheRuntimeConfiguration implements CacheRuntimeConfiguration getExpiry() { + public org.ehcache.expiry.Expiry getExpiry() { + return ExpiryUtils.convertToExpiry(expiry); + } + + @Override + public ExpiryPolicy getExpiryPolicy() { return expiry; } @@ -156,7 +163,7 @@ private void fireCacheConfigurationChange(CacheConfigurationProperty prop, f @Override public String readableString() { StringBuilder serviceConfigurationsToStringBuilder = new StringBuilder(); - for (ServiceConfiguration serviceConfiguration : serviceConfigurations) { + for (ServiceConfiguration serviceConfiguration : serviceConfigurations) { serviceConfigurationsToStringBuilder .append("\n ") .append("- "); @@ -177,12 +184,20 @@ public String readableString() { serviceConfigurationsToStringBuilder.append(" None"); } + String expiryPolicy; + + if (ExpiryPolicy.NO_EXPIRY == expiry) { + expiryPolicy = "NoExpiryPolicy"; + } else { + expiryPolicy = expiry.toString(); + } + return "keyType: " + keyType.getName() + "\n" + "valueType: " + valueType.getName() + "\n" + "serviceConfigurations:" + serviceConfigurationsToStringBuilder.toString().replace("\n", "\n ") + "\n" + "evictionAdvisor: " + ((evictionAdvisor != null) ? evictionAdvisor.getClass().getName() : "None") + "\n" + - "expiry: " + ((expiry != null) ? expiry.getClass().getSimpleName() : "") + "\n" + + "expiry: " + expiryPolicy + "\n" + "resourcePools: " + "\n " + ((resourcePools instanceof HumanReadable) ? ((HumanReadable)resourcePools).readableString() : "").replace("\n", "\n "); } } diff --git a/core/src/main/java/org/ehcache/core/EhcacheWithLoaderWriter.java b/core/src/main/java/org/ehcache/core/EhcacheWithLoaderWriter.java deleted file mode 100644 index 0f6b94611f..0000000000 --- a/core/src/main/java/org/ehcache/core/EhcacheWithLoaderWriter.java +++ /dev/null @@ -1,1427 +0,0 @@ -/* - * Copyright Terracotta, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
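Stepping back to the EhcacheRuntimeConfiguration change above: getExpiryPolicy() exposes the stored policy directly, while the legacy getExpiry() now adapts it through ExpiryUtils.convertToExpiry. A sketch of the two views over one policy (ExpiryPolicyBuilder comes from org.ehcache.config.builders):

    ExpiryPolicy<Object, Object> policy =
        ExpiryPolicyBuilder.timeToLiveExpiration(java.time.Duration.ofMinutes(10));

    // New-style callers read java.time durations straight off the policy:
    java.time.Duration ttl = policy.getExpiryForCreation("key", "value");

    // Legacy callers still see the old interface through the adapter used above:
    org.ehcache.expiry.Expiry<Object, Object> legacy = ExpiryUtils.convertToExpiry(policy);
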
- */ - -package org.ehcache.core; - -import org.ehcache.Cache; -import org.ehcache.Status; -import org.ehcache.config.CacheConfiguration; -import org.ehcache.config.CacheRuntimeConfiguration; -import org.ehcache.core.events.CacheEventDispatcher; -import org.ehcache.core.exceptions.StorePassThroughException; -import org.ehcache.spi.loaderwriter.BulkCacheLoadingException; -import org.ehcache.spi.loaderwriter.BulkCacheWritingException; -import org.ehcache.core.spi.store.StoreAccessException; -import org.ehcache.spi.loaderwriter.CacheLoadingException; -import org.ehcache.spi.loaderwriter.CacheWritingException; -import org.ehcache.expiry.Duration; -import org.ehcache.core.internal.resilience.LoggingRobustResilienceStrategy; -import org.ehcache.core.internal.resilience.RecoveryCache; -import org.ehcache.core.internal.resilience.ResilienceStrategy; -import org.ehcache.core.spi.LifeCycled; -import org.ehcache.core.spi.store.Store; -import org.ehcache.core.spi.store.Store.ValueHolder; -import org.ehcache.spi.loaderwriter.CacheLoaderWriter; -import org.ehcache.core.statistics.BulkOps; -import org.ehcache.core.statistics.CacheOperationOutcomes.CacheLoadingOutcome; -import org.ehcache.core.statistics.CacheOperationOutcomes.ConditionalRemoveOutcome; -import org.ehcache.core.statistics.CacheOperationOutcomes.GetAllOutcome; -import org.ehcache.core.statistics.CacheOperationOutcomes.GetOutcome; -import org.ehcache.core.statistics.CacheOperationOutcomes.PutIfAbsentOutcome; -import org.ehcache.core.statistics.CacheOperationOutcomes.PutAllOutcome; -import org.ehcache.core.statistics.CacheOperationOutcomes.PutOutcome; -import org.ehcache.core.statistics.CacheOperationOutcomes.RemoveAllOutcome; -import org.ehcache.core.statistics.CacheOperationOutcomes.RemoveOutcome; -import org.ehcache.core.statistics.CacheOperationOutcomes.ReplaceOutcome; -import org.slf4j.Logger; -import org.terracotta.statistics.StatisticsManager; -import org.terracotta.statistics.observer.OperationObserver; - -import java.util.AbstractMap; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.EnumMap; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Iterator; -import java.util.LinkedHashMap; -import java.util.LinkedHashSet; -import java.util.Map; -import java.util.NoSuchElementException; -import java.util.Set; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicReference; -import java.util.concurrent.atomic.LongAdder; -import java.util.function.BiFunction; -import java.util.function.Function; -import java.util.function.Supplier; - -import static org.ehcache.core.internal.util.Functions.memoize; -import static org.ehcache.core.exceptions.ExceptionFactory.newCacheLoadingException; -import static org.ehcache.core.exceptions.ExceptionFactory.newCacheWritingException; -import static org.ehcache.core.internal.util.ValueSuppliers.supplierOf; -import static org.terracotta.statistics.StatisticBuilder.operation; - -/** - * Implementation of the {@link Cache} interface when a {@link CacheLoaderWriter} is involved. - *
* <p>
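The class being deleted here is superseded by the EhcacheBase/Ehcache pair: the loader-writer now travels into the store configuration and failure handling moved behind the pluggable ResilienceStrategy, so one cache class serves both the plain and the loader-writer case. Condensed, the createNewEhcache() wiring above reduces to:

    ResilienceStrategy<K, V> resilienceStrategy = (decorator == null)
        ? resilienceProvider.createResilienceStrategy(alias, config, new DefaultRecoveryStore<>(store))
        : resilienceProvider.createResilienceStrategy(alias, config, new DefaultRecoveryStore<>(store), decorator);

    InternalCache<K, V> cache = new Ehcache<>(config, store, resilienceStrategy, evtService,
        LoggerFactory.getLogger(Ehcache.class + "-" + alias), decorator); // decorator may be null
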
- * {@code Ehcache} users should not have to depend on this type but rely exclusively on the api types in package - * {@code org.ehcache}. - * - * @see Ehcache - */ -public class EhcacheWithLoaderWriter implements InternalCache { - - private final StatusTransitioner statusTransitioner; - - private final Store store; - private final CacheLoaderWriter cacheLoaderWriter; - private final ResilienceStrategy resilienceStrategy; - private final EhcacheRuntimeConfiguration runtimeConfiguration; - private final Jsr107CacheImpl jsr107Cache; - private final boolean useLoaderInAtomics; - protected final Logger logger; - - private final OperationObserver getObserver = operation(GetOutcome.class).named("get").of(this).tag("cache").build(); - private final OperationObserver getAllObserver = operation(GetAllOutcome.class).named("getAll").of(this).tag("cache").build(); - private final OperationObserver putObserver = operation(PutOutcome.class).named("put").of(this).tag("cache").build(); - private final OperationObserver putAllObserver = operation(PutAllOutcome.class).named("putAll").of(this).tag("cache").build(); - private final OperationObserver removeObserver = operation(RemoveOutcome.class).named("remove").of(this).tag("cache").build(); - private final OperationObserver removeAllObserver = operation(RemoveAllOutcome.class).named("removeAll").of(this).tag("cache").build(); - private final OperationObserver conditionalRemoveObserver = operation(ConditionalRemoveOutcome.class).named("conditionalRemove").of(this).tag("cache").build(); - private final OperationObserver cacheLoadingObserver = operation(CacheLoadingOutcome.class).named("cacheLoading").of(this).tag("cache").build(); - private final OperationObserver putIfAbsentObserver = operation(PutIfAbsentOutcome.class).named("putIfAbsent").of(this).tag("cache").build(); - private final OperationObserver replaceObserver = operation(ReplaceOutcome.class).named("replace").of(this).tag("cache").build(); - private final Map bulkMethodEntries = new EnumMap<>(BulkOps.class); - - private static final Supplier REPLACE_FALSE = () -> Boolean.FALSE; - - /** - * Constructs a new {@code EhcacheWithLoaderWriter} based on the provided parameters. 
- * - * @param configuration the cache configuration - * @param store the underlying store - * @param cacheLoaderWriter the cache loader writer - * @param eventDispatcher the event dispatcher - * @param logger the logger - */ - public EhcacheWithLoaderWriter(CacheConfiguration configuration, Store store, - final CacheLoaderWriter cacheLoaderWriter, - CacheEventDispatcher eventDispatcher, - Logger logger) { - this(configuration, store, cacheLoaderWriter, eventDispatcher, true, logger); - } - - EhcacheWithLoaderWriter(CacheConfiguration runtimeConfiguration, Store store, - CacheLoaderWriter cacheLoaderWriter, - CacheEventDispatcher eventDispatcher, boolean useLoaderInAtomics, Logger logger) { - this(new EhcacheRuntimeConfiguration<>(runtimeConfiguration), store, cacheLoaderWriter, eventDispatcher, useLoaderInAtomics, logger, new StatusTransitioner(logger)); - } - - EhcacheWithLoaderWriter(EhcacheRuntimeConfiguration runtimeConfiguration, Store store, - CacheLoaderWriter cacheLoaderWriter, - CacheEventDispatcher eventDispatcher, boolean useLoaderInAtomics, Logger logger, StatusTransitioner statusTransitioner) { - this.store = store; - runtimeConfiguration.addCacheConfigurationListener(store.getConfigurationChangeListeners()); - StatisticsManager.associate(store).withParent(this); - if (cacheLoaderWriter == null) { - throw new NullPointerException("CacheLoaderWriter cannot be null."); - } - this.cacheLoaderWriter = cacheLoaderWriter; - if (store instanceof RecoveryCache) { - this.resilienceStrategy = new LoggingRobustResilienceStrategy<>(castToRecoveryCache(store)); - } else { - this.resilienceStrategy = new LoggingRobustResilienceStrategy<>(recoveryCache(store)); - } - - this.runtimeConfiguration = runtimeConfiguration; - runtimeConfiguration.addCacheConfigurationListener(eventDispatcher.getConfigurationChangeListeners()); - this.jsr107Cache = new Jsr107CacheImpl(); - - this.useLoaderInAtomics = useLoaderInAtomics; - this.logger=logger; - this.statusTransitioner = statusTransitioner; - for (BulkOps bulkOp : BulkOps.values()) { - bulkMethodEntries.put(bulkOp, new LongAdder()); - } - } - - /** - * {@inheritDoc} - */ - @Override - public Map getBulkMethodEntries() { - return bulkMethodEntries; - } - - @SuppressWarnings("unchecked") - private RecoveryCache castToRecoveryCache(Store store) { - return (RecoveryCache) store; - } - - private V getNoLoader(K key) { - getObserver.begin(); - statusTransitioner.checkAvailable(); - checkNonNull(key); - - try { - final Store.ValueHolder valueHolder = store.get(key); - - // Check for expiry first - if (valueHolder == null) { - getObserver.end(GetOutcome.MISS); - return null; - } else { - getObserver.end(GetOutcome.HIT); - return valueHolder.value(); - } - } catch (StoreAccessException e) { - try { - return resilienceStrategy.getFailure(key, e); - } finally { - getObserver.end(GetOutcome.FAILURE); - } - } - } - - /** - * {@inheritDoc} - */ - @Override - public V get(final K key) throws CacheLoadingException { - getObserver.begin(); - statusTransitioner.checkAvailable(); - checkNonNull(key); - final Function mappingFunction = memoize(k -> { - V loaded = null; - try { - cacheLoadingObserver.begin(); - loaded = cacheLoaderWriter.load(k); - cacheLoadingObserver.end(CacheLoadingOutcome.SUCCESS); - } catch (Exception e) { - cacheLoadingObserver.end(CacheLoadingOutcome.FAILURE); - throw new StorePassThroughException(newCacheLoadingException(e)); - } - - return loaded; - }); - - try { - final Store.ValueHolder valueHolder = store.computeIfAbsent(key, 
mappingFunction); - - // Check for expiry first - if (valueHolder == null) { - getObserver.end(GetOutcome.MISS); - return null; - } else { - getObserver.end(GetOutcome.HIT); - return valueHolder.value(); - } - } catch (StoreAccessException e) { - try { - V fromLoader; - try { - fromLoader = mappingFunction.apply(key); - } catch (StorePassThroughException cpte) { - return resilienceStrategy.getFailure(key, e, (CacheLoadingException) cpte.getCause()); - } - return resilienceStrategy.getFailure(key, fromLoader, e); - } finally { - getObserver.end(GetOutcome.FAILURE); - } - } - } - - /** - * {@inheritDoc} - */ - @Override - public void put(final K key, final V value) throws CacheWritingException { - putObserver.begin(); - statusTransitioner.checkAvailable(); - checkNonNull(key, value); - final BiFunction remappingFunction = memoize((key1, previousValue) -> { - try { - cacheLoaderWriter.write(key1, value); - } catch (Exception e) { - throw new StorePassThroughException(newCacheWritingException(e)); - } - return value; - }); - - try { - store.compute(key, remappingFunction); - putObserver.end(PutOutcome.PUT); - } catch (StoreAccessException e) { - try { - try { - remappingFunction.apply(key, value); - } catch (StorePassThroughException cpte) { - resilienceStrategy.putFailure(key, value, e, (CacheWritingException) cpte.getCause()); - return; - } - resilienceStrategy.putFailure(key, value, e); - } finally { - putObserver.end(PutOutcome.FAILURE); - } - } - } - - private boolean newValueAlreadyExpired(K key, V oldValue, V newValue) { - if (newValue == null) { - return false; - } - - final Duration duration; - if (oldValue == null) { - try { - duration = runtimeConfiguration.getExpiry().getExpiryForCreation(key, newValue); - } catch (RuntimeException re) { - logger.error("Expiry computation caused an exception - Expiry duration will be 0 ", re); - return true; - } - } else { - try { - duration = runtimeConfiguration.getExpiry().getExpiryForUpdate(key, supplierOf(oldValue), newValue); - } catch (RuntimeException re) { - logger.error("Expiry computation caused an exception - Expiry duration will be 0 ", re); - return true; - } - } - - return Duration.ZERO.equals(duration); - } - - /** - * {@inheritDoc} - */ - @Override - public boolean containsKey(final K key) { - statusTransitioner.checkAvailable(); - checkNonNull(key); - try { - return store.containsKey(key); - } catch (StoreAccessException e) { - return resilienceStrategy.containsKeyFailure(key, e); - } - } - - /** - * {@inheritDoc} - */ - @Override - public void remove(K key) throws CacheWritingException { - removeInternal(key); // ignore return value; - } - - - private boolean removeInternal(final K key) throws CacheWritingException { - removeObserver.begin(); - statusTransitioner.checkAvailable(); - checkNonNull(key); - - final AtomicBoolean modified = new AtomicBoolean(); - - final BiFunction remappingFunction = memoize((key1, previousValue) -> { - modified.set(previousValue != null); - - try { - cacheLoaderWriter.delete(key1); - } catch (Exception e) { - throw new StorePassThroughException(newCacheWritingException(e)); - } - return null; - }); - - try { - store.compute(key, remappingFunction); - if (modified.get()) { - removeObserver.end(RemoveOutcome.SUCCESS); - } else { - removeObserver.end(RemoveOutcome.NOOP); - } - } catch (StoreAccessException e) { - try { - try { - remappingFunction.apply(key, null); - } catch (StorePassThroughException f) { - resilienceStrategy.removeFailure(key, e, (CacheWritingException) f.getCause()); - } - 
resilienceStrategy.removeFailure(key, e); - } finally { - removeObserver.end(RemoveOutcome.FAILURE); - } - } - - return modified.get(); - } - - /** - * {@inheritDoc} - */ - @Override - public void clear() { - statusTransitioner.checkAvailable(); - try { - store.clear(); - } catch (StoreAccessException e) { - resilienceStrategy.clearFailure(e); - } - } - - /** - * {@inheritDoc} - */ - @Override - public Iterator> iterator() { - statusTransitioner.checkAvailable(); - return new CacheEntryIterator(false); - } - - /** - * {@inheritDoc} - */ - @Override - public Map getAll(Set keys) throws BulkCacheLoadingException { - return getAllInternal(keys, true); - } - - private Map getAllInternal(Set keys, boolean includeNulls) throws BulkCacheLoadingException { - getAllObserver.begin(); - statusTransitioner.checkAvailable(); - checkNonNullContent(keys); - if(keys.isEmpty()) { - getAllObserver.end(GetAllOutcome.SUCCESS); - return Collections.emptyMap(); - } - final Map successes = new HashMap<>(); - final Map failures = new HashMap<>(); - - - Function, Iterable>> computeFunction = - keys1 -> { - Map computeResult = new LinkedHashMap<>(); - - // put all the entries to get ordering correct - for (K key : keys1) { - computeResult.put(key, null); - } - - Map loaded = Collections.emptyMap(); - try { - loaded = cacheLoaderWriter.loadAll(computeResult.keySet()); - } catch(BulkCacheLoadingException bcle) { - collectSuccessesAndFailures(bcle, successes, failures); - } catch (Exception e) { - for (K key : computeResult.keySet()) { - failures.put(key, e); - } - } - - if (!loaded.isEmpty()) { - for (K key : computeResult.keySet()) { - V value = loaded.get(key); - successes.put(key, value); - computeResult.put(key, value); - } - } - - return computeResult.entrySet(); - }; - - Map result = new HashMap<>(); - try { - Map> computedMap = store.bulkComputeIfAbsent(keys, computeFunction); - - int hits = 0; - int keyCount = 0; - for (Map.Entry> entry : computedMap.entrySet()) { - keyCount++; - if (entry.getValue() != null) { - result.put(entry.getKey(), entry.getValue().value()); - hits++; - } else if (includeNulls && failures.isEmpty()) { - result.put(entry.getKey(), null); - } - } - - addBulkMethodEntriesCount(BulkOps.GET_ALL_HITS, hits); - if (failures.isEmpty()) { - addBulkMethodEntriesCount(BulkOps.GET_ALL_MISS, keyCount - hits); - getAllObserver.end(GetAllOutcome.SUCCESS); - return result; - } else { - successes.putAll(result); - getAllObserver.end(GetAllOutcome.FAILURE); - throw new BulkCacheLoadingException(failures, successes); - } - } catch (StoreAccessException e) { - try { - Set toLoad = new HashSet<>(); - for (K key : keys) { - toLoad.add(key); - } - toLoad.removeAll(successes.keySet()); - toLoad.removeAll(failures.keySet()); - computeFunction.apply(toLoad); - if (failures.isEmpty()) { - return resilienceStrategy.getAllFailure(keys, successes, e); - } else { - return resilienceStrategy.getAllFailure(keys, e, new BulkCacheLoadingException(failures, successes)); - } - } finally { - getAllObserver.end(GetAllOutcome.FAILURE); - } - } - } - - LinkedHashSet> nullValuesForKeys(final Iterable keys) { - final LinkedHashSet> entries = new LinkedHashSet<>(); - for (K key : keys) { - entries.add(new AbstractMap.SimpleEntry<>(key, null)); - } - return entries; - } - - /** - * {@inheritDoc} - */ - @Override - public void putAll(final Map entries) throws BulkCacheWritingException { - putAllObserver.begin(); - statusTransitioner.checkAvailable(); - checkNonNull(entries); - if(entries.isEmpty()) { - 
putAllObserver.end(PutAllOutcome.SUCCESS); - return; - } - final Set successes = new HashSet<>(); - final Map failures = new HashMap<>(); - - // Copy all entries to write into a Map - final Map entriesToRemap = new HashMap<>(); - for (Map.Entry entry: entries.entrySet()) { - // If a key/value is null, throw NPE, nothing gets mutated - if (entry.getKey() == null || entry.getValue() == null) { - throw new NullPointerException(); - } - entriesToRemap.put(entry.getKey(), entry.getValue()); - } - - final AtomicInteger actualPutCount = new AtomicInteger(); - - // The compute function that will return the keys to their NEW values, taking the keys to their old values as input; - // but this could happen in batches, i.e. not necessary containing all of the entries of the Iterable passed to this method - Function>, Iterable>> computeFunction = - entries1 -> { - // If we have a writer, first write this batch - cacheLoaderWriterWriteAllCall(entries1, entriesToRemap, successes, failures); - - Map mutations = new LinkedHashMap<>(); - - // then record we handled these mappings - for (Map.Entry entry: entries1) { - K key = entry.getKey(); - V existingValue = entry.getValue(); - V newValue = entriesToRemap.remove(key); - - if (newValueAlreadyExpired(key, existingValue, newValue)) { - mutations.put(key, null); - } else if (successes.contains(key)) { - actualPutCount.incrementAndGet(); - mutations.put(key, newValue); - - } else { - mutations.put(key, existingValue); - } - } - - // Finally return the values to be installed in the Cache's Store - return mutations.entrySet(); - }; - - try { - store.bulkCompute(entries.keySet(), computeFunction); - addBulkMethodEntriesCount(BulkOps.PUT_ALL, actualPutCount.get()); - if (failures.isEmpty()) { - putAllObserver.end(PutAllOutcome.SUCCESS); - } else { - BulkCacheWritingException cacheWritingException = new BulkCacheWritingException(failures, successes); - tryRemoveFailedKeys(entries, failures, cacheWritingException); - putAllObserver.end(PutAllOutcome.FAILURE); - throw cacheWritingException; - } - } catch (StoreAccessException e) { - try { - // just in case not all writes happened: - if (!entriesToRemap.isEmpty()) { - cacheLoaderWriterWriteAllCall(entriesToRemap.entrySet(), entriesToRemap, successes, failures); - } - if (failures.isEmpty()) { - resilienceStrategy.putAllFailure(entries, e); - } else { - resilienceStrategy.putAllFailure(entries, e, new BulkCacheWritingException(failures, successes)); - } - } finally { - putAllObserver.end(PutAllOutcome.FAILURE); - } - } - } - - private void tryRemoveFailedKeys(Map entries, Map failures, BulkCacheWritingException cacheWritingException) { - try { - store.bulkCompute(failures.keySet(), entries1 -> { - HashMap result = new HashMap<>(); - for (Map.Entry entry : entries1) { - result.put(entry.getKey(), null); - } - return result.entrySet(); - }); - } catch (StoreAccessException e) { - resilienceStrategy.putAllFailure(entries, e, cacheWritingException); - } - } - - private void cacheLoaderWriterWriteAllCall(Iterable> entries, Map entriesToRemap, Set successes, Map failures) throws IllegalStateException { - Map toWrite = new HashMap<>(); - for (Map.Entry entry: entries) { - V value = entriesToRemap.get(entry.getKey()); - if (value == null) { - continue; - } - - toWrite.put(entry.getKey(), value); - } - try { - if (! 
toWrite.isEmpty()) { - // write all entries of this batch - cacheLoaderWriter.writeAll(toWrite.entrySet()); - successes.addAll(toWrite.keySet()); - } - } catch (BulkCacheWritingException bcwe) { - collectSuccessesAndFailures(bcwe, successes, failures); - } catch (Exception e) { - for (K key: toWrite.keySet()) { - failures.put(key, e); - } - } - } - - @SuppressWarnings({ "unchecked" }) - private static void collectSuccessesAndFailures(BulkCacheWritingException bcwe, Set successes, Map failures) { - successes.addAll((Collection)bcwe.getSuccesses()); - failures.putAll((Map)bcwe.getFailures()); - } - @SuppressWarnings({ "unchecked" }) - private void collectSuccessesAndFailures(BulkCacheLoadingException bcle, Map successes, Map failures) { - successes.putAll((Map)bcle.getSuccesses()); - failures.putAll((Map)bcle.getFailures()); - } - - /** - * {@inheritDoc} - */ - @Override - public void removeAll(final Set keys) throws BulkCacheWritingException { - removeAllObserver.begin(); - statusTransitioner.checkAvailable(); - checkNonNull(keys); - if(keys.isEmpty()) { - removeAllObserver.end(RemoveAllOutcome.SUCCESS); - return; - } - final Set successes = new HashSet<>(); - final Map failures = new HashMap<>(); - - final Map entriesToRemove = new HashMap<>(); - for (K key: keys) { - if (key == null) { - throw new NullPointerException(); - } - entriesToRemove.put(key, null); - } - - final AtomicInteger actualRemoveCount = new AtomicInteger(); - - Function>, Iterable>> removalFunction = - entries -> { - Set unknowns = cacheLoaderWriterDeleteAllCall(entries, entriesToRemove, successes, failures); - - Map results = new LinkedHashMap<>(); - - for (Map.Entry entry : entries) { - K key = entry.getKey(); - V existingValue = entry.getValue(); - - if (successes.contains(key)) { - if (existingValue != null) { - actualRemoveCount.incrementAndGet(); - } - results.put(key, null); - entriesToRemove.remove(key); - } else { - if (unknowns.contains(key)) { - results.put(key, null); - } else { - results.put(key, existingValue); - } - } - } - - return results.entrySet(); - }; - - try { - store.bulkCompute(keys, removalFunction); - addBulkMethodEntriesCount(BulkOps.REMOVE_ALL, actualRemoveCount.get()); - if (failures.isEmpty()) { - removeAllObserver.end(RemoveAllOutcome.SUCCESS); - } else { - removeAllObserver.end(RemoveAllOutcome.FAILURE); - throw new BulkCacheWritingException(failures, successes); - } - } catch (StoreAccessException e) { - try { - // just in case not all writes happened: - if (!entriesToRemove.isEmpty()) { - cacheLoaderWriterDeleteAllCall(entriesToRemove.entrySet(), entriesToRemove, successes, failures); - } - if (failures.isEmpty()) { - resilienceStrategy.removeAllFailure(keys, e); - } else { - resilienceStrategy.removeAllFailure(keys, e, new BulkCacheWritingException(failures, successes)); - } - } finally { - removeAllObserver.end(RemoveAllOutcome.FAILURE); - } - } - } - - private Set cacheLoaderWriterDeleteAllCall(Iterable> entries, Map entriesToRemove, Set successes, Map failures) { - final Set unknowns = new HashSet<>(); - Set toDelete = new HashSet<>(); - for (Map.Entry entry : entries) { - K key = entry.getKey(); - if (entriesToRemove.containsKey(key)) { - toDelete.add(key); - } - } - - try { - cacheLoaderWriter.deleteAll(toDelete); - successes.addAll(toDelete); - } catch (BulkCacheWritingException bcwe) { - collectSuccessesAndFailures(bcwe, successes, failures); - } catch (Exception e) { - for (K key : toDelete) { - failures.put(key, e); - unknowns.add(key); - } - } - return unknowns; - } - - /** - 
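The successes/failures bookkeeping in the writeAll/deleteAll calls above surfaces to callers as a BulkCacheWritingException, a contract the replacement code paths preserve. Caller-side sketch (batch is an assumed Map<Long, String>):

    try {
      cache.putAll(batch);
    } catch (BulkCacheWritingException e) {
      Set<?> written = e.getSuccesses();           // keys the writer accepted
      Map<?, Exception> failed = e.getFailures();  // keys that failed, with causes
    }
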
* {@inheritDoc} - */ - @Override - public V putIfAbsent(final K key, final V value) throws CacheWritingException { - putIfAbsentObserver.begin(); - statusTransitioner.checkAvailable(); - checkNonNull(key, value); - final AtomicBoolean installed = new AtomicBoolean(false); - - final Function mappingFunction = memoize(k -> { - if (useLoaderInAtomics) { - try { - V loaded = cacheLoaderWriter.load(k); - if (loaded != null) { - return loaded; // populate the cache - } - } catch (Exception e) { - throw new StorePassThroughException(newCacheLoadingException(e)); - } - } - - try { - cacheLoaderWriter.write(k, value); - } catch (Exception e) { - throw new StorePassThroughException(newCacheWritingException(e)); - } - - installed.set(true); - return value; - }); - - try { - ValueHolder inCache = store.computeIfAbsent(key, mappingFunction); - if (installed.get()) { - putIfAbsentObserver.end(PutIfAbsentOutcome.PUT); - return null; - } else if (inCache == null) { - putIfAbsentObserver.end(PutIfAbsentOutcome.HIT); - return null; - } else { - putIfAbsentObserver.end(PutIfAbsentOutcome.HIT); - return inCache.value(); - } - } catch (StoreAccessException e) { - try { - V loaded = null; - try { - loaded = mappingFunction.apply(key); - } catch (StorePassThroughException f) { - Throwable cause = f.getCause(); - if(cause instanceof CacheLoadingException) { - return resilienceStrategy.putIfAbsentFailure(key, value, e, (CacheLoadingException) cause); - } else if(cause instanceof CacheWritingException) { - return resilienceStrategy.putIfAbsentFailure(key, value, e, (CacheWritingException) cause); - } else { - throw new AssertionError(); - } - } - return resilienceStrategy.putIfAbsentFailure(key, value, loaded, e, installed.get()); - } finally { - putIfAbsentObserver.end(PutIfAbsentOutcome.FAILURE); - } - } - } - - /** - * {@inheritDoc} - */ - @Override - public boolean remove(final K key, final V value) throws CacheWritingException { - conditionalRemoveObserver.begin(); - statusTransitioner.checkAvailable(); - checkNonNull(key, value); - final AtomicBoolean hit = new AtomicBoolean(); - final AtomicBoolean removed = new AtomicBoolean(); - final BiFunction remappingFunction = memoize((k, inCache) -> { - if (inCache == null) { - if (useLoaderInAtomics) { - try { - inCache = cacheLoaderWriter.load(key); - if (inCache == null) { - return null; - } - } catch (Exception e) { - throw new StorePassThroughException(newCacheLoadingException(e)); - } - } else { - return null; - } - } - - hit.set(true); - if (value.equals(inCache)) { - try { - cacheLoaderWriter.delete(k); - } catch (Exception e) { - throw new StorePassThroughException(newCacheWritingException(e)); - } - removed.set(true); - return null; - } - return inCache; - }); - try { - store.compute(key, remappingFunction, REPLACE_FALSE); - if (removed.get()) { - conditionalRemoveObserver.end(ConditionalRemoveOutcome.SUCCESS); - } else { - if (hit.get()) { - conditionalRemoveObserver.end(ConditionalRemoveOutcome.FAILURE_KEY_PRESENT); - } else { - conditionalRemoveObserver.end(ConditionalRemoveOutcome.FAILURE_KEY_MISSING); - } - } - } catch (StoreAccessException e) { - try { - try { - remappingFunction.apply(key, null); - } catch (StorePassThroughException f) { - Throwable cause = f.getCause(); - if(cause instanceof CacheLoadingException) { - return resilienceStrategy.removeFailure(key, value, e, (CacheLoadingException) cause); - } else if(cause instanceof CacheWritingException) { - return resilienceStrategy.removeFailure(key, value, e, (CacheWritingException) cause); - } 
else { - throw new AssertionError(); - } - } - return resilienceStrategy.removeFailure(key, value, e, removed.get()); - } finally { - conditionalRemoveObserver.end(ConditionalRemoveOutcome.FAILURE); - } - } - return removed.get(); - } - - /** - * {@inheritDoc} - */ - @Override - public V replace(final K key, final V value) throws CacheLoadingException, CacheWritingException { - replaceObserver.begin(); - statusTransitioner.checkAvailable(); - checkNonNull(key, value); - final AtomicReference old = new AtomicReference<>(); - final BiFunction remappingFunction = memoize((k, inCache) -> { - if (inCache == null) { - if (useLoaderInAtomics) { - try { - inCache = cacheLoaderWriter.load(key); - if (inCache == null) { - return null; - } - } catch (Exception e) { - throw new StorePassThroughException(newCacheLoadingException(e)); - } - } else { - return null; - } - } - - try { - cacheLoaderWriter.write(key, value); - } catch (Exception e) { - throw new StorePassThroughException(newCacheWritingException(e)); - } - - old.set(inCache); - - if (newValueAlreadyExpired(key, inCache, value)) { - return null; - } - return value; - }); - - try { - store.compute(key, remappingFunction); - if (old.get() != null) { - replaceObserver.end(ReplaceOutcome.HIT); - } else { - replaceObserver.end(ReplaceOutcome.MISS_NOT_PRESENT); - } - return old.get(); - } catch (StoreAccessException e) { - try { - try { - remappingFunction.apply(key, null); - } catch (StorePassThroughException f) { - Throwable cause = f.getCause(); - if(cause instanceof CacheLoadingException) { - return resilienceStrategy.replaceFailure(key, value, e, (CacheLoadingException) cause); - } else if(cause instanceof CacheWritingException) { - return resilienceStrategy.replaceFailure(key, value, e, (CacheWritingException)cause); - } else { - throw new AssertionError(); - } - } - return resilienceStrategy.replaceFailure(key, value, e); - } finally { - replaceObserver.end(ReplaceOutcome.FAILURE); - } - } - } - - /** - * {@inheritDoc} - */ - @Override - public boolean replace(final K key, final V oldValue, final V newValue) throws CacheLoadingException, CacheWritingException { - replaceObserver.begin(); - statusTransitioner.checkAvailable(); - checkNonNull(key, oldValue, newValue); - - final AtomicBoolean success = new AtomicBoolean(); - final AtomicBoolean hit = new AtomicBoolean(); - - final BiFunction remappingFunction = memoize((k, inCache) -> { - if (inCache == null) { - if (useLoaderInAtomics) { - try { - inCache = cacheLoaderWriter.load(key); - if (inCache == null) { - return null; - } - } catch (Exception e) { - throw new StorePassThroughException(newCacheLoadingException(e)); - } - } else { - return null; - } - } - - hit.set(true); - if (oldValue.equals(inCache)) { - try { - cacheLoaderWriter.write(key, newValue); - } catch (Exception e) { - throw new StorePassThroughException(newCacheWritingException(e)); - } - - success.set(true); - - if (newValueAlreadyExpired(key, oldValue, newValue)) { - return null; - } - return newValue; - } - return inCache; - }); - try { - store.compute(key, remappingFunction, REPLACE_FALSE); - if (success.get()) { - replaceObserver.end(ReplaceOutcome.HIT); - } else { - if (hit.get()) { - replaceObserver.end(ReplaceOutcome.MISS_PRESENT); - } else { - replaceObserver.end(ReplaceOutcome.MISS_NOT_PRESENT); - } - } - return success.get(); - } catch (StoreAccessException e) { - try { - try { - remappingFunction.apply(key, null); - } catch (StorePassThroughException f) { - Throwable cause = f.getCause(); - if(cause instanceof 
CacheLoadingException) { - return resilienceStrategy.replaceFailure(key, oldValue, newValue, e, (CacheLoadingException) cause); - } else if(cause instanceof CacheWritingException) { - return resilienceStrategy.replaceFailure(key, oldValue, newValue, e, (CacheWritingException)cause); - } else { - throw new AssertionError(); - } - } - return resilienceStrategy.replaceFailure(key, oldValue, newValue, e, success.get()); - } finally { - replaceObserver.end(ReplaceOutcome.FAILURE); - } - } - } - - /** - * {@inheritDoc} - */ - @Override - public CacheRuntimeConfiguration getRuntimeConfiguration() { - return runtimeConfiguration; - } - - /** - * {@inheritDoc} - */ - @Override - public void init() { - statusTransitioner.init().succeeded(); - } - - /** - * {@inheritDoc} - */ - @Override - public void close() { - statusTransitioner.close().succeeded(); - } - - /** - * {@inheritDoc} - */ - @Override - public Status getStatus() { - return statusTransitioner.currentStatus(); - } - - /** - * {@inheritDoc} - */ - @Override - public void addHook(LifeCycled hook) { - statusTransitioner.addHook(hook); - } - - void removeHook(LifeCycled hook) { - statusTransitioner.removeHook(hook); - } - - private static void checkNonNull(Object thing) { - if(thing == null) { - throw new NullPointerException(); - } - } - - private static void checkNonNull(Object... things) { - for (Object thing : things) { - checkNonNull(thing); - } - } - - private void checkNonNullContent(Collection collectionOfThings) { - checkNonNull(collectionOfThings); - for (Object thing : collectionOfThings) { - checkNonNull(thing); - } - } - - private void addBulkMethodEntriesCount(BulkOps op, long count) { - bulkMethodEntries.get(op).add(count); - } - - /** - * {@inheritDoc} - */ - @Override - public Jsr107Cache getJsr107Cache() { - return jsr107Cache; - } - - /** - * {@inheritDoc} - */ - @Override - public CacheLoaderWriter getCacheLoaderWriter() { - return this.cacheLoaderWriter; - } - - private final class Jsr107CacheImpl implements Jsr107Cache { - @Override - public void loadAll(Set keys, boolean replaceExistingValues, Function, Map> loadFunction) { - if(keys.isEmpty()) { - return ; - } - if (replaceExistingValues) { - loadAllReplace(keys, loadFunction); - } else { - loadAllAbsent(keys, loadFunction); - } - } - - @Override - public Iterator> specIterator() { - return new SpecIterator<>(this, store); - } - - @Override - public V getNoLoader(K key) { - return EhcacheWithLoaderWriter.this.getNoLoader(key); - } - - @Override - public Map getAll(Set keys) { - return EhcacheWithLoaderWriter.this.getAllInternal(keys, false); - } - - private void loadAllAbsent(Set keys, final Function, Map> loadFunction) { - try { - store.bulkComputeIfAbsent(keys, absentKeys -> cacheLoaderWriterLoadAllForKeys(absentKeys, loadFunction).entrySet()); - } catch (StoreAccessException e) { - throw newCacheLoadingException(e); - } - } - - Map cacheLoaderWriterLoadAllForKeys(Iterable keys, Function, Map> loadFunction) { - try { - Map loaded = loadFunction.apply(keys); - - // put into a new map since we can't assume the 107 cache loader returns things ordered, or necessarily with all the desired keys - Map rv = new LinkedHashMap<>(); - for (K key : keys) { - rv.put(key, loaded.get(key)); - } - return rv; - } catch (Exception e) { - throw newCacheLoadingException(e); - } - } - - private void loadAllReplace(Set keys, final Function, Map> loadFunction) { - try { - store.bulkCompute(keys, entries -> { - Collection keys1 = new ArrayList<>(); - for (Map.Entry entry : entries) { - 
keys1.add(entry.getKey()); - } - return cacheLoaderWriterLoadAllForKeys(keys1, loadFunction).entrySet(); - }); - } catch (StoreAccessException e) { - throw newCacheLoadingException(e); - } - } - - @Override - public void compute(K key, final BiFunction computeFunction, - final Supplier replaceEqual, final Supplier invokeWriter, final Supplier withStatsAndEvents) { - putObserver.begin(); - removeObserver.begin(); - getObserver.begin(); - - try { - BiFunction fn = (mappedKey, mappedValue) -> { - if (mappedValue == null) { - getObserver.end(GetOutcome.MISS); - } else { - getObserver.end(GetOutcome.HIT); - } - - V newValue = computeFunction.apply(mappedKey, mappedValue); - - if (newValue == mappedValue) { - if (! replaceEqual.get()) { - return mappedValue; - } - } - - if (invokeWriter.get()) { - try { - if (newValue != null) { - cacheLoaderWriter.write(mappedKey, newValue); - } else { - cacheLoaderWriter.delete(mappedKey); - } - } catch (Exception e) { - throw new StorePassThroughException(newCacheWritingException(e)); - } - } - - if (newValueAlreadyExpired(mappedKey, mappedValue, newValue)) { - return null; - } - - if (withStatsAndEvents.get()) { - if (newValue == null) { - removeObserver.end(RemoveOutcome.SUCCESS); - } else { - putObserver.end(PutOutcome.PUT); - } - } - - return newValue; - }; - - store.compute(key, fn, replaceEqual); - } catch (StoreAccessException e) { - throw new RuntimeException(e); - } - } - - @Override - public V getAndRemove(K key) { - getObserver.begin(); - removeObserver.begin(); - - final AtomicReference existingValue = new AtomicReference<>(); - try { - store.compute(key, (mappedKey, mappedValue) -> { - existingValue.set(mappedValue); - - try { - cacheLoaderWriter.delete(mappedKey); - } catch (Exception e) { - throw new StorePassThroughException(newCacheWritingException(e)); - } - return null; - }); - } catch (StoreAccessException e) { - getObserver.end(GetOutcome.FAILURE); - removeObserver.end(RemoveOutcome.FAILURE); - throw new RuntimeException(e); - } - - V returnValue = existingValue.get(); - if (returnValue != null) { - getObserver.end(GetOutcome.HIT); - removeObserver.end(RemoveOutcome.SUCCESS); - } else { - getObserver.end(GetOutcome.MISS); - } - return returnValue; - } - - @Override - public V getAndPut(K key, final V value) { - getObserver.begin(); - putObserver.begin(); - - final AtomicReference existingValue = new AtomicReference<>(); - try { - store.compute(key, (mappedKey, mappedValue) -> { - existingValue.set(mappedValue); - - try { - cacheLoaderWriter.write(mappedKey, value); - } catch (Exception e) { - throw new StorePassThroughException(newCacheWritingException(e)); - } - - if (newValueAlreadyExpired(mappedKey, mappedValue, value)) { - return null; - } - - return value; - }); - } catch (StoreAccessException e) { - getObserver.end(GetOutcome.FAILURE); - putObserver.end(PutOutcome.FAILURE); - throw new RuntimeException(e); - } - - V returnValue = existingValue.get(); - if (returnValue != null) { - getObserver.end(GetOutcome.HIT); - } else { - getObserver.end(GetOutcome.MISS); - } - putObserver.end(PutOutcome.PUT); - return returnValue; - } - - @Override - public boolean remove(K key) { - return removeInternal(key); - } - - @Override - public void removeAll() { - Store.Iterator>> iterator = store.iterator(); - while (iterator.hasNext()) { - try { - Entry> next = iterator.next(); - remove(next.getKey()); - } catch (StoreAccessException cae) { - // skip - } - } - } - } - - private class CacheEntryIterator implements Iterator> { - - private final 
Store.Iterator>> iterator; - private final boolean quiet; - private Cache.Entry> current; - private Cache.Entry> next; - private StoreAccessException nextException; - - public CacheEntryIterator(boolean quiet) { - this.quiet = quiet; - this.iterator = store.iterator(); - advance(); - } - - private void advance() { - try { - while (iterator.hasNext()) { - next = iterator.next(); - if (getNoLoader(next.getKey()) != null) { - return; - } - } - next = null; - } catch (RuntimeException re) { - nextException = new StoreAccessException(re); - next = null; - } catch (StoreAccessException cae) { - nextException = cae; - next = null; - } - } - - @Override - public boolean hasNext() { - statusTransitioner.checkAvailable(); - return nextException != null || next != null; - } - - @Override - public Entry next() { - if (!hasNext()) { - throw new NoSuchElementException(); - } - - if (!quiet) getObserver.begin(); - if (nextException == null) { - if (!quiet) getObserver.end(GetOutcome.HIT); - current = next; - advance(); - return new ValueHolderBasedEntry<>(current); - } else { - if (!quiet) getObserver.end(GetOutcome.FAILURE); - StoreAccessException cae = nextException; - nextException = null; - return resilienceStrategy.iteratorFailure(cae); - } - } - - @Override - public void remove() { - statusTransitioner.checkAvailable(); - if (current == null) { - throw new IllegalStateException("No current element"); - } - EhcacheWithLoaderWriter.this.remove(current.getKey(), current.getValue().value()); - current = null; - } - } - - - private static RecoveryCache recoveryCache(final Store store) { - return new RecoveryCache() { - - @Override - public void obliterate() throws StoreAccessException { - store.clear(); - } - - @Override - public void obliterate(K key) throws StoreAccessException { - store.remove(key); - } - - @Override - public void obliterate(Iterable keys) throws StoreAccessException { - for (K key : keys) { - obliterate(key); - } - } - }; - } - - private static class ValueHolderBasedEntry implements Cache.Entry { - private final Cache.Entry> storeEntry; - - ValueHolderBasedEntry(Cache.Entry> storeEntry) { - this.storeEntry = storeEntry; - } - - @Override - public K getKey() { - return storeEntry.getKey(); - } - - @Override - public V getValue() { - return storeEntry.getValue().value(); - } - - } - -} diff --git a/core/src/main/java/org/ehcache/core/InternalCache.java b/core/src/main/java/org/ehcache/core/InternalCache.java index 462999b604..f05edd63e8 100644 --- a/core/src/main/java/org/ehcache/core/InternalCache.java +++ b/core/src/main/java/org/ehcache/core/InternalCache.java @@ -44,7 +44,7 @@ public interface InternalCache extends UserManagedCache { * * @return Jsr107Cache */ - Jsr107Cache getJsr107Cache(); + Jsr107Cache createJsr107Cache(); /** * CacheLoaderWriter diff --git a/core/src/main/java/org/ehcache/core/PersistentUserManagedEhcache.java b/core/src/main/java/org/ehcache/core/PersistentUserManagedEhcache.java index c2108c46ae..8b54fa9768 100644 --- a/core/src/main/java/org/ehcache/core/PersistentUserManagedEhcache.java +++ b/core/src/main/java/org/ehcache/core/PersistentUserManagedEhcache.java @@ -31,6 +31,7 @@ import org.ehcache.spi.loaderwriter.CacheWritingException; import org.ehcache.core.spi.LifeCycled; import org.ehcache.spi.loaderwriter.CacheLoaderWriter; +import org.ehcache.spi.resilience.ResilienceStrategy; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -63,14 +64,10 @@ public class PersistentUserManagedEhcache implements PersistentUserManaged * @param 
eventDispatcher the event dispatcher * @param id an id for this cache */ - public PersistentUserManagedEhcache(CacheConfiguration configuration, Store store, DiskResourceService diskPersistenceService, CacheLoaderWriter cacheLoaderWriter, CacheEventDispatcher eventDispatcher, String id) { + public PersistentUserManagedEhcache(CacheConfiguration configuration, Store store, ResilienceStrategy resilienceStrategy, DiskResourceService diskPersistenceService, CacheLoaderWriter cacheLoaderWriter, CacheEventDispatcher eventDispatcher, String id) { this.logger = LoggerFactory.getLogger(PersistentUserManagedEhcache.class.getName() + "-" + id); this.statusTransitioner = new StatusTransitioner(logger); - if (cacheLoaderWriter == null) { - this.cache = new Ehcache<>(new EhcacheRuntimeConfiguration<>(configuration), store, eventDispatcher, logger, statusTransitioner); - } else { - this.cache = new EhcacheWithLoaderWriter<>(new EhcacheRuntimeConfiguration<>(configuration), store, cacheLoaderWriter, eventDispatcher, true, logger, statusTransitioner); - } + this.cache = new Ehcache<>(new EhcacheRuntimeConfiguration<>(configuration), store, resilienceStrategy, eventDispatcher, logger, statusTransitioner, cacheLoaderWriter); this.diskPersistenceService = diskPersistenceService; this.id = id; } diff --git a/core/src/main/java/org/ehcache/core/SpecIterator.java b/core/src/main/java/org/ehcache/core/SpecIterator.java index b772a6fa88..2bf64c7444 100644 --- a/core/src/main/java/org/ehcache/core/SpecIterator.java +++ b/core/src/main/java/org/ehcache/core/SpecIterator.java @@ -17,7 +17,7 @@ import org.ehcache.Cache; import org.ehcache.core.spi.store.Store; -import org.ehcache.core.spi.store.StoreAccessException; +import org.ehcache.spi.resilience.StoreAccessException; import java.util.Iterator; @@ -59,7 +59,7 @@ public Cache.Entry next() { current = next; - final V nextValue = nextValueHolder.value(); + final V nextValue = nextValueHolder.get(); return new Cache.Entry() { @Override public K getKey() { diff --git a/core/src/main/java/org/ehcache/core/StatusTransitioner.java b/core/src/main/java/org/ehcache/core/StatusTransitioner.java index 6994d4ab9e..1727920038 100644 --- a/core/src/main/java/org/ehcache/core/StatusTransitioner.java +++ b/core/src/main/java/org/ehcache/core/StatusTransitioner.java @@ -220,7 +220,7 @@ public void succeeded() { public StateTransitionException failed(Throwable t) { if (st.done()) { if (t != null) { - throw (AssertionError) new AssertionError("Throwable cannot be thrown if Transition is done.").initCause(t); + throw new AssertionError("Throwable cannot be thrown if Transition is done.", t); } return null; } diff --git a/core/src/main/java/org/ehcache/core/config/BaseCacheConfiguration.java b/core/src/main/java/org/ehcache/core/config/BaseCacheConfiguration.java index 09e775b3b0..c7089c3c2a 100644 --- a/core/src/main/java/org/ehcache/core/config/BaseCacheConfiguration.java +++ b/core/src/main/java/org/ehcache/core/config/BaseCacheConfiguration.java @@ -23,8 +23,7 @@ import org.ehcache.config.CacheConfiguration; import org.ehcache.config.EvictionAdvisor; import org.ehcache.config.ResourcePools; -import org.ehcache.expiry.Expirations; -import org.ehcache.expiry.Expiry; +import org.ehcache.expiry.ExpiryPolicy; import org.ehcache.spi.service.ServiceConfiguration; /** @@ -37,7 +36,7 @@ public class BaseCacheConfiguration implements CacheConfiguration { private final EvictionAdvisor evictionAdvisor; private final Collection> serviceConfigurations; private final ClassLoader classLoader; - 
private final Expiry expiry; + private final ExpiryPolicy expiry; private final ResourcePools resourcePools; /** @@ -53,7 +52,7 @@ public class BaseCacheConfiguration implements CacheConfiguration { */ public BaseCacheConfiguration(Class keyType, Class valueType, EvictionAdvisor evictionAdvisor, - ClassLoader classLoader, Expiry expiry, + ClassLoader classLoader, ExpiryPolicy expiry, ResourcePools resourcePools, ServiceConfiguration... serviceConfigurations) { if (keyType == null) { throw new NullPointerException("keyType cannot be null"); @@ -71,7 +70,7 @@ public BaseCacheConfiguration(Class keyType, Class valueType, if (expiry != null) { this.expiry = expiry; } else { - this.expiry = Expirations.noExpiration(); + this.expiry = ExpiryPolicy.NO_EXPIRY; } this.resourcePools = resourcePools; this.serviceConfigurations = Collections.unmodifiableCollection(Arrays.asList(serviceConfigurations)); @@ -120,8 +119,17 @@ public ClassLoader getClassLoader() { /** * {@inheritDoc} */ + @SuppressWarnings("deprecation") @Override - public Expiry getExpiry() { + public org.ehcache.expiry.Expiry getExpiry() { + return ExpiryUtils.convertToExpiry(expiry); + } + + /** + * {@inheritDoc} + */ + @Override + public ExpiryPolicy getExpiryPolicy() { return expiry; } diff --git a/core/src/main/java/org/ehcache/core/config/DefaultConfiguration.java b/core/src/main/java/org/ehcache/core/config/DefaultConfiguration.java index 8c733026be..408637fc57 100644 --- a/core/src/main/java/org/ehcache/core/config/DefaultConfiguration.java +++ b/core/src/main/java/org/ehcache/core/config/DefaultConfiguration.java @@ -168,7 +168,7 @@ public String readableString() { } StringBuilder serviceCreationConfigurationsToStringBuilder = new StringBuilder(); - for (ServiceCreationConfiguration serviceCreationConfiguration : services) { + for (ServiceCreationConfiguration serviceCreationConfiguration : services) { serviceCreationConfigurationsToStringBuilder.append("- "); if(serviceCreationConfiguration instanceof HumanReadable) { serviceCreationConfigurationsToStringBuilder diff --git a/core/src/main/java/org/ehcache/core/config/ExpiryUtils.java b/core/src/main/java/org/ehcache/core/config/ExpiryUtils.java new file mode 100644 index 0000000000..407b76ee57 --- /dev/null +++ b/core/src/main/java/org/ehcache/core/config/ExpiryUtils.java @@ -0,0 +1,215 @@ +/* + * Copyright Terracotta, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.ehcache.core.config; + +import org.ehcache.expiry.ExpiryPolicy; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.time.Duration; +import java.time.temporal.ChronoUnit; +import java.time.temporal.TemporalUnit; +import java.util.concurrent.TimeUnit; +import java.util.function.Supplier; + +/** + * ExpiryUtils + */ +@SuppressWarnings("deprecation") +public class ExpiryUtils { + + private static final Logger LOG = LoggerFactory.getLogger(ExpiryUtils.class); + + public static boolean isExpiryDurationInfinite(Duration duration) { + return duration.compareTo(ExpiryPolicy.INFINITE) >= 0; + } + + public static org.ehcache.expiry.Expiry convertToExpiry(ExpiryPolicy expiryPolicy) { + + if (expiryPolicy == ExpiryPolicy.NO_EXPIRY) { + @SuppressWarnings("unchecked") + org.ehcache.expiry.Expiry expiry = (org.ehcache.expiry.Expiry) org.ehcache.expiry.Expirations.noExpiration(); + return expiry; + } + + return new org.ehcache.expiry.Expiry() { + + @Override + public org.ehcache.expiry.Duration getExpiryForCreation(K key, V value) { + return convertDuration(expiryPolicy.getExpiryForCreation(key, value)); + } + + @Override + public org.ehcache.expiry.Duration getExpiryForAccess(K key, org.ehcache.ValueSupplier value) { + return convertDuration(expiryPolicy.getExpiryForAccess(key, value::value)); + } + + @Override + public org.ehcache.expiry.Duration getExpiryForUpdate(K key, org.ehcache.ValueSupplier oldValue, V newValue) { + return convertDuration(expiryPolicy.getExpiryForUpdate(key, oldValue::value, newValue)); + } + + @Override + public String toString() { + return "Expiry wrapper of {" + expiryPolicy + " }"; + } + }; + } + + private static org.ehcache.expiry.Duration convertDuration(Duration duration) { + if (duration == null) { + return null; + } + if (duration.isNegative()) { + throw new IllegalArgumentException("Ehcache duration cannot be negative and so does not accept negative java.time.Duration: " + duration); + } + if (duration.isZero()) { + return org.ehcache.expiry.Duration.ZERO; + } else { + long nanos = duration.getNano(); + if (nanos == 0) { + return org.ehcache.expiry.Duration.of(duration.getSeconds(), TimeUnit.SECONDS); + } + long seconds = duration.getSeconds(); + long secondsInNanos = TimeUnit.SECONDS.toNanos(seconds); + if (secondsInNanos != Long.MAX_VALUE && Long.MAX_VALUE - secondsInNanos > nanos) { + return org.ehcache.expiry.Duration.of(duration.toNanos(), TimeUnit.NANOSECONDS); + } else { + long secondsInMicros = TimeUnit.SECONDS.toMicros(seconds); + if (secondsInMicros != Long.MAX_VALUE && Long.MAX_VALUE - secondsInMicros > nanos / 1_000) { + return org.ehcache.expiry.Duration.of(secondsInMicros + nanos / 1_000, TimeUnit.MICROSECONDS); + } else { + long secondsInMillis = TimeUnit.SECONDS.toMillis(seconds); + if (secondsInMillis != Long.MAX_VALUE && Long.MAX_VALUE - secondsInMillis > nanos / 1_000_000) { + return org.ehcache.expiry.Duration.of(duration.toMillis(), TimeUnit.MILLISECONDS); + } + } + } + return org.ehcache.expiry.Duration.of(seconds, TimeUnit.SECONDS); + } + } + + public static ExpiryPolicy convertToExpiryPolicy(org.ehcache.expiry.Expiry expiry) { + if (expiry == org.ehcache.expiry.Expirations.noExpiration()) { + @SuppressWarnings("unchecked") + ExpiryPolicy expiryPolicy = (ExpiryPolicy) ExpiryPolicy.NO_EXPIRY; + return expiryPolicy; + } + + return new ExpiryPolicy() { + @Override + public Duration getExpiryForCreation(K key, V value) { + org.ehcache.expiry.Duration duration = expiry.getExpiryForCreation(key, value); + return 
convertDuration(duration); + } + + @Override + public Duration getExpiryForAccess(K key, Supplier value) { + org.ehcache.expiry.Duration duration = expiry.getExpiryForAccess(key, value::get); + return convertDuration(duration); + } + + @Override + public Duration getExpiryForUpdate(K key, Supplier oldValue, V newValue) { + org.ehcache.expiry.Duration duration = expiry.getExpiryForUpdate(key, oldValue::get, newValue); + return convertDuration(duration); + } + + @Override + public String toString() { + return "Expiry wrapper of {" + expiry + " }"; + } + + private Duration convertDuration(org.ehcache.expiry.Duration duration) { + if (duration == null) { + return null; + } + if (duration.isInfinite()) { + return ExpiryPolicy.INFINITE; + } + try { + return Duration.of(duration.getLength(), jucTimeUnitToTemporalUnit(duration.getTimeUnit())); + } catch (ArithmeticException e) { + return ExpiryPolicy.INFINITE; + } + } + }; + } + + public static TemporalUnit jucTimeUnitToTemporalUnit(TimeUnit timeUnit) { + switch (timeUnit) { + case NANOSECONDS: + return ChronoUnit.NANOS; + case MICROSECONDS: + return ChronoUnit.MICROS; + case MILLISECONDS: + return ChronoUnit.MILLIS; + case SECONDS: + return ChronoUnit.SECONDS; + case MINUTES: + return ChronoUnit.MINUTES; + case HOURS: + return ChronoUnit.HOURS; + case DAYS: + return ChronoUnit.DAYS; + default: + throw new AssertionError("Unknown TimeUnit: " + timeUnit); + } + } + + public static long getExpirationMillis(long now, Duration duration) { + try { + return duration.plusMillis(now).toMillis(); + } catch (ArithmeticException e) { + return Long.MAX_VALUE; + } + + } + + /** + * Returns the expiry for creation duration returned by the provided {@link ExpiryPolicy} but checks for immediate + * expiry, null expiry and exceptions. In all those cases, {@code Duration.ZERO} will be returned.
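+ * <p>
+ * A minimal usage sketch (illustrative only, not part of this change; it relies on
+ * {@code ExpiryPolicy.NO_EXPIRY} reporting an infinite creation duration):
+ * <pre>{@code
+ * Duration d = getExpiryForCreation("key", "value", ExpiryPolicy.NO_EXPIRY);
+ * // d is ExpiryPolicy.INFINITE; the method never returns null
+ * }</pre>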
+ * + * @param key key to pass to {@link ExpiryPolicy#getExpiryForCreation(Object, Object)} + * @param value value to pass to {@link ExpiryPolicy#getExpiryForCreation(Object, Object)} + * @param expiry expiry queried + * @param <K> type of key + * @param <V> type of value + * @return the duration returned by {@link ExpiryPolicy#getExpiryForCreation(Object, Object)}, or {@code Duration.ZERO} + * if the call throws an exception, if the returned duration is {@code null} or if it is lower than or equal to 0 + */ + public static Duration getExpiryForCreation(K key, V value, ExpiryPolicy expiry) { + Duration duration; + try { + duration = expiry.getExpiryForCreation(key, value); + } catch (RuntimeException e) { + LOG.error("Expiry computation caused an exception - Expiry duration will be 0", e); + return Duration.ZERO; + } + + if (duration == null) { + LOG.error("Expiry for creation can't be null - Expiry duration will be 0"); + return Duration.ZERO; + } + + if (Duration.ZERO.compareTo(duration) >= 0) { + return Duration.ZERO; + } + + return duration; + } +} diff --git a/core/src/main/java/org/ehcache/core/config/ResourcePoolsImpl.java b/core/src/main/java/org/ehcache/core/config/ResourcePoolsImpl.java index 10e0b494e7..dca19f48ee 100644 --- a/core/src/main/java/org/ehcache/core/config/ResourcePoolsImpl.java +++ b/core/src/main/java/org/ehcache/core/config/ResourcePoolsImpl.java @@ -25,8 +25,6 @@ import java.util.ArrayList; import java.util.Collection; import java.util.Collections; -import java.util.Comparator; -import java.util.EnumMap; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -69,25 +67,27 @@ public Set> getResourceTypeSet() { */ @Override public ResourcePools validateAndMerge(ResourcePools toBeUpdated) { + Set> resourceTypeSet = toBeUpdated.getResourceTypeSet(); + // Ensure update pool types already exist in existing pools - if(!getResourceTypeSet().containsAll(toBeUpdated.getResourceTypeSet())) { + if(!getResourceTypeSet().containsAll(resourceTypeSet)) { throw new IllegalArgumentException("Pools to be updated cannot contain previously undefined resources pools"); } // Can not update OFFHEAP - if(toBeUpdated.getResourceTypeSet().contains(ResourceType.Core.OFFHEAP)) { + if(resourceTypeSet.contains(ResourceType.Core.OFFHEAP)) { throw new UnsupportedOperationException("Updating OFFHEAP resource is not supported"); } // Can not update DISK - if(toBeUpdated.getResourceTypeSet().contains(ResourceType.Core.DISK)) { + if(resourceTypeSet.contains(ResourceType.Core.DISK)) { throw new UnsupportedOperationException("Updating DISK resource is not supported"); } - for(ResourceType currentResourceType : toBeUpdated.getResourceTypeSet()) { getPoolForResource(currentResourceType).validateUpdate(toBeUpdated.getPoolForResource(currentResourceType)); } - Map, ResourcePool> poolsMap = new HashMap<>(); + Map, ResourcePool> poolsMap = new HashMap<>(pools.size() + resourceTypeSet.size()); poolsMap.putAll(pools); - for(ResourceType currentResourceType : toBeUpdated.getResourceTypeSet()) { + for(ResourceType currentResourceType : resourceTypeSet) { ResourcePool poolForResource = toBeUpdated.getPoolForResource(currentResourceType); poolsMap.put(currentResourceType, poolForResource); } @@ -107,9 +107,9 @@ public static void validateResourcePools(Collection pool ordered.add((SizedResourcePool)pool); } } - Collections.sort(ordered, (o1, o2) -> { + ordered.sort((o1, o2) -> { int retVal = o2.getType().getTierHeight() -
o1.getType().getTierHeight(); - if(retVal == 0) { + if (retVal == 0) { return o1.toString().compareTo(o2.toString()); } else { return retVal; diff --git a/core/src/main/java/org/ehcache/core/config/store/StoreStatisticsConfiguration.java b/core/src/main/java/org/ehcache/core/config/store/StoreStatisticsConfiguration.java new file mode 100644 index 0000000000..473861b995 --- /dev/null +++ b/core/src/main/java/org/ehcache/core/config/store/StoreStatisticsConfiguration.java @@ -0,0 +1,46 @@ +/* + * Copyright Terracotta, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.ehcache.core.config.store; + +import org.ehcache.core.spi.store.Store; +import org.ehcache.spi.service.ServiceConfiguration; + +/** + * Configure whether statistics are enabled on stores. By default they are enabled in a tiered + * configuration to accurately track the usage of each tier. If a store is + * standing alone, then they will be disabled by default since they are a mirror + * of the cache statistics. + *
<p>
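+ * A hypothetical way to force operation statistics on (the builder and {@code heap}
+ * helper are the usual 3.x configuration APIs, shown here only for illustration):
+ * <pre>{@code
+ * CacheConfigurationBuilder.newCacheConfigurationBuilder(Long.class, String.class, heap(100))
+ *     .add(new StoreStatisticsConfiguration(true));
+ * }</pre>
+ * <p>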
+ * Note that statistics about the store size, mapping and so on are not affected + * by this configuration. Only operation statistics (e.g. get/put counts) are disabled. + */ +public class StoreStatisticsConfiguration implements ServiceConfiguration { + + private final boolean operationStatisticsEnabled; + + public StoreStatisticsConfiguration(boolean operationStatisticsEnabled) { + this.operationStatisticsEnabled = operationStatisticsEnabled; + } + + public boolean isOperationStatisticsEnabled() { + return operationStatisticsEnabled; + } + + @Override + public Class getServiceType() { + return Store.Provider.class; + } +} diff --git a/core/src/main/java/org/ehcache/core/events/NullStoreEventDispatcher.java b/core/src/main/java/org/ehcache/core/events/NullStoreEventDispatcher.java index 8723d1b365..8bc80d7c4a 100644 --- a/core/src/main/java/org/ehcache/core/events/NullStoreEventDispatcher.java +++ b/core/src/main/java/org/ehcache/core/events/NullStoreEventDispatcher.java @@ -16,10 +16,11 @@ package org.ehcache.core.events; -import org.ehcache.ValueSupplier; import org.ehcache.core.spi.store.events.StoreEventFilter; import org.ehcache.core.spi.store.events.StoreEventListener; +import java.util.function.Supplier; + /** * NullStoreEventDispatcher */ @@ -31,12 +32,12 @@ public static StoreEventDispatcher nullStoreEventDispatcher() { private final StoreEventSink storeEventSink = new StoreEventSink() { @Override - public void evicted(K key, ValueSupplier value) { + public void evicted(K key, Supplier value) { // Do nothing } @Override - public void expired(K key, ValueSupplier value) { + public void expired(K key, Supplier value) { // Do nothing } @@ -46,12 +47,12 @@ public void created(K key, V value) { } @Override - public void updated(K key, ValueSupplier previousValue, V newValue) { + public void updated(K key, Supplier previousValue, V newValue) { // Do nothing } @Override - public void removed(K key, ValueSupplier removed) { + public void removed(K key, Supplier removed) { // Do nothing } }; diff --git a/core/src/main/java/org/ehcache/core/events/StoreEventSink.java b/core/src/main/java/org/ehcache/core/events/StoreEventSink.java index 4dbe7426ad..f817573a1b 100644 --- a/core/src/main/java/org/ehcache/core/events/StoreEventSink.java +++ b/core/src/main/java/org/ehcache/core/events/StoreEventSink.java @@ -16,7 +16,7 @@ package org.ehcache.core.events; -import org.ehcache.ValueSupplier; +import java.util.function.Supplier; /** * Interface on which {@link org.ehcache.core.spi.store.Store} operations are to record events. @@ -29,7 +29,7 @@ public interface StoreEventSink { * @param key removed key * @param value value supplier of removed value */ - void removed(K key, ValueSupplier value); + void removed(K key, Supplier value); /** * Indicates the mapping was updated. @@ -38,7 +38,7 @@ public interface StoreEventSink { * @param oldValue value supplier of old value * @param newValue the new value */ - void updated(K key, ValueSupplier oldValue, V newValue); + void updated(K key, Supplier oldValue, V newValue); /** * Indicates the mapping was expired. @@ -46,7 +46,7 @@ public interface StoreEventSink { * @param key the expired key * @param value value supplier of expired value */ - void expired(K key, ValueSupplier value); + void expired(K key, Supplier value); /** * Indicates a mapping was created. 
@@ -62,5 +62,5 @@ public interface StoreEventSink { * @param key the evicted key * @param value value supplier of evicted value */ - void evicted(K key, ValueSupplier value); + void evicted(K key, Supplier value); } diff --git a/core/src/main/java/org/ehcache/core/exceptions/ExceptionFactory.java b/core/src/main/java/org/ehcache/core/exceptions/ExceptionFactory.java index e012ac5bbe..520ddc9ba7 100644 --- a/core/src/main/java/org/ehcache/core/exceptions/ExceptionFactory.java +++ b/core/src/main/java/org/ehcache/core/exceptions/ExceptionFactory.java @@ -47,4 +47,30 @@ public static CacheWritingException newCacheWritingException(Exception e) { public static CacheLoadingException newCacheLoadingException(Exception e) { return new CacheLoadingException(e); } + + /** + * Creates a new {@code CacheWritingException} with the provided exception as cause and a suppressed one. + * + * @param e the cause + * @param suppressed the suppressed exception to add to the new exception + * @return a cache writing exception + */ + public static CacheWritingException newCacheWritingException(Exception e, Exception suppressed) { + CacheWritingException ne = new CacheWritingException(e); + ne.addSuppressed(suppressed); + return ne; + } + + /** + * Creates a new {@code CacheLoadingException} with the provided exception as cause and a suppressed one. + * + * @param e the cause + * @param suppressed the suppressed exception to add to the new exception + * @return a cache loading exception + */ + public static CacheLoadingException newCacheLoadingException(Exception e, Exception suppressed) { + CacheLoadingException ne = new CacheLoadingException(e); + ne.addSuppressed(suppressed); + return ne; + } } diff --git a/core/src/main/java/org/ehcache/core/exceptions/StorePassThroughException.java b/core/src/main/java/org/ehcache/core/exceptions/StorePassThroughException.java index 219ca9787b..9bcc5beff6 100644 --- a/core/src/main/java/org/ehcache/core/exceptions/StorePassThroughException.java +++ b/core/src/main/java/org/ehcache/core/exceptions/StorePassThroughException.java @@ -16,7 +16,7 @@ package org.ehcache.core.exceptions; -import org.ehcache.core.spi.store.StoreAccessException; +import org.ehcache.spi.resilience.StoreAccessException; /** * A generic wrapper runtime exception that will not be caught and @@ -48,6 +48,13 @@ public StorePassThroughException(final Throwable cause) { super(cause); } + @Override + public synchronized Throwable fillInStackTrace() { + // skip the stack trace filling because this exception is just a placeholder and won't ever be caught outside of + // a store + return this; + } + /** * Helper method for handling runtime exceptions. *
<p>
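* A hypothetical call site (illustrative; it assumes the store operation wraps checked
* failures in {@code StorePassThroughException}):
* <pre>{@code
* try {
*   return store.get(key);
* } catch (Exception e) {
*   throw StorePassThroughException.handleException(e);
* }
* }</pre>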
@@ -59,11 +66,11 @@ public StorePassThroughException(final Throwable cause) { * @return StoreAccessException to be thrown * @throws RuntimeException if {@code re} is a {@code StorePassThroughException} containing a {@code RuntimeException} */ - public static StoreAccessException handleRuntimeException(RuntimeException re) { + public static StoreAccessException handleException(Exception re) { if(re instanceof StorePassThroughException) { Throwable cause = re.getCause(); if(cause instanceof RuntimeException) { - throw (RuntimeException) cause; + throw (RuntimeException) cause; } else { return new StoreAccessException(cause); } diff --git a/core/src/main/java/org/ehcache/core/internal/events/EventListenerWrapper.java b/core/src/main/java/org/ehcache/core/internal/events/EventListenerWrapper.java index e8474f8e1f..f91477ca1a 100644 --- a/core/src/main/java/org/ehcache/core/internal/events/EventListenerWrapper.java +++ b/core/src/main/java/org/ehcache/core/internal/events/EventListenerWrapper.java @@ -73,7 +73,7 @@ public boolean equals(Object other) { if (!(other instanceof EventListenerWrapper)) { return false; } - EventListenerWrapper l2 = (EventListenerWrapper)other; + EventListenerWrapper l2 = (EventListenerWrapper)other; return listener.equals(l2.listener); } @@ -82,7 +82,7 @@ public void onEvent(CacheEvent event) { listener.onEvent(event); } - public CacheEventListener getListener() { + public CacheEventListener getListener() { return listener; } diff --git a/core/src/main/java/org/ehcache/core/internal/resilience/AbstractResilienceStrategy.java b/core/src/main/java/org/ehcache/core/internal/resilience/AbstractResilienceStrategy.java new file mode 100644 index 0000000000..60ac37f9cc --- /dev/null +++ b/core/src/main/java/org/ehcache/core/internal/resilience/AbstractResilienceStrategy.java @@ -0,0 +1,193 @@ +/* + * Copyright Terracotta, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.ehcache.core.internal.resilience; + +import org.ehcache.Cache; +import org.ehcache.CacheIterationException; +import org.ehcache.core.internal.util.Pacer; +import org.ehcache.core.spi.time.SystemTimeSource; +import org.ehcache.spi.resilience.RecoveryStore; +import org.ehcache.spi.resilience.ResilienceStrategy; +import org.ehcache.spi.resilience.StoreAccessException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Base class that can be used for resilience strategies. It provides helper methods for the resilience strategy to use + * for cleanup in case of error. It then notifies if the cache was able to recover from the error or is now in an + * inconsistent state. By default, the notification is to log the error. + */ +public abstract class AbstractResilienceStrategy implements ResilienceStrategy { + + private final Logger LOGGER = LoggerFactory.getLogger(getClass()); + + private final RecoveryStore store; + + /** + * Used to prevent logging the same error continuously. Instead, we will log the error at most once every 30 seconds.
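+ * <p>
+ * For illustration only (matching how the methods below use it): the first call in a
+ * 30 second window runs the error branch, later calls run the debug branch.
+ * <pre>{@code
+ * pacer.pacedCall(() -> LOGGER.error("failure"), () -> LOGGER.debug("failure"));
+ * }</pre>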
+ */ + private final Pacer pacer = new Pacer(SystemTimeSource.INSTANCE, 30_000); + + /** + * Unique constructor. It takes a {@link RecoveryStore} that will be used for cleanup operations. + * + * @param store store to clean + */ + protected AbstractResilienceStrategy(RecoveryStore store) { + this.store = store; + } + + /** + * {@inheritDoc} + */ + @Override + public Cache.Entry iteratorFailure(StoreAccessException e) { + LOGGER.error("Ehcache iterator terminated early due to exception", e); + throw new CacheIterationException(e); + } + + /** + * Clear all entries from the store. + * + * @param from original failure causing the cleanup + */ + protected void cleanup(StoreAccessException from) { + try { + store.obliterate(); + } catch (StoreAccessException e) { + inconsistent(from, e); + return; + } + recovered(from); + } + + /** + * Clean all keys from the store. + * + * @param keys keys to clean + * @param from original failure causing the cleanup + */ + protected void cleanup(Iterable keys, StoreAccessException from) { + try { + store.obliterate(keys); + } catch (StoreAccessException e) { + inconsistent(keys, from, e); + return; + } + recovered(keys, from); + } + + /** + * Clean the key from the store. + * + * @param key key to clean + * @param from original failure causing the cleanup + */ + protected void cleanup(K key, StoreAccessException from) { + try { + store.obliterate(key); + } catch (StoreAccessException e) { + inconsistent(key, from, e); + return; + } + recovered(key, from); + } + + /** + * Called when the cache recovered from a failing store operation on the key. + * + * @param key key that failed + * @param from exception thrown by the failing operation + */ + protected void recovered(K key, StoreAccessException from) { + LOGGER.info("Ehcache key {} recovered from", key, from); + } + + /** + * Called when the cache recovered from a failing store operation on a list of keys. + * + * @param keys keys that failed + * @param from exception thrown by the failing operation + */ + protected void recovered(Iterable keys, StoreAccessException from) { + LOGGER.info("Ehcache keys {} recovered from", keys, from); + } + + /** + * Called when the cache recovered from a failing store global operation (no specific key involved, e.g. + * {@code clear()}). + * + * @param from exception thrown by the failing operation + */ + protected void recovered(StoreAccessException from) { + LOGGER.info("Ehcache recovered from", from); + } + + /** + * Called when the cache failed to recover from a failing store operation on a key. + * + * @param key key now inconsistent + * @param because exception thrown by the failing operation + * @param cleanup all the exceptions that occurred during cleanup + */ + protected void inconsistent(K key, StoreAccessException because, StoreAccessException... cleanup) { + pacedErrorLog("Ehcache key {} in possible inconsistent state", key, because); + } + + /** + * Called when the cache failed to recover from a failing store operation on a list of keys. + * + * @param keys keys now inconsistent + * @param because exception thrown by the failing operation + * @param cleanup all the exceptions that occurred during cleanup + */ + protected void inconsistent(Iterable keys, StoreAccessException because, StoreAccessException... cleanup) { + pacedErrorLog("Ehcache keys {} in possible inconsistent state", keys, because); + } + + /** + * Called when the cache failed to recover from a failing store global operation (no specific key involved, e.g. + * {@code clear()}).
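+ * <p>
+ * Subclasses may override these hooks; a hypothetical sketch that counts occurrences
+ * before delegating to the paced log ({@code meter} is an illustrative field, not part
+ * of this class):
+ * <pre>{@code
+ * protected void inconsistent(StoreAccessException because, StoreAccessException... cleanup) {
+ *   meter.increment("cache.inconsistent"); // hypothetical metrics call
+ *   super.inconsistent(because, cleanup);
+ * }
+ * }</pre>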
+ * + * @param because exception thrown by the failing operation + * @param cleanup all the exceptions that occurred during cleanup + */ + protected void inconsistent(StoreAccessException because, StoreAccessException... cleanup) { + pacedErrorLog("Ehcache in possible inconsistent state", because); + } + + /** + * Log the message at error level at most once every 30 seconds. All other occurrences are logged at debug level. + * + * @param message message to log + * @param e exception to log + */ + protected void pacedErrorLog(String message, StoreAccessException e) { + pacer.pacedCall(() -> LOGGER.error(message + " - Similar messages will be suppressed for 30 seconds", e), () -> LOGGER.debug(message, e)); + } + + /** + * Log the message at error level at most once every 30 seconds. All other occurrences are logged at debug level. + * + * @param message message to log + * @param arg1 first log param + * @param arg2 second log param + */ + protected void pacedErrorLog(String message, Object arg1, Object arg2) { + pacer.pacedCall(() -> LOGGER.error(message + " - Similar messages will be suppressed for 30 seconds", arg1, arg2), () -> LOGGER.debug(message, arg1, arg2)); + } + +} diff --git a/core/src/main/java/org/ehcache/core/internal/resilience/LoggingRobustResilienceStrategy.java b/core/src/main/java/org/ehcache/core/internal/resilience/LoggingRobustResilienceStrategy.java deleted file mode 100644 index 5d79ca5b47..0000000000 --- a/core/src/main/java/org/ehcache/core/internal/resilience/LoggingRobustResilienceStrategy.java +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Copyright Terracotta, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.ehcache.core.internal.resilience; - -import org.ehcache.Cache; -import org.ehcache.core.spi.store.StoreAccessException; -import org.ehcache.CacheIterationException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * - * @author cdennis - */ -public class LoggingRobustResilienceStrategy extends RobustResilienceStrategy { - - private static final Logger LOGGER = LoggerFactory.getLogger(LoggingRobustResilienceStrategy.class); - - public LoggingRobustResilienceStrategy(RecoveryCache store) { - super(store); - } - - @Override - public Cache.Entry iteratorFailure(StoreAccessException e) { - LOGGER.error("Ehcache iterator terminated early due to exception", e); - throw new CacheIterationException(e); - } - - @Override - protected void recovered(K key, StoreAccessException from) { - LOGGER.info("Ehcache key {} recovered from", key, from); - } - - @Override - protected void recovered(Iterable keys, StoreAccessException from) { - LOGGER.info("Ehcache keys {} recovered from", keys, from); - } - - @Override - protected void recovered(StoreAccessException from) { - LOGGER.info("Ehcache recovered from", from); - } - - @Override - protected void inconsistent(K key, StoreAccessException because, StoreAccessException...
cleanup) { - LOGGER.error("Ehcache key {} in possible inconsistent state due to ", key, because); - } - - @Override - protected void inconsistent(Iterable keys, StoreAccessException because, StoreAccessException... cleanup) { - LOGGER.error("Ehcache keys {} in possible inconsistent state due to ", keys, because); - } - - @Override - protected void inconsistent(StoreAccessException because, StoreAccessException... cleanup) { - LOGGER.error("Ehcache in possible inconsistent state due to ", because); - } -} diff --git a/core/src/main/java/org/ehcache/core/internal/resilience/ResilienceStrategy.java b/core/src/main/java/org/ehcache/core/internal/resilience/ResilienceStrategy.java deleted file mode 100644 index b033d9f002..0000000000 --- a/core/src/main/java/org/ehcache/core/internal/resilience/ResilienceStrategy.java +++ /dev/null @@ -1,392 +0,0 @@ -/* - * Copyright Terracotta, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.ehcache.core.internal.resilience; - -import java.util.Map; - -import org.ehcache.Cache; -import org.ehcache.spi.loaderwriter.BulkCacheLoadingException; -import org.ehcache.spi.loaderwriter.BulkCacheWritingException; -import org.ehcache.core.spi.store.StoreAccessException; -import org.ehcache.spi.loaderwriter.CacheLoadingException; -import org.ehcache.spi.loaderwriter.CacheWritingException; -import org.ehcache.spi.loaderwriter.CacheLoaderWriter; - -/** - * A strategy for providing cache resilience in the face of failure. - *
<p>
- * An implementation of this interface is used by a cache to decide how to - * recover after internal components of the cache fail. Implementations of - * these methods are expected to take suitable recovery steps. They can then - * choose between allowing the operation to terminate successfully, or throw an - * exception which will be propagated to the thread calling in to the cache. - *
<p>
- * Resilience in this context refers only to resilience against cache failures - * and not to resilience against failures of any underlying - * {@link CacheLoaderWriter}. To this end writer or loader failures will only be - * reported to the strategy in the context of a coincident cache failure. - * Isolated writer and loader exceptions will be thrown directly. - * - * @param the type of the keys used to access data within the cache - * @param the type of the values held within the cache - * - * @author Chris Dennis - */ -public interface ResilienceStrategy { - - /** - * Called when a {@link Cache#get(java.lang.Object)} fails on a cache without - * a cache loader due to an underlying store failure. - * - * @param key the key being retrieved - * @param e the triggered failure - * @return the value to return from the operation - */ - V getFailure(K key, StoreAccessException e); - - /** - * Called when a {@link Cache#get(java.lang.Object)} fails on a cache with a - * cache loader due to an underlying store failure. - * - * @param key the key being retrieved - * @param loaded the value from the loader - * @param e the triggered failure - * @return the value to return from the operation - */ - V getFailure(K key, V loaded, StoreAccessException e); - - /** - * Called when a {@link Cache#get(java.lang.Object)} fails on a cache with a - * cache loader due to an underlying store failure. - * - * @param key the key being retrieved - * @param e the cache failure - * @param f the loader failure - * @return the value to return from the operation - */ - V getFailure(K key, StoreAccessException e, CacheLoadingException f); - - /** - * Called when a {@link Cache#containsKey(java.lang.Object)} fails due to an - * underlying store failure, and the resultant cache load operation also fails. - * - * @param key the key being queried - * @param e the triggered failure - * @return the value to return from the operation - */ - boolean containsKeyFailure(K key, StoreAccessException e); - - /** - * Called when a {@link Cache#put(java.lang.Object, java.lang.Object)} fails - * due to an underlying store failure. - * - * @param key the key being put - * @param value the value being put - * @param e the triggered failure - */ - void putFailure(K key, V value, StoreAccessException e); - - /** - * Called when a {@link Cache#put(java.lang.Object, java.lang.Object)} fails - * due to an underlying store failure, and the associated cache write - * operation also failed. - * - * @param key the key being put - * @param value the value being put - * @param e the cache failure - * @param f the writer failure - */ - void putFailure(K key, V value, StoreAccessException e, CacheWritingException f); - - /** - * Called when a {@link Cache#remove(java.lang.Object)} fails due to an - * underlying store failure. - * - * @param key the key being removed - * @param e the triggered failure - */ - void removeFailure(K key, StoreAccessException e); - - /** - * Called when a {@link Cache#remove(java.lang.Object)} fails - * due to an underlying store failure, and the associated cache write - * operation also failed. - * - * @param key the key being removed - * @param e the cache failure - * @param f the writer failure - */ - void removeFailure(K key, StoreAccessException e, CacheWritingException f); - - /** - * Called when a {@link Cache#clear()} fails due to an underlying store - * failure. 
- * - * @param e the triggered failure - */ - void clearFailure(StoreAccessException e); - - /** - * Called when a cache iterator advancement fails due to an underlying store - * failure. - * - * @param e the triggered failure - * @return an entry to return on a failed iteration - */ - Cache.Entry iteratorFailure(StoreAccessException e); - - /** - * Called when a {@link Cache#putIfAbsent(java.lang.Object, java.lang.Object)} - * fails due to an underlying store failure. - *
<p>
- * If it is known at the time of calling that the key is absent from the cache - * (and the writer if one is present) then {@code knownToBeAbsent} will be - * {@code true}. - * - * @param key the key being put - * @param value the value being put - * @param e the triggered failure - * @param knownToBeAbsent {@code true} if the value is known to be absent - * @return the value to return from the operation - */ - V putIfAbsentFailure(K key, V value, V loaderWriterFunctionResult, StoreAccessException e, boolean knownToBeAbsent); - - /** - * Called when a {@link Cache#putIfAbsent(java.lang.Object, java.lang.Object)} - * fails due to an underlying store failure, and the associated cache write - * operation also failed. - * - * @param key the key being put - * @param value the value being put - * @param e the cache failure - * @param f the writer failure - * @return the value to return from the operation - */ - V putIfAbsentFailure(K key, V value, StoreAccessException e, CacheWritingException f); - - /** - * Called when a {@link Cache#putIfAbsent(java.lang.Object, java.lang.Object)} - * fails due to an underlying store failure, and the associated cache load - * operation also failed. - * - * @param key the key being put - * @param value the value being put - * @param e the cache failure - * @param f the loader failure - * @return the value to return from the operation - */ - V putIfAbsentFailure(K key, V value, StoreAccessException e, CacheLoadingException f); - - /** - * Called when a {@link Cache#remove(java.lang.Object, java.lang.Object)} - * fails due to an underlying store failure. - *
<p>
-   * If it is known at the time of calling that the targeted mapping is present
-   * in the cache (or the writer if one is present) then {@code knownToBePresent}
-   * will be {@code true}.
-   *
-   * @param key the key being removed
-   * @param value the value being removed
-   * @param e the triggered failure
-   * @param knownToBePresent {@code true} if the value is known to be present
-   * @return the value to return from the operation
-   */
-  boolean removeFailure(K key, V value, StoreAccessException e, boolean knownToBePresent);
-
-  /**
-   * Called when a {@link Cache#remove(java.lang.Object, java.lang.Object)}
-   * fails due to an underlying store failure, and the associated cache write
-   * operation also failed.
-   *
-   * @param key the key being removed
-   * @param value the value being removed
-   * @param e the cache failure
-   * @param f the writer failure
-   * @return the value to return from the operation
-   */
-  boolean removeFailure(K key, V value, StoreAccessException e, CacheWritingException f);
-
-  /**
-   * Called when a {@link Cache#remove(java.lang.Object, java.lang.Object)}
-   * fails due to an underlying store failure, and the associated cache load
-   * operation also failed.
-   *
-   * @param key the key being removed
-   * @param value the value being removed
-   * @param e the cache failure
-   * @param f the loader failure
-   * @return the value to return from the operation
-   */
-  boolean removeFailure(K key, V value, StoreAccessException e, CacheLoadingException f);
-
-  /**
-   * Called when a {@link Cache#replace(java.lang.Object, java.lang.Object)}
-   * fails due to an underlying store failure.
-   *
-   * @param key the key being replaced
-   * @param value the value being replaced
-   * @param e the triggered failure
-   * @return the value to return from the operation
-   */
-  V replaceFailure(K key, V value, StoreAccessException e);
-
-  /**
-   * Called when a {@link Cache#replace(java.lang.Object, java.lang.Object)}
-   * fails due to an underlying store failure, and the associated cache write
-   * operation also failed.
-   *
-   * @param key the key being replaced
-   * @param value the value being replaced
-   * @param e the cache failure
-   * @param f the writer failure
-   * @return the value to return from the operation
-   */
-  V replaceFailure(K key, V value, StoreAccessException e, CacheWritingException f);
-
-  /**
-   * Called when a {@link Cache#replace(java.lang.Object, java.lang.Object)}
-   * fails due to an underlying store failure, and the associated cache load
-   * operation also failed.
-   *
-   * @param key the key being replaced
-   * @param value the value being replaced
-   * @param e the cache failure
-   * @param f the loader failure
-   * @return the value to return from the operation
-   */
-  V replaceFailure(K key, V value, StoreAccessException e, CacheLoadingException f);
-
-  /**
-   * Called when a {@link Cache#replace(java.lang.Object, java.lang.Object, java.lang.Object)}
-   * fails due to an underlying store failure.
-   * <p>
-   * If it is known at the time of calling that the target mapping is present
-   * in the cache (or the writer if one is present) then {@code knownToMatch}
-   * will be {@code true}.
-   *
-   * @param key the key being replaced
-   * @param value the expected value
-   * @param newValue the replacement value
-   * @param e the triggered failure
-   * @param knownToMatch {@code true} if the value is known to match
-   * @return the value to return from the operation
-   */
-  boolean replaceFailure(K key, V value, V newValue, StoreAccessException e, boolean knownToMatch);
-
-  /**
-   * Called when a {@link Cache#replace(java.lang.Object, java.lang.Object, java.lang.Object)}
-   * fails due to an underlying store failure, and the associated cache write
-   * operation also failed.
-   *
-   * @param key the key being replaced
-   * @param value the expected value
-   * @param newValue the replacement value
-   * @param e the cache failure
-   * @param f the writer failure
-   * @return the value to return from the operation
-   */
-  boolean replaceFailure(K key, V value, V newValue, StoreAccessException e, CacheWritingException f);
-
-  /**
-   * Called when a {@link Cache#replace(java.lang.Object, java.lang.Object, java.lang.Object)}
-   * fails due to an underlying store failure, and the associated cache load
-   * operation also failed.
-   *
-   * @param key the key being replaced
-   * @param value the expected value
-   * @param newValue the replacement value
-   * @param e the cache failure
-   * @param f the loader failure
-   * @return the value to return from the operation
-   */
-  boolean replaceFailure(K key, V value, V newValue, StoreAccessException e, CacheLoadingException f);
-
-  /**
-   * Called when a {@link Cache#getAll(java.util.Set)} fails on a cache
-   * without a cache loader due to an underlying store failure.
-   *
-   * @param keys the keys being retrieved
-   * @param e the triggered failure
-   * @return the value to return from the operation
-   */
-  Map<K, V> getAllFailure(Iterable<? extends K> keys, StoreAccessException e);
-
-  /**
-   * Called when a {@link Cache#getAll(java.util.Set)} fails on a cache
-   * with a cache loader due to an underlying store failure.
-   *
-   * @param keys the keys being retrieved
-   * @param loaded the values from the loader
-   * @param e the triggered failure
-   * @return the value to return from the operation
-   */
-  Map<K, V> getAllFailure(Iterable<? extends K> keys, Map<K, V> loaded, StoreAccessException e);
-
-  /**
-   * Called when a {@link Cache#getAll(java.util.Set)} fails on a cache
-   * with a cache loader due to an underlying store failure, and the associated
-   * cache load operation also failed.
-   *
-   * @param keys the keys being retrieved
-   * @param e the cache failure
-   * @param f the loader failure
-   * @return the value to return from the operation
-   */
-  Map<K, V> getAllFailure(Iterable<? extends K> keys, StoreAccessException e, BulkCacheLoadingException f);
-
-  /**
-   * Called when a {@link Cache#putAll(java.util.Map)} fails due to an
-   * underlying store failure.
-   *
-   * @param entries the entries being put
-   * @param e the triggered failure
-   */
-  void putAllFailure(Map<? extends K, ? extends V> entries, StoreAccessException e);
-
-  /**
-   * Called when a {@link Cache#putAll(java.util.Map)} fails due to an
-   * underlying store failure, and the associated cache write operation also
-   * failed.
-   *
-   * @param entries the entries being put
-   * @param e the cache failure
-   * @param f the writer failure
-   */
-  void putAllFailure(Map<? extends K, ? extends V> entries, StoreAccessException e, BulkCacheWritingException f);
-
-  /**
-   * Called when a {@link Cache#removeAll(java.util.Set)} fails due to an
-   * underlying store failure.
-   *
-   * @param keys the keys being removed
-   * @param e the triggered failure
-   * @return the value to return from the operation
-   */
-  Map<K, V> removeAllFailure(Iterable<? extends K> keys, StoreAccessException e);
-
-  /**
-   * Called when a {@link Cache#removeAll(java.util.Set)} fails
-   * due to an underlying store failure, and the associated cache write
-   * operation also failed.
-   *
-   * @param keys the keys being removed
-   * @param e the cache failure
-   * @param f the writer failure
-   * @return the value to return from the operation
-   */
-  Map<K, V> removeAllFailure(Iterable<? extends K> keys, StoreAccessException e, BulkCacheWritingException f);
-}
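The interface deleted above is a pure callback contract: the cache core catches the StoreAccessException itself, then asks the strategy what answer the failed operation should report to its caller. Below is a minimal sketch of that contract using local stand-in types; FailureAnswers, FailSafeAnswers and the cut-down StoreAccessException are illustrative names, not the real Ehcache API.

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

// Stand-in for the store-layer failure the cache core intercepts.
class StoreAccessException extends Exception {
  StoreAccessException(Throwable cause) { super(cause); }
}

// Cut-down copy of the deleted callback contract: one method per failed operation.
interface FailureAnswers<K, V> {
  V putIfAbsentFailure(K key, V value, StoreAccessException e, boolean knownToBeAbsent);
  boolean removeFailure(K key, V value, StoreAccessException e, boolean knownToBePresent);
  Map<K, V> getAllFailure(Iterable<? extends K> keys, StoreAccessException e);
}

// A "fail-safe" strategy that answers as if the cache were simply empty.
class FailSafeAnswers<K, V> implements FailureAnswers<K, V> {
  @Override
  public V putIfAbsentFailure(K key, V value, StoreAccessException e, boolean knownToBeAbsent) {
    // putIfAbsent reports the previous mapping; with the store unreadable we
    // answer "no previous value" so the caller observes a successful insert.
    return null;
  }

  @Override
  public boolean removeFailure(K key, V value, StoreAccessException e, boolean knownToBePresent) {
    // The hint spares us a store read that would fail anyway: report success
    // exactly when the mapping was already known to be present.
    return knownToBePresent;
  }

  @Override
  public Map<K, V> getAllFailure(Iterable<? extends K> keys, StoreAccessException e) {
    // Without a loader there is no fallback source: map every key to null.
    Map<K, V> answers = new HashMap<>();
    for (K key : keys) {
      answers.put(key, null);
    }
    return Collections.unmodifiableMap(answers);
  }
}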
diff --git a/core/src/main/java/org/ehcache/core/internal/resilience/RethrowingStoreAccessException.java b/core/src/main/java/org/ehcache/core/internal/resilience/RethrowingStoreAccessException.java
deleted file mode 100644
index c6f155c52f..0000000000
--- a/core/src/main/java/org/ehcache/core/internal/resilience/RethrowingStoreAccessException.java
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Copyright Terracotta, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.ehcache.core.internal.resilience;
-
-import org.ehcache.core.spi.store.StoreAccessException;
-
-/**
- * Generic exception used when an internal operation fails on a {@link org.ehcache.Cache} but shouldn't be
- * handled by a resilience strategy but rather rethrown to the caller.
- *
- * @deprecated This mechanism is a stop-gap solution until {@link ResilienceStrategy}
- * instances can be plugged-in.
- *
- * @author Ludovic Orban
- */
-@Deprecated
-public class RethrowingStoreAccessException extends StoreAccessException {
-
-  /**
-   * Create an instance of RethrowingStoreAccessException.
-   * @param cause the cause RuntimeException that will be rethrown.
-   */
-  public RethrowingStoreAccessException(RuntimeException cause) {
-    super(cause);
-  }
-
-  @Override
-  public synchronized RuntimeException getCause() {
-    return (RuntimeException) super.getCause();
-  }
-}
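The deprecated class deleted above existed so that a RuntimeException raised by user code could tunnel through the store layer and reach the caller unchanged, instead of being absorbed by failure handling. A rough sketch of that wrap-and-unwrap pattern follows; RethrowMarker, storeOperation and TunnelDemo are hypothetical names, not Ehcache classes.

// Checked wrapper that marks a RuntimeException as "rethrow to caller, do not handle".
class RethrowMarker extends Exception {
  RethrowMarker(RuntimeException cause) { super(cause); }

  @Override
  public synchronized RuntimeException getCause() {
    // Covariant narrowing: the wrapped cause is always a RuntimeException.
    return (RuntimeException) super.getCause();
  }
}

class TunnelDemo {
  static void storeOperation() throws RethrowMarker {
    try {
      // Imagine user-supplied code (a loader, a listener, ...) failing here.
      throw new IllegalStateException("user code failed inside the store");
    } catch (RuntimeException e) {
      throw new RethrowMarker(e); // tag it for rethrow rather than recovery
    }
  }

  public static void main(String[] args) {
    try {
      storeOperation();
    } catch (RethrowMarker marker) {
      // Bypass resilience handling: surface the original exception unchanged.
      throw marker.getCause();
    }
  }
}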
diff --git a/core/src/main/java/org/ehcache/core/internal/resilience/RobustLoaderWriterResilienceStrategy.java b/core/src/main/java/org/ehcache/core/internal/resilience/RobustLoaderWriterResilienceStrategy.java
new file mode 100644
index 0000000000..114149fb32
--- /dev/null
+++ b/core/src/main/java/org/ehcache/core/internal/resilience/RobustLoaderWriterResilienceStrategy.java
@@ -0,0 +1,322 @@
+/*
+ * Copyright Terracotta, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.ehcache.core.internal.resilience;
+
+import org.ehcache.core.exceptions.ExceptionFactory;
+import org.ehcache.spi.loaderwriter.BulkCacheLoadingException;
+import org.ehcache.spi.loaderwriter.BulkCacheWritingException;
+import org.ehcache.spi.loaderwriter.CacheLoaderWriter;
+import org.ehcache.spi.resilience.RecoveryStore;
+import org.ehcache.spi.resilience.StoreAccessException;
+
+import java.util.Map;
+import java.util.Objects;
+
+/**
+ * Default resilience strategy used by a {@link org.ehcache.Cache} with a {@link CacheLoaderWriter} specified. It
+ * behaves in two ways:
+ *