Use a separate fork join pool for cache loading operations

This commit is contained in:
Luck 2019-03-05 12:35:29 +00:00
parent d1ca7684d6
commit 3726f6de41
No known key found for this signature in database
GPG Key ID: EFA9B3EC5FD90F8B
15 changed files with 109 additions and 78 deletions

View File

@ -25,7 +25,6 @@
package me.lucko.luckperms.bukkit.context;
import com.github.benmanes.caffeine.cache.Caffeine;
import com.github.benmanes.caffeine.cache.LoadingCache;
import me.lucko.luckperms.api.Contexts;
@ -36,6 +35,7 @@ import me.lucko.luckperms.common.config.ConfigKeys;
import me.lucko.luckperms.common.context.ContextManager;
import me.lucko.luckperms.common.context.ContextsCache;
import me.lucko.luckperms.common.context.ContextsSupplier;
import me.lucko.luckperms.common.util.CaffeineFactory;
import me.lucko.luckperms.common.util.LoadingMap;
import org.bukkit.entity.Player;
@ -49,7 +49,7 @@ public class BukkitContextManager extends ContextManager<Player> {
private final LoadingMap<Player, ContextsCache<Player>> onlineSubjectCaches = LoadingMap.of(key -> new ContextsCache<>(key, this));
// cache the creation of ContextsCache instances for offline players with a 1m expiry
private final LoadingCache<Player, ContextsCache<Player>> offlineSubjectCaches = Caffeine.newBuilder()
private final LoadingCache<Player, ContextsCache<Player>> offlineSubjectCaches = CaffeineFactory.newBuilder()
.expireAfterAccess(1, TimeUnit.MINUTES)
.build(key -> {
ContextsCache<Player> cache = this.onlineSubjectCaches.getIfPresent(key);

View File

@ -25,7 +25,6 @@
package me.lucko.luckperms.bungee.context;
import com.github.benmanes.caffeine.cache.Caffeine;
import com.github.benmanes.caffeine.cache.LoadingCache;
import me.lucko.luckperms.api.Contexts;
@ -33,6 +32,7 @@ import me.lucko.luckperms.api.context.ImmutableContextSet;
import me.lucko.luckperms.bungee.LPBungeePlugin;
import me.lucko.luckperms.common.context.ContextManager;
import me.lucko.luckperms.common.context.ContextsSupplier;
import me.lucko.luckperms.common.util.CaffeineFactory;
import net.md_5.bungee.api.connection.ProxiedPlayer;
@ -40,7 +40,7 @@ import java.util.concurrent.TimeUnit;
public class BungeeContextManager extends ContextManager<ProxiedPlayer> {
private final LoadingCache<ProxiedPlayer, Contexts> contextsCache = Caffeine.newBuilder()
private final LoadingCache<ProxiedPlayer, Contexts> contextsCache = CaffeineFactory.newBuilder()
.expireAfterWrite(50, TimeUnit.MILLISECONDS)
.build(this::calculate);

View File

@ -27,7 +27,6 @@ package me.lucko.luckperms.common.cacheddata;
import com.github.benmanes.caffeine.cache.AsyncLoadingCache;
import com.github.benmanes.caffeine.cache.CacheLoader;
import com.github.benmanes.caffeine.cache.Caffeine;
import me.lucko.luckperms.api.ChatMetaType;
import me.lucko.luckperms.api.Contexts;
@ -41,6 +40,7 @@ import me.lucko.luckperms.common.calculator.CalculatorFactory;
import me.lucko.luckperms.common.calculator.PermissionCalculator;
import me.lucko.luckperms.common.metastacking.SimpleMetaStack;
import me.lucko.luckperms.common.plugin.LuckPermsPlugin;
import me.lucko.luckperms.common.util.CaffeineFactory;
import org.checkerframework.checker.nullness.qual.NonNull;
import org.checkerframework.checker.nullness.qual.Nullable;
@ -64,14 +64,14 @@ public abstract class AbstractCachedData implements CachedData {
/**
* The cache used for {@link PermissionCache} instances.
*/
private final AsyncLoadingCache<Contexts, PermissionCache> permission = Caffeine.newBuilder()
private final AsyncLoadingCache<Contexts, PermissionCache> permission = CaffeineFactory.newBuilder()
.expireAfterAccess(2, TimeUnit.MINUTES)
.buildAsync(new PermissionCacheLoader());
/**
* The cache used for {@link MetaCache} instances.
*/
private final AsyncLoadingCache<MetaContexts, MetaCache> meta = Caffeine.newBuilder()
private final AsyncLoadingCache<MetaContexts, MetaCache> meta = CaffeineFactory.newBuilder()
.expireAfterAccess(2, TimeUnit.MINUTES)
.buildAsync(new MetaCacheLoader());

View File

@ -25,7 +25,6 @@
package me.lucko.luckperms.common.commands.group;
import com.github.benmanes.caffeine.cache.Caffeine;
import com.github.benmanes.caffeine.cache.LoadingCache;
import com.google.common.collect.ImmutableList;
@ -43,6 +42,7 @@ import me.lucko.luckperms.common.locale.command.CommandSpec;
import me.lucko.luckperms.common.model.Group;
import me.lucko.luckperms.common.plugin.LuckPermsPlugin;
import me.lucko.luckperms.common.sender.Sender;
import me.lucko.luckperms.common.util.CaffeineFactory;
import java.util.ArrayList;
import java.util.List;
@ -55,7 +55,7 @@ public class GroupMainCommand extends MainCommand<Group, String> {
// this helps prevent race conditions where commands are being executed concurrently
// and overriding each other.
// it's not a great solution, but it mostly works.
private final LoadingCache<String, ReentrantLock> locks = Caffeine.newBuilder()
private final LoadingCache<String, ReentrantLock> locks = CaffeineFactory.newBuilder()
.expireAfterAccess(1, TimeUnit.HOURS)
.build(key -> new ReentrantLock());

View File

@ -26,7 +26,6 @@
package me.lucko.luckperms.common.commands.misc;
import com.github.benmanes.caffeine.cache.Cache;
import com.github.benmanes.caffeine.cache.Caffeine;
import me.lucko.luckperms.common.bulkupdate.BulkUpdate;
import me.lucko.luckperms.common.bulkupdate.BulkUpdateBuilder;
@ -48,6 +47,7 @@ import me.lucko.luckperms.common.locale.command.CommandSpec;
import me.lucko.luckperms.common.locale.message.Message;
import me.lucko.luckperms.common.plugin.LuckPermsPlugin;
import me.lucko.luckperms.common.sender.Sender;
import me.lucko.luckperms.common.util.CaffeineFactory;
import me.lucko.luckperms.common.util.Predicates;
import java.util.List;
@ -55,7 +55,7 @@ import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;
public class BulkUpdateCommand extends SingleCommand {
private final Cache<String, BulkUpdate> pendingOperations = Caffeine.newBuilder().expireAfterWrite(30, TimeUnit.SECONDS).build();
private final Cache<String, BulkUpdate> pendingOperations = CaffeineFactory.newBuilder().expireAfterWrite(30, TimeUnit.SECONDS).build();
public BulkUpdateCommand(LocaleManager locale) {
super(CommandSpec.BULK_UPDATE.localize(locale), "BulkUpdate", CommandPermission.BULK_UPDATE, Predicates.alwaysFalse());

View File

@ -25,7 +25,6 @@
package me.lucko.luckperms.common.commands.track;
import com.github.benmanes.caffeine.cache.Caffeine;
import com.github.benmanes.caffeine.cache.LoadingCache;
import com.google.common.collect.ImmutableList;
@ -37,6 +36,7 @@ import me.lucko.luckperms.common.locale.command.CommandSpec;
import me.lucko.luckperms.common.model.Track;
import me.lucko.luckperms.common.plugin.LuckPermsPlugin;
import me.lucko.luckperms.common.sender.Sender;
import me.lucko.luckperms.common.util.CaffeineFactory;
import java.util.ArrayList;
import java.util.List;
@ -49,7 +49,7 @@ public class TrackMainCommand extends MainCommand<Track, String> {
// this helps prevent race conditions where commands are being executed concurrently
// and overriding each other.
// it's not a great solution, but it mostly works.
private final LoadingCache<String, ReentrantLock> locks = Caffeine.newBuilder()
private final LoadingCache<String, ReentrantLock> locks = CaffeineFactory.newBuilder()
.expireAfterAccess(1, TimeUnit.HOURS)
.build(key -> new ReentrantLock());

View File

@ -25,7 +25,6 @@
package me.lucko.luckperms.common.commands.user;
import com.github.benmanes.caffeine.cache.Caffeine;
import com.github.benmanes.caffeine.cache.LoadingCache;
import com.google.common.collect.ImmutableList;
@ -46,6 +45,7 @@ import me.lucko.luckperms.common.model.UserIdentifier;
import me.lucko.luckperms.common.plugin.LuckPermsPlugin;
import me.lucko.luckperms.common.sender.Sender;
import me.lucko.luckperms.common.storage.misc.DataConstraints;
import me.lucko.luckperms.common.util.CaffeineFactory;
import me.lucko.luckperms.common.util.Uuids;
import java.util.List;
@ -60,7 +60,7 @@ public class UserMainCommand extends MainCommand<User, UserIdentifier> {
// this helps prevent race conditions where commands are being executed concurrently
// and overriding each other.
// it's not a great solution, but it mostly works.
private final LoadingCache<UUID, ReentrantLock> locks = Caffeine.newBuilder()
private final LoadingCache<UUID, ReentrantLock> locks = CaffeineFactory.newBuilder()
.expireAfterAccess(1, TimeUnit.HOURS)
.build(key -> new ReentrantLock());

View File

@ -25,12 +25,12 @@
package me.lucko.luckperms.common.primarygroup;
import com.github.benmanes.caffeine.cache.Caffeine;
import com.github.benmanes.caffeine.cache.LoadingCache;
import me.lucko.luckperms.api.Contexts;
import me.lucko.luckperms.common.model.User;
import me.lucko.luckperms.common.node.factory.NodeFactory;
import me.lucko.luckperms.common.util.CaffeineFactory;
import org.checkerframework.checker.nullness.qual.NonNull;
@ -44,7 +44,7 @@ import java.util.concurrent.TimeUnit;
public abstract class ContextualHolder extends StoredHolder {
// cache lookups
private final LoadingCache<Contexts, Optional<String>> cache = Caffeine.newBuilder()
private final LoadingCache<Contexts, Optional<String>> cache = CaffeineFactory.newBuilder()
.expireAfterAccess(1, TimeUnit.MINUTES)
.build(this::calculateValue);

View File

@ -0,0 +1,27 @@
package me.lucko.luckperms.common.util;
import com.github.benmanes.caffeine.cache.Caffeine;
import java.util.concurrent.ForkJoinPool;
public final class CaffeineFactory {

    /**
     * A dedicated {@link ForkJoinPool} for running LuckPerms cache loading tasks.
     *
     * <p>Caffeine schedules asynchronous work on {@code ForkJoinPool.commonPool()} by
     * default. That shared pool is fixed in size, bounded by
     * {@code Runtime.availableProcessors}, and misbehaving plugins sometimes block its
     * threads with i/o operations, calls to {@code Thread.sleep}, or other waits —
     * which can starve LuckPerms' cache loading operations. Keeping our own pool
     * ensures that can never happen.</p>
     */
    private static final ForkJoinPool LOADER_POOL = new ForkJoinPool();

    /**
     * Creates a {@link Caffeine} builder pre-configured to run asynchronous cache
     * operations on the dedicated LuckPerms loader pool.
     *
     * @return a new builder whose executor is {@link #LOADER_POOL}
     */
    public static Caffeine<Object, Object> newBuilder() {
        return Caffeine.newBuilder().executor(LOADER_POOL);
    }

    private CaffeineFactory() {}
}

View File

@ -25,7 +25,6 @@
package me.lucko.luckperms.nukkit.context;
import com.github.benmanes.caffeine.cache.Caffeine;
import com.github.benmanes.caffeine.cache.LoadingCache;
import me.lucko.luckperms.api.Contexts;
@ -35,6 +34,7 @@ import me.lucko.luckperms.common.config.ConfigKeys;
import me.lucko.luckperms.common.context.ContextManager;
import me.lucko.luckperms.common.context.ContextsCache;
import me.lucko.luckperms.common.context.ContextsSupplier;
import me.lucko.luckperms.common.util.CaffeineFactory;
import me.lucko.luckperms.common.util.LoadingMap;
import me.lucko.luckperms.nukkit.LPNukkitPlugin;
@ -49,7 +49,7 @@ public class NukkitContextManager extends ContextManager<Player> {
private final LoadingMap<Player, ContextsCache<Player>> onlineSubjectCaches = LoadingMap.of(key -> new ContextsCache<>(key, this));
// cache the creation of ContextsCache instances for offline players with a 1m expiry
private final LoadingCache<Player, ContextsCache<Player>> offlineSubjectCaches = Caffeine.newBuilder()
private final LoadingCache<Player, ContextsCache<Player>> offlineSubjectCaches = CaffeineFactory.newBuilder()
.expireAfterAccess(1, TimeUnit.MINUTES)
.build(key -> {
ContextsCache<Player> cache = this.onlineSubjectCaches.getIfPresent(key);

View File

@ -25,10 +25,10 @@
package me.lucko.luckperms.sponge.service.reference;
import com.github.benmanes.caffeine.cache.Caffeine;
import com.github.benmanes.caffeine.cache.LoadingCache;
import com.google.common.base.Splitter;
import me.lucko.luckperms.common.util.CaffeineFactory;
import me.lucko.luckperms.sponge.service.model.LPPermissionService;
import me.lucko.luckperms.sponge.service.model.LPSubject;
import me.lucko.luckperms.sponge.service.model.LPSubjectReference;
@ -64,7 +64,7 @@ public final class SubjectReferenceFactory {
public SubjectReferenceFactory(LPPermissionService service) {
this.service = service;
this.referenceCache = Caffeine.newBuilder()
this.referenceCache = CaffeineFactory.newBuilder()
.expireAfterAccess(1, TimeUnit.HOURS)
.build(a -> new CachedSubjectReference(this.service, a.collectionId, a.id));
}

View File

@ -25,7 +25,6 @@
package me.lucko.luckperms.sponge.context;
import com.github.benmanes.caffeine.cache.Caffeine;
import com.github.benmanes.caffeine.cache.LoadingCache;
import me.lucko.luckperms.api.Contexts;
@ -33,6 +32,7 @@ import me.lucko.luckperms.api.context.ImmutableContextSet;
import me.lucko.luckperms.common.context.ContextManager;
import me.lucko.luckperms.common.context.ContextsCache;
import me.lucko.luckperms.common.context.ContextsSupplier;
import me.lucko.luckperms.common.util.CaffeineFactory;
import me.lucko.luckperms.sponge.LPSpongePlugin;
import org.spongepowered.api.service.permission.Subject;
@ -41,7 +41,7 @@ import java.util.concurrent.TimeUnit;
public class SpongeContextManager extends ContextManager<Subject> {
private final LoadingCache<Subject, ContextsCache<Subject>> subjectCaches = Caffeine.newBuilder()
private final LoadingCache<Subject, ContextsCache<Subject>> subjectCaches = CaffeineFactory.newBuilder()
.expireAfterAccess(1, TimeUnit.MINUTES)
.build(key -> new ContextsCache<>(key, this));

View File

@ -63,35 +63,37 @@ import java.util.function.Predicate;
public class SpongeGroupManager extends AbstractGroupManager<SpongeGroup> implements LPSubjectCollection {
private final LPSpongePlugin plugin;
private final LoadingCache<String, LPSubject> subjectLoadingCache;
private SubjectCollection spongeProxy = null;
private final LoadingCache<String, LPSubject> subjectLoadingCache = Caffeine.newBuilder()
.expireAfterWrite(1, TimeUnit.MINUTES)
.build(s -> {
SpongeGroup group = getIfLoaded(s);
if (group != null) {
// they're already loaded, but the data might not actually be there yet
// if stuff is being loaded, then the user's i/o lock will be locked by the storage impl
group.getIoLock().lock();
group.getIoLock().unlock();
return group.sponge();
}
// Request load
getPlugin().getStorage().createAndLoadGroup(s, CreationCause.INTERNAL).join();
group = getIfLoaded(s);
if (group == null) {
getPlugin().getLogger().severe("Error whilst loading group '" + s + "'.");
throw new RuntimeException();
}
return group.sponge();
});
public SpongeGroupManager(LPSpongePlugin plugin) {
this.plugin = plugin;
this.subjectLoadingCache = Caffeine.newBuilder()
.executor(plugin.getBootstrap().getScheduler().async())
.expireAfterWrite(1, TimeUnit.MINUTES)
.build(s -> {
SpongeGroup group = getIfLoaded(s);
if (group != null) {
// they're already loaded, but the data might not actually be there yet
// if stuff is being loaded, then the user's i/o lock will be locked by the storage impl
group.getIoLock().lock();
group.getIoLock().unlock();
return group.sponge();
}
// Request load
getPlugin().getStorage().createAndLoadGroup(s, CreationCause.INTERNAL).join();
group = getIfLoaded(s);
if (group == null) {
getPlugin().getLogger().severe("Error whilst loading group '" + s + "'.");
throw new RuntimeException();
}
return group.sponge();
});
}
@Override

View File

@ -65,39 +65,41 @@ import java.util.function.Predicate;
public class SpongeUserManager extends AbstractUserManager<SpongeUser> implements LPSubjectCollection {
private final LPSpongePlugin plugin;
private final LoadingCache<UUID, LPSubject> subjectLoadingCache;
private SubjectCollection spongeProxy = null;
private final LoadingCache<UUID, LPSubject> subjectLoadingCache = Caffeine.newBuilder()
.expireAfterWrite(1, TimeUnit.MINUTES)
.build(u -> {
// clock in with the housekeeper
getHouseKeeper().registerUsage(u);
// check if the user instance is already loaded.
SpongeUser user = getIfLoaded(u);
if (user != null) {
// they're already loaded, but the data might not actually be there yet
// if stuff is being loaded, then the user's i/o lock will be locked by the storage impl
user.getIoLock().lock();
user.getIoLock().unlock();
return user.sponge();
}
// Request load
getPlugin().getStorage().loadUser(u, null).join();
user = getIfLoaded(u);
if (user == null) {
getPlugin().getLogger().severe("Error whilst loading user '" + u + "'.");
throw new RuntimeException();
}
return user.sponge();
});
public SpongeUserManager(LPSpongePlugin plugin) {
super(plugin, UserHousekeeper.timeoutSettings(10, TimeUnit.MINUTES));
this.plugin = plugin;
this.subjectLoadingCache = Caffeine.newBuilder()
.executor(this.plugin.getBootstrap().getScheduler().async())
.expireAfterWrite(1, TimeUnit.MINUTES)
.build(u -> {
// clock in with the housekeeper
getHouseKeeper().registerUsage(u);
// check if the user instance is already loaded.
SpongeUser user = getIfLoaded(u);
if (user != null) {
// they're already loaded, but the data might not actually be there yet
// if stuff is being loaded, then the user's i/o lock will be locked by the storage impl
user.getIoLock().lock();
user.getIoLock().unlock();
return user.sponge();
}
// Request load
getPlugin().getStorage().loadUser(u, null).join();
user = getIfLoaded(u);
if (user == null) {
getPlugin().getLogger().severe("Error whilst loading user '" + u + "'.");
throw new RuntimeException();
}
return user.sponge();
});
}
@Override

View File

@ -25,7 +25,6 @@
package me.lucko.luckperms.velocity.context;
import com.github.benmanes.caffeine.cache.Caffeine;
import com.github.benmanes.caffeine.cache.LoadingCache;
import com.velocitypowered.api.proxy.Player;
@ -34,13 +33,14 @@ import me.lucko.luckperms.api.context.ImmutableContextSet;
import me.lucko.luckperms.common.context.ContextManager;
import me.lucko.luckperms.common.context.ContextsCache;
import me.lucko.luckperms.common.context.ContextsSupplier;
import me.lucko.luckperms.common.util.CaffeineFactory;
import me.lucko.luckperms.velocity.LPVelocityPlugin;
import java.util.concurrent.TimeUnit;
public class VelocityContextManager extends ContextManager<Player> {
private final LoadingCache<Player, ContextsCache<Player>> subjectCaches = Caffeine.newBuilder()
private final LoadingCache<Player, ContextsCache<Player>> subjectCaches = CaffeineFactory.newBuilder()
.expireAfterAccess(1, TimeUnit.MINUTES)
.build(key -> new ContextsCache<>(key, this));