Mirror of https://github.com/BentoBoxWorld/BentoBox.git (synced 2024-11-24 11:45:31 +01:00)
Fixes bug with concurrent saving of files to the database.
Implemented a FIFO queue for async save requests in the YAML and MySQL handlers. The assumption is that most database objects are held in memory, because loading is not routed through the queue: it is theoretically possible to load an object on the main thread before its pending save has finished on the async thread, if you are really quick. So, in general, load objects once and avoid reloading them unless enough time has passed for the async save to complete. For most situations this race condition should not occur, but be aware of it. Saving on shutdown is not async and takes as long as it takes. Also added defensive code that disables the plugin and prints a single error message if the database connection info is wrong, instead of spamming errors. Changed CleanSuperFlatListener to use a LinkedList for its queue.
This commit is contained in:
parent
5ed86ea271
commit
044c67ea28
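The commit's core mechanism is a single-consumer FIFO: saves are enqueued on a ConcurrentLinkedQueue and drained by one async task, so concurrent writes to the same file can no longer interleave. Below is a minimal, self-contained sketch of that pattern, with a plain Java thread standing in for Bukkit's async scheduler; the class and method names are illustrative and not part of BentoBox.

import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;

/** Sketch of the FIFO save queue from this commit (illustrative only). */
public class SaveQueueSketch {

    private final Queue<Runnable> saveQueue = new ConcurrentLinkedQueue<>();
    private volatile boolean enabled = true;
    private final Thread saveThread;

    public SaveQueueSketch() {
        saveThread = new Thread(() -> {
            // Mirrors: while (plugin.isEnabled() || !saveQueue.isEmpty())
            // keep draining until disabled AND the queue is empty.
            while (enabled || !saveQueue.isEmpty()) {
                Runnable save;
                while ((save = saveQueue.poll()) != null) {
                    save.run(); // the actual file/DB write, off the main thread
                }
                try {
                    Thread.sleep(25); // same back-off interval as the commit
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    return;
                }
            }
        }, "save-queue");
        saveThread.start();
    }

    /** Producer side: called from the main thread, returns immediately. */
    public void queueSave(Runnable save) {
        saveQueue.add(save);
    }

    /** Shutdown: stop accepting async work and wait for the queue to drain. */
    public void shutdown() throws InterruptedException {
        enabled = false;
        saveThread.join();
    }

    public static void main(String[] args) throws InterruptedException {
        SaveQueueSketch sketch = new SaveQueueSketch();
        sketch.queueSave(() -> System.out.println("saving object A"));
        sketch.queueSave(() -> System.out.println("saving object B")); // FIFO: A runs before B
        sketch.shutdown();
    }
}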
MariaDBDatabaseHandler.java

@@ -9,6 +9,8 @@ import java.sql.Statement;
 import java.util.ArrayList;
 import java.util.List;
 
+import org.bukkit.Bukkit;
+
 import com.google.gson.Gson;
 import com.google.gson.JsonSyntaxException;
 
@@ -42,6 +44,11 @@ public class MariaDBDatabaseHandler<T> extends AbstractJSONDatabaseHandler<T> {
     MariaDBDatabaseHandler(BentoBox plugin, Class<T> type, DatabaseConnector dbConnecter) {
         super(plugin, type, dbConnecter);
         connection = (Connection)dbConnecter.createConnection();
+        if (connection == null) {
+            plugin.logError("Are the settings in config.yml correct?");
+            Bukkit.getPluginManager().disablePlugin(plugin);
+            return;
+        }
         // Check if the table exists in the database and if not, create it
         createSchema();
     }
MongoDBDatabaseHandler.java

@@ -5,6 +5,7 @@ import java.util.List;
 
 import org.bson.Document;
 import org.bson.conversions.Bson;
+import org.bukkit.Bukkit;
 
 import com.google.gson.Gson;
 import com.mongodb.client.MongoCollection;
@@ -49,6 +50,11 @@ public class MongoDBDatabaseHandler<T> extends AbstractJSONDatabaseHandler<T> {
 
         // Connection to the database
         MongoDatabase database = (MongoDatabase) dbConnecter.createConnection();
+        if (database == null) {
+            plugin.logError("Are the settings in config.yml correct?");
+            Bukkit.getPluginManager().disablePlugin(plugin);
+            return;
+        }
         collection = database.getCollection(dataObject.getCanonicalName());
         IndexOptions indexOptions = new IndexOptions().unique(true);
         collection.createIndex(Indexes.text(UNIQUEID), indexOptions);
MySQLDatabaseHandler.java

@@ -8,8 +8,11 @@ import java.sql.SQLException;
 import java.sql.Statement;
 import java.util.ArrayList;
 import java.util.List;
+import java.util.Queue;
+import java.util.concurrent.ConcurrentLinkedQueue;
 
 import org.bukkit.Bukkit;
+import org.bukkit.scheduler.BukkitTask;
 
 import com.google.gson.Gson;
 import com.google.gson.JsonSyntaxException;
@@ -34,6 +37,20 @@ public class MySQLDatabaseHandler<T> extends AbstractJSONDatabaseHandler<T> {
      */
     private Connection connection;
 
+    /**
+     * FIFO queue for saves. Note that the assumption here is that most database objects will be held
+     * in memory because loading is not handled with this queue. That means that it is theoretically
+     * possible to load something before it has been saved. So, in general, load your objects and then
+     * save them async only when you do not need the data again immediately.
+     */
+    private Queue<Runnable> saveQueue;
+
+    /**
+     * Async save task that runs repeatedly
+     */
+    private BukkitTask asyncSaveTask;
+
+
     /**
      * Handles the connection to the database and creation of the initial database schema (tables) for
      * the class that will be stored.
@@ -44,8 +61,32 @@ public class MySQLDatabaseHandler<T> extends AbstractJSONDatabaseHandler<T> {
     MySQLDatabaseHandler(BentoBox plugin, Class<T> type, DatabaseConnector dbConnecter) {
         super(plugin, type, dbConnecter);
         connection = (Connection)dbConnecter.createConnection();
+        if (connection == null) {
+            plugin.logError("Are the settings in config.yml correct?");
+            Bukkit.getPluginManager().disablePlugin(plugin);
+            return;
+        }
         // Check if the table exists in the database and if not, create it
         createSchema();
+        saveQueue = new ConcurrentLinkedQueue<>();
+        if (plugin.isEnabled()) {
+            asyncSaveTask = Bukkit.getScheduler().runTaskAsynchronously(plugin, () -> {
+                // Loop continuously
+                while (plugin.isEnabled() || !saveQueue.isEmpty()) {
+                    while (!saveQueue.isEmpty()) {
+                        saveQueue.poll().run();
+                    }
+                    // Clear the queue and then sleep
+                    try {
+                        Thread.sleep(25);
+                    } catch (InterruptedException e) {
+                        plugin.logError("Thread sleep error " + e.getMessage());
+                    }
+                }
+                // Cancel
+                asyncSaveTask.cancel();
+            });
+        }
     }
 
     /**
@@ -134,8 +175,10 @@ public class MySQLDatabaseHandler<T> extends AbstractJSONDatabaseHandler<T> {
         Gson gson = getGson();
         String toStore = gson.toJson(instance);
         if (plugin.isEnabled()) {
-            Bukkit.getScheduler().runTaskAsynchronously(plugin, () -> store(instance, toStore, sb));
+            // Async
+            saveQueue.add(() -> store(instance, toStore, sb));
         } else {
+            // Sync
            store(instance, toStore, sb);
         }
     }
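Only saves go through this queue; loads still hit the database directly. The safe caller-side pattern is therefore the one the commit message describes: load an object once, keep working with the in-memory copy, and only enqueue asynchronous saves. A rough sketch of that usage follows; the cache, saver interface, and data-object names are hypothetical, not BentoBox API.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

/**
 * Caller-side sketch: load once, mutate the cached copy, queue async saves.
 * Reloading immediately after queuing a save risks reading a stale copy,
 * which is the race condition the commit message warns about.
 * All names are illustrative, not BentoBox API.
 */
public class PlayerDataCache {

    /** Stand-in for a handler whose saveObject(...) goes through the save queue. */
    interface AsyncSaver<T> {
        void queueSave(T object);
    }

    static class PlayerData {
        final String uuid;
        int deaths;
        PlayerData(String uuid) { this.uuid = uuid; }
    }

    private final Map<String, PlayerData> cache = new ConcurrentHashMap<>();
    private final AsyncSaver<PlayerData> saver;

    PlayerDataCache(AsyncSaver<PlayerData> saver) {
        this.saver = saver;
    }

    /** Load (or create) once, then always work with the cached copy. */
    PlayerData get(String uuid) {
        return cache.computeIfAbsent(uuid, PlayerData::new);
    }

    /** Mutate in memory, then queue the save; do not reload right afterwards. */
    void addDeath(String uuid) {
        PlayerData data = get(uuid);
        data.deaths++;
        saver.queueSave(data);
    }
}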
YamlDatabaseHandler.java

@@ -19,14 +19,17 @@ import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Objects;
+import java.util.Queue;
 import java.util.Set;
 import java.util.UUID;
+import java.util.concurrent.ConcurrentLinkedQueue;
 
 import org.bukkit.Bukkit;
 import org.bukkit.Location;
 import org.bukkit.World;
 import org.bukkit.configuration.MemorySection;
 import org.bukkit.configuration.file.YamlConfiguration;
+import org.bukkit.scheduler.BukkitTask;
 
 import world.bentobox.bentobox.BentoBox;
 import world.bentobox.bentobox.api.configuration.ConfigComment;
@@ -53,11 +56,26 @@ public class YamlDatabaseHandler<T> extends AbstractDatabaseHandler<T> {
 
     private static final String YML = ".yml";
 
+    /**
+     * FIFO queue for saves. Note that the assumption here is that most database objects will be held
+     * in memory because loading is not handled with this queue. That means that it is theoretically
+     * possible to load something before it has been saved. So, in general, load your objects and then
+     * save them async only when you do not need the data again immediately.
+     */
+    private Queue<Runnable> saveQueue;
+
+    /**
+     * Async save task that runs repeatedly
+     */
+    private BukkitTask asyncSaveTask;
+
     /**
      * Flag to indicate if this is a config or a pure object database (difference is in comments and annotations)
      */
     protected boolean configFlag;
+
+
 
     /**
      * Constructor
      * @param plugin - plugin
@@ -66,6 +84,25 @@ public class YamlDatabaseHandler<T> extends AbstractDatabaseHandler<T> {
      */
     YamlDatabaseHandler(BentoBox plugin, Class<T> type, DatabaseConnector databaseConnector) {
         super(plugin, type, databaseConnector);
+        saveQueue = new ConcurrentLinkedQueue<>();
+        if (plugin.isEnabled()) {
+            asyncSaveTask = Bukkit.getScheduler().runTaskAsynchronously(plugin, () -> {
+                // Loop continuously
+                while (plugin.isEnabled() || !saveQueue.isEmpty()) {
+                    while (!saveQueue.isEmpty()) {
+                        saveQueue.poll().run();
+                    }
+                    // Clear the queue and then sleep
+                    try {
+                        Thread.sleep(25);
+                    } catch (InterruptedException e) {
+                        plugin.logError("Thread sleep error " + e.getMessage());
+                    }
+                }
+                // Cancel
+                asyncSaveTask.cancel();
+            });
+        }
     }
 
     /* (non-Javadoc)
@@ -445,7 +482,7 @@ public class YamlDatabaseHandler<T> extends AbstractDatabaseHandler<T> {
         String data = config.saveToString();
         if (plugin.isEnabled()) {
             // Async
-            Bukkit.getScheduler().runTaskAsynchronously(plugin, () -> ((YamlDatabaseConnector)databaseConnector).saveYamlFile(data, path, name, yamlComments));
+            saveQueue.add(() -> ((YamlDatabaseConnector)databaseConnector).saveYamlFile(data, path, name, yamlComments));
         } else {
             // Sync for shutdown
             ((YamlDatabaseConnector)databaseConnector).saveYamlFile(data, path, name, yamlComments);
CleanSuperFlatListener.java

@@ -3,7 +3,7 @@
  */
 package world.bentobox.bentobox.listeners.flags;
 
-import java.util.ArrayDeque;
+import java.util.LinkedList;
 import java.util.Queue;
 
 import org.bukkit.Bukkit;
@@ -29,7 +29,7 @@ import world.bentobox.bentobox.util.Pair;
  */
 public class CleanSuperFlatListener extends FlagListener {
 
-    private Queue<Pair<Integer, Integer>> chunkQ = new ArrayDeque<>();
+    private Queue<Pair<Integer, Integer>> chunkQ = new LinkedList<>();
     private BukkitTask task;
     private boolean ready;
 
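The listener's chunk queue now uses a LinkedList instead of an ArrayDeque. The commit does not say why, but one observable difference between the two Queue implementations is null handling: ArrayDeque rejects null elements outright, while LinkedList accepts them. A small illustrative snippet:

import java.util.ArrayDeque;
import java.util.LinkedList;
import java.util.Queue;

/** Demonstrates one behavioral difference between LinkedList and ArrayDeque as Queues. */
public class QueueNullDemo {
    public static void main(String[] args) {
        Queue<String> linked = new LinkedList<>();
        linked.add(null);                  // LinkedList permits null elements
        System.out.println(linked.poll()); // prints "null"

        Queue<String> deque = new ArrayDeque<>();
        try {
            deque.add(null);               // ArrayDeque throws NullPointerException
        } catch (NullPointerException e) {
            System.out.println("ArrayDeque rejected null");
        }
    }
}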
config.yml

@@ -23,7 +23,7 @@ general:
     host: localhost
     # Port 3306 is MySQL's default. Port 27017 is MongoDB's default.
     port: 3306
-    name: BSkyBlock
+    name: bentobox
     username: username
     password: password
     # How often the data will be saved to file in mins. Default is 5 minutes.