Fixes bug with concurrent saving of files to the database.

Implemented a FIFO to queue async save requests for YAML and MySQL.

The assumption is that most database objects will be held in memory,
because the ordering of object loading is not coordinated with the save
queue. That means it is theoretically possible for the main thread to
load an object before the async thread has finished saving it. In
general, load objects once and avoid reloading them unless enough time
has passed for the pending async save to complete. For most situations
this race condition should not occur, but be aware that it exists.

Saving on shutdown is not async and takes as long as it takes.

Also added some defensive code to disable the plugin if the database
connection info is wrong and print an error message instead of lots of
errors.

Changed CleanSuperFlatListener to use a LinkedList for its queue.
This commit is contained in:
tastybento 2019-01-13 22:17:36 -08:00
parent 5ed86ea271
commit 044c67ea28
6 changed files with 98 additions and 5 deletions

View File

@ -9,6 +9,8 @@ import java.sql.Statement;
import java.util.ArrayList;
import java.util.List;
import org.bukkit.Bukkit;
import com.google.gson.Gson;
import com.google.gson.JsonSyntaxException;
@ -42,6 +44,11 @@ public class MariaDBDatabaseHandler<T> extends AbstractJSONDatabaseHandler<T> {
MariaDBDatabaseHandler(BentoBox plugin, Class<T> type, DatabaseConnector dbConnecter) {
    super(plugin, type, dbConnecter);
    connection = (Connection) dbConnecter.createConnection();
    if (connection != null) {
        // Connection established - make sure the backing table exists
        createSchema();
    } else {
        // Could not connect - most likely a configuration problem, so shut the plugin
        // down cleanly instead of spamming errors on every database call
        plugin.logError("Are the settings in config.yml correct?");
        Bukkit.getPluginManager().disablePlugin(plugin);
    }
}

View File

@ -5,6 +5,7 @@ import java.util.List;
import org.bson.Document;
import org.bson.conversions.Bson;
import org.bukkit.Bukkit;
import com.google.gson.Gson;
import com.mongodb.client.MongoCollection;
@ -49,6 +50,11 @@ public class MongoDBDatabaseHandler<T> extends AbstractJSONDatabaseHandler<T> {
// Connection to the database
MongoDatabase database = (MongoDatabase) dbConnecter.createConnection();
if (database == null) {
plugin.logError("Are the settings in config.yml correct?");
Bukkit.getPluginManager().disablePlugin(plugin);
return;
}
collection = database.getCollection(dataObject.getCanonicalName());
IndexOptions indexOptions = new IndexOptions().unique(true);
collection.createIndex(Indexes.text(UNIQUEID), indexOptions);

View File

@ -8,8 +8,11 @@ import java.sql.SQLException;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.List;
import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
import org.bukkit.Bukkit;
import org.bukkit.scheduler.BukkitTask;
import com.google.gson.Gson;
import com.google.gson.JsonSyntaxException;
@ -34,6 +37,20 @@ public class MySQLDatabaseHandler<T> extends AbstractJSONDatabaseHandler<T> {
*/
private Connection connection;
/**
* FIFO queue for saves. Note that the assumption here is that most database objects will be held
* in memory because loading is not handled with this queue. That means that it is theoretically
* possible to load something before it has been saved. So, in general, load your objects and then
* save them async only when you do not need the data again immediately.
*/
private Queue<Runnable> saveQueue;
/**
* Async save task that runs repeatedly
*/
private BukkitTask asyncSaveTask;
/**
* Handles the connection to the database and creation of the initial database schema (tables) for
* the class that will be stored.
@ -44,8 +61,32 @@ public class MySQLDatabaseHandler<T> extends AbstractJSONDatabaseHandler<T> {
/**
 * Connects to the database, creates the schema if required, and starts the
 * async FIFO drain task that processes queued save requests.
 * Disables the plugin if no connection can be made.
 *
 * @param plugin - BentoBox plugin
 * @param type - the type of class to be stored in the database
 * @param dbConnecter - the database connector
 */
MySQLDatabaseHandler(BentoBox plugin, Class<T> type, DatabaseConnector dbConnecter) {
    super(plugin, type, dbConnecter);
    // Initialize the queue before any early return so saveObject can never see a null queue
    saveQueue = new ConcurrentLinkedQueue<>();
    connection = (Connection)dbConnecter.createConnection();
    if (connection == null) {
        plugin.logError("Are the settings in config.yml correct?");
        Bukkit.getPluginManager().disablePlugin(plugin);
        return;
    }
    // Check if the table exists in the database and if not, create it
    createSchema();
    if (plugin.isEnabled()) {
        asyncSaveTask = Bukkit.getScheduler().runTaskAsynchronously(plugin, () -> {
            // Keep draining until the plugin is disabled AND the queue is empty,
            // so saves queued during shutdown are not lost
            while (plugin.isEnabled() || !saveQueue.isEmpty()) {
                while (!saveQueue.isEmpty()) {
                    saveQueue.poll().run();
                }
                // Queue is empty - sleep briefly before polling again
                try {
                    Thread.sleep(25);
                } catch (InterruptedException e) {
                    plugin.logError("Thread sleep error " + e.getMessage());
                    // Restore the interrupt status and stop draining - interruption
                    // is a shutdown signal, not something we can recover from here
                    Thread.currentThread().interrupt();
                    break;
                }
            }
            // Cancel - a no-op if the task is finishing naturally, but explicit
            asyncSaveTask.cancel();
        });
    }
}
/**
@ -134,8 +175,10 @@ public class MySQLDatabaseHandler<T> extends AbstractJSONDatabaseHandler<T> {
Gson gson = getGson();
String toStore = gson.toJson(instance);
if (plugin.isEnabled()) {
Bukkit.getScheduler().runTaskAsynchronously(plugin, () -> store(instance, toStore, sb));
// Async
saveQueue.add(() -> store(instance, toStore, sb));
} else {
// Sync
store(instance, toStore, sb);
}
}

View File

@ -19,14 +19,17 @@ import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Objects;
import java.util.Queue;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.ConcurrentLinkedQueue;
import org.bukkit.Bukkit;
import org.bukkit.Location;
import org.bukkit.World;
import org.bukkit.configuration.MemorySection;
import org.bukkit.configuration.file.YamlConfiguration;
import org.bukkit.scheduler.BukkitTask;
import world.bentobox.bentobox.BentoBox;
import world.bentobox.bentobox.api.configuration.ConfigComment;
@ -53,11 +56,26 @@ public class YamlDatabaseHandler<T> extends AbstractDatabaseHandler<T> {
private static final String YML = ".yml";
/**
* FIFO queue for saves. Note that the assumption here is that most database objects will be held
* in memory because loading is not handled with this queue. That means that it is theoretically
* possible to load something before it has been saved. So, in general, load your objects and then
* save them async only when you do not need the data again immediately.
*/
private Queue<Runnable> saveQueue;
/**
* Async save task that runs repeatedly
*/
private BukkitTask asyncSaveTask;
/**
* Flag to indicate if this is a config or a pure object database (difference is in comments and annotations)
*/
protected boolean configFlag;
/**
* Constructor
* @param plugin - plugin
@ -66,6 +84,25 @@ public class YamlDatabaseHandler<T> extends AbstractDatabaseHandler<T> {
*/
/**
 * Constructor. Starts the async FIFO drain task that processes queued
 * YAML save requests while the plugin is enabled.
 *
 * @param plugin - BentoBox plugin
 * @param type - the type of class to be stored in the database
 * @param databaseConnector - the database connector
 */
YamlDatabaseHandler(BentoBox plugin, Class<T> type, DatabaseConnector databaseConnector) {
    super(plugin, type, databaseConnector);
    saveQueue = new ConcurrentLinkedQueue<>();
    if (plugin.isEnabled()) {
        asyncSaveTask = Bukkit.getScheduler().runTaskAsynchronously(plugin, () -> {
            // Keep draining until the plugin is disabled AND the queue is empty,
            // so saves queued during shutdown are not lost
            while (plugin.isEnabled() || !saveQueue.isEmpty()) {
                while (!saveQueue.isEmpty()) {
                    saveQueue.poll().run();
                }
                // Queue is empty - sleep briefly before polling again
                try {
                    Thread.sleep(25);
                } catch (InterruptedException e) {
                    plugin.logError("Thread sleep error " + e.getMessage());
                    // Restore the interrupt status and stop draining - interruption
                    // is a shutdown signal, not something we can recover from here
                    Thread.currentThread().interrupt();
                    break;
                }
            }
            // Cancel - a no-op if the task is finishing naturally, but explicit
            asyncSaveTask.cancel();
        });
    }
}
/* (non-Javadoc)
@ -445,7 +482,7 @@ public class YamlDatabaseHandler<T> extends AbstractDatabaseHandler<T> {
String data = config.saveToString();
if (plugin.isEnabled()) {
// Async
Bukkit.getScheduler().runTaskAsynchronously(plugin, () -> ((YamlDatabaseConnector)databaseConnector).saveYamlFile(data, path, name, yamlComments));
saveQueue.add(() -> ((YamlDatabaseConnector)databaseConnector).saveYamlFile(data, path, name, yamlComments));
} else {
// Sync for shutdown
((YamlDatabaseConnector)databaseConnector).saveYamlFile(data, path, name, yamlComments);

View File

@ -3,7 +3,7 @@
*/
package world.bentobox.bentobox.listeners.flags;
import java.util.ArrayDeque;
import java.util.LinkedList;
import java.util.Queue;
import org.bukkit.Bukkit;
@ -29,7 +29,7 @@ import world.bentobox.bentobox.util.Pair;
*/
public class CleanSuperFlatListener extends FlagListener {
private Queue<Pair<Integer, Integer>> chunkQ = new ArrayDeque<>();
private Queue<Pair<Integer, Integer>> chunkQ = new LinkedList<>();
private BukkitTask task;
private boolean ready;

View File

@ -23,7 +23,7 @@ general:
host: localhost
# Port 3306 is MySQL's default. Port 27017 is MongoDB's default.
port: 3306
name: BSkyBlock
name: bentobox
username: username
password: password
# How often the data will be saved to file in mins. Default is 5 minutes.