Speed up database loading
Turns out that slurping a 14MB file (offsets.sdb) during initialization added 30s to the production server startup time. On my development box, which has an old but real direct read-write SSD, the cost of doing this is negligible. This patch simply memory-maps the offsets file before reading, so now we're back to reasonable startup times in production.
- Id
- 18977bad80fc0a73a4c843ad1d4e16c43d78c4b0
- Author
- Caio
- Commit time
- 2019-05-22T08:53:14+02:00
Modified src/main/java/co/caio/cerberus/db/SimpleRecipeMetadataDatabase.java
package co.caio.cerberus.db;
+import co.caio.cerberus.db.RecipeMetadataDatabase.RecipeMetadataDbException;
import co.caio.cerberus.flatbuffers.FlatRecipe;
import com.carrotsearch.hppc.LongIntHashMap;
import java.io.FileNotFoundException;
throw new RecipeMetadataDbException("Not a directory: " + baseDir);
}
- try (var raf = new RandomAccessFile(baseDir.resolve(FILE_OFFSETS).toFile(), "r")) {
+ var offsetsPath = baseDir.resolve(FILE_OFFSETS);
+ try (var raf = new RandomAccessFile(offsetsPath.toFile(), "r")) {
- int size = raf.readInt();
+ var mapped = raf.getChannel().map(MapMode.READ_ONLY, 0, Files.size(offsetsPath));
+
+ int size = mapped.getInt();
if (size < 0) {
throw new RecipeMetadataDbException("Invalid offsets file length");
}
idToOffset = new LongIntHashMap(size);
while (size-- > 0) {
- idToOffset.put(raf.readLong(), raf.readInt());
+ idToOffset.put(mapped.getLong(), mapped.getInt());
}
} catch (IOException e) {