// destress.js (lokijs example) — source retrieved via UNPKG (3.54 kB, JavaScript).
// This script can be used to stress the ability of loki to load large databases.
// I have found that within most javascript engines there seem to be memory
// constraints and inefficiencies involved with using JSON.stringify.
//
// One way to limit memory overhead is to serialize smaller objects rather than
// one large (single) JSON.stringify of the whole database. Loki has added
// functionality to stream output of the database rather than saving a whole
// database as a single string.
//
// This destress script can be used to analyse memory overhead for loading a
// database created by the stress.js script. Both stress.js and destress.js need
// to be configured to use the same serialization method and adapter. By default,
// this is configured to use the loki-fs-structured-adapter which will
// stream output and input.
// Load loki and the structured file-system adapter used for streamed I/O.
var loki = require('../src/lokijs.js');
var lfsa = require('../src/loki-fs-structured-adapter.js');

var db;
var start, end;

// #
// # Choose -one- method of serialization and make sure to match in destress.js
// #

//var mode = "fs-normal";
//var mode = "fs-structured";
//var mode = "fs-partitioned";
var mode = "fs-structured-partitioned";

// Persistence adapter matching the chosen serialization mode.
// (Declared once; the previous duplicate `var adapter = new lfsa();` was dead
// code — the switch below always assigns the final value.)
var adapter;

switch (mode) {
  case "fs-normal":
  case "fs-structured": adapter = new loki.LokiFsAdapter(); break;
  case "fs-partitioned": adapter = new loki.LokiPartitioningAdapter(new loki.LokiFsAdapter()); break;
  case "fs-structured-partitioned": adapter = new lfsa(); break;
  default: adapter = new loki.LokiFsAdapter(); break;
}

console.log(mode);
// Time a fresh (auto)load of the sandbox database from disk. The elapsed time
// is captured via the module-level `start` and reported by dbLoaded().
function reloadDatabase() {
  start = process.hrtime();

  // Autoload triggers the adapter's (possibly streamed) read; dbLoaded fires
  // once the database has been fully reconstructed.
  var options = {
    verbose: true,
    autoload: true,
    autoloadCallback: dbLoaded,
    adapter: adapter
  };

  db = new loki('sandbox.db', options);
}
55
/**
 * Format a byte count as a human-readable string using decimal (SI) units.
 *
 * @param {number} bytes - Number of bytes (non-negative).
 * @param {number} [decimals] - Extra precision; the value is rounded to
 *   decimals + 1 fractional places. Defaults to 3 places when omitted
 *   (previously this relied on `NaN || 3` coercion).
 * @returns {string} e.g. "1.234 KB"
 */
function formatBytes(bytes, decimals) {
  if (bytes === 0) return '0 Byte';
  var k = 1000; // use 1024 here for binary (KiB/MiB) units
  var dm = (typeof decimals === 'number') ? decimals + 1 : 3;
  var sizes = ['Bytes', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB'];
  var i = Math.floor(Math.log(bytes) / Math.log(k));
  return parseFloat((bytes / Math.pow(k, i)).toFixed(dm)) + ' ' + sizes[i];
}
64
// Print the current process memory stats (rss / heap) behind a caller-supplied
// prefix, with each figure formatted as a human-readable SI size.
function logMemoryUsage(msg) {
  var mem = process.memoryUsage();
  var parts = [
    "rss : " + formatBytes(mem.rss),
    "heapTotal : " + formatBytes(mem.heapTotal),
    "heapUsed : " + formatBytes(mem.heapUsed)
  ];
  console.log(msg + " > " + parts.join(" "));
}
69
// Autoload callback: report elapsed load time, the total document count across
// all collections, and the post-load memory footprint.
function dbLoaded() {
  end = process.hrtime(start);
  console.info("database loaded... time : %ds %dms", end[0], end[1]/1000000);

  var doccount = 0;
  for (var cidx = 0; cidx < db.collections.length; cidx++) {
    doccount += db.collections[cidx].data.length;
  }

  logMemoryUsage("After loading database : ");
  console.log('number of docs in items collection(s) : ' + doccount);

  // if you want to verify that only dirty collections are saved (and thus faster), uncomment line below
  //dirtyCollAndSaveDatabase();
}
84
// Insert one document into the first collection, then save and time the write.
// With a partitioned adapter only the dirty collection (plus the db skeleton)
// should be rewritten, so this save should be comparatively fast.
function dirtyCollAndSaveDatabase() {
  var start = process.hrtime();
  var end;

  // dirty up a collection and save to see if just that collection (along with db) gets written
  db.collections[0].insert({ a: 1, b: 2 });

  db.saveDatabase(function (err) {
    if (err === null) {
      console.log('finished saving database');
      logMemoryUsage("after database save : ");
      end = process.hrtime(start);
      console.info("database save time : %ds %dms", end[0], end[1]/1000000);
    } else {
      console.log('error encountered saving database : ' + err);
    }
  });
}
104
// Entry point: snapshot baseline memory usage, then kick off the timed reload.
logMemoryUsage("Before loading database : ");
reloadDatabase();