1 | "use strict";
|
2 |
|
3 | Object.defineProperty(exports, "__esModule", {
|
4 | value: true
|
5 | });
|
6 | exports.generateVmdkData = generateVmdkData;
|
7 | var assert = _interopRequireWildcard(require("assert"));
|
8 | var _zlib = _interopRequireDefault(require("zlib"));
|
9 | var _definitions = require("./definitions");
|
// Babel helper: expose a CommonJS export as an ES-module-shaped namespace.
// A real ES module (flagged with `__esModule`) is passed through unchanged;
// anything else is wrapped as the `default` export.
function _interopRequireDefault(obj) {
  if (obj && obj.__esModule) {
    return obj;
  }
  return { default: obj };
}
|
// Babel helper: lazily create the two namespace caches (one for Babel-style
// interop, one for Node-style) and replace this function with a fast lookup.
// Returns null when WeakMap is unavailable (caching disabled).
function _getRequireWildcardCache(nodeInterop) {
  if (typeof WeakMap !== "function") return null;
  var cacheBabelInterop = new WeakMap();
  var cacheNodeInterop = new WeakMap();
  _getRequireWildcardCache = function (nodeInterop) {
    return nodeInterop ? cacheNodeInterop : cacheBabelInterop;
  };
  return _getRequireWildcardCache(nodeInterop);
}
|
// Babel helper: build (and cache) an ES-module-shaped namespace object for a
// CommonJS module, copying own enumerable properties and preserving
// getters/setters via property descriptors. ES modules pass through as-is;
// primitives are wrapped as `{ default: value }`.
function _interopRequireWildcard(e, r) {
  if (!r && e && e.__esModule) return e;
  if (e === null || (typeof e !== "object" && typeof e !== "function")) {
    return { default: e };
  }
  var cache = _getRequireWildcardCache(r);
  if (cache && cache.has(e)) return cache.get(e);
  var ns = { __proto__: null };
  var canDescribe = Object.defineProperty && Object.getOwnPropertyDescriptor;
  for (var key in e) {
    if (key !== "default" && Object.prototype.hasOwnProperty.call(e, key)) {
      var desc = canDescribe ? Object.getOwnPropertyDescriptor(e, key) : null;
      if (desc && (desc.get || desc.set)) {
        Object.defineProperty(ns, key, desc);
      } else {
        ns[key] = e[key];
      }
    }
  }
  ns.default = e;
  if (cache) cache.set(e, ns);
  return ns;
}
|
// Round a byte count up to the next whole-sector boundary.
const roundToSector = value =>
  _definitions.SECTOR_SIZE * Math.ceil(value / _definitions.SECTOR_SIZE);
|
/**
 * Generate a streamOptimized VMDK image as a lazy stream of sector-aligned
 * buffers.
 *
 * @param {string} diskName - extent name written into the descriptor.
 * @param {number} diskCapacityBytes - virtual disk capacity in bytes.
 * @param {number} blockSizeBytes - size of each incoming block; must be a
 *   non-zero multiple of the VMDK grain size (throws otherwise).
 * @param {AsyncIterable} blockGenerator - yields `{ lba, block }` objects
 *   (byte LBA + Buffer of blockSizeBytes).
 * @param {object} [geometry] - CHS geometry written into the descriptor DDB.
 * @param {number} [dataSize] - when given, the data area is zero-padded so
 *   the final file size equals `dataSize + metadataSize` exactly.
 * @returns {{iterator: AsyncGenerator<Buffer>, metadataSize: number}}
 */
async function generateVmdkData(diskName, diskCapacityBytes, blockSizeBytes, blockGenerator, geometry = {
  sectorsPerTrackCylinder: 63,
  heads: 16,
  cylinders: 10402
}, dataSize) {
  // Random 32-bit content ID stamped into the descriptor.
  const cid = Math.floor(Math.random() * Math.pow(2, 32));
  const diskCapacitySectors = Math.ceil(diskCapacityBytes / _definitions.SECTOR_SIZE);
  // Plain-text VMDK descriptor embedded right after the header.
  const descriptor = `# Disk DescriptorFile
version=1
CID=${cid}
parentCID=ffffffff
createType="streamOptimized"
# Extent description
RW ${diskCapacitySectors} SPARSE "${diskName}"
# The Disk Data Base
#DDB
ddb.adapterType = "ide"
ddb.geometry.sectors = "${geometry.sectorsPerTrackCylinder}"
ddb.geometry.heads = "${geometry.heads}"
ddb.geometry.cylinders = "${geometry.cylinders}"
`;
  const utf8Descriptor = Buffer.from(descriptor, 'utf8');
  // NOTE(review): 10 extra sectors of slack beyond the descriptor text —
  // presumably headroom in the descriptor area; confirm against the header
  // layout in ./definitions.
  const descriptorSizeSectors = Math.ceil(utf8Descriptor.length / _definitions.SECTOR_SIZE) + 10;
  // Descriptor text zero-padded to the full reserved sector count.
  const descriptorBuffer = Buffer.alloc(descriptorSizeSectors * _definitions.SECTOR_SIZE);
  utf8Descriptor.copy(descriptorBuffer);
  const headerData = (0, _definitions.createStreamOptimizedHeader)(diskCapacitySectors, descriptorSizeSectors);
  const parsedHeader = (0, _definitions.unpackHeader)(headerData.buffer);
  const grainSizeBytes = parsedHeader.grainSizeSectors * _definitions.SECTOR_SIZE;
  // Each incoming block is sliced into whole grains, so the block size must
  // be a non-zero multiple of the grain size.
  if (blockSizeBytes % grainSizeBytes !== 0 || blockSizeBytes === 0) {
    throw new Error(`createReadableVmdkStream can only accept block size multiple of ${grainSizeBytes}, got ${blockSizeBytes}`);
  }
  const grainTableEntries = headerData.grainTableEntries;
  // Flat grain table: one 32-bit sector offset per grain. Entries for blank
  // grains stay 0 (sparse); others are filled in as grains are streamed out.
  const tableBuffer = Buffer.alloc(grainTableEntries * 4);
  // Byte offset of the next chunk in the output stream; every emitted chunk
  // must keep it sector-aligned (asserted in track()).
  let streamPosition = 0;
  // Stream offset (bytes) of the most recently written grain directory;
  // updated as a side effect of createDirectoryBuffer().
  let directoryOffset = 0;
  const endMetadataLength = computeEndMetadataLength();
  // Total non-data bytes: header + descriptor + all trailing metadata.
  const metadataSize = headerData.buffer.length + descriptorBuffer.length + endMetadataLength;
  // Account for a chunk in the running stream position and pass it through.
  function track(buffer) {
    assert.equal(streamPosition % _definitions.SECTOR_SIZE, 0);
    if (buffer.length > 0) {
      streamPosition += buffer.length;
    }
    return buffer;
  }
  // Build a one-sector marker: 8-byte value 0, 4-byte size 0, 4-byte type.
  function createEmptyMarker(type) {
    const buff = Buffer.alloc(_definitions.SECTOR_SIZE);
    buff.writeBigUInt64LE(BigInt(0), 0);
    buff.writeUInt32LE(0, 8);
    buff.writeUInt32LE(type, 12);
    return buff;
  }
  // Build a grain directory whose entries point at consecutive grain tables
  // starting at `tablePosition` (byte offset). Records where this directory
  // lands in the stream via the `directoryOffset` side effect.
  function createDirectoryBuffer(grainDirectoryEntries, tablePosition) {
    const OFFSET_SIZE = 4;
    directoryOffset = streamPosition;
    const buff = Buffer.alloc(roundToSector(grainDirectoryEntries * OFFSET_SIZE));
    for (let i = 0; i < grainDirectoryEntries; i++) {
      buff.writeUInt32LE((tablePosition + i * parsedHeader.numGTEsPerGT * OFFSET_SIZE) / _definitions.SECTOR_SIZE, i * OFFSET_SIZE);
    }
    return buff;
  }
  // True when every byte is zero (grain can be left sparse).
  function bufferIsBlank(buffer) {
    for (const b of buffer) {
      if (b !== 0) {
        return false;
      }
    }
    return true;
  }
  // Deflate one grain and prepend its 12-byte marker (8-byte LBA in sectors +
  // 4-byte compressed length); result is zero-padded to a sector boundary.
  function createMarkedGrain(lbaBytes, buffer) {
    assert.strictEqual(buffer.length, grainSizeBytes);
    assert.strictEqual(lbaBytes % grainSizeBytes, 0);
    const markerOverHead = 12;
    const compressed = _zlib.default.deflateSync(buffer, {
      level: _zlib.default.constants.Z_BEST_SPEED
    });
    const outputBuffer = Buffer.alloc(roundToSector(markerOverHead + compressed.length));
    compressed.copy(outputBuffer, markerOverHead);
    outputBuffer.writeBigUInt64LE(BigInt(lbaBytes / _definitions.SECTOR_SIZE), 0);
    outputBuffer.writeUInt32LE(compressed.length, 8);
    return outputBuffer;
  }
  // Split one block into grains; emit only non-blank grains, recording each
  // grain's stream position (in sectors) in the grain table.
  async function* emitBlock(blockLbaBytes, buffer, grainSizeBytes) {
    assert.strictEqual(buffer.length % grainSizeBytes, 0);
    const grainCount = buffer.length / grainSizeBytes;
    for (let i = 0; i < grainCount; i++) {
      const grainLbaBytes = blockLbaBytes + i * grainSizeBytes;
      const tableIndex = grainLbaBytes / grainSizeBytes;
      const grainData = buffer.slice(i * grainSizeBytes, (i + 1) * grainSizeBytes);
      if (!bufferIsBlank(grainData)) {
        tableBuffer.writeUInt32LE(streamPosition / _definitions.SECTOR_SIZE, tableIndex * 4);
        yield track(createMarkedGrain(grainLbaBytes, grainData));
      }
    }
  }
  // Drain the caller's block generator through emitBlock.
  async function* emitBlocks(grainSize, blockGenerator) {
    for await (const b of blockGenerator) {
      yield* emitBlock(b.lba, b.block, grainSize);
    }
  }
  // Size of everything emitted after the data area, mirroring iterator():
  // (GT marker + grain table + GD marker + grain directory) twice —
  // redundant and primary copies — then footer marker + footer + EOS marker.
  function computeEndMetadataLength() {
    return _definitions.SECTOR_SIZE + roundToSector(tableBuffer.length) + _definitions.SECTOR_SIZE + roundToSector(headerData.grainDirectoryEntries * 4) + _definitions.SECTOR_SIZE + roundToSector(tableBuffer.length) + _definitions.SECTOR_SIZE + roundToSector(headerData.grainDirectoryEntries * 4) + _definitions.SECTOR_SIZE + _definitions.SECTOR_SIZE + _definitions.SECTOR_SIZE;
  }
  // Zero-pad the data area so the finished file hits exactly
  // dataSize + metadataSize; no-op when dataSize was not supplied.
  function* padding() {
    if (dataSize === undefined) {
      return;
    }
    const targetSize = dataSize + metadataSize;
    let remaining = targetSize - streamPosition - endMetadataLength;
    if (remaining < 0) {
      // The compressed stream already exceeded the precalculated budget.
      throw new Error(`vmdk is bigger than precalculed size`);
    }
    // Emit zeros in 1 MiB chunks, then one final chunk for the remainder.
    const size = 1024 * 1024;
    const fullBuffer = Buffer.alloc(size, 0);
    while (remaining > size) {
      yield track(fullBuffer);
      remaining -= size;
    }
    yield track(Buffer.alloc(remaining));
  }
  // Full file layout: header, descriptor, compressed grains, optional
  // padding, redundant GT/GD, primary GT/GD, footer, end-of-stream marker.
  async function* iterator() {
    yield track(headerData.buffer);
    yield track(descriptorBuffer);
    yield* emitBlocks(grainSizeBytes, blockGenerator);
    yield* padding();
    // First (redundant) copy of the grain table + directory.
    yield track(createEmptyMarker(_definitions.MARKER_GT));
    let tableOffset = streamPosition;
    yield track(tableBuffer);
    yield track(createEmptyMarker(_definitions.MARKER_GD));
    yield track(createDirectoryBuffer(headerData.grainDirectoryEntries, tableOffset));
    // Remember the redundant directory's position for the footer.
    const rDirectoryOffset = directoryOffset;
    // Second (primary) copy of the grain table + directory.
    yield track(createEmptyMarker(_definitions.MARKER_GT));
    tableOffset = streamPosition;
    yield track(tableBuffer);
    yield track(createEmptyMarker(_definitions.MARKER_GD));
    yield track(createDirectoryBuffer(headerData.grainDirectoryEntries, tableOffset));
    yield track(createEmptyMarker(_definitions.MARKER_FOOTER));
    // Footer repeats the header, now with real directory offsets (in
    // sectors): primary from the second directory, redundant from the first.
    const footer = (0, _definitions.createStreamOptimizedHeader)(diskCapacitySectors, descriptorSizeSectors, directoryOffset / _definitions.SECTOR_SIZE, rDirectoryOffset / _definitions.SECTOR_SIZE);
    yield track(footer.buffer);
    yield track(createEmptyMarker(_definitions.MARKER_EOS));
  }
  return {
    iterator: iterator(),
    metadataSize
  };
}
|
159 |
|
\ | No newline at end of file |