// Type definitions for @lbu/store — bundled .d.ts (originally viewed via UNPKG).
import * as minioVendor from "minio";
import * as postgresVendor from "postgres";
import { queries } from "./src/generated/index.js";
4
/**
 * Re-export of the whole "minio" package, so consumers of this package do not need a
 * direct dependency on it.
 */
export const minio: typeof minioVendor;

/**
 * Re-export of the whole "postgres" package, so consumers of this package do not need a
 * direct dependency on it.
 */
export const postgres: typeof postgresVendor;

/**
 * Postgres client type as used throughout this package.
 * NOTE(review): `Sql<{}>` uses the empty-object type parameter; confirm no custom type
 * mappings are expected by callers.
 */
export type Postgres = postgresVendor.Sql<{}>;
16
/**
 * Create a new minio client.
 * By default reads configuration from environment variables as specified in docs/env.md
 *
 * @param options Passed through to the underlying minio Client constructor.
 */
export function newMinioClient(
  options: minioVendor.ClientOptions,
): minioVendor.Client;

/**
 * Create a bucket if it doesn't exist.
 *
 * @param minio Client used to check and create the bucket
 * @param bucketName Name of the bucket to ensure
 * @param region Region used when the bucket has to be created
 */
export function ensureBucket(
  minio: minioVendor.Client,
  bucketName: string,
  region: minioVendor.Region,
): Promise<void>;

/**
 * Remove a bucket.
 * NOTE(review): presumably the bucket must already be empty — use
 * `removeBucketAndObjectsInBucket` otherwise; confirm against the implementation.
 */
export function removeBucket(
  minio: minioVendor.Client,
  bucketName: string,
): Promise<void>;

/**
 * List all objects in a bucket.
 * Note that this is not a fast operation, since it collects the full listing in memory.
 *
 * @param filter Optional prefix filter passed to the listing call
 */
export function listObjects(
  minio: minioVendor.Client,
  bucketName: string,
  filter?: string,
): Promise<
  {
    name: string;
    prefix: string;
    size: number;
    etag: string;
    lastModified: Date;
  }[]
>;

/**
 * Copy all objects from sourceBucket to destination bucket.
 */
export function copyAllObjects(
  minio: minioVendor.Client,
  sourceBucket: string,
  destinationBucket: string,
): Promise<void>;

/**
 * Removes all objects and then deletes the bucket itself.
 */
export function removeBucketAndObjectsInBucket(
  minio: minioVendor.Client,
  bucketName: string,
): Promise<void>;
76
/**
 * Create a new postgres client.
 * By default reads configuration from environment variables as specified in docs/env.md
 *
 * Will attempt database creation if `createIfNotExists` is set to true.
 *
 * @param opts Options forwarded to the underlying postgres client, plus the
 *   `createIfNotExists` flag handled by this package.
 */
export function newPostgresConnection(
  opts?: postgresVendor.Options<{}> & { createIfNotExists?: boolean },
): Promise<Postgres>;

/**
 * Set the test database template.
 * New createTestPostgresDatabase calls will use this as a template,
 * so things like seeding only need to happen once.
 *
 * @param databaseNameOrConnection Either an open connection or a database name
 */
export function setPostgresDatabaseTemplate(
  databaseNameOrConnection: Postgres | string,
): Promise<void>;

/**
 * Cleanup the template database that was set with `setPostgresDatabaseTemplate`.
 */
export function cleanupPostgresDatabaseTemplate(): Promise<void>;

/**
 * Drops connections to 'normal' database and creates a new one based on the 'normal' database.
 * It will truncate all tables and return a connection to the new database.
 *
 * @param verboseSql When true, presumably enables query logging — TODO confirm
 */
export function createTestPostgresDatabase(
  verboseSql?: boolean,
): Promise<Postgres>;

/**
 * Drop the test database created with `createTestPostgresDatabase` and end the connection.
 */
export function cleanupTestPostgresDatabase(sql: Postgres): Promise<void>;
113
/**
 * Internal representation of a migration file on disk.
 */
export interface MigrateFile {
  // Logical grouping the migration belongs to (e.g. the package name)
  namespace: string;
  // Ordering number parsed from the file name
  number: number;
  // Repeatable migrations may run more than once (e.g. when their hash changes)
  repeatable: boolean;
  name: string;
  fullPath: string;
  // Whether this file was already applied according to stored state
  isMigrated: boolean;
  // Raw SQL source of the file
  source: string;
  // Hash of `source`, used to detect changed migrations
  hash: string;
}

/**
 * Information used for doing migrations.
 */
export interface MigrateContext {
  files: MigrateFile[];
  namespaces: string[];
  // Previously applied hashes, keyed per migration — used to detect changes
  storedHashes: Record<string, string>;
  sql: Postgres;
}

/**
 * Create a new MigrateContext, requires an advisory lock and does the necessary queries to
 * get the state.
 */
export function newMigrateContext(sql: Postgres): Promise<MigrateContext>;

/**
 * Get a list of migrations to be applied.
 * Returns false if no migrations need to run.
 */
export function getMigrationsToBeApplied(
  mc: MigrateContext,
): false | { name: string; number: number; repeatable: boolean }[];

/**
 * Run the migrations, as returned by `getMigrationsToBeApplied`.
 */
export function runMigrations(mc: MigrateContext): Promise<void>;
156
/**
 * Database representation of a stored file.
 */
export interface StoreFile {
  id: string;
  bucketName: string;
  contentLength: number;
  contentType: string;
  name: string;
  createdAt: Date;
  updatedAt: Date;
}

/**
 * Create or update a file.
 * If you pass in a non-existent id, the function will not error, but also not update the
 * file.
 *
 * @param props File metadata; `createdAt` / `updatedAt` are strings here (presumably
 *   ISO date strings — TODO confirm), unlike the `Date` fields on StoreFile
 * @param streamOrPath Either a path on local disk or a readable stream with the contents
 */
export function createOrUpdateFile(
  sql: Postgres,
  minio: minioVendor.Client,
  bucketName: string,
  props: {
    id?: string;
    bucketName?: string;
    contentLength?: number;
    contentType?: string;
    name: string;
    createdAt?: string;
    updatedAt?: string;
  },
  streamOrPath: string | NodeJS.ReadStream,
): Promise<StoreFile>;
187
188/**
189 * Sync deleted files to the minio bucket
190 */
191export function syncDeletedFiles(
192 sql: Postgres,
193 minio: minioVendor.Client,
194 bucketName: string,
195);
196
/**
 * Create a file copy both in postgres and in minio.
 *
 * @param targetBucket Defaults to copying within `bucketName` when omitted —
 *   TODO confirm against the implementation
 */
export function copyFile(
  sql: Postgres,
  minio: minioVendor.Client,
  bucketName: string,
  id: string,
  targetBucket?: string,
): Promise<StoreFile>;

/**
 * Open a ReadStream for a (partial) file.
 *
 * @param range Optional byte range to read instead of the whole object
 */
export function getFileStream(
  minio: minioVendor.Client,
  bucketName: string,
  id: string,
  range?: { start?: number; end?: number },
): Promise<NodeJS.ReadStream>;
217
/**
 * A node in the file-group tree; may reference a file or act as a directory.
 */
export interface FileGroup {
  id: string;
  name?: string;
  // Ordering relative to siblings; see updateFileGroupOrder
  order: number;
  // NOTE(review): `{}` allows any non-nullish value; `Record<string, unknown>` is
  // probably the intended shape — confirm before tightening
  meta: {};
  // Referenced file id, when this group is a file node
  file?: string;
  // Parent group id, when not a root
  parent?: string;
  createdAt: Date;
  updatedAt: Date;
  deletedAt: Date;
}

/**
 * FileGroup with resolved children and optionally resolved file data,
 * as returned by getNestedFileGroups.
 */
export interface NestedFileGroup {
  id: string;
  name?: string;
  order: number;
  // NOTE(review): same `{}` caveat as FileGroup#meta
  meta: {};
  parent?: string;
  isDirectory: boolean;
  // Either the resolved file record, just the file id (string), or undefined —
  // presumably depending on the `excludeFiles` query option; TODO confirm
  file?:
    | {
        id: string;
        bucketName?: string;
        contentLength?: number;
        contentType?: string;
        name?: string;
        createdAt?: string;
        updatedAt?: string;
      }
    | undefined
    | string;
  createdAt: Date;
  updatedAt: Date;
  deletedAt: Date;
  children?: NestedFileGroup[];
}
254
/**
 * Assigns children of the provided fileGroup to the parent.
 * Returns the affected children.
 */
export function hoistChildrenToParent(
  sql: Postgres,
  fileGroup: FileGroup,
): Promise<FileGroup[]>;

/**
 * Update the order of the provided id's in relation to each other.
 * This function does not check if all files are in the same group.
 *
 * @param ids Ids in the desired order
 */
export function updateFileGroupOrder(
  sql: Postgres,
  ids: string[],
): Promise<void>;

/**
 * Return a result with nested file groups and files, sorted completely by the order id.
 *
 * @param where.deletedAtIncludeNotNull Also include soft-deleted rows
 * @param where.rootId Restrict the result to the subtree under this id
 * @param where.excludeFiles Skip resolving file data for file nodes
 */
export function getNestedFileGroups(
  sql: Postgres,
  where?: {
    deletedAtIncludeNotNull?: boolean;
    rootId?: string;
    excludeFiles?: boolean;
  },
): Promise<NestedFileGroup[]>;
284
/**
 * Tuning options for FileCache.
 */
export interface FileCacheOptions {
  /**
   * Maximum byte size of a file to be stored in memory;
   * larger files are cached on local disk instead
   */
  inMemoryThreshold?: number;

  /**
   * Customize default Cache-Control header to give back
   */
  cacheControlHeader?: string;
}
296
/**
 * A relatively simple local file cache implementation.
 * Supports saving files in memory and on local disk.
 * Files with a contentLength smaller than the provided threshold will be stored in
 * memory. A file will always be cached in full, and the range requests are evaluated
 * afterwards. FileCache#clear does not remove files from disk, but will overwrite the
 * file when added to the cache again.
 *
 * FileCache#getStreamFn is compatible with `sendFile` in @lbu/server.
 */
export class FileCache {
  // Directory used for the on-disk part of the cache, shared by all instances
  static fileCachePath: string;

  constructor(
    sql: Postgres,
    minio: minioVendor.Client,
    bucketName: string,
    options?: FileCacheOptions,
  );

  /**
   * Get a file(part) from the cache.
   * If the file(part) does not exist, it will try to fetch it from the FileStore.
   * If the file store throws an error / it doesn't exist, the error is propagated to the
   * caller.
   */
  public getStreamFn: (
    file: StoreFile,
    start?: number,
    end?: number,
  ) => Promise<{ stream: NodeJS.ReadStream; cacheControl: string }>;

  /**
   * Remove a file from cache, but not from local disk.
   */
  clear(fileId: string): void;
}
334
/**
 * Raw data for a specific job, as passed to the worker handler.
 */
export interface JobData {
  id: number;
  createdAt: Date;
  scheduledAt: Date;
  name: string;
  // Job payload as stored at creation time
  data: any;
}

/**
 * Job creation parameters.
 */
export interface JobInput {
  /**
   * Defaults to 0
   */
  priority?: number;

  /**
   * Defaults to empty object
   */
  data?: Record<string, any>;

  /**
   * Defaults to now
   */
  scheduledAt?: Date;

  name: string;
}

/**
 * Options controlling a JobQueueWorker instance.
 */
export interface JobQueueWorkerOptions {
  // Invoked for each picked-up job; rejections are handled by the worker
  // (NOTE(review): error behavior not visible here — confirm)
  handler: (sql: Postgres, data: JobData) => void | Promise<void>;

  /**
   * Determine the poll interval in milliseconds if the queue was empty. Defaults to 1500 ms
   */
  pollInterval?: number;

  /**
   * Set the amount of parallel jobs to process. Defaults to 1.
   * Make sure it is not higher than the amount of Postgres connections in the pool
   */
  parallelCount?: number;
}
382
/**
 * Job Queue worker. Supports scheduling, priorities and parallel workers.
 * If a name is provided, this worker will only accept jobs with the exact same name.
 */
export class JobQueueWorker {
  /**
   * @param nameOrOptions Either the queue name (then pass `options` as third argument)
   *   or the full worker options for an unnamed worker
   */
  constructor(
    sql: Postgres,
    nameOrOptions: string | JobQueueWorkerOptions,
    options?: JobQueueWorkerOptions,
  );

  /**
   * Start the JobQueueWorker.
   */
  start(): void;

  /**
   * Stop the JobQueueWorker.
   * Running jobs will continue to run, but no new jobs are fetched.
   */
  stop(): void;

  /**
   * Get the number of jobs that need to run.
   */
  pendingQueueSize(): Promise<
    { pendingCount: number; scheduledCount: number } | undefined
  >;

  /**
   * Return the average time between scheduled and completed for jobs completed in the
   * provided time range in milliseconds.
   */
  averageTimeToCompletion(startDate: Date, endDate: Date): Promise<number>;

  /**
   * Uses this queue name and connection to add a job to the queue.
   * If job.name is already set, it will not be overwritten.
   *
   * @returns The id of the inserted job
   */
  addJob(job: JobInput): Promise<number>;
}
424
/**
 * Add a new item to the job queue.
 * Defaults to `process.env.APP_NAME` if name is not specified.
 *
 * @returns The id of the inserted job
 */
export function addJobToQueue(sql: Postgres, job: JobInput): Promise<number>;

/**
 * Add a recurring job, if no existing job with the same name is scheduled.
 * Does not throw when a job is already pending with the same name.
 * If already exists will update the priority and interval.
 *
 * The recurring job handler will reschedule the job based on it's own scheduledAt. However if
 * the newly scheduled job is not in the future, the interval is added on to the current time.
 */
export function addRecurringJobToQueue(
  sql: Postgres,
  {
    name,
    priority,
    interval,
  }: {
    name: string;
    priority?: number;
    // Interval components are combined; all parts are optional except that at least
    // one should presumably be set — TODO confirm
    interval: {
      years?: number;
      months?: number;
      days?: number;
      hours?: number;
      minutes?: number;
      seconds?: number;
    };
  },
): Promise<void>;
458
/**
 * Stripped down from @lbu/server SessionStore.
 */
export interface SessionStore {
  // Resolves to `false` when the session does not exist — TODO confirm exact contract
  get(id: string): Promise<object | boolean>;

  // `age` is the session lifetime; unit not visible here (presumably milliseconds)
  set(id: string, session: object, age: number): Promise<void>;

  destroy(id: string): Promise<void>;

  /**
   * Remove all expired sessions
   */
  clean(): Promise<void>;
}

/**
 * Create a session store compatible with @lbu/server#session.
 */
export function newSessionStore(sql: Postgres): SessionStore;
479
/**
 * Migration directory of this package, to be included by consumers' migrate setups.
 */
export const migrations: string;

/**
 * LBU structure.
 * Can be used to extend functionality or reference one of the columns.
 * NOTE(review): typed `any` — a generated structure type would be safer if available.
 */
export const storeStructure: any;
490
/**
 * Build safe, parameterized queries.
 * Produced by the `query` template tag; parts can be composed via `append`.
 */
export interface QueryPart {
  // Literal SQL fragments; interleaved with `values` on execution
  strings: string[];
  values: any[];

  // Concatenate another part onto this one; returns a part for chaining
  append(part: QueryPart): QueryPart;

  // Execute against the provided connection with positional parameters
  exec(sql: Postgres): postgresVendor.PendingQuery<any>;
}
502
503/**
504 * Format and append query parts, and exec the final result in a safe way.
505 * Undefined values are skipped, as they are not allowed in queries.
506 * The provided values may contain other 'query``' calls, and they will be inserted
507 * appropriately.
508 *
509 * @example
510 * ```
511 * const getWhere = (value) => query`WHERE foo = ${value}`;
512 * const selectResult = await query`SELECT * FROM "myTable" ${getWhere(5)}`.exec(sql);
513 * // sql: SELECT * FROM "myTable" WHERE foo = $1
514 * // arguments: [ 5 ]
515 * ```
516 */
517export function query(strings: string[], ...values: any[]): QueryPart;
518
/**
 * Simple check if the passed in value is a query part.
 * Acts as a type guard, narrowing `value` to QueryPart.
 */
export function isQueryObject(value: any): value is QueryPart;

/**
 * Creates a transaction, executes the query, and rollback the transaction afterwards.
 * This is safe to use with insert, update and delete queries.
 *
 * By default returns text, but can also return json.
 * Note that explain output is highly depended on the current data and usage of the tables.
 *
 * @param options.jsonResult When true, resolves with the parsed JSON plan instead of text
 */
export function explainAnalyzeQuery(
  sql: Postgres,
  queryPart: QueryPart,
  options?: { jsonResult?: true },
): Promise<string | any>;

/**
 * Overwrite used generated queries.
 * This is needed when you want cascading soft deletes to any of the exposed types.
 */
export function setStoreQueries(q: typeof queries): void;