/**
 * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the root directory of this source tree.
 */
/// <reference types="node" />

import type {EventEmitter} from 'events';
import type {ForkOptions} from 'child_process';
import type {ResourceLimits} from 'worker_threads';

declare const CHILD_MESSAGE_CALL = 1;

declare const CHILD_MESSAGE_END = 2;

declare const CHILD_MESSAGE_INITIALIZE = 0;

declare const CHILD_MESSAGE_MEM_USAGE = 3;

declare type ChildMessage =
  | ChildMessageInitialize
  | ChildMessageCall
  | ChildMessageEnd
  | ChildMessageMemUsage;

declare type ChildMessageCall = [
  typeof CHILD_MESSAGE_CALL,
  boolean,
  string,
  Array<unknown>,
];

declare type ChildMessageEnd = [typeof CHILD_MESSAGE_END, boolean];

declare type ChildMessageInitialize = [
  typeof CHILD_MESSAGE_INITIALIZE,
  boolean,
  string,
  // file
  Array<unknown> | undefined,
  // setupArgs
  MessagePort_2 | undefined,
];

declare type ChildMessageMemUsage = [typeof CHILD_MESSAGE_MEM_USAGE];

declare type ComputeTaskPriorityCallback = (
  method: string,
  ...args: Array<unknown>
) => number;

declare type ExcludeReservedKeys<K> = Exclude<K, ReservedKeys>;

/**
 * First-in, first-out task queue that manages a dedicated pool
 * for each worker as well as a shared queue. FIFO ordering is guaranteed
 * across the worker-specific and shared queues.
 */
export declare class FifoQueue implements TaskQueue {
  private _workerQueues;
  private _sharedQueue;
  enqueue(task: QueueChildMessage, workerId?: number): void;
  dequeue(workerId: number): QueueChildMessage | null;
}
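
/*
 * Illustrative usage (not part of the published typings): FifoQueue can be
 * passed explicitly through the `taskQueue` option of the farm. A minimal
 * sketch, assuming a hypothetical './heavy-task' worker module:
 *
 *   import {Worker, FifoQueue} from 'jest-worker';
 *
 *   const farm = new Worker(require.resolve('./heavy-task'), {
 *     numWorkers: 2,
 *     taskQueue: new FifoQueue(), // FIFO ordering across all queued calls
 *   });
 */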

declare type FunctionLike = (...args: any) => unknown;

declare type HeapItem = {
  priority: number;
};

export declare type JestWorkerFarm<T extends Record<string, unknown>> =
  Worker_2 & WorkerModule<T>;
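
/*
 * Illustrative usage (not part of the published typings): JestWorkerFarm can
 * be used to get typed, promisified access to the methods a worker module
 * exposes. A hedged sketch; './heavy-task' and `myHeavyTask` are hypothetical:
 *
 *   import {Worker} from 'jest-worker';
 *   import type {JestWorkerFarm} from 'jest-worker';
 *
 *   const farm = new Worker(require.resolve('./heavy-task'), {
 *     exposedMethods: ['myHeavyTask'],
 *   }) as JestWorkerFarm<typeof import('./heavy-task')>;
 *
 *   // `myHeavyTask` is now typed and always returns a Promise:
 *   // const result = await farm.myHeavyTask(input);
 */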

export declare function messageParent(
  message: unknown,
  parentProcess?: NodeJS.Process,
): void;
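
/*
 * Illustrative usage (not part of the published typings): `messageParent` is
 * called from inside a worker module to push a custom message back to the
 * parent process. A minimal sketch; 'heavy-task.js', `myHeavyTask` and
 * `doExpensiveWork` are hypothetical:
 *
 *   // heavy-task.js: loaded by jest-worker in the child process/thread
 *   const {messageParent} = require('jest-worker');
 *
 *   async function myHeavyTask(input) {
 *     messageParent({type: 'progress', value: 0.5}); // arbitrary payload
 *     return doExpensiveWork(input);
 *   }
 *
 *   module.exports = {myHeavyTask};
 */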

declare type MessagePort_2 = typeof EventEmitter & {
  postMessage(message: unknown): void;
};

declare type MethodLikeKeys<T> = {
  [K in keyof T]: T[K] extends FunctionLike ? K : never;
}[keyof T];

declare class MinHeap<TItem extends HeapItem> {
  private _heap;
  peek(): TItem | null;
  add(item: TItem): void;
  poll(): TItem | null;
}

declare type OnCustomMessage = (message: Array<unknown> | unknown) => void;

declare type OnEnd = (err: Error | null, result: unknown) => void;

declare type OnStart = (worker: WorkerInterface) => void;

declare type OnStateChangeHandler = (
  state: WorkerStates,
  oldState: WorkerStates,
) => void;

declare type PoolExitResult = {
  forceExited: boolean;
};

/**
 * Priority queue that processes tasks in natural ordering (lower priority first)
 * according to the priority computed by the function passed in the constructor.
 *
 * FIFO ordering isn't guaranteed for tasks with the same priority.
 *
 * Worker-specific tasks with the same priority as a non-worker-specific task
 * are always processed first.
 */
export declare class PriorityQueue implements TaskQueue {
  private _computePriority;
  private _queue;
  private _sharedQueue;
  constructor(_computePriority: ComputeTaskPriorityCallback);
  enqueue(task: QueueChildMessage, workerId?: number): void;
  _enqueue(task: QueueChildMessage, queue: MinHeap<QueueItem>): void;
  dequeue(workerId: number): QueueChildMessage | null;
  _getWorkerQueue(workerId: number): MinHeap<QueueItem>;
}
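
/*
 * Illustrative usage (not part of the published typings): a PriorityQueue is
 * constructed with a callback that maps each queued call to a numeric
 * priority, lower values running first. A hedged sketch; './heavy-task',
 * `transform` and `lint` are hypothetical:
 *
 *   import {Worker, PriorityQueue} from 'jest-worker';
 *
 *   const farm = new Worker(require.resolve('./heavy-task'), {
 *     exposedMethods: ['transform', 'lint'],
 *     // 'transform' calls (priority 0) are dequeued before 'lint' calls (priority 1).
 *     taskQueue: new PriorityQueue(method => (method === 'transform' ? 0 : 1)),
 *   });
 */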

export declare interface PromiseWithCustomMessage<T> extends Promise<T> {
  UNSTABLE_onCustomMessage?: (listener: OnCustomMessage) => () => void;
}
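
/*
 * Illustrative usage (not part of the published typings): the promise returned
 * by an exposed method can carry an UNSTABLE_onCustomMessage hook, the
 * parent-side counterpart of `messageParent`. A hedged sketch, inside an async
 * function; `farm`, `myHeavyTask` and `input` are assumed from the sketches
 * above:
 *
 *   import type {PromiseWithCustomMessage} from 'jest-worker';
 *
 *   const promise = farm.myHeavyTask(input) as PromiseWithCustomMessage<unknown>;
 *   const unsubscribe = promise.UNSTABLE_onCustomMessage?.(message => {
 *     console.log('custom message from worker:', message);
 *   });
 *   const result = await promise;
 *   unsubscribe?.();
 */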

declare type Promisify<T extends FunctionLike> = ReturnType<T> extends Promise<
  infer R
>
  ? (...args: Parameters<T>) => Promise<R>
  : (...args: Parameters<T>) => Promise<ReturnType<T>>;

declare type QueueChildMessage = {
  request: ChildMessageCall;
  onStart: OnStart;
  onEnd: OnEnd;
  onCustomMessage: OnCustomMessage;
};

declare type QueueItem = {
  task: QueueChildMessage;
  priority: number;
};

declare type ReservedKeys =
  | 'end'
  | 'getStderr'
  | 'getStdout'
  | 'setup'
  | 'teardown';

export declare interface TaskQueue {
  /**
   * Enqueues the task in the queue for the specified worker, or adds it to the
   * queue shared by all workers.
   * @param task the task to queue
   * @param workerId the id of the worker that should process this task, or
   * undefined if there's no preference.
   */
  enqueue(task: QueueChildMessage, workerId?: number): void;
  /**
   * Dequeues the next item from the queue for the specified worker.
   * @param workerId the id of the worker for which the next task should be retrieved
   */
  dequeue(workerId: number): QueueChildMessage | null;
}
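
/*
 * Illustrative sketch (not part of the published typings): a custom TaskQueue
 * can be supplied through the `taskQueue` option. The queued task type is not
 * exported, so this hedged example derives it from the interface itself; the
 * LifoQueue name and its stack-based policy are purely hypothetical:
 *
 *   import type {TaskQueue} from 'jest-worker';
 *
 *   type Task = Parameters<TaskQueue['enqueue']>[0];
 *
 *   class LifoQueue implements TaskQueue {
 *     private _stack: Array<Task> = [];
 *     // Ignores workerId: every task goes onto one shared, last-in-first-out pile.
 *     enqueue(task: Task, _workerId?: number): void {
 *       this._stack.push(task);
 *     }
 *     dequeue(_workerId: number): Task | null {
 *       return this._stack.pop() ?? null;
 *     }
 *   }
 */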

/**
 * The Jest farm (publicly called "Worker") is a class that allows you to queue
 * methods across multiple child processes, in order to parallelize work. This
 * is done by providing an absolute path to a module that will be loaded on each
 * of the child processes, and bridged to the main process.
 *
 * Bridged methods are specified by using the "exposedMethods" property of the
 * "options" object. This is an array of strings, where each of them corresponds
 * to the exported name in the loaded module.
 *
 * You can also control the number of workers by using the "numWorkers" property
 * of the "options" object, and the settings passed to fork the process through
 * the "forkOptions" property. The number of workers defaults to the number of
 * CPUs minus one.
 *
 * Queueing calls can be done in two ways:
 *   - Standard method: calls will be redirected to the first available worker,
 *     so they will get executed as soon as they can.
 *
 *   - Sticky method: if a "computeWorkerKey" method is provided within the
 *     config, the resulting string of this method will be used as a key.
 *     Every time this key is returned, it is guaranteed that your job will be
 *     processed by the same worker. This is especially useful if your workers
 *     are caching results.
 */
declare class Worker_2 {
  private _ending;
  private _farm;
  private _options;
  private _workerPool;
  constructor(workerPath: string, options?: WorkerFarmOptions);
  private _bindExposedWorkerMethods;
  private _callFunctionWithArgs;
  getStderr(): NodeJS.ReadableStream;
  getStdout(): NodeJS.ReadableStream;
  end(): Promise<PoolExitResult>;
}
export {Worker_2 as Worker};
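
/*
 * Illustrative usage (not part of the published typings): a minimal sketch of
 * the farm described above. './heavy-task' and `myHeavyTask` are hypothetical,
 * and the exposed method is bound onto the instance at runtime (see the
 * JestWorkerFarm sketch earlier for typed access):
 *
 *   import {Worker} from 'jest-worker';
 *
 *   async function main() {
 *     const farm = new Worker(require.resolve('./heavy-task'), {
 *       exposedMethods: ['myHeavyTask'],
 *       numWorkers: 4,
 *       // Sticky scheduling: calls with the same first argument hit the same worker.
 *       computeWorkerKey: (method, ...args) => String(args[0]),
 *     });
 *
 *     const result = await farm.myHeavyTask('some-input');
 *     console.log(result);
 *
 *     const {forceExited} = await farm.end();
 *     if (forceExited) console.error('workers had to be force exited');
 *   }
 */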

declare type WorkerCallback = (
  workerId: number,
  request: ChildMessage,
  onStart: OnStart,
  onEnd: OnEnd,
  onCustomMessage: OnCustomMessage,
) => void;

declare enum WorkerEvents {
  STATE_CHANGE = 'state-change',
}

export declare type WorkerFarmOptions = {
  computeWorkerKey?: (method: string, ...args: Array<unknown>) => string | null;
  enableWorkerThreads?: boolean;
  exposedMethods?: ReadonlyArray<string>;
  forkOptions?: ForkOptions;
  maxRetries?: number;
  numWorkers?: number;
  resourceLimits?: ResourceLimits;
  setupArgs?: Array<unknown>;
  taskQueue?: TaskQueue;
  WorkerPool?: new (
    workerPath: string,
    options?: WorkerPoolOptions,
  ) => WorkerPoolInterface;
  workerSchedulingPolicy?: WorkerSchedulingPolicy;
  idleMemoryLimit?: number;
};
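
/*
 * Illustrative options object (not part of the published typings): a hedged
 * sketch combining several of the fields above. The values are arbitrary and
 * './heavy-task' plus the setup payload are hypothetical:
 *
 *   import type {WorkerFarmOptions} from 'jest-worker';
 *
 *   const options: WorkerFarmOptions = {
 *     enableWorkerThreads: true, // use worker_threads instead of child_process
 *     numWorkers: 2,
 *     maxRetries: 3,
 *     forkOptions: {stdio: 'pipe'},
 *     setupArgs: [{cacheDir: '/tmp/cache'}], // forwarded to the module's setup()
 *     workerSchedulingPolicy: 'round-robin',
 *     idleMemoryLimit: 50 * 1024 * 1024, // recycle a worker idling above ~50 MB
 *   };
 */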

declare interface WorkerInterface {
  get state(): WorkerStates;
  send(
    request: ChildMessage,
    onProcessStart: OnStart,
    onProcessEnd: OnEnd,
    onCustomMessage: OnCustomMessage,
  ): void;
  waitForExit(): Promise<void>;
  forceExit(): void;
  getWorkerId(): number;
  getStderr(): NodeJS.ReadableStream | null;
  getStdout(): NodeJS.ReadableStream | null;
  /**
   * Some system-level identifier for the worker, e.g. process id, thread id, etc.
   */
  getWorkerSystemId(): number;
  getMemoryUsage(): Promise<number | null>;
  /**
   * Checks to see if the child worker is actually running.
   */
  isWorkerRunning(): boolean;
  /**
   * Resolves when the worker child has started and is ready to handle requests.
   *
   * @remarks
   * This mostly exists to help with testing, so that you don't check the status
   * of things like isWorkerRunning before the worker is actually running.
   */
  waitForWorkerReady(): Promise<void>;
}

declare type WorkerModule<T> = {
  [K in keyof T as Extract<
    ExcludeReservedKeys<K>,
    MethodLikeKeys<T>
  >]: T[K] extends FunctionLike ? Promisify<T[K]> : never;
};

declare type WorkerOptions_2 = {
  forkOptions: ForkOptions;
  resourceLimits: ResourceLimits;
  setupArgs: Array<unknown>;
  maxRetries: number;
  workerId: number;
  workerData?: unknown;
  workerPath: string;
  /**
   * The memory usage a worker should return to after a job has executed.
   *
   * @remarks
   * Note this is different from ResourceLimits in that it is checked at idle,
   * after a job is complete. So you could have a resource limit of 500MB but an
   * idle limit of 50MB. The latter will only trigger if, after a job has
   * completed, the memory usage hasn't returned to under 50MB.
   */
  idleMemoryLimit?: number;
  /**
   * This mainly exists so the path can be changed during testing.
   * https://github.com/facebook/jest/issues/9543
   */
  childWorkerPath?: string;
  /**
   * This is useful for debugging individual tests, allowing you to see
   * the raw output of the worker.
   */
  silent?: boolean;
  /**
   * Used to immediately bind event handlers.
   */
  on?: {
    [WorkerEvents.STATE_CHANGE]:
      | OnStateChangeHandler
      | ReadonlyArray<OnStateChangeHandler>;
  };
};

export declare interface WorkerPoolInterface {
  getStderr(): NodeJS.ReadableStream;
  getStdout(): NodeJS.ReadableStream;
  getWorkers(): Array<WorkerInterface>;
  createWorker(options: WorkerOptions_2): WorkerInterface;
  send: WorkerCallback;
  end(): Promise<PoolExitResult>;
}

export declare type WorkerPoolOptions = {
  setupArgs: Array<unknown>;
  forkOptions: ForkOptions;
  resourceLimits: ResourceLimits;
  maxRetries: number;
  numWorkers: number;
  enableWorkerThreads: boolean;
  idleMemoryLimit?: number;
};

declare type WorkerSchedulingPolicy = 'round-robin' | 'in-order';

declare enum WorkerStates {
  STARTING = 'starting',
  OK = 'ok',
  OUT_OF_MEMORY = 'oom',
  RESTARTING = 'restarting',
  SHUTTING_DOWN = 'shutting-down',
  SHUT_DOWN = 'shut-down',
}

export {};