declare module "bottleneck" {
  namespace Bottleneck {
    type ConstructorOptions = {
      /**
       * How many jobs can be running at the same time.
       */
      readonly maxConcurrent?: number | null;
      /**
       * How long to wait after launching a job before launching another one.
       */
      readonly minTime?: number;
      /**
       * How long can the queue get? When the queue length exceeds that value, the selected `strategy` is executed to shed the load.
       */
      readonly highWater?: number | null;
      /**
       * Which strategy to use if the queue gets longer than the high water mark.
       */
      readonly strategy?: Bottleneck.Strategy;
      /**
       * The `penalty` value used by the `Bottleneck.strategy.BLOCK` strategy.
       */
      readonly penalty?: number | null;
      /**
       * How many jobs can be executed before the limiter stops executing jobs. If `reservoir` reaches `0`, no jobs will be executed until it is no longer `0`.
       */
      readonly reservoir?: number | null;
      /**
       * Every `reservoirRefreshInterval` milliseconds, the `reservoir` value will be automatically reset to `reservoirRefreshAmount`. This feature has an accuracy of +/- 5 seconds.
       */
      readonly reservoirRefreshInterval?: number | null;
      /**
       * The value to reset `reservoir` to when `reservoirRefreshInterval` is in use.
       */
      readonly reservoirRefreshAmount?: number | null;
      /**
       * Optional identifier
       */
      readonly id?: string;
      /**
       * Set to false to leave your failed jobs hanging instead of failing them.
       */
      readonly rejectOnDrop?: boolean;
      /**
       * Set to true to keep track of done jobs with counts() and jobStatus(). Uses more memory.
       */
      readonly trackDoneStatus?: boolean;
      /**
       * Where the limiter stores its internal state. The default (`local`) keeps the state in the limiter itself. Set it to `redis` to enable Clustering.
       */
      readonly datastore?: string;
      /**
       * Override the Promise library used by Bottleneck.
       */
      readonly Promise?: any;
      /**
       * This object is passed directly to the redis client library you've selected.
       */
      readonly clientOptions?: any;
      /**
       * **ioredis only.** When `clusterNodes` is not null, the client will be instantiated by calling `new Redis.Cluster(clusterNodes, clientOptions)`.
       */
      readonly clusterNodes?: any;
      /**
       * An existing Bottleneck.RedisConnection or Bottleneck.IORedisConnection object to use.
       * If using, `datastore`, `clientOptions` and `clusterNodes` will be ignored.
       */
      readonly connection?: Bottleneck.RedisConnection | Bottleneck.IORedisConnection;
      /**
       * When set to `true`, on initial startup, the limiter will wipe any existing Bottleneck state data on the Redis db.
       */
      readonly clearDatastore?: boolean;
      /**
       * The Redis TTL in milliseconds for the keys created by the limiter. When `timeout` is set, the limiter's state will be automatically removed from Redis after timeout milliseconds of inactivity. Note: timeout is 300000 (5 minutes) by default when using a Group.
       */
      readonly timeout?: number | null;

      [propName: string]: any;
    };
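    // Example (editor's illustrative sketch, not part of the published typings): a
    // limiter built from the options above, running at most 2 jobs at a time with at
    // least 250ms between job starts.
    //
    //   import Bottleneck from "bottleneck";
    //   const limiter = new Bottleneck({ maxConcurrent: 2, minTime: 250 });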
    type JobOptions = {
      /**
       * A priority between `0` and `9`. A job with a priority of `4` will _always_ be executed before a job with a priority of `5`.
       */
      readonly priority?: number;
      /**
       * Must be an integer equal to or higher than `0`. The `weight` is what increases the number of running jobs (up to `maxConcurrent`, if using) and decreases the `reservoir` value (if using).
       */
      readonly weight?: number;
      /**
       * The number of milliseconds a job has to finish. Jobs that take longer than their `expiration` will be failed with a `BottleneckError`.
       */
      readonly expiration?: number | null;
      /**
       * Optional identifier, helps with debug output.
       */
      readonly id?: string;
    };
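    // Example (editor's sketch, reusing the `limiter` from the sketch above): job
    // options are passed as an optional first argument to schedule() or submit().
    //
    //   const user = await limiter.schedule(
    //     { priority: 4, weight: 1, id: "fetch-user" },
    //     () => fetch("https://example.com/api/user")
    //   );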
    type StopOptions = {
      /**
       * When `true`, drop all the RECEIVED, QUEUED and RUNNING jobs. When `false`, allow those jobs to complete before resolving the Promise returned by this method.
       */
      readonly dropWaitingJobs?: boolean;
      /**
       * The error message used to drop jobs when `dropWaitingJobs` is `true`.
       */
      readonly dropErrorMessage?: string;
      /**
       * The error message used to reject a job added to the limiter after `stop()` has been called.
       */
      readonly enqueueErrorMessage?: string;
    };
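    // Example (editor's sketch): shut the limiter down but let already-queued jobs finish.
    //
    //   await limiter.stop({ dropWaitingJobs: false });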
    type Callback<T> = (err: any, result: T) => void;
    interface ClientsList { client?: any; subscriber?: any }
    interface GroupLimiterPair { key: string; limiter: Bottleneck }
    interface Strategy {}

    enum Status {
      RECEIVED = "RECEIVED",
      QUEUED = "QUEUED",
      RUNNING = "RUNNING",
      EXECUTING = "EXECUTING",
      DONE = "DONE"
    }
    interface Counts {
      RECEIVED: number,
      QUEUED: number,
      RUNNING: number,
      EXECUTING: number,
      DONE?: number
    }

    type RedisConnectionOptions = {
      /**
       * This object is passed directly to NodeRedis' createClient() method.
       */
      readonly clientOptions?: any;
      /**
       * An existing NodeRedis client to use. If using, `clientOptions` will be ignored.
       */
      readonly client?: any;
    };

    type IORedisConnectionOptions = {
      /**
       * This object is passed directly to ioredis' constructor method.
       */
      readonly clientOptions?: any;
      /**
       * When `clusterNodes` is not null, the client will be instantiated by calling `new Redis.Cluster(clusterNodes, clientOptions)`.
       */
      readonly clusterNodes?: any;
      /**
       * An existing ioredis client to use. If using, `clientOptions` and `clusterNodes` will be ignored.
       */
      readonly client?: any;
    };

    type BatcherOptions = {
      /**
       * Maximum acceptable time (in milliseconds) a request can have to wait before being flushed to the `"batch"` event.
       */
      readonly maxTime?: number;
      /**
       * Maximum number of requests in a batch.
       */
      readonly maxSize?: number;
    };

    class BottleneckError extends Error {
    }

    class RedisConnection {
      constructor(options?: Bottleneck.RedisConnectionOptions);

      /**
       * Register an event listener.
       * @param name - The event name.
       * @param fn - The callback function.
       */
      on(name: string, fn: Function): void;
      on(name: "error", fn: (error: any) => void): void;

      /**
       * Register an event listener for one event only.
       * @param name - The event name.
       * @param fn - The callback function.
       */
      once(name: string, fn: Function): void;
      once(name: "error", fn: (error: any) => void): void;

      /**
       * Waits until the connection is ready and returns the raw Node_Redis clients.
       */
      ready(): Promise<ClientsList>;

      /**
       * Close the redis clients.
       * @param flush - Write transient data before closing.
       */
      disconnect(flush?: boolean): Promise<void>;
    }

    class IORedisConnection {
      constructor(options?: Bottleneck.IORedisConnectionOptions);

      /**
       * Register an event listener.
       * @param name - The event name.
       * @param fn - The callback function.
       */
      on(name: string, fn: Function): void;
      on(name: "error", fn: (error: any) => void): void;

      /**
       * Register an event listener for one event only.
       * @param name - The event name.
       * @param fn - The callback function.
       */
      once(name: string, fn: Function): void;
      once(name: "error", fn: (error: any) => void): void;

      /**
       * Waits until the connection is ready and returns the raw ioredis clients.
       */
      ready(): Promise<ClientsList>;

      /**
       * Close the redis clients.
       * @param flush - Write transient data before closing.
       */
      disconnect(flush?: boolean): Promise<void>;
    }
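    // Example (editor's sketch, assuming an existing ioredis client named `redisClient`):
    // one connection object can be shared by several limiters.
    //
    //   const connection = new Bottleneck.IORedisConnection({ client: redisClient });
    //   const limiter = new Bottleneck({ id: "api-limiter", connection });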

    class Batcher {
      constructor(options?: Bottleneck.BatcherOptions);

      /**
       * Register an event listener.
       * @param name - The event name.
       * @param fn - The callback function.
       */
      on(name: string, fn: Function): void;
      on(name: "error", fn: (error: any) => void): void;
      on(name: "batch", fn: (batch: any[]) => void): void;

      /**
       * Register an event listener for one event only.
       * @param name - The event name.
       * @param fn - The callback function.
       */
      once(name: string, fn: Function): void;
      once(name: "error", fn: (error: any) => void): void;
      once(name: "batch", fn: (batch: any[]) => void): void;

      /**
       * Add a request to the Batcher. Batches are flushed to the "batch" event.
       */
      add(data: any): Promise<void>;
    }
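    // Example (editor's sketch): group incoming items into batches of up to 10, flushed
    // at least every 5 seconds, and handle each batch in the "batch" event.
    //
    //   const batcher = new Bottleneck.Batcher({ maxTime: 5000, maxSize: 10 });
    //   batcher.on("batch", (batch) => console.log("flushing", batch.length, "items"));
    //   await batcher.add({ id: 1 });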

    class Group {
      constructor(options?: Bottleneck.ConstructorOptions);

      id: string;
      datastore: string;
      connection?: Bottleneck.RedisConnection | Bottleneck.IORedisConnection;

      /**
       * Returns the limiter for the specified key.
       * @param str - The limiter key.
       */
      key(str: string): Bottleneck;

      /**
       * Register an event listener.
       * @param name - The event name.
       * @param fn - The callback function.
       */
      on(name: string, fn: Function): void;
      on(name: "error", fn: (error: any) => void): void;
      on(name: "created", fn: (created: Bottleneck, key: string) => void): void;

      /**
       * Register an event listener for one event only.
       * @param name - The event name.
       * @param fn - The callback function.
       */
      once(name: string, fn: Function): void;
      once(name: "error", fn: (error: any) => void): void;
      once(name: "created", fn: (created: Bottleneck, key: string) => void): void;

      /**
       * Removes all registered event listeners.
       * @param name - The optional event name to remove listeners from.
       */
      removeAllListeners(name?: string): void;

      /**
       * Updates the group settings.
       * @param options - The new settings.
       */
      updateSettings(options: Bottleneck.ConstructorOptions): void;

      /**
       * Deletes the limiter for the given key.
       * Returns true if a key was deleted.
       * @param str - The key
       */
      deleteKey(str: string): Promise<boolean>;

      /**
       * Disconnects the underlying redis clients, unless the Group was created with the `connection` option.
       * @param flush - Write transient data before closing.
       */
      disconnect(flush?: boolean): Promise<void>;

      /**
       * Returns all the key-limiter pairs.
       */
      limiters(): Bottleneck.GroupLimiterPair[];

      /**
       * Returns all Group keys in the local instance
       */
      keys(): string[];

      /**
       * Returns all Group keys in the Cluster
       */
      clusterKeys(): Promise<string[]>;
    }
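    // Example (editor's sketch, with a hypothetical sendEmail helper): a Group creates
    // one limiter per key on demand, here one per user.
    //
    //   const group = new Bottleneck.Group({ maxConcurrent: 1, minTime: 1000 });
    //   await group.key("user-42").schedule(() => sendEmail("user-42"));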

    class Events {
      constructor(object: Object);

      /**
       * Returns the number of listeners for the event name
       * @param name - The event name.
       */
      listenerCount(name: string): number;

      /**
       * Returns a promise with the first non-null/non-undefined result from a listener
       * @param name - The event name.
       * @param args - The arguments to pass to the event listeners.
       */
      trigger(name: string, ...args: any[]): Promise<any>;
    }
  }

  class Bottleneck {
    public static readonly strategy: {
      /**
       * When adding a new job to a limiter, if the queue length reaches `highWater`, drop the oldest job with the lowest priority. This is useful when jobs that have been waiting for too long are not important anymore. If all the queued jobs are more important (based on their `priority` value) than the one being added, it will not be added.
       */
      readonly LEAK: Bottleneck.Strategy;
      /**
       * Same as `LEAK`, except it will only drop jobs that are less important than the one being added. If all the queued jobs are as or more important than the new one, it will not be added.
       */
      readonly OVERFLOW_PRIORITY: Bottleneck.Strategy;
      /**
       * When adding a new job to a limiter, if the queue length reaches `highWater`, do not add the new job. This strategy totally ignores priority levels.
       */
      readonly OVERFLOW: Bottleneck.Strategy;
      /**
       * When adding a new job to a limiter, if the queue length reaches `highWater`, the limiter falls into "blocked mode". All queued jobs are dropped and no new jobs will be accepted until the limiter unblocks. It will unblock after `penalty` milliseconds have passed without receiving a new job. `penalty` is equal to `15 * minTime` (or `5000` if `minTime` is `0`) by default and can be changed by calling `changePenalty()`. This strategy is ideal when bruteforce attacks are to be expected. This strategy totally ignores priority levels.
       */
      readonly BLOCK: Bottleneck.Strategy;
    };
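    // Example (editor's sketch): cap the queue at 100 jobs and shed the oldest,
    // lowest-priority job whenever the limit is reached.
    //
    //   const limiter = new Bottleneck({
    //     highWater: 100,
    //     strategy: Bottleneck.strategy.LEAK
    //   });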

    constructor(options?: Bottleneck.ConstructorOptions);

    id: string;
    datastore: string;
    connection?: Bottleneck.RedisConnection | Bottleneck.IORedisConnection;

    /**
     * Returns a promise which will be resolved once the limiter is ready to accept jobs
     * or rejected if it fails to start up.
     */
    ready(): Promise<any>;

    /**
     * Returns a datastore-specific object of redis clients.
     */
    clients(): Bottleneck.ClientsList;

    /**
     * Returns the name of the Redis pubsub channel used for this limiter
     */
    channel(): string;

    /**
     * Disconnects the underlying redis clients, unless the limiter was created with the `connection` option.
     * @param flush - Write transient data before closing.
     */
    disconnect(flush?: boolean): Promise<void>;

    /**
     * Broadcast a string to every limiter in the Cluster.
     */
    publish(message: string): Promise<void>;

    /**
     * Returns an object with the current number of jobs per status.
     */
    counts(): Bottleneck.Counts;

    /**
     * Returns the status of the job with the provided job id.
     */
    jobStatus(id: string): Bottleneck.Status;

    /**
     * Returns the ids of all the jobs with the specified status.
     */
    jobs(status?: Bottleneck.Status): string[];

    /**
     * Returns the number of requests queued.
     * @param priority - Returns the number of requests queued with the specified priority.
     */
    queued(priority?: number): number;

    /**
     * Returns true if there are no jobs currently in the queue or in the process of being added to the queue.
     */
    empty(): boolean;

    /**
     * Returns the total weight of jobs in a RUNNING or EXECUTING state in the Cluster.
     */
    running(): Promise<number>;

    /**
     * Returns the total weight of jobs in a DONE state in the Cluster.
     */
    done(): Promise<number>;

    /**
     * If a request was added right now, would it be run immediately?
     * @param weight - The weight of the request
     */
    check(weight?: number): Promise<boolean>;

    /**
     * Register an event listener.
     * @param name - The event name.
     * @param fn - The callback function.
     */
    on(name: string, fn: Function): void;
    on(name: "error", fn: (error: any) => void): void;
    on(name: "empty", fn: () => void): void;
    on(name: "idle", fn: () => void): void;
    on(name: "depleted", fn: (empty: boolean) => void): void;
    on(name: "dropped", fn: (dropped: any) => void): void;
    on(name: "message", fn: (message: string) => void): void;
    on(name: "debug", fn: (message: string, data: any) => void): void;

    /**
     * Register an event listener for one event only.
     * @param name - The event name.
     * @param fn - The callback function.
     */
    once(name: string, fn: Function): void;
    once(name: "error", fn: (error: any) => void): void;
    once(name: "empty", fn: () => void): void;
    once(name: "idle", fn: () => void): void;
    once(name: "depleted", fn: (empty: boolean) => void): void;
    once(name: "dropped", fn: (dropped: any) => void): void;
    once(name: "message", fn: (message: string) => void): void;
    once(name: "debug", fn: (message: string, data: any) => void): void;

    /**
     * Removes all registered event listeners.
     * @param name - The optional event name to remove listeners from.
     */
    removeAllListeners(name?: string): void;

    /**
     * Changes the settings for future requests.
     * @param options - The new settings.
     */
    updateSettings(options?: Bottleneck.ConstructorOptions): Bottleneck;

    /**
     * Adds to the reservoir count and returns the new value.
     */
    incrementReservoir(incrementBy: number): Promise<number>;

    /**
     * The `stop()` method is used to safely shut down a limiter. It prevents any new jobs from being added to the limiter and waits for all Executing jobs to complete.
     */
    stop(options?: Bottleneck.StopOptions): Promise<void>;

    /**
     * Returns the current reservoir count, if any.
     */
    currentReservoir(): Promise<number | null>;

    /**
     * Chain this limiter to another.
     * @param limiter - The limiter that requests to this limiter must also follow.
     */
    chain(limiter?: Bottleneck): Bottleneck;

    wrap<R>(fn: () => PromiseLike<R>): (() => Promise<R>) & { withOptions: (options: Bottleneck.JobOptions) => Promise<R>; };
    wrap<R, A1>(fn: (arg1: A1) => PromiseLike<R>): ((arg1: A1) => Promise<R>) & { withOptions: (options: Bottleneck.JobOptions, arg1: A1) => Promise<R>; };
    wrap<R, A1, A2>(fn: (arg1: A1, arg2: A2) => PromiseLike<R>): ((arg1: A1, arg2: A2) => Promise<R>) & { withOptions: (options: Bottleneck.JobOptions, arg1: A1, arg2: A2) => Promise<R>; };
    wrap<R, A1, A2, A3>(fn: (arg1: A1, arg2: A2, arg3: A3) => PromiseLike<R>): ((arg1: A1, arg2: A2, arg3: A3) => Promise<R>) & { withOptions: (options: Bottleneck.JobOptions, arg1: A1, arg2: A2, arg3: A3) => Promise<R>; };
    wrap<R, A1, A2, A3, A4>(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4) => PromiseLike<R>): ((arg1: A1, arg2: A2, arg3: A3, arg4: A4) => Promise<R>) & { withOptions: (options: Bottleneck.JobOptions, arg1: A1, arg2: A2, arg3: A3, arg4: A4) => Promise<R>; };
    wrap<R, A1, A2, A3, A4, A5>(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5) => PromiseLike<R>): ((arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5) => Promise<R>) & { withOptions: (options: Bottleneck.JobOptions, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5) => Promise<R>; };
    wrap<R, A1, A2, A3, A4, A5, A6>(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6) => PromiseLike<R>): ((arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6) => Promise<R>) & { withOptions: (options: Bottleneck.JobOptions, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6) => Promise<R>; };
    wrap<R, A1, A2, A3, A4, A5, A6, A7>(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7) => PromiseLike<R>): ((arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7) => Promise<R>) & { withOptions: (options: Bottleneck.JobOptions, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7) => Promise<R>; };
    wrap<R, A1, A2, A3, A4, A5, A6, A7, A8>(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8) => PromiseLike<R>): ((arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8) => Promise<R>) & { withOptions: (options: Bottleneck.JobOptions, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8) => Promise<R>; };
    wrap<R, A1, A2, A3, A4, A5, A6, A7, A8, A9>(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9) => PromiseLike<R>): ((arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9) => Promise<R>) & { withOptions: (options: Bottleneck.JobOptions, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9) => Promise<R>; };
    wrap<R, A1, A2, A3, A4, A5, A6, A7, A8, A9, A10>(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9, arg10: A10) => PromiseLike<R>): ((arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9, arg10: A10) => Promise<R>) & { withOptions: (options: Bottleneck.JobOptions, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9, arg10: A10) => Promise<R>; };
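    // Example (editor's sketch, with a hypothetical async fetchProfile(id) function):
    // wrap() returns a version of the function that runs through the limiter.
    //
    //   const limitedFetchProfile = limiter.wrap(fetchProfile);
    //   const profile = await limitedFetchProfile("user-42");
    //   // or, with per-call job options:
    //   const urgent = await limitedFetchProfile.withOptions({ priority: 1 }, "user-43");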

    submit<R>(fn: (callback: Bottleneck.Callback<R>) => void, callback: Bottleneck.Callback<R>): void;
    submit<R, A1>(fn: (arg1: A1, callback: Bottleneck.Callback<R>) => void, arg1: A1, callback: Bottleneck.Callback<R>): void;
    submit<R, A1, A2>(fn: (arg1: A1, arg2: A2, callback: Bottleneck.Callback<R>) => void, arg1: A1, arg2: A2, callback: Bottleneck.Callback<R>): void;
    submit<R, A1, A2, A3>(fn: (arg1: A1, arg2: A2, arg3: A3, callback: Bottleneck.Callback<R>) => void, arg1: A1, arg2: A2, arg3: A3, callback: Bottleneck.Callback<R>): void;
    submit<R, A1, A2, A3, A4>(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, callback: Bottleneck.Callback<R>) => void, arg1: A1, arg2: A2, arg3: A3, arg4: A4, callback: Bottleneck.Callback<R>): void;
    submit<R, A1, A2, A3, A4, A5>(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, callback: Bottleneck.Callback<R>) => void, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, callback: Bottleneck.Callback<R>): void;
    submit<R, A1, A2, A3, A4, A5, A6>(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, callback: Bottleneck.Callback<R>) => void, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, callback: Bottleneck.Callback<R>): void;
    submit<R, A1, A2, A3, A4, A5, A6, A7>(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, callback: Bottleneck.Callback<R>) => void, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, callback: Bottleneck.Callback<R>): void;
    submit<R, A1, A2, A3, A4, A5, A6, A7, A8>(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, callback: Bottleneck.Callback<R>) => void, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, callback: Bottleneck.Callback<R>): void;
    submit<R, A1, A2, A3, A4, A5, A6, A7, A8, A9>(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9, callback: Bottleneck.Callback<R>) => void, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9, callback: Bottleneck.Callback<R>): void;
    submit<R, A1, A2, A3, A4, A5, A6, A7, A8, A9, A10>(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9, arg10: A10, callback: Bottleneck.Callback<R>) => void, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9, arg10: A10, callback: Bottleneck.Callback<R>): void;

    submit<R>(options: Bottleneck.JobOptions, fn: (callback: Bottleneck.Callback<R>) => void, callback: Bottleneck.Callback<R>): void;
    submit<R, A1>(options: Bottleneck.JobOptions, fn: (arg1: A1, callback: Bottleneck.Callback<R>) => void, arg1: A1, callback: Bottleneck.Callback<R>): void;
    submit<R, A1, A2>(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2, callback: Bottleneck.Callback<R>) => void, arg1: A1, arg2: A2, callback: Bottleneck.Callback<R>): void;
    submit<R, A1, A2, A3>(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2, arg3: A3, callback: Bottleneck.Callback<R>) => void, arg1: A1, arg2: A2, arg3: A3, callback: Bottleneck.Callback<R>): void;
    submit<R, A1, A2, A3, A4>(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, callback: Bottleneck.Callback<R>) => void, arg1: A1, arg2: A2, arg3: A3, arg4: A4, callback: Bottleneck.Callback<R>): void;
    submit<R, A1, A2, A3, A4, A5>(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, callback: Bottleneck.Callback<R>) => void, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, callback: Bottleneck.Callback<R>): void;
    submit<R, A1, A2, A3, A4, A5, A6>(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, callback: Bottleneck.Callback<R>) => void, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, callback: Bottleneck.Callback<R>): void;
    submit<R, A1, A2, A3, A4, A5, A6, A7>(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, callback: Bottleneck.Callback<R>) => void, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, callback: Bottleneck.Callback<R>): void;
    submit<R, A1, A2, A3, A4, A5, A6, A7, A8>(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, callback: Bottleneck.Callback<R>) => void, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, callback: Bottleneck.Callback<R>): void;
    submit<R, A1, A2, A3, A4, A5, A6, A7, A8, A9>(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9, callback: Bottleneck.Callback<R>) => void, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9, callback: Bottleneck.Callback<R>): void;
    submit<R, A1, A2, A3, A4, A5, A6, A7, A8, A9, A10>(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9, arg10: A10, callback: Bottleneck.Callback<R>) => void, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9, arg10: A10, callback: Bottleneck.Callback<R>): void;
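    // Example (editor's sketch, assuming Node's fs module is imported): submit() is the
    // callback-style counterpart of schedule().
    //
    //   limiter.submit(fs.readFile, "./data.txt", (err, contents) => {
    //     if (err) return console.error(err);
    //     console.log(contents.toString());
    //   });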

    schedule<R>(fn: () => PromiseLike<R>): Promise<R>;
    schedule<R, A1>(fn: (arg1: A1) => PromiseLike<R>, arg1: A1): Promise<R>;
    schedule<R, A1, A2>(fn: (arg1: A1, arg2: A2) => PromiseLike<R>, arg1: A1, arg2: A2): Promise<R>;
    schedule<R, A1, A2, A3>(fn: (arg1: A1, arg2: A2, arg3: A3) => PromiseLike<R>, arg1: A1, arg2: A2, arg3: A3): Promise<R>;
    schedule<R, A1, A2, A3, A4>(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4) => PromiseLike<R>, arg1: A1, arg2: A2, arg3: A3, arg4: A4): Promise<R>;
    schedule<R, A1, A2, A3, A4, A5>(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5) => PromiseLike<R>, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5): Promise<R>;
    schedule<R, A1, A2, A3, A4, A5, A6>(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6) => PromiseLike<R>, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6): Promise<R>;
    schedule<R, A1, A2, A3, A4, A5, A6, A7>(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7) => PromiseLike<R>, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7): Promise<R>;
    schedule<R, A1, A2, A3, A4, A5, A6, A7, A8>(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8) => PromiseLike<R>, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8): Promise<R>;
    schedule<R, A1, A2, A3, A4, A5, A6, A7, A8, A9>(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9) => PromiseLike<R>, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9): Promise<R>;
    schedule<R, A1, A2, A3, A4, A5, A6, A7, A8, A9, A10>(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9, arg10: A10) => PromiseLike<R>, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9, arg10: A10): Promise<R>;

    schedule<R>(options: Bottleneck.JobOptions, fn: () => PromiseLike<R>): Promise<R>;
    schedule<R, A1>(options: Bottleneck.JobOptions, fn: (arg1: A1) => PromiseLike<R>, arg1: A1): Promise<R>;
    schedule<R, A1, A2>(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2) => PromiseLike<R>, arg1: A1, arg2: A2): Promise<R>;
    schedule<R, A1, A2, A3>(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2, arg3: A3) => PromiseLike<R>, arg1: A1, arg2: A2, arg3: A3): Promise<R>;
    schedule<R, A1, A2, A3, A4>(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4) => PromiseLike<R>, arg1: A1, arg2: A2, arg3: A3, arg4: A4): Promise<R>;
    schedule<R, A1, A2, A3, A4, A5>(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5) => PromiseLike<R>, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5): Promise<R>;
    schedule<R, A1, A2, A3, A4, A5, A6>(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6) => PromiseLike<R>, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6): Promise<R>;
    schedule<R, A1, A2, A3, A4, A5, A6, A7>(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7) => PromiseLike<R>, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7): Promise<R>;
    schedule<R, A1, A2, A3, A4, A5, A6, A7, A8>(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8) => PromiseLike<R>, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8): Promise<R>;
    schedule<R, A1, A2, A3, A4, A5, A6, A7, A8, A9>(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9) => PromiseLike<R>, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9): Promise<R>;
    schedule<R, A1, A2, A3, A4, A5, A6, A7, A8, A9, A10>(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9, arg10: A10) => PromiseLike<R>, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9, arg10: A10): Promise<R>;
  }

  export default Bottleneck;
}