node_modules

softprops 2019-10-20 19:16:50 -04:00
parent 0e414c630a
commit 78c309ef59
555 changed files with 103819 additions and 1 deletion

5
node_modules/bottleneck/.babelrc.es5 generated vendored Normal file

@@ -0,0 +1,5 @@
{
"presets": [
["@babel/preset-env", {}]
]
}

9
node_modules/bottleneck/.babelrc.lib generated vendored Normal file

@@ -0,0 +1,9 @@
{
"presets": [
["@babel/preset-env", {
"targets": {
"node": "6.0"
}
}]
]
}

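The two Babel configs above differ only in compilation target: `.babelrc.es5` lets `@babel/preset-env` use its defaults (full ES5 output for the browser bundle), while `.babelrc.lib` keeps Node 6 as the floor for the `lib/` build. As a rough sketch of what the lib config does, here is the same transform applied programmatically with `@babel/core` (the sample input is illustrative, not part of this commit):

// assumes @babel/core and @babel/preset-env are installed
import { transformSync } from "@babel/core";

const source = "const add = (a, b) => a + b;";

// Equivalent of .babelrc.lib: compile for Node >= 6.0.
const out = transformSync(source, {
  presets: [["@babel/preset-env", { targets: { node: "6.0" } }]],
});

// Node 6 already supports const and arrow functions, so the output is
// unchanged; with the .babelrc.es5 defaults it would become var + function.
console.log(out && out.code);
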
2
node_modules/bottleneck/.env generated vendored Normal file

@@ -0,0 +1,2 @@
REDIS_HOST=127.0.0.1
REDIS_PORT=6379

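The `.env` file pins the Redis host and port used by bottleneck's own test setup (the same values appear in `.travis.yml` below). A hedged sketch of feeding those variables into a clustered limiter through `clientOptions`, matching the typings later in this commit (the wiring itself is an assumption, not bottleneck's test code):

import Bottleneck from "bottleneck";

// Reuse the variables .env defines, falling back to its defaults.
const limiter = new Bottleneck({
  datastore: "ioredis",
  clientOptions: {
    host: process.env.REDIS_HOST || "127.0.0.1",
    port: Number(process.env.REDIS_PORT || 6379),
  },
});
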
25
node_modules/bottleneck/.travis.yml generated vendored Normal file

@@ -0,0 +1,25 @@
language: node_js
node_js:
- 8
branches:
only:
- master
- next
services:
- redis-server
env:
global:
- "REDIS_HOST=127.0.0.1"
- "REDIS_PORT=6379"
cache:
directories:
- $HOME/.npm
install:
- npm i
sudo: required
after_success: npx codecov --file=./coverage/lcov.info
script: npm run test-all
before_install:
- npm i -g npm@5.10
- npm --version

20
node_modules/bottleneck/LICENSE generated vendored Normal file

@@ -0,0 +1,20 @@
The MIT License (MIT)
Copyright (c) 2014 Simon Grondin
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

1027
node_modules/bottleneck/README.md generated vendored Normal file

File diff suppressed because it is too large

629
node_modules/bottleneck/bottleneck.d.ts generated vendored Normal file

@@ -0,0 +1,629 @@
declare module "bottleneck" {
namespace Bottleneck {
type ConstructorOptions = {
/**
* How many jobs can be running at the same time.
*/
readonly maxConcurrent?: number | null;
/**
* How long to wait after launching a job before launching another one.
*/
readonly minTime?: number | null;
/**
* How long can the queue get? When the queue length exceeds that value, the selected `strategy` is executed to shed the load.
*/
readonly highWater?: number | null;
/**
* Which strategy to use if the queue gets longer than the high water mark.
*/
readonly strategy?: Bottleneck.Strategy | null;
/**
* The `penalty` value used by the `Bottleneck.strategy.BLOCK` strategy.
*/
readonly penalty?: number | null;
/**
* How many jobs can be executed before the limiter stops executing jobs. If `reservoir` reaches `0`, no jobs will be executed until it is no longer `0`.
*/
readonly reservoir?: number | null;
/**
* Every `reservoirRefreshInterval` milliseconds, the `reservoir` value will be automatically reset to `reservoirRefreshAmount`.
*/
readonly reservoirRefreshInterval?: number | null;
/**
* The value to reset `reservoir` to when `reservoirRefreshInterval` is in use.
*/
readonly reservoirRefreshAmount?: number | null;
/**
* The increment applied to `reservoir` when `reservoirIncreaseInterval` is in use.
*/
readonly reservoirIncreaseAmount?: number | null;
/**
* Every `reservoirIncreaseInterval` milliseconds, the `reservoir` value will be automatically incremented by `reservoirIncreaseAmount`.
*/
readonly reservoirIncreaseInterval?: number | null;
/**
* The maximum value that `reservoir` can reach when `reservoirIncreaseInterval` is in use.
*/
readonly reservoirIncreaseMaximum?: number | null;
/**
* Optional identifier
*/
readonly id?: string | null;
/**
* Set to `false` to leave your dropped jobs hanging instead of failing them.
*/
readonly rejectOnDrop?: boolean | null;
/**
* Set to true to keep track of done jobs with counts() and jobStatus(). Uses more memory.
*/
readonly trackDoneStatus?: boolean | null;
/**
* Where the limiter stores its internal state. The default (`local`) keeps the state in the limiter itself. Set it to `redis` to enable Clustering.
*/
readonly datastore?: string | null;
/**
* Override the Promise library used by Bottleneck.
*/
readonly Promise?: any;
/**
* This object is passed directly to the redis client library you've selected.
*/
readonly clientOptions?: any;
/**
* **ioredis only.** When `clusterNodes` is not null, the client will be instantiated by calling `new Redis.Cluster(clusterNodes, clientOptions)`.
*/
readonly clusterNodes?: any;
/**
* Optional Redis/IORedis library from `require('ioredis')` or equivalent. If not, Bottleneck will attempt to require Redis/IORedis at runtime.
*/
readonly Redis?: any;
/**
* An existing Bottleneck connection object created from `new Bottleneck.RedisConnection` or `new Bottleneck.IORedisConnection` to use. If provided, `datastore`, `clientOptions` and `clusterNodes` will be ignored.
*/
readonly connection?: Bottleneck.RedisConnection | Bottleneck.IORedisConnection | null;
/**
* When set to `true`, on initial startup, the limiter will wipe any existing Bottleneck state data on the Redis db.
*/
readonly clearDatastore?: boolean | null;
/**
* The Redis TTL in milliseconds for the keys created by the limiter. When `timeout` is set, the limiter's state will be automatically removed from Redis after timeout milliseconds of inactivity. Note: timeout is 300000 (5 minutes) by default when using a Group.
*/
readonly timeout?: number | null;
[propName: string]: any;
};
type JobOptions = {
/**
* A priority between `0` and `9`. A job with a priority of `4` will _always_ be executed before a job with a priority of `5`.
*/
readonly priority?: number | null;
/**
* Must be an integer equal to or higher than `0`. The `weight` is what increases the number of running jobs (up to `maxConcurrent`, if using) and decreases the `reservoir` value (if using).
*/
readonly weight?: number | null;
/**
* The number of milliseconds a job has to finish. Jobs that take longer than their `expiration` will be failed with a `BottleneckError`.
*/
readonly expiration?: number | null;
/**
* Optional identifier, helps with debug output.
*/
readonly id?: string | null;
};
type StopOptions = {
/**
* When `true`, drop all the RECEIVED, QUEUED and RUNNING jobs. When `false`, allow those jobs to complete before resolving the Promise returned by this method.
*/
readonly dropWaitingJobs?: boolean | null;
/**
* The error message used to drop jobs when `dropWaitingJobs` is `true`.
*/
readonly dropErrorMessage?: string | null;
/**
* The error message used to reject a job added to the limiter after `stop()` has been called.
*/
readonly enqueueErrorMessage?: string | null;
};
type Callback<T> = (err: any, result: T) => void;
type ClientsList = { client?: any; subscriber?: any };
type GroupLimiterPair = { key: string; limiter: Bottleneck };
interface Strategy {}
type EventInfo = {
readonly args: any[];
readonly options: {
readonly id: string;
readonly priority: number;
readonly weight: number;
readonly expiration?: number;
};
};
type EventInfoDropped = EventInfo & {
readonly task: Function;
readonly promise: Promise<any>;
};
type EventInfoQueued = EventInfo & {
readonly reachedHWM: boolean;
readonly blocked: boolean;
};
type EventInfoRetryable = EventInfo & { readonly retryCount: number; };
enum Status {
RECEIVED = "RECEIVED",
QUEUED = "QUEUED",
RUNNING = "RUNNING",
EXECUTING = "EXECUTING",
DONE = "DONE"
}
type Counts = {
RECEIVED: number,
QUEUED: number,
RUNNING: number,
EXECUTING: number,
DONE?: number
};
type RedisConnectionOptions = {
/**
* This object is passed directly to NodeRedis' createClient() method.
*/
readonly clientOptions?: any;
/**
* An existing NodeRedis client to use. If using, `clientOptions` will be ignored.
*/
readonly client?: any;
/**
* Optional Redis library from `require('redis')` or equivalent. If not, Bottleneck will attempt to require Redis at runtime.
*/
readonly Redis?: any;
};
type IORedisConnectionOptions = {
/**
* This object is passed directly to ioredis' constructor method.
*/
readonly clientOptions?: any;
/**
* When `clusterNodes` is not null, the client will be instantiated by calling `new Redis.Cluster(clusterNodes, clientOptions)`.
*/
readonly clusterNodes?: any;
/**
* An existing ioredis client to use. If using, `clientOptions` and `clusterNodes` will be ignored.
*/
readonly client?: any;
/**
* Optional IORedis library from `require('ioredis')` or equivalent. If not, Bottleneck will attempt to require IORedis at runtime.
*/
readonly Redis?: any;
};
type BatcherOptions = {
/**
* Maximum acceptable time (in milliseconds) a request can have to wait before being flushed to the `"batch"` event.
*/
readonly maxTime?: number | null;
/**
* Maximum number of requests in a batch.
*/
readonly maxSize?: number | null;
};
class BottleneckError extends Error {
}
class RedisConnection {
constructor(options?: Bottleneck.RedisConnectionOptions);
/**
* Register an event listener.
* @param name - The event name.
* @param fn - The callback function.
*/
on(name: "error", fn: (error: any) => void): void;
/**
* Register an event listener for one event only.
* @param name - The event name.
* @param fn - The callback function.
*/
once(name: "error", fn: (error: any) => void): void;
/**
* Waits until the connection is ready and returns the raw NodeRedis clients.
*/
ready(): Promise<ClientsList>;
/**
* Close the redis clients.
* @param flush - Write transient data before closing.
*/
disconnect(flush?: boolean): Promise<void>;
}
class IORedisConnection {
constructor(options?: Bottleneck.IORedisConnectionOptions);
/**
* Register an event listener.
* @param name - The event name.
* @param fn - The callback function.
*/
on(name: "error", fn: (error: any) => void): void;
/**
* Register an event listener for one event only.
* @param name - The event name.
* @param fn - The callback function.
*/
once(name: "error", fn: (error: any) => void): void;
/**
* Waits until the connection is ready and returns the raw ioredis clients.
*/
ready(): Promise<ClientsList>;
/**
* Close the redis clients.
* @param flush - Write transient data before closing.
*/
disconnect(flush?: boolean): Promise<void>;
}
class Batcher {
constructor(options?: Bottleneck.BatcherOptions);
/**
* Register an event listener.
* @param name - The event name.
* @param fn - The callback function.
*/
on(name: string, fn: Function): void;
on(name: "error", fn: (error: any) => void): void;
on(name: "batch", fn: (batch: any[]) => void): void;
/**
* Register an event listener for one event only.
* @param name - The event name.
* @param fn - The callback function.
*/
once(name: string, fn: Function): void;
once(name: "error", fn: (error: any) => void): void;
once(name: "batch", fn: (batch: any[]) => void): void;
/**
* Add a request to the Batcher. Batches are flushed to the "batch" event.
*/
add(data: any): Promise<void>;
}
class Group {
constructor(options?: Bottleneck.ConstructorOptions);
id: string;
datastore: string;
connection?: Bottleneck.RedisConnection | Bottleneck.IORedisConnection;
/**
* Returns the limiter for the specified key.
* @param str - The limiter key.
*/
key(str: string): Bottleneck;
/**
* Register an event listener.
* @param name - The event name.
* @param fn - The callback function.
*/
on(name: string, fn: Function): void;
on(name: "error", fn: (error: any) => void): void;
on(name: "created", fn: (limiter: Bottleneck, key: string) => void): void;
/**
* Register an event listener for one event only.
* @param name - The event name.
* @param fn - The callback function.
*/
once(name: string, fn: Function): void;
once(name: "error", fn: (error: any) => void): void;
once(name: "created", fn: (limiter: Bottleneck, key: string) => void): void;
/**
* Removes all registered event listeners.
* @param name - The optional event name to remove listeners from.
*/
removeAllListeners(name?: string): void;
/**
* Updates the group settings.
* @param options - The new settings.
*/
updateSettings(options: Bottleneck.ConstructorOptions): void;
/**
* Deletes the limiter for the given key.
* Returns true if a key was deleted.
* @param str - The key
*/
deleteKey(str: string): Promise<boolean>;
/**
* Disconnects the underlying redis clients, unless the Group was created with the `connection` option.
* @param flush - Write transient data before closing.
*/
disconnect(flush?: boolean): Promise<void>;
/**
* Returns all the key-limiter pairs.
*/
limiters(): Bottleneck.GroupLimiterPair[];
/**
* Returns all Group keys in the local instance
*/
keys(): string[];
/**
* Returns all Group keys in the Cluster
*/
clusterKeys(): Promise<string[]>;
}
class Events {
constructor(object: Object);
/**
* Returns the number of listeners for the event name
* @param name - The event name.
*/
listenerCount(name: string): number;
/**
* Returns a promise with the first non-null/non-undefined result from a listener
* @param name - The event name.
* @param args - The arguments to pass to the event listeners.
*/
trigger(name: string, ...args: any[]): Promise<any>;
}
}
class Bottleneck {
public static readonly strategy: {
/**
* When adding a new job to a limiter, if the queue length reaches `highWater`, drop the oldest job with the lowest priority. This is useful when jobs that have been waiting for too long are not important anymore. If all the queued jobs are more important (based on their `priority` value) than the one being added, it will not be added.
*/
readonly LEAK: Bottleneck.Strategy;
/**
* Same as `LEAK`, except it will only drop jobs that are less important than the one being added. If all the queued jobs are as or more important than the new one, it will not be added.
*/
readonly OVERFLOW_PRIORITY: Bottleneck.Strategy;
/**
* When adding a new job to a limiter, if the queue length reaches `highWater`, do not add the new job. This strategy totally ignores priority levels.
*/
readonly OVERFLOW: Bottleneck.Strategy;
/**
* When adding a new job to a limiter, if the queue length reaches `highWater`, the limiter falls into "blocked mode". All queued jobs are dropped and no new jobs will be accepted until the limiter unblocks. It will unblock after `penalty` milliseconds have passed without receiving a new job. `penalty` is equal to `15 * minTime` (or `5000` if `minTime` is `0`) by default and can be changed by calling `changePenalty()`. This strategy is ideal when bruteforce attacks are to be expected. This strategy totally ignores priority levels.
*/
readonly BLOCK: Bottleneck.Strategy;
};
constructor(options?: Bottleneck.ConstructorOptions);
id: string;
datastore: string;
connection?: Bottleneck.RedisConnection | Bottleneck.IORedisConnection;
/**
* Returns a promise which will be resolved once the limiter is ready to accept jobs
* or rejected if it fails to start up.
*/
ready(): Promise<any>;
/**
* Returns a datastore-specific object of redis clients.
*/
clients(): Bottleneck.ClientsList;
/**
* Returns the name of the Redis pubsub channel used for this limiter
*/
channel(): string;
/**
* Disconnects the underlying redis clients, unless the limiter was created with the `connection` option.
* @param flush - Write transient data before closing.
*/
disconnect(flush?: boolean): Promise<void>;
/**
* Broadcast a string to every limiter in the Cluster.
*/
publish(message: string): Promise<void>;
/**
* Returns an object with the current number of jobs per status.
*/
counts(): Bottleneck.Counts;
/**
* Returns the status of the job with the provided job id.
*/
jobStatus(id: string): Bottleneck.Status;
/**
* Returns the ids of all jobs with the provided status.
*/
jobs(status?: Bottleneck.Status): string[];
/**
* Returns the number of requests queued.
* @param priority - Returns the number of requests queued with the specified priority.
*/
queued(priority?: number): number;
/**
* Returns the number of requests queued across the Cluster.
*/
clusterQueued(): Promise<number>;
/**
* Returns whether the limiter is empty: no jobs in the queue and none in the process of being added to the queue.
*/
empty(): boolean;
/**
* Returns the total weight of jobs in a RUNNING or EXECUTING state in the Cluster.
*/
running(): Promise<number>;
/**
* Returns the total weight of jobs in a DONE state in the Cluster.
*/
done(): Promise<number>;
/**
* If a request was added right now, would it be run immediately?
* @param weight - The weight of the request
*/
check(weight?: number): Promise<boolean>;
/**
* Register an event listener.
* @param name - The event name.
* @param fn - The callback function.
*/
on(name: "error", fn: (error: any) => void): void;
on(name: "empty", fn: () => void): void;
on(name: "idle", fn: () => void): void;
on(name: "depleted", fn: (empty: boolean) => void): void;
on(name: "message", fn: (message: string) => void): void;
on(name: "debug", fn: (message: string, info: any) => void): void;
on(name: "dropped", fn: (dropped: Bottleneck.EventInfoDropped) => void): void;
on(name: "received", fn: (info: Bottleneck.EventInfo) => void): void;
on(name: "queued", fn: (info: Bottleneck.EventInfoQueued) => void): void;
on(name: "scheduled", fn: (info: Bottleneck.EventInfo) => void): void;
on(name: "executing", fn: (info: Bottleneck.EventInfoRetryable) => void): void;
on(name: "failed", fn: (error: any, info: Bottleneck.EventInfoRetryable) => Promise<number | void | null> | number | void | null): void;
on(name: "retry", fn: (message: string, info: Bottleneck.EventInfoRetryable) => void): void;
on(name: "done", fn: (info: Bottleneck.EventInfoRetryable) => void): void;
/**
* Register an event listener for one event only.
* @param name - The event name.
* @param fn - The callback function.
*/
once(name: "error", fn: (error: any) => void): void;
once(name: "empty", fn: () => void): void;
once(name: "idle", fn: () => void): void;
once(name: "depleted", fn: (empty: boolean) => void): void;
once(name: "message", fn: (message: string) => void): void;
once(name: "debug", fn: (message: string, info: any) => void): void;
once(name: "dropped", fn: (dropped: Bottleneck.EventInfoDropped) => void): void;
once(name: "received", fn: (info: Bottleneck.EventInfo) => void): void;
once(name: "queued", fn: (info: Bottleneck.EventInfoQueued) => void): void;
once(name: "scheduled", fn: (info: Bottleneck.EventInfo) => void): void;
once(name: "executing", fn: (info: Bottleneck.EventInfoRetryable) => void): void;
once(name: "failed", fn: (error: any, info: Bottleneck.EventInfoRetryable) => Promise<number | void | null> | number | void | null): void;
once(name: "retry", fn: (message: string, info: Bottleneck.EventInfoRetryable) => void): void;
once(name: "done", fn: (info: Bottleneck.EventInfoRetryable) => void): void;
/**
* Removes all registered event listeners.
* @param name - The optional event name to remove listeners from.
*/
removeAllListeners(name?: string): void;
/**
* Changes the settings for future requests.
* @param options - The new settings.
*/
updateSettings(options?: Bottleneck.ConstructorOptions): Bottleneck;
/**
* Adds to the reservoir count and returns the new value.
*/
incrementReservoir(incrementBy: number): Promise<number>;
/**
* The `stop()` method is used to safely shut down a limiter. It prevents any new jobs from being added to the limiter and waits for all EXECUTING jobs to complete.
*/
stop(options?: Bottleneck.StopOptions): Promise<void>;
/**
* Returns the current reservoir count, if any.
*/
currentReservoir(): Promise<number | null>;
/**
* Chain this limiter to another.
* @param limiter - The limiter that requests to this limiter must also follow.
*/
chain(limiter?: Bottleneck): Bottleneck;
wrap<R>(fn: () => PromiseLike<R>): (() => Promise<R>) & { withOptions: (options: Bottleneck.JobOptions) => Promise<R>; };
wrap<R, A1>(fn: (arg1: A1) => PromiseLike<R>): ((arg1: A1) => Promise<R>) & { withOptions: (options: Bottleneck.JobOptions, arg1: A1) => Promise<R>; };
wrap<R, A1, A2>(fn: (arg1: A1, arg2: A2) => PromiseLike<R>): ((arg1: A1, arg2: A2) => Promise<R>) & { withOptions: (options: Bottleneck.JobOptions, arg1: A1, arg2: A2) => Promise<R>; };
wrap<R, A1, A2, A3>(fn: (arg1: A1, arg2: A2, arg3: A3) => PromiseLike<R>): ((arg1: A1, arg2: A2, arg3: A3) => Promise<R>) & { withOptions: (options: Bottleneck.JobOptions, arg1: A1, arg2: A2, arg3: A3) => Promise<R>; };
wrap<R, A1, A2, A3, A4>(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4) => PromiseLike<R>): ((arg1: A1, arg2: A2, arg3: A3, arg4: A4) => Promise<R>) & { withOptions: (options: Bottleneck.JobOptions, arg1: A1, arg2: A2, arg3: A3, arg4: A4) => Promise<R>; };
wrap<R, A1, A2, A3, A4, A5>(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5) => PromiseLike<R>): ((arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5) => Promise<R>) & { withOptions: (options: Bottleneck.JobOptions, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5) => Promise<R>; };
wrap<R, A1, A2, A3, A4, A5, A6>(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6) => PromiseLike<R>): ((arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6) => Promise<R>) & { withOptions: (options: Bottleneck.JobOptions, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6) => Promise<R>; };
wrap<R, A1, A2, A3, A4, A5, A6, A7>(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7) => PromiseLike<R>): ((arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7) => Promise<R>) & { withOptions: (options: Bottleneck.JobOptions, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7) => Promise<R>; };
wrap<R, A1, A2, A3, A4, A5, A6, A7, A8>(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8) => PromiseLike<R>): ((arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8) => Promise<R>) & { withOptions: (options: Bottleneck.JobOptions, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8) => Promise<R>; };
wrap<R, A1, A2, A3, A4, A5, A6, A7, A8, A9>(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9) => PromiseLike<R>): ((arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9) => Promise<R>) & { withOptions: (options: Bottleneck.JobOptions, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9) => Promise<R>; };
wrap<R, A1, A2, A3, A4, A5, A6, A7, A8, A9, A10>(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9, arg10: A10) => PromiseLike<R>): ((arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9, arg10: A10) => Promise<R>) & { withOptions: (options: Bottleneck.JobOptions, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9, arg10: A10) => Promise<R>; };
submit<R>(fn: (callback: Bottleneck.Callback<R>) => void, callback: Bottleneck.Callback<R>): void;
submit<R, A1>(fn: (arg1: A1, callback: Bottleneck.Callback<R>) => void, arg1: A1, callback: Bottleneck.Callback<R>): void;
submit<R, A1, A2>(fn: (arg1: A1, arg2: A2, callback: Bottleneck.Callback<R>) => void, arg1: A1, arg2: A2, callback: Bottleneck.Callback<R>): void;
submit<R, A1, A2, A3>(fn: (arg1: A1, arg2: A2, arg3: A3, callback: Bottleneck.Callback<R>) => void, arg1: A1, arg2: A2, arg3: A3, callback: Bottleneck.Callback<R>): void;
submit<R, A1, A2, A3, A4>(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, callback: Bottleneck.Callback<R>) => void, arg1: A1, arg2: A2, arg3: A3, arg4: A4, callback: Bottleneck.Callback<R>): void;
submit<R, A1, A2, A3, A4, A5>(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, callback: Bottleneck.Callback<R>) => void, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, callback: Bottleneck.Callback<R>): void;
submit<R, A1, A2, A3, A4, A5, A6>(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, callback: Bottleneck.Callback<R>) => void, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, callback: Bottleneck.Callback<R>): void;
submit<R, A1, A2, A3, A4, A5, A6, A7>(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, callback: Bottleneck.Callback<R>) => void, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, callback: Bottleneck.Callback<R>): void;
submit<R, A1, A2, A3, A4, A5, A6, A7, A8>(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, callback: Bottleneck.Callback<R>) => void, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, callback: Bottleneck.Callback<R>): void;
submit<R, A1, A2, A3, A4, A5, A6, A7, A8, A9>(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9, callback: Bottleneck.Callback<R>) => void, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9, callback: Bottleneck.Callback<R>): void;
submit<R, A1, A2, A3, A4, A5, A6, A7, A8, A9, A10>(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9, arg10: A10, callback: Bottleneck.Callback<R>) => void, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9, arg10: A10, callback: Bottleneck.Callback<R>): void;
submit<R>(options: Bottleneck.JobOptions, fn: (callback: Bottleneck.Callback<R>) => void, callback: Bottleneck.Callback<R>): void;
submit<R, A1>(options: Bottleneck.JobOptions, fn: (arg1: A1, callback: Bottleneck.Callback<R>) => void, arg1: A1, callback: Bottleneck.Callback<R>): void;
submit<R, A1, A2>(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2, callback: Bottleneck.Callback<R>) => void, arg1: A1, arg2: A2, callback: Bottleneck.Callback<R>): void;
submit<R, A1, A2, A3>(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2, arg3: A3, callback: Bottleneck.Callback<R>) => void, arg1: A1, arg2: A2, arg3: A3, callback: Bottleneck.Callback<R>): void;
submit<R, A1, A2, A3, A4>(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, callback: Bottleneck.Callback<R>) => void, arg1: A1, arg2: A2, arg3: A3, arg4: A4, callback: Bottleneck.Callback<R>): void;
submit<R, A1, A2, A3, A4, A5>(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, callback: Bottleneck.Callback<R>) => void, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, callback: Bottleneck.Callback<R>): void;
submit<R, A1, A2, A3, A4, A5, A6>(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, callback: Bottleneck.Callback<R>) => void, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, callback: Bottleneck.Callback<R>): void;
submit<R, A1, A2, A3, A4, A5, A6, A7>(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, callback: Bottleneck.Callback<R>) => void, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, callback: Bottleneck.Callback<R>): void;
submit<R, A1, A2, A3, A4, A5, A6, A7, A8>(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, callback: Bottleneck.Callback<R>) => void, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, callback: Bottleneck.Callback<R>): void;
submit<R, A1, A2, A3, A4, A5, A6, A7, A8, A9>(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9, callback: Bottleneck.Callback<R>) => void, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9, callback: Bottleneck.Callback<R>): void;
submit<R, A1, A2, A3, A4, A5, A6, A7, A8, A9, A10>(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9, arg10: A10, callback: Bottleneck.Callback<R>) => void, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9, arg10: A10, callback: Bottleneck.Callback<R>): void;
schedule<R>(fn: () => PromiseLike<R>): Promise<R>;
schedule<R, A1>(fn: (arg1: A1) => PromiseLike<R>, arg1: A1): Promise<R>;
schedule<R, A1, A2>(fn: (arg1: A1, arg2: A2) => PromiseLike<R>, arg1: A1, arg2: A2): Promise<R>;
schedule<R, A1, A2, A3>(fn: (arg1: A1, arg2: A2, arg3: A3) => PromiseLike<R>, arg1: A1, arg2: A2, arg3: A3): Promise<R>;
schedule<R, A1, A2, A3, A4>(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4) => PromiseLike<R>, arg1: A1, arg2: A2, arg3: A3, arg4: A4): Promise<R>;
schedule<R, A1, A2, A3, A4, A5>(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5) => PromiseLike<R>, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5): Promise<R>;
schedule<R, A1, A2, A3, A4, A5, A6>(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6) => PromiseLike<R>, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6): Promise<R>;
schedule<R, A1, A2, A3, A4, A5, A6, A7>(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7) => PromiseLike<R>, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7): Promise<R>;
schedule<R, A1, A2, A3, A4, A5, A6, A7, A8>(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8) => PromiseLike<R>, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8): Promise<R>;
schedule<R, A1, A2, A3, A4, A5, A6, A7, A8, A9>(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9) => PromiseLike<R>, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9): Promise<R>;
schedule<R, A1, A2, A3, A4, A5, A6, A7, A8, A9, A10>(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9, arg10: A10) => PromiseLike<R>, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9, arg10: A10): Promise<R>;
schedule<R>(options: Bottleneck.JobOptions, fn: () => PromiseLike<R>): Promise<R>;
schedule<R, A1>(options: Bottleneck.JobOptions, fn: (arg1: A1) => PromiseLike<R>, arg1: A1): Promise<R>;
schedule<R, A1, A2>(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2) => PromiseLike<R>, arg1: A1, arg2: A2): Promise<R>;
schedule<R, A1, A2, A3>(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2, arg3: A3) => PromiseLike<R>, arg1: A1, arg2: A2, arg3: A3): Promise<R>;
schedule<R, A1, A2, A3, A4>(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4) => PromiseLike<R>, arg1: A1, arg2: A2, arg3: A3, arg4: A4): Promise<R>;
schedule<R, A1, A2, A3, A4, A5>(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5) => PromiseLike<R>, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5): Promise<R>;
schedule<R, A1, A2, A3, A4, A5, A6>(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6) => PromiseLike<R>, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6): Promise<R>;
schedule<R, A1, A2, A3, A4, A5, A6, A7>(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7) => PromiseLike<R>, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7): Promise<R>;
schedule<R, A1, A2, A3, A4, A5, A6, A7, A8>(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8) => PromiseLike<R>, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8): Promise<R>;
schedule<R, A1, A2, A3, A4, A5, A6, A7, A8, A9>(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9) => PromiseLike<R>, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9): Promise<R>;
schedule<R, A1, A2, A3, A4, A5, A6, A7, A8, A9, A10>(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9, arg10: A10) => PromiseLike<R>, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9, arg10: A10): Promise<R>;
}
export default Bottleneck;
}

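Taken together, the typings above describe bottleneck's core flow: construct a limiter, push work through `schedule()`, `wrap()`, or the callback-style `submit()`, and observe the lifecycle through events. A minimal sketch against that surface (the URL and job id are placeholders):

import Bottleneck from "bottleneck";

// At most 2 jobs in flight, and at least 250 ms between job starts.
const limiter = new Bottleneck({ maxConcurrent: 2, minTime: 250 });

// Promise style: schedule() resolves with the wrapped function's result.
limiter
  .schedule({ id: "job-1", priority: 5 }, () => fetch("https://example.com/api"))
  .then((res) => console.log(res.status));

// wrap() returns a rate-limited version of the function itself.
const limitedFetch = limiter.wrap((url: string) => fetch(url));
limitedFetch("https://example.com/api");

// Returning a number of milliseconds from a "failed" handler retries the job.
limiter.on("failed", (error, info) => {
  if (info.retryCount < 2) return 100;
});

// A Group maintains one independent limiter per key (e.g. per user).
const group = new Bottleneck.Group({ maxConcurrent: 1 });
group.key("user-42").schedule(() => fetch("https://example.com/api"));
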
588
node_modules/bottleneck/bottleneck.d.ts.ejs generated vendored Normal file

@@ -0,0 +1,588 @@
declare module "bottleneck" {
namespace Bottleneck {
type ConstructorOptions = {
/**
* How many jobs can be running at the same time.
*/
readonly maxConcurrent?: number | null;
/**
* How long to wait after launching a job before launching another one.
*/
readonly minTime?: number | null;
/**
* How long can the queue get? When the queue length exceeds that value, the selected `strategy` is executed to shed the load.
*/
readonly highWater?: number | null;
/**
* Which strategy to use if the queue gets longer than the high water mark.
*/
readonly strategy?: Bottleneck.Strategy | null;
/**
* The `penalty` value used by the `Bottleneck.strategy.BLOCK` strategy.
*/
readonly penalty?: number | null;
/**
* How many jobs can be executed before the limiter stops executing jobs. If `reservoir` reaches `0`, no jobs will be executed until it is no longer `0`.
*/
readonly reservoir?: number | null;
/**
* Every `reservoirRefreshInterval` milliseconds, the `reservoir` value will be automatically reset to `reservoirRefreshAmount`.
*/
readonly reservoirRefreshInterval?: number | null;
/**
* The value to reset `reservoir` to when `reservoirRefreshInterval` is in use.
*/
readonly reservoirRefreshAmount?: number | null;
/**
* The increment applied to `reservoir` when `reservoirIncreaseInterval` is in use.
*/
readonly reservoirIncreaseAmount?: number | null;
/**
* Every `reservoirIncreaseInterval` milliseconds, the `reservoir` value will be automatically incremented by `reservoirIncreaseAmount`.
*/
readonly reservoirIncreaseInterval?: number | null;
/**
* The maximum value that `reservoir` can reach when `reservoirIncreaseInterval` is in use.
*/
readonly reservoirIncreaseMaximum?: number | null;
/**
* Optional identifier
*/
readonly id?: string | null;
/**
* Set to `false` to leave your dropped jobs hanging instead of failing them.
*/
readonly rejectOnDrop?: boolean | null;
/**
* Set to true to keep track of done jobs with counts() and jobStatus(). Uses more memory.
*/
readonly trackDoneStatus?: boolean | null;
/**
* Where the limiter stores its internal state. The default (`local`) keeps the state in the limiter itself. Set it to `redis` to enable Clustering.
*/
readonly datastore?: string | null;
/**
* Override the Promise library used by Bottleneck.
*/
readonly Promise?: any;
/**
* This object is passed directly to the redis client library you've selected.
*/
readonly clientOptions?: any;
/**
* **ioredis only.** When `clusterNodes` is not null, the client will be instantiated by calling `new Redis.Cluster(clusterNodes, clientOptions)`.
*/
readonly clusterNodes?: any;
/**
* Optional Redis/IORedis library from `require('ioredis')` or equivalent. If not, Bottleneck will attempt to require Redis/IORedis at runtime.
*/
readonly Redis?: any;
/**
* An existing Bottleneck connection object created from `new Bottleneck.RedisConnection` or `new Bottleneck.IORedisConnection` to use. If provided, `datastore`, `clientOptions` and `clusterNodes` will be ignored.
*/
readonly connection?: Bottleneck.RedisConnection | Bottleneck.IORedisConnection | null;
/**
* When set to `true`, on initial startup, the limiter will wipe any existing Bottleneck state data on the Redis db.
*/
readonly clearDatastore?: boolean | null;
/**
* The Redis TTL in milliseconds for the keys created by the limiter. When `timeout` is set, the limiter's state will be automatically removed from Redis after timeout milliseconds of inactivity. Note: timeout is 300000 (5 minutes) by default when using a Group.
*/
readonly timeout?: number | null;
[propName: string]: any;
};
type JobOptions = {
/**
* A priority between `0` and `9`. A job with a priority of `4` will _always_ be executed before a job with a priority of `5`.
*/
readonly priority?: number | null;
/**
* Must be an integer equal to or higher than `0`. The `weight` is what increases the number of running jobs (up to `maxConcurrent`, if using) and decreases the `reservoir` value (if using).
*/
readonly weight?: number | null;
/**
* The number of milliseconds a job has to finish. Jobs that take longer than their `expiration` will be failed with a `BottleneckError`.
*/
readonly expiration?: number | null;
/**
* Optional identifier, helps with debug output.
*/
readonly id?: string | null;
};
type StopOptions = {
/**
* When `true`, drop all the RECEIVED, QUEUED and RUNNING jobs. When `false`, allow those jobs to complete before resolving the Promise returned by this method.
*/
readonly dropWaitingJobs?: boolean | null;
/**
* The error message used to drop jobs when `dropWaitingJobs` is `true`.
*/
readonly dropErrorMessage?: string | null;
/**
* The error message used to reject a job added to the limiter after `stop()` has been called.
*/
readonly enqueueErrorMessage?: string | null;
};
type Callback<T> = (err: any, result: T) => void;
type ClientsList = { client?: any; subscriber?: any };
type GroupLimiterPair = { key: string; limiter: Bottleneck };
interface Strategy {}
type EventInfo = {
readonly args: any[];
readonly options: {
readonly id: string;
readonly priority: number;
readonly weight: number;
readonly expiration?: number;
};
};
type EventInfoDropped = EventInfo & {
readonly task: Function;
readonly promise: Promise<any>;
};
type EventInfoQueued = EventInfo & {
readonly reachedHWM: boolean;
readonly blocked: boolean;
};
type EventInfoRetryable = EventInfo & { readonly retryCount: number; };
enum Status {
RECEIVED = "RECEIVED",
QUEUED = "QUEUED",
RUNNING = "RUNNING",
EXECUTING = "EXECUTING",
DONE = "DONE"
}
type Counts = {
RECEIVED: number,
QUEUED: number,
RUNNING: number,
EXECUTING: number,
DONE?: number
};
type RedisConnectionOptions = {
/**
* This object is passed directly to NodeRedis' createClient() method.
*/
readonly clientOptions?: any;
/**
* An existing NodeRedis client to use. If using, `clientOptions` will be ignored.
*/
readonly client?: any;
/**
* Optional Redis library from `require('redis')` or equivalent. If not, Bottleneck will attempt to require Redis at runtime.
*/
readonly Redis?: any;
};
type IORedisConnectionOptions = {
/**
* This object is passed directly to ioredis' constructor method.
*/
readonly clientOptions?: any;
/**
* When `clusterNodes` is not null, the client will be instantiated by calling `new Redis.Cluster(clusterNodes, clientOptions)`.
*/
readonly clusterNodes?: any;
/**
* An existing ioredis client to use. If using, `clientOptions` and `clusterNodes` will be ignored.
*/
readonly client?: any;
/**
* Optional IORedis library from `require('ioredis')` or equivalent. If not, Bottleneck will attempt to require IORedis at runtime.
*/
readonly Redis?: any;
};
type BatcherOptions = {
/**
* Maximum acceptable time (in milliseconds) a request can have to wait before being flushed to the `"batch"` event.
*/
readonly maxTime?: number | null;
/**
* Maximum number of requests in a batch.
*/
readonly maxSize?: number | null;
};
class BottleneckError extends Error {
}
class RedisConnection {
constructor(options?: Bottleneck.RedisConnectionOptions);
/**
* Register an event listener.
* @param name - The event name.
* @param fn - The callback function.
*/
on(name: "error", fn: (error: any) => void): void;
/**
* Register an event listener for one event only.
* @param name - The event name.
* @param fn - The callback function.
*/
once(name: "error", fn: (error: any) => void): void;
/**
* Waits until the connection is ready and returns the raw NodeRedis clients.
*/
ready(): Promise<ClientsList>;
/**
* Close the redis clients.
* @param flush - Write transient data before closing.
*/
disconnect(flush?: boolean): Promise<void>;
}
class IORedisConnection {
constructor(options?: Bottleneck.IORedisConnectionOptions);
/**
* Register an event listener.
* @param name - The event name.
* @param fn - The callback function.
*/
on(name: "error", fn: (error: any) => void): void;
/**
* Register an event listener for one event only.
* @param name - The event name.
* @param fn - The callback function.
*/
once(name: "error", fn: (error: any) => void): void;
/**
* Waits until the connection is ready and returns the raw ioredis clients.
*/
ready(): Promise<ClientsList>;
/**
* Close the redis clients.
* @param flush - Write transient data before closing.
*/
disconnect(flush?: boolean): Promise<void>;
}
class Batcher {
constructor(options?: Bottleneck.BatcherOptions);
/**
* Register an event listener.
* @param name - The event name.
* @param fn - The callback function.
*/
on(name: string, fn: Function): void;
on(name: "error", fn: (error: any) => void): void;
on(name: "batch", fn: (batch: any[]) => void): void;
/**
* Register an event listener for one event only.
* @param name - The event name.
* @param fn - The callback function.
*/
once(name: string, fn: Function): void;
once(name: "error", fn: (error: any) => void): void;
once(name: "batch", fn: (batch: any[]) => void): void;
/**
* Add a request to the Batcher. Batches are flushed to the "batch" event.
*/
add(data: any): Promise<void>;
}
class Group {
constructor(options?: Bottleneck.ConstructorOptions);
id: string;
datastore: string;
connection?: Bottleneck.RedisConnection | Bottleneck.IORedisConnection;
/**
* Returns the limiter for the specified key.
* @param str - The limiter key.
*/
key(str: string): Bottleneck;
/**
* Register an event listener.
* @param name - The event name.
* @param fn - The callback function.
*/
on(name: string, fn: Function): void;
on(name: "error", fn: (error: any) => void): void;
on(name: "created", fn: (limiter: Bottleneck, key: string) => void): void;
/**
* Register an event listener for one event only.
* @param name - The event name.
* @param fn - The callback function.
*/
once(name: string, fn: Function): void;
once(name: "error", fn: (error: any) => void): void;
once(name: "created", fn: (limiter: Bottleneck, key: string) => void): void;
/**
* Removes all registered event listeners.
* @param name - The optional event name to remove listeners from.
*/
removeAllListeners(name?: string): void;
/**
* Updates the group settings.
* @param options - The new settings.
*/
updateSettings(options: Bottleneck.ConstructorOptions): void;
/**
* Deletes the limiter for the given key.
* Returns true if a key was deleted.
* @param str - The key
*/
deleteKey(str: string): Promise<boolean>;
/**
* Disconnects the underlying redis clients, unless the Group was created with the `connection` option.
* @param flush - Write transient data before closing.
*/
disconnect(flush?: boolean): Promise<void>;
/**
* Returns all the key-limiter pairs.
*/
limiters(): Bottleneck.GroupLimiterPair[];
/**
* Returns all Group keys in the local instance
*/
keys(): string[];
/**
* Returns all Group keys in the Cluster
*/
clusterKeys(): Promise<string[]>;
}
class Events {
constructor(object: Object);
/**
* Returns the number of listeners for the event name
* @param name - The event name.
*/
listenerCount(name: string): number;
/**
* Returns a promise with the first non-null/non-undefined result from a listener
* @param name - The event name.
* @param args - The arguments to pass to the event listeners.
*/
trigger(name: string, ...args: any[]): Promise<any>;
}
}
class Bottleneck {
public static readonly strategy: {
/**
* When adding a new job to a limiter, if the queue length reaches `highWater`, drop the oldest job with the lowest priority. This is useful when jobs that have been waiting for too long are not important anymore. If all the queued jobs are more important (based on their `priority` value) than the one being added, it will not be added.
*/
readonly LEAK: Bottleneck.Strategy;
/**
* Same as `LEAK`, except it will only drop jobs that are less important than the one being added. If all the queued jobs are as or more important than the new one, it will not be added.
*/
readonly OVERFLOW_PRIORITY: Bottleneck.Strategy;
/**
* When adding a new job to a limiter, if the queue length reaches `highWater`, do not add the new job. This strategy totally ignores priority levels.
*/
readonly OVERFLOW: Bottleneck.Strategy;
/**
* When adding a new job to a limiter, if the queue length reaches `highWater`, the limiter falls into "blocked mode". All queued jobs are dropped and no new jobs will be accepted until the limiter unblocks. It will unblock after `penalty` milliseconds have passed without receiving a new job. `penalty` is equal to `15 * minTime` (or `5000` if `minTime` is `0`) by default and can be changed by calling `changePenalty()`. This strategy is ideal when bruteforce attacks are to be expected. This strategy totally ignores priority levels.
*/
readonly BLOCK: Bottleneck.Strategy;
};
constructor(options?: Bottleneck.ConstructorOptions);
id: string;
datastore: string;
connection?: Bottleneck.RedisConnection | Bottleneck.IORedisConnection;
/**
* Returns a promise which will be resolved once the limiter is ready to accept jobs
* or rejected if it fails to start up.
*/
ready(): Promise<any>;
/**
* Returns a datastore-specific object of redis clients.
*/
clients(): Bottleneck.ClientsList;
/**
* Returns the name of the Redis pubsub channel used for this limiter
*/
channel(): string;
/**
* Disconnects the underlying redis clients, unless the limiter was created with the `connection` option.
* @param flush - Write transient data before closing.
*/
disconnect(flush?: boolean): Promise<void>;
/**
* Broadcast a string to every limiter in the Cluster.
*/
publish(message: string): Promise<void>;
/**
* Returns an object with the current number of jobs per status.
*/
counts(): Bottleneck.Counts;
/**
* Returns the status of the job with the provided job id.
*/
jobStatus(id: string): Bottleneck.Status;
/**
* Returns the ids of all jobs with the provided status.
*/
jobs(status?: Bottleneck.Status): string[];
/**
* Returns the number of requests queued.
* @param priority - Returns the number of requests queued with the specified priority.
*/
queued(priority?: number): number;
/**
* Returns the number of requests queued across the Cluster.
*/
clusterQueued(): Promise<number>;
/**
* Returns whether the limiter is empty: no jobs in the queue and none in the process of being added to the queue.
*/
empty(): boolean;
/**
* Returns the total weight of jobs in a RUNNING or EXECUTING state in the Cluster.
*/
running(): Promise<number>;
/**
* Returns the total weight of jobs in a DONE state in the Cluster.
*/
done(): Promise<number>;
/**
* If a request was added right now, would it be run immediately?
* @param weight - The weight of the request
*/
check(weight?: number): Promise<boolean>;
/**
* Register an event listener.
* @param name - The event name.
* @param fn - The callback function.
*/
on(name: "error", fn: (error: any) => void): void;
on(name: "empty", fn: () => void): void;
on(name: "idle", fn: () => void): void;
on(name: "depleted", fn: (empty: boolean) => void): void;
on(name: "message", fn: (message: string) => void): void;
on(name: "debug", fn: (message: string, info: any) => void): void;
on(name: "dropped", fn: (dropped: Bottleneck.EventInfoDropped) => void): void;
on(name: "received", fn: (info: Bottleneck.EventInfo) => void): void;
on(name: "queued", fn: (info: Bottleneck.EventInfoQueued) => void): void;
on(name: "scheduled", fn: (info: Bottleneck.EventInfo) => void): void;
on(name: "executing", fn: (info: Bottleneck.EventInfoRetryable) => void): void;
on(name: "failed", fn: (error: any, info: Bottleneck.EventInfoRetryable) => Promise<number | void | null> | number | void | null): void;
on(name: "retry", fn: (message: string, info: Bottleneck.EventInfoRetryable) => void): void;
on(name: "done", fn: (info: Bottleneck.EventInfoRetryable) => void): void;
/**
* Register an event listener for one event only.
* @param name - The event name.
* @param fn - The callback function.
*/
once(name: "error", fn: (error: any) => void): void;
once(name: "empty", fn: () => void): void;
once(name: "idle", fn: () => void): void;
once(name: "depleted", fn: (empty: boolean) => void): void;
once(name: "message", fn: (message: string) => void): void;
once(name: "debug", fn: (message: string, info: any) => void): void;
once(name: "dropped", fn: (dropped: Bottleneck.EventInfoDropped) => void): void;
once(name: "received", fn: (info: Bottleneck.EventInfo) => void): void;
once(name: "queued", fn: (info: Bottleneck.EventInfoQueued) => void): void;
once(name: "scheduled", fn: (info: Bottleneck.EventInfo) => void): void;
once(name: "executing", fn: (info: Bottleneck.EventInfoRetryable) => void): void;
once(name: "failed", fn: (error: any, info: Bottleneck.EventInfoRetryable) => Promise<number | void | null> | number | void | null): void;
once(name: "retry", fn: (message: string, info: Bottleneck.EventInfoRetryable) => void): void;
once(name: "done", fn: (info: Bottleneck.EventInfoRetryable) => void): void;
/**
* Removes all registered event listeners.
* @param name - The optional event name to remove listeners from.
*/
removeAllListeners(name?: string): void;
/**
* Changes the settings for future requests.
* @param options - The new settings.
*/
updateSettings(options?: Bottleneck.ConstructorOptions): Bottleneck;
/**
* Adds to the reservoir count and returns the new value.
*/
incrementReservoir(incrementBy: number): Promise<number>;
/**
* The `stop()` method is used to safely shut down a limiter. It prevents any new jobs from being added to the limiter and waits for all EXECUTING jobs to complete.
*/
stop(options?: Bottleneck.StopOptions): Promise<void>;
/**
* Returns the current reservoir count, if any.
*/
currentReservoir(): Promise<number | null>;
/**
* Chain this limiter to another.
* @param limiter - The limiter that requests to this limiter must also follow.
*/
chain(limiter?: Bottleneck): Bottleneck;
<%_ for (var count of [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) { _%>
wrap<R<%_ for (var idx = 1; idx <= count; idx++) { _%>, A<%= idx %><%_ } _%>>(fn: (<%= Array.apply(null, Array(count)).map((e, i) => i+1).map(i => `arg${i}: A${i}`).join(", ") %>) => PromiseLike<R>): ((<%_ for (var idx = 1; idx <= count; idx++) { _%><%_ if (idx > 1) { %>, <% } %>arg<%= idx %>: A<%= idx %><%_ } _%>) => Promise<R>) & { withOptions: (options: Bottleneck.JobOptions<%_ for (var idx = 1; idx <= count; idx++) { _%>, arg<%= idx %>: A<%= idx %><%_ } _%>) => Promise<R>; };
<%_ } _%>
<%_ for (var count of [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) { _%>
submit<R<%_ for (var idx = 1; idx <= count; idx++) { _%>, A<%= idx %><%_ } _%>>(fn: (<%_ for (var idx = 1; idx <= count; idx++) { _%>arg<%= idx %>: A<%= idx %>, <% } _%>callback: Bottleneck.Callback<R>) => void<%_ for (var idx = 1; idx <= count; idx++) { _%>, arg<%= idx %>: A<%= idx %><%_ } _%>, callback: Bottleneck.Callback<R>): void;
<%_ } _%>
<%_ for (var count of [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) { _%>
submit<R<%_ for (var idx = 1; idx <= count; idx++) { _%>, A<%= idx %><%_ } _%>>(options: Bottleneck.JobOptions, fn: (<%_ for (var idx = 1; idx <= count; idx++) { _%>arg<%= idx %>: A<%= idx %>, <% } _%>callback: Bottleneck.Callback<R>) => void<%_ for (var idx = 1; idx <= count; idx++) { _%>, arg<%= idx %>: A<%= idx %><%_ } _%>, callback: Bottleneck.Callback<R>): void;
<%_ } _%>
<%_ for (var count of [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) { _%>
schedule<R<%_ for (var idx = 1; idx <= count; idx++) { _%>, A<%= idx %><%_ } _%>>(fn: (<%= Array.apply(null, Array(count)).map((e, i) => i+1).map(i => `arg${i}: A${i}`).join(", ") %>) => PromiseLike<R><%_ for (var idx = 1; idx <= count; idx++) { _%>, arg<%= idx %>: A<%= idx %><%_ } _%>): Promise<R>;
<%_ } _%>
<%_ for (var count of [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) { _%>
schedule<R<%_ for (var idx = 1; idx <= count; idx++) { _%>, A<%= idx %><%_ } _%>>(options: Bottleneck.JobOptions, fn: (<%= Array.apply(null, Array(count)).map((e, i) => i+1).map(i => `arg${i}: A${i}`).join(", ") %>) => PromiseLike<R><%_ for (var idx = 1; idx <= count; idx++) { _%>, arg<%= idx %>: A<%= idx %><%_ } _%>): Promise<R>;
<%_ } _%>
}
export default Bottleneck;
}

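This `.ejs` template is the source from which `bottleneck.d.ts` above is generated; each `<%_ for (var count of [0, 1, ..., 10]) { _%>` loop stamps out one `wrap`/`submit`/`schedule` overload per arity. A sketch of rendering it with the `ejs` package (the build wiring is an assumption; bottleneck's actual build scripts are not part of this commit):

import { writeFileSync } from "fs";
import ejs from "ejs";

// The template only uses its own loop variables, so no data is passed in.
ejs.renderFile("bottleneck.d.ts.ejs", {}, (err, rendered) => {
  if (err) throw err;
  writeFileSync("bottleneck.d.ts", rendered);
});
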
30
node_modules/bottleneck/bower.json generated vendored Normal file

@@ -0,0 +1,30 @@
{
"name": "bottleneck",
"main": "bottleneck.js",
"version": "2.19.5",
"homepage": "https://github.com/SGrondin/bottleneck",
"authors": [
"SGrondin <github@simongrondin.name>"
],
"description": "Distributed task scheduler and rate limiter",
"moduleType": [
"globals",
"node"
],
"keywords": [
"async",
"rate",
"limiter",
"limiting",
"throttle",
"throttling",
"load",
"ddos"
],
"license": "MIT",
"ignore": [
"**/.*",
"node_modules",
"bower_components"
]
}

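Note the `moduleType` field: besides `node`, the package advertises `globals`, meaning the prebuilt browser bundle attaches a `Bottleneck` constructor to the page when loaded with a plain script tag. A hedged sketch of consuming that global from TypeScript (the `declare` line is an assumption about the global's shape, narrowed to the one method used here):

// After <script src="bottleneck.js"></script> the constructor is global.
declare const Bottleneck: new (options?: object) => {
  schedule<R>(fn: () => PromiseLike<R>): Promise<R>;
};

const limiter = new Bottleneck({ maxConcurrent: 1 });
limiter.schedule(() => Promise.resolve("done")).then(console.log);
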
5064
node_modules/bottleneck/es5.js generated vendored Normal file

File diff suppressed because one or more lines are too long

66
node_modules/bottleneck/lib/Batcher.js generated vendored Normal file

@@ -0,0 +1,66 @@
"use strict";
var Batcher, Events, parser;
parser = require("./parser");
Events = require("./Events");
Batcher = function () {
class Batcher {
constructor(options = {}) {
this.options = options;
parser.load(this.options, this.defaults, this);
this.Events = new Events(this);
this._arr = [];
this._resetPromise();
this._lastFlush = Date.now();
}
_resetPromise() {
return this._promise = new this.Promise((res, rej) => {
return this._resolve = res;
});
}
_flush() {
clearTimeout(this._timeout);
this._lastFlush = Date.now();
this._resolve();
this.Events.trigger("batch", this._arr);
this._arr = [];
return this._resetPromise();
}
add(data) {
var ret;
this._arr.push(data);
ret = this._promise;
if (this._arr.length === this.maxSize) {
this._flush();
} else if (this.maxTime != null && this._arr.length === 1) {
this._timeout = setTimeout(() => {
return this._flush();
}, this.maxTime);
}
return ret;
}
}
;
Batcher.prototype.defaults = {
maxTime: null,
maxSize: null,
Promise: Promise
};
return Batcher;
}.call(void 0);
module.exports = Batcher;

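`Batcher.add()` pushes an item and returns the shared promise for the current batch; `_flush()` fires either when `maxSize` items have accumulated or `maxTime` milliseconds after the first item arrived, whichever comes first. A small sketch through the public `Bottleneck.Batcher` class (the data values are placeholders):

import Bottleneck from "bottleneck";

// Flush every 10 items, or 1 second after the first item arrives.
const batcher = new Bottleneck.Batcher({ maxTime: 1000, maxSize: 10 });

batcher.on("batch", (batch) => {
  console.log(`flushing ${batch.length} items`, batch);
});

// Each add() resolves once the batch containing that item is flushed.
batcher.add({ id: 1 }).then(() => console.log("item 1 was flushed"));
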
594
node_modules/bottleneck/lib/Bottleneck.js generated vendored Normal file

@@ -0,0 +1,594 @@
"use strict";
function _slicedToArray(arr, i) { return _arrayWithHoles(arr) || _iterableToArrayLimit(arr, i) || _nonIterableRest(); }
function _iterableToArrayLimit(arr, i) { var _arr = []; var _n = true; var _d = false; var _e = undefined; try { for (var _i = arr[Symbol.iterator](), _s; !(_n = (_s = _i.next()).done); _n = true) { _arr.push(_s.value); if (i && _arr.length === i) break; } } catch (err) { _d = true; _e = err; } finally { try { if (!_n && _i["return"] != null) _i["return"](); } finally { if (_d) throw _e; } } return _arr; }
function _toArray(arr) { return _arrayWithHoles(arr) || _iterableToArray(arr) || _nonIterableRest(); }
function _nonIterableRest() { throw new TypeError("Invalid attempt to destructure non-iterable instance"); }
function _iterableToArray(iter) { if (Symbol.iterator in Object(iter) || Object.prototype.toString.call(iter) === "[object Arguments]") return Array.from(iter); }
function _arrayWithHoles(arr) { if (Array.isArray(arr)) return arr; }
function asyncGeneratorStep(gen, resolve, reject, _next, _throw, key, arg) { try { var info = gen[key](arg); var value = info.value; } catch (error) { reject(error); return; } if (info.done) { resolve(value); } else { Promise.resolve(value).then(_next, _throw); } }
function _asyncToGenerator(fn) { return function () { var self = this, args = arguments; return new Promise(function (resolve, reject) { var gen = fn.apply(self, args); function _next(value) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "next", value); } function _throw(err) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "throw", err); } _next(undefined); }); }; }
var Bottleneck,
DEFAULT_PRIORITY,
Events,
Job,
LocalDatastore,
NUM_PRIORITIES,
Queues,
RedisDatastore,
States,
Sync,
parser,
splice = [].splice;
NUM_PRIORITIES = 10;
DEFAULT_PRIORITY = 5;
parser = require("./parser");
Queues = require("./Queues");
Job = require("./Job");
LocalDatastore = require("./LocalDatastore");
RedisDatastore = require("./RedisDatastore");
Events = require("./Events");
States = require("./States");
Sync = require("./Sync");
Bottleneck = function () {
class Bottleneck {
constructor(options = {}, ...invalid) {
var storeInstanceOptions, storeOptions;
this._addToQueue = this._addToQueue.bind(this);
this._validateOptions(options, invalid);
parser.load(options, this.instanceDefaults, this);
this._queues = new Queues(NUM_PRIORITIES);
this._scheduled = {};
this._states = new States(["RECEIVED", "QUEUED", "RUNNING", "EXECUTING"].concat(this.trackDoneStatus ? ["DONE"] : []));
this._limiter = null;
this.Events = new Events(this);
this._submitLock = new Sync("submit", this.Promise);
this._registerLock = new Sync("register", this.Promise);
storeOptions = parser.load(options, this.storeDefaults, {});
this._store = function () {
if (this.datastore === "redis" || this.datastore === "ioredis" || this.connection != null) {
storeInstanceOptions = parser.load(options, this.redisStoreDefaults, {});
return new RedisDatastore(this, storeOptions, storeInstanceOptions);
} else if (this.datastore === "local") {
storeInstanceOptions = parser.load(options, this.localStoreDefaults, {});
return new LocalDatastore(this, storeOptions, storeInstanceOptions);
} else {
throw new Bottleneck.prototype.BottleneckError(`Invalid datastore type: ${this.datastore}`);
}
}.call(this);
this._queues.on("leftzero", () => {
var ref;
return (ref = this._store.heartbeat) != null ? typeof ref.ref === "function" ? ref.ref() : void 0 : void 0;
});
this._queues.on("zero", () => {
var ref;
return (ref = this._store.heartbeat) != null ? typeof ref.unref === "function" ? ref.unref() : void 0 : void 0;
});
}
_validateOptions(options, invalid) {
if (!(options != null && typeof options === "object" && invalid.length === 0)) {
throw new Bottleneck.prototype.BottleneckError("Bottleneck v2 takes a single object argument. Refer to https://github.com/SGrondin/bottleneck#upgrading-to-v2 if you're upgrading from Bottleneck v1.");
}
}
ready() {
return this._store.ready;
}
clients() {
return this._store.clients;
}
channel() {
return `b_${this.id}`;
}
channel_client() {
return `b_${this.id}_${this._store.clientId}`;
}
publish(message) {
return this._store.__publish__(message);
}
disconnect(flush = true) {
return this._store.__disconnect__(flush);
}
chain(_limiter) {
this._limiter = _limiter;
return this;
}
queued(priority) {
return this._queues.queued(priority);
}
clusterQueued() {
return this._store.__queued__();
}
empty() {
return this.queued() === 0 && this._submitLock.isEmpty();
}
running() {
return this._store.__running__();
}
done() {
return this._store.__done__();
}
jobStatus(id) {
return this._states.jobStatus(id);
}
jobs(status) {
return this._states.statusJobs(status);
}
counts() {
return this._states.statusCounts();
}
_randomIndex() {
return Math.random().toString(36).slice(2);
}
check(weight = 1) {
return this._store.__check__(weight);
}
_clearGlobalState(index) {
if (this._scheduled[index] != null) {
clearTimeout(this._scheduled[index].expiration);
delete this._scheduled[index];
return true;
} else {
return false;
}
}
_free(index, job, options, eventInfo) {
var _this = this;
return _asyncToGenerator(function* () {
var e, running;
try {
var _ref = yield _this._store.__free__(index, options.weight);
running = _ref.running;
_this.Events.trigger("debug", `Freed ${options.id}`, eventInfo);
if (running === 0 && _this.empty()) {
return _this.Events.trigger("idle");
}
} catch (error1) {
e = error1;
return _this.Events.trigger("error", e);
}
})();
}
_run(index, job, wait) {
var clearGlobalState, free, run;
job.doRun();
clearGlobalState = this._clearGlobalState.bind(this, index);
run = this._run.bind(this, index, job);
free = this._free.bind(this, index, job);
return this._scheduled[index] = {
timeout: setTimeout(() => {
return job.doExecute(this._limiter, clearGlobalState, run, free);
}, wait),
expiration: job.options.expiration != null ? setTimeout(function () {
return job.doExpire(clearGlobalState, run, free);
}, wait + job.options.expiration) : void 0,
job: job
};
}
_drainOne(capacity) {
return this._registerLock.schedule(() => {
var args, index, next, options, queue;
if (this.queued() === 0) {
return this.Promise.resolve(null);
}
queue = this._queues.getFirst();
var _next2 = next = queue.first();
options = _next2.options;
args = _next2.args;
if (capacity != null && options.weight > capacity) {
return this.Promise.resolve(null);
}
this.Events.trigger("debug", `Draining ${options.id}`, {
args,
options
});
index = this._randomIndex();
return this._store.__register__(index, options.weight, options.expiration).then(({
success,
wait,
reservoir
}) => {
var empty;
this.Events.trigger("debug", `Drained ${options.id}`, {
success,
args,
options
});
if (success) {
queue.shift();
empty = this.empty();
if (empty) {
this.Events.trigger("empty");
}
if (reservoir === 0) {
this.Events.trigger("depleted", empty);
}
this._run(index, next, wait);
return this.Promise.resolve(options.weight);
} else {
return this.Promise.resolve(null);
}
});
});
}
_drainAll(capacity, total = 0) {
return this._drainOne(capacity).then(drained => {
var newCapacity;
if (drained != null) {
newCapacity = capacity != null ? capacity - drained : capacity;
return this._drainAll(newCapacity, total + drained);
} else {
return this.Promise.resolve(total);
}
}).catch(e => {
return this.Events.trigger("error", e);
});
}
_dropAllQueued(message) {
return this._queues.shiftAll(function (job) {
return job.doDrop({
message
});
});
}
stop(options = {}) {
var done, waitForExecuting;
options = parser.load(options, this.stopDefaults);
waitForExecuting = at => {
var finished;
finished = () => {
var counts;
counts = this._states.counts;
return counts[0] + counts[1] + counts[2] + counts[3] === at;
};
return new this.Promise((resolve, reject) => {
if (finished()) {
return resolve();
} else {
return this.on("done", () => {
if (finished()) {
this.removeAllListeners("done");
return resolve();
}
});
}
});
};
done = options.dropWaitingJobs ? (this._run = function (index, next) {
return next.doDrop({
message: options.dropErrorMessage
});
}, this._drainOne = () => {
return this.Promise.resolve(null);
}, this._registerLock.schedule(() => {
return this._submitLock.schedule(() => {
var k, ref, v;
ref = this._scheduled;
for (k in ref) {
v = ref[k];
if (this.jobStatus(v.job.options.id) === "RUNNING") {
clearTimeout(v.timeout);
clearTimeout(v.expiration);
v.job.doDrop({
message: options.dropErrorMessage
});
}
}
this._dropAllQueued(options.dropErrorMessage);
return waitForExecuting(0);
});
})) : this.schedule({
priority: NUM_PRIORITIES - 1,
weight: 0
}, () => {
return waitForExecuting(1);
});
this._receive = function (job) {
return job._reject(new Bottleneck.prototype.BottleneckError(options.enqueueErrorMessage));
};
this.stop = () => {
return this.Promise.reject(new Bottleneck.prototype.BottleneckError("stop() has already been called"));
};
return done;
}
_addToQueue(job) {
var _this2 = this;
return _asyncToGenerator(function* () {
var args, blocked, error, options, reachedHWM, shifted, strategy;
args = job.args;
options = job.options;
try {
var _ref2 = yield _this2._store.__submit__(_this2.queued(), options.weight);
reachedHWM = _ref2.reachedHWM;
blocked = _ref2.blocked;
strategy = _ref2.strategy;
} catch (error1) {
error = error1;
_this2.Events.trigger("debug", `Could not queue ${options.id}`, {
args,
options,
error
});
job.doDrop({
error
});
return false;
}
if (blocked) {
job.doDrop();
return true;
} else if (reachedHWM) {
shifted = strategy === Bottleneck.prototype.strategy.LEAK ? _this2._queues.shiftLastFrom(options.priority) : strategy === Bottleneck.prototype.strategy.OVERFLOW_PRIORITY ? _this2._queues.shiftLastFrom(options.priority + 1) : strategy === Bottleneck.prototype.strategy.OVERFLOW ? job : void 0;
if (shifted != null) {
shifted.doDrop();
}
if (shifted == null || strategy === Bottleneck.prototype.strategy.OVERFLOW) {
if (shifted == null) {
job.doDrop();
}
return reachedHWM;
}
}
job.doQueue(reachedHWM, blocked);
_this2._queues.push(job);
yield _this2._drainAll();
return reachedHWM;
})();
}
_receive(job) {
if (this._states.jobStatus(job.options.id) != null) {
job._reject(new Bottleneck.prototype.BottleneckError(`A job with the same id already exists (id=${job.options.id})`));
return false;
} else {
job.doReceive();
return this._submitLock.schedule(this._addToQueue, job);
}
}
submit(...args) {
var cb, fn, job, options, ref, ref1, task;
if (typeof args[0] === "function") {
var _ref3, _ref4, _splice$call, _splice$call2;
ref = args, (_ref3 = ref, _ref4 = _toArray(_ref3), fn = _ref4[0], args = _ref4.slice(1), _ref3), (_splice$call = splice.call(args, -1), _splice$call2 = _slicedToArray(_splice$call, 1), cb = _splice$call2[0], _splice$call);
options = parser.load({}, this.jobDefaults);
} else {
var _ref5, _ref6, _splice$call3, _splice$call4;
ref1 = args, (_ref5 = ref1, _ref6 = _toArray(_ref5), options = _ref6[0], fn = _ref6[1], args = _ref6.slice(2), _ref5), (_splice$call3 = splice.call(args, -1), _splice$call4 = _slicedToArray(_splice$call3, 1), cb = _splice$call4[0], _splice$call3);
options = parser.load(options, this.jobDefaults);
}
task = (...args) => {
return new this.Promise(function (resolve, reject) {
return fn(...args, function (...args) {
return (args[0] != null ? reject : resolve)(args);
});
});
};
job = new Job(task, args, options, this.jobDefaults, this.rejectOnDrop, this.Events, this._states, this.Promise);
job.promise.then(function (args) {
return typeof cb === "function" ? cb(...args) : void 0;
}).catch(function (args) {
if (Array.isArray(args)) {
return typeof cb === "function" ? cb(...args) : void 0;
} else {
return typeof cb === "function" ? cb(args) : void 0;
}
});
return this._receive(job);
}
schedule(...args) {
var job, options, task;
if (typeof args[0] === "function") {
var _args = args;
var _args2 = _toArray(_args);
task = _args2[0];
args = _args2.slice(1);
options = {};
} else {
var _args3 = args;
var _args4 = _toArray(_args3);
options = _args4[0];
task = _args4[1];
args = _args4.slice(2);
}
job = new Job(task, args, options, this.jobDefaults, this.rejectOnDrop, this.Events, this._states, this.Promise);
this._receive(job);
return job.promise;
}
wrap(fn) {
var schedule, wrapped;
schedule = this.schedule.bind(this);
wrapped = function wrapped(...args) {
return schedule(fn.bind(this), ...args);
};
wrapped.withOptions = function (options, ...args) {
return schedule(options, fn, ...args);
};
return wrapped;
}
updateSettings(options = {}) {
var _this3 = this;
return _asyncToGenerator(function* () {
yield _this3._store.__updateSettings__(parser.overwrite(options, _this3.storeDefaults));
parser.overwrite(options, _this3.instanceDefaults, _this3);
return _this3;
})();
}
currentReservoir() {
return this._store.__currentReservoir__();
}
incrementReservoir(incr = 0) {
return this._store.__incrementReservoir__(incr);
}
}
;
Bottleneck.default = Bottleneck;
Bottleneck.Events = Events;
Bottleneck.version = Bottleneck.prototype.version = require("./version.json").version;
Bottleneck.strategy = Bottleneck.prototype.strategy = {
LEAK: 1,
OVERFLOW: 2,
OVERFLOW_PRIORITY: 4,
BLOCK: 3
};
Bottleneck.BottleneckError = Bottleneck.prototype.BottleneckError = require("./BottleneckError");
Bottleneck.Group = Bottleneck.prototype.Group = require("./Group");
Bottleneck.RedisConnection = Bottleneck.prototype.RedisConnection = require("./RedisConnection");
Bottleneck.IORedisConnection = Bottleneck.prototype.IORedisConnection = require("./IORedisConnection");
Bottleneck.Batcher = Bottleneck.prototype.Batcher = require("./Batcher");
Bottleneck.prototype.jobDefaults = {
priority: DEFAULT_PRIORITY,
weight: 1,
expiration: null,
id: "<no-id>"
};
Bottleneck.prototype.storeDefaults = {
maxConcurrent: null,
minTime: 0,
highWater: null,
strategy: Bottleneck.prototype.strategy.LEAK,
penalty: null,
reservoir: null,
reservoirRefreshInterval: null,
reservoirRefreshAmount: null,
reservoirIncreaseInterval: null,
reservoirIncreaseAmount: null,
reservoirIncreaseMaximum: null
};
Bottleneck.prototype.localStoreDefaults = {
Promise: Promise,
timeout: null,
heartbeatInterval: 250
};
Bottleneck.prototype.redisStoreDefaults = {
Promise: Promise,
timeout: null,
heartbeatInterval: 5000,
clientTimeout: 10000,
Redis: null,
clientOptions: {},
clusterNodes: null,
clearDatastore: false,
connection: null
};
Bottleneck.prototype.instanceDefaults = {
datastore: "local",
connection: null,
id: "<no-id>",
rejectOnDrop: true,
trackDoneStatus: false,
Promise: Promise
};
Bottleneck.prototype.stopDefaults = {
enqueueErrorMessage: "This limiter has been stopped and cannot accept new jobs.",
dropWaitingJobs: true,
dropErrorMessage: "This limiter has been stopped."
};
return Bottleneck;
}.call(void 0);
module.exports = Bottleneck;
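A minimal sketch of the public surface implemented above (constructor options, schedule(), wrap(), stop()), consistent with this package's README; fetchUser is a placeholder function.

const Bottleneck = require("bottleneck");

// At most 2 concurrent jobs, and at least 250 ms between job starts.
const limiter = new Bottleneck({ maxConcurrent: 2, minTime: 250 });

// schedule() returns the job's promise.
limiter.schedule(() => fetchUser(123)).then(console.log);

// wrap() produces a rate-limited version of any promise-returning function.
const throttledFetch = limiter.wrap(fetchUser);
throttledFetch(456).then(console.log);
throttledFetch.withOptions({ priority: 1, id: "vip-lookup" }, 789);

// stop() drops waiting jobs and resolves once executing jobs have settled.
limiter.stop({ dropWaitingJobs: true }).then(() => console.log("stopped"));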

5
node_modules/bottleneck/lib/BottleneckError.js generated vendored Normal file
View file

@ -0,0 +1,5 @@
"use strict";
var BottleneckError;
BottleneckError = class BottleneckError extends Error {};
module.exports = BottleneckError;
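BottleneckError is the type used for dropped, expired, and rejected-on-stop jobs; a short sketch distinguishing limiter errors from errors thrown by the job itself:

const Bottleneck = require("bottleneck");
const limiter = new Bottleneck();

limiter.stop(); // new jobs are now rejected with a BottleneckError
limiter.schedule(() => 1).catch((error) => {
  if (error instanceof Bottleneck.BottleneckError) {
    console.log("limiter refused the job:", error.message);
  } else {
    throw error; // the job itself failed
  }
});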

107
node_modules/bottleneck/lib/DLList.js generated vendored Normal file
View file

@ -0,0 +1,107 @@
"use strict";
var DLList;
DLList = class DLList {
constructor(incr, decr) {
this.incr = incr;
this.decr = decr;
this._first = null;
this._last = null;
this.length = 0;
}
push(value) {
var node;
this.length++;
if (typeof this.incr === "function") {
this.incr();
}
node = {
value,
prev: this._last,
next: null
};
if (this._last != null) {
this._last.next = node;
this._last = node;
} else {
this._first = this._last = node;
}
return void 0;
}
shift() {
var value;
if (this._first == null) {
return;
} else {
this.length--;
if (typeof this.decr === "function") {
this.decr();
}
}
value = this._first.value;
if ((this._first = this._first.next) != null) {
this._first.prev = null;
} else {
this._last = null;
}
return value;
}
first() {
if (this._first != null) {
return this._first.value;
}
}
getArray() {
var node, ref, results;
node = this._first;
results = [];
while (node != null) {
results.push((ref = node, node = node.next, ref.value));
}
return results;
}
forEachShift(cb) {
var node;
node = this.shift();
while (node != null) {
cb(node), node = this.shift();
}
return void 0;
}
debug() {
var node, ref, ref1, ref2, results;
node = this._first;
results = [];
while (node != null) {
results.push((ref = node, node = node.next, {
value: ref.value,
prev: (ref1 = ref.prev) != null ? ref1.value : void 0,
next: (ref2 = ref.next) != null ? ref2.value : void 0
}));
}
return results;
}
};
module.exports = DLList;
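DLList is an internal doubly-linked list, not part of the public API; the incr/decr hooks are how Queues tracks its aggregate length. A sketch, assuming it is required from inside lib/:

const DLList = require("./DLList");

// The two hooks fire on push and shift respectively; no-ops are fine.
const list = new DLList(() => {}, () => {});
list.push("a");
list.push("b");
console.log(list.first());    // "a" (peek, no removal)
console.log(list.shift());    // "a" (removes the head)
console.log(list.getArray()); // ["b"]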

128
node_modules/bottleneck/lib/Events.js generated vendored Normal file
View file

@ -0,0 +1,128 @@
"use strict";
function asyncGeneratorStep(gen, resolve, reject, _next, _throw, key, arg) { try { var info = gen[key](arg); var value = info.value; } catch (error) { reject(error); return; } if (info.done) { resolve(value); } else { Promise.resolve(value).then(_next, _throw); } }
function _asyncToGenerator(fn) { return function () { var self = this, args = arguments; return new Promise(function (resolve, reject) { var gen = fn.apply(self, args); function _next(value) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "next", value); } function _throw(err) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "throw", err); } _next(undefined); }); }; }
var Events;
Events = class Events {
constructor(instance) {
this.instance = instance;
this._events = {};
if (this.instance.on != null || this.instance.once != null || this.instance.removeAllListeners != null) {
throw new Error("An Emitter already exists for this object");
}
this.instance.on = (name, cb) => {
return this._addListener(name, "many", cb);
};
this.instance.once = (name, cb) => {
return this._addListener(name, "once", cb);
};
this.instance.removeAllListeners = (name = null) => {
if (name != null) {
return delete this._events[name];
} else {
return this._events = {};
}
};
}
_addListener(name, status, cb) {
var base;
if ((base = this._events)[name] == null) {
base[name] = [];
}
this._events[name].push({
cb,
status
});
return this.instance;
}
listenerCount(name) {
if (this._events[name] != null) {
return this._events[name].length;
} else {
return 0;
}
}
trigger(name, ...args) {
var _this = this;
return _asyncToGenerator(function* () {
var e, promises;
try {
if (name !== "debug") {
_this.trigger("debug", `Event triggered: ${name}`, args);
}
if (_this._events[name] == null) {
return;
}
_this._events[name] = _this._events[name].filter(function (listener) {
return listener.status !== "none";
});
promises = _this._events[name].map(
/*#__PURE__*/
function () {
var _ref = _asyncToGenerator(function* (listener) {
var e, returned;
if (listener.status === "none") {
return;
}
if (listener.status === "once") {
listener.status = "none";
}
try {
returned = typeof listener.cb === "function" ? listener.cb(...args) : void 0;
if (typeof (returned != null ? returned.then : void 0) === "function") {
return yield returned;
} else {
return returned;
}
} catch (error) {
e = error;
if ("name" !== "error") {
_this.trigger("error", e);
}
return null;
}
});
return function (_x) {
return _ref.apply(this, arguments);
};
}());
return (yield Promise.all(promises)).find(function (x) {
return x != null;
});
} catch (error) {
e = error;
if ("name" !== "error") {
_this.trigger("error", e);
}
return null;
}
})();
}
};
module.exports = Events;
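trigger() awaits every listener and resolves to the first non-null value returned; Bottleneck's retry mechanism is built on this: a "failed" listener that returns a number causes the job to be retried after that many milliseconds. The public pattern, per the README:

const Bottleneck = require("bottleneck");
const limiter = new Bottleneck({ maxConcurrent: 1 });

limiter.on("failed", async (error, jobInfo) => {
  if (jobInfo.retryCount < 2) {
    return 200; // retry this job after 200 ms
  }
  // returning nothing lets the job fail for good
});

limiter.on("retry", (message, jobInfo) => console.log(message));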

198
node_modules/bottleneck/lib/Group.js generated vendored Normal file
View file

@ -0,0 +1,198 @@
"use strict";
function _slicedToArray(arr, i) { return _arrayWithHoles(arr) || _iterableToArrayLimit(arr, i) || _nonIterableRest(); }
function _nonIterableRest() { throw new TypeError("Invalid attempt to destructure non-iterable instance"); }
function _iterableToArrayLimit(arr, i) { var _arr = []; var _n = true; var _d = false; var _e = undefined; try { for (var _i = arr[Symbol.iterator](), _s; !(_n = (_s = _i.next()).done); _n = true) { _arr.push(_s.value); if (i && _arr.length === i) break; } } catch (err) { _d = true; _e = err; } finally { try { if (!_n && _i["return"] != null) _i["return"](); } finally { if (_d) throw _e; } } return _arr; }
function _arrayWithHoles(arr) { if (Array.isArray(arr)) return arr; }
function asyncGeneratorStep(gen, resolve, reject, _next, _throw, key, arg) { try { var info = gen[key](arg); var value = info.value; } catch (error) { reject(error); return; } if (info.done) { resolve(value); } else { Promise.resolve(value).then(_next, _throw); } }
function _asyncToGenerator(fn) { return function () { var self = this, args = arguments; return new Promise(function (resolve, reject) { var gen = fn.apply(self, args); function _next(value) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "next", value); } function _throw(err) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "throw", err); } _next(undefined); }); }; }
var Events, Group, IORedisConnection, RedisConnection, Scripts, parser;
parser = require("./parser");
Events = require("./Events");
RedisConnection = require("./RedisConnection");
IORedisConnection = require("./IORedisConnection");
Scripts = require("./Scripts");
Group = function () {
class Group {
constructor(limiterOptions = {}) {
this.deleteKey = this.deleteKey.bind(this);
this.limiterOptions = limiterOptions;
parser.load(this.limiterOptions, this.defaults, this);
this.Events = new Events(this);
this.instances = {};
this.Bottleneck = require("./Bottleneck");
this._startAutoCleanup();
this.sharedConnection = this.connection != null;
if (this.connection == null) {
if (this.limiterOptions.datastore === "redis") {
this.connection = new RedisConnection(Object.assign({}, this.limiterOptions, {
Events: this.Events
}));
} else if (this.limiterOptions.datastore === "ioredis") {
this.connection = new IORedisConnection(Object.assign({}, this.limiterOptions, {
Events: this.Events
}));
}
}
}
key(key = "") {
var ref;
return (ref = this.instances[key]) != null ? ref : (() => {
var limiter;
limiter = this.instances[key] = new this.Bottleneck(Object.assign({}, this.limiterOptions, {
id: `${this.id}-${key}`,
timeout: this.timeout,
connection: this.connection
}));
this.Events.trigger("created", limiter, key);
return limiter;
})();
}
deleteKey(key = "") {
var _this = this;
return _asyncToGenerator(function* () {
var deleted, instance;
instance = _this.instances[key];
if (_this.connection) {
deleted = yield _this.connection.__runCommand__(['del', ...Scripts.allKeys(`${_this.id}-${key}`)]);
}
if (instance != null) {
delete _this.instances[key];
yield instance.disconnect();
}
return instance != null || deleted > 0;
})();
}
limiters() {
var k, ref, results, v;
ref = this.instances;
results = [];
for (k in ref) {
v = ref[k];
results.push({
key: k,
limiter: v
});
}
return results;
}
keys() {
return Object.keys(this.instances);
}
clusterKeys() {
var _this2 = this;
return _asyncToGenerator(function* () {
var cursor, end, found, i, k, keys, len, next, start;
if (_this2.connection == null) {
return _this2.Promise.resolve(_this2.keys());
}
keys = [];
cursor = null;
start = `b_${_this2.id}-`.length;
end = "_settings".length;
while (cursor !== 0) {
var _ref = yield _this2.connection.__runCommand__(["scan", cursor != null ? cursor : 0, "match", `b_${_this2.id}-*_settings`, "count", 10000]);
var _ref2 = _slicedToArray(_ref, 2);
next = _ref2[0];
found = _ref2[1];
cursor = ~~next;
for (i = 0, len = found.length; i < len; i++) {
k = found[i];
keys.push(k.slice(start, -end));
}
}
return keys;
})();
}
_startAutoCleanup() {
var _this3 = this;
var base;
clearInterval(this.interval);
return typeof (base = this.interval = setInterval(
/*#__PURE__*/
_asyncToGenerator(function* () {
var e, k, ref, results, time, v;
time = Date.now();
ref = _this3.instances;
results = [];
for (k in ref) {
v = ref[k];
try {
if (yield v._store.__groupCheck__(time)) {
results.push(_this3.deleteKey(k));
} else {
results.push(void 0);
}
} catch (error) {
e = error;
results.push(v.Events.trigger("error", e));
}
}
return results;
}), this.timeout / 2)).unref === "function" ? base.unref() : void 0;
}
updateSettings(options = {}) {
parser.overwrite(options, this.defaults, this);
parser.overwrite(options, options, this.limiterOptions);
if (options.timeout != null) {
return this._startAutoCleanup();
}
}
disconnect(flush = true) {
var ref;
if (!this.sharedConnection) {
return (ref = this.connection) != null ? ref.disconnect(flush) : void 0;
}
}
}
;
Group.prototype.defaults = {
timeout: 1000 * 60 * 5,
connection: null,
Promise: Promise,
id: "group-key"
};
return Group;
}.call(void 0);
module.exports = Group;
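A Group lazily creates one limiter per key and auto-deletes idle ones after its timeout (5 minutes by default); a sketch per the README, where sendEmail is a placeholder:

const Bottleneck = require("bottleneck");

// Every key gets its own limiter, created on first use with these options.
const group = new Bottleneck.Group({ maxConcurrent: 1, minTime: 250 });

group.key("user-123").schedule(() => sendEmail("user-123"));
group.on("created", (limiter, key) => console.log("new limiter for", key));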

186
node_modules/bottleneck/lib/IORedisConnection.js generated vendored Normal file
View file

@ -0,0 +1,186 @@
"use strict";
function _slicedToArray(arr, i) { return _arrayWithHoles(arr) || _iterableToArrayLimit(arr, i) || _nonIterableRest(); }
function _nonIterableRest() { throw new TypeError("Invalid attempt to destructure non-iterable instance"); }
function _iterableToArrayLimit(arr, i) { var _arr = []; var _n = true; var _d = false; var _e = undefined; try { for (var _i = arr[Symbol.iterator](), _s; !(_n = (_s = _i.next()).done); _n = true) { _arr.push(_s.value); if (i && _arr.length === i) break; } } catch (err) { _d = true; _e = err; } finally { try { if (!_n && _i["return"] != null) _i["return"](); } finally { if (_d) throw _e; } } return _arr; }
function _arrayWithHoles(arr) { if (Array.isArray(arr)) return arr; }
function asyncGeneratorStep(gen, resolve, reject, _next, _throw, key, arg) { try { var info = gen[key](arg); var value = info.value; } catch (error) { reject(error); return; } if (info.done) { resolve(value); } else { Promise.resolve(value).then(_next, _throw); } }
function _asyncToGenerator(fn) { return function () { var self = this, args = arguments; return new Promise(function (resolve, reject) { var gen = fn.apply(self, args); function _next(value) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "next", value); } function _throw(err) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "throw", err); } _next(undefined); }); }; }
var Events, IORedisConnection, Scripts, parser;
parser = require("./parser");
Events = require("./Events");
Scripts = require("./Scripts");
IORedisConnection = function () {
class IORedisConnection {
constructor(options = {}) {
parser.load(options, this.defaults, this);
if (this.Redis == null) {
this.Redis = eval("require")("ioredis"); // Obfuscated or else Webpack/Angular will try to inline the optional ioredis module. To override this behavior: pass the ioredis module to Bottleneck as the 'Redis' option.
}
if (this.Events == null) {
this.Events = new Events(this);
}
this.terminated = false;
if (this.clusterNodes != null) {
this.client = new this.Redis.Cluster(this.clusterNodes, this.clientOptions);
this.subscriber = new this.Redis.Cluster(this.clusterNodes, this.clientOptions);
} else if (this.client != null && this.client.duplicate == null) {
this.subscriber = new this.Redis.Cluster(this.client.startupNodes, this.client.options);
} else {
if (this.client == null) {
this.client = new this.Redis(this.clientOptions);
}
this.subscriber = this.client.duplicate();
}
this.limiters = {};
this.ready = this.Promise.all([this._setup(this.client, false), this._setup(this.subscriber, true)]).then(() => {
this._loadScripts();
return {
client: this.client,
subscriber: this.subscriber
};
});
}
_setup(client, sub) {
client.setMaxListeners(0);
return new this.Promise((resolve, reject) => {
client.on("error", e => {
return this.Events.trigger("error", e);
});
if (sub) {
client.on("message", (channel, message) => {
var ref;
return (ref = this.limiters[channel]) != null ? ref._store.onMessage(channel, message) : void 0;
});
}
if (client.status === "ready") {
return resolve();
} else {
return client.once("ready", resolve);
}
});
}
_loadScripts() {
return Scripts.names.forEach(name => {
return this.client.defineCommand(name, {
lua: Scripts.payload(name)
});
});
}
__runCommand__(cmd) {
var _this = this;
return _asyncToGenerator(function* () {
var _, deleted;
yield _this.ready;
var _ref = yield _this.client.pipeline([cmd]).exec();
var _ref2 = _slicedToArray(_ref, 1);
var _ref2$ = _slicedToArray(_ref2[0], 2);
_ = _ref2$[0];
deleted = _ref2$[1];
return deleted;
})();
}
__addLimiter__(instance) {
return this.Promise.all([instance.channel(), instance.channel_client()].map(channel => {
return new this.Promise((resolve, reject) => {
return this.subscriber.subscribe(channel, () => {
this.limiters[channel] = instance;
return resolve();
});
});
}));
}
__removeLimiter__(instance) {
var _this2 = this;
return [instance.channel(), instance.channel_client()].forEach(
/*#__PURE__*/
function () {
var _ref3 = _asyncToGenerator(function* (channel) {
if (!_this2.terminated) {
yield _this2.subscriber.unsubscribe(channel);
}
return delete _this2.limiters[channel];
});
return function (_x) {
return _ref3.apply(this, arguments);
};
}());
}
__scriptArgs__(name, id, args, cb) {
var keys;
keys = Scripts.keys(name, id);
return [keys.length].concat(keys, args, cb);
}
__scriptFn__(name) {
return this.client[name].bind(this.client);
}
disconnect(flush = true) {
var i, k, len, ref;
ref = Object.keys(this.limiters);
for (i = 0, len = ref.length; i < len; i++) {
k = ref[i];
clearInterval(this.limiters[k]._store.heartbeat);
}
this.limiters = {};
this.terminated = true;
if (flush) {
return this.Promise.all([this.client.quit(), this.subscriber.quit()]);
} else {
this.client.disconnect();
this.subscriber.disconnect();
return this.Promise.resolve();
}
}
}
;
IORedisConnection.prototype.datastore = "ioredis";
IORedisConnection.prototype.defaults = {
Redis: null,
clientOptions: {},
clusterNodes: null,
client: null,
Promise: Promise,
Events: null
};
return IORedisConnection;
}.call(void 0);
module.exports = IORedisConnection;
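The client option lets an existing ioredis client or Cluster be reused; the connection duplicates it for pub/sub. A sketch, assuming the optional ioredis package is installed:

const Redis = require("ioredis");
const Bottleneck = require("bottleneck");

const connection = new Bottleneck.IORedisConnection({
  client: new Redis({ host: "127.0.0.1", port: 6379 })
});
connection.on("error", (err) => console.error(err));

// Passing a connection implies the Redis datastore for this limiter.
const limiter = new Bottleneck({ id: "shared-limiter", connection });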

215
node_modules/bottleneck/lib/Job.js generated vendored Normal file
View file

@ -0,0 +1,215 @@
"use strict";
function asyncGeneratorStep(gen, resolve, reject, _next, _throw, key, arg) { try { var info = gen[key](arg); var value = info.value; } catch (error) { reject(error); return; } if (info.done) { resolve(value); } else { Promise.resolve(value).then(_next, _throw); } }
function _asyncToGenerator(fn) { return function () { var self = this, args = arguments; return new Promise(function (resolve, reject) { var gen = fn.apply(self, args); function _next(value) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "next", value); } function _throw(err) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "throw", err); } _next(undefined); }); }; }
var BottleneckError, DEFAULT_PRIORITY, Job, NUM_PRIORITIES, parser;
NUM_PRIORITIES = 10;
DEFAULT_PRIORITY = 5;
parser = require("./parser");
BottleneckError = require("./BottleneckError");
Job = class Job {
constructor(task, args, options, jobDefaults, rejectOnDrop, Events, _states, Promise) {
this.task = task;
this.args = args;
this.rejectOnDrop = rejectOnDrop;
this.Events = Events;
this._states = _states;
this.Promise = Promise;
this.options = parser.load(options, jobDefaults);
this.options.priority = this._sanitizePriority(this.options.priority);
if (this.options.id === jobDefaults.id) {
this.options.id = `${this.options.id}-${this._randomIndex()}`;
}
this.promise = new this.Promise((_resolve, _reject) => {
this._resolve = _resolve;
this._reject = _reject;
});
this.retryCount = 0;
}
_sanitizePriority(priority) {
var sProperty;
sProperty = ~~priority !== priority ? DEFAULT_PRIORITY : priority;
if (sProperty < 0) {
return 0;
} else if (sProperty > NUM_PRIORITIES - 1) {
return NUM_PRIORITIES - 1;
} else {
return sProperty;
}
}
_randomIndex() {
return Math.random().toString(36).slice(2);
}
doDrop({
error,
message = "This job has been dropped by Bottleneck"
} = {}) {
if (this._states.remove(this.options.id)) {
if (this.rejectOnDrop) {
this._reject(error != null ? error : new BottleneckError(message));
}
this.Events.trigger("dropped", {
args: this.args,
options: this.options,
task: this.task,
promise: this.promise
});
return true;
} else {
return false;
}
}
_assertStatus(expected) {
var status;
status = this._states.jobStatus(this.options.id);
if (!(status === expected || expected === "DONE" && status === null)) {
throw new BottleneckError(`Invalid job status ${status}, expected ${expected}. Please open an issue at https://github.com/SGrondin/bottleneck/issues`);
}
}
doReceive() {
this._states.start(this.options.id);
return this.Events.trigger("received", {
args: this.args,
options: this.options
});
}
doQueue(reachedHWM, blocked) {
this._assertStatus("RECEIVED");
this._states.next(this.options.id);
return this.Events.trigger("queued", {
args: this.args,
options: this.options,
reachedHWM,
blocked
});
}
doRun() {
if (this.retryCount === 0) {
this._assertStatus("QUEUED");
this._states.next(this.options.id);
} else {
this._assertStatus("EXECUTING");
}
return this.Events.trigger("scheduled", {
args: this.args,
options: this.options
});
}
doExecute(chained, clearGlobalState, run, free) {
var _this = this;
return _asyncToGenerator(function* () {
var error, eventInfo, passed;
if (_this.retryCount === 0) {
_this._assertStatus("RUNNING");
_this._states.next(_this.options.id);
} else {
_this._assertStatus("EXECUTING");
}
eventInfo = {
args: _this.args,
options: _this.options,
retryCount: _this.retryCount
};
_this.Events.trigger("executing", eventInfo);
try {
passed = yield chained != null ? chained.schedule(_this.options, _this.task, ..._this.args) : _this.task(..._this.args);
if (clearGlobalState()) {
_this.doDone(eventInfo);
yield free(_this.options, eventInfo);
_this._assertStatus("DONE");
return _this._resolve(passed);
}
} catch (error1) {
error = error1;
return _this._onFailure(error, eventInfo, clearGlobalState, run, free);
}
})();
}
doExpire(clearGlobalState, run, free) {
var error, eventInfo;
if (this._states.jobStatus(this.options.id) === "RUNNING") { // advance RUNNING -> EXECUTING before expiring
this._states.next(this.options.id);
}
this._assertStatus("EXECUTING");
eventInfo = {
args: this.args,
options: this.options,
retryCount: this.retryCount
};
error = new BottleneckError(`This job timed out after ${this.options.expiration} ms.`);
return this._onFailure(error, eventInfo, clearGlobalState, run, free);
}
_onFailure(error, eventInfo, clearGlobalState, run, free) {
var _this2 = this;
return _asyncToGenerator(function* () {
var retry, retryAfter;
if (clearGlobalState()) {
retry = yield _this2.Events.trigger("failed", error, eventInfo);
if (retry != null) {
retryAfter = ~~retry;
_this2.Events.trigger("retry", `Retrying ${_this2.options.id} after ${retryAfter} ms`, eventInfo);
_this2.retryCount++;
return run(retryAfter);
} else {
_this2.doDone(eventInfo);
yield free(_this2.options, eventInfo);
_this2._assertStatus("DONE");
return _this2._reject(error);
}
}
})();
}
doDone(eventInfo) {
this._assertStatus("EXECUTING");
this._states.next(this.options.id);
return this.Events.trigger("done", eventInfo);
}
};
module.exports = Job;
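A job moves through RECEIVED, QUEUED, RUNNING and EXECUTING (plus DONE when trackDoneStatus is on), and the limiter's jobStatus()/counts() methods expose that state machine. A sketch, where doWork is a placeholder:

const Bottleneck = require("bottleneck");
const limiter = new Bottleneck({ trackDoneStatus: true });

limiter.schedule({ id: "job-1" }, () => doWork());
console.log(limiter.jobStatus("job-1")); // "RECEIVED" right after scheduling
console.log(limiter.counts()); // e.g. { RECEIVED: 1, QUEUED: 0, RUNNING: 0, EXECUTING: 0, DONE: 0 }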

287
node_modules/bottleneck/lib/LocalDatastore.js generated vendored Normal file
View file

@ -0,0 +1,287 @@
"use strict";
function asyncGeneratorStep(gen, resolve, reject, _next, _throw, key, arg) { try { var info = gen[key](arg); var value = info.value; } catch (error) { reject(error); return; } if (info.done) { resolve(value); } else { Promise.resolve(value).then(_next, _throw); } }
function _asyncToGenerator(fn) { return function () { var self = this, args = arguments; return new Promise(function (resolve, reject) { var gen = fn.apply(self, args); function _next(value) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "next", value); } function _throw(err) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "throw", err); } _next(undefined); }); }; }
var BottleneckError, LocalDatastore, parser;
parser = require("./parser");
BottleneckError = require("./BottleneckError");
LocalDatastore = class LocalDatastore {
constructor(instance, storeOptions, storeInstanceOptions) {
this.instance = instance;
this.storeOptions = storeOptions;
this.clientId = this.instance._randomIndex();
parser.load(storeInstanceOptions, storeInstanceOptions, this);
this._nextRequest = this._lastReservoirRefresh = this._lastReservoirIncrease = Date.now();
this._running = 0;
this._done = 0;
this._unblockTime = 0;
this.ready = this.Promise.resolve();
this.clients = {};
this._startHeartbeat();
}
_startHeartbeat() {
var base;
if (this.heartbeat == null && (this.storeOptions.reservoirRefreshInterval != null && this.storeOptions.reservoirRefreshAmount != null || this.storeOptions.reservoirIncreaseInterval != null && this.storeOptions.reservoirIncreaseAmount != null)) {
return typeof (base = this.heartbeat = setInterval(() => {
var amount, incr, maximum, now, reservoir;
now = Date.now();
if (this.storeOptions.reservoirRefreshInterval != null && now >= this._lastReservoirRefresh + this.storeOptions.reservoirRefreshInterval) {
this._lastReservoirRefresh = now;
this.storeOptions.reservoir = this.storeOptions.reservoirRefreshAmount;
this.instance._drainAll(this.computeCapacity());
}
if (this.storeOptions.reservoirIncreaseInterval != null && now >= this._lastReservoirIncrease + this.storeOptions.reservoirIncreaseInterval) {
var _this$storeOptions = this.storeOptions;
amount = _this$storeOptions.reservoirIncreaseAmount;
maximum = _this$storeOptions.reservoirIncreaseMaximum;
reservoir = _this$storeOptions.reservoir;
this._lastReservoirIncrease = now;
incr = maximum != null ? Math.min(amount, maximum - reservoir) : amount;
if (incr > 0) {
this.storeOptions.reservoir += incr;
return this.instance._drainAll(this.computeCapacity());
}
}
}, this.heartbeatInterval)).unref === "function" ? base.unref() : void 0;
} else {
return clearInterval(this.heartbeat);
}
}
__publish__(message) {
var _this = this;
return _asyncToGenerator(function* () {
yield _this.yieldLoop();
return _this.instance.Events.trigger("message", message.toString());
})();
}
__disconnect__(flush) {
var _this2 = this;
return _asyncToGenerator(function* () {
yield _this2.yieldLoop();
clearInterval(_this2.heartbeat);
return _this2.Promise.resolve();
})();
}
yieldLoop(t = 0) {
return new this.Promise(function (resolve, reject) {
return setTimeout(resolve, t);
});
}
computePenalty() {
var ref;
return (ref = this.storeOptions.penalty) != null ? ref : 15 * this.storeOptions.minTime || 5000;
}
__updateSettings__(options) {
var _this3 = this;
return _asyncToGenerator(function* () {
yield _this3.yieldLoop();
parser.overwrite(options, options, _this3.storeOptions);
_this3._startHeartbeat();
_this3.instance._drainAll(_this3.computeCapacity());
return true;
})();
}
__running__() {
var _this4 = this;
return _asyncToGenerator(function* () {
yield _this4.yieldLoop();
return _this4._running;
})();
}
__queued__() {
var _this5 = this;
return _asyncToGenerator(function* () {
yield _this5.yieldLoop();
return _this5.instance.queued();
})();
}
__done__() {
var _this6 = this;
return _asyncToGenerator(function* () {
yield _this6.yieldLoop();
return _this6._done;
})();
}
__groupCheck__(time) {
var _this7 = this;
return _asyncToGenerator(function* () {
yield _this7.yieldLoop();
return _this7._nextRequest + _this7.timeout < time;
})();
}
computeCapacity() {
var maxConcurrent, reservoir;
var _this$storeOptions2 = this.storeOptions;
maxConcurrent = _this$storeOptions2.maxConcurrent;
reservoir = _this$storeOptions2.reservoir;
if (maxConcurrent != null && reservoir != null) {
return Math.min(maxConcurrent - this._running, reservoir);
} else if (maxConcurrent != null) {
return maxConcurrent - this._running;
} else if (reservoir != null) {
return reservoir;
} else {
return null;
}
}
conditionsCheck(weight) {
var capacity;
capacity = this.computeCapacity();
return capacity == null || weight <= capacity;
}
__incrementReservoir__(incr) {
var _this8 = this;
return _asyncToGenerator(function* () {
var reservoir;
yield _this8.yieldLoop();
reservoir = _this8.storeOptions.reservoir += incr;
_this8.instance._drainAll(_this8.computeCapacity());
return reservoir;
})();
}
__currentReservoir__() {
var _this9 = this;
return _asyncToGenerator(function* () {
yield _this9.yieldLoop();
return _this9.storeOptions.reservoir;
})();
}
isBlocked(now) {
return this._unblockTime >= now;
}
check(weight, now) {
return this.conditionsCheck(weight) && this._nextRequest - now <= 0;
}
__check__(weight) {
var _this10 = this;
return _asyncToGenerator(function* () {
var now;
yield _this10.yieldLoop();
now = Date.now();
return _this10.check(weight, now);
})();
}
__register__(index, weight, expiration) {
var _this11 = this;
return _asyncToGenerator(function* () {
var now, wait;
yield _this11.yieldLoop();
now = Date.now();
if (_this11.conditionsCheck(weight)) {
_this11._running += weight;
if (_this11.storeOptions.reservoir != null) {
_this11.storeOptions.reservoir -= weight;
}
wait = Math.max(_this11._nextRequest - now, 0);
_this11._nextRequest = now + wait + _this11.storeOptions.minTime;
return {
success: true,
wait,
reservoir: _this11.storeOptions.reservoir
};
} else {
return {
success: false
};
}
})();
}
strategyIsBlock() {
return this.storeOptions.strategy === 3;
}
__submit__(queueLength, weight) {
var _this12 = this;
return _asyncToGenerator(function* () {
var blocked, now, reachedHWM;
yield _this12.yieldLoop();
if (_this12.storeOptions.maxConcurrent != null && weight > _this12.storeOptions.maxConcurrent) {
throw new BottleneckError(`Impossible to add a job having a weight of ${weight} to a limiter having a maxConcurrent setting of ${_this12.storeOptions.maxConcurrent}`);
}
now = Date.now();
reachedHWM = _this12.storeOptions.highWater != null && queueLength === _this12.storeOptions.highWater && !_this12.check(weight, now);
blocked = _this12.strategyIsBlock() && (reachedHWM || _this12.isBlocked(now));
if (blocked) {
_this12._unblockTime = now + _this12.computePenalty();
_this12._nextRequest = _this12._unblockTime + _this12.storeOptions.minTime;
_this12.instance._dropAllQueued();
}
return {
reachedHWM,
blocked,
strategy: _this12.storeOptions.strategy
};
})();
}
__free__(index, weight) {
var _this13 = this;
return _asyncToGenerator(function* () {
yield _this13.yieldLoop();
_this13._running -= weight;
_this13._done += weight;
_this13.instance._drainAll(_this13.computeCapacity());
return {
running: _this13._running
};
})();
}
};
module.exports = LocalDatastore;
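The heartbeat above drives the reservoir refresh and increase options; a sketch of both configurations on a local limiter:

const Bottleneck = require("bottleneck");

// Allow 100 executions, resetting the allowance every 60 seconds.
const refreshing = new Bottleneck({
  reservoir: 100,
  reservoirRefreshAmount: 100,
  reservoirRefreshInterval: 60 * 1000 // per the README, a multiple of 250 ms locally
});

// Or trickle capacity back in: +2 per second, capped at 10.
const increasing = new Bottleneck({
  reservoir: 0,
  reservoirIncreaseAmount: 2,
  reservoirIncreaseInterval: 1000,
  reservoirIncreaseMaximum: 10
});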

77
node_modules/bottleneck/lib/Queues.js generated vendored Normal file
View file

@ -0,0 +1,77 @@
"use strict";
var DLList, Events, Queues;
DLList = require("./DLList");
Events = require("./Events");
Queues = class Queues {
constructor(num_priorities) {
var i;
this.Events = new Events(this);
this._length = 0;
this._lists = function () {
var j, ref, results;
results = [];
for (i = j = 1, ref = num_priorities; 1 <= ref ? j <= ref : j >= ref; i = 1 <= ref ? ++j : --j) {
results.push(new DLList(() => {
return this.incr();
}, () => {
return this.decr();
}));
}
return results;
}.call(this);
}
incr() {
if (this._length++ === 0) {
return this.Events.trigger("leftzero");
}
}
decr() {
if (--this._length === 0) {
return this.Events.trigger("zero");
}
}
push(job) {
return this._lists[job.options.priority].push(job);
}
queued(priority) {
if (priority != null) {
return this._lists[priority].length;
} else {
return this._length;
}
}
shiftAll(fn) {
return this._lists.forEach(function (list) {
return list.forEachShift(fn);
});
}
getFirst(arr = this._lists) {
var j, len, list;
for (j = 0, len = arr.length; j < len; j++) {
list = arr[j];
if (list.length > 0) {
return list;
}
}
return [];
}
shiftLastFrom(priority) {
return this.getFirst(this._lists.slice(priority).reverse()).shift();
}
};
module.exports = Queues;
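Queues is internal: one DLList per priority level, scanned from highest priority (0) to lowest (9). Its user-facing counterpart is the per-job priority option:

const Bottleneck = require("bottleneck");
const limiter = new Bottleneck({ maxConcurrent: 1 });

// Lower number = higher priority; default is 5, valid range is 0-9.
limiter.schedule({ priority: 1, id: "urgent" }, () => doUrgentWork());         // placeholder fns
limiter.schedule({ priority: 9, id: "background" }, () => doBackgroundWork());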

193
node_modules/bottleneck/lib/RedisConnection.js generated vendored Normal file
View file

@ -0,0 +1,193 @@
"use strict";
function asyncGeneratorStep(gen, resolve, reject, _next, _throw, key, arg) { try { var info = gen[key](arg); var value = info.value; } catch (error) { reject(error); return; } if (info.done) { resolve(value); } else { Promise.resolve(value).then(_next, _throw); } }
function _asyncToGenerator(fn) { return function () { var self = this, args = arguments; return new Promise(function (resolve, reject) { var gen = fn.apply(self, args); function _next(value) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "next", value); } function _throw(err) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "throw", err); } _next(undefined); }); }; }
var Events, RedisConnection, Scripts, parser;
parser = require("./parser");
Events = require("./Events");
Scripts = require("./Scripts");
RedisConnection = function () {
class RedisConnection {
constructor(options = {}) {
parser.load(options, this.defaults, this);
if (this.Redis == null) {
this.Redis = eval("require")("redis"); // Obfuscated or else Webpack/Angular will try to inline the optional redis module. To override this behavior: pass the redis module to Bottleneck as the 'Redis' option.
}
if (this.Events == null) {
this.Events = new Events(this);
}
this.terminated = false;
if (this.client == null) {
this.client = this.Redis.createClient(this.clientOptions);
}
this.subscriber = this.client.duplicate();
this.limiters = {};
this.shas = {};
this.ready = this.Promise.all([this._setup(this.client, false), this._setup(this.subscriber, true)]).then(() => {
return this._loadScripts();
}).then(() => {
return {
client: this.client,
subscriber: this.subscriber
};
});
}
_setup(client, sub) {
client.setMaxListeners(0);
return new this.Promise((resolve, reject) => {
client.on("error", e => {
return this.Events.trigger("error", e);
});
if (sub) {
client.on("message", (channel, message) => {
var ref;
return (ref = this.limiters[channel]) != null ? ref._store.onMessage(channel, message) : void 0;
});
}
if (client.ready) {
return resolve();
} else {
return client.once("ready", resolve);
}
});
}
_loadScript(name) {
return new this.Promise((resolve, reject) => {
var payload;
payload = Scripts.payload(name);
return this.client.multi([["script", "load", payload]]).exec((err, replies) => {
if (err != null) {
return reject(err);
}
this.shas[name] = replies[0];
return resolve(replies[0]);
});
});
}
_loadScripts() {
return this.Promise.all(Scripts.names.map(k => {
return this._loadScript(k);
}));
}
__runCommand__(cmd) {
var _this = this;
return _asyncToGenerator(function* () {
yield _this.ready;
return new _this.Promise((resolve, reject) => {
return _this.client.multi([cmd]).exec_atomic(function (err, replies) {
if (err != null) {
return reject(err);
} else {
return resolve(replies[0]);
}
});
});
})();
}
__addLimiter__(instance) {
return this.Promise.all([instance.channel(), instance.channel_client()].map(channel => {
return new this.Promise((resolve, reject) => {
var handler;
handler = chan => {
if (chan === channel) {
this.subscriber.removeListener("subscribe", handler);
this.limiters[channel] = instance;
return resolve();
}
};
this.subscriber.on("subscribe", handler);
return this.subscriber.subscribe(channel);
});
}));
}
__removeLimiter__(instance) {
var _this2 = this;
return this.Promise.all([instance.channel(), instance.channel_client()].map(
/*#__PURE__*/
function () {
var _ref = _asyncToGenerator(function* (channel) {
if (!_this2.terminated) {
yield new _this2.Promise((resolve, reject) => {
return _this2.subscriber.unsubscribe(channel, function (err, chan) {
if (err != null) {
return reject(err);
}
if (chan === channel) {
return resolve();
}
});
});
}
return delete _this2.limiters[channel];
});
return function (_x) {
return _ref.apply(this, arguments);
};
}()));
}
__scriptArgs__(name, id, args, cb) {
var keys;
keys = Scripts.keys(name, id);
return [this.shas[name], keys.length].concat(keys, args, cb);
}
__scriptFn__(name) {
return this.client.evalsha.bind(this.client);
}
disconnect(flush = true) {
var i, k, len, ref;
ref = Object.keys(this.limiters);
for (i = 0, len = ref.length; i < len; i++) {
k = ref[i];
clearInterval(this.limiters[k]._store.heartbeat);
}
this.limiters = {};
this.terminated = true;
this.client.end(flush);
this.subscriber.end(flush);
return this.Promise.resolve();
}
}
;
RedisConnection.prototype.datastore = "redis";
RedisConnection.prototype.defaults = {
Redis: null,
clientOptions: {},
client: null,
Promise: Promise,
Events: null
};
return RedisConnection;
}.call(void 0);
module.exports = RedisConnection;
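Sharing one RedisConnection avoids opening a client/subscriber pair per limiter; a sketch per the README, assuming the optional redis package is installed:

const Bottleneck = require("bottleneck");

const connection = new Bottleneck.RedisConnection({
  clientOptions: { host: "127.0.0.1", port: 6379 }
});
connection.on("error", (err) => console.error(err));

// Both limiters reuse the same two Redis clients.
const limiterA = new Bottleneck({ id: "limiter-a", connection });
const limiterB = new Bottleneck({ id: "limiter-b", connection });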

352
node_modules/bottleneck/lib/RedisDatastore.js generated vendored Normal file
View file

@ -0,0 +1,352 @@
"use strict";
function _slicedToArray(arr, i) { return _arrayWithHoles(arr) || _iterableToArrayLimit(arr, i) || _nonIterableRest(); }
function _nonIterableRest() { throw new TypeError("Invalid attempt to destructure non-iterable instance"); }
function _iterableToArrayLimit(arr, i) { var _arr = []; var _n = true; var _d = false; var _e = undefined; try { for (var _i = arr[Symbol.iterator](), _s; !(_n = (_s = _i.next()).done); _n = true) { _arr.push(_s.value); if (i && _arr.length === i) break; } } catch (err) { _d = true; _e = err; } finally { try { if (!_n && _i["return"] != null) _i["return"](); } finally { if (_d) throw _e; } } return _arr; }
function _arrayWithHoles(arr) { if (Array.isArray(arr)) return arr; }
function asyncGeneratorStep(gen, resolve, reject, _next, _throw, key, arg) { try { var info = gen[key](arg); var value = info.value; } catch (error) { reject(error); return; } if (info.done) { resolve(value); } else { Promise.resolve(value).then(_next, _throw); } }
function _asyncToGenerator(fn) { return function () { var self = this, args = arguments; return new Promise(function (resolve, reject) { var gen = fn.apply(self, args); function _next(value) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "next", value); } function _throw(err) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "throw", err); } _next(undefined); }); }; }
var BottleneckError, IORedisConnection, RedisConnection, RedisDatastore, parser;
parser = require("./parser");
BottleneckError = require("./BottleneckError");
RedisConnection = require("./RedisConnection");
IORedisConnection = require("./IORedisConnection");
RedisDatastore = class RedisDatastore {
constructor(instance, storeOptions, storeInstanceOptions) {
this.instance = instance;
this.storeOptions = storeOptions;
this.originalId = this.instance.id;
this.clientId = this.instance._randomIndex();
parser.load(storeInstanceOptions, storeInstanceOptions, this);
this.clients = {};
this.capacityPriorityCounters = {};
this.sharedConnection = this.connection != null;
if (this.connection == null) {
this.connection = this.instance.datastore === "redis" ? new RedisConnection({
Redis: this.Redis,
clientOptions: this.clientOptions,
Promise: this.Promise,
Events: this.instance.Events
}) : this.instance.datastore === "ioredis" ? new IORedisConnection({
Redis: this.Redis,
clientOptions: this.clientOptions,
clusterNodes: this.clusterNodes,
Promise: this.Promise,
Events: this.instance.Events
}) : void 0;
}
this.instance.connection = this.connection;
this.instance.datastore = this.connection.datastore;
this.ready = this.connection.ready.then(clients => {
this.clients = clients;
return this.runScript("init", this.prepareInitSettings(this.clearDatastore));
}).then(() => {
return this.connection.__addLimiter__(this.instance);
}).then(() => {
return this.runScript("register_client", [this.instance.queued()]);
}).then(() => {
var base;
if (typeof (base = this.heartbeat = setInterval(() => {
return this.runScript("heartbeat", []).catch(e => {
return this.instance.Events.trigger("error", e);
});
}, this.heartbeatInterval)).unref === "function") {
base.unref();
}
return this.clients;
});
}
__publish__(message) {
var _this = this;
return _asyncToGenerator(function* () {
var client;
var _ref = yield _this.ready;
client = _ref.client;
return client.publish(_this.instance.channel(), `message:${message.toString()}`);
})();
}
onMessage(channel, message) {
var _this2 = this;
return _asyncToGenerator(function* () {
var capacity, counter, data, drained, e, newCapacity, pos, priorityClient, rawCapacity, type;
try {
pos = message.indexOf(":");
var _ref2 = [message.slice(0, pos), message.slice(pos + 1)];
type = _ref2[0];
data = _ref2[1];
if (type === "capacity") {
return yield _this2.instance._drainAll(data.length > 0 ? ~~data : void 0);
} else if (type === "capacity-priority") {
var _data$split = data.split(":");
var _data$split2 = _slicedToArray(_data$split, 3);
rawCapacity = _data$split2[0];
priorityClient = _data$split2[1];
counter = _data$split2[2];
capacity = rawCapacity.length > 0 ? ~~rawCapacity : void 0;
if (priorityClient === _this2.clientId) {
drained = yield _this2.instance._drainAll(capacity);
newCapacity = capacity != null ? capacity - (drained || 0) : "";
return yield _this2.clients.client.publish(_this2.instance.channel(), `capacity-priority:${newCapacity}::${counter}`);
} else if (priorityClient === "") {
clearTimeout(_this2.capacityPriorityCounters[counter]);
delete _this2.capacityPriorityCounters[counter];
return _this2.instance._drainAll(capacity);
} else {
return _this2.capacityPriorityCounters[counter] = setTimeout(
/*#__PURE__*/
_asyncToGenerator(function* () {
var e;
try {
delete _this2.capacityPriorityCounters[counter];
yield _this2.runScript("blacklist_client", [priorityClient]);
return yield _this2.instance._drainAll(capacity);
} catch (error) {
e = error;
return _this2.instance.Events.trigger("error", e);
}
}), 1000);
}
} else if (type === "message") {
return _this2.instance.Events.trigger("message", data);
} else if (type === "blocked") {
return yield _this2.instance._dropAllQueued();
}
} catch (error) {
e = error;
return _this2.instance.Events.trigger("error", e);
}
})();
}
__disconnect__(flush) {
clearInterval(this.heartbeat);
if (this.sharedConnection) {
return this.connection.__removeLimiter__(this.instance);
} else {
return this.connection.disconnect(flush);
}
}
runScript(name, args) {
var _this3 = this;
return _asyncToGenerator(function* () {
if (!(name === "init" || name === "register_client")) {
yield _this3.ready;
}
return new _this3.Promise((resolve, reject) => {
var all_args, arr;
all_args = [Date.now(), _this3.clientId].concat(args);
_this3.instance.Events.trigger("debug", `Calling Redis script: ${name}.lua`, all_args);
arr = _this3.connection.__scriptArgs__(name, _this3.originalId, all_args, function (err, replies) {
if (err != null) {
return reject(err);
}
return resolve(replies);
});
return _this3.connection.__scriptFn__(name)(...arr);
}).catch(e => {
if (e.message === "SETTINGS_KEY_NOT_FOUND") {
if (name === "heartbeat") {
return _this3.Promise.resolve();
} else {
return _this3.runScript("init", _this3.prepareInitSettings(false)).then(() => {
return _this3.runScript(name, args);
});
}
} else if (e.message === "UNKNOWN_CLIENT") {
return _this3.runScript("register_client", [_this3.instance.queued()]).then(() => {
return _this3.runScript(name, args);
});
} else {
return _this3.Promise.reject(e);
}
});
})();
}
prepareArray(arr) {
var i, len, results, x;
results = [];
for (i = 0, len = arr.length; i < len; i++) {
x = arr[i];
results.push(x != null ? x.toString() : "");
}
return results;
}
prepareObject(obj) {
var arr, k, v;
arr = [];
for (k in obj) {
v = obj[k];
arr.push(k, v != null ? v.toString() : "");
}
return arr;
}
prepareInitSettings(clear) {
var args;
args = this.prepareObject(Object.assign({}, this.storeOptions, {
id: this.originalId,
version: this.instance.version,
groupTimeout: this.timeout,
clientTimeout: this.clientTimeout
}));
args.unshift(clear ? 1 : 0, this.instance.version);
return args;
}
convertBool(b) {
return !!b;
}
__updateSettings__(options) {
var _this4 = this;
return _asyncToGenerator(function* () {
yield _this4.runScript("update_settings", _this4.prepareObject(options));
return parser.overwrite(options, options, _this4.storeOptions);
})();
}
__running__() {
return this.runScript("running", []);
}
__queued__() {
return this.runScript("queued", []);
}
__done__() {
return this.runScript("done", []);
}
__groupCheck__() {
var _this5 = this;
return _asyncToGenerator(function* () {
return _this5.convertBool((yield _this5.runScript("group_check", [])));
})();
}
__incrementReservoir__(incr) {
return this.runScript("increment_reservoir", [incr]);
}
__currentReservoir__() {
return this.runScript("current_reservoir", []);
}
__check__(weight) {
var _this6 = this;
return _asyncToGenerator(function* () {
return _this6.convertBool((yield _this6.runScript("check", _this6.prepareArray([weight]))));
})();
}
__register__(index, weight, expiration) {
var _this7 = this;
return _asyncToGenerator(function* () {
var reservoir, success, wait;
var _ref4 = yield _this7.runScript("register", _this7.prepareArray([index, weight, expiration]));
var _ref5 = _slicedToArray(_ref4, 3);
success = _ref5[0];
wait = _ref5[1];
reservoir = _ref5[2];
return {
success: _this7.convertBool(success),
wait,
reservoir
};
})();
}
__submit__(queueLength, weight) {
var _this8 = this;
return _asyncToGenerator(function* () {
var blocked, e, maxConcurrent, overweight, reachedHWM, strategy;
try {
var _ref6 = yield _this8.runScript("submit", _this8.prepareArray([queueLength, weight]));
var _ref7 = _slicedToArray(_ref6, 3);
reachedHWM = _ref7[0];
blocked = _ref7[1];
strategy = _ref7[2];
return {
reachedHWM: _this8.convertBool(reachedHWM),
blocked: _this8.convertBool(blocked),
strategy
};
} catch (error) {
e = error;
if (e.message.indexOf("OVERWEIGHT") === 0) {
var _e$message$split = e.message.split(":");
var _e$message$split2 = _slicedToArray(_e$message$split, 3);
overweight = _e$message$split2[0];
weight = _e$message$split2[1];
maxConcurrent = _e$message$split2[2];
throw new BottleneckError(`Impossible to add a job having a weight of ${weight} to a limiter having a maxConcurrent setting of ${maxConcurrent}`);
} else {
throw e;
}
}
})();
}
__free__(index, weight) {
var _this9 = this;
return _asyncToGenerator(function* () {
var running;
running = yield _this9.runScript("free", _this9.prepareArray([index]));
return {
running
};
})();
}
};
module.exports = RedisDatastore;
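RedisDatastore is selected through the datastore option; limiters sharing an id coordinate across processes via the Lua scripts and pub/sub messages handled above. A sketch:

const Bottleneck = require("bottleneck");

const limiter = new Bottleneck({
  id: "api-limiter",      // all processes using this id share one rate limit
  datastore: "ioredis",   // or "redis"
  clearDatastore: false,  // keep any settings already stored in Redis
  clientOptions: { host: "127.0.0.1", port: 6379 }
});

limiter.on("error", (err) => console.error(err));
limiter.ready().then(() => console.log("connected"));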

162
node_modules/bottleneck/lib/Scripts.js generated vendored Normal file
View file

@ -0,0 +1,162 @@
"use strict";
var headers, lua, templates;
lua = require("./lua.json");
headers = {
refs: lua["refs.lua"],
validate_keys: lua["validate_keys.lua"],
validate_client: lua["validate_client.lua"],
refresh_expiration: lua["refresh_expiration.lua"],
process_tick: lua["process_tick.lua"],
conditions_check: lua["conditions_check.lua"],
get_time: lua["get_time.lua"]
};
exports.allKeys = function (id) {
return [
/*
HASH
*/
`b_${id}_settings`,
/*
HASH
job index -> weight
*/
`b_${id}_job_weights`,
/*
ZSET
job index -> expiration
*/
`b_${id}_job_expirations`,
/*
HASH
job index -> client
*/
`b_${id}_job_clients`,
/*
ZSET
client -> sum running
*/
`b_${id}_client_running`,
/*
HASH
client -> num queued
*/
`b_${id}_client_num_queued`,
/*
ZSET
client -> last job registered
*/
`b_${id}_client_last_registered`,
/*
ZSET
client -> last seen
*/
`b_${id}_client_last_seen`];
};
templates = {
init: {
keys: exports.allKeys,
headers: ["process_tick"],
refresh_expiration: true,
code: lua["init.lua"]
},
group_check: {
keys: exports.allKeys,
headers: [],
refresh_expiration: false,
code: lua["group_check.lua"]
},
register_client: {
keys: exports.allKeys,
headers: ["validate_keys"],
refresh_expiration: false,
code: lua["register_client.lua"]
},
blacklist_client: {
keys: exports.allKeys,
headers: ["validate_keys", "validate_client"],
refresh_expiration: false,
code: lua["blacklist_client.lua"]
},
heartbeat: {
keys: exports.allKeys,
headers: ["validate_keys", "validate_client", "process_tick"],
refresh_expiration: false,
code: lua["heartbeat.lua"]
},
update_settings: {
keys: exports.allKeys,
headers: ["validate_keys", "validate_client", "process_tick"],
refresh_expiration: true,
code: lua["update_settings.lua"]
},
running: {
keys: exports.allKeys,
headers: ["validate_keys", "validate_client", "process_tick"],
refresh_expiration: false,
code: lua["running.lua"]
},
queued: {
keys: exports.allKeys,
headers: ["validate_keys", "validate_client"],
refresh_expiration: false,
code: lua["queued.lua"]
},
done: {
keys: exports.allKeys,
headers: ["validate_keys", "validate_client", "process_tick"],
refresh_expiration: false,
code: lua["done.lua"]
},
check: {
keys: exports.allKeys,
headers: ["validate_keys", "validate_client", "process_tick", "conditions_check"],
refresh_expiration: false,
code: lua["check.lua"]
},
submit: {
keys: exports.allKeys,
headers: ["validate_keys", "validate_client", "process_tick", "conditions_check"],
refresh_expiration: true,
code: lua["submit.lua"]
},
register: {
keys: exports.allKeys,
headers: ["validate_keys", "validate_client", "process_tick", "conditions_check"],
refresh_expiration: true,
code: lua["register.lua"]
},
free: {
keys: exports.allKeys,
headers: ["validate_keys", "validate_client", "process_tick"],
refresh_expiration: true,
code: lua["free.lua"]
},
current_reservoir: {
keys: exports.allKeys,
headers: ["validate_keys", "validate_client", "process_tick"],
refresh_expiration: false,
code: lua["current_reservoir.lua"]
},
increment_reservoir: {
keys: exports.allKeys,
headers: ["validate_keys", "validate_client", "process_tick"],
refresh_expiration: true,
code: lua["increment_reservoir.lua"]
}
};
exports.names = Object.keys(templates);
exports.keys = function (name, id) {
return templates[name].keys(id);
};
exports.payload = function (name) {
var template;
template = templates[name];
return Array.prototype.concat(headers.refs, template.headers.map(function (h) {
return headers[h];
}), template.refresh_expiration ? headers.refresh_expiration : "", template.code).join("\n");
};
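
A quick illustrative sketch of how these exports fit together (the ids are placeholders; the key names come from allKeys above):

const Scripts = require("bottleneck/lib/Scripts");
// Redis keys touched by a script, namespaced by the limiter id:
Scripts.keys("submit", "my-limiter");
// => ["b_my-limiter_settings", "b_my-limiter_job_weights", ...]
// Full Lua source for a script, with the shared header snippets prepended:
const luaSource = Scripts.payload("submit");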

88
node_modules/bottleneck/lib/States.js generated vendored Normal file

@ -0,0 +1,88 @@
"use strict";
var BottleneckError, States;
BottleneckError = require("./BottleneckError");
States = class States {
constructor(status1) {
this.status = status1;
this._jobs = {};
this.counts = this.status.map(function () {
return 0;
});
}
next(id) {
var current, next;
current = this._jobs[id];
next = current + 1;
if (current != null && next < this.status.length) {
this.counts[current]--;
this.counts[next]++;
return this._jobs[id]++;
} else if (current != null) {
this.counts[current]--;
return delete this._jobs[id];
}
}
start(id) {
var initial;
initial = 0;
this._jobs[id] = initial;
return this.counts[initial]++;
}
remove(id) {
var current;
current = this._jobs[id];
if (current != null) {
this.counts[current]--;
delete this._jobs[id];
}
return current != null;
}
jobStatus(id) {
var ref;
return (ref = this.status[this._jobs[id]]) != null ? ref : null;
}
statusJobs(status) {
var k, pos, ref, results, v;
if (status != null) {
pos = this.status.indexOf(status);
if (pos < 0) {
throw new BottleneckError(`status must be one of ${this.status.join(', ')}`);
}
ref = this._jobs;
results = [];
for (k in ref) {
v = ref[k];
if (v === pos) {
results.push(k);
}
}
return results;
} else {
return Object.keys(this._jobs);
}
}
statusCounts() {
return this.counts.reduce((acc, v, i) => {
acc[this.status[i]] = v;
return acc;
}, {});
}
};
module.exports = States;
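
States is a small per-job state machine used to track job lifecycles. A minimal sketch, assuming the module is required directly from lib/:

const States = require("bottleneck/lib/States");
const states = new States(["RECEIVED", "QUEUED", "RUNNING", "EXECUTING"]);
states.start("job-1");      // job-1 enters RECEIVED
states.next("job-1");       // RECEIVED -> QUEUED
states.jobStatus("job-1");  // => "QUEUED"
states.statusCounts();      // => { RECEIVED: 0, QUEUED: 1, RUNNING: 0, EXECUTING: 0 }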

80
node_modules/bottleneck/lib/Sync.js generated vendored Normal file

@ -0,0 +1,80 @@
"use strict";
function asyncGeneratorStep(gen, resolve, reject, _next, _throw, key, arg) { try { var info = gen[key](arg); var value = info.value; } catch (error) { reject(error); return; } if (info.done) { resolve(value); } else { Promise.resolve(value).then(_next, _throw); } }
function _asyncToGenerator(fn) { return function () { var self = this, args = arguments; return new Promise(function (resolve, reject) { var gen = fn.apply(self, args); function _next(value) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "next", value); } function _throw(err) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "throw", err); } _next(undefined); }); }; }
var DLList, Sync;
DLList = require("./DLList");
Sync = class Sync {
constructor(name, Promise) {
this.schedule = this.schedule.bind(this);
this.name = name;
this.Promise = Promise;
this._running = 0;
this._queue = new DLList();
}
isEmpty() {
return this._queue.length === 0;
}
_tryToRun() {
var _this = this;
return _asyncToGenerator(function* () {
var args, cb, error, reject, resolve, returned, task;
if (_this._running < 1 && _this._queue.length > 0) {
_this._running++;
var _this$_queue$shift = _this._queue.shift();
task = _this$_queue$shift.task;
args = _this$_queue$shift.args;
resolve = _this$_queue$shift.resolve;
reject = _this$_queue$shift.reject;
cb = yield _asyncToGenerator(function* () {
try {
returned = yield task(...args);
return function () {
return resolve(returned);
};
} catch (error1) {
error = error1;
return function () {
return reject(error);
};
}
})();
_this._running--;
_this._tryToRun();
return cb();
}
})();
}
schedule(task, ...args) {
var promise, reject, resolve;
resolve = reject = null;
promise = new this.Promise(function (_resolve, _reject) {
resolve = _resolve;
return reject = _reject;
});
this._queue.push({
task,
args,
resolve,
reject
});
this._tryToRun();
return promise;
}
};
module.exports = Sync;
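
Sync is the internal one-at-a-time task serializer (Bottleneck.coffee below uses it for its _submitLock and _registerLock). A minimal sketch:

const Sync = require("bottleneck/lib/Sync");
const lock = new Sync("demo", Promise);
// Tasks are queued and run strictly one at a time, in FIFO order;
// schedule() returns a promise for the task's own result.
lock.schedule(async () => "first").then(console.log);
lock.schedule(async () => "second runs only after first settles");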

5
node_modules/bottleneck/lib/es5.js generated vendored Normal file

@ -0,0 +1,5 @@
"use strict";
require("regenerator-runtime/runtime");
module.exports = require("./Bottleneck");

3
node_modules/bottleneck/lib/index.js generated vendored Normal file

@ -0,0 +1,3 @@
"use strict";
module.exports = require("./Bottleneck");

24
node_modules/bottleneck/lib/lua.json generated vendored Normal file

File diff suppressed because one or more lines are too long

26
node_modules/bottleneck/lib/parser.js generated vendored Normal file

@ -0,0 +1,26 @@
"use strict";
exports.load = function (received, defaults, onto = {}) {
var k, ref, v;
for (k in defaults) {
v = defaults[k];
onto[k] = (ref = received[k]) != null ? ref : v;
}
return onto;
};
exports.overwrite = function (received, defaults, onto = {}) {
var k, v;
for (k in received) {
v = received[k];
if (defaults[k] !== void 0) {
onto[k] = v;
}
}
return onto;
};
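
The two helpers treat unknown keys differently; a short illustration:

const parser = require("bottleneck/lib/parser");
// load(): every key in defaults lands on onto, received values winning
parser.load({ minTime: 100 }, { minTime: 0, maxConcurrent: null });
// => { minTime: 100, maxConcurrent: null }
// overwrite(): only keys that also exist in defaults are copied
parser.overwrite({ minTime: 100, bogus: 1 }, { minTime: 0, maxConcurrent: null });
// => { minTime: 100 }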

1
node_modules/bottleneck/lib/version.json generated vendored Normal file

@ -0,0 +1 @@
{"version":"2.19.5"}

1524
node_modules/bottleneck/light.js generated vendored Normal file

File diff suppressed because it is too large

85
node_modules/bottleneck/package.json generated vendored Normal file

@ -0,0 +1,85 @@
{
"_args": [
[
"bottleneck@2.19.5",
"/Users/dougtangren/code/rust/action-gh-release"
]
],
"_from": "bottleneck@2.19.5",
"_id": "bottleneck@2.19.5",
"_inBundle": false,
"_integrity": "sha512-VHiNCbI1lKdl44tGrhNfU3lup0Tj/ZBMJB5/2ZbNXRCPuRCO7ed2mgcK4r17y+KB2EfuYuRaVlwNbAeaWGSpbw==",
"_location": "/bottleneck",
"_phantomChildren": {},
"_requested": {
"type": "version",
"registry": true,
"raw": "bottleneck@2.19.5",
"name": "bottleneck",
"escapedName": "bottleneck",
"rawSpec": "2.19.5",
"saveSpec": null,
"fetchSpec": "2.19.5"
},
"_requiredBy": [
"/@octokit/plugin-throttling"
],
"_resolved": "https://registry.npmjs.org/bottleneck/-/bottleneck-2.19.5.tgz",
"_spec": "2.19.5",
"_where": "/Users/dougtangren/code/rust/action-gh-release",
"author": {
"name": "Simon Grondin"
},
"bugs": {
"url": "https://github.com/SGrondin/bottleneck/issues"
},
"dependencies": {},
"description": "Distributed task scheduler and rate limiter",
"devDependencies": {
"@babel/core": "^7.5.0",
"@babel/preset-env": "^7.5.0",
"@types/es6-promise": "0.0.33",
"assert": "^1.5.0",
"coffeescript": "2.4.x",
"ejs-cli": "github:SGrondin/ejs-cli#master",
"ioredis": "^4.11.1",
"leakage": "^0.4.0",
"mocha": "^6.1.4",
"redis": "^2.8.0",
"regenerator-runtime": "^0.12.1",
"rollup": "^0.66.6",
"rollup-plugin-babel": "^4.3.3",
"rollup-plugin-commonjs": "^9.3.4",
"rollup-plugin-json": "^3.1.0",
"rollup-plugin-node-resolve": "^3.4.0",
"typescript": "^2.6.2"
},
"homepage": "https://github.com/SGrondin/bottleneck#readme",
"keywords": [
"async rate limiter",
"rate limiter",
"rate limiting",
"async",
"rate",
"limiting",
"limiter",
"throttle",
"throttling",
"throttler",
"load",
"clustering"
],
"license": "MIT",
"main": "lib/index.js",
"name": "bottleneck",
"repository": {
"type": "git",
"url": "git+https://github.com/SGrondin/bottleneck.git"
},
"scripts": {
"test": "mocha test",
"test-all": "./scripts/test_all.sh"
},
"typings": "bottleneck.d.ts",
"version": "2.19.5"
}

34
node_modules/bottleneck/rollup.config.es5.js generated vendored Normal file

@ -0,0 +1,34 @@
import json from 'rollup-plugin-json';
import resolve from 'rollup-plugin-node-resolve';
import commonjs from 'rollup-plugin-commonjs';
import babel from 'rollup-plugin-babel';
const bannerLines = [
'This file contains the full Bottleneck library (MIT) compiled to ES5.',
'https://github.com/SGrondin/bottleneck',
'It also contains the regenerator-runtime (MIT), necessary for Babel-generated ES5 code to execute promise and async/await code.',
'See the following link for Copyright and License information:',
'https://github.com/facebook/regenerator/blob/master/packages/regenerator-runtime/runtime.js',
].map(x => ` * ${x}`).join('\n');
const banner = `/**\n${bannerLines}\n */`;
export default {
input: 'lib/es5.js',
output: {
name: 'Bottleneck',
file: 'es5.js',
sourcemap: false,
globals: {},
format: 'umd',
banner
},
external: [],
plugins: [
json(),
resolve(),
commonjs(),
babel({
exclude: 'node_modules/**'
})
]
};

44
node_modules/bottleneck/rollup.config.light.js generated vendored Normal file

@ -0,0 +1,44 @@
import commonjs from 'rollup-plugin-commonjs';
import json from 'rollup-plugin-json';
import resolve from 'rollup-plugin-node-resolve';
const bannerLines = [
'This file contains the Bottleneck library (MIT), compiled to ES2017, and without Clustering support.',
'https://github.com/SGrondin/bottleneck',
].map(x => ` * ${x}`).join('\n');
const banner = `/**\n${bannerLines}\n */`;
const missing = `export default () => console.log('You must import the full version of Bottleneck in order to use this feature.');`;
const exclude = [
'RedisDatastore.js',
'RedisConnection.js',
'IORedisConnection.js',
'Scripts.js'
];
export default {
input: 'lib/index.js',
output: {
name: 'Bottleneck',
file: 'light.js',
sourcemap: false,
globals: {},
format: 'umd',
banner
},
external: [],
plugins: [
json(),
{
load: id => {
const chunks = id.split('/');
const file = chunks[chunks.length - 1];
if (exclude.indexOf(file) >= 0) {
return missing
}
}
},
resolve(),
commonjs()
]
};

25
node_modules/bottleneck/scripts/assemble_lua.js generated vendored Normal file

@ -0,0 +1,25 @@
var fs = require('fs')
var input = __dirname + '/../src/redis'
var loaded = {}
var promises = fs.readdirSync(input).map(function (file) {
return new Promise(function (resolve, reject) {
fs.readFile(input + '/' + file, function (err, data) {
if (err != null) {
return reject(err)
}
loaded[file] = data.toString('utf8')
return resolve()
})
})
})
Promise.all(promises)
.then(function () {
console.log(JSON.stringify(loaded, Object.keys(loaded).sort(), 2))
})
.catch(function (err) {
console.error(err)
process.exit(1)
})

82
node_modules/bottleneck/scripts/build.sh generated vendored Executable file

@ -0,0 +1,82 @@
#!/usr/bin/env bash
set -e
if [ ! -d node_modules ]; then
echo "[B] Run 'npm install' first"
exit 1
fi
clean() {
rm -f .babelrc
rm -rf lib/*
node scripts/version.js > lib/version.json
node scripts/assemble_lua.js > lib/lua.json
}
makeLib10() {
echo '[B] Compiling Bottleneck to Node 10+...'
npx coffee --compile --bare --no-header src/*.coffee
mv src/*.js lib/
}
makeLib6() {
echo '[B] Compiling Bottleneck to Node 6+...'
ln -s .babelrc.lib .babelrc
npx coffee --compile --bare --no-header --transpile src/*.coffee
mv src/*.js lib/
}
makeES5() {
echo '[B] Compiling Bottleneck to ES5...'
ln -s .babelrc.es5 .babelrc
npx coffee --compile --bare --no-header src/*.coffee
mv src/*.js lib/
echo '[B] Assembling ES5 bundle...'
npx rollup -c rollup.config.es5.js
}
makeLight() {
makeLib10
echo '[B] Assembling light bundle...'
npx rollup -c rollup.config.light.js
}
makeTypings() {
echo '[B] Compiling and testing TS typings...'
npx ejs-cli bottleneck.d.ts.ejs > bottleneck.d.ts
npx tsc --noEmit --strict test.ts
}
if [ "$1" = 'dev' ]; then
clean
makeLib10
elif [ "$1" = 'bench' ]; then
clean
makeLib6
elif [ "$1" = 'es5' ]; then
clean
makeES5
elif [ "$1" = 'light' ]; then
clean
makeLight
elif [ "$1" = 'typings' ]; then
makeTypings
else
clean
makeES5
clean
makeLight
clean
makeLib6
makeTypings
fi
rm -f .babelrc
echo '[B] Done!'

20
node_modules/bottleneck/scripts/test_all.sh generated vendored Executable file

@ -0,0 +1,20 @@
#!/usr/bin/env bash
set -e
source .env
echo 'ioredis tests'
DATASTORE=ioredis npm test
echo 'NodeRedis tests'
DATASTORE=redis npm test
echo 'ES5 bundle tests'
BUILD=es5 npm test
echo 'Light bundle tests'
BUILD=light npm test
echo 'Local tests'
npm test

3
node_modules/bottleneck/scripts/version.js generated vendored Normal file

@ -0,0 +1,3 @@
const packagejson = require('../package.json')
console.log(JSON.stringify({version: packagejson.version}))

39
node_modules/bottleneck/src/Batcher.coffee generated vendored Normal file

@ -0,0 +1,39 @@
parser = require "./parser"
Events = require "./Events"
class Batcher
defaults:
maxTime: null
maxSize: null
Promise: Promise
constructor: (@options={}) ->
parser.load @options, @defaults, @
@Events = new Events @
@_arr = []
@_resetPromise()
@_lastFlush = Date.now()
_resetPromise: ->
@_promise = new @Promise (res, rej) => @_resolve = res
_flush: ->
clearTimeout @_timeout
@_lastFlush = Date.now()
@_resolve()
@Events.trigger "batch", @_arr
@_arr = []
@_resetPromise()
add: (data) ->
@_arr.push data
ret = @_promise
if @_arr.length == @maxSize
@_flush()
else if @maxTime? and @_arr.length == 1
@_timeout = setTimeout =>
@_flush()
, @maxTime
ret
module.exports = Batcher
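
Batcher collects added items and flushes them as a single batch once maxSize items are pending or maxTime ms have passed since the first pending item. A minimal consumer-side sketch in JavaScript (the form in which the compiled lib is required):

const Bottleneck = require("bottleneck");
const batcher = new Bottleneck.Batcher({ maxTime: 5000, maxSize: 10 });
batcher.on("batch", (batch) => {
  // called with the array of items collected since the last flush
  console.log("flushing", batch.length, "items");
});
// add() resolves once the batch containing this item has been flushed
batcher.add({ id: 1 });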

298
node_modules/bottleneck/src/Bottleneck.coffee generated vendored Normal file

@ -0,0 +1,298 @@
NUM_PRIORITIES = 10
DEFAULT_PRIORITY = 5
parser = require "./parser"
Queues = require "./Queues"
Job = require "./Job"
LocalDatastore = require "./LocalDatastore"
RedisDatastore = require "./RedisDatastore"
Events = require "./Events"
States = require "./States"
Sync = require "./Sync"
class Bottleneck
Bottleneck.default = Bottleneck
Bottleneck.Events = Events
Bottleneck.version = Bottleneck::version = require("./version.json").version
Bottleneck.strategy = Bottleneck::strategy = { LEAK:1, OVERFLOW:2, OVERFLOW_PRIORITY:4, BLOCK:3 }
Bottleneck.BottleneckError = Bottleneck::BottleneckError = require "./BottleneckError"
Bottleneck.Group = Bottleneck::Group = require "./Group"
Bottleneck.RedisConnection = Bottleneck::RedisConnection = require "./RedisConnection"
Bottleneck.IORedisConnection = Bottleneck::IORedisConnection = require "./IORedisConnection"
Bottleneck.Batcher = Bottleneck::Batcher = require "./Batcher"
jobDefaults:
priority: DEFAULT_PRIORITY
weight: 1
expiration: null
id: "<no-id>"
storeDefaults:
maxConcurrent: null
minTime: 0
highWater: null
strategy: Bottleneck::strategy.LEAK
penalty: null
reservoir: null
reservoirRefreshInterval: null
reservoirRefreshAmount: null
reservoirIncreaseInterval: null
reservoirIncreaseAmount: null
reservoirIncreaseMaximum: null
localStoreDefaults:
Promise: Promise
timeout: null
heartbeatInterval: 250
redisStoreDefaults:
Promise: Promise
timeout: null
heartbeatInterval: 5000
clientTimeout: 10000
Redis: null
clientOptions: {}
clusterNodes: null
clearDatastore: false
connection: null
instanceDefaults:
datastore: "local"
connection: null
id: "<no-id>"
rejectOnDrop: true
trackDoneStatus: false
Promise: Promise
stopDefaults:
enqueueErrorMessage: "This limiter has been stopped and cannot accept new jobs."
dropWaitingJobs: true
dropErrorMessage: "This limiter has been stopped."
constructor: (options={}, invalid...) ->
@_validateOptions options, invalid
parser.load options, @instanceDefaults, @
@_queues = new Queues NUM_PRIORITIES
@_scheduled = {}
@_states = new States ["RECEIVED", "QUEUED", "RUNNING", "EXECUTING"].concat(if @trackDoneStatus then ["DONE"] else [])
@_limiter = null
@Events = new Events @
@_submitLock = new Sync "submit", @Promise
@_registerLock = new Sync "register", @Promise
storeOptions = parser.load options, @storeDefaults, {}
@_store = if @datastore == "redis" or @datastore == "ioredis" or @connection?
storeInstanceOptions = parser.load options, @redisStoreDefaults, {}
new RedisDatastore @, storeOptions, storeInstanceOptions
else if @datastore == "local"
storeInstanceOptions = parser.load options, @localStoreDefaults, {}
new LocalDatastore @, storeOptions, storeInstanceOptions
else
throw new Bottleneck::BottleneckError "Invalid datastore type: #{@datastore}"
@_queues.on "leftzero", => @_store.heartbeat?.ref?()
@_queues.on "zero", => @_store.heartbeat?.unref?()
_validateOptions: (options, invalid) ->
unless options? and typeof options == "object" and invalid.length == 0
throw new Bottleneck::BottleneckError "Bottleneck v2 takes a single object argument. Refer to https://github.com/SGrondin/bottleneck#upgrading-to-v2 if you're upgrading from Bottleneck v1."
ready: -> @_store.ready
clients: -> @_store.clients
channel: -> "b_#{@id}"
channel_client: -> "b_#{@id}_#{@_store.clientId}"
publish: (message) -> @_store.__publish__ message
disconnect: (flush=true) -> @_store.__disconnect__ flush
chain: (@_limiter) -> @
queued: (priority) -> @_queues.queued priority
clusterQueued: -> @_store.__queued__()
empty: -> @queued() == 0 and @_submitLock.isEmpty()
running: -> @_store.__running__()
done: -> @_store.__done__()
jobStatus: (id) -> @_states.jobStatus id
jobs: (status) -> @_states.statusJobs status
counts: -> @_states.statusCounts()
_randomIndex: -> Math.random().toString(36).slice(2)
check: (weight=1) -> @_store.__check__ weight
_clearGlobalState: (index) ->
if @_scheduled[index]?
clearTimeout @_scheduled[index].expiration
delete @_scheduled[index]
true
else false
_free: (index, job, options, eventInfo) ->
try
{ running } = await @_store.__free__ index, options.weight
@Events.trigger "debug", "Freed #{options.id}", eventInfo
if running == 0 and @empty() then @Events.trigger "idle"
catch e
@Events.trigger "error", e
_run: (index, job, wait) ->
job.doRun()
clearGlobalState = @_clearGlobalState.bind @, index
run = @_run.bind @, index, job
free = @_free.bind @, index, job
@_scheduled[index] =
timeout: setTimeout =>
job.doExecute @_limiter, clearGlobalState, run, free
, wait
expiration: if job.options.expiration? then setTimeout ->
job.doExpire clearGlobalState, run, free
, wait + job.options.expiration
job: job
_drainOne: (capacity) ->
@_registerLock.schedule =>
if @queued() == 0 then return @Promise.resolve null
queue = @_queues.getFirst()
{ options, args } = next = queue.first()
if capacity? and options.weight > capacity then return @Promise.resolve null
@Events.trigger "debug", "Draining #{options.id}", { args, options }
index = @_randomIndex()
@_store.__register__ index, options.weight, options.expiration
.then ({ success, wait, reservoir }) =>
@Events.trigger "debug", "Drained #{options.id}", { success, args, options }
if success
queue.shift()
empty = @empty()
if empty then @Events.trigger "empty"
if reservoir == 0 then @Events.trigger "depleted", empty
@_run index, next, wait
@Promise.resolve options.weight
else
@Promise.resolve null
_drainAll: (capacity, total=0) ->
@_drainOne(capacity)
.then (drained) =>
if drained?
newCapacity = if capacity? then capacity - drained else capacity
@_drainAll(newCapacity, total + drained)
else @Promise.resolve total
.catch (e) => @Events.trigger "error", e
_dropAllQueued: (message) -> @_queues.shiftAll (job) -> job.doDrop { message }
stop: (options={}) ->
options = parser.load options, @stopDefaults
waitForExecuting = (at) =>
finished = =>
counts = @_states.counts
(counts[0] + counts[1] + counts[2] + counts[3]) == at
new @Promise (resolve, reject) =>
if finished() then resolve()
else
@on "done", =>
if finished()
@removeAllListeners "done"
resolve()
done = if options.dropWaitingJobs
@_run = (index, next) -> next.doDrop { message: options.dropErrorMessage }
@_drainOne = => @Promise.resolve null
@_registerLock.schedule => @_submitLock.schedule =>
for k, v of @_scheduled
if @jobStatus(v.job.options.id) == "RUNNING"
clearTimeout v.timeout
clearTimeout v.expiration
v.job.doDrop { message: options.dropErrorMessage }
@_dropAllQueued options.dropErrorMessage
waitForExecuting(0)
else
@schedule { priority: NUM_PRIORITIES - 1, weight: 0 }, => waitForExecuting(1)
@_receive = (job) -> job._reject new Bottleneck::BottleneckError options.enqueueErrorMessage
@stop = => @Promise.reject new Bottleneck::BottleneckError "stop() has already been called"
done
_addToQueue: (job) =>
{ args, options } = job
try
{ reachedHWM, blocked, strategy } = await @_store.__submit__ @queued(), options.weight
catch error
@Events.trigger "debug", "Could not queue #{options.id}", { args, options, error }
job.doDrop { error }
return false
if blocked
job.doDrop()
return true
else if reachedHWM
shifted = if strategy == Bottleneck::strategy.LEAK then @_queues.shiftLastFrom(options.priority)
else if strategy == Bottleneck::strategy.OVERFLOW_PRIORITY then @_queues.shiftLastFrom(options.priority + 1)
else if strategy == Bottleneck::strategy.OVERFLOW then job
if shifted? then shifted.doDrop()
if not shifted? or strategy == Bottleneck::strategy.OVERFLOW
if not shifted? then job.doDrop()
return reachedHWM
job.doQueue reachedHWM, blocked
@_queues.push job
await @_drainAll()
reachedHWM
_receive: (job) ->
if @_states.jobStatus(job.options.id)?
job._reject new Bottleneck::BottleneckError "A job with the same id already exists (id=#{job.options.id})"
false
else
job.doReceive()
@_submitLock.schedule @_addToQueue, job
submit: (args...) ->
if typeof args[0] == "function"
[fn, args..., cb] = args
options = parser.load {}, @jobDefaults
else
[options, fn, args..., cb] = args
options = parser.load options, @jobDefaults
task = (args...) =>
new @Promise (resolve, reject) ->
fn args..., (args...) ->
(if args[0]? then reject else resolve) args
job = new Job task, args, options, @jobDefaults, @rejectOnDrop, @Events, @_states, @Promise
job.promise
.then (args) -> cb? args...
.catch (args) -> if Array.isArray args then cb? args... else cb? args
@_receive job
schedule: (args...) ->
if typeof args[0] == "function"
[task, args...] = args
options = {}
else
[options, task, args...] = args
job = new Job task, args, options, @jobDefaults, @rejectOnDrop, @Events, @_states, @Promise
@_receive job
job.promise
wrap: (fn) ->
schedule = @schedule.bind @
wrapped = (args...) -> schedule fn.bind(@), args...
wrapped.withOptions = (options, args...) -> schedule options, fn, args...
wrapped
updateSettings: (options={}) ->
await @_store.__updateSettings__ parser.overwrite options, @storeDefaults
parser.overwrite options, @instanceDefaults, @
@
currentReservoir: -> @_store.__currentReservoir__()
incrementReservoir: (incr=0) -> @_store.__incrementReservoir__ incr
module.exports = Bottleneck
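
For orientation, the public surface defined above boils down to a few calls; a minimal sketch (fetchSomething is a stand-in async function, not part of the library):

const Bottleneck = require("bottleneck");
const limiter = new Bottleneck({ maxConcurrent: 2, minTime: 333 });
// schedule() runs the task through the limiter and returns its promise
limiter.schedule(() => fetchSomething()).then(console.log);
// wrap() returns a rate-limited version of an async function
const limited = limiter.wrap(fetchSomething);
limited().then(console.log);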

3
node_modules/bottleneck/src/BottleneckError.coffee generated vendored Normal file

@ -0,0 +1,3 @@
class BottleneckError extends Error
module.exports = BottleneckError

38
node_modules/bottleneck/src/DLList.coffee generated vendored Normal file

@ -0,0 +1,38 @@
class DLList
constructor: (@incr, @decr) ->
@_first = null
@_last = null
@length = 0
push: (value) ->
@length++
@incr?()
node = { value, prev: @_last, next: null }
if @_last?
@_last.next = node
@_last = node
else @_first = @_last = node
undefined
shift: () ->
if not @_first? then return
else
@length--
@decr?()
value = @_first.value
if (@_first = @_first.next)?
@_first.prev = null
else
@_last = null
value
first: () -> if @_first? then @_first.value
getArray: () ->
node = @_first
while node? then (ref = node; node = node.next; ref.value)
forEachShift: (cb) ->
node = @shift()
while node? then (cb node; node = @shift())
undefined
debug: () ->
node = @_first
while node? then (ref = node; node = node.next; { value: ref.value, prev: ref.prev?.value, next: ref.next?.value })
module.exports = DLList
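
DLList is a plain doubly-linked FIFO with optional length callbacks (Queues.coffee below passes incr/decr hooks into it). A minimal sketch against the compiled lib:

const DLList = require("bottleneck/lib/DLList");
const list = new DLList();  // the incr/decr callbacks are optional
list.push("a");
list.push("b");
list.first();               // => "a" (peek, no removal)
list.shift();               // => "a"
list.getArray();            // => ["b"]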

38
node_modules/bottleneck/src/Events.coffee generated vendored Normal file

@ -0,0 +1,38 @@
class Events
constructor: (@instance) ->
@_events = {}
if @instance.on? or @instance.once? or @instance.removeAllListeners?
throw new Error "An Emitter already exists for this object"
@instance.on = (name, cb) => @_addListener name, "many", cb
@instance.once = (name, cb) => @_addListener name, "once", cb
@instance.removeAllListeners = (name=null) =>
if name? then delete @_events[name] else @_events = {}
_addListener: (name, status, cb) ->
@_events[name] ?= []
@_events[name].push {cb, status}
@instance
listenerCount: (name) ->
if @_events[name]? then @_events[name].length else 0
trigger: (name, args...) ->
try
if name != "debug" then @trigger "debug", "Event triggered: #{name}", args
return unless @_events[name]?
@_events[name] = @_events[name].filter (listener) -> listener.status != "none"
promises = @_events[name].map (listener) =>
return if listener.status == "none"
if listener.status == "once" then listener.status = "none"
try
returned = listener.cb?(args...)
if typeof returned?.then == "function"
await returned
else
returned
catch e
if "name" != "error" then @trigger "error", e
null
(await Promise.all promises).find (x) -> x?
catch e
if "name" != "error" then @trigger "error", e
null
module.exports = Events
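
Events grafts on/once/removeAllListeners onto a host object, while trigger lives on the Events instance; async listeners are awaited and the first non-null return value wins (this is how a "failed" listener can hand back a retry delay). A minimal sketch:

const Events = require("bottleneck/lib/Events");
const host = {};
const events = new Events(host);  // host gains on(), once(), removeAllListeners()
host.on("greet", (who) => console.log("hello", who));
events.trigger("greet", "world"); // also emits a "debug" event first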

80
node_modules/bottleneck/src/Group.coffee generated vendored Normal file

@ -0,0 +1,80 @@
parser = require "./parser"
Events = require "./Events"
RedisConnection = require "./RedisConnection"
IORedisConnection = require "./IORedisConnection"
Scripts = require "./Scripts"
class Group
defaults:
timeout: 1000 * 60 * 5
connection: null
Promise: Promise
id: "group-key"
constructor: (@limiterOptions={}) ->
parser.load @limiterOptions, @defaults, @
@Events = new Events @
@instances = {}
@Bottleneck = require "./Bottleneck"
@_startAutoCleanup()
@sharedConnection = @connection?
if !@connection?
if @limiterOptions.datastore == "redis"
@connection = new RedisConnection Object.assign {}, @limiterOptions, { @Events }
else if @limiterOptions.datastore == "ioredis"
@connection = new IORedisConnection Object.assign {}, @limiterOptions, { @Events }
key: (key="") -> @instances[key] ? do =>
limiter = @instances[key] = new @Bottleneck Object.assign @limiterOptions, {
id: "#{@id}-#{key}",
@timeout,
@connection
}
@Events.trigger "created", limiter, key
limiter
deleteKey: (key="") =>
instance = @instances[key]
if @connection
deleted = await @connection.__runCommand__ ['del', Scripts.allKeys("#{@id}-#{key}")...]
if instance?
delete @instances[key]
await instance.disconnect()
instance? or deleted > 0
limiters: -> { key: k, limiter: v } for k, v of @instances
keys: -> Object.keys @instances
clusterKeys: ->
if !@connection? then return @Promise.resolve @keys()
keys = []
cursor = null
start = "b_#{@id}-".length
end = "_settings".length
until cursor == 0
[next, found] = await @connection.__runCommand__ ["scan", (cursor ? 0), "match", "b_#{@id}-*_settings", "count", 10000]
cursor = ~~next
keys.push(k.slice(start, -end)) for k in found
keys
_startAutoCleanup: ->
clearInterval @interval
(@interval = setInterval =>
time = Date.now()
for k, v of @instances
try if await v._store.__groupCheck__(time) then @deleteKey k
catch e then v.Events.trigger "error", e
, (@timeout / 2)).unref?()
updateSettings: (options={}) ->
parser.overwrite options, @defaults, @
parser.overwrite options, options, @limiterOptions
@_startAutoCleanup() if options.timeout?
disconnect: (flush=true) ->
if !@sharedConnection
@connection?.disconnect flush
module.exports = Group
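
Group lazily creates one limiter per key, all sharing the same options (and, when clustering, the same connection); idle limiters are cleaned up after timeout ms. A minimal sketch (handleRequest is a stand-in async function):

const Bottleneck = require("bottleneck");
const group = new Bottleneck.Group({ maxConcurrent: 1, minTime: 250 });
// key() returns the limiter for that key, creating and caching it on first use
group.key("user-42").schedule(() => handleRequest());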

84
node_modules/bottleneck/src/IORedisConnection.coffee generated vendored Normal file

@ -0,0 +1,84 @@
parser = require "./parser"
Events = require "./Events"
Scripts = require "./Scripts"
class IORedisConnection
datastore: "ioredis"
defaults:
Redis: null
clientOptions: {}
clusterNodes: null
client: null
Promise: Promise
Events: null
constructor: (options={}) ->
parser.load options, @defaults, @
@Redis ?= eval("require")("ioredis") # Obfuscated or else Webpack/Angular will try to inline the optional ioredis module. To override this behavior: pass the ioredis module to Bottleneck as the 'Redis' option.
@Events ?= new Events @
@terminated = false
if @clusterNodes?
@client = new @Redis.Cluster @clusterNodes, @clientOptions
@subscriber = new @Redis.Cluster @clusterNodes, @clientOptions
else if @client? and !@client.duplicate?
@subscriber = new @Redis.Cluster @client.startupNodes, @client.options
else
@client ?= new @Redis @clientOptions
@subscriber = @client.duplicate()
@limiters = {}
@ready = @Promise.all [@_setup(@client, false), @_setup(@subscriber, true)]
.then =>
@_loadScripts()
{ @client, @subscriber }
_setup: (client, sub) ->
client.setMaxListeners 0
new @Promise (resolve, reject) =>
client.on "error", (e) => @Events.trigger "error", e
if sub
client.on "message", (channel, message) =>
@limiters[channel]?._store.onMessage channel, message
if client.status == "ready" then resolve()
else client.once "ready", resolve
_loadScripts: -> Scripts.names.forEach (name) => @client.defineCommand name, { lua: Scripts.payload(name) }
__runCommand__: (cmd) ->
await @ready
[[_, deleted]] = await @client.pipeline([cmd]).exec()
deleted
__addLimiter__: (instance) ->
@Promise.all [instance.channel(), instance.channel_client()].map (channel) =>
new @Promise (resolve, reject) =>
@subscriber.subscribe channel, =>
@limiters[channel] = instance
resolve()
__removeLimiter__: (instance) ->
[instance.channel(), instance.channel_client()].forEach (channel) =>
await @subscriber.unsubscribe channel unless @terminated
delete @limiters[channel]
__scriptArgs__: (name, id, args, cb) ->
keys = Scripts.keys name, id
[keys.length].concat keys, args, cb
__scriptFn__: (name) ->
@client[name].bind(@client)
disconnect: (flush=true) ->
clearInterval(@limiters[k]._store.heartbeat) for k in Object.keys @limiters
@limiters = {}
@terminated = true
if flush
@Promise.all [@client.quit(), @subscriber.quit()]
else
@client.disconnect()
@subscriber.disconnect()
@Promise.resolve()
module.exports = IORedisConnection

98
node_modules/bottleneck/src/Job.coffee generated vendored Normal file

@ -0,0 +1,98 @@
NUM_PRIORITIES = 10
DEFAULT_PRIORITY = 5
parser = require "./parser"
BottleneckError = require "./BottleneckError"
class Job
constructor: (@task, @args, options, jobDefaults, @rejectOnDrop, @Events, @_states, @Promise) ->
@options = parser.load options, jobDefaults
@options.priority = @_sanitizePriority @options.priority
if @options.id == jobDefaults.id then @options.id = "#{@options.id}-#{@_randomIndex()}"
@promise = new @Promise (@_resolve, @_reject) =>
@retryCount = 0
_sanitizePriority: (priority) ->
sProperty = if ~~priority != priority then DEFAULT_PRIORITY else priority
if sProperty < 0 then 0 else if sProperty > NUM_PRIORITIES-1 then NUM_PRIORITIES-1 else sProperty
_randomIndex: -> Math.random().toString(36).slice(2)
doDrop: ({ error, message="This job has been dropped by Bottleneck" } = {}) ->
if @_states.remove @options.id
if @rejectOnDrop then @_reject (error ? new BottleneckError message)
@Events.trigger "dropped", { @args, @options, @task, @promise }
true
else
false
_assertStatus: (expected) ->
status = @_states.jobStatus @options.id
if not (status == expected or (expected == "DONE" and status == null))
throw new BottleneckError "Invalid job status #{status}, expected #{expected}. Please open an issue at https://github.com/SGrondin/bottleneck/issues"
doReceive: () ->
@_states.start @options.id
@Events.trigger "received", { @args, @options }
doQueue: (reachedHWM, blocked) ->
@_assertStatus "RECEIVED"
@_states.next @options.id
@Events.trigger "queued", { @args, @options, reachedHWM, blocked }
doRun: () ->
if @retryCount == 0
@_assertStatus "QUEUED"
@_states.next @options.id
else @_assertStatus "EXECUTING"
@Events.trigger "scheduled", { @args, @options }
doExecute: (chained, clearGlobalState, run, free) ->
if @retryCount == 0
@_assertStatus "RUNNING"
@_states.next @options.id
else @_assertStatus "EXECUTING"
eventInfo = { @args, @options, @retryCount }
@Events.trigger "executing", eventInfo
try
passed = await if chained?
chained.schedule @options, @task, @args...
else @task @args...
if clearGlobalState()
@doDone eventInfo
await free @options, eventInfo
@_assertStatus "DONE"
@_resolve passed
catch error
@_onFailure error, eventInfo, clearGlobalState, run, free
doExpire: (clearGlobalState, run, free) ->
if @_states.jobStatus(@options.id) == "RUNNING"
@_states.next @options.id
@_assertStatus "EXECUTING"
eventInfo = { @args, @options, @retryCount }
error = new BottleneckError "This job timed out after #{@options.expiration} ms."
@_onFailure error, eventInfo, clearGlobalState, run, free
_onFailure: (error, eventInfo, clearGlobalState, run, free) ->
if clearGlobalState()
retry = await @Events.trigger "failed", error, eventInfo
if retry?
retryAfter = ~~retry
@Events.trigger "retry", "Retrying #{@options.id} after #{retryAfter} ms", eventInfo
@retryCount++
run retryAfter
else
@doDone eventInfo
await free @options, eventInfo
@_assertStatus "DONE"
@_reject error
doDone: (eventInfo) ->
@_assertStatus "EXECUTING"
@_states.next @options.id
@Events.trigger "done", eventInfo
module.exports = Job

140
node_modules/bottleneck/src/LocalDatastore.coffee generated vendored Normal file

@ -0,0 +1,140 @@
parser = require "./parser"
BottleneckError = require "./BottleneckError"
class LocalDatastore
constructor: (@instance, @storeOptions, storeInstanceOptions) ->
@clientId = @instance._randomIndex()
parser.load storeInstanceOptions, storeInstanceOptions, @
@_nextRequest = @_lastReservoirRefresh = @_lastReservoirIncrease = Date.now()
@_running = 0
@_done = 0
@_unblockTime = 0
@ready = @Promise.resolve()
@clients = {}
@_startHeartbeat()
_startHeartbeat: ->
if !@heartbeat? and ((
@storeOptions.reservoirRefreshInterval? and @storeOptions.reservoirRefreshAmount?
) or (
@storeOptions.reservoirIncreaseInterval? and @storeOptions.reservoirIncreaseAmount?
))
(@heartbeat = setInterval =>
now = Date.now()
if @storeOptions.reservoirRefreshInterval? and now >= @_lastReservoirRefresh + @storeOptions.reservoirRefreshInterval
@_lastReservoirRefresh = now
@storeOptions.reservoir = @storeOptions.reservoirRefreshAmount
@instance._drainAll @computeCapacity()
if @storeOptions.reservoirIncreaseInterval? and now >= @_lastReservoirIncrease + @storeOptions.reservoirIncreaseInterval
{ reservoirIncreaseAmount: amount, reservoirIncreaseMaximum: maximum, reservoir } = @storeOptions
@_lastReservoirIncrease = now
incr = if maximum? then Math.min amount, maximum - reservoir else amount
if incr > 0
@storeOptions.reservoir += incr
@instance._drainAll @computeCapacity()
, @heartbeatInterval).unref?()
else clearInterval @heartbeat
__publish__: (message) ->
await @yieldLoop()
@instance.Events.trigger "message", message.toString()
__disconnect__: (flush) ->
await @yieldLoop()
clearInterval @heartbeat
@Promise.resolve()
yieldLoop: (t=0) -> new @Promise (resolve, reject) -> setTimeout resolve, t
computePenalty: -> @storeOptions.penalty ? ((15 * @storeOptions.minTime) or 5000)
__updateSettings__: (options) ->
await @yieldLoop()
parser.overwrite options, options, @storeOptions
@_startHeartbeat()
@instance._drainAll @computeCapacity()
true
__running__: ->
await @yieldLoop()
@_running
__queued__: ->
await @yieldLoop()
@instance.queued()
__done__: ->
await @yieldLoop()
@_done
__groupCheck__: (time) ->
await @yieldLoop()
(@_nextRequest + @timeout) < time
computeCapacity: ->
{ maxConcurrent, reservoir } = @storeOptions
if maxConcurrent? and reservoir? then Math.min((maxConcurrent - @_running), reservoir)
else if maxConcurrent? then maxConcurrent - @_running
else if reservoir? then reservoir
else null
conditionsCheck: (weight) ->
capacity = @computeCapacity()
not capacity? or weight <= capacity
__incrementReservoir__: (incr) ->
await @yieldLoop()
reservoir = @storeOptions.reservoir += incr
@instance._drainAll @computeCapacity()
reservoir
__currentReservoir__: ->
await @yieldLoop()
@storeOptions.reservoir
isBlocked: (now) -> @_unblockTime >= now
check: (weight, now) -> @conditionsCheck(weight) and (@_nextRequest - now) <= 0
__check__: (weight) ->
await @yieldLoop()
now = Date.now()
@check weight, now
__register__: (index, weight, expiration) ->
await @yieldLoop()
now = Date.now()
if @conditionsCheck weight
@_running += weight
if @storeOptions.reservoir? then @storeOptions.reservoir -= weight
wait = Math.max @_nextRequest - now, 0
@_nextRequest = now + wait + @storeOptions.minTime
{ success: true, wait, reservoir: @storeOptions.reservoir }
else { success: false }
strategyIsBlock: -> @storeOptions.strategy == 3
__submit__: (queueLength, weight) ->
await @yieldLoop()
if @storeOptions.maxConcurrent? and weight > @storeOptions.maxConcurrent
throw new BottleneckError("Impossible to add a job having a weight of #{weight} to a limiter having a maxConcurrent setting of #{@storeOptions.maxConcurrent}")
now = Date.now()
reachedHWM = @storeOptions.highWater? and queueLength == @storeOptions.highWater and not @check(weight, now)
blocked = @strategyIsBlock() and (reachedHWM or @isBlocked now)
if blocked
@_unblockTime = now + @computePenalty()
@_nextRequest = @_unblockTime + @storeOptions.minTime
@instance._dropAllQueued()
{ reachedHWM, blocked, strategy: @storeOptions.strategy }
__free__: (index, weight) ->
await @yieldLoop()
@_running -= weight
@_done += weight
@instance._drainAll @computeCapacity()
{ running: @_running }
module.exports = LocalDatastore

28
node_modules/bottleneck/src/Queues.coffee generated vendored Normal file

@ -0,0 +1,28 @@
DLList = require "./DLList"
Events = require "./Events"
class Queues
constructor: (num_priorities) ->
@Events = new Events @
@_length = 0
@_lists = for i in [1..num_priorities] then new DLList (=> @incr()), (=> @decr())
incr: -> if @_length++ == 0 then @Events.trigger "leftzero"
decr: -> if --@_length == 0 then @Events.trigger "zero"
push: (job) -> @_lists[job.options.priority].push job
queued: (priority) -> if priority? then @_lists[priority].length else @_length
shiftAll: (fn) -> @_lists.forEach (list) -> list.forEachShift fn
getFirst: (arr=@_lists) ->
for list in arr
return list if list.length > 0
[]
shiftLastFrom: (priority) -> @getFirst(@_lists[priority..].reverse()).shift()
module.exports = Queues
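
Queues keeps one DLList per priority level and fires "leftzero"/"zero" events as the aggregate length crosses zero. A minimal sketch with a stand-in job object (real jobs come from Job.coffee):

const Queues = require("bottleneck/lib/Queues");
const queues = new Queues(10);             // priority levels 0..9
queues.push({ options: { priority: 5 } });
queues.queued();                           // => 1 (total across all priorities)
queues.queued(5);                          // => 1 (just priority 5)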

91
node_modules/bottleneck/src/RedisConnection.coffee generated vendored Normal file

@ -0,0 +1,91 @@
parser = require "./parser"
Events = require "./Events"
Scripts = require "./Scripts"
class RedisConnection
datastore: "redis"
defaults:
Redis: null
clientOptions: {}
client: null
Promise: Promise
Events: null
constructor: (options={}) ->
parser.load options, @defaults, @
@Redis ?= eval("require")("redis") # Obfuscated or else Webpack/Angular will try to inline the optional redis module. To override this behavior: pass the redis module to Bottleneck as the 'Redis' option.
@Events ?= new Events @
@terminated = false
@client ?= @Redis.createClient @clientOptions
@subscriber = @client.duplicate()
@limiters = {}
@shas = {}
@ready = @Promise.all [@_setup(@client, false), @_setup(@subscriber, true)]
.then => @_loadScripts()
.then => { @client, @subscriber }
_setup: (client, sub) ->
client.setMaxListeners 0
new @Promise (resolve, reject) =>
client.on "error", (e) => @Events.trigger "error", e
if sub
client.on "message", (channel, message) =>
@limiters[channel]?._store.onMessage channel, message
if client.ready then resolve()
else client.once "ready", resolve
_loadScript: (name) ->
new @Promise (resolve, reject) =>
payload = Scripts.payload name
@client.multi([["script", "load", payload]]).exec (err, replies) =>
if err? then return reject err
@shas[name] = replies[0]
resolve replies[0]
_loadScripts: -> @Promise.all(Scripts.names.map (k) => @_loadScript k)
__runCommand__: (cmd) ->
await @ready
new @Promise (resolve, reject) =>
@client.multi([cmd]).exec_atomic (err, replies) ->
if err? then reject(err) else resolve(replies[0])
__addLimiter__: (instance) ->
@Promise.all [instance.channel(), instance.channel_client()].map (channel) =>
new @Promise (resolve, reject) =>
handler = (chan) =>
if chan == channel
@subscriber.removeListener "subscribe", handler
@limiters[channel] = instance
resolve()
@subscriber.on "subscribe", handler
@subscriber.subscribe channel
__removeLimiter__: (instance) ->
@Promise.all [instance.channel(), instance.channel_client()].map (channel) =>
unless @terminated
await new @Promise (resolve, reject) =>
@subscriber.unsubscribe channel, (err, chan) ->
if err? then return reject err
if chan == channel then return resolve()
delete @limiters[channel]
__scriptArgs__: (name, id, args, cb) ->
keys = Scripts.keys name, id
[@shas[name], keys.length].concat keys, args, cb
__scriptFn__: (name) ->
@client.evalsha.bind(@client)
disconnect: (flush=true) ->
clearInterval(@limiters[k]._store.heartbeat) for k in Object.keys @limiters
@limiters = {}
@terminated = true
@client.end flush
@subscriber.end flush
@Promise.resolve()
module.exports = RedisConnection

158
node_modules/bottleneck/src/RedisDatastore.coffee generated vendored Normal file

@ -0,0 +1,158 @@
parser = require "./parser"
BottleneckError = require "./BottleneckError"
RedisConnection = require "./RedisConnection"
IORedisConnection = require "./IORedisConnection"
class RedisDatastore
constructor: (@instance, @storeOptions, storeInstanceOptions) ->
@originalId = @instance.id
@clientId = @instance._randomIndex()
parser.load storeInstanceOptions, storeInstanceOptions, @
@clients = {}
@capacityPriorityCounters = {}
@sharedConnection = @connection?
@connection ?= if @instance.datastore == "redis" then new RedisConnection { @Redis, @clientOptions, @Promise, Events: @instance.Events }
else if @instance.datastore == "ioredis" then new IORedisConnection { @Redis, @clientOptions, @clusterNodes, @Promise, Events: @instance.Events }
@instance.connection = @connection
@instance.datastore = @connection.datastore
@ready = @connection.ready
.then (@clients) => @runScript "init", @prepareInitSettings @clearDatastore
.then => @connection.__addLimiter__ @instance
.then => @runScript "register_client", [@instance.queued()]
.then =>
(@heartbeat = setInterval =>
@runScript "heartbeat", []
.catch (e) => @instance.Events.trigger "error", e
, @heartbeatInterval).unref?()
@clients
__publish__: (message) ->
{ client } = await @ready
client.publish(@instance.channel(), "message:#{message.toString()}")
onMessage: (channel, message) ->
try
pos = message.indexOf(":")
[type, data] = [message.slice(0, pos), message.slice(pos+1)]
if type == "capacity"
await @instance._drainAll(if data.length > 0 then ~~data)
else if type == "capacity-priority"
[rawCapacity, priorityClient, counter] = data.split(":")
capacity = if rawCapacity.length > 0 then ~~rawCapacity
if priorityClient == @clientId
drained = await @instance._drainAll(capacity)
newCapacity = if capacity? then capacity - (drained or 0) else ""
await @clients.client.publish(@instance.channel(), "capacity-priority:#{newCapacity}::#{counter}")
else if priorityClient == ""
clearTimeout @capacityPriorityCounters[counter]
delete @capacityPriorityCounters[counter]
@instance._drainAll(capacity)
else
@capacityPriorityCounters[counter] = setTimeout =>
try
delete @capacityPriorityCounters[counter]
await @runScript "blacklist_client", [priorityClient]
await @instance._drainAll(capacity)
catch e then @instance.Events.trigger "error", e
, 1000
else if type == "message"
@instance.Events.trigger "message", data
else if type == "blocked"
await @instance._dropAllQueued()
catch e then @instance.Events.trigger "error", e
__disconnect__: (flush) ->
clearInterval @heartbeat
if @sharedConnection
@connection.__removeLimiter__ @instance
else
@connection.disconnect flush
runScript: (name, args) ->
await @ready unless name == "init" or name == "register_client"
new @Promise (resolve, reject) =>
all_args = [Date.now(), @clientId].concat args
@instance.Events.trigger "debug", "Calling Redis script: #{name}.lua", all_args
arr = @connection.__scriptArgs__ name, @originalId, all_args, (err, replies) ->
if err? then return reject err
return resolve replies
@connection.__scriptFn__(name) arr...
.catch (e) =>
if e.message == "SETTINGS_KEY_NOT_FOUND"
if name == "heartbeat" then @Promise.resolve()
else
@runScript("init", @prepareInitSettings(false))
.then => @runScript(name, args)
else if e.message == "UNKNOWN_CLIENT"
@runScript("register_client", [@instance.queued()])
.then => @runScript(name, args)
else @Promise.reject e
prepareArray: (arr) -> (if x? then x.toString() else "") for x in arr
prepareObject: (obj) ->
arr = []
for k, v of obj then arr.push k, (if v? then v.toString() else "")
arr
prepareInitSettings: (clear) ->
args = @prepareObject Object.assign({}, @storeOptions, {
id: @originalId
version: @instance.version
groupTimeout: @timeout
@clientTimeout
})
args.unshift (if clear then 1 else 0), @instance.version
args
convertBool: (b) -> !!b
__updateSettings__: (options) ->
await @runScript "update_settings", @prepareObject options
parser.overwrite options, options, @storeOptions
__running__: -> @runScript "running", []
__queued__: -> @runScript "queued", []
__done__: -> @runScript "done", []
__groupCheck__: -> @convertBool await @runScript "group_check", []
__incrementReservoir__: (incr) -> @runScript "increment_reservoir", [incr]
__currentReservoir__: -> @runScript "current_reservoir", []
__check__: (weight) -> @convertBool await @runScript "check", @prepareArray [weight]
__register__: (index, weight, expiration) ->
[success, wait, reservoir] = await @runScript "register", @prepareArray [index, weight, expiration]
return {
success: @convertBool(success),
wait,
reservoir
}
__submit__: (queueLength, weight) ->
try
[reachedHWM, blocked, strategy] = await @runScript "submit", @prepareArray [queueLength, weight]
return {
reachedHWM: @convertBool(reachedHWM),
blocked: @convertBool(blocked),
strategy
}
catch e
if e.message.indexOf("OVERWEIGHT") == 0
[overweight, weight, maxConcurrent] = e.message.split ":"
throw new BottleneckError("Impossible to add a job having a weight of #{weight} to a limiter having a maxConcurrent setting of #{maxConcurrent}")
else
throw e
__free__: (index, weight) ->
running = await @runScript "free", @prepareArray [index]
return { running }
module.exports = RedisDatastore

151
node_modules/bottleneck/src/Scripts.coffee generated vendored Normal file

@ -0,0 +1,151 @@
lua = require "./lua.json"
headers =
refs: lua["refs.lua"]
validate_keys: lua["validate_keys.lua"]
validate_client: lua["validate_client.lua"]
refresh_expiration: lua["refresh_expiration.lua"]
process_tick: lua["process_tick.lua"]
conditions_check: lua["conditions_check.lua"]
get_time: lua["get_time.lua"]
exports.allKeys = (id) -> [
###
HASH
###
"b_#{id}_settings"
###
HASH
job index -> weight
###
"b_#{id}_job_weights"
###
ZSET
job index -> expiration
###
"b_#{id}_job_expirations"
###
HASH
job index -> client
###
"b_#{id}_job_clients"
###
ZSET
client -> sum running
###
"b_#{id}_client_running"
###
HASH
client -> num queued
###
"b_#{id}_client_num_queued"
###
ZSET
client -> last job registered
###
"b_#{id}_client_last_registered"
###
ZSET
client -> last seen
###
"b_#{id}_client_last_seen"
]
templates =
init:
keys: exports.allKeys
headers: ["process_tick"]
refresh_expiration: true
code: lua["init.lua"]
group_check:
keys: exports.allKeys
headers: []
refresh_expiration: false
code: lua["group_check.lua"]
register_client:
keys: exports.allKeys
headers: ["validate_keys"]
refresh_expiration: false
code: lua["register_client.lua"]
blacklist_client:
keys: exports.allKeys
headers: ["validate_keys", "validate_client"]
refresh_expiration: false
code: lua["blacklist_client.lua"]
heartbeat:
keys: exports.allKeys
headers: ["validate_keys", "validate_client", "process_tick"]
refresh_expiration: false
code: lua["heartbeat.lua"]
update_settings:
keys: exports.allKeys
headers: ["validate_keys", "validate_client", "process_tick"]
refresh_expiration: true
code: lua["update_settings.lua"]
running:
keys: exports.allKeys
headers: ["validate_keys", "validate_client", "process_tick"]
refresh_expiration: false
code: lua["running.lua"]
queued:
keys: exports.allKeys
headers: ["validate_keys", "validate_client"]
refresh_expiration: false
code: lua["queued.lua"]
done:
keys: exports.allKeys
headers: ["validate_keys", "validate_client", "process_tick"]
refresh_expiration: false
code: lua["done.lua"]
check:
keys: exports.allKeys
headers: ["validate_keys", "validate_client", "process_tick", "conditions_check"]
refresh_expiration: false
code: lua["check.lua"]
submit:
keys: exports.allKeys
headers: ["validate_keys", "validate_client", "process_tick", "conditions_check"]
refresh_expiration: true
code: lua["submit.lua"]
register:
keys: exports.allKeys
headers: ["validate_keys", "validate_client", "process_tick", "conditions_check"]
refresh_expiration: true
code: lua["register.lua"]
free:
keys: exports.allKeys
headers: ["validate_keys", "validate_client", "process_tick"]
refresh_expiration: true
code: lua["free.lua"]
current_reservoir:
keys: exports.allKeys
headers: ["validate_keys", "validate_client", "process_tick"]
refresh_expiration: false
code: lua["current_reservoir.lua"]
increment_reservoir:
keys: exports.allKeys
headers: ["validate_keys", "validate_client", "process_tick"]
refresh_expiration: true
code: lua["increment_reservoir.lua"]
exports.names = Object.keys templates
exports.keys = (name, id) ->
templates[name].keys id
exports.payload = (name) ->
template = templates[name]
Array::concat(
headers.refs,
template.headers.map((h) -> headers[h]),
(if template.refresh_expiration then headers.refresh_expiration else ""),
template.code
)
.join("\n")

43
node_modules/bottleneck/src/States.coffee generated vendored Normal file

@ -0,0 +1,43 @@
BottleneckError = require "./BottleneckError"
class States
constructor: (@status) ->
@_jobs = {}
@counts = @status.map(-> 0)
next: (id) ->
current = @_jobs[id]
next = current + 1
if current? and next < @status.length
@counts[current]--
@counts[next]++
@_jobs[id]++
else if current?
@counts[current]--
delete @_jobs[id]
start: (id) ->
initial = 0
@_jobs[id] = initial
@counts[initial]++
remove: (id) ->
current = @_jobs[id]
if current?
@counts[current]--
delete @_jobs[id]
current?
jobStatus: (id) -> @status[@_jobs[id]] ? null
statusJobs: (status) ->
if status?
pos = @status.indexOf status
if pos < 0
throw new BottleneckError "status must be one of #{@status.join ', '}"
k for k,v of @_jobs when v == pos
else
Object.keys @_jobs
statusCounts: -> @counts.reduce(((acc, v, i) => acc[@status[i]] = v; acc), {})
module.exports = States

28
node_modules/bottleneck/src/Sync.coffee generated vendored Normal file

@ -0,0 +1,28 @@
DLList = require "./DLList"
class Sync
constructor: (@name, @Promise) ->
@_running = 0
@_queue = new DLList()
isEmpty: -> @_queue.length == 0
_tryToRun: ->
if (@_running < 1) and @_queue.length > 0
@_running++
{ task, args, resolve, reject } = @_queue.shift()
cb = try
returned = await task args...
() -> resolve returned
catch error
() -> reject error
@_running--
@_tryToRun()
cb()
schedule: (task, args...) =>
resolve = reject = null
promise = new @Promise (_resolve, _reject) ->
resolve = _resolve
reject = _reject
@_queue.push { task, args, resolve, reject }
@_tryToRun()
promise
module.exports = Sync

3
node_modules/bottleneck/src/es5.coffee generated vendored Normal file

@ -0,0 +1,3 @@
require("regenerator-runtime/runtime")
module.exports = require "./Bottleneck"

1
node_modules/bottleneck/src/index.coffee generated vendored Normal file

@ -0,0 +1 @@
module.exports = require "./Bottleneck"

10
node_modules/bottleneck/src/parser.coffee generated vendored Normal file

@ -0,0 +1,10 @@
exports.load = (received, defaults, onto={}) ->
for k, v of defaults
onto[k] = received[k] ? v
onto
exports.overwrite = (received, defaults, onto={}) ->
for k, v of received
if defaults[k] != undefined
onto[k] = v
onto

8
node_modules/bottleneck/src/redis/blacklist_client.lua generated vendored Normal file

@ -0,0 +1,8 @@
local blacklist = ARGV[num_static_argv + 1]
if redis.call('zscore', client_last_seen_key, blacklist) then
redis.call('zadd', client_last_seen_key, 0, blacklist)
end
return {}

6
node_modules/bottleneck/src/redis/check.lua generated vendored Normal file

@ -0,0 +1,6 @@
local weight = tonumber(ARGV[num_static_argv + 1])
local capacity = process_tick(now, false)['capacity']
local nextRequest = tonumber(redis.call('hget', settings_key, 'nextRequest'))
return conditions_check(capacity, weight) and nextRequest - now <= 0

3
node_modules/bottleneck/src/redis/conditions_check.lua generated vendored Normal file

@ -0,0 +1,3 @@
local conditions_check = function (capacity, weight)
return capacity == nil or weight <= capacity
end

1
node_modules/bottleneck/src/redis/current_reservoir.lua generated vendored Normal file

@ -0,0 +1 @@
return process_tick(now, false)['reservoir']

3
node_modules/bottleneck/src/redis/done.lua generated vendored Normal file

@ -0,0 +1,3 @@
process_tick(now, false)
return tonumber(redis.call('hget', settings_key, 'done'))

5
node_modules/bottleneck/src/redis/free.lua generated vendored Normal file

@ -0,0 +1,5 @@
local index = ARGV[num_static_argv + 1]
redis.call('zadd', job_expirations_key, 0, index)
return process_tick(now, false)['running']

7
node_modules/bottleneck/src/redis/get_time.lua generated vendored Normal file

@ -0,0 +1,7 @@
redis.replicate_commands()
local get_time = function ()
local time = redis.call('time')
return tonumber(time[1]..string.sub(time[2], 1, 3))
end

1
node_modules/bottleneck/src/redis/group_check.lua generated vendored Normal file

@ -0,0 +1 @@
return not (redis.call('exists', settings_key) == 1)

1
node_modules/bottleneck/src/redis/heartbeat.lua generated vendored Normal file

@ -0,0 +1 @@
process_tick(now, true)

10
node_modules/bottleneck/src/redis/increment_reservoir.lua generated vendored Normal file

@ -0,0 +1,10 @@
local incr = tonumber(ARGV[num_static_argv + 1])
redis.call('hincrby', settings_key, 'reservoir', incr)
local reservoir = process_tick(now, true)['reservoir']
local groupTimeout = tonumber(redis.call('hget', settings_key, 'groupTimeout'))
refresh_expiration(0, 0, groupTimeout)
return reservoir

105
node_modules/bottleneck/src/redis/init.lua generated vendored Normal file

@ -0,0 +1,105 @@
local clear = tonumber(ARGV[num_static_argv + 1])
local limiter_version = ARGV[num_static_argv + 2]
local num_local_argv = num_static_argv + 2
if clear == 1 then
redis.call('del', unpack(KEYS))
end
if redis.call('exists', settings_key) == 0 then
-- Create
local args = {'hmset', settings_key}
for i = num_local_argv + 1, #ARGV do
table.insert(args, ARGV[i])
end
redis.call(unpack(args))
redis.call('hmset', settings_key,
'nextRequest', now,
'lastReservoirRefresh', now,
'lastReservoirIncrease', now,
'running', 0,
'done', 0,
'unblockTime', 0,
'capacityPriorityCounter', 0
)
else
-- Apply migrations
local settings = redis.call('hmget', settings_key,
'id',
'version'
)
local id = settings[1]
local current_version = settings[2]
if current_version ~= limiter_version then
local version_digits = {}
for k, v in string.gmatch(current_version, "([^.]+)") do
table.insert(version_digits, tonumber(k))
end
-- 2.10.0
if version_digits[2] < 10 then
redis.call('hsetnx', settings_key, 'reservoirRefreshInterval', '')
redis.call('hsetnx', settings_key, 'reservoirRefreshAmount', '')
redis.call('hsetnx', settings_key, 'lastReservoirRefresh', '')
redis.call('hsetnx', settings_key, 'done', 0)
redis.call('hset', settings_key, 'version', '2.10.0')
end
-- 2.11.1
if version_digits[2] < 11 or (version_digits[2] == 11 and version_digits[3] < 1) then
if redis.call('hstrlen', settings_key, 'lastReservoirRefresh') == 0 then
redis.call('hmset', settings_key,
'lastReservoirRefresh', now,
'version', '2.11.1'
)
end
end
-- 2.14.0
if version_digits[2] < 14 then
local old_running_key = 'b_'..id..'_running'
local old_executing_key = 'b_'..id..'_executing'
if redis.call('exists', old_running_key) == 1 then
redis.call('rename', old_running_key, job_weights_key)
end
if redis.call('exists', old_executing_key) == 1 then
redis.call('rename', old_executing_key, job_expirations_key)
end
redis.call('hset', settings_key, 'version', '2.14.0')
end
-- 2.15.2
if version_digits[2] < 15 or (version_digits[2] == 15 and version_digits[3] < 2) then
redis.call('hsetnx', settings_key, 'capacityPriorityCounter', 0)
redis.call('hset', settings_key, 'version', '2.15.2')
end
-- 2.17.0
if version_digits[2] < 17 then
redis.call('hsetnx', settings_key, 'clientTimeout', 10000)
redis.call('hset', settings_key, 'version', '2.17.0')
end
-- 2.18.0
if version_digits[2] < 18 then
redis.call('hsetnx', settings_key, 'reservoirIncreaseInterval', '')
redis.call('hsetnx', settings_key, 'reservoirIncreaseAmount', '')
redis.call('hsetnx', settings_key, 'reservoirIncreaseMaximum', '')
redis.call('hsetnx', settings_key, 'lastReservoirIncrease', now)
redis.call('hset', settings_key, 'version', '2.18.0')
end
end
process_tick(now, false)
end
local groupTimeout = tonumber(redis.call('hget', settings_key, 'groupTimeout'))
refresh_expiration(0, 0, groupTimeout)
return {}

214
node_modules/bottleneck/src/redis/process_tick.lua generated vendored Normal file
View file

@ -0,0 +1,214 @@
local process_tick = function (now, always_publish)
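-- Shared upkeep routine: settles expired jobs, refreshes/increases the
-- reservoir, clears unresponsive clients, and broadcasts capacity changes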
local compute_capacity = function (maxConcurrent, running, reservoir)
if maxConcurrent ~= nil and reservoir ~= nil then
return math.min((maxConcurrent - running), reservoir)
elseif maxConcurrent ~= nil then
return maxConcurrent - running
elseif reservoir ~= nil then
return reservoir
else
return nil
end
end
local settings = redis.call('hmget', settings_key,
'id',
'maxConcurrent',
'running',
'reservoir',
'reservoirRefreshInterval',
'reservoirRefreshAmount',
'lastReservoirRefresh',
'reservoirIncreaseInterval',
'reservoirIncreaseAmount',
'reservoirIncreaseMaximum',
'lastReservoirIncrease',
'capacityPriorityCounter',
'clientTimeout'
)
local id = settings[1]
local maxConcurrent = tonumber(settings[2])
local running = tonumber(settings[3])
local reservoir = tonumber(settings[4])
local reservoirRefreshInterval = tonumber(settings[5])
local reservoirRefreshAmount = tonumber(settings[6])
local lastReservoirRefresh = tonumber(settings[7])
local reservoirIncreaseInterval = tonumber(settings[8])
local reservoirIncreaseAmount = tonumber(settings[9])
local reservoirIncreaseMaximum = tonumber(settings[10])
local lastReservoirIncrease = tonumber(settings[11])
local capacityPriorityCounter = tonumber(settings[12])
local clientTimeout = tonumber(settings[13])
local initial_capacity = compute_capacity(maxConcurrent, running, reservoir)
--
-- Process 'running' changes
--
local expired = redis.call('zrangebyscore', job_expirations_key, '-inf', '('..now)
if #expired > 0 then
redis.call('zremrangebyscore', job_expirations_key, '-inf', '('..now)
local flush_batch = function (batch, acc)
local weights = redis.call('hmget', job_weights_key, unpack(batch))
redis.call('hdel', job_weights_key, unpack(batch))
local clients = redis.call('hmget', job_clients_key, unpack(batch))
redis.call('hdel', job_clients_key, unpack(batch))
-- Calculate sum of removed weights
for i = 1, #weights do
acc['total'] = acc['total'] + (tonumber(weights[i]) or 0)
end
-- Calculate sum of removed weights by client
for i = 1, #clients do
local removed = tonumber(weights[i]) or 0
if removed > 0 then
acc['client_weights'][clients[i]] = (acc['client_weights'][clients[i]] or 0) + removed
end
end
end
local acc = {
['total'] = 0,
['client_weights'] = {}
}
local batch_size = 1000
-- Compute changes to Zsets and apply changes to Hashes
for i = 1, #expired, batch_size do
local batch = {}
for j = i, math.min(i + batch_size - 1, #expired) do
table.insert(batch, expired[j])
end
flush_batch(batch, acc)
end
-- Apply changes to Zsets
if acc['total'] > 0 then
redis.call('hincrby', settings_key, 'done', acc['total'])
running = tonumber(redis.call('hincrby', settings_key, 'running', -acc['total']))
end
for client, weight in pairs(acc['client_weights']) do
redis.call('zincrby', client_running_key, -weight, client)
end
end
--
-- Process 'reservoir' changes
--
local reservoirRefreshActive = reservoirRefreshInterval ~= nil and reservoirRefreshAmount ~= nil
if reservoirRefreshActive and now >= lastReservoirRefresh + reservoirRefreshInterval then
reservoir = reservoirRefreshAmount
redis.call('hmset', settings_key,
'reservoir', reservoir,
'lastReservoirRefresh', now
)
end
local reservoirIncreaseActive = reservoirIncreaseInterval ~= nil and reservoirIncreaseAmount ~= nil
if reservoirIncreaseActive and now >= lastReservoirIncrease + reservoirIncreaseInterval then
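-- Credit every whole interval elapsed since the last increase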
local num_intervals = math.floor((now - lastReservoirIncrease) / reservoirIncreaseInterval)
local incr = reservoirIncreaseAmount * num_intervals
if reservoirIncreaseMaximum ~= nil then
incr = math.min(incr, reservoirIncreaseMaximum - (reservoir or 0))
end
if incr > 0 then
reservoir = (reservoir or 0) + incr
end
redis.call('hmset', settings_key,
'reservoir', reservoir,
'lastReservoirIncrease', lastReservoirIncrease + (num_intervals * reservoirIncreaseInterval)
)
end
--
-- Clear unresponsive clients
--
local unresponsive = redis.call('zrangebyscore', client_last_seen_key, '-inf', (now - clientTimeout))
local unresponsive_lookup = {}
local terminated_clients = {}
for i = 1, #unresponsive do
unresponsive_lookup[unresponsive[i]] = true
if tonumber(redis.call('zscore', client_running_key, unresponsive[i])) == 0 then
table.insert(terminated_clients, unresponsive[i])
end
end
if #terminated_clients > 0 then
redis.call('zrem', client_running_key, unpack(terminated_clients))
redis.call('hdel', client_num_queued_key, unpack(terminated_clients))
redis.call('zrem', client_last_registered_key, unpack(terminated_clients))
redis.call('zrem', client_last_seen_key, unpack(terminated_clients))
end
--
-- Broadcast capacity changes
--
local final_capacity = compute_capacity(maxConcurrent, running, reservoir)
if always_publish or (initial_capacity ~= nil and final_capacity == nil) then
-- always_publish, or the capacity went from limited to unlimited
redis.call('publish', 'b_'..id, 'capacity:'..(final_capacity or ''))
elseif initial_capacity ~= nil and final_capacity ~= nil and final_capacity > initial_capacity then
-- capacity was increased
-- send the capacity message to the limiter with the fewest running jobs
-- ties are broken in favor of the limiter that has gone longest without registering a job
local lowest_concurrency_value = nil
local lowest_concurrency_clients = {}
local lowest_concurrency_last_registered = {}
local client_concurrencies = redis.call('zrange', client_running_key, 0, -1, 'withscores')
for i = 1, #client_concurrencies, 2 do
local client = client_concurrencies[i]
local concurrency = tonumber(client_concurrencies[i+1])
if (
lowest_concurrency_value == nil or lowest_concurrency_value == concurrency
) and (
not unresponsive_lookup[client]
) and (
tonumber(redis.call('hget', client_num_queued_key, client)) > 0
) then
lowest_concurrency_value = concurrency
table.insert(lowest_concurrency_clients, client)
local last_registered = tonumber(redis.call('zscore', client_last_registered_key, client))
table.insert(lowest_concurrency_last_registered, last_registered)
end
end
if #lowest_concurrency_clients > 0 then
local position = 1
local earliest = lowest_concurrency_last_registered[1]
for i,v in ipairs(lowest_concurrency_last_registered) do
if v < earliest then
position = i
earliest = v
end
end
local next_client = lowest_concurrency_clients[position]
redis.call('publish', 'b_'..id,
'capacity-priority:'..(final_capacity or '')..
':'..next_client..
':'..capacityPriorityCounter
)
redis.call('hincrby', settings_key, 'capacityPriorityCounter', '1')
else
redis.call('publish', 'b_'..id, 'capacity:'..(final_capacity or ''))
end
end
return {
['capacity'] = final_capacity,
['running'] = running,
['reservoir'] = reservoir
}
end

10
node_modules/bottleneck/src/redis/queued.lua generated vendored Normal file
View file

@ -0,0 +1,10 @@
local clientTimeout = tonumber(redis.call('hget', settings_key, 'clientTimeout'))
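-- Only count jobs queued on clients seen within the clientTimeout window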
local valid_clients = redis.call('zrangebyscore', client_last_seen_key, (now - clientTimeout), 'inf')
local client_queued = redis.call('hmget', client_num_queued_key, unpack(valid_clients))
local sum = 0
for i = 1, #client_queued do
sum = sum + tonumber(client_queued[i])
end
return sum

11
node_modules/bottleneck/src/redis/refresh_expiration.lua generated vendored Normal file
View file

@ -0,0 +1,11 @@
local refresh_expiration = function (now, nextRequest, groupTimeout)
if groupTimeout ~= nil then
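-- Only Groups set a groupTimeout; keep every key alive until it elapses after the next request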
local ttl = (nextRequest + groupTimeout) - now
for i = 1, #KEYS do
redis.call('pexpire', KEYS[i], ttl)
end
end
end

13
node_modules/bottleneck/src/redis/refs.lua generated vendored Normal file
View file

@ -0,0 +1,13 @@
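-- Shared bindings; the other scripts in this directory reference these names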
local settings_key = KEYS[1]
local job_weights_key = KEYS[2]
local job_expirations_key = KEYS[3]
local job_clients_key = KEYS[4]
local client_running_key = KEYS[5]
local client_num_queued_key = KEYS[6]
local client_last_registered_key = KEYS[7]
local client_last_seen_key = KEYS[8]
local now = tonumber(ARGV[1])
local client = ARGV[2]
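-- Script-specific arguments start at ARGV[num_static_argv + 1]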
local num_static_argv = 2

51
node_modules/bottleneck/src/redis/register.lua generated vendored Normal file
View file

@ -0,0 +1,51 @@
local index = ARGV[num_static_argv + 1]
local weight = tonumber(ARGV[num_static_argv + 2])
local expiration = tonumber(ARGV[num_static_argv + 3])
local state = process_tick(now, false)
local capacity = state['capacity']
local reservoir = state['reservoir']
local settings = redis.call('hmget', settings_key,
'nextRequest',
'minTime',
'groupTimeout'
)
local nextRequest = tonumber(settings[1])
local minTime = tonumber(settings[2])
local groupTimeout = tonumber(settings[3])
if conditions_check(capacity, weight) then
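-- Enough capacity: account for the job's weight and owning client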
redis.call('hincrby', settings_key, 'running', weight)
redis.call('hset', job_weights_key, index, weight)
if expiration ~= nil then
redis.call('zadd', job_expirations_key, now + expiration, index)
end
redis.call('hset', job_clients_key, index, client)
redis.call('zincrby', client_running_key, weight, client)
redis.call('hincrby', client_num_queued_key, client, -1)
redis.call('zadd', client_last_registered_key, now, client)
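-- Enforce minTime: run no earlier than nextRequest, then push nextRequest out for the following job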
local wait = math.max(nextRequest - now, 0)
local newNextRequest = now + wait + minTime
if reservoir == nil then
redis.call('hset', settings_key,
'nextRequest', newNextRequest
)
else
reservoir = reservoir - weight
redis.call('hmset', settings_key,
'reservoir', reservoir,
'nextRequest', newNextRequest
)
end
refresh_expiration(now, newNextRequest, groupTimeout)
return {true, wait, reservoir}
else
return {false}
end

12
node_modules/bottleneck/src/redis/register_client.lua generated vendored Normal file
View file

@ -0,0 +1,12 @@
local queued = tonumber(ARGV[num_static_argv + 1])
-- Could have been re-registered concurrently
if not redis.call('zscore', client_last_seen_key, client) then
redis.call('zadd', client_running_key, 0, client)
redis.call('hset', client_num_queued_key, client, queued)
redis.call('zadd', client_last_registered_key, 0, client)
end
redis.call('zadd', client_last_seen_key, now, client)
return {}

1
node_modules/bottleneck/src/redis/running.lua generated vendored Normal file
View file

@ -0,0 +1 @@
return process_tick(now, false)['running']

74
node_modules/bottleneck/src/redis/submit.lua generated vendored Normal file
View file

@ -0,0 +1,74 @@
local queueLength = tonumber(ARGV[num_static_argv + 1])
local weight = tonumber(ARGV[num_static_argv + 2])
local capacity = process_tick(now, false)['capacity']
local settings = redis.call('hmget', settings_key,
'id',
'maxConcurrent',
'highWater',
'nextRequest',
'strategy',
'unblockTime',
'penalty',
'minTime',
'groupTimeout'
)
local id = settings[1]
local maxConcurrent = tonumber(settings[2])
local highWater = tonumber(settings[3])
local nextRequest = tonumber(settings[4])
local strategy = tonumber(settings[5])
local unblockTime = tonumber(settings[6])
local penalty = tonumber(settings[7])
local minTime = tonumber(settings[8])
local groupTimeout = tonumber(settings[9])
if maxConcurrent ~= nil and weight > maxConcurrent then
return redis.error_reply('OVERWEIGHT:'..weight..':'..maxConcurrent)
end
local reachedHWM = (highWater ~= nil and queueLength == highWater
and not (
conditions_check(capacity, weight)
and nextRequest - now <= 0
)
)
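-- Strategy 3 is Bottleneck.strategy.BLOCK: drop queued jobs and refuse new ones until unblockTime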
local blocked = strategy == 3 and (reachedHWM or unblockTime >= now)
if blocked then
local computedPenalty = penalty
if computedPenalty == nil then
if minTime == 0 then
computedPenalty = 5000
else
computedPenalty = 15 * minTime
end
end
local newNextRequest = now + computedPenalty + minTime
redis.call('hmset', settings_key,
'unblockTime', now + computedPenalty,
'nextRequest', newNextRequest
)
local clients_queued_reset = redis.call('hkeys', client_num_queued_key)
local queued_reset = {}
for i = 1, #clients_queued_reset do
table.insert(queued_reset, clients_queued_reset[i])
table.insert(queued_reset, 0)
end
redis.call('hmset', client_num_queued_key, unpack(queued_reset))
redis.call('publish', 'b_'..id, 'blocked:')
refresh_expiration(now, newNextRequest, groupTimeout)
end
if not blocked and not reachedHWM then
redis.call('hincrby', client_num_queued_key, client, 1)
end
return {reachedHWM, blocked, strategy}

14
node_modules/bottleneck/src/redis/update_settings.lua generated vendored Normal file
View file

@ -0,0 +1,14 @@
local args = {'hmset', settings_key}
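-- The remaining ARGV entries are alternating field/value pairs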
for i = num_static_argv + 1, #ARGV do
table.insert(args, ARGV[i])
end
redis.call(unpack(args))
process_tick(now, true)
local groupTimeout = tonumber(redis.call('hget', settings_key, 'groupTimeout'))
refresh_expiration(0, 0, groupTimeout)
return {}

5
node_modules/bottleneck/src/redis/validate_client.lua generated vendored Normal file
View file

@ -0,0 +1,5 @@
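-- A client evicted for inactivity must re-register before issuing further commands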
if not redis.call('zscore', client_last_seen_key, client) then
return redis.error_reply('UNKNOWN_CLIENT')
end
redis.call('zadd', client_last_seen_key, now, client)

3
node_modules/bottleneck/src/redis/validate_keys.lua generated vendored Normal file
View file

@ -0,0 +1,3 @@
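-- Guard: fail unless the limiter has been initialized by init.lua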
if not (redis.call('exists', settings_key) == 1) then
return redis.error_reply('SETTINGS_KEY_NOT_FOUND')
end

335
node_modules/bottleneck/test.ts generated vendored Normal file
View file

@ -0,0 +1,335 @@
/// <reference path="bottleneck.d.ts" />
import Bottleneck from "bottleneck";
// import * as assert from "assert";
function assert(b: boolean): void { }
/*
This file is run by scripts/build.sh.
It is used to validate the typings in bottleneck.d.ts.
The command is: tsc --noEmit --strictNullChecks test.ts
This file cannot be run directly.
To run it directly, comment out the first line
and change "bottleneck" to "." in the import statement.
*/
function withCb(foo: number, bar: () => void, cb: (err: any, result: string) => void) {
let s: string = `cb ${foo}`;
cb(null, s);
}
console.log(Bottleneck);
let limiter = new Bottleneck({
maxConcurrent: 5,
minTime: 1000,
highWater: 20,
strategy: Bottleneck.strategy.LEAK,
reservoirRefreshInterval: 1000 * 60,
reservoirRefreshAmount: 10,
reservoirIncreaseInterval: 1000 * 60,
reservoirIncreaseAmount: 2,
reservoirIncreaseMaximum: 15
});
limiter.ready().then(() => { console.log('Ready') });
limiter.clients().client;
limiter.disconnect();
limiter.currentReservoir().then(function (x) {
if (x != null) {
let i: number = x;
}
});
limiter.incrementReservoir(5).then(function (x) {
if (x != null) {
let i: number = x;
}
});
limiter.running().then(function (x) {
let i: number = x;
});
limiter.clusterQueued().then(function (x) {
let i: number = x;
});
limiter.done().then(function (x) {
let i: number = x;
});
limiter.submit(withCb, 1, () => {}, (err, result) => {
let s: string = result;
console.log(s);
assert(s == "cb 1");
});
function withPromise(foo: number, bar: () => void): PromiseLike<string> {
let s: string = `promise ${foo}`;
return Promise.resolve(s);
}
let foo: Promise<string> = limiter.schedule(withPromise, 1, () => {});
foo.then(function (result: string) {
let s: string = result;
console.log(s);
assert(s == "promise 1");
});
limiter.on("message", (msg) => console.log(msg));
limiter.publish(JSON.stringify({ a: "abc", b: { c: 123 }}));
function checkEventInfo(info: Bottleneck.EventInfo) {
const numArgs: number = info.args.length;
const id: string = info.options.id;
}
limiter.on('dropped', (info) => {
checkEventInfo(info)
const task: Function = info.task;
const promise: Promise<any> = info.promise;
})
limiter.on('received', (info) => {
checkEventInfo(info)
})
limiter.on('queued', (info) => {
checkEventInfo(info)
const blocked: boolean = info.blocked;
const reachedHWM: boolean = info.reachedHWM;
})
limiter.on('scheduled', (info) => {
checkEventInfo(info)
})
limiter.on('executing', (info) => {
checkEventInfo(info)
const count: number = info.retryCount;
})
limiter.on('failed', (error, info) => {
checkEventInfo(info)
const message: string = error.message;
const count: number = info.retryCount;
return Promise.resolve(10)
})
limiter.on('failed', (error, info) => {
checkEventInfo(info)
const message: string = error.message;
const count: number = info.retryCount;
return Promise.resolve(null)
})
limiter.on('failed', (error, info) => {
checkEventInfo(info)
const message: string = error.message;
const count: number = info.retryCount;
return Promise.resolve()
})
limiter.on('failed', (error, info) => {
checkEventInfo(info)
const message: string = error.message;
const count: number = info.retryCount;
return 10
})
limiter.on('failed', (error, info) => {
checkEventInfo(info)
const message: string = error.message;
const count: number = info.retryCount;
return null
})
limiter.on('failed', (error, info) => {
checkEventInfo(info)
const message: string = error.message;
const count: number = info.retryCount;
})
limiter.on('retry', (message: string, info) => {
checkEventInfo(info)
const count: number = info.retryCount;
})
limiter.on('done', (info) => {
checkEventInfo(info)
const count: number = info.retryCount;
})
let group = new Bottleneck.Group({
maxConcurrent: 5,
minTime: 1000,
highWater: 10,
strategy: Bottleneck.strategy.LEAK,
datastore: "ioredis",
clearDatastore: true,
clientOptions: {},
clusterNodes: []
});
group.on('created', (limiter, key) => {
assert(limiter.empty())
assert(key.length > 0)
})
group.key("foo").submit(withCb, 2, () => {}, (err, result) => {
let s: string = `${result} foo`;
console.log(s);
assert(s == "cb 2 foo");
});
group.key("bar").submit({ priority: 4 }, withCb, 3, () => {}, (err, result) => {
let s: string = `${result} bar`;
console.log(s);
assert(s == "cb 3 foo");
});
let f1: Promise<string> = group.key("pizza").schedule(withPromise, 2, () => {});
f1.then(function (result: string) {
let s: string = result;
console.log(s);
assert(s == "promise 2");
});
let f2: Promise<string> = group.key("pie").schedule({ priority: 4 }, withPromise, 3, () => {});
f2.then(function (result: string) {
let s: string = result;
console.log(s);
assert(s == "promise 3");
});
let wrapped = limiter.wrap((a: number, b: number) => {
let s: string = `Total: ${a + b}`;
return Promise.resolve(s);
});
wrapped(1, 2).then((x) => {
let s: string = x;
console.log(s);
assert(s == "Total: 3");
});
wrapped.withOptions({ priority: 1, id: 'some-id' }, 9, 9).then((x) => {
let s: string = x;
console.log(s);
assert(s == "Total: 18");
})
let counts = limiter.counts();
console.log(`${counts.EXECUTING + 2}`);
console.log(limiter.jobStatus('some-id'))
console.log(limiter.jobs());
console.log(limiter.jobs(Bottleneck.Status.RUNNING));
group.deleteKey("pizza")
.then(function (deleted: boolean) {
console.log(deleted)
});
group.updateSettings({ timeout: 5, maxConcurrent: null, reservoir: null });
let keys: string[] = group.keys();
assert(keys.length == 3);
group.clusterKeys()
.then(function (allKeys: string[]) {
let count = allKeys.length;
})
let queued: number = limiter.chain(group.key("pizza")).queued();
limiter.stop({
dropWaitingJobs: true,
dropErrorMessage: "Begone!",
enqueueErrorMessage: "Denied!"
}).then(() => {
console.log('All stopped.')
})
wrapped(4, 5).catch((e) => {
assert(e.message === "Denied!")
})
const id: string = limiter.id;
const datastore: string = limiter.datastore;
const channel: string = limiter.channel();
const redisConnection = new Bottleneck.RedisConnection({
client: "NodeRedis client object",
clientOptions: {}
})
redisConnection.ready()
.then(function (redisConnectionClients) {
const client = redisConnectionClients.client;
const subscriber = redisConnectionClients.subscriber;
})
redisConnection.on("error", (err) => {
console.log(err.message)
})
const limiterWithConn = new Bottleneck({
connection: redisConnection
})
const ioredisConnection = new Bottleneck.IORedisConnection({
client: "ioredis client object",
clientOptions: {},
clusterNodes: []
})
ioredisConnection.ready()
.then(function (ioredisConnectionClients) {
const client = ioredisConnectionClients.client;
const subscriber = ioredisConnectionClients.subscriber;
})
ioredisConnection.on("error", (err: Bottleneck.BottleneckError) => {
console.log(err.message)
})
const groupWithConn = new Bottleneck.Group({
connection: ioredisConnection
})
const limiterWithConnFromGroup = new Bottleneck({
connection: groupWithConn.connection
})
const groupWithConnFromLimiter = new Bottleneck.Group({
connection: limiterWithConn.connection
})
const batcher = new Bottleneck.Batcher({
maxTime: 1000,
maxSize: 10
})
batcher.on("batch", (batch) => {
const len: number = batch.length
console.log("Number of elements:", len)
})
batcher.on("error", (err: Bottleneck.BottleneckError) => {
console.log(err.message)
})
batcher.add("abc")
batcher.add({ xyz: 5 })
.then(() => console.log("Flushed!"))
const object = {}
const emitter = new Bottleneck.Events(object)
const listenerCount: number = emitter.listenerCount('info')
emitter.trigger('info', 'hello', 'world', 123).then(function (result) {
console.log(result)
})

148
node_modules/bottleneck/test/DLList.js generated vendored Normal file
View file

@ -0,0 +1,148 @@
var DLList = require('../lib/DLList')
var assert = require('assert')
var c = require('./context')({datastore: 'local'})
var fakeQueues = function () {
this._length = 0
this.incr = () => this._length++
this.decr = () => this._length--
this.fns = [this.incr, this.decr]
}
describe('DLList', function () {
it('Should be created and be empty', function () {
var queues = new fakeQueues()
var list = new DLList()
c.mustEqual(list.getArray().length, 0)
})
it('Should be possible to append once', function () {
var queues = new fakeQueues()
var list = new DLList(...queues.fns)
list.push(5)
var arr = list.getArray()
c.mustEqual(arr.length, 1)
c.mustEqual(list.length, 1)
c.mustEqual(queues._length, 1)
c.mustEqual(arr[0], 5)
})
it('Should be possible to append multiple times', function () {
var queues = new fakeQueues()
var list = new DLList(...queues.fns)
list.push(5)
list.push(6)
var arr = list.getArray()
c.mustEqual(arr.length, 2)
c.mustEqual(list.length, 2)
c.mustEqual(queues._length, 2)
c.mustEqual(arr[0], 5)
c.mustEqual(arr[1], 6)
list.push(10)
arr = list.getArray()
c.mustEqual(arr.length, 3)
c.mustEqual(list.length, 3)
c.mustEqual(arr[0], 5)
c.mustEqual(arr[1], 6)
c.mustEqual(arr[2], 10)
})
it('Should be possible to shift an empty list', function () {
var queues = new fakeQueues()
var list = new DLList(...queues.fns)
c.mustEqual(list.length, 0)
assert(list.shift() === undefined)
var arr = list.getArray()
c.mustEqual(arr.length, 0)
c.mustEqual(list.length, 0)
assert(list.shift() === undefined)
arr = list.getArray()
c.mustEqual(arr.length, 0)
c.mustEqual(list.length, 0)
c.mustEqual(queues._length, 0)
})
it('Should be possible to append then shift once', function () {
var queues = new fakeQueues()
var list = new DLList(...queues.fns)
list.push(5)
c.mustEqual(list.length, 1)
c.mustEqual(list.shift(), 5)
var arr = list.getArray()
c.mustEqual(arr.length, 0)
c.mustEqual(list.length, 0)
c.mustEqual(queues._length, 0)
})
it('Should be possible to append then shift multiple times', function () {
var queues = new fakeQueues()
var list = new DLList(...queues.fns)
list.push(5)
c.mustEqual(list.length, 1)
c.mustEqual(list.shift(), 5)
c.mustEqual(list.length, 0)
list.push(6)
c.mustEqual(list.length, 1)
c.mustEqual(list.shift(), 6)
c.mustEqual(list.length, 0)
c.mustEqual(queues._length, 0)
})
it('Should pass a full test', function () {
var queues = new fakeQueues()
var list = new DLList(...queues.fns)
list.push(10)
c.mustEqual(list.length, 1)
list.push("11")
c.mustEqual(list.length, 2)
list.push(12)
c.mustEqual(list.length, 3)
c.mustEqual(queues._length, 3)
c.mustEqual(list.shift(), 10)
c.mustEqual(list.length, 2)
c.mustEqual(list.shift(),"11")
c.mustEqual(list.length, 1)
list.push(true)
c.mustEqual(list.length, 2)
var arr = list.getArray()
c.mustEqual(arr[0], 12)
c.mustEqual(arr[1], true)
c.mustEqual(arr.length, 2)
c.mustEqual(queues._length, 2)
})
it('Should return the first value without shifting', function () {
var queues = new fakeQueues()
var list = new DLList(...queues.fns)
assert(list.first() === undefined)
assert(list.first() === undefined)
list.push(1)
c.mustEqual(list.first(), 1)
c.mustEqual(list.first(), 1)
list.push(2)
c.mustEqual(list.first(), 1)
c.mustEqual(list.first(), 1)
c.mustEqual(list.shift(), 1)
c.mustEqual(list.first(), 2)
c.mustEqual(list.first(), 2)
c.mustEqual(list.shift(), 2)
assert(list.first() === undefined)
assert(list.first() === undefined)
assert(list.first() === undefined)
assert(list.shift() === undefined)
assert(list.first() === undefined)
})
})

209
node_modules/bottleneck/test/batcher.js generated vendored Normal file
View file

@ -0,0 +1,209 @@
var makeTest = require('./context')
var Bottleneck = require('./bottleneck')
var assert = require('assert')
describe('Batcher', function () {
var c
afterEach(function () {
return c.limiter.disconnect(false)
})
it('Should batch by time and size', function () {
c = makeTest()
var batcher = new Bottleneck.Batcher({
maxTime: 50,
maxSize: 3
})
var t0 = Date.now()
var batches = []
batcher.on('batch', function (batch) {
batches.push(batch)
})
return Promise.all([
batcher.add(1).then((x) => c.limiter.schedule(c.promise, null, Date.now() - t0, 1)),
batcher.add(2).then((x) => c.limiter.schedule(c.promise, null, Date.now() - t0, 2)),
batcher.add(3).then((x) => c.limiter.schedule(c.promise, null, Date.now() - t0, 3)),
batcher.add(4).then((x) => c.limiter.schedule(c.promise, null, Date.now() - t0, 4)),
batcher.add(5).then((x) => c.limiter.schedule(c.promise, null, Date.now() - t0, 5))
])
.then(function (data) {
c.mustEqual(
data.map((([t, x]) => [Math.floor(t / 50), x])),
[[0, 1], [0, 2], [0, 3], [1, 4], [1, 5]]
)
return c.last()
})
.then(function (results) {
c.checkDuration(50, 20)
c.mustEqual(batches, [[1, 2, 3], [4, 5]])
})
})
it('Should batch by time', function () {
c = makeTest()
var batcher = new Bottleneck.Batcher({
maxTime: 50
})
var t0 = Date.now()
var batches = []
batcher.on('batch', function (batch) {
batches.push(batch)
})
return Promise.all([
batcher.add(1).then((x) => c.limiter.schedule(c.promise, null, Date.now() - t0, 1)),
batcher.add(2).then((x) => c.limiter.schedule(c.promise, null, Date.now() - t0, 2))
])
.then(function (data) {
c.mustEqual(
data.map((([t, x]) => [Math.floor(t / 50), x])),
[[1, 1], [1, 2]]
)
return Promise.all([
batcher.add(3).then((x) => c.limiter.schedule(c.promise, null, Date.now() - t0, 3)),
batcher.add(4).then((x) => c.limiter.schedule(c.promise, null, Date.now() - t0, 4))
])
})
.then(function (data) {
c.mustEqual(
data.map((([t, x]) => [Math.floor(t / 50), x])),
[[2, 3], [2, 4]]
)
return c.last()
})
.then(function (results) {
c.checkDuration(100)
c.mustEqual(batches, [[1, 2], [3, 4]])
})
})
it('Should batch by size', function () {
c = makeTest()
var batcher = new Bottleneck.Batcher({
maxSize: 2
})
var batches = []
batcher.on('batch', function (batch) {
batches.push(batch)
})
return Promise.all([
batcher.add(1).then((x) => c.limiter.schedule(c.promise, null, 1)),
batcher.add(2).then((x) => c.limiter.schedule(c.promise, null, 2))
])
.then(function () {
return Promise.all([
batcher.add(3).then((x) => c.limiter.schedule(c.promise, null, 3)),
batcher.add(4).then((x) => c.limiter.schedule(c.promise, null, 4))
])
})
.then(c.last)
.then(function (results) {
c.checkDuration(0)
c.mustEqual(batches, [[1, 2], [3, 4]])
})
})
it('Should stagger flushes', function () {
c = makeTest()
var batcher = new Bottleneck.Batcher({
maxTime: 50,
maxSize: 3
})
var t0 = Date.now()
var batches = []
batcher.on('batch', function (batch) {
batches.push(batch)
})
return Promise.all([
batcher.add(1).then((x) => c.limiter.schedule(c.promise, null, Date.now() - t0, 1)),
batcher.add(2).then((x) => c.limiter.schedule(c.promise, null, Date.now() - t0, 2))
])
.then(function (data) {
c.mustEqual(
data.map((([t, x]) => [Math.floor(t / 50), x])),
[[1, 1], [1, 2]]
)
var promises = []
promises.push(batcher.add(3).then((x) => c.limiter.schedule(c.promise, null, Date.now() - t0, 3)))
return c.wait(10)
.then(function () {
promises.push(batcher.add(4).then((x) => c.limiter.schedule(c.promise, null, Date.now() - t0, 4)))
return Promise.all(promises)
})
})
.then(function (data) {
c.mustEqual(
data.map((([t, x]) => [Math.floor(t / 50), x])),
[[2, 3], [2, 4]]
)
return c.last()
})
.then(function (results) {
c.checkDuration(120, 20)
c.mustEqual(batches, [[1, 2], [3, 4]])
})
})
it('Should force then stagger flushes', function () {
c = makeTest()
var batcher = new Bottleneck.Batcher({
maxTime: 50,
maxSize: 3
})
var t0 = Date.now()
var batches = []
batcher.on('batch', function (batch) {
batches.push(batch)
})
var promises = []
promises.push(batcher.add(1).then((x) => c.limiter.schedule(c.promise, null, Date.now() - t0, 1)))
promises.push(batcher.add(2).then((x) => c.limiter.schedule(c.promise, null, Date.now() - t0, 2)))
return c.wait(10)
.then(function () {
promises.push(batcher.add(3).then((x) => c.limiter.schedule(c.promise, null, Date.now() - t0, 3)))
return Promise.all(promises)
})
.then(function (data) {
c.mustEqual(
data.map((([t, x]) => [Math.floor(t / 50), x])),
[[0, 1], [0, 2], [0, 3]]
)
return Promise.all([
batcher.add(4).then((x) => c.limiter.schedule(c.promise, null, Date.now() - t0, 4)),
batcher.add(5).then((x) => c.limiter.schedule(c.promise, null, Date.now() - t0, 5)),
])
})
.then(function (data) {
c.mustEqual(
data.map((([t, x]) => [Math.floor(t / 50), x])),
[[1, 4], [1, 5]]
)
return c.last()
})
.then(function (results) {
c.checkDuration(85, 25)
c.mustEqual(batches, [[1, 2, 3], [4, 5]])
})
})
})

7
node_modules/bottleneck/test/bottleneck.js generated vendored Normal file
View file

@ -0,0 +1,7 @@
if (process.env.BUILD === 'es5') {
module.exports = require('../es5.js')
} else if (process.env.BUILD === 'light') {
module.exports = require('../light.js')
} else {
module.exports = require('../lib/index.js')
}

1549
node_modules/bottleneck/test/cluster.js generated vendored Normal file

File diff suppressed because it is too large

142
node_modules/bottleneck/test/context.js generated vendored Normal file
View file

@ -0,0 +1,142 @@
global.TEST = true
var Bottleneck = require('./bottleneck')
var assert = require('assert')
module.exports = function (options={}) {
var mustEqual = function (a, b) {
var strA = JSON.stringify(a)
var strB = JSON.stringify(b)
if (strA !== strB) {
console.log(strA + ' !== ' + strB, (new Error('').stack))
assert(strA === strB)
}
}
var start
var calls = []
// set options.datastore
var setRedisClientOptions = function (options) {
options.clearDatastore = true
if (options.clientOptions == null) {
options.clientOptions = {
host: process.env.REDIS_HOST,
port: process.env.REDIS_PORT,
}
}
}
if (options.datastore == null && process.env.DATASTORE === 'redis') {
options.datastore = 'redis'
setRedisClientOptions(options)
} else if (options.datastore == null && process.env.DATASTORE === 'ioredis') {
options.datastore = 'ioredis'
setRedisClientOptions(options)
} else {
options.datastore = 'local'
}
var limiter = new Bottleneck(options)
// limiter.on("debug", function (str, args) { console.log(`${Date.now()-start} ${str} ${JSON.stringify(args)}`) })
if (!options.errorEventsExpected) {
limiter.on("error", function (err) {
console.log('(CONTEXT) ERROR EVENT', err)
})
}
limiter.ready().then(function (client) {
start = Date.now()
})
var getResults = function () {
return {
elapsed: Date.now() - start,
callsDuration: calls.length > 0 ? calls[calls.length - 1].time : null,
calls: calls
}
}
var context = {
job: function (err, ...result) {
var cb = result.pop()
calls.push({err: err, result: result, time: Date.now()-start})
if (process.env.DEBUG) console.log(result, calls)
cb.apply({}, [err].concat(result))
},
slowJob: function (duration, err, ...result) {
setTimeout(function () {
var cb = result.pop()
calls.push({err: err, result: result, time: Date.now()-start})
if (process.env.DEBUG) console.log(result, calls)
cb.apply({}, [err].concat(result))
}, duration)
},
promise: function (err, ...result) {
return new Promise(function (resolve, reject) {
if (process.env.DEBUG) console.log('In c.promise. Result: ', result)
calls.push({err: err, result: result, time: Date.now()-start})
if (process.env.DEBUG) console.log(result, calls)
if (err === null) {
return resolve(result)
} else {
return reject(err)
}
})
},
slowPromise: function (duration, err, ...result) {
return new Promise(function (resolve, reject) {
setTimeout(function () {
if (process.env.DEBUG) console.log('In c.slowPromise. Result: ', result)
calls.push({err: err, result: result, time: Date.now()-start})
if (process.env.DEBUG) console.log(result, calls)
if (err === null) {
return resolve(result)
} else {
return reject(err)
}
}, duration)
})
},
pNoErrVal: function (promise, ...expected) {
if (process.env.DEBUG) console.log('In c.pNoErrVal. Expected:', expected)
return promise.then(function (actual) {
mustEqual(actual, expected)
})
},
noErrVal: function (...expected) {
return function (err, ...actual) {
mustEqual(err, null)
mustEqual(actual, expected)
}
},
last: function (options) {
var opt = options != null ? options : {}
return limiter.schedule(opt, function () { return Promise.resolve(getResults()) })
.catch(function (err) { console.error("Error in context.last:", err)})
},
wait: function (wait) {
return new Promise(function (resolve, reject) {
setTimeout(resolve, wait)
})
},
limiter: limiter,
mustEqual: mustEqual,
mustExist: function (a) { assert(a != null) },
results: getResults,
checkResultsOrder: function (order) {
mustEqual(order.length, calls.length)
for (var i = 0; i < Math.max(calls.length, order.length); i++) {
mustEqual(order[i], calls[i].result)
}
},
checkDuration: function (shouldBe, minBound = 10) {
var results = getResults()
var min = shouldBe - minBound
var max = shouldBe + 50
if (!(results.callsDuration > min && results.callsDuration < max)) {
console.error('Duration not around ' + shouldBe + '. Was ' + results.callsDuration)
}
assert(results.callsDuration > min && results.callsDuration < max)
}
}
return context
}

867
node_modules/bottleneck/test/general.js generated vendored Normal file
View file

@ -0,0 +1,867 @@
var makeTest = require('./context')
var Bottleneck = require('./bottleneck')
var assert = require('assert')
var child_process = require('child_process')
describe('General', function () {
var c
afterEach(function () {
return c.limiter.disconnect(false)
})
if (
process.env.DATASTORE !== 'redis' && process.env.DATASTORE !== 'ioredis' &&
process.env.BUILD !== 'es5' && process.env.BUILD !== 'light'
) {
it('Should not leak memory on instantiation', async function () {
c = makeTest()
this.timeout(8000)
const { iterate } = require('leakage')
const result = await iterate.async(async () => {
const limiter = new Bottleneck({ datastore: 'local' })
await limiter.ready()
return limiter.disconnect(false)
}, { iterations: 25 })
})
it('Should not leak memory running jobs', async function () {
c = makeTest()
this.timeout(12000)
const { iterate } = require('leakage')
const limiter = new Bottleneck({ datastore: 'local', maxConcurrent: 1, minTime: 10 })
await limiter.ready()
var ctr = 0
var i = 0
const result = await iterate.async(async () => {
await limiter.schedule(function (zero, one) {
i = i + zero + one
}, 0, 1)
await limiter.schedule(function (zero, one) {
i = i + zero + one
}, 0, 1)
}, { iterations: 25 })
c.mustEqual(i, 302)
})
}
it('Should prompt to upgrade', function () {
c = makeTest()
try {
var limiter = new Bottleneck(1, 250)
} catch (err) {
c.mustEqual(err.message, 'Bottleneck v2 takes a single object argument. Refer to https://github.com/SGrondin/bottleneck#upgrading-to-v2 if you\'re upgrading from Bottleneck v1.')
}
})
it('Should allow null capacity', function () {
c = makeTest({ id: 'null', minTime: 0 })
return c.limiter.updateSettings({ minTime: 10 })
})
it('Should keep scope', async function () {
c = makeTest({ maxConcurrent: 1 })
class Job {
constructor() {
this.value = 5
}
action(x) {
return this.value + x
}
}
var job = new Job()
c.mustEqual(6, await c.limiter.schedule(() => job.action.bind(job)(1)))
c.mustEqual(7, await c.limiter.wrap(job.action.bind(job))(2))
})
it('Should pass multiple arguments back even on errors when using submit()', function (done) {
c = makeTest({ maxConcurrent: 1 })
c.limiter.submit(c.job, new Error('welp'), 1, 2, function (err, x, y) {
c.mustEqual(err.message, 'welp')
c.mustEqual(x, 1)
c.mustEqual(y, 2)
done()
})
})
it('Should expose the Events library', function (cb) {
c = makeTest()
class Hello {
constructor() {
this.emitter = new Bottleneck.Events(this)
}
doSomething() {
this.emitter.trigger('info', 'hello', 'world', 123)
return 5
}
}
const myObject = new Hello();
myObject.on('info', (...args) => {
c.mustEqual(args, ['hello', 'world', 123])
cb()
})
myObject.doSomething()
c.mustEqual(myObject.emitter.listenerCount('info'), 1)
c.mustEqual(myObject.emitter.listenerCount('nothing'), 0)
myObject.on('blah', '')
myObject.on('blah', null)
myObject.on('blah')
return myObject.emitter.trigger('blah')
})
describe('Counts and statuses', function () {
it('Should check() and return the queued count with and without a priority value', async function () {
c = makeTest({maxConcurrent: 1, minTime: 100})
c.mustEqual(await c.limiter.check(), true)
c.mustEqual(c.limiter.queued(), 0)
c.mustEqual(await c.limiter.clusterQueued(), 0)
await c.limiter.submit({id: 1}, c.slowJob, 50, null, 1, c.noErrVal(1))
c.mustEqual(c.limiter.queued(), 0) // It's already running
c.mustEqual(await c.limiter.check(), false)
await c.limiter.submit({id: 2}, c.slowJob, 50, null, 2, c.noErrVal(2))
c.mustEqual(c.limiter.queued(), 1)
c.mustEqual(await c.limiter.clusterQueued(), 1)
c.mustEqual(c.limiter.queued(1), 0)
c.mustEqual(c.limiter.queued(5), 1)
await c.limiter.submit({id: 3}, c.slowJob, 50, null, 3, c.noErrVal(3))
c.mustEqual(c.limiter.queued(), 2)
c.mustEqual(await c.limiter.clusterQueued(), 2)
c.mustEqual(c.limiter.queued(1), 0)
c.mustEqual(c.limiter.queued(5), 2)
await c.limiter.submit({id: 4}, c.slowJob, 50, null, 4, c.noErrVal(4))
c.mustEqual(c.limiter.queued(), 3)
c.mustEqual(await c.limiter.clusterQueued(), 3)
c.mustEqual(c.limiter.queued(1), 0)
c.mustEqual(c.limiter.queued(5), 3)
await c.limiter.submit({priority: 1, id: 5}, c.job, null, 5, c.noErrVal(5))
c.mustEqual(c.limiter.queued(), 4)
c.mustEqual(await c.limiter.clusterQueued(), 4)
c.mustEqual(c.limiter.queued(1), 1)
c.mustEqual(c.limiter.queued(5), 3)
var results = await c.last()
c.mustEqual(c.limiter.queued(), 0)
c.mustEqual(await c.limiter.clusterQueued(), 0)
c.checkResultsOrder([[1], [5], [2], [3], [4]])
c.checkDuration(450)
})
it('Should return the running and done counts', function () {
c = makeTest({maxConcurrent: 5, minTime: 0})
return Promise.all([c.limiter.running(), c.limiter.done()])
.then(function ([running, done]) {
c.mustEqual(running, 0)
c.mustEqual(done, 0)
c.pNoErrVal(c.limiter.schedule({ weight: 1, id: 1 }, c.slowPromise, 100, null, 1), 1)
c.pNoErrVal(c.limiter.schedule({ weight: 3, id: 2 }, c.slowPromise, 200, null, 2), 2)
c.pNoErrVal(c.limiter.schedule({ weight: 1, id: 3 }, c.slowPromise, 100, null, 3), 3)
return c.limiter.schedule({ weight: 0, id: 4 }, c.promise, null)
})
.then(function () {
return Promise.all([c.limiter.running(), c.limiter.done()])
})
.then(function ([running, done]) {
c.mustEqual(running, 5)
c.mustEqual(done, 0)
return c.wait(125)
})
.then(function () {
return Promise.all([c.limiter.running(), c.limiter.done()])
})
.then(function ([running, done]) {
c.mustEqual(running, 3)
c.mustEqual(done, 2)
return c.wait(100)
})
.then(function () {
return Promise.all([c.limiter.running(), c.limiter.done()])
})
.then(function ([running, done]) {
c.mustEqual(running, 0)
c.mustEqual(done, 5)
return c.last()
})
.then(function (results) {
c.checkDuration(200)
c.checkResultsOrder([[], [1], [3], [2]])
})
})
it('Should refuse duplicate Job IDs', async function () {
c = makeTest({maxConcurrent: 2, minTime: 100, trackDoneStatus: true})
try {
await c.limiter.schedule({ id: 'a' }, c.promise, null, 1)
await c.limiter.schedule({ id: 'b' }, c.promise, null, 2)
await c.limiter.schedule({ id: 'a' }, c.promise, null, 3)
} catch (e) {
c.mustEqual(e.message, 'A job with the same id already exists (id=a)')
}
})
it('Should return job statuses', function () {
c = makeTest({maxConcurrent: 2, minTime: 100})
c.mustEqual(c.limiter.counts(), { RECEIVED: 0, QUEUED: 0, RUNNING: 0, EXECUTING: 0 })
c.pNoErrVal(c.limiter.schedule({ weight: 1, id: 1 }, c.slowPromise, 100, null, 1), 1)
c.pNoErrVal(c.limiter.schedule({ weight: 1, id: 2 }, c.slowPromise, 200, null, 2), 2)
c.pNoErrVal(c.limiter.schedule({ weight: 2, id: 3 }, c.slowPromise, 100, null, 3), 3)
c.mustEqual(c.limiter.counts(), { RECEIVED: 3, QUEUED: 0, RUNNING: 0, EXECUTING: 0 })
return c.wait(50)
.then(function () {
c.mustEqual(c.limiter.counts(), { RECEIVED: 0, QUEUED: 1, RUNNING: 1, EXECUTING: 1 })
c.mustEqual(c.limiter.jobStatus(1), 'EXECUTING')
c.mustEqual(c.limiter.jobStatus(2), 'RUNNING')
c.mustEqual(c.limiter.jobStatus(3), 'QUEUED')
return c.last()
})
.then(function (results) {
c.checkDuration(400)
c.checkResultsOrder([[1], [2], [3]])
})
})
it('Should return job statuses, including DONE', function () {
c = makeTest({maxConcurrent: 2, minTime: 100, trackDoneStatus: true})
c.mustEqual(c.limiter.counts(), { RECEIVED: 0, QUEUED: 0, RUNNING: 0, EXECUTING: 0, DONE: 0 })
c.pNoErrVal(c.limiter.schedule({ weight: 1, id: 1 }, c.slowPromise, 100, null, 1), 1)
c.pNoErrVal(c.limiter.schedule({ weight: 1, id: 2 }, c.slowPromise, 200, null, 2), 2)
c.pNoErrVal(c.limiter.schedule({ weight: 2, id: 3 }, c.slowPromise, 100, null, 3), 3)
c.mustEqual(c.limiter.counts(), { RECEIVED: 3, QUEUED: 0, RUNNING: 0, EXECUTING: 0, DONE: 0 })
return c.wait(50)
.then(function () {
c.mustEqual(c.limiter.counts(), { RECEIVED: 0, QUEUED: 1, RUNNING: 1, EXECUTING: 1, DONE: 0 })
c.mustEqual(c.limiter.jobStatus(1), 'EXECUTING')
c.mustEqual(c.limiter.jobStatus(2), 'RUNNING')
c.mustEqual(c.limiter.jobStatus(3), 'QUEUED')
return c.wait(100)
})
.then(function () {
c.mustEqual(c.limiter.counts(), { RECEIVED: 0, QUEUED: 1, RUNNING: 0, EXECUTING: 1, DONE: 1 })
c.mustEqual(c.limiter.jobStatus(1), 'DONE')
c.mustEqual(c.limiter.jobStatus(2), 'EXECUTING')
c.mustEqual(c.limiter.jobStatus(3), 'QUEUED')
return c.last()
})
.then(function (results) {
c.mustEqual(c.limiter.counts(), { RECEIVED: 0, QUEUED: 0, RUNNING: 0, EXECUTING: 0, DONE: 4 })
c.checkDuration(400)
c.checkResultsOrder([[1], [2], [3]])
})
})
it('Should return jobs for a status', function () {
c = makeTest({maxConcurrent: 2, minTime: 100, trackDoneStatus: true})
c.mustEqual(c.limiter.counts(), { RECEIVED: 0, QUEUED: 0, RUNNING: 0, EXECUTING: 0, DONE: 0 })
c.pNoErrVal(c.limiter.schedule({ weight: 1, id: 1 }, c.slowPromise, 100, null, 1), 1)
c.pNoErrVal(c.limiter.schedule({ weight: 1, id: 2 }, c.slowPromise, 200, null, 2), 2)
c.pNoErrVal(c.limiter.schedule({ weight: 2, id: 3 }, c.slowPromise, 100, null, 3), 3)
c.mustEqual(c.limiter.counts(), { RECEIVED: 3, QUEUED: 0, RUNNING: 0, EXECUTING: 0, DONE: 0 })
c.mustEqual(c.limiter.jobs(), ['1', '2', '3'])
c.mustEqual(c.limiter.jobs('RECEIVED'), ['1', '2', '3'])
return c.wait(50)
.then(function () {
c.mustEqual(c.limiter.counts(), { RECEIVED: 0, QUEUED: 1, RUNNING: 1, EXECUTING: 1, DONE: 0 })
c.mustEqual(c.limiter.jobs('EXECUTING'), ['1'])
c.mustEqual(c.limiter.jobs('RUNNING'), ['2'])
c.mustEqual(c.limiter.jobs('QUEUED'), ['3'])
return c.wait(100)
})
.then(function () {
c.mustEqual(c.limiter.counts(), { RECEIVED: 0, QUEUED: 1, RUNNING: 0, EXECUTING: 1, DONE: 1 })
c.mustEqual(c.limiter.jobs('DONE'), ['1'])
c.mustEqual(c.limiter.jobs('EXECUTING'), ['2'])
c.mustEqual(c.limiter.jobs('QUEUED'), ['3'])
return c.last()
})
.then(function (results) {
c.mustEqual(c.limiter.counts(), { RECEIVED: 0, QUEUED: 0, RUNNING: 0, EXECUTING: 0, DONE: 4 })
c.checkDuration(400)
c.checkResultsOrder([[1], [2], [3]])
})
})
it('Should trigger events on status changes', function () {
c = makeTest({maxConcurrent: 2, minTime: 100, trackDoneStatus: true})
var onReceived = 0
var onQueued = 0
var onScheduled = 0
var onExecuting = 0
var onDone = 0
c.limiter.on('received', (info) => {
c.mustEqual(Object.keys(info).sort(), ['args', 'options'])
onReceived++
})
c.limiter.on('queued', (info) => {
c.mustEqual(Object.keys(info).sort(), ['args', 'blocked', 'options', 'reachedHWM'])
onQueued++
})
c.limiter.on('scheduled', (info) => {
c.mustEqual(Object.keys(info).sort(), ['args', 'options'])
onScheduled++
})
c.limiter.on('executing', (info) => {
c.mustEqual(Object.keys(info).sort(), ['args', 'options', 'retryCount'])
onExecuting++
})
c.limiter.on('done', (info) => {
c.mustEqual(Object.keys(info).sort(), ['args', 'options', 'retryCount'])
onDone++
})
c.mustEqual(c.limiter.counts(), { RECEIVED: 0, QUEUED: 0, RUNNING: 0, EXECUTING: 0, DONE: 0 })
c.pNoErrVal(c.limiter.schedule({ weight: 1, id: 1 }, c.slowPromise, 100, null, 1), 1)
c.pNoErrVal(c.limiter.schedule({ weight: 1, id: 2 }, c.slowPromise, 200, null, 2), 2)
c.pNoErrVal(c.limiter.schedule({ weight: 2, id: 3 }, c.slowPromise, 100, null, 3), 3)
c.mustEqual(c.limiter.counts(), { RECEIVED: 3, QUEUED: 0, RUNNING: 0, EXECUTING: 0, DONE: 0 })
c.mustEqual([onReceived, onQueued, onScheduled, onExecuting, onDone], [3, 0, 0, 0, 0])
return c.wait(50)
.then(function () {
c.mustEqual(c.limiter.counts(), { RECEIVED: 0, QUEUED: 1, RUNNING: 1, EXECUTING: 1, DONE: 0 })
c.mustEqual([onReceived, onQueued, onScheduled, onExecuting, onDone], [3, 3, 2, 1, 0])
return c.wait(100)
})
.then(function () {
c.mustEqual(c.limiter.counts(), { RECEIVED: 0, QUEUED: 1, RUNNING: 0, EXECUTING: 1, DONE: 1 })
c.mustEqual(c.limiter.jobs('DONE'), ['1'])
c.mustEqual(c.limiter.jobs('EXECUTING'), ['2'])
c.mustEqual(c.limiter.jobs('QUEUED'), ['3'])
c.mustEqual([onReceived, onQueued, onScheduled, onExecuting, onDone], [3, 3, 2, 2, 1])
return c.last()
})
.then(function (results) {
c.mustEqual(c.limiter.counts(), { RECEIVED: 0, QUEUED: 0, RUNNING: 0, EXECUTING: 0, DONE: 4 })
c.mustEqual([onReceived, onQueued, onScheduled, onExecuting, onDone], [4, 4, 4, 4, 4])
c.checkDuration(400)
c.checkResultsOrder([[1], [2], [3]])
})
})
})
describe('Events', function () {
it('Should return itself', function () {
c = makeTest({ id: 'test-limiter' })
var returned = c.limiter.on('ready', function () { })
c.mustEqual(returned.id, 'test-limiter')
})
it('Should fire events on empty queue', function () {
c = makeTest({maxConcurrent: 1, minTime: 100})
var calledEmpty = 0
var calledIdle = 0
var calledDepleted = 0
c.limiter.on('empty', function () { calledEmpty++ })
c.limiter.on('idle', function () { calledIdle++ })
c.limiter.on('depleted', function () { calledDepleted++ })
return c.pNoErrVal(c.limiter.schedule({id: 1}, c.slowPromise, 50, null, 1), 1)
.then(function () {
c.mustEqual(calledEmpty, 1)
c.mustEqual(calledIdle, 1)
return Promise.all([
c.pNoErrVal(c.limiter.schedule({id: 2}, c.slowPromise, 50, null, 2), 2),
c.pNoErrVal(c.limiter.schedule({id: 3}, c.slowPromise, 50, null, 3), 3)
])
})
.then(function () {
return c.limiter.submit({id: 4}, c.slowJob, 50, null, 4, null)
})
.then(function () {
c.checkDuration(250)
c.checkResultsOrder([[1], [2], [3]])
c.mustEqual(calledEmpty, 3)
c.mustEqual(calledIdle, 2)
c.mustEqual(calledDepleted, 0)
return c.last()
})
})
it('Should fire events once', function () {
c = makeTest({maxConcurrent: 1, minTime: 100})
var calledEmptyOnce = 0
var calledIdleOnce = 0
var calledEmpty = 0
var calledIdle = 0
var calledDepleted = 0
c.limiter.once('empty', function () { calledEmptyOnce++ })
c.limiter.once('idle', function () { calledIdleOnce++ })
c.limiter.on('empty', function () { calledEmpty++ })
c.limiter.on('idle', function () { calledIdle++ })
c.limiter.on('depleted', function () { calledDepleted++ })
c.pNoErrVal(c.limiter.schedule(c.slowPromise, 50, null, 1), 1)
return c.pNoErrVal(c.limiter.schedule(c.promise, null, 2), 2)
.then(function () {
c.mustEqual(calledEmptyOnce, 1)
c.mustEqual(calledIdleOnce, 1)
c.mustEqual(calledEmpty, 1)
c.mustEqual(calledIdle, 1)
return c.pNoErrVal(c.limiter.schedule(c.promise, null, 3), 3)
})
.then(function () {
c.checkDuration(200)
c.checkResultsOrder([[1], [2], [3]])
c.mustEqual(calledEmptyOnce, 1)
c.mustEqual(calledIdleOnce, 1)
c.mustEqual(calledEmpty, 2)
c.mustEqual(calledIdle, 2)
c.mustEqual(calledDepleted, 0)
})
})
it('Should support faulty event listeners', function (done) {
c = makeTest({maxConcurrent: 1, minTime: 100, errorEventsExpected: true})
var calledError = 0
c.limiter.on('error', function (err) {
calledError++
if (err.message === 'Oh noes!' && calledError === 1) {
done()
}
})
c.limiter.on('empty', function () {
throw new Error('Oh noes!')
})
c.pNoErrVal(c.limiter.schedule(c.promise, null, 1), 1)
})
it('Should wait for async event listeners', function (done) {
c = makeTest({maxConcurrent: 1, minTime: 100, errorEventsExpected: true})
var calledError = 0
c.limiter.on('error', function (err) {
calledError++
if (err.message === 'It broke!' && calledError === 1) {
done()
}
})
c.limiter.on('empty', function () {
return c.slowPromise(100, null, 1, 2)
.then(function (x) {
c.mustEqual(x, [1, 2])
return Promise.reject(new Error('It broke!'))
})
})
c.pNoErrVal(c.limiter.schedule(c.promise, null, 1), 1)
})
})
describe('High water limit', function () {
it('Should support highWater set to 0', function () {
c = makeTest({maxConcurrent: 1, minTime: 0, highWater: 0, rejectOnDrop: false})
var first = c.pNoErrVal(c.limiter.schedule(c.slowPromise, 50, null, 1), 1)
c.pNoErrVal(c.limiter.schedule(c.slowPromise, 50, null, 2), 2)
c.pNoErrVal(c.limiter.schedule(c.slowPromise, 50, null, 3), 3)
c.pNoErrVal(c.limiter.schedule(c.slowPromise, 50, null, 4), 4)
return first
.then(function () {
return c.last({ weight: 0 })
})
.then(function (results) {
c.checkDuration(50)
c.checkResultsOrder([[1]])
})
})
it('Should support highWater set to 1', function () {
c = makeTest({maxConcurrent: 1, minTime: 0, highWater: 1, rejectOnDrop: false})
var first = c.pNoErrVal(c.limiter.schedule(c.slowPromise, 50, null, 1), 1)
c.pNoErrVal(c.limiter.schedule(c.slowPromise, 50, null, 2), 2)
c.pNoErrVal(c.limiter.schedule(c.slowPromise, 50, null, 3), 3)
var last = c.pNoErrVal(c.limiter.schedule(c.slowPromise, 50, null, 4), 4)
return Promise.all([first, last])
.then(function () {
return c.last({ weight: 0 })
})
.then(function (results) {
c.checkDuration(100)
c.checkResultsOrder([[1], [4]])
})
})
})
describe('Weight', function () {
it('Should not add jobs with a weight above the maxConcurrent', function () {
c = makeTest({maxConcurrent: 2})
c.pNoErrVal(c.limiter.schedule({ weight: 1 }, c.promise, null, 1), 1)
c.pNoErrVal(c.limiter.schedule({ weight: 2 }, c.promise, null, 2), 2)
return c.limiter.schedule({ weight: 3 }, c.promise, null, 3)
.catch(function (err) {
c.mustEqual(err.message, 'Impossible to add a job having a weight of 3 to a limiter having a maxConcurrent setting of 2')
return c.last()
})
.then(function (results) {
c.checkDuration(0)
c.checkResultsOrder([[1], [2]])
})
})
it('Should support custom job weights', function () {
c = makeTest({maxConcurrent: 2})
c.pNoErrVal(c.limiter.schedule({ weight: 1 }, c.slowPromise, 100, null, 1), 1)
c.pNoErrVal(c.limiter.schedule({ weight: 2 }, c.slowPromise, 200, null, 2), 2)
c.pNoErrVal(c.limiter.schedule({ weight: 1 }, c.slowPromise, 100, null, 3), 3)
c.pNoErrVal(c.limiter.schedule({ weight: 1 }, c.slowPromise, 100, null, 4), 4)
c.pNoErrVal(c.limiter.schedule({ weight: 0 }, c.slowPromise, 100, null, 5), 5)
return c.last()
.then(function (results) {
c.checkDuration(400)
c.checkResultsOrder([[1], [2], [3], [4], [5]])
})
})
it('Should overflow at the correct rate', function () {
c = makeTest({
maxConcurrent: 2,
reservoir: 3
})
var calledDepleted = 0
var emptyArguments = []
c.limiter.on('depleted', function (empty) {
emptyArguments.push(empty)
calledDepleted++
})
var p1 = c.pNoErrVal(c.limiter.schedule({ weight: 1, id: 1 }, c.slowPromise, 100, null, 1), 1)
var p2 = c.pNoErrVal(c.limiter.schedule({ weight: 2, id: 2 }, c.slowPromise, 150, null, 2), 2)
var p3 = c.pNoErrVal(c.limiter.schedule({ weight: 1, id: 3 }, c.slowPromise, 100, null, 3), 3)
var p4 = c.pNoErrVal(c.limiter.schedule({ weight: 1, id: 4 }, c.slowPromise, 100, null, 4), 4)
return Promise.all([p1, p2])
.then(function () {
c.mustEqual(c.limiter.queued(), 2)
return c.limiter.currentReservoir()
})
.then(function (reservoir) {
c.mustEqual(reservoir, 0)
c.mustEqual(calledDepleted, 1)
return c.limiter.incrementReservoir(1)
})
.then(function (reservoir) {
c.mustEqual(reservoir, 1)
return c.last({ priority: 1, weight: 0 })
})
.then(function (results) {
c.mustEqual(calledDepleted, 3)
c.mustEqual(c.limiter.queued(), 1)
c.checkDuration(250)
c.checkResultsOrder([[1], [2]])
return c.limiter.currentReservoir()
})
.then(function (reservoir) {
c.mustEqual(reservoir, 0)
return c.limiter.updateSettings({ reservoir: 1 })
})
.then(function () {
return Promise.all([p3, p4])
})
.then(function () {
return c.limiter.currentReservoir()
})
.then(function (reservoir) {
c.mustEqual(reservoir, 0)
c.mustEqual(calledDepleted, 4)
c.mustEqual(emptyArguments, [false, false, false, true])
})
})
})
describe('Expiration', function () {
it('Should cancel jobs', function () {
c = makeTest({ maxConcurrent: 2 })
var t0 = Date.now()
return Promise.all([
c.pNoErrVal(c.limiter.schedule({ id: 'very-slow-no-expiration' }, c.slowPromise, 150, null, 1), 1),
c.limiter.schedule({ expiration: 50, id: 'slow-with-expiration' }, c.slowPromise, 75, null, 2)
.then(function () {
return Promise.reject(new Error("Should have timed out."))
})
.catch(function (err) {
c.mustEqual(err.message, 'This job timed out after 50 ms.')
var duration = Date.now() - t0
assert(duration > 45 && duration < 80)
return Promise.all([c.limiter.running(), c.limiter.done()])
})
.then(function ([running, done]) {
c.mustEqual(running, 1)
c.mustEqual(done, 1)
})
])
.then(function () {
var duration = Date.now() - t0
assert(duration > 145 && duration < 180)
return Promise.all([c.limiter.running(), c.limiter.done()])
})
.then(function ([running, done]) {
c.mustEqual(running, 0)
c.mustEqual(done, 2)
})
})
})
describe('Pubsub', function () {
it('Should pass strings', function (done) {
c = makeTest({ maxConcurrent: 2 })
c.limiter.on('message', function (msg) {
c.mustEqual(msg, 'hello')
done()
})
c.limiter.publish('hello')
})
it('Should pass objects', function (done) {
c = makeTest({ maxConcurrent: 2 })
var obj = {
array: ['abc', true],
num: 235.59
}
c.limiter.on('message', function (msg) {
c.mustEqual(JSON.parse(msg), obj)
done()
})
c.limiter.publish(JSON.stringify(obj))
})
})
describe('Reservoir Refresh', function () {
it('Should auto-refresh the reservoir', function () {
c = makeTest({
reservoir: 8,
reservoirRefreshInterval: 150,
reservoirRefreshAmount: 5,
heartbeatInterval: 75 // not for production use
})
var calledDepleted = 0
c.limiter.on('depleted', function () {
calledDepleted++
})
return Promise.all([
c.pNoErrVal(c.limiter.schedule({ weight: 1 }, c.promise, null, 1), 1),
c.pNoErrVal(c.limiter.schedule({ weight: 2 }, c.promise, null, 2), 2),
c.pNoErrVal(c.limiter.schedule({ weight: 3 }, c.promise, null, 3), 3),
c.pNoErrVal(c.limiter.schedule({ weight: 4 }, c.promise, null, 4), 4),
c.pNoErrVal(c.limiter.schedule({ weight: 5 }, c.promise, null, 5), 5)
])
.then(function () {
return c.limiter.currentReservoir()
})
.then(function (reservoir) {
c.mustEqual(reservoir, 0)
return c.last({ weight: 0, priority: 9 })
})
.then(function (results) {
c.checkResultsOrder([[1], [2], [3], [4], [5]])
c.mustEqual(calledDepleted, 2)
c.checkDuration(300)
})
})
it('Should allow staggered X by Y type usage', function () {
c = makeTest({
reservoir: 2,
reservoirRefreshInterval: 150,
reservoirRefreshAmount: 2,
heartbeatInterval: 75 // not for production use
})
return Promise.all([
c.pNoErrVal(c.limiter.schedule(c.promise, null, 1), 1),
c.pNoErrVal(c.limiter.schedule(c.promise, null, 2), 2),
c.pNoErrVal(c.limiter.schedule(c.promise, null, 3), 3),
c.pNoErrVal(c.limiter.schedule(c.promise, null, 4), 4)
])
.then(function () {
return c.limiter.currentReservoir()
})
.then(function (reservoir) {
c.mustEqual(reservoir, 0)
return c.last({ weight: 0, priority: 9 })
})
.then(function (results) {
c.checkResultsOrder([[1], [2], [3], [4]])
c.checkDuration(150)
})
})
it('Should keep process alive until queue is empty', function (done) {
c = makeTest()
var options = {
cwd: process.cwd() + '/test/spawn',
timeout: 1000
}
child_process.exec('node refreshKeepAlive.js', options, function (err, stdout, stderr) {
c.mustEqual(stdout, '[0][0][2][2]')
c.mustEqual(stderr, '')
done(err)
})
})
})
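The staggered test above is the canonical "X jobs per Y ms" setup: reservoir and reservoirRefreshAmount both set to X, reservoirRefreshInterval set to Y, so at most X jobs start per window. A sketch assuming a budget of 2 starts per second:
var Bottleneck = require('bottleneck')
// At most 2 job starts in any 1000 ms window
var limiter = new Bottleneck({
  reservoir: 2,
  reservoirRefreshAmount: 2,
  reservoirRefreshInterval: 1000 // should be a multiple of 250
})
for (var i = 1; i <= 6; i++) {
  limiter.schedule((n) => Promise.resolve(n), i)
    .then((n) => console.log('finished job', n))
}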
describe('Reservoir Increase', function () {
it('Should auto-increase the reservoir', async function () {
c = makeTest({
reservoir: 3,
reservoirIncreaseInterval: 150,
reservoirIncreaseAmount: 5,
heartbeatInterval: 75 // not for production use
})
var calledDepleted = 0
c.limiter.on('depleted', function () {
calledDepleted++
})
await Promise.all([
c.pNoErrVal(c.limiter.schedule({ weight: 1 }, c.promise, null, 1), 1),
c.pNoErrVal(c.limiter.schedule({ weight: 2 }, c.promise, null, 2), 2),
c.pNoErrVal(c.limiter.schedule({ weight: 3 }, c.promise, null, 3), 3),
c.pNoErrVal(c.limiter.schedule({ weight: 4 }, c.promise, null, 4), 4),
c.pNoErrVal(c.limiter.schedule({ weight: 5 }, c.promise, null, 5), 5)
])
const reservoir = await c.limiter.currentReservoir()
c.mustEqual(reservoir, 3)
const results = await c.last({ weight: 0, priority: 9 })
c.checkResultsOrder([[1], [2], [3], [4], [5]])
c.mustEqual(calledDepleted, 1)
c.checkDuration(450)
})
it('Should auto-increase the reservoir up to a maximum', async function () {
c = makeTest({
reservoir: 3,
reservoirIncreaseInterval: 150,
reservoirIncreaseAmount: 5,
reservoirIncreaseMaximum: 6,
heartbeatInterval: 75 // not for production use
})
var calledDepleted = 0
c.limiter.on('depleted', function () {
calledDepleted++
})
await Promise.all([
c.pNoErrVal(c.limiter.schedule({ weight: 1 }, c.promise, null, 1), 1),
c.pNoErrVal(c.limiter.schedule({ weight: 2 }, c.promise, null, 2), 2),
c.pNoErrVal(c.limiter.schedule({ weight: 3 }, c.promise, null, 3), 3),
c.pNoErrVal(c.limiter.schedule({ weight: 4 }, c.promise, null, 4), 4),
c.pNoErrVal(c.limiter.schedule({ weight: 5 }, c.promise, null, 5), 5)
])
const reservoir = await c.limiter.currentReservoir()
c.mustEqual(reservoir, 1)
const results = await c.last({ weight: 0, priority: 9 })
c.checkResultsOrder([[1], [2], [3], [4], [5]])
c.mustEqual(calledDepleted, 1)
c.checkDuration(450)
})
it('Should allow staggered X by Y type usage', function () {
c = makeTest({
reservoir: 2,
reservoirIncreaseInterval: 150,
reservoirIncreaseAmount: 2,
heartbeatInterval: 75 // not for production use
})
return Promise.all([
c.pNoErrVal(c.limiter.schedule(c.promise, null, 1), 1),
c.pNoErrVal(c.limiter.schedule(c.promise, null, 2), 2),
c.pNoErrVal(c.limiter.schedule(c.promise, null, 3), 3),
c.pNoErrVal(c.limiter.schedule(c.promise, null, 4), 4)
])
.then(function () {
return c.limiter.currentReservoir()
})
.then(function (reservoir) {
c.mustEqual(reservoir, 0)
return c.last({ weight: 0, priority: 9 })
})
.then(function (results) {
c.checkResultsOrder([[1], [2], [3], [4]])
c.checkDuration(150)
})
})
it('Should keep process alive until queue is empty', function (done) {
c = makeTest()
var options = {
cwd: process.cwd() + '/test/spawn',
timeout: 1000
}
child_process.exec('node increaseKeepAlive.js', options, function (err, stdout, stderr) {
c.mustEqual(stdout, '[0][0][2][2]')
c.mustEqual(stderr, '')
done(err)
})
})
})
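The difference these tests isolate: a refresh resets the reservoir to a fixed amount, while an increase adds to whatever remains, optionally capped by reservoirIncreaseMaximum (which is why the capped test ends with 1 left over instead of 3). A sketch, with illustrative numbers:
var Bottleneck = require('bottleneck')
var limiter = new Bottleneck({
  reservoir: 0,                    // start with no capacity
  reservoirIncreaseAmount: 2,      // grant 2 more executions...
  reservoirIncreaseInterval: 1000, // ...every second (multiple of 250)
  reservoirIncreaseMaximum: 10     // but never bank more than 10
})
limiter.schedule(() => Promise.resolve('ran once capacity arrived'))
  .then(console.log)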
})


255
node_modules/bottleneck/test/group.js generated vendored Normal file

@ -0,0 +1,255 @@
var makeTest = require('./context')
var Bottleneck = require('./bottleneck')
var assert = require('assert')
describe('Group', function () {
var c
afterEach(function () {
return c.limiter.disconnect(false)
})
it('Should create limiters', function (done) {
c = makeTest()
var group = new Bottleneck.Group({
maxConcurrent: 1, minTime: 100
})
var results = []
var job = function (...result) {
results.push(result)
return new Promise(function (resolve, reject) {
setTimeout(function () {
return resolve()
}, 50)
})
}
group.key('A').schedule(job, 1, 2)
group.key('A').schedule(job, 3)
group.key('A').schedule(job, 4)
setTimeout(function () {
group.key('B').schedule(job, 5)
}, 20)
setTimeout(function () {
group.key('C').schedule(job, 6)
group.key('C').schedule(job, 7)
}, 40)
group.key('A').submit(function (cb) {
c.mustEqual(results, [[1,2], [5], [6], [3], [7], [4]])
cb()
done()
}, null)
})
it('Should set up the limiter IDs (default)', function () {
c = makeTest()
var group = new Bottleneck.Group({
maxConcurrent: 1, minTime: 100
})
c.mustEqual(group.key('A').id, 'group-key-A')
c.mustEqual(group.key('B').id, 'group-key-B')
c.mustEqual(group.key('XYZ').id, 'group-key-XYZ')
var ids = group.keys().map(function (key) {
var limiter = group.key(key)
c.mustEqual(limiter._store.timeout, group.timeout)
return limiter.id
})
c.mustEqual(ids.sort(), ['group-key-A', 'group-key-B', 'group-key-XYZ'])
})
it('Should set up the limiter IDs (custom)', function () {
c = makeTest()
var group = new Bottleneck.Group({
maxConcurrent: 1, minTime: 100,
id: 'custom-id'
})
c.mustEqual(group.key('A').id, 'custom-id-A')
c.mustEqual(group.key('B').id, 'custom-id-B')
c.mustEqual(group.key('XYZ').id, 'custom-id-XYZ')
var ids = group.keys().map(function (key) {
var limiter = group.key(key)
c.mustEqual(limiter._store.timeout, group.timeout)
return limiter.id
})
c.mustEqual(ids.sort(), ['custom-id-A', 'custom-id-B', 'custom-id-XYZ'])
})
it('Should pass new limiter to \'created\' event', function () {
c = makeTest()
var group = new Bottleneck.Group({
maxConcurrent: 1, minTime: 100
})
var keys = []
var ids = []
var promises = []
group.on('created', function (created, key) {
keys.push(key)
promises.push(
created.updateSettings({ id: key })
.then(function (limiter) {
ids.push(limiter.id)
})
)
})
group.key('A')
group.key('B')
group.key('A')
group.key('B')
group.key('B')
group.key('BB')
group.key('C')
group.key('A')
return Promise.all(promises)
.then(function () {
c.mustEqual(keys, ids)
return c.limiter.ready()
})
})
it('Should pass error on failure', function (done) {
var failureMessage = 'SOMETHING BLEW UP!!'
c = makeTest()
var group = new Bottleneck.Group({
maxConcurrent: 1, minTime: 100
})
c.mustEqual(Object.keys(group.limiters), [])
var results = []
var job = function (...result) {
results.push(result)
return new Promise(function (resolve, reject) {
setTimeout(function () {
return resolve()
}, 50)
})
}
group.key('A').schedule(job, 1, 2)
group.key('A').schedule(job, 3)
group.key('A').schedule(job, 4)
group.key('B').schedule(() => Promise.reject(new Error(failureMessage)))
.catch(function (err) {
results.push(['CAUGHT', err.message])
})
setTimeout(function () {
group.key('C').schedule(job, 6)
group.key('C').schedule(job, 7)
}, 40)
group.key('A').submit(function (cb) {
c.mustEqual(results, [[1,2], ['CAUGHT', failureMessage], [6], [3], [7], [4]])
cb()
done()
}, null)
})
it('Should update its timeout', function () {
c = makeTest()
var group1 = new Bottleneck.Group({
maxConcurrent: 1, minTime: 100
})
var group2 = new Bottleneck.Group({
maxConcurrent: 1, minTime: 100, timeout: 5000
})
c.mustEqual(group1.timeout, 300000)
c.mustEqual(group2.timeout, 5000)
var p1 = group1.updateSettings({ timeout: 123 })
var p2 = group2.updateSettings({ timeout: 456 })
return Promise.all([p1, p2])
.then(function () {
c.mustEqual(group1.timeout, 123)
c.mustEqual(group2.timeout, 456)
})
})
it('Should update its limiter options', function () {
c = makeTest()
var group = new Bottleneck.Group({
maxConcurrent: 1, minTime: 100
})
var limiter1 = group.key('AAA')
c.mustEqual(limiter1._store.storeOptions.minTime, 100)
group.updateSettings({ minTime: 200 })
c.mustEqual(limiter1._store.storeOptions.minTime, 100)
var limiter2 = group.key('BBB')
c.mustEqual(limiter2._store.storeOptions.minTime, 200)
})
it('Should support keys(), limiters(), deleteKey()', function () {
c = makeTest()
var group1 = new Bottleneck.Group({
maxConcurrent: 1
})
var KEY_A = "AAA"
var KEY_B = "BBB"
return Promise.all([
c.pNoErrVal(group1.key(KEY_A).schedule(c.promise, null, 1), 1),
c.pNoErrVal(group1.key(KEY_B).schedule(c.promise, null, 2), 2)
])
.then(function () {
var keys = group1.keys()
var limiters = group1.limiters()
c.mustEqual(keys, [KEY_A, KEY_B])
c.mustEqual(limiters.length, 2)
limiters.forEach(function (limiter, i) {
c.mustEqual(limiter.key, keys[i])
assert(limiter.limiter instanceof Bottleneck)
})
return group1.deleteKey(KEY_A)
})
.then(function (deleted) {
c.mustEqual(deleted, true)
c.mustEqual(group1.keys().length, 1)
return group1.deleteKey(KEY_A)
})
.then(function (deleted) {
c.mustEqual(deleted, false)
c.mustEqual(group1.keys().length, 1)
})
})
it('Should call autocleanup', function () {
var KEY = 'test-key'
var group = new Bottleneck.Group({
maxConcurrent: 1
})
group.updateSettings({ timeout: 50 })
c = makeTest({ id: 'something', timeout: group.timeout })
group.instances[KEY] = c.limiter
return group.key(KEY).schedule(function () {
return Promise.resolve()
})
.then(function () {
assert(group.instances[KEY] != null)
return new Promise(function (resolve, reject) {
setTimeout(resolve, 100)
})
})
.then(function () {
assert(group.instances[KEY] == null)
})
})
})
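A Group lazily creates one limiter per key, each inheriting the Group's options at creation time (note the updateSettings test: existing limiters keep their old minTime), and evicts idle limiters after `timeout` ms, which is what the autocleanup test simulates. A sketch of the per-key pattern; `fetchUser` is a hypothetical task, not part of the suite:
var Bottleneck = require('bottleneck')
var group = new Bottleneck.Group({ maxConcurrent: 1, minTime: 100 })
group.on('created', function (limiter, key) {
  console.log('new limiter created for key', key) // fires once per distinct key
})
// Stand-in for any promise-returning work
var fetchUser = (id) => Promise.resolve({ id })
// Jobs under different keys are rate-limited independently
group.key('user-1').schedule(fetchUser, 'user-1').then(console.log)
group.key('user-2').schedule(fetchUser, 'user-2').then(console.log)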

135
node_modules/bottleneck/test/ioredis.js generated vendored Normal file

@ -0,0 +1,135 @@
var makeTest = require('./context')
var Bottleneck = require('./bottleneck')
var assert = require('assert')
var Redis = require('ioredis')
if (process.env.DATASTORE === 'ioredis') {
describe('ioredis-only', function () {
var c
afterEach(function () {
return c.limiter.disconnect(false)
})
it('Should accept ioredis lib override', function () {
c = makeTest({
maxConcurrent: 2,
Redis,
clientOptions: {},
clusterNodes: [{
host: process.env.REDIS_HOST,
port: process.env.REDIS_PORT
}]
})
c.mustEqual(c.limiter.datastore, 'ioredis')
})
it('Should connect in Redis Cluster mode', function () {
c = makeTest({
maxConcurrent: 2,
clientOptions: {},
clusterNodes: [{
host: process.env.REDIS_HOST,
port: process.env.REDIS_PORT
}]
})
c.mustEqual(c.limiter.datastore, 'ioredis')
assert(c.limiter._store.connection.client.nodes().length >= 0)
})
it('Should connect in Redis Cluster mode with premade client', function () {
var client = new Redis.Cluster('')
var connection = new Bottleneck.IORedisConnection({ client })
c = makeTest({
maxConcurrent: 2,
clientOptions: {},
clusterNodes: [{
host: process.env.REDIS_HOST,
port: process.env.REDIS_PORT
}]
})
c.mustEqual(c.limiter.datastore, 'ioredis')
assert(c.limiter._store.connection.client.nodes().length >= 0)
connection.disconnect(false)
})
it('Should accept existing connections', function () {
var connection = new Bottleneck.IORedisConnection()
connection.id = 'super-connection'
c = makeTest({
minTime: 50,
connection
})
c.pNoErrVal(c.limiter.schedule(c.promise, null, 1), 1)
c.pNoErrVal(c.limiter.schedule(c.promise, null, 2), 2)
return c.last()
.then(function (results) {
c.checkResultsOrder([[1], [2]])
c.checkDuration(50)
c.mustEqual(c.limiter.connection.id, 'super-connection')
c.mustEqual(c.limiter.datastore, 'ioredis')
return c.limiter.disconnect()
})
.then(function () {
// Shared connections should not be disconnected by the limiter
c.mustEqual(c.limiter.clients().client.status, 'ready')
return connection.disconnect()
})
})
it('Should accept existing redis clients', function () {
var client = new Redis()
client.id = 'super-client'
var connection = new Bottleneck.IORedisConnection({ client })
connection.id = 'super-connection'
c = makeTest({
minTime: 50,
connection
})
c.pNoErrVal(c.limiter.schedule(c.promise, null, 1), 1)
c.pNoErrVal(c.limiter.schedule(c.promise, null, 2), 2)
return c.last()
.then(function (results) {
c.checkResultsOrder([[1], [2]])
c.checkDuration(50)
c.mustEqual(c.limiter.clients().client.id, 'super-client')
c.mustEqual(c.limiter.connection.id, 'super-connection')
c.mustEqual(c.limiter.datastore, 'ioredis')
return c.limiter.disconnect()
})
.then(function () {
// Shared connections should not be disconnected by the limiter
c.mustEqual(c.limiter.clients().client.status, 'ready')
return connection.disconnect()
})
})
it('Should trigger error events on the shared connection', function (done) {
var connection = new Bottleneck.IORedisConnection({
clientOptions: {
port: 1
}
})
connection.on('error', function (err) {
c.mustEqual(c.limiter.datastore, 'ioredis')
connection.disconnect()
done()
})
c = makeTest({ connection })
c.limiter.on('error', function (err) {
done(err)
})
})
})
}
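The connection-sharing tests above all follow one rule: limiters never close a connection they were handed, so the caller that created it must disconnect it. A sketch of that pattern against a local Redis (host and port are placeholders), following the README's shared-connection shape:
var Bottleneck = require('bottleneck')
var connection = new Bottleneck.IORedisConnection({
  clientOptions: { host: '127.0.0.1', port: 6379 }
})
connection.on('error', (err) => console.error(err))
// Two limiters multiplexed over one ioredis client
var limiterA = new Bottleneck({ id: 'limiter-a', datastore: 'ioredis', connection })
var limiterB = new Bottleneck({ id: 'limiter-b', datastore: 'ioredis', connection })
Promise.all([
  limiterA.schedule(() => Promise.resolve('a')),
  limiterB.schedule(() => Promise.resolve('b'))
])
  .then(console.log)
  .then(() => connection.disconnect()) // the owner closes the shared connection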

100
node_modules/bottleneck/test/node_redis.js generated vendored Normal file

@ -0,0 +1,100 @@
var makeTest = require('./context')
var Bottleneck = require('./bottleneck')
var assert = require('assert')
var Redis = require('redis')
if (process.env.DATASTORE === 'redis') {
describe('node_redis-only', function () {
var c
afterEach(function () {
return c.limiter.disconnect(false)
})
it('Should accept node_redis lib override', function () {
c = makeTest({
maxConcurrent: 2,
Redis,
clientOptions: {}
})
c.mustEqual(c.limiter.datastore, 'redis')
})
it('Should accept existing connections', function () {
var connection = new Bottleneck.RedisConnection()
connection.id = 'super-connection'
c = makeTest({
minTime: 50,
connection
})
c.pNoErrVal(c.limiter.schedule(c.promise, null, 1), 1)
c.pNoErrVal(c.limiter.schedule(c.promise, null, 2), 2)
return c.last()
.then(function (results) {
c.checkResultsOrder([[1], [2]])
c.checkDuration(50)
c.mustEqual(c.limiter.connection.id, 'super-connection')
c.mustEqual(c.limiter.datastore, 'redis')
return c.limiter.disconnect()
})
.then(function () {
// Shared connections should not be disconnected by the limiter
c.mustEqual(c.limiter.clients().client.ready, true)
return connection.disconnect()
})
})
it('Should accept existing redis clients', function () {
var client = Redis.createClient()
client.id = 'super-client'
var connection = new Bottleneck.RedisConnection({ client })
connection.id = 'super-connection'
c = makeTest({
minTime: 50,
connection
})
c.pNoErrVal(c.limiter.schedule(c.promise, null, 1), 1)
c.pNoErrVal(c.limiter.schedule(c.promise, null, 2), 2)
return c.last()
.then(function (results) {
c.checkResultsOrder([[1], [2]])
c.checkDuration(50)
c.mustEqual(c.limiter.clients().client.id, 'super-client')
c.mustEqual(c.limiter.connection.id, 'super-connection')
c.mustEqual(c.limiter.datastore, 'redis')
return c.limiter.disconnect()
})
.then(function () {
// Shared connections should not be disconnected by the limiter
c.mustEqual(c.limiter.clients().client.ready, true)
return connection.disconnect()
})
})
it('Should trigger error events on the shared connection', function (done) {
var connection = new Bottleneck.RedisConnection({
clientOptions: {
port: 1
}
})
connection.on('error', function (err) {
c.mustEqual(c.limiter.datastore, 'redis')
connection.disconnect()
done()
})
c = makeTest({ connection })
c.limiter.on('error', function (err) {
done(err)
})
})
})
}
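Same ownership rule with node_redis: a premade client can be wrapped in a RedisConnection and shared, and limiter.disconnect() deliberately leaves it untouched. A sketch (connection details are placeholders):
var Bottleneck = require('bottleneck')
var Redis = require('redis')
var client = Redis.createClient({ host: '127.0.0.1', port: 6379 })
var connection = new Bottleneck.RedisConnection({ client })
connection.on('error', (err) => console.error(err))
var limiter = new Bottleneck({ id: 'shared', datastore: 'redis', connection })
limiter.schedule(() => Promise.resolve('ok'))
  .then(console.log)
  .then(() => limiter.disconnect())    // leaves the shared client open
  .then(() => connection.disconnect()) // actually closes it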

184
node_modules/bottleneck/test/priority.js generated vendored Normal file

@ -0,0 +1,184 @@
var makeTest = require('./context')
var Bottleneck = require('./bottleneck')
var assert = require('assert')
describe('Priority', function () {
var c
afterEach(function () {
return c.limiter.disconnect(false)
})
it('Should do basic ordering', function () {
c = makeTest({maxConcurrent: 1, minTime: 100, rejectOnDrop: false})
return Promise.all([
c.pNoErrVal(c.limiter.schedule(c.slowPromise, 50, null, 1), 1),
c.pNoErrVal(c.limiter.schedule(c.promise, null, 2), 2),
c.pNoErrVal(c.limiter.schedule({priority: 1}, c.promise, null, 5, 6), 5, 6),
c.pNoErrVal(c.limiter.schedule(c.promise, null, 3), 3),
c.pNoErrVal(c.limiter.schedule(c.promise, null, 4), 4)
])
.then(function () {
return c.last()
})
.then(function (results) {
c.checkResultsOrder([[1], [5,6], [2] ,[3], [4]])
c.checkDuration(400)
})
})
it('Should support LEAK', function () {
c = makeTest({
maxConcurrent: 1,
minTime: 100,
highWater: 3,
strategy: Bottleneck.strategy.LEAK,
rejectOnDrop: false
})
var called = false
c.limiter.on('dropped', function (dropped) {
c.mustExist(dropped.task)
c.mustExist(dropped.args)
c.mustExist(dropped.promise)
called = true
})
c.limiter.submit(c.slowJob, 50, null, 1, c.noErrVal(1))
c.limiter.submit(c.job, null, 2, c.noErrVal(2))
c.limiter.submit(c.job, null, 3, c.noErrVal(3))
c.limiter.submit(c.job, null, 4, c.noErrVal(4))
c.limiter.submit({priority: 2}, c.job, null, 5, c.noErrVal(5))
c.limiter.submit({priority: 1}, c.job, null, 6, c.noErrVal(6))
c.limiter.submit({priority: 9}, c.job, null, 7, c.noErrVal(7))
return c.last({ weight: 0 })
.then(function (results) {
c.checkDuration(200)
c.checkResultsOrder([[1], [6], [5]])
c.mustEqual(called, true)
})
})
it('Should support OVERFLOW', function () {
c = makeTest({
maxConcurrent: 1,
minTime: 100,
highWater: 2,
strategy: Bottleneck.strategy.OVERFLOW,
rejectOnDrop: false
})
var called = false
c.limiter.on('dropped', function (dropped) {
c.mustExist(dropped.task)
c.mustExist(dropped.args)
c.mustExist(dropped.promise)
called = true
})
c.limiter.submit(c.slowJob, 50, null, 1, c.noErrVal(1))
c.limiter.submit(c.job, null, 2, c.noErrVal(2))
c.limiter.submit(c.job, null, 3, c.noErrVal(3))
c.limiter.submit(c.job, null, 4, c.noErrVal(4))
c.limiter.submit({priority: 2}, c.job, null, 5, c.noErrVal(5))
c.limiter.submit({priority: 1}, c.job, null, 6, c.noErrVal(6))
return c.limiter.submit({priority: 9}, c.job, null, 7, c.noErrVal(7))
.then(function () {
return c.limiter.updateSettings({ highWater: null })
})
.then(c.last)
.then(function (results) {
c.checkDuration(200)
c.checkResultsOrder([[1], [2], [3]])
c.mustEqual(called, true)
})
})
it('Should support OVERFLOW_PRIORITY', function () {
c = makeTest({
maxConcurrent: 1,
minTime: 100,
highWater: 2,
strategy: Bottleneck.strategy.OVERFLOW_PRIORITY,
rejectOnDrop: false
})
var called = false
c.limiter.on('dropped', function (dropped) {
c.mustExist(dropped.task)
c.mustExist(dropped.args)
c.mustExist(dropped.promise)
called = true
})
c.limiter.submit(c.slowJob, 50, null, 1, c.noErrVal(1))
c.limiter.submit(c.job, null, 2, c.noErrVal(2))
c.limiter.submit(c.job, null, 3, c.noErrVal(3))
c.limiter.submit(c.job, null, 4, c.noErrVal(4))
c.limiter.submit({priority: 2}, c.job, null, 5, c.noErrVal(5))
c.limiter.submit({priority: 2}, c.job, null, 6, c.noErrVal(6))
return c.limiter.submit({priority: 2}, c.job, null, 7, c.noErrVal(7))
.then(function () {
return c.limiter.updateSettings({highWater: null})
})
.then(c.last)
.then(function (results) {
c.checkDuration(200)
c.checkResultsOrder([[1], [5], [6]])
c.mustEqual(called, true)
})
})
it('Should support BLOCK', function (done) {
c = makeTest({
maxConcurrent: 1,
minTime: 100,
highWater: 2,
trackDoneStatus: true,
strategy: Bottleneck.strategy.BLOCK
})
var called = 0
c.limiter.on('dropped', function (dropped) {
c.mustExist(dropped.task)
c.mustExist(dropped.args)
c.mustExist(dropped.promise)
called++
if (called === 3) {
c.limiter.updateSettings({ highWater: null })
.then(function () {
return c.limiter.schedule(c.job, null, 8)
})
.catch(function (err) {
assert(err instanceof Bottleneck.BottleneckError)
c.mustEqual(err.message, 'This job has been dropped by Bottleneck')
c.limiter.removeAllListeners('error')
done()
})
}
})
c.limiter.submit(c.slowJob, 20, null, 1, c.noErrVal(1))
c.limiter.submit(c.slowJob, 20, null, 2, (err) => c.mustExist(err))
c.limiter.submit(c.slowJob, 20, null, 3, (err) => c.mustExist(err))
c.limiter.submit(c.slowJob, 20, null, 4, (err) => c.mustExist(err))
})
it('Should have the right priority', function () {
c = makeTest({maxConcurrent: 1, minTime: 100})
c.pNoErrVal(c.limiter.schedule({priority: 6}, c.slowPromise, 50, null, 1), 1)
c.pNoErrVal(c.limiter.schedule({priority: 5}, c.promise, null, 2), 2)
c.pNoErrVal(c.limiter.schedule({priority: 4}, c.promise, null, 3), 3)
c.pNoErrVal(c.limiter.schedule({priority: 3}, c.promise, null, 4), 4)
return c.last()
.then(function (results) {
c.checkDuration(300)
c.checkResultsOrder([[1], [4], [3], [2]])
})
})
})
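Priorities run from 0 (most urgent) to 9, with 5 as the default, and only reorder jobs still waiting in the queue: a running job, like job 1 in the tests above, is never preempted. A sketch:
var Bottleneck = require('bottleneck')
var limiter = new Bottleneck({ maxConcurrent: 1, minTime: 100 })
limiter.schedule(() => Promise.resolve('first, already running')).then(console.log)
limiter.schedule({ priority: 9 }, () => Promise.resolve('background')).then(console.log)
// Submitted last, but jumps ahead of the priority-9 job
limiter.schedule({ priority: 1 }, () => Promise.resolve('urgent')).then(console.log)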

202
node_modules/bottleneck/test/promises.js generated vendored Normal file

@ -0,0 +1,202 @@
var makeTest = require('./context')
var Bottleneck = require('./bottleneck')
var assert = require('assert')
describe('Promises', function () {
var c
afterEach(function () {
return c.limiter.disconnect(false)
})
it('Should support promises', function () {
c = makeTest({maxConcurrent: 1, minTime: 100})
c.limiter.submit(c.job, null, 1, 9, c.noErrVal(1, 9))
c.limiter.submit(c.job, null, 2, c.noErrVal(2))
c.limiter.submit(c.job, null, 3, c.noErrVal(3))
c.pNoErrVal(c.limiter.schedule(c.promise, null, 4, 5), 4, 5)
return c.last()
.then(function (results) {
c.checkResultsOrder([[1,9], [2], [3], [4,5]])
c.checkDuration(300)
})
})
it('Should pass error on failure', function () {
var failureMessage = 'failed'
c = makeTest({maxConcurrent: 1, minTime: 100})
return c.limiter.schedule(c.promise, new Error(failureMessage))
.catch(function (err) {
c.mustEqual(err.message, failureMessage)
})
})
it('Should allow non-Promise returns', function () {
c = makeTest()
var str = 'This is a string'
return c.limiter.schedule(() => str)
.then(function (x) {
c.mustEqual(x, str)
})
})
it('Should get rejected when rejectOnDrop is true', function () {
c = makeTest({
maxConcurrent: 1,
minTime: 0,
highWater: 1,
strategy: Bottleneck.strategy.OVERFLOW,
rejectOnDrop: true
})
var dropped = 0
var caught = 0
var p1
var p2
c.limiter.on('dropped', function () {
dropped++
})
p1 = c.pNoErrVal(c.limiter.schedule({id: 1}, c.slowPromise, 50, null, 1), 1)
p2 = c.pNoErrVal(c.limiter.schedule({id: 2}, c.slowPromise, 50, null, 2), 2)
return c.limiter.schedule({id: 3}, c.slowPromise, 50, null, 3)
.catch(function (err) {
c.mustEqual(err.message, 'This job has been dropped by Bottleneck')
assert(err instanceof Bottleneck.BottleneckError)
caught++
return Promise.all([p1, p2])
})
.then(c.last)
.then(function (results) {
c.checkResultsOrder([[1], [2]])
c.checkDuration(100)
c.mustEqual(dropped, 1)
c.mustEqual(caught, 1)
})
})
it('Should automatically wrap an exception in a rejected promise - schedule()', function () {
c = makeTest({maxConcurrent: 1, minTime: 100})
return c.limiter.schedule(() => {
throw new Error('I will reject')
})
.then(() => assert(false))
.catch(err => {
assert(err.message === 'I will reject');
})
})
describe('Wrap', function () {
it('Should wrap', function () {
c = makeTest({maxConcurrent: 1, minTime: 100})
c.limiter.submit(c.job, null, 1, c.noErrVal(1))
c.limiter.submit(c.job, null, 2, c.noErrVal(2))
c.limiter.submit(c.job, null, 3, c.noErrVal(3))
var wrapped = c.limiter.wrap(c.promise)
c.pNoErrVal(wrapped(null, 4), 4)
return c.last()
.then(function (results) {
c.checkResultsOrder([[1], [2], [3], [4]])
c.checkDuration(300)
})
})
it('Should automatically wrap a returned value in a resolved promise', function () {
c = makeTest({maxConcurrent: 1, minTime: 100})
var fn = c.limiter.wrap(() => { return 7 });
return fn().then(result => {
assert(result === 7);
})
})
it('Should automatically wrap an exception in a rejected promise', function () {
c = makeTest({maxConcurrent: 1, minTime: 100})
var fn = c.limiter.wrap(() => { throw new Error('I will reject') });
return fn().then(() => assert(false)).catch(error => {
assert(error.message === 'I will reject');
})
})
it('Should inherit the original target for wrapped methods', function () {
c = makeTest({maxConcurrent: 1, minTime: 100})
var object = {
fn: c.limiter.wrap(function () { return this })
}
return object.fn().then(result => {
assert(result === object)
})
})
it('Should inherit the original target on prototype methods', function () {
c = makeTest({maxConcurrent: 1, minTime: 100})
class Animal {
constructor(name) { this.name = name }
getName() { return this.name }
}
Animal.prototype.getName = c.limiter.wrap(Animal.prototype.getName)
let elephant = new Animal('Dumbo')
return elephant.getName().then(result => {
assert(result === 'Dumbo')
})
})
it('Should pass errors back', function () {
var failureMessage = 'BLEW UP!!!'
c = makeTest({maxConcurrent: 1, minTime: 100})
var wrapped = c.limiter.wrap(c.promise)
c.pNoErrVal(wrapped(null, 1), 1)
c.pNoErrVal(wrapped(null, 2), 2)
return wrapped(new Error(failureMessage), 3)
.catch(function (err) {
c.mustEqual(err.message, failureMessage)
return c.last()
})
.then(function (results) {
c.checkResultsOrder([[1], [2], [3]])
c.checkDuration(200)
})
})
it('Should allow passing options', function () {
var failureMessage = 'BLEW UP!!!'
c = makeTest({maxConcurrent: 1, minTime: 50})
var wrapped = c.limiter.wrap(c.promise)
c.pNoErrVal(wrapped(null, 1), 1)
c.pNoErrVal(wrapped(null, 2), 2)
c.pNoErrVal(wrapped(null, 3), 3)
c.pNoErrVal(wrapped(null, 4), 4)
c.pNoErrVal(wrapped.withOptions({ priority: 1 }, null, 5), 5)
return wrapped.withOptions({ priority: 1 }, new Error(failureMessage), 6)
.catch(function (err) {
c.mustEqual(err.message, failureMessage)
return c.last()
})
.then(function (results) {
c.checkResultsOrder([[1], [2], [5], [6], [3], [4]])
c.checkDuration(250)
})
})
})
})
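wrap() returns a function with the same call signature whose invocations are routed through the limiter, preserving `this` (see the prototype test) and converting plain returns and thrown exceptions into resolved and rejected promises; withOptions() overrides job options for a single call. A sketch; `double` is illustrative:
var Bottleneck = require('bottleneck')
var limiter = new Bottleneck({ maxConcurrent: 1, minTime: 100 })
var double = limiter.wrap(async (n) => n * 2)
double(21).then(console.log)                              // 42, default priority 5
double.withOptions({ priority: 1 }, 50).then(console.log) // 100, jumps the queue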

237
node_modules/bottleneck/test/retries.js generated vendored Normal file

@ -0,0 +1,237 @@
var makeTest = require('./context')
var Bottleneck = require('./bottleneck')
var assert = require('assert')
var child_process = require('child_process')
describe('Retries', function () {
var c
afterEach(function () {
return c.limiter.disconnect(false)
})
it('Should retry when requested by the user (sync)', async function () {
c = makeTest({ trackDoneStatus: true })
var failedEvents = 0
var retryEvents = 0
c.limiter.on('failed', function (error, info) {
c.mustEqual(c.limiter.counts().EXECUTING, 1)
c.mustEqual(info.retryCount, failedEvents)
failedEvents++
return 50
})
c.limiter.on('retry', function (error, info) {
c.mustEqual(c.limiter.counts().EXECUTING, 1)
retryEvents++
})
var times = 0
const job = function () {
times++
if (times <= 2) {
return Promise.reject(new Error('boom'))
}
return Promise.resolve('Success!')
}
c.mustEqual(await c.limiter.schedule(job), 'Success!')
const results = await c.results()
assert(results.elapsed > 90 && results.elapsed < 130)
c.mustEqual(failedEvents, 2)
c.mustEqual(retryEvents, 2)
c.mustEqual(c.limiter.counts().EXECUTING, 0)
c.mustEqual(c.limiter.counts().DONE, 1)
})
it('Should retry when requested by the user (async)', async function () {
c = makeTest({ trackDoneStatus: true })
var failedEvents = 0
var retryEvents = 0
c.limiter.on('failed', function (error, info) {
c.mustEqual(c.limiter.counts().EXECUTING, 1)
c.mustEqual(info.retryCount, failedEvents)
failedEvents++
return Promise.resolve(50)
})
c.limiter.on('retry', function (error, info) {
c.mustEqual(c.limiter.counts().EXECUTING, 1)
retryEvents++
})
var times = 0
const job = function () {
times++
if (times <= 2) {
return Promise.reject(new Error('boom'))
}
return Promise.resolve('Success!')
}
c.mustEqual(await c.limiter.schedule(job), 'Success!')
const results = await c.results()
assert(results.elapsed > 90 && results.elapsed < 130)
c.mustEqual(failedEvents, 2)
c.mustEqual(retryEvents, 2)
c.mustEqual(c.limiter.counts().EXECUTING, 0)
c.mustEqual(c.limiter.counts().DONE, 1)
})
it('Should not retry when user returns an error (sync)', async function () {
c = makeTest({ errorEventsExpected: true, trackDoneStatus: true })
var failedEvents = 0
var retryEvents = 0
var errorEvents = 0
var caught = false
c.limiter.on('failed', function (error, info) {
c.mustEqual(c.limiter.counts().EXECUTING, 1)
c.mustEqual(info.retryCount, failedEvents)
failedEvents++
throw new Error('Nope')
})
c.limiter.on('retry', function (error, info) {
retryEvents++
})
c.limiter.on('error', function (error, info) {
c.mustEqual(error.message, 'Nope')
errorEvents++
})
const job = function () {
return Promise.reject(new Error('boom'))
}
try {
await c.limiter.schedule(job)
throw new Error('Should not reach')
} catch (error) {
c.mustEqual(error.message, 'boom')
caught = true
}
c.mustEqual(failedEvents, 1)
c.mustEqual(retryEvents, 0)
c.mustEqual(errorEvents, 1)
c.mustEqual(caught, true)
c.mustEqual(c.limiter.counts().EXECUTING, 0)
c.mustEqual(c.limiter.counts().DONE, 1)
})
it('Should not retry when user returns an error (async)', async function () {
c = makeTest({ errorEventsExpected: true, trackDoneStatus: true })
var failedEvents = 0
var retryEvents = 0
var errorEvents = 0
var caught = false
c.limiter.on('failed', function (error, info) {
c.mustEqual(c.limiter.counts().EXECUTING, 1)
c.mustEqual(info.retryCount, failedEvents)
failedEvents++
return Promise.reject(new Error('Nope'))
})
c.limiter.on('retry', function (error, info) {
retryEvents++
})
c.limiter.on('error', function (error, info) {
c.mustEqual(error.message, 'Nope')
errorEvents++
})
const job = function () {
return Promise.reject(new Error('boom'))
}
try {
await c.limiter.schedule(job)
throw new Error('Should not reach')
} catch (error) {
c.mustEqual(error.message, 'boom')
caught = true
}
c.mustEqual(failedEvents, 1)
c.mustEqual(retryEvents, 0)
c.mustEqual(errorEvents, 1)
c.mustEqual(caught, true)
c.mustEqual(c.limiter.counts().EXECUTING, 0)
c.mustEqual(c.limiter.counts().DONE, 1)
})
it('Should not retry when user returns null (sync)', async function () {
c = makeTest({ trackDoneStatus: true })
var failedEvents = 0
var retryEvents = 0
var caught = false
c.limiter.on('failed', function (error, info) {
c.mustEqual(c.limiter.counts().EXECUTING, 1)
c.mustEqual(info.retryCount, failedEvents)
failedEvents++
return null
})
c.limiter.on('retry', function (error, info) {
retryEvents++
})
const job = function () {
return Promise.reject(new Error('boom'))
}
try {
await c.limiter.schedule(job)
throw new Error('Should not reach')
} catch (error) {
c.mustEqual(error.message, 'boom')
caught = true
}
c.mustEqual(failedEvents, 1)
c.mustEqual(retryEvents, 0)
c.mustEqual(caught, true)
c.mustEqual(c.limiter.counts().EXECUTING, 0)
c.mustEqual(c.limiter.counts().DONE, 1)
})
it('Should not retry when user returns null (async)', async function () {
c = makeTest({ trackDoneStatus: true })
var failedEvents = 0
var retryEvents = 0
var caught = false
c.limiter.on('failed', function (error, info) {
c.mustEqual(c.limiter.counts().EXECUTING, 1)
c.mustEqual(info.retryCount, failedEvents)
failedEvents++
return Promise.resolve(null)
})
c.limiter.on('retry', function (error, info) {
retryEvents++
})
const job = function () {
return Promise.reject(new Error('boom'))
}
try {
await c.limiter.schedule(job)
throw new Error('Should not reach')
} catch (error) {
c.mustEqual(error.message, 'boom')
caught = true
}
c.mustEqual(failedEvents, 1)
c.mustEqual(retryEvents, 0)
c.mustEqual(caught, true)
c.mustEqual(c.limiter.counts().EXECUTING, 0)
c.mustEqual(c.limiter.counts().DONE, 1)
})
})
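The retry contract these tests pin down: a 'failed' handler that returns a number of milliseconds (or a promise of one) schedules a retry after that delay and fires 'retry'; returning null/undefined, or throwing, lets the original rejection propagate to the caller. A sketch with an illustrative exponential backoff and a hypothetical `flaky` task:
var Bottleneck = require('bottleneck')
var limiter = new Bottleneck({ maxConcurrent: 1 })
limiter.on('failed', function (error, jobInfo) {
  if (jobInfo.retryCount < 3) {
    return 100 * Math.pow(2, jobInfo.retryCount) // retry after 100/200/400 ms
  }
  // returning nothing gives up; the job's promise rejects with `error`
})
limiter.on('retry', function (error, jobInfo) {
  console.log('retrying job', jobInfo.options.id)
})
var flaky = () => (Math.random() < 0.5
  ? Promise.resolve('ok')
  : Promise.reject(new Error('boom')))
limiter.schedule({ id: 'flaky-job' }, flaky).then(console.log, console.error)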

17
node_modules/bottleneck/test/spawn/increaseKeepAlive.js generated vendored Normal file

@ -0,0 +1,17 @@
var Bottleneck = require('../bottleneck.js')
var now = Date.now()
var limiter = new Bottleneck({
reservoir: 2,
reservoirIncreaseAmount: 2,
reservoirIncreaseInterval: 200
})
var f1 = () => {
var secDiff = Math.floor((Date.now() - now) / 100)
return Promise.resolve(`[${secDiff}]`)
}
limiter.schedule(f1).then((x) => process.stdout.write(x))
limiter.schedule(f1).then((x) => process.stdout.write(x))
limiter.schedule(f1).then((x) => process.stdout.write(x))
limiter.schedule(f1).then((x) => process.stdout.write(x))

17
node_modules/bottleneck/test/spawn/refreshKeepAlive.js generated vendored Normal file

@ -0,0 +1,17 @@
var Bottleneck = require('../bottleneck.js')
var now = Date.now()
var limiter = new Bottleneck({
reservoir: 2,
reservoirRefreshAmount: 2,
reservoirRefreshInterval: 200
})
var f1 = () => {
var secDiff = Math.floor((Date.now() - now) / 100)
return Promise.resolve(`[${secDiff}]`)
}
limiter.schedule(f1).then((x) => process.stdout.write(x))
limiter.schedule(f1).then((x) => process.stdout.write(x))
limiter.schedule(f1).then((x) => process.stdout.write(x))
limiter.schedule(f1).then((x) => process.stdout.write(x))

103
node_modules/bottleneck/test/states.js generated vendored Normal file

@ -0,0 +1,103 @@
var States = require('../lib/States')
var assert = require('assert')
var c = require('./context')({datastore: 'local'})
var Bottleneck = require('./bottleneck')
describe('States', function () {
it('Should be created and be empty', function () {
var states = new States(["A", "B", "C"])
c.mustEqual(states.statusCounts(), { A: 0, B: 0, C: 0 })
})
it('Should start new series', function () {
var states = new States(["A", "B", "C"])
states.start('x')
states.start('y')
c.mustEqual(states.statusCounts(), { A: 2, B: 0, C: 0 })
})
it('Should increment', function () {
var states = new States(["A", "B", "C"])
states.start('x')
states.start('y')
states.next('x')
states.next('y')
states.next('x')
c.mustEqual(states.statusCounts(), { A: 0, B: 1, C: 1 })
states.next('z')
c.mustEqual(states.statusCounts(), { A: 0, B: 1, C: 1 })
states.next('x')
c.mustEqual(states.statusCounts(), { A: 0, B: 1, C: 0 })
states.next('x')
c.mustEqual(states.statusCounts(), { A: 0, B: 1, C: 0 })
states.next('y')
states.next('y')
c.mustEqual(states.statusCounts(), { A: 0, B: 0, C: 0 })
})
it('Should remove', function () {
var states = new States(["A", "B", "C"])
states.start('x')
states.start('y')
states.next('x')
states.next('y')
states.next('x')
c.mustEqual(states.statusCounts(), { A: 0, B: 1, C: 1 })
states.remove('x')
c.mustEqual(states.statusCounts(), { A: 0, B: 1, C: 0 })
states.remove('y')
c.mustEqual(states.statusCounts(), { A: 0, B: 0, C: 0 })
})
it('Should return current status', function () {
var states = new States(["A", "B", "C"])
states.start('x')
states.start('y')
states.next('x')
states.next('y')
states.next('x')
c.mustEqual(states.statusCounts(), { A: 0, B: 1, C: 1 })
c.mustEqual(states.jobStatus('x'), 'C')
c.mustEqual(states.jobStatus('y'), 'B')
c.mustEqual(states.jobStatus('z'), null)
})
it('Should return job ids for a status', function (done) {
var states = new States(["A", "B", "C"])
states.start('x')
states.start('y')
states.start('z')
states.next('x')
states.next('y')
states.next('x')
states.next('z')
c.mustEqual(states.statusCounts(), { A: 0, B: 2, C: 1 })
c.mustEqual(states.statusJobs().sort(), ['x', 'y', 'z'])
c.mustEqual(states.statusJobs('A'), [])
c.mustEqual(states.statusJobs('B').sort(), ['y', 'z'])
c.mustEqual(states.statusJobs('C'), ['x'])
try {
states.statusJobs('Z')
} catch (err) {
if (process.env.BUILD !== 'es5' && process.env.BUILD !== 'light') {
assert(err instanceof Bottleneck.BottleneckError)
}
done()
}
})
})
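States is an internal helper, but the lifecycle it tracks is what a limiter exposes publicly: jobs advance RECEIVED, QUEUED, RUNNING, EXECUTING, plus DONE when trackDoneStatus is on, observable via jobStatus(), counts(), and the status events. A sketch against the public API (the statuses in comments are the expected values, not assertions):
var Bottleneck = require('bottleneck')
var limiter = new Bottleneck({ maxConcurrent: 1, trackDoneStatus: true })
var p = limiter.schedule({ id: 'job-1' }, () => Promise.resolve('ok'))
console.log(limiter.jobStatus('job-1')) // 'RECEIVED' or 'QUEUED' at this point
console.log(limiter.counts())           // { RECEIVED, QUEUED, RUNNING, EXECUTING, DONE }
p.then(() => console.log(limiter.jobStatus('job-1'))) // 'DONE'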

208
node_modules/bottleneck/test/stop.js generated vendored Normal file

@ -0,0 +1,208 @@
var makeTest = require('./context')
var Bottleneck = require('./bottleneck')
var assert = require('assert')
describe('Stop', function () {
var c
afterEach(function () {
return c.limiter.disconnect(false)
})
it('Should stop and drop the queue', function (done) {
c = makeTest({
maxConcurrent: 2,
minTime: 100,
trackDoneStatus: true
})
var submitFailed = false
var queuedDropped = false
var scheduledDropped = false
var dropped = 0
c.limiter.on('dropped', function () {
dropped++
})
c.pNoErrVal(c.limiter.schedule({id: '0'}, c.promise, null, 0), 0)
c.pNoErrVal(c.limiter.schedule({id: '1'}, c.slowPromise, 100, null, 1), 1)
c.limiter.schedule({id: '2'}, c.promise, null, 2)
.catch(function (err) {
c.mustEqual(err.message, 'Dropped!')
scheduledDropped = true
})
c.limiter.schedule({id: '3'}, c.promise, null, 3)
.catch(function (err) {
c.mustEqual(err.message, 'Dropped!')
queuedDropped = true
})
setTimeout(function () {
var counts = c.limiter.counts()
c.mustEqual(counts.RECEIVED, 0)
c.mustEqual(counts.QUEUED, 1)
c.mustEqual(counts.RUNNING, 1)
c.mustEqual(counts.EXECUTING, 1)
c.mustEqual(counts.DONE, 1)
c.limiter.stop({
enqueueErrorMessage: 'Stopped!',
dropErrorMessage: 'Dropped!'
})
.then(function () {
counts = c.limiter.counts()
c.mustEqual(submitFailed, true)
c.mustEqual(scheduledDropped, true)
c.mustEqual(queuedDropped, true)
c.mustEqual(dropped, 2)
c.mustEqual(counts.RECEIVED, 0)
c.mustEqual(counts.QUEUED, 0)
c.mustEqual(counts.RUNNING, 0)
c.mustEqual(counts.EXECUTING, 0)
c.mustEqual(counts.DONE, 2)
c.checkResultsOrder([[0], [1]])
done()
})
c.limiter.schedule(() => Promise.resolve(true))
.catch(function (err) {
c.mustEqual(err.message, 'Stopped!')
submitFailed = true
})
}, 125)
})
it('Should stop and let the queue finish', function (done) {
c = makeTest({
maxConcurrent: 1,
minTime: 100,
trackDoneStatus: true
})
var submitFailed = false
var dropped = 0
c.limiter.on('dropped', function () {
dropped++
})
c.pNoErrVal(c.limiter.schedule({id: '1'}, c.promise, null, 1), 1)
c.pNoErrVal(c.limiter.schedule({id: '2'}, c.promise, null, 2), 2)
c.pNoErrVal(c.limiter.schedule({id: '3'}, c.slowPromise, 100, null, 3), 3)
setTimeout(function () {
var counts = c.limiter.counts()
c.mustEqual(counts.RECEIVED, 0)
c.mustEqual(counts.QUEUED, 1)
c.mustEqual(counts.RUNNING, 1)
c.mustEqual(counts.EXECUTING, 0)
c.mustEqual(counts.DONE, 1)
c.limiter.stop({
enqueueErrorMessage: 'Stopped!',
dropWaitingJobs: false
})
.then(function () {
counts = c.limiter.counts()
c.mustEqual(submitFailed, true)
c.mustEqual(dropped, 0)
c.mustEqual(counts.RECEIVED, 0)
c.mustEqual(counts.QUEUED, 0)
c.mustEqual(counts.RUNNING, 0)
c.mustEqual(counts.EXECUTING, 0)
c.mustEqual(counts.DONE, 4)
c.checkResultsOrder([[1], [2], [3]])
done()
})
c.limiter.schedule(() => Promise.resolve(true))
.catch(function (err) {
c.mustEqual(err.message, 'Stopped!')
submitFailed = true
})
}, 75)
})
it('Should still resolve when rejectOnDrop is false', function (done) {
c = makeTest({
maxConcurrent: 1,
minTime: 100,
rejectOnDrop: false
})
c.pNoErrVal(c.limiter.schedule({id: '1'}, c.promise, null, 1), 1)
c.pNoErrVal(c.limiter.schedule({id: '2'}, c.promise, null, 2), 2)
c.pNoErrVal(c.limiter.schedule({id: '3'}, c.slowPromise, 100, null, 3), 3)
c.limiter.stop()
.then(function () {
return c.limiter.stop()
})
.then(function () {
done(new Error("Should not be here"))
})
.catch(function (err) {
c.mustEqual(err.message, "stop() has already been called")
done()
})
})
it('Should not allow calling stop() twice when dropWaitingJobs=true', function (done) {
c = makeTest({
maxConcurrent: 1,
minTime: 100
})
var failed = 0
var handler = function (err) {
c.mustEqual(err.message, "This limiter has been stopped.")
failed++
}
c.pNoErrVal(c.limiter.schedule({id: '1'}, c.promise, null, 1), 1).catch(handler)
c.pNoErrVal(c.limiter.schedule({id: '2'}, c.promise, null, 2), 2).catch(handler)
c.pNoErrVal(c.limiter.schedule({id: '3'}, c.slowPromise, 100, null, 3), 3).catch(handler)
c.limiter.stop({ dropWaitingJobs: true })
.then(function () {
return c.limiter.stop({ dropWaitingJobs: true })
})
.then(function () {
done(new Error("Should not be here"))
})
.catch(function (err) {
c.mustEqual(err.message, "stop() has already been called")
c.mustEqual(failed, 3)
done()
})
})
it('Should not allow calling stop() twice when dropWaitingJobs=false', function (done) {
c = makeTest({
maxConcurrent: 1,
minTime: 100
})
c.pNoErrVal(c.limiter.schedule({id: '1'}, c.promise, null, 1), 1)
c.pNoErrVal(c.limiter.schedule({id: '2'}, c.promise, null, 2), 2)
c.pNoErrVal(c.limiter.schedule({id: '3'}, c.slowPromise, 100, null, 3), 3)
c.limiter.stop({ dropWaitingJobs: false })
.then(function () {
return c.limiter.stop({ dropWaitingJobs: false })
})
.then(function () {
done(new Error("Should not be here"))
})
.catch(function (err) {
c.mustEqual(err.message, "stop() has already been called")
done()
})
})
})
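Both shutdown modes covered above in one place: stop() with dropWaitingJobs: true (the default) rejects every job that has not started executing, while false drains the queue first; either way, jobs submitted afterwards reject with enqueueErrorMessage, and stop() itself cannot be called twice. A sketch of a graceful drain:
var Bottleneck = require('bottleneck')
var limiter = new Bottleneck({ maxConcurrent: 1, minTime: 100 })
limiter.schedule(() => Promise.resolve(1)).then(console.log)
limiter.schedule(() => Promise.resolve(2)).then(console.log)
limiter.stop({ dropWaitingJobs: false }) // let queued jobs finish first
  .then(() => console.log('limiter fully stopped'))
limiter.schedule(() => Promise.resolve(3))
  .catch((err) => console.error(err.message)) // rejected: the limiter is stopped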