/// <reference types="node" />
import type { DeserializeOptions, ObjectIdLike, SerializeOptions } from 'bson';
import { Binary, BSON, BSONRegExp, BSONSymbol, BSONType, Code, DBRef, Decimal128, deserialize, Document, Double, Int32, Long, MaxKey, MinKey, ObjectId, serialize, Timestamp, UUID } from 'bson';
import type { SrvRecord } from 'dns';
import { EventEmitter } from 'events';
import type { Socket, TcpNetConnectOpts } from 'net';
import { Readable, Writable } from 'stream';
import type { ConnectionOptions as ConnectionOptions_2, TLSSocket, TLSSocketOptions } from 'tls';
/** @public */
export declare abstract class AbstractCursor<TSchema = any, CursorEvents extends AbstractCursorEvents = AbstractCursorEvents> extends TypedEventEmitter<CursorEvents> {
/* Excluded from this release type: cursorId */
/* Excluded from this release type: cursorSession */
/* Excluded from this release type: selectedServer */
/* Excluded from this release type: cursorNamespace */
/* Excluded from this release type: documents */
/* Excluded from this release type: cursorClient */
/* Excluded from this release type: transform */
/* Excluded from this release type: initialized */
/* Excluded from this release type: isClosed */
/* Excluded from this release type: isKilled */
/* Excluded from this release type: cursorOptions */
/** @event */
static readonly CLOSE: "close";
/* Excluded from this release type: deserializationOptions */
/* Excluded from this release type: __constructor */
/**
* The cursor has no id until it receives a response from the initial cursor creating command.
*
* It is non-zero for as long as the database has an open cursor.
*
* The initiating command may receive a zero id if the entire result is in the `firstBatch`.
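*
* @example
* A minimal sketch (assuming `collection` is an existing collection):
* ```ts
* const cursor = collection.find({});
* await cursor.hasNext(); // runs the initial command
* console.log(cursor.id); // a Long; zero once the server no longer holds an open cursor
* ```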
*/
get id(): Long | undefined;
/* Excluded from this release type: isDead */
/* Excluded from this release type: client */
/* Excluded from this release type: server */
get namespace(): MongoDBNamespace;
get readPreference(): ReadPreference;
get readConcern(): ReadConcern | undefined;
/* Excluded from this release type: session */
/* Excluded from this release type: session */
/**
* The cursor is closed and all remaining locally buffered documents have been iterated.
*/
get closed(): boolean;
/**
* A `killCursors` command was attempted on this cursor.
* This is performed if the cursor id is non-zero.
*/
get killed(): boolean;
get loadBalanced(): boolean;
/** Returns current buffered documents length */
bufferedCount(): number;
/** Returns current buffered documents */
readBufferedDocuments(number?: number): TSchema[];
[Symbol.asyncIterator](): AsyncGenerator<TSchema, void, void>;
stream(options?: CursorStreamOptions): Readable & AsyncIterable<TSchema>;
hasNext(): Promise<boolean>;
/** Get the next available document from the cursor, returns null if no more documents are available. */
next(): Promise<TSchema | null>;
/**
* Try to get the next available document from the cursor or `null` if an empty batch is returned
*/
tryNext(): Promise<TSchema | null>;
/**
* Iterates over all the documents for this cursor using the iterator, callback pattern.
*
* If the iterator returns `false`, iteration will stop.
*
* @param iterator - The iteration callback.
* @deprecated - Will be removed in a future release. Use for await...of instead.
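*
* @example
* A minimal sketch of the recommended replacement (assuming `cursor` is an existing cursor):
* ```ts
* for await (const doc of cursor) {
*   console.log(doc);
* }
* ```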
*/
forEach(iterator: (doc: TSchema) => boolean | void): Promise<void>;
close(): Promise<void>;
/**
* Returns an array of documents. The caller is responsible for making sure that there
* is enough memory to store the results. Note that the array only contains partial
* results when this cursor had been previously accessed. In that case,
* cursor.rewind() can be used to reset the cursor.
*/
toArray(): Promise<TSchema[]>;
/**
* Add a cursor flag to the cursor
*
* @param flag - The flag to set, must be one of: 'tailable', 'oplogReplay', 'noCursorTimeout', 'awaitData', 'partial'.
* @param value - The flag boolean value.
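*
* @example
* A sketch of chaining flags (assuming `cappedCollection` is a capped collection):
* ```ts
* const cursor = cappedCollection.find({})
*   .addCursorFlag('tailable', true)
*   .addCursorFlag('awaitData', true);
* ```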
*/
addCursorFlag(flag: CursorFlag, value: boolean): this;
/**
* Map all documents using the provided function
* If there is a transform set on the cursor, that will be called first and the result passed to
* this function's transform.
*
* @remarks
*
* **Note** Cursors use `null` internally to indicate that there are no more documents in the cursor. Providing a mapping
* function that maps values to `null` will result in the cursor closing itself before it has finished iterating
* all documents. This will **not** result in a memory leak, just surprising behavior. For example:
*
* ```typescript
* const cursor = collection.find({});
* cursor.map(() => null);
*
* const documents = await cursor.toArray();
* // documents is always [], regardless of how many documents are in the collection.
* ```
*
* Other falsey values are allowed:
*
* ```typescript
* const cursor = collection.find({});
* cursor.map(() => '');
*
* const documents = await cursor.toArray();
* // documents is now an array of empty strings
* ```
*
* **Note for Typescript Users:** adding a transform changes the return type of the iteration of this cursor,
* it **does not** return a new instance of a cursor. This means when calling map,
* you should always assign the result to a new variable in order to get a correctly typed cursor variable.
* Take note of the following example:
*
* @example
* ```typescript
* const cursor: FindCursor = coll.find();
* const mappedCursor: FindCursor = cursor.map(doc => Object.keys(doc).length);
* const keyCounts: number[] = await mappedCursor.toArray(); // cursor.toArray() still returns Document[]
* ```
* @param transform - The mapping transformation method.
*/
map<T = any>(transform: (doc: TSchema) => T): AbstractCursor<T>;
/**
* Set the ReadPreference for the cursor.
*
* @param readPreference - The new read preference for the cursor.
*/
withReadPreference(readPreference: ReadPreferenceLike): this;
/**
* Set the ReadConcern for the cursor.
*
* @param readConcern - The new read concern for the cursor.
*/
withReadConcern(readConcern: ReadConcernLike): this;
/**
* Set a maxTimeMS on the cursor query, allowing for hard timeout limits on queries (Only supported on MongoDB 2.6 or higher)
*
* @param value - Number of milliseconds to wait before aborting the query.
*/
maxTimeMS(value: number): this;
/**
* Set the batch size for the cursor.
*
* @param value - The number of documents to return per batch. See {@link https://www.mongodb.com/docs/manual/reference/command/find/|find command documentation}.
*/
batchSize(value: number): this;
/**
* Rewind this cursor to its uninitialized state. Any options that are present on the cursor will
* remain in effect. Iterating this cursor will cause new queries to be sent to the server, even
* if the resultant data has already been retrieved by this cursor.
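*
* @example
* A minimal sketch (assuming `collection` is an existing collection):
* ```ts
* const cursor = collection.find({});
* const firstPass = await cursor.toArray();
* cursor.rewind(); // iterating again sends a new query to the server
* const secondPass = await cursor.toArray();
* ```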
*/
rewind(): void;
/**
* Returns a new uninitialized copy of this cursor, with options matching those that have been set on the current instance
*/
abstract clone(): AbstractCursor<TSchema>;
/* Excluded from this release type: _initialize */
/* Excluded from this release type: getMore */
/* Excluded from this release type: cursorInit */
/* Excluded from this release type: fetchBatch */
/* Excluded from this release type: cleanup */
/* Excluded from this release type: hasEmittedClose */
/* Excluded from this release type: emitClose */
/* Excluded from this release type: transformDocument */
/* Excluded from this release type: throwIfInitialized */
}
/** @public */
export declare type AbstractCursorEvents = {
[AbstractCursor.CLOSE](): void;
};
/** @public */
export declare interface AbstractCursorOptions extends BSONSerializeOptions {
session?: ClientSession;
readPreference?: ReadPreferenceLike;
readConcern?: ReadConcernLike;
/**
* Specifies the number of documents to return in each response from MongoDB
*/
batchSize?: number;
/**
* When applicable `maxTimeMS` controls the amount of time the initial command
* that constructs a cursor should take. (ex. find, aggregate, listCollections)
*/
maxTimeMS?: number;
/**
* When applicable `maxAwaitTimeMS` controls the amount of time subsequent getMores
* that a cursor uses to fetch more data should take. (ex. cursor.next())
*/
maxAwaitTimeMS?: number;
/**
* Comment to apply to the operation.
*
* In server versions pre-4.4, 'comment' must be string. A server
* error will be thrown if any other type is provided.
*
* In server versions 4.4 and above, 'comment' can be any valid BSON type.
*/
comment?: unknown;
/**
* By default, MongoDB will automatically close a cursor when the
* client has exhausted all results in the cursor. However, for [capped collections](https://www.mongodb.com/docs/manual/core/capped-collections)
* you may use a Tailable Cursor that remains open after the client exhausts
* the results in the initial cursor.
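*
* @example
* A sketch (assuming `cappedCollection` is a capped collection):
* ```ts
* const cursor = cappedCollection.find({}, { tailable: true, awaitData: true });
* ```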
*/
tailable?: boolean;
/**
* If awaitData is set to true, when the cursor reaches the end of the capped collection,
* MongoDB blocks the query thread for a period of time waiting for new data to arrive.
* When new data is inserted into the capped collection, the blocked thread is signaled
* to wake up and return the next batch to the client.
*/
awaitData?: boolean;
noCursorTimeout?: boolean;
/* Excluded from this release type: timeoutMS */
}
/* Excluded from this release type: AbstractOperation */
/** @public */
export declare type AcceptedFields<TSchema, FieldType, AssignableType> = {
readonly [key in KeysOfAType<TSchema, FieldType>]?: AssignableType;
};
/** @public */
export declare type AddToSetOperators<Type> = {
$each?: Array<Flatten<Type>>;
};
/**
* The **Admin** class is an internal class that allows convenient access to
* the admin functionality and commands for MongoDB.
*
* **The Admin class cannot be instantiated directly.**
* @public
*
* @example
* ```ts
* import { MongoClient } from 'mongodb';
*
* const client = new MongoClient('mongodb://localhost:27017');
* const admin = client.db().admin();
* const dbInfo = await admin.listDatabases();
* for (const db of dbInfo.databases) {
* console.log(db.name);
* }
* ```
*/
export declare class Admin {
/* Excluded from this release type: s */
/* Excluded from this release type: __constructor */
/**
* Execute a command
*
* The driver will ensure the following fields are attached to the command sent to the server:
* - `lsid` - sourced from an implicit session or options.session
* - `$readPreference` - defaults to primary or can be configured by options.readPreference
* - `$db` - sourced from the name of this database
*
* If the client has a serverApi setting:
* - `apiVersion`
* - `apiStrict`
* - `apiDeprecationErrors`
*
* When in a transaction:
* - `readConcern` - sourced from readConcern set on the TransactionOptions
* - `writeConcern` - sourced from writeConcern set on the TransactionOptions
*
* Attaching any of the above fields to the command will have no effect as the driver will overwrite the value.
*
* @param command - The command to execute
* @param options - Optional settings for the command
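*
* @example
* A minimal sketch (assuming `client` is a connected MongoClient):
* ```ts
* const result = await client.db().admin().command({ ping: 1 });
* ```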
*/
command(command: Document, options?: RunCommandOptions): Promise<Document>;
/**
* Retrieve the server build information
*
* @param options - Optional settings for the command
*/
buildInfo(options?: CommandOperationOptions): Promise<Document>;
/**
* Retrieve the server build information
*
* @param options - Optional settings for the command
*/
serverInfo(options?: CommandOperationOptions): Promise<Document>;
/**
* Retrieve this db's server status.
*
* @param options - Optional settings for the command
*/
serverStatus(options?: CommandOperationOptions): Promise<Document>;
/**
* Ping the MongoDB server and retrieve results
*
* @param options - Optional settings for the command
*/
ping(options?: CommandOperationOptions): Promise<Document>;
/**
* Remove a user from a database
*
* @param username - The username to remove
* @param options - Optional settings for the command
*/
removeUser(username: string, options?: RemoveUserOptions): Promise<boolean>;
/**
* Validate an existing collection
*
* @param collectionName - The name of the collection to validate.
* @param options - Optional settings for the command
*/
validateCollection(collectionName: string, options?: ValidateCollectionOptions): Promise<Document>;
/**
* List the available databases
*
* @param options - Optional settings for the command
*/
listDatabases(options?: ListDatabasesOptions): Promise<ListDatabasesResult>;
/**
* Get ReplicaSet status
*
* @param options - Optional settings for the command
*/
replSetGetStatus(options?: CommandOperationOptions): Promise<Document>;
}
/* Excluded from this release type: AdminPrivate */
/* Excluded from this release type: AggregateOperation */
/** @public */
export declare interface AggregateOptions extends CommandOperationOptions {
/** allowDiskUse lets the server know if it can use disk to store temporary results for the aggregation (requires MongoDB 2.6 or higher). */
allowDiskUse?: boolean;
/** The number of documents to return per batch. See [aggregation documentation](https://www.mongodb.com/docs/manual/reference/command/aggregate). */
batchSize?: number;
/** Allow driver to bypass schema validation. */
bypassDocumentValidation?: boolean;
/** Return the query as a cursor. On MongoDB 2.6 and above this is a real server-side cursor; on earlier versions the cursor is emulated. */
cursor?: Document;
/** Specifies a cumulative time limit in milliseconds for processing operations on the cursor. MongoDB interrupts the operation at the earliest following interrupt point. */
maxTimeMS?: number;
/** The maximum amount of time for the server to wait on new documents to satisfy a tailable cursor query. */
maxAwaitTimeMS?: number;
/** Specify collation. */
collation?: CollationOptions;
/** Add an index selection hint to an aggregation command */
hint?: Hint;
/** Map of parameter names and values that can be accessed using $$var (requires MongoDB 5.0). */
let?: Document;
out?: string;
}
/**
* The **AggregationCursor** class is an internal class that embodies an aggregation cursor on MongoDB,
* allowing for iteration over the results returned from the underlying query. It supports
* one-by-one document iteration, conversion to an array, and iteration as a Node.js stream.
* @public
*/
export declare class AggregationCursor<TSchema = any> extends AbstractCursor<TSchema> {
readonly pipeline: Document[];
/* Excluded from this release type: aggregateOptions */
/* Excluded from this release type: __constructor */
clone(): AggregationCursor<TSchema>;
map<T>(transform: (doc: TSchema) => T): AggregationCursor<T>;
/* Excluded from this release type: _initialize */
/** Execute the explain for the cursor */
explain(verbosity?: ExplainVerbosityLike): Promise<Document>;
/** Add a stage to the aggregation pipeline
* @example
* ```
* const documents = await users.aggregate().addStage({ $match: { name: /Mike/ } }).toArray();
* ```
* @example
* ```
* const documents = await users.aggregate()
* .addStage<{ name: string }>({ $project: { name: true } })
* .toArray(); // type of documents is { name: string }[]
* ```
*/
addStage(stage: Document): this;
addStage<T = Document>(stage: Document): AggregationCursor<T>;
/** Add a group stage to the aggregation pipeline */
group<T = TSchema>($group: Document): AggregationCursor<T>;
/** Add a limit stage to the aggregation pipeline */
limit($limit: number): this;
/** Add a match stage to the aggregation pipeline */
match($match: Document): this;
/** Add an out stage to the aggregation pipeline */
out($out: {
db: string;
coll: string;
} | string): this;
/**
* Add a project stage to the aggregation pipeline
*
* @remarks
* In order to strictly type this function you must provide an interface
* that represents the effect of your projection on the result documents.
*
* By default chaining a projection to your cursor changes the returned type to the generic {@link Document} type.
* You should specify a parameterized type to have assertions on your final results.
*
* @example
* ```typescript
* // Best way
* const docs: AggregationCursor<{ a: number }> = cursor.project<{ a: number }>({ _id: 0, a: true });
* // Flexible way
* const docs: AggregationCursor = cursor.project({ _id: 0, a: true });
* ```
*
* @remarks
* In order to strictly type this function you must provide an interface
* that represents the effect of your projection on the result documents.
*
* **Note for Typescript Users:** adding a transform changes the return type of the iteration of this cursor,
* it **does not** return a new instance of a cursor. This means when calling project,
* you should always assign the result to a new variable in order to get a correctly typed cursor variable.
* Take note of the following example:
*
* @example
* ```typescript
* const cursor: AggregationCursor<{ a: number; b: string }> = coll.aggregate([]);
* const projectCursor = cursor.project<{ a: number }>({ _id: 0, a: true });
* const aPropOnlyArray: {a: number}[] = await projectCursor.toArray();
*
* // or always use chaining and save the final cursor
*
* const cursor = coll.aggregate().project<{ a: string }>({
* _id: 0,
* a: { $convert: { input: '$a', to: 'string' }
* }});
* ```
*/
project<T extends Document = Document>($project: Document): AggregationCursor<T>;
/** Add a lookup stage to the aggregation pipeline */
lookup($lookup: Document): this;
/** Add a redact stage to the aggregation pipeline */
redact($redact: Document): this;
/** Add a skip stage to the aggregation pipeline */
skip($skip: number): this;
/** Add a sort stage to the aggregation pipeline */
sort($sort: Sort): this;
/** Add an unwind stage to the aggregation pipeline */
unwind($unwind: Document | string): this;
/** Add a geoNear stage to the aggregation pipeline */
geoNear($geoNear: Document): this;
}
/** @public */
export declare interface AggregationCursorOptions extends AbstractCursorOptions, AggregateOptions {
}
/**
* It is possible to search using alternative types in mongodb e.g.
* string types can be searched using a regex in mongo
* array types can be searched using their element type
* @public
*/
export declare type AlternativeType<T> = T extends ReadonlyArray<infer U> ? T | RegExpOrString<U> : RegExpOrString<T>;
/** @public */
export declare type AnyBulkWriteOperation<TSchema extends Document = Document> = {
insertOne: InsertOneModel<TSchema>;
} | {
replaceOne: ReplaceOneModel<TSchema>;
} | {
updateOne: UpdateOneModel<TSchema>;
} | {
updateMany: UpdateManyModel<TSchema>;
} | {
deleteOne: DeleteOneModel<TSchema>;
} | {
deleteMany: DeleteManyModel<TSchema>;
};
/** @public */
export declare type AnyError = MongoError | Error;
/** @public */
export declare type ArrayElement<Type> = Type extends ReadonlyArray<infer Item> ? Item : never;
/** @public */
export declare type ArrayOperator<Type> = {
$each?: Array<Flatten<Type>>;
$slice?: number;
$position?: number;
$sort?: Sort;
};
/** @public */
export declare interface Auth {
/** The username for auth */
username?: string;
/** The password for auth */
password?: string;
}
/* Excluded from this release type: AuthContext */
/** @public */
export declare const AuthMechanism: Readonly<{
readonly MONGODB_AWS: "MONGODB-AWS";
readonly MONGODB_CR: "MONGODB-CR";
readonly MONGODB_DEFAULT: "DEFAULT";
readonly MONGODB_GSSAPI: "GSSAPI";
readonly MONGODB_PLAIN: "PLAIN";
readonly MONGODB_SCRAM_SHA1: "SCRAM-SHA-1";
readonly MONGODB_SCRAM_SHA256: "SCRAM-SHA-256";
readonly MONGODB_X509: "MONGODB-X509";
readonly MONGODB_OIDC: "MONGODB-OIDC";
}>;
/** @public */
export declare type AuthMechanism = (typeof AuthMechanism)[keyof typeof AuthMechanism];
/** @public */
export declare interface AuthMechanismProperties extends Document {
SERVICE_HOST?: string;
SERVICE_NAME?: string;
SERVICE_REALM?: string;
CANONICALIZE_HOST_NAME?: GSSAPICanonicalizationValue;
AWS_SESSION_TOKEN?: string;
/** A user provided OIDC machine callback function. */
OIDC_CALLBACK?: OIDCCallbackFunction;
/** A user provided OIDC human interacted callback function. */
OIDC_HUMAN_CALLBACK?: OIDCCallbackFunction;
/** The OIDC environment. Note that 'test' is for internal use only. */
ENVIRONMENT?: 'test' | 'azure' | 'gcp';
/** Allowed hosts that OIDC auth can connect to. */
ALLOWED_HOSTS?: string[];
/** The resource token for OIDC auth in Azure and GCP. */
TOKEN_RESOURCE?: string;
}
/* Excluded from this release type: AuthProvider */
/* Excluded from this release type: AutoEncrypter */
/**
* @public
*
* Extra options related to the mongocryptd process
* \* _Available in MongoDB 6.0 or higher._
*/
export declare type AutoEncryptionExtraOptions = NonNullable<AutoEncryptionOptions['extraOptions']>;
/** @public */
export declare const AutoEncryptionLoggerLevel: Readonly<{
readonly FatalError: 0;
readonly Error: 1;
readonly Warning: 2;
readonly Info: 3;
readonly Trace: 4;
}>;
/**
* @public
* The level of severity of the log message
*
* | Value | Level |
* |-------|-------|
* | 0 | Fatal Error |
* | 1 | Error |
* | 2 | Warning |
* | 3 | Info |
* | 4 | Trace |
*/
export declare type AutoEncryptionLoggerLevel = (typeof AutoEncryptionLoggerLevel)[keyof typeof AutoEncryptionLoggerLevel];
/** @public */
export declare interface AutoEncryptionOptions {
/* Excluded from this release type: metadataClient */
/** A `MongoClient` used to fetch keys from a key vault */
keyVaultClient?: MongoClient;
/** The namespace where keys are stored in the key vault */
keyVaultNamespace?: string;
/** Configuration options that are used by specific KMS providers during key generation, encryption, and decryption. */
kmsProviders?: {
/** Configuration options for using 'aws' as your KMS provider */
aws?: {
/** The access key used for the AWS KMS provider */
accessKeyId: string;
/** The secret access key used for the AWS KMS provider */
secretAccessKey: string;
/**
* An optional AWS session token that will be used as the
* X-Amz-Security-Token header for AWS requests.
*/
sessionToken?: string;
} | Record<string, never>;
/** Configuration options for using 'local' as your KMS provider */
local?: {
/**
* The master key used to encrypt/decrypt data keys.
* A 96-byte long Buffer or base64 encoded string.
*/
key: Buffer | string;
};
/** Configuration options for using 'azure' as your KMS provider */
azure?: {
/** The tenant ID identifies the organization for the account */
tenantId: string;
/** The client ID to authenticate a registered application */
clientId: string;
/** The client secret to authenticate a registered application */
clientSecret: string;
/**
* If present, a host with optional port. E.g. "example.com" or "example.com:443".
* This is optional, and only needed if customer is using a non-commercial Azure instance
* (e.g. a government or China account, which use different URLs).
* Defaults to "login.microsoftonline.com"
*/
identityPlatformEndpoint?: string | undefined;
} | {
/**
* If present, an access token to authenticate with Azure.
*/
accessToken: string;
} | Record<string, never>;
/** Configuration options for using 'gcp' as your KMS provider */
gcp?: {
/** The service account email to authenticate */
email: string;
/** A PKCS#8 encrypted key. This can either be a base64 string or a binary representation */
privateKey: string | Buffer;
/**
* If present, a host with optional port. E.g. "example.com" or "example.com:443".
* Defaults to "oauth2.googleapis.com"
*/
endpoint?: string | undefined;
} | {
/**
* If present, an access token to authenticate with GCP.
*/
accessToken: string;
} | Record<string, never>;
/**
* Configuration options for using 'kmip' as your KMS provider
*/
kmip?: {
/**
* The output endpoint string.
* The endpoint consists of a hostname and port separated by a colon.
* E.g. "example.com:123". A port is always present.
*/
endpoint?: string;
};
};
/**
* A map of namespaces to a local JSON schema for encryption
*
* **NOTE**: Supplying options.schemaMap provides more security than relying on JSON Schemas obtained from the server.
* It protects against a malicious server advertising a false JSON Schema, which could trick the client into sending decrypted data that should be encrypted.
* Schemas supplied in the schemaMap only apply to configuring automatic encryption for Client-Side Field Level Encryption.
* Other validation rules in the JSON schema will not be enforced by the driver and will result in an error.
*/
schemaMap?: Document;
/** Supply a schema for the encrypted fields in the document */
encryptedFieldsMap?: Document;
/** Allows the user to bypass auto encryption, maintaining implicit decryption */
bypassAutoEncryption?: boolean;
/** Allows users to bypass query analysis */
bypassQueryAnalysis?: boolean;
options?: {
/** An optional hook to catch logging messages from the underlying encryption engine */
logger?: (level: AutoEncryptionLoggerLevel, message: string) => void;
};
extraOptions?: {
/**
* A local process the driver communicates with to determine how to encrypt values in a command.
* Defaults to "mongodb://%2Fvar%2Fmongocryptd.sock" if domain sockets are available or "mongodb://localhost:27020" otherwise
*/
mongocryptdURI?: string;
/** If true, autoEncryption will not attempt to spawn a mongocryptd before connecting */
mongocryptdBypassSpawn?: boolean;
/** The path to the mongocryptd executable on the system */
mongocryptdSpawnPath?: string;
/** Command line arguments to use when auto-spawning a mongocryptd */
mongocryptdSpawnArgs?: string[];
/**
* Full path to a MongoDB Crypt shared library to be used (instead of mongocryptd).
*
* This needs to be the path to the file itself, not a directory.
* It can be an absolute or relative path. If the path is relative and
* its first component is `$ORIGIN`, it will be replaced by the directory
* containing the mongodb-client-encryption native addon file. Otherwise,
* the path will be interpreted relative to the current working directory.
*
* Currently, loading different MongoDB Crypt shared library files from different
* MongoClients in the same process is not supported.
*
* If this option is provided and no MongoDB Crypt shared library could be loaded
* from the specified location, creating the MongoClient will fail.
*
* If this option is not provided and `cryptSharedLibRequired` is not specified,
* the AutoEncrypter will attempt to spawn and/or use mongocryptd according
* to the mongocryptd-specific `extraOptions` options.
*
* Specifying a path prevents mongocryptd from being used as a fallback.
*
* Requires the MongoDB Crypt shared library, available in MongoDB 6.0 or higher.
*/
cryptSharedLibPath?: string;
/**
* If specified, never use mongocryptd and instead fail when the MongoDB Crypt
* shared library could not be loaded.
*
* This is always true when `cryptSharedLibPath` is specified.
*
* Requires the MongoDB Crypt shared library, available in MongoDB 6.0 or higher.
*/
cryptSharedLibRequired?: boolean;
/* Excluded from this release type: cryptSharedLibSearchPaths */
};
proxyOptions?: ProxyOptions;
/** The TLS options to use connecting to the KMS provider */
tlsOptions?: CSFLEKMSTlsOptions;
}
/**
* @public
* Configuration options for making an AWS encryption key
*/
export declare interface AWSEncryptionKeyOptions {
/**
* The AWS region of the KMS
*/
region: string;
/**
* The Amazon Resource Name (ARN) to the AWS customer master key (CMK)
*/
key: string;
/**
* An alternate host to send KMS requests to. May include port number.
*/
endpoint?: string | undefined;
}
/** @public */
export declare interface AWSKMSProviderConfiguration {
/**
* The access key used for the AWS KMS provider
*/
accessKeyId: string;
/**
* The secret access key used for the AWS KMS provider
*/
secretAccessKey: string;
/**
* An optional AWS session token that will be used as the
* X-Amz-Security-Token header for AWS requests.
*/
sessionToken?: string;
}
/**
* @public
* Configuration options for making an Azure encryption key
*/
export declare interface AzureEncryptionKeyOptions {
/**
* Key name
*/
keyName: string;
/**
* Key vault URL, typically `<name>.vault.azure.net`
*/
keyVaultEndpoint: string;
/**
* Key version
*/
keyVersion?: string | undefined;
}
/** @public */
export declare type AzureKMSProviderConfiguration = {
/**
* The tenant ID identifies the organization for the account
*/
tenantId: string;
/**
* The client ID to authenticate a registered application
*/
clientId: string;
/**
* The client secret to authenticate a registered application
*/
clientSecret: string;
/**
* If present, a host with optional port. E.g. "example.com" or "example.com:443".
* This is optional, and only needed if customer is using a non-commercial Azure instance
* (e.g. a government or China account, which use different URLs).
* Defaults to "login.microsoftonline.com"
*/
identityPlatformEndpoint?: string | undefined;
} | {
/**
* If present, an access token to authenticate with Azure.
*/
accessToken: string;
};
/**
* Keeps the state of an unordered batch so we can rewrite the results
* correctly after command execution
*
* @public
*/
export declare class Batch<T = Document> {
originalZeroIndex: number;
currentIndex: number;
originalIndexes: number[];
batchType: BatchType;
operations: T[];
size: number;
sizeBytes: number;
constructor(batchType: BatchType, originalZeroIndex: number);
}
/** @public */
export declare const BatchType: Readonly<{
readonly INSERT: 1;
readonly UPDATE: 2;
readonly DELETE: 3;
}>;
/** @public */
export declare type BatchType = (typeof BatchType)[keyof typeof BatchType];
export { Binary }
/** @public */
export declare type BitwiseFilter = number /** numeric bit mask */ | Binary /** BinData bit mask */ | ReadonlyArray<number>;
export { BSON }
/* Excluded from this release type: BSONElement */
export { BSONRegExp }
/**
* BSON Serialization options.
* @public
*/
export declare interface BSONSerializeOptions extends Omit<SerializeOptions, 'index'>, Omit<DeserializeOptions, 'evalFunctions' | 'cacheFunctionsCrc32' | 'allowObjectSmallerThanBufferSize' | 'index' | 'validation'> {
/**
* Enabling the raw option will return a [Node.js Buffer](https://nodejs.org/api/buffer.html)
* which is allocated using [allocUnsafe API](https://nodejs.org/api/buffer.html#static-method-bufferallocunsafesize).
* See this section from the [Node.js Docs here](https://nodejs.org/api/buffer.html#what-makes-bufferallocunsafe-and-bufferallocunsafeslow-unsafe)
* for more detail about what "unsafe" refers to in this context.
* If you need to maintain your own editable clone of the bytes returned for an extended life time of the process, it is recommended you allocate
* your own buffer and clone the contents:
*
* @example
* ```ts
* const raw = await collection.findOne({}, { raw: true });
* const myBuffer = Buffer.alloc(raw.byteLength);
* myBuffer.set(raw, 0);
* // Only save and use `myBuffer` beyond this point
* ```
*
* @remarks
* Please note there is a known limitation where this option cannot be used at the MongoClient level (see [NODE-3946](https://jira.mongodb.org/browse/NODE-3946)).
* It does correctly work at `Db`, `Collection`, and per operation the same as other BSON options work.
*/
raw?: boolean;
/** Enable utf8 validation when deserializing BSON documents. Defaults to true. */
enableUtf8Validation?: boolean;
}
export { BSONSymbol }
export { BSONType }
/** @public */
export declare type BSONTypeAlias = keyof typeof BSONType;
/* Excluded from this release type: BufferPool */
/** @public */
export declare abstract class BulkOperationBase {
private collection;
isOrdered: boolean;
/* Excluded from this release type: s */
operationId?: number;
/* Excluded from this release type: __constructor */
/**
* Add a single insert document to the bulk operation
*
* @example
* ```ts
* const bulkOp = collection.initializeOrderedBulkOp();
*
* // Adds three inserts to the bulkOp.
* bulkOp
* .insert({ a: 1 })
* .insert({ b: 2 })
* .insert({ c: 3 });
* await bulkOp.execute();
* ```
*/
insert(document: Document): BulkOperationBase;
/**
* Builds a find operation for an update/updateOne/delete/deleteOne/replaceOne.
* Returns a builder object used to complete the definition of the operation.
*
* @example
* ```ts
* const bulkOp = collection.initializeOrderedBulkOp();
*
* // Add an updateOne to the bulkOp
* bulkOp.find({ a: 1 }).updateOne({ $set: { b: 2 } });
*
* // Add an updateMany to the bulkOp
* bulkOp.find({ c: 3 }).update({ $set: { d: 4 } });
*
* // Add an upsert
* bulkOp.find({ e: 5 }).upsert().updateOne({ $set: { f: 6 } });
*
* // Add a deletion
* bulkOp.find({ g: 7 }).deleteOne();
*
* // Add a multi deletion
* bulkOp.find({ h: 8 }).delete();
*
* // Add a replaceOne
* bulkOp.find({ i: 9 }).replaceOne({ j: 10 });
*
* // Update using a pipeline (requires MongoDB 4.2 or higher)
* bulkOp.find({ k: 11, y: { $exists: true }, z: { $exists: true } }).updateOne([
* { $set: { total: { $sum: [ '$y', '$z' ] } } }
* ]);
*
* // All of the ops will now be executed
* await bulkOp.execute();
* ```
*/
find(selector: Document): FindOperators;
/** Specifies a raw operation to perform in the bulk write. */
raw(op: AnyBulkWriteOperation): this;
get bsonOptions(): BSONSerializeOptions;
get writeConcern(): WriteConcern | undefined;
get batches(): Batch[];
execute(options?: BulkWriteOptions): Promise<BulkWriteResult>;
/* Excluded from this release type: handleWriteError */
abstract addToOperationsList(batchType: BatchType, document: Document | UpdateStatement | DeleteStatement): this;
private shouldForceServerObjectId;
}
/* Excluded from this release type: BulkOperationPrivate */
/* Excluded from this release type: BulkResult */
/** @public */
export declare interface BulkWriteOperationError {
index: number;
code: number;
errmsg: string;
errInfo: Document;
op: Document | UpdateStatement | DeleteStatement;
}
/** @public */
export declare interface BulkWriteOptions extends CommandOperationOptions {
/**
* Allow driver to bypass schema validation.
* @defaultValue `false` - documents will be validated by default
**/
bypassDocumentValidation?: boolean;
/**
* If true, when an insert fails, don't execute the remaining writes.
* If false, continue with remaining inserts when one fails.
* @defaultValue `true` - inserts are ordered by default
*/
ordered?: boolean;
/**
* Force server to assign _id values instead of driver.
* @defaultValue `false` - the driver generates `_id` fields by default
**/
forceServerObjectId?: boolean;
/** Map of parameter names and values that can be accessed using $$var (requires MongoDB 5.0). */
let?: Document;
}
/**
* @public
* The result of a bulk write.
*/
export declare class BulkWriteResult {
private readonly result;
/** Number of documents inserted. */
readonly insertedCount: number;
/** Number of documents matched for update. */
readonly matchedCount: number;
/** Number of documents modified. */
readonly modifiedCount: number;
/** Number of documents deleted. */
readonly deletedCount: number;
/** Number of documents upserted. */
readonly upsertedCount: number;
/** Upserted document generated ids; the hash key is the index of the originating operation */
readonly upsertedIds: {
[key: number]: any;
};
/** Inserted document generated ids; the hash key is the index of the originating operation */
readonly insertedIds: {
[key: number]: any;
};
private static generateIdMap;
/* Excluded from this release type: __constructor */
/** Evaluates to true if the bulk operation correctly executes */
get ok(): number;
/* Excluded from this release type: getSuccessfullyInsertedIds */
/** Returns the upserted id at the given index */
getUpsertedIdAt(index: number): Document | undefined;
/** Returns raw internal result */
getRawResponse(): Document;
/** Returns true if the bulk operation contains a write error */
hasWriteErrors(): boolean;
/** Returns the number of write errors off the bulk operation */
getWriteErrorCount(): number;
/** Returns a specific write error object */
getWriteErrorAt(index: number): WriteError | undefined;
/** Retrieve all write errors */
getWriteErrors(): WriteError[];
/** Retrieve the write concern error if one exists */
getWriteConcernError(): WriteConcernError | undefined;
toString(): string;
isOk(): boolean;
}
/**
* MongoDB Driver style callback
* @public
*/
export declare type Callback<T = any> = (error?: AnyError, result?: T) => void;
/** @public */
export declare class CancellationToken extends TypedEventEmitter<{
cancel(): void;
}> {
}
/**
* Creates a new Change Stream instance. Normally created using {@link Collection#watch|Collection.watch()}.
* @public
*/
export declare class ChangeStream<TSchema extends Document = Document, TChange extends Document = ChangeStreamDocument<TSchema>> extends TypedEventEmitter<ChangeStreamEvents<TSchema, TChange>> {
pipeline: Document[];
/**
* @remarks WriteConcern can still be present on the options because
* we inherit options from the client/db/collection. The
* key must be present on the options in order to delete it.
* This allows typescript to delete the key but will
* not allow a writeConcern to be assigned as a property on options.
*/
options: ChangeStreamOptions & {
writeConcern?: never;
};
parent: MongoClient | Db | Collection;
namespace: MongoDBNamespace;
type: symbol;
/* Excluded from this release type: cursor */
streamOptions?: CursorStreamOptions;
/* Excluded from this release type: [kCursorStream] */
/* Excluded from this release type: [kClosed] */
/* Excluded from this release type: [kMode] */
/** @event */
static readonly RESPONSE: "response";
/** @event */
static readonly MORE: "more";
/** @event */
static readonly INIT: "init";
/** @event */
static readonly CLOSE: "close";
/**
* Fired for each new matching change in the specified namespace. Attaching a `change`
* event listener to a Change Stream will switch the stream into flowing mode. Data will
* then be passed as soon as it is available.
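*
* @example
* A sketch of attaching a listener (assuming `collection` is an existing collection):
* ```ts
* const changeStream = collection.watch();
* changeStream.on('change', change => console.log(change.operationType));
* ```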
* @event
*/
static readonly CHANGE: "change";
/** @event */
static readonly END: "end";
/** @event */
static readonly ERROR: "error";
/**
* Emitted each time the change stream stores a new resume token.
* @event
*/
static readonly RESUME_TOKEN_CHANGED: "resumeTokenChanged";
/* Excluded from this release type: __constructor */
/* Excluded from this release type: cursorStream */
/** The cached resume token that is used to resume after the most recently returned change. */
get resumeToken(): ResumeToken;
/** Check if there is any document still available in the Change Stream */
hasNext(): Promise<boolean>;
/** Get the next available document from the Change Stream. */
next(): Promise<TChange>;
/**
* Try to get the next available document from the Change Stream's cursor or `null` if an empty batch is returned
*/
tryNext(): Promise<TChange | null>;
[Symbol.asyncIterator](): AsyncGenerator<TChange, void, void>;
/** Is the cursor closed */
get closed(): boolean;
/** Close the Change Stream */
close(): Promise<void>;
/**
* Return a modified Readable stream including a possible transform method.
*
* NOTE: When using a Stream to process change stream events, the stream will
* NOT automatically resume in the case a resumable error is encountered.
*
* @throws MongoChangeStreamError if the underlying cursor or the change stream is closed
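*
* @example
* A sketch (assuming `collection` is an existing collection):
* ```ts
* const readable = collection.watch().stream();
* readable.on('data', change => console.log(change));
* ```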
*/
stream(options?: CursorStreamOptions): Readable & AsyncIterable<TChange>;
/* Excluded from this release type: _setIsEmitter */
/* Excluded from this release type: _setIsIterator */
/* Excluded from this release type: _createChangeStreamCursor */
/* Excluded from this release type: _closeEmitterModeWithError */
/* Excluded from this release type: _streamEvents */
/* Excluded from this release type: _endStream */
/* Excluded from this release type: _processChange */
/* Excluded from this release type: _processErrorStreamMode */
/* Excluded from this release type: _processErrorIteratorMode */
}
/**
* Only present when the `showExpandedEvents` flag is enabled.
* @public
* @see https://www.mongodb.com/docs/manual/reference/change-events/modify/#mongodb-data-modify
*/
export declare interface ChangeStreamCollModDocument extends ChangeStreamDocumentCommon, ChangeStreamDocumentCollectionUUID {
/** Describes the type of operation represented in this change notification */
operationType: 'modify';
}
/**
* @public
* @see https://www.mongodb.com/docs/manual/reference/change-events/create/#mongodb-data-create
*/
export declare interface ChangeStreamCreateDocument extends ChangeStreamDocumentCommon, ChangeStreamDocumentCollectionUUID {
/** Describes the type of operation represented in this change notification */
operationType: 'create';
}
/**
* Only present when the `showExpandedEvents` flag is enabled.
* @public
* @see https://www.mongodb.com/docs/manual/reference/change-events/createIndexes/#mongodb-data-createIndexes
*/
export declare interface ChangeStreamCreateIndexDocument extends ChangeStreamDocumentCommon, ChangeStreamDocumentCollectionUUID, ChangeStreamDocumentOperationDescription {
/** Describes the type of operation represented in this change notification */
operationType: 'createIndexes';
}
/* Excluded from this release type: ChangeStreamCursor */
/* Excluded from this release type: ChangeStreamCursorOptions */
/**
* @public
* @see https://www.mongodb.com/docs/manual/reference/change-events/#delete-event
*/
export declare interface ChangeStreamDeleteDocument<TSchema extends Document = Document> extends ChangeStreamDocumentCommon, ChangeStreamDocumentKey<TSchema>, ChangeStreamDocumentCollectionUUID {
/** Describes the type of operation represented in this change notification */
operationType: 'delete';
/** Namespace the delete event occurred on */
ns: ChangeStreamNameSpace;
/**
* Contains the pre-image of the modified or deleted document if the
* pre-image is available for the change event and either 'required' or
* 'whenAvailable' was specified for the 'fullDocumentBeforeChange' option
* when creating the change stream. If 'whenAvailable' was specified but the
* pre-image is unavailable, this will be explicitly set to null.
*/
fullDocumentBeforeChange?: TSchema;
}
/** @public */
export declare type ChangeStreamDocument<TSchema extends Document = Document> = ChangeStreamInsertDocument<TSchema> | ChangeStreamUpdateDocument<TSchema> | ChangeStreamReplaceDocument<TSchema> | ChangeStreamDeleteDocument<TSchema> | ChangeStreamDropDocument | ChangeStreamRenameDocument | ChangeStreamDropDatabaseDocument | ChangeStreamInvalidateDocument | ChangeStreamCreateIndexDocument | ChangeStreamCreateDocument | ChangeStreamCollModDocument | ChangeStreamDropIndexDocument | ChangeStreamShardCollectionDocument | ChangeStreamReshardCollectionDocument | ChangeStreamRefineCollectionShardKeyDocument;
/** @public */
export declare interface ChangeStreamDocumentCollectionUUID {
/**
* The UUID (Binary subtype 4) of the collection that the operation was performed on.
*
* Only present when the `showExpandedEvents` flag is enabled.
*
* **NOTE:** collectionUUID will be converted to a NodeJS Buffer if the promoteBuffers
* flag is enabled.
*
* @sinceServerVersion 6.1.0
*/
collectionUUID: Binary;
}
/** @public */
export declare interface ChangeStreamDocumentCommon {
/**
* The id functions as an opaque token for use when resuming an interrupted
* change stream.
*/
_id: ResumeToken;
/**
* The timestamp from the oplog entry associated with the event.
* For events that happened as part of a multi-document transaction, the associated change stream
* notifications will have the same clusterTime value, namely the time when the transaction was committed.
* On a sharded cluster, events that occur on different shards can have the same clusterTime but be
* associated with different transactions or even not be associated with any transaction.
* To identify events for a single transaction, you can use the combination of lsid and txnNumber in the change stream event document.
*/
clusterTime?: Timestamp;
/**
* The transaction number.
* Only present if the operation is part of a multi-document transaction.
*
* **NOTE:** txnNumber can be a Long if promoteLongs is set to false
*/
txnNumber?: number;
/**
* The identifier for the session associated with the transaction.
* Only present if the operation is part of a multi-document transaction.
*/
lsid?: ServerSessionId;
/**
* When the change stream's backing aggregation pipeline contains the $changeStreamSplitLargeEvent
* stage, events larger than 16MB will be split into multiple events and contain the
* following information about which fragment the current event is.
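*
* @example
* A sketch of opting in to event splitting (assuming `collection` is an existing collection):
* ```ts
* const changeStream = collection.watch([{ $changeStreamSplitLargeEvent: {} }]);
* ```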
*/
splitEvent?: ChangeStreamSplitEvent;
}
/** @public */
export declare interface ChangeStreamDocumentKey<TSchema extends Document = Document> {
/**
* For unsharded collections this contains a single field `_id`.
* For sharded collections, this will contain all the components of the shard key
*/
documentKey: {
_id: InferIdType<TSchema>;
[shardKey: string]: any;
};
}
/** @public */
export declare interface ChangeStreamDocumentOperationDescription {
/**
* A description of the operation.
*
* Only present when the `showExpandedEvents` flag is enabled.
*
* @sinceServerVersion 6.1.0
*/
operationDescription?: Document;
}
/**
* @public
* @see https://www.mongodb.com/docs/manual/reference/change-events/#dropdatabase-event
*/
export declare interface ChangeStreamDropDatabaseDocument extends ChangeStreamDocumentCommon {
/** Describes the type of operation represented in this change notification */
operationType: 'dropDatabase';
/** The database dropped */
ns: {
db: string;
};
}
/**
* @public
* @see https://www.mongodb.com/docs/manual/reference/change-events/#drop-event
*/
export declare interface ChangeStreamDropDocument extends ChangeStreamDocumentCommon, ChangeStreamDocumentCollectionUUID {
/** Describes the type of operation represented in this change notification */
operationType: 'drop';
/** Namespace the drop event occurred on */
ns: ChangeStreamNameSpace;
}
/**
* Only present when the `showExpandedEvents` flag is enabled.
* @public
* @see https://www.mongodb.com/docs/manual/reference/change-events/dropIndexes/#mongodb-data-dropIndexes
*/
export declare interface ChangeStreamDropIndexDocument extends ChangeStreamDocumentCommon, ChangeStreamDocumentCollectionUUID, ChangeStreamDocumentOperationDescription {
/** Describes the type of operation represented in this change notification */
operationType: 'dropIndexes';
}
/** @public */
export declare type ChangeStreamEvents<TSchema extends Document = Document, TChange extends Document = ChangeStreamDocument<TSchema>> = {
resumeTokenChanged(token: ResumeToken): void;
init(response: any): void;
more(response?: any): void;
response(): void;
end(): void;
error(error: Error): void;
change(change: TChange): void;
} & AbstractCursorEvents;
/**
* @public
* @see https://www.mongodb.com/docs/manual/reference/change-events/#insert-event
*/
export declare interface ChangeStreamInsertDocument<TSchema extends Document = Document> extends ChangeStreamDocumentCommon, ChangeStreamDocumentKey<TSchema>, ChangeStreamDocumentCollectionUUID {
/** Describes the type of operation represented in this change notification */
operationType: 'insert';
/** This key will contain the document being inserted */
fullDocument: TSchema;
/** Namespace the insert event occurred on */
ns: ChangeStreamNameSpace;
}
/**
* @public
* @see https://www.mongodb.com/docs/manual/reference/change-events/#invalidate-event
*/
export declare interface ChangeStreamInvalidateDocument extends ChangeStreamDocumentCommon {
/** Describes the type of operation represented in this change notification */
operationType: 'invalidate';
}
/** @public */
export declare interface ChangeStreamNameSpace {
db: string;
coll: string;
}
/**
* Options that can be passed to a ChangeStream. Note that startAfter, resumeAfter, and startAtOperationTime are all mutually exclusive, and the server will error if more than one is specified.
* @public
*/
export declare interface ChangeStreamOptions extends Omit<AggregateOptions, 'explain'> {
/**
* Allowed values: 'updateLookup', 'whenAvailable', 'required'.
*
* When set to 'updateLookup', the change notification for partial updates
* will include both a delta describing the changes to the document as well
* as a copy of the entire document that was changed from some time after
* the change occurred.
*
* When set to 'whenAvailable', configures the change stream to return the
* post-image of the modified document for replace and update change events
* if the post-image for this event is available.
*
* When set to 'required', the same behavior as 'whenAvailable' except that
* an error is raised if the post-image is not available.
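*
* @example
* A sketch of requesting post-images (assuming `collection` is an existing collection):
* ```ts
* const changeStream = collection.watch([], { fullDocument: 'updateLookup' });
* ```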
*/
fullDocument?: string;
/**
* Allowed values: 'whenAvailable', 'required', 'off'.
*
* The default is to not send a value, which is equivalent to 'off'.
*
* When set to 'whenAvailable', configures the change stream to return the
* pre-image of the modified document for replace, update, and delete change
* events if it is available.
*
* When set to 'required', the same behavior as 'whenAvailable' except that
* an error is raised if the pre-image is not available.
*/
fullDocumentBeforeChange?: string;
/** The maximum amount of time for the server to wait on new documents to satisfy a change stream query. */
maxAwaitTimeMS?: number;
/**
* Allows you to start a changeStream after a specified event.
* @see https://www.mongodb.com/docs/manual/changeStreams/#resumeafter-for-change-streams
*/
resumeAfter?: ResumeToken;
/**
* Similar to resumeAfter, but will allow you to start after an invalidated event.
* @see https://www.mongodb.com/docs/manual/changeStreams/#startafter-for-change-streams
*/
startAfter?: ResumeToken;
/** Will start the changeStream after the specified operationTime. */
startAtOperationTime?: OperationTime;
/**
* The number of documents to return per batch.
* @see https://www.mongodb.com/docs/manual/reference/command/aggregate
*/
batchSize?: number;
/**
* When enabled, configures the change stream to include extra change events.
*
* - createIndexes
* - dropIndexes
* - modify
* - create
* - shardCollection
* - reshardCollection
* - refineCollectionShardKey
*/
showExpandedEvents?: boolean;
}
/**
* @public
* @see https://www.mongodb.com/docs/manual/reference/change-events/refineCollectionShardKey/#mongodb-data-refineCollectionShardKey
*/
export declare interface ChangeStreamRefineCollectionShardKeyDocument extends ChangeStreamDocumentCommon, ChangeStreamDocumentCollectionUUID, ChangeStreamDocumentOperationDescription {
/** Describes the type of operation represented in this change notification */
operationType: 'refineCollectionShardKey';
}
/**
* @public
* @see https://www.mongodb.com/docs/manual/reference/change-events/#rename-event
*/
export declare interface ChangeStreamRenameDocument extends ChangeStreamDocumentCommon, ChangeStreamDocumentCollectionUUID {
/** Describes the type of operation represented in this change notification */
operationType: 'rename';
/** The new name for the `ns.coll` collection */
to: {
db: string;
coll: string;
};
/** The "from" namespace that the rename occurred on */
ns: ChangeStreamNameSpace;
}
/**
* @public
* @see https://www.mongodb.com/docs/manual/reference/change-events/#replace-event
*/
export declare interface ChangeStreamReplaceDocument<TSchema extends Document = Document> extends ChangeStreamDocumentCommon, ChangeStreamDocumentKey<TSchema> {
/** Describes the type of operation represented in this change notification */
operationType: 'replace';
/** The fullDocument of a replace event represents the document after the insert of the replacement document */
fullDocument: TSchema;
/** Namespace the replace event occurred on */
ns: ChangeStreamNameSpace;
/**
* Contains the pre-image of the modified or deleted document if the
* pre-image is available for the change event and either 'required' or
* 'whenAvailable' was specified for the 'fullDocumentBeforeChange' option
* when creating the change stream. If 'whenAvailable' was specified but the
* pre-image is unavailable, this will be explicitly set to null.
*/
fullDocumentBeforeChange?: TSchema;
}
/**
* @public
* @see https://www.mongodb.com/docs/manual/reference/change-events/reshardCollection/#mongodb-data-reshardCollection
*/
export declare interface ChangeStreamReshardCollectionDocument extends ChangeStreamDocumentCommon, ChangeStreamDocumentCollectionUUID, ChangeStreamDocumentOperationDescription {
/** Describes the type of operation represented in this change notification */
operationType: 'reshardCollection';
}
/**
* @public
* @see https://www.mongodb.com/docs/manual/reference/change-events/shardCollection/#mongodb-data-shardCollection
*/
export declare interface ChangeStreamShardCollectionDocument extends ChangeStreamDocumentCommon, ChangeStreamDocumentCollectionUUID, ChangeStreamDocumentOperationDescription {
/** Describes the type of operation represented in this change notification */
operationType: 'shardCollection';
}
/** @public */
export declare interface ChangeStreamSplitEvent {
/** Which fragment of the change this is. */
fragment: number;
/** The total number of fragments. */
of: number;
}
/**
* @public
* @see https://www.mongodb.com/docs/manual/reference/change-events/#update-event
*/
export declare interface ChangeStreamUpdateDocument<TSchema extends Document = Document> extends ChangeStreamDocumentCommon, ChangeStreamDocumentKey<TSchema>, ChangeStreamDocumentCollectionUUID {
/** Describes the type of operation represented in this change notification */
operationType: 'update';
/**
* This is only set if `fullDocument` is set to `'updateLookup'`.
* Contains the point-in-time post-image of the modified document if the
* post-image is available and either 'required' or 'whenAvailable' was
* specified for the 'fullDocument' option when creating the change stream.
*/
fullDocument?: TSchema;
/** Contains a description of updated and removed fields in this operation */
updateDescription: UpdateDescription<TSchema>;
/** Namespace the update event occurred on */
ns: ChangeStreamNameSpace;
/**
* Contains the pre-image of the modified or deleted document if the
* pre-image is available for the change event and either 'required' or
* 'whenAvailable' was specified for the 'fullDocumentBeforeChange' option
* when creating the change stream. If 'whenAvailable' was specified but the
* pre-image is unavailable, this will be explicitly set to null.
*/
fullDocumentBeforeChange?: TSchema;
}
/**
* @public
* The public interface for explicit in-use encryption
*/
export declare class ClientEncryption {
/* Excluded from this release type: _client */
/* Excluded from this release type: _keyVaultNamespace */
/* Excluded from this release type: _keyVaultClient */
/* Excluded from this release type: _proxyOptions */
/* Excluded from this release type: _tlsOptions */
/* Excluded from this release type: _kmsProviders */
/* Excluded from this release type: _mongoCrypt */
/* Excluded from this release type: getMongoCrypt */
/**
* Create a new encryption instance
*
* @example
* ```ts
* new ClientEncryption(mongoClient, {
* keyVaultNamespace: 'client.encryption',
* kmsProviders: {
* local: {
* key: masterKey // The master key used for encryption/decryption. A 96-byte long Buffer
* }
* }
* });
* ```
*
* @example
* ```ts
* new ClientEncryption(mongoClient, {
* keyVaultNamespace: 'client.encryption',
* kmsProviders: {
* aws: {
* accessKeyId: AWS_ACCESS_KEY,
* secretAccessKey: AWS_SECRET_KEY
* }
* }
* });
* ```
*/
constructor(client: MongoClient, options: ClientEncryptionOptions);
/**
* Creates a data key used for explicit encryption and inserts it into the key vault namespace
*
* @example
* ```ts
* // Using async/await to create a local key
* const dataKeyId = await clientEncryption.createDataKey('local');
* ```
*
* @example
* ```ts
* // Using async/await to create an aws key
* const dataKeyId = await clientEncryption.createDataKey('aws', {
* masterKey: {
* region: 'us-east-1',
* key: 'xxxxxxxxxxxxxx' // CMK ARN here
* }
* });
* ```
*
* @example
* ```ts
* // Using async/await to create an aws key with a keyAltName
* const dataKeyId = await clientEncryption.createDataKey('aws', {
* masterKey: {
* region: 'us-east-1',
* key: 'xxxxxxxxxxxxxx' // CMK ARN here
* },
* keyAltNames: [ 'mySpecialKey' ]
* });
* ```
*/
createDataKey(provider: ClientEncryptionDataKeyProvider, options?: ClientEncryptionCreateDataKeyProviderOptions): Promise<UUID>;
/**
* Searches the keyvault for any data keys matching the provided filter. If there are matches, rewrapManyDataKey then attempts to re-wrap the data keys using the provided options.
*
* If no matches are found, then no bulk write is performed.
*
* @example
* ```ts
* // rewrapping all data keys (using a filter that matches all documents)
* const filter = {};
*
* const result = await clientEncryption.rewrapManyDataKey(filter);
* if (result.bulkWriteResult != null) {
* // keys were re-wrapped, results will be available in the bulkWrite object.
* }
* ```
*
* @example
* ```ts
* // attempting to rewrap all data keys with no matches
* const filter = { _id: new Binary() } // assume _id matches no documents in the database
* const result = await clientEncryption.rewrapManyDataKey(filter);
*
* if (result.bulkWriteResult == null) {
* // no keys matched, `bulkWriteResult` does not exist on the result object
* }
* ```
*/
rewrapManyDataKey(filter: Filter<DataKey>, options: ClientEncryptionRewrapManyDataKeyProviderOptions): Promise<{
bulkWriteResult?: BulkWriteResult;
}>;
/**
* Deletes the key with the provided id from the keyvault, if it exists.
*
* @example
* ```ts
* // delete a key by _id
* const id = new Binary(); // id is a bson binary subtype 4 object
* const { deletedCount } = await clientEncryption.deleteKey(id);
*
* if (deletedCount != null && deletedCount > 0) {
* // successful deletion
* }
* ```
*
*/
deleteKey(_id: Binary): Promise<DeleteResult>;
/**
* Finds all the keys currently stored in the keyvault.
*
* This method will not throw.
*
* @returns a FindCursor over all keys in the keyvault.
* @example
* ```ts
* // fetching all keys
* const keys = await clientEncryption.getKeys().toArray();
* ```
*/
getKeys(): FindCursor<DataKey>;
/**
* Finds a key in the keyvault with the specified _id.
*
* Returns a promise that either resolves to a {@link DataKey} if a document matches the key or null if no documents
* match the id. The promise rejects with an error if an error is thrown.
* @example
* ```ts
* // getting a key by id
* const id = new Binary(); // id is a bson binary subtype 4 object
* const key = await clientEncryption.getKey(id);
* if (!key) {
* // key is null if there was no matching key
* }
* ```
*/
getKey(_id: Binary): Promise<DataKey | null>;
/**
* Finds a key in the keyvault which has the specified keyAltName.
*
* @param keyAltName - a keyAltName to search for a key
* @returns Returns a promise that either resolves to a {@link DataKey} if a document matches the key or null if no documents
* match the keyAltName. The promise rejects with an error if an error is thrown.
* @example
* ```ts
* // get a key by alt name
* const keyAltName = 'keyAltName';
* const key = await clientEncryption.getKeyByAltName(keyAltName);
* if (!key) {
* // key is null if there is no matching key
* }
* ```
*/
getKeyByAltName(keyAltName: string): Promise<WithId<DataKey> | null>;
/**
* Adds a keyAltName to a key identified by the provided _id.
*
* This method resolves to/returns the *old* key value (prior to adding the new keyAltName).
*
* @param _id - The id of the document to update.
* @param keyAltName - a keyAltName to search for a key
* @returns Returns a promise that either resolves to a {@link DataKey} if a document matches the key or null if no documents
* match the id. The promise rejects with an error if an error is thrown.
* @example
* ```ts
* // adding a keyAltName to a data key
* const id = new Binary(); // id is a bson binary subtype 4 object
* const keyAltName = 'keyAltName';
* const oldKey = await clientEncryption.addKeyAltName(id, keyAltName);
* if (!oldKey) {
* // null is returned if there is no matching document with an id matching the supplied id
* }
* ```
*/
addKeyAltName(_id: Binary, keyAltName: string): Promise<WithId<DataKey> | null>;
/**
* Removes a keyAltName from a key identified by the provided _id.
*
* This method resolves to/returns the *old* key value (prior to removing the keyAltName).
*
* If the removed keyAltName is the last keyAltName for that key, the `keyAltNames` property is unset from the document.
*
* @param _id - The id of the document to update.
* @param keyAltName - a keyAltName to search for a key
* @returns Returns a promise that either resolves to a {@link DataKey} if a document matches the key or null if no documents
* match the id. The promise rejects with an error if an error is thrown.
* @example
* ```ts
* // removing a key alt name from a data key
* const id = new Binary(); // id is a bson binary subtype 4 object
* const keyAltName = 'keyAltName';
* const oldKey = await clientEncryption.removeKeyAltName(id, keyAltName);
*
* if (!oldKey) {
* // null is returned if there is no matching document with an id matching the supplied id
* }
* ```
*/
removeKeyAltName(_id: Binary, keyAltName: string): Promise<WithId<DataKey> | null>;
/**
* A convenience method for creating an encrypted collection.
* This method will create data keys for any encryptedFields that do not have a `keyId` defined
* and then create a new collection with the full set of encryptedFields.
*
* @param db - A Node.js driver Db object with which to create the collection
* @param name - The name of the collection to be created
* @param options - Options for createDataKey and for createCollection
* @returns created collection and generated encryptedFields
* @throws MongoCryptCreateDataKeyError - If a `createDataKey` invocation fails part way through the process, the returned promise rejects with an error carrying the partial `encryptedFields` that were created.
* @throws MongoCryptCreateEncryptedCollectionError - If creating the collection fails, the returned promise rejects with an error carrying the entire `encryptedFields` that were created.
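*
* @example
* A minimal sketch, assuming an existing `clientEncryption` and a connected `client`; the database, collection, and field names are illustrative:
* ```ts
* const { collection, encryptedFields } = await clientEncryption.createEncryptedCollection(
* client.db('app'),
* 'users',
* {
* provider: 'local',
* createCollectionOptions: {
* encryptedFields: {
* // keyId: null asks the helper to create a data key for this field
* fields: [{ path: 'ssn', keyId: null, bsonType: 'string', queries: { queryType: 'equality' } }]
* }
* }
* }
* );
* ```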
*/
createEncryptedCollection<TSchema extends Document = Document>(db: Db, name: string, options: {
provider: ClientEncryptionDataKeyProvider;
createCollectionOptions: Omit<CreateCollectionOptions, 'encryptedFields'> & {
encryptedFields: Document;
};
masterKey?: AWSEncryptionKeyOptions | AzureEncryptionKeyOptions | GCPEncryptionKeyOptions;
}): Promise<{
collection: Collection<TSchema>;
encryptedFields: Document;
}>;
/**
* Explicitly encrypt a provided value. Note that either `options.keyId` or `options.keyAltName` must
* be specified. Specifying both `options.keyId` and `options.keyAltName` is considered an error.
*
* @param value - The value that you wish to serialize. Must be of a type that can be serialized into BSON
* @param options -
* @returns a Promise that either resolves with the encrypted value, or rejects with an error.
*
* @example
* ```ts
* // Encryption with async/await api
* async function encryptMyData(value) {
* const keyId = await clientEncryption.createDataKey('local');
* return clientEncryption.encrypt(value, { keyId, algorithm: 'AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic' });
* }
* ```
*
* @example
* ```ts
* // Encryption using a keyAltName
* async function encryptMyData(value) {
* await clientEncryption.createDataKey('local', { keyAltNames: ['mySpecialKey'] });
* return clientEncryption.encrypt(value, { keyAltName: 'mySpecialKey', algorithm: 'AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic' });
* }
* ```
*/
encrypt(value: unknown, options: ClientEncryptionEncryptOptions): Promise<Binary>;
/**
* Encrypts a Match Expression or Aggregate Expression to query a range index.
*
* Only supported when queryType is "rangePreview" and algorithm is "RangePreview".
*
* @experimental The Range algorithm is experimental only. It is not intended for production use. It is subject to breaking changes.
*
* @param expression - a BSON document of one of the following forms:
* 1. A Match Expression of this form:
* `{ $and: [{ <field>: { $gt: <value1> } }, { <field>: { $lt: <value2> } }] }`
* 2. An Aggregate Expression of this form:
* `{ $and: [{ $gt: [ <fieldpath>, <value1> ] }, { $lt: [ <fieldpath>, <value2> ] }] }`
*
* `$gt` may also be `$gte`. `$lt` may also be `$lte`.
*
* @param options -
* @returns Returns a Promise that either resolves with the encrypted value or rejects with an error.
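*
* @example
* A minimal sketch, assuming an existing `clientEncryption` and a previously created `keyId`; the field name and range option values are illustrative:
* ```ts
* const encryptedFilter = await clientEncryption.encryptExpression(
* { $and: [{ age: { $gt: 18 } }, { age: { $lt: 99 } }] },
* {
* keyId,
* algorithm: 'RangePreview',
* queryType: 'rangePreview',
* rangeOptions: { min: 0, max: 200, sparsity: new Long(1) }
* }
* );
* ```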
*/
encryptExpression(expression: Document, options: ClientEncryptionEncryptOptions): Promise<Binary>;
/**
* Explicitly decrypt a provided encrypted value
*
* @param value - An encrypted value
* @returns a Promise that either resolves with the decrypted value, or rejects with an error
*
* @example
* ```ts
* // Decrypting value with async/await API
* async function decryptMyValue(value) {
* return clientEncryption.decrypt(value);
* }
* ```
*/
decrypt<T = any>(value: Binary): Promise<T>;
/* Excluded from this release type: askForKMSCredentials */
static get libmongocryptVersion(): string;
/* Excluded from this release type: _encrypt */
}
/**
* @public
* Options to provide when creating a new data key.
*/
export declare interface ClientEncryptionCreateDataKeyProviderOptions {
/**
* Identifies a new KMS-specific key used to encrypt the new data key
*/
masterKey?: AWSEncryptionKeyOptions | AzureEncryptionKeyOptions | GCPEncryptionKeyOptions | KMIPEncryptionKeyOptions | undefined;
/**
* An optional list of string alternate names used to reference a key.
* If a key is created with alternate names, then encryption may refer to the key by the unique alternate name instead of by _id.
*/
keyAltNames?: string[] | undefined;
/** @experimental */
keyMaterial?: Buffer | Binary;
}
/**
* @public
*
* A data key provider. Allowed values:
*
* - aws, gcp, local, kmip or azure
* - (`mongodb-client-encryption>=6.0.1` only) a named key, in the form of:
* `aws:<name>`, `gcp:<name>`, `local:<name>`, `kmip:<name>`, `azure:<name>`
* where `name` is an alphanumeric string, underscores allowed.
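*
* @example
* A sketch of two named `local` providers (the names and key material are illustrative; `localMasterKey1`/`localMasterKey2` are assumed 96-byte Buffers):
* ```ts
* const clientEncryption = new ClientEncryption(client, {
* keyVaultNamespace: 'encryption.__keyVault',
* kmsProviders: {
* 'local:key_1': { key: localMasterKey1 },
* 'local:key_2': { key: localMasterKey2 }
* }
* });
* const dataKeyId = await clientEncryption.createDataKey('local:key_1');
* ```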
*/
export declare type ClientEncryptionDataKeyProvider = keyof KMSProviders;
/**
* @public
* Options to provide when encrypting data.
*/
export declare interface ClientEncryptionEncryptOptions {
/**
* The algorithm to use for encryption.
*/
algorithm: 'AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic' | 'AEAD_AES_256_CBC_HMAC_SHA_512-Random' | 'Indexed' | 'Unindexed' | 'RangePreview';
/**
* The id of the Binary dataKey to use for encryption
*/
keyId?: Binary;
/**
* A unique string name corresponding to an already existing dataKey.
*/
keyAltName?: string;
/** The contention factor. */
contentionFactor?: bigint | number;
/**
* The query type supported. Only the queryType `equality` is stable.
*
* @experimental Public Technical Preview: The queryType `rangePreview` is experimental.
*/
queryType?: 'equality' | 'rangePreview';
/** @experimental Public Technical Preview: The index options for a Queryable Encryption field supporting "rangePreview" queries.*/
rangeOptions?: RangeOptions;
}
/**
* @public
* Additional settings to provide when creating a new `ClientEncryption` instance.
*/
export declare interface ClientEncryptionOptions {
/**
* The namespace of the key vault, used to store encryption keys
*/
keyVaultNamespace: string;
/**
* A MongoClient used to fetch keys from a key vault. Defaults to client.
*/
keyVaultClient?: MongoClient | undefined;
/**
* Options for specific KMS providers to use
*/
kmsProviders?: KMSProviders;
/**
* Options for specifying a Socks5 proxy to use for connecting to the KMS.
*/
proxyOptions?: ProxyOptions;
/**
* TLS options for kms providers to use.
*/
tlsOptions?: CSFLEKMSTlsOptions;
}
/**
* @public
* @experimental
*/
export declare interface ClientEncryptionRewrapManyDataKeyProviderOptions {
provider: ClientEncryptionDataKeyProvider;
masterKey?: AWSEncryptionKeyOptions | AzureEncryptionKeyOptions | GCPEncryptionKeyOptions | KMIPEncryptionKeyOptions | undefined;
}
/**
* @public
* @experimental
*/
export declare interface ClientEncryptionRewrapManyDataKeyResult {
/** The result of rewrapping data keys. If unset, no keys matched the filter. */
bulkWriteResult?: BulkWriteResult;
}
/**
* @public
*
* TLS options to use when connecting. The spec specifically calls out which insecure
* tls options are not allowed:
*
* - tlsAllowInvalidCertificates
* - tlsAllowInvalidHostnames
* - tlsInsecure
*
* These options are not included in the type, and are ignored if provided.
*/
export declare type ClientEncryptionTlsOptions = Pick<MongoClientOptions, 'tlsCAFile' | 'tlsCertificateKeyFile' | 'tlsCertificateKeyFilePassword'>;
/**
* @public
* @see https://github.com/mongodb/specifications/blob/master/source/mongodb-handshake/handshake.rst#hello-command
*/
export declare interface ClientMetadata {
driver: {
name: string;
version: string;
};
os: {
type: string;
name?: NodeJS.Platform;
architecture?: string;
version?: string;
};
platform: string;
application?: {
name: string;
};
/** FaaS environment information */
env?: {
name: 'aws.lambda' | 'gcp.func' | 'azure.func' | 'vercel';
timeout_sec?: Int32;
memory_mb?: Int32;
region?: string;
url?: string;
};
}
/** @public */
export declare interface ClientMetadataOptions {
driverInfo?: {
name?: string;
version?: string;
platform?: string;
};
appName?: string;
}
/**
* A class representing a client session on the server
*
* NOTE: not meant to be instantiated directly.
* @public
*/
export declare class ClientSession extends TypedEventEmitter<ClientSessionEvents> {
/* Excluded from this release type: client */
/* Excluded from this release type: sessionPool */
hasEnded: boolean;
clientOptions?: MongoOptions;
supports: {
causalConsistency: boolean;
};
clusterTime?: ClusterTime;
operationTime?: Timestamp;
explicit: boolean;
/* Excluded from this release type: owner */
defaultTransactionOptions: TransactionOptions;
transaction: Transaction;
/* Excluded from this release type: [kServerSession] */
/* Excluded from this release type: [kSnapshotTime] */
/* Excluded from this release type: [kSnapshotEnabled] */
/* Excluded from this release type: [kPinnedConnection] */
/* Excluded from this release type: [kTxnNumberIncrement] */
/* Excluded from this release type: timeoutMS */
/* Excluded from this release type: __constructor */
/** The server id associated with this session */
get id(): ServerSessionId | undefined;
get serverSession(): ServerSession;
/** Whether or not this session is configured for snapshot reads */
get snapshotEnabled(): boolean;
get loadBalanced(): boolean;
/* Excluded from this release type: pinnedConnection */
/* Excluded from this release type: pin */
/* Excluded from this release type: unpin */
get isPinned(): boolean;
/**
* Ends this session on the server
*
* @param options - Optional settings. Currently reserved for future use
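*
* @example
* A common pattern is to end the session in a `finally` block (sketch, assuming a connected `client`; names are illustrative):
* ```ts
* const session = client.startSession();
* try {
* await client.db('app').collection('pets').findOne({}, { session });
* } finally {
* await session.endSession();
* }
* ```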
*/
endSession(options?: EndSessionOptions): Promise<void>;
/**
* Advances the operationTime for a ClientSession.
*
* @param operationTime - the `BSON.Timestamp` to which the session's operationTime should be advanced
*/
advanceOperationTime(operationTime: Timestamp): void;
/**
* Advances the clusterTime for a ClientSession to the provided clusterTime of another ClientSession
*
* @param clusterTime - the $clusterTime returned by the server from another session in the form of a document containing the `BSON.Timestamp` clusterTime and signature
*/
advanceClusterTime(clusterTime: ClusterTime): void;
/**
* Used to determine if this session equals another
*
* @param session - The session to compare to
*/
equals(session: ClientSession): boolean;
/**
* Increment the transaction number on the internal ServerSession
*
* @privateRemarks
* This helper increments a value stored on the client session that will be
* added to the serverSession's txnNumber upon applying it to a command.
* This is because the serverSession is lazily acquired after a connection is obtained
*/
incrementTransactionNumber(): void;
/** @returns whether this session is currently in a transaction or not */
inTransaction(): boolean;
/**
* Starts a new transaction with the given options.
*
* @remarks
* **IMPORTANT**: Running operations in parallel is not supported during a transaction. The use of `Promise.all`,
* `Promise.allSettled`, `Promise.race`, etc to parallelize operations inside a transaction is
* undefined behaviour.
*
* @param options - Options for the transaction
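*
* @example
* A manual transaction sketch (assuming a connected `client`; names and values are illustrative):
* ```ts
* const session = client.startSession();
* const accounts = client.db('bank').collection('accounts');
* session.startTransaction({ writeConcern: { w: 'majority' } });
* try {
* await accounts.updateOne({ _id: 1 }, { $inc: { balance: -100 } }, { session });
* await accounts.updateOne({ _id: 2 }, { $inc: { balance: 100 } }, { session });
* await session.commitTransaction();
* } catch (error) {
* await session.abortTransaction();
* throw error;
* } finally {
* await session.endSession();
* }
* ```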
*/
startTransaction(options?: TransactionOptions): void;
/**
* Commits the currently active transaction in this session.
*/
commitTransaction(): Promise<void>;
/**
* Aborts the currently active transaction in this session.
*/
abortTransaction(): Promise<void>;
/**
* This is here to ensure that ClientSession is never serialized to BSON.
*/
toBSON(): never;
/**
* Starts a transaction and runs a provided function, ensuring the commitTransaction is always attempted when all operations run in the function have completed.
*
* **IMPORTANT:** This method requires the function passed in to return a Promise. That promise must be made by `await`-ing all operations in such a way that rejections are propagated to the returned promise.
*
* **IMPORTANT:** Running operations in parallel is not supported during a transaction. The use of `Promise.all`,
* `Promise.allSettled`, `Promise.race`, etc to parallelize operations inside a transaction is
* undefined behaviour.
*
*
* @remarks
* - If all operations successfully complete and the `commitTransaction` operation is successful, then `withTransaction` will return the result of the provided function.
* - If the transaction is unable to complete or an error is thrown from within the provided function, then `withTransaction` will throw an error.
* - If the transaction is manually aborted within the provided function it will not throw.
* - If the driver needs to attempt to retry the operations, the provided function may be called multiple times.
*
* Check out a descriptive example here:
* @see https://www.mongodb.com/blog/post/quick-start-nodejs--mongodb--how-to-implement-transactions
*
* If a command inside withTransaction fails:
* - It may cause the transaction on the server to be aborted.
* - This situation is normally handled transparently by the driver.
* - However, if the application catches such an error and does not rethrow it, the driver will not be able to determine whether the transaction was aborted or not.
* - The driver will then retry the transaction indefinitely.
*
* To avoid this situation, the application must not silently handle errors within the provided function.
* If the application needs to handle errors within, it must await all operations such that if an operation is rejected it becomes the rejection of the callback function passed into withTransaction.
*
* @param fn - callback to run within a transaction
* @param options - optional settings for the transaction
* @returns the value returned by the provided callback
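*
* @example
* A sketch of the callback pattern (assuming a connected `client`; names are illustrative). Every operation is awaited and passes the session:
* ```ts
* const session = client.startSession();
* try {
* await session.withTransaction(async session => {
* const orders = client.db('shop').collection('orders');
* await orders.insertOne({ item: 'abc', qty: 1 }, { session });
* await orders.updateOne({ item: 'abc' }, { $set: { status: 'shipped' } }, { session });
* });
* } finally {
* await session.endSession();
* }
* ```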
*/
withTransaction<T = any>(fn: WithTransactionCallback<T>, options?: TransactionOptions): Promise<T>;
}
/** @public */
export declare type ClientSessionEvents = {
ended(session: ClientSession): void;
};
/** @public */
export declare interface ClientSessionOptions {
/** Whether causal consistency should be enabled on this session */
causalConsistency?: boolean;
/** Whether all read operations should be read from the same snapshot for this session (NOTE: not compatible with `causalConsistency=true`) */
snapshot?: boolean;
/** The default TransactionOptions to use for transactions started on this session. */
defaultTransactionOptions?: TransactionOptions;
/* Excluded from this release type: defaultTimeoutMS */
/* Excluded from this release type: owner */
/* Excluded from this release type: explicit */
/* Excluded from this release type: initialClusterTime */
}
/**
* @public
* @deprecated This interface is deprecated and will be removed in a future release as it is not used
* in the driver
*/
export declare interface CloseOptions {
force?: boolean;
}
/** @public
* Configuration options for clustered collections
* @see https://www.mongodb.com/docs/manual/core/clustered-collections/
*/
export declare interface ClusteredCollectionOptions extends Document {
name?: string;
key: Document;
unique: boolean;
}
/**
* @public
* Gossiped in component for the cluster time tracking the state of user databases
* across the cluster. It may optionally include a signature identifying the process that
* generated such a value.
*/
export declare interface ClusterTime {
clusterTime: Timestamp;
/** Used to validate the identity of a request or response's ClusterTime. */
signature?: {
hash: Binary;
keyId: Long;
};
}
export { Code }
/** @public */
export declare interface CollationOptions {
locale: string;
caseLevel?: boolean;
caseFirst?: string;
strength?: number;
numericOrdering?: boolean;
alternate?: string;
maxVariable?: string;
backwards?: boolean;
normalization?: boolean;
}
/**
* The **Collection** class is an internal class that embodies a MongoDB collection
* allowing for insert/find/update/delete and other command operations on that MongoDB collection.
*
* **Collection cannot be instantiated directly.**
* @public
*
* @example
* ```ts
* import { MongoClient } from 'mongodb';
*
* interface Pet {
* name: string;
* kind: 'dog' | 'cat' | 'fish';
* }
*
* const client = new MongoClient('mongodb://localhost:27017');
* const pets = client.db().collection<Pet>('pets');
*
* const petCursor = pets.find();
*
* for await (const pet of petCursor) {
* console.log(`${pet.name} is a ${pet.kind}!`);
* }
* ```
*/
export declare class Collection<TSchema extends Document = Document> {
/* Excluded from this release type: s */
/* Excluded from this release type: client */
/* Excluded from this release type: __constructor */
/**
* The name of the database this collection belongs to
*/
get dbName(): string;
/**
* The name of this collection
*/
get collectionName(): string;
/**
* The namespace of this collection, in the format `${this.dbName}.${this.collectionName}`
*/
get namespace(): string;
/* Excluded from this release type: fullNamespace */
/**
* The current readConcern of the collection. If not explicitly defined for
* this collection, will be inherited from the parent DB
*/
get readConcern(): ReadConcern | undefined;
/**
* The current readPreference of the collection. If not explicitly defined for
* this collection, will be inherited from the parent DB
*/
get readPreference(): ReadPreference | undefined;
get bsonOptions(): BSONSerializeOptions;
/**
* The current writeConcern of the collection. If not explicitly defined for
* this collection, will be inherited from the parent DB
*/
get writeConcern(): WriteConcern | undefined;
/** The current index hint for the collection */
get hint(): Hint | undefined;
set hint(v: Hint | undefined);
/**
* Inserts a single document into MongoDB. If the document passed in does not contain the **_id** field,
* one will be added by the driver, mutating the document. This behavior
* can be overridden by setting the **forceServerObjectId** flag.
*
* @param doc - The document to insert
* @param options - Optional settings for the command
*/
insertOne(doc: OptionalUnlessRequiredId<TSchema>, options?: InsertOneOptions): Promise<InsertOneResult<TSchema>>;
/**
* Inserts an array of documents into MongoDB. If documents passed in do not contain the **_id** field,
* one will be added to each of the documents missing it by the driver, mutating the document. This behavior
* can be overridden by setting the **forceServerObjectId** flag.
*
* @param docs - The documents to insert
* @param options - Optional settings for the command
*/
insertMany(docs: OptionalUnlessRequiredId<TSchema>[], options?: BulkWriteOptions): Promise<InsertManyResult<TSchema>>;
/**
* Perform a bulkWrite operation without a fluent API
*
* Legal operation types are
* - `insertOne`
* - `replaceOne`
* - `updateOne`
* - `updateMany`
* - `deleteOne`
* - `deleteMany`
*
* If documents passed in do not contain the **_id** field,
* one will be added to each of the documents missing it by the driver, mutating the document. This behavior
* can be overridden by setting the **forceServerObjectId** flag.
*
* @param operations - Bulk operations to perform
* @param options - Optional settings for the command
* @throws MongoDriverError if operations is not an array
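*
* @example
* A sketch mixing operation types (assuming a `pets` collection of `{ name, kind }` documents):
* ```ts
* await pets.bulkWrite([
* { insertOne: { document: { name: 'Rex', kind: 'dog' } } },
* { updateOne: { filter: { name: 'Felix' }, update: { $set: { kind: 'cat' } }, upsert: true } },
* { deleteMany: { filter: { kind: 'fish' } } }
* ]);
* ```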
*/
bulkWrite(operations: AnyBulkWriteOperation<TSchema>[], options?: BulkWriteOptions): Promise<BulkWriteResult>;
/**
* Update a single document in a collection
*
* The value of `update` can be either:
* - UpdateFilter - A document that contains update operator expressions,
* - Document[] - an aggregation pipeline.
*
* @param filter - The filter used to select the document to update
* @param update - The modifications to apply
* @param options - Optional settings for the command
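*
* @example
* Both update forms, sketched against an illustrative `pets` collection:
* ```ts
* // A document of update operator expressions
* await pets.updateOne({ name: 'Rex' }, { $set: { kind: 'dog' } }, { upsert: true });
*
* // An aggregation pipeline
* await pets.updateOne({ name: 'Rex' }, [{ $set: { updatedAt: '$$NOW' } }]);
* ```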
*/
updateOne(filter: Filter<TSchema>, update: UpdateFilter<TSchema> | Document[], options?: UpdateOptions): Promise<UpdateResult<TSchema>>;
/**
* Replace a document in a collection with another document
*
* @param filter - The filter used to select the document to replace
* @param replacement - The Document that replaces the matching document
* @param options - Optional settings for the command
*/
replaceOne(filter: Filter<TSchema>, replacement: WithoutId<TSchema>, options?: ReplaceOptions): Promise<UpdateResult<TSchema> | Document>;
/**
* Update multiple documents in a collection
*
* The value of `update` can be either:
* - UpdateFilter - A document that contains update operator expressions,
* - Document[] - an aggregation pipeline.
*
* @param filter - The filter used to select the document to update
* @param update - The modifications to apply
* @param options - Optional settings for the command
*/
updateMany(filter: Filter<TSchema>, update: UpdateFilter<TSchema> | Document[], options?: UpdateOptions): Promise<UpdateResult<TSchema>>;
/**
* Delete a document from a collection
*
* @param filter - The filter used to select the document to remove
* @param options - Optional settings for the command
*/
deleteOne(filter?: Filter<TSchema>, options?: DeleteOptions): Promise<DeleteResult>;
/**
* Delete multiple documents from a collection
*
* @param filter - The filter used to select the documents to remove
* @param options - Optional settings for the command
*/
deleteMany(filter?: Filter<TSchema>, options?: DeleteOptions): Promise<DeleteResult>;
/**
* Rename the collection.
*
* @remarks
* This operation does not inherit options from the Db or MongoClient.
*
* @param newName - New name of the collection.
* @param options - Optional settings for the command
*/
rename(newName: string, options?: RenameOptions): Promise<Collection>;
/**
* Drop the collection from the database, removing it permanently. New accesses will create a new collection.
*
* @param options - Optional settings for the command
*/
drop(options?: DropCollectionOptions): Promise<boolean>;
/**
* Fetches the first document that matches the filter
*
* @param filter - Query for find Operation
* @param options - Optional settings for the command
*/
findOne(): Promise<WithId<TSchema> | null>;
findOne(filter: Filter<TSchema>): Promise<WithId<TSchema> | null>;
findOne(filter: Filter<TSchema>, options: FindOptions): Promise<WithId<TSchema> | null>;
findOne<T = TSchema>(): Promise<T | null>;
findOne<T = TSchema>(filter: Filter<TSchema>): Promise<T | null>;
findOne<T = TSchema>(filter: Filter<TSchema>, options?: FindOptions): Promise<T | null>;
/**
* Creates a cursor for a filter that can be used to iterate over results from MongoDB
*
* @param filter - The filter predicate. If unspecified, then all documents in the collection will match the predicate
*/
find(): FindCursor<WithId<TSchema>>;
find(filter: Filter<TSchema>, options?: FindOptions): FindCursor<WithId<TSchema>>;
find<T extends Document>(filter: Filter<TSchema>, options?: FindOptions): FindCursor<T>;
/**
* Returns the options of the collection.
*
* @param options - Optional settings for the command
*/
options(options?: OperationOptions): Promise<Document>;
/**
* Returns if the collection is a capped collection
*
* @param options - Optional settings for the command
*/
isCapped(options?: OperationOptions): Promise<boolean>;
/**
* Creates an index on the collection.
*
* @param indexSpec - The field name or index specification to create an index for
* @param options - Optional settings for the command
*
* @example
* ```ts
* const collection = client.db('foo').collection('bar');
*
* await collection.createIndex({ a: 1, b: -1 });
*
* // Alternate syntax for { c: 1, d: -1 } that ensures order of indexes
* await collection.createIndex([ ['c', 1], ['d', -1] ]);
*
* // Equivalent to { e: 1 }
* await collection.createIndex('e');
*
* // Equivalent to { f: 1, g: 1 }
* await collection.createIndex(['f', 'g'])
*
* // Equivalent to { h: 1, i: -1 }
* await collection.createIndex([ { h: 1 }, { i: -1 } ]);
*
* // Equivalent to { j: 1, k: -1, l: 2d }
* await collection.createIndex(['j', ['k', -1], { l: '2d' }])
* ```
*/
createIndex(indexSpec: IndexSpecification, options?: CreateIndexesOptions): Promise<string>;
/**
* Creates multiple indexes in the collection, this method is only supported for
* MongoDB 2.6 or higher. Earlier versions of MongoDB will throw a command not supported
* error.
*
* **Note**: Unlike {@link Collection#createIndex| createIndex}, this function takes in raw index specifications.
* Index specifications are defined {@link https://www.mongodb.com/docs/manual/reference/command/createIndexes/| here}.
*
* @param indexSpecs - An array of index specifications to be created
* @param options - Optional settings for the command
*
* @example
* ```ts
* const collection = client.db('foo').collection('bar');
* await collection.createIndexes([
* // Simple index on field fizz
* {
* key: { fizz: 1 },
* },
* // wildcard index
* {
* key: { '$**': 1 }
* },
* // named index on darmok and jalad
* {
* key: { darmok: 1, jalad: -1 },
* name: 'tanagra'
* }
* ]);
* ```
*/
createIndexes(indexSpecs: IndexDescription[], options?: CreateIndexesOptions): Promise<string[]>;
/**
* Drops an index from this collection.
*
* @param indexName - Name of the index to drop.
* @param options - Optional settings for the command
*/
dropIndex(indexName: string, options?: DropIndexesOptions): Promise<Document>;
/**
* Drops all indexes from this collection.
*
* @param options - Optional settings for the command
*/
dropIndexes(options?: DropIndexesOptions): Promise<boolean>;
/**
* Get the list of all indexes information for the collection.
*
* @param options - Optional settings for the command
*/
listIndexes(options?: ListIndexesOptions): ListIndexesCursor;
/**
* Checks if one or more indexes exist on the collection, fails on first non-existing index
*
* @param indexes - One or more index names to check.
* @param options - Optional settings for the command
*/
indexExists(indexes: string | string[], options?: ListIndexesOptions): Promise<boolean>;
/**
* Retrieves this collections index info.
*
* @param options - Optional settings for the command
*/
indexInformation(options: IndexInformationOptions & {
full: true;
}): Promise<IndexDescriptionInfo[]>;
indexInformation(options: IndexInformationOptions & {
full?: false;
}): Promise<IndexDescriptionCompact>;
indexInformation(options: IndexInformationOptions): Promise<IndexDescriptionCompact | IndexDescriptionInfo[]>;
indexInformation(): Promise<IndexDescriptionCompact>;
/**
* Gets an estimate of the count of documents in a collection using collection metadata.
* This will always run a count command on all server versions.
*
* Due to an oversight in versions 5.0.0-5.0.8 of MongoDB, the count command,
* which estimatedDocumentCount uses in its implementation, was not included in v1 of
* the Stable API, and so users of the Stable API with estimatedDocumentCount are
* recommended to upgrade their server version to 5.0.9+ or set apiStrict: false to avoid
* encountering errors.
*
* @see {@link https://www.mongodb.com/docs/manual/reference/command/count/#behavior|Count: Behavior}
* @param options - Optional settings for the command
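*
* @example
* A sketch against an illustrative `pets` collection; the estimate reads collection metadata rather than scanning:
* ```ts
* // No filter can be applied to an estimate
* const approximateCount = await pets.estimatedDocumentCount();
* ```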
*/
estimatedDocumentCount(options?: EstimatedDocumentCountOptions): Promise<number>;
/**
* Gets the number of documents matching the filter.
* For a fast count of the total documents in a collection see {@link Collection#estimatedDocumentCount| estimatedDocumentCount}.
* **Note**: When migrating from {@link Collection#count| count} to {@link Collection#countDocuments| countDocuments}
* the following query operators must be replaced:
*
* | Operator | Replacement |
* | -------- | ----------- |
* | `$where` | [`$expr`][1] |
* | `$near` | [`$geoWithin`][2] with [`$center`][3] |
* | `$nearSphere` | [`$geoWithin`][2] with [`$centerSphere`][4] |
*
* [1]: https://www.mongodb.com/docs/manual/reference/operator/query/expr/
* [2]: https://www.mongodb.com/docs/manual/reference/operator/query/geoWithin/
* [3]: https://www.mongodb.com/docs/manual/reference/operator/query/center/#op._S_center
* [4]: https://www.mongodb.com/docs/manual/reference/operator/query/centerSphere/#op._S_centerSphere
*
* @param filter - The filter for the count
* @param options - Optional settings for the command
*
* @see https://www.mongodb.com/docs/manual/reference/operator/query/expr/
* @see https://www.mongodb.com/docs/manual/reference/operator/query/geoWithin/
* @see https://www.mongodb.com/docs/manual/reference/operator/query/center/#op._S_center
* @see https://www.mongodb.com/docs/manual/reference/operator/query/centerSphere/#op._S_centerSphere
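*
* @example
* A sketch of the `$where` to `$expr` replacement called out above (collection and field names are illustrative):
* ```ts
* // Instead of: pets.count({ $where: 'this.qty > 10' })
* const matching = await pets.countDocuments({ $expr: { $gt: ['$qty', 10] } });
* ```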
*/
countDocuments(filter?: Filter<TSchema>, options?: CountDocumentsOptions): Promise<number>;
/**
* The distinct command returns a list of distinct values for the given key across a collection.
*
* @param key - Field of the document to find distinct values for
* @param filter - The filter for filtering the set of documents to which we apply the distinct filter.
* @param options - Optional settings for the command
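*
* @example
* A sketch against an illustrative `pets` collection:
* ```ts
* // Distinct values of `kind` among documents matching the filter
* const kinds = await pets.distinct('kind', { age: { $gte: 1 } });
* ```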
*/
distinct<Key extends keyof WithId<TSchema>>(key: Key): Promise<Array<Flatten<WithId<TSchema>[Key]>>>;
distinct<Key extends keyof WithId<TSchema>>(key: Key, filter: Filter<TSchema>): Promise<Array<Flatten<WithId<TSchema>[Key]>>>;
distinct<Key extends keyof WithId<TSchema>>(key: Key, filter: Filter<TSchema>, options: DistinctOptions): Promise<Array<Flatten<WithId<TSchema>[Key]>>>;
distinct(key: string): Promise<any[]>;
distinct(key: string, filter: Filter<TSchema>): Promise<any[]>;
distinct(key: string, filter: Filter<TSchema>, options: DistinctOptions): Promise<any[]>;
/**
* Retrieve all the indexes on the collection.
*
* @param options - Optional settings for the command
*/
indexes(options: IndexInformationOptions & {
full?: true;
}): Promise<IndexDescriptionInfo[]>;
indexes(options: IndexInformationOptions & {
full: false;
}): Promise<IndexDescriptionCompact>;
indexes(options: IndexInformationOptions): Promise<IndexDescriptionCompact | IndexDescriptionInfo[]>;
indexes(options?: ListIndexesOptions): Promise<IndexDescriptionInfo[]>;
/**
* Find a document and delete it in one atomic operation. Requires a write lock for the duration of the operation.
*
* @param filter - The filter used to select the document to remove
* @param options - Optional settings for the command
*/
findOneAndDelete(filter: Filter<TSchema>, options: FindOneAndDeleteOptions & {
includeResultMetadata: true;
}): Promise<ModifyResult<TSchema>>;
findOneAndDelete(filter: Filter<TSchema>, options: FindOneAndDeleteOptions & {
includeResultMetadata: false;
}): Promise<WithId<TSchema> | null>;
findOneAndDelete(filter: Filter<TSchema>, options: FindOneAndDeleteOptions): Promise<WithId<TSchema> | null>;
findOneAndDelete(filter: Filter<TSchema>): Promise<WithId<TSchema> | null>;
/**
* Find a document and replace it in one atomic operation. Requires a write lock for the duration of the operation.
*
* @param filter - The filter used to select the document to replace
* @param replacement - The Document that replaces the matching document
* @param options - Optional settings for the command
*/
findOneAndReplace(filter: Filter<TSchema>, replacement: WithoutId<TSchema>, options: FindOneAndReplaceOptions & {
includeResultMetadata: true;
}): Promise<ModifyResult<TSchema>>;
findOneAndReplace(filter: Filter<TSchema>, replacement: WithoutId<TSchema>, options: FindOneAndReplaceOptions & {
includeResultMetadata: false;
}): Promise<WithId<TSchema> | null>;
findOneAndReplace(filter: Filter<TSchema>, replacement: WithoutId<TSchema>, options: FindOneAndReplaceOptions): Promise<WithId<TSchema> | null>;
findOneAndReplace(filter: Filter<TSchema>, replacement: WithoutId<TSchema>): Promise<WithId<TSchema> | null>;
/**
* Find a document and update it in one atomic operation. Requires a write lock for the duration of the operation.
*
* @param filter - The filter used to select the document to update
* @param update - Update operations to be performed on the document
* @param options - Optional settings for the command
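*
* @example
* A sketch against an illustrative `pets` collection, returning the post-update document:
* ```ts
* const updated = await pets.findOneAndUpdate(
* { name: 'Rex' },
* { $set: { kind: 'dog' } },
* { returnDocument: 'after' }
* );
* ```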
*/
findOneAndUpdate(filter: Filter<TSchema>, update: UpdateFilter<TSchema>, options: FindOneAndUpdateOptions & {
includeResultMetadata: true;
}): Promise<ModifyResult<TSchema>>;
findOneAndUpdate(filter: Filter<TSchema>, update: UpdateFilter<TSchema>, options: FindOneAndUpdateOptions & {
includeResultMetadata: false;
}): Promise<WithId<TSchema> | null>;
findOneAndUpdate(filter: Filter<TSchema>, update: UpdateFilter<TSchema>, options: FindOneAndUpdateOptions): Promise<WithId<TSchema> | null>;
findOneAndUpdate(filter: Filter<TSchema>, update: UpdateFilter<TSchema>): Promise<WithId<TSchema> | null>;
/**
* Execute an aggregation framework pipeline against the collection, needs MongoDB \>= 2.2
*
* @param pipeline - An array of aggregation pipelines to execute
* @param options - Optional settings for the command
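*
* @example
* A sketch against an illustrative `pets` collection:
* ```ts
* // Count pets per kind
* const byKind = await pets.aggregate([
* { $group: { _id: '$kind', total: { $sum: 1 } } }
* ]).toArray();
* ```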
*/
aggregate<T extends Document = Document>(pipeline?: Document[], options?: AggregateOptions): AggregationCursor<T>;
/**
* Create a new Change Stream, watching for new changes (insertions, updates, replacements, deletions, and invalidations) in this collection.
*
* @remarks
* watch() accepts two generic arguments for distinct use cases:
* - The first is to override the schema that may be defined for this specific collection
* - The second is to override the shape of the change stream document entirely, if it is not provided the type will default to ChangeStreamDocument of the first argument
* @example
* By just providing the first argument I can type the change to be `ChangeStreamDocument<{ _id: number }>`
* ```ts
* collection.watch<{ _id: number }>()
* .on('change', change => console.log(change._id.toFixed(4)));
* ```
*
* @example
* Passing a second argument provides a way to reflect the type changes caused by an advanced pipeline.
* Here, we are using a pipeline to have MongoDB filter for insert changes only and add a comment.
* No need to start from scratch on the ChangeStreamInsertDocument type!
* By using an intersection we can save time and ensure defaults remain the same type!
* ```ts
* collection
* .watch<Schema, ChangeStreamInsertDocument<Schema> & { comment: string }>([
* { $addFields: { comment: 'big changes' } },
* { $match: { operationType: 'insert' } }
* ])
* .on('change', change => {
* change.comment.startsWith('big');
* change.operationType === 'insert';
* // No need to narrow in code because the generics did that for us!
* expectType<Schema>(change.fullDocument);
* });
* ```
*
* @param pipeline - An array of {@link https://www.mongodb.com/docs/manual/reference/operator/aggregation-pipeline/|aggregation pipeline stages} through which to pass change stream documents. This allows for filtering (using $match) and manipulating the change stream documents.
* @param options - Optional settings for the command
* @typeParam TLocal - Type of the data being detected by the change stream
* @typeParam TChange - Type of the whole change stream document emitted
*/
watch<TLocal extends Document = TSchema, TChange extends Document = ChangeStreamDocument<TLocal>>(pipeline?: Document[], options?: ChangeStreamOptions): ChangeStream<TLocal, TChange>;
/**
* Initiate an Out of order batch write operation. All operations will be buffered into insert/update/remove commands executed out of order.
*
* @throws MongoNotConnectedError
* @remarks
* **NOTE:** MongoClient must be connected prior to calling this method due to a known limitation in this legacy implementation.
* However, `collection.bulkWrite()` provides an equivalent API that does not require prior connecting.
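*
* @example
* A sketch of the fluent API (assuming a connected client and an illustrative `pets` collection):
* ```ts
* const bulk = pets.initializeUnorderedBulkOp();
* bulk.insert({ name: 'Rex', kind: 'dog' });
* bulk.find({ kind: 'fish' }).delete();
* await bulk.execute();
* ```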
*/
initializeUnorderedBulkOp(options?: BulkWriteOptions): UnorderedBulkOperation;
/**
* Initiate an In order bulk write operation. Operations will be serially executed in the order they are added, creating a new operation for each switch in types.
*
* @throws MongoNotConnectedError
* @remarks
* **NOTE:** MongoClient must be connected prior to calling this method due to a known limitation in this legacy implementation.
* However, `collection.bulkWrite()` provides an equivalent API that does not require prior connecting.
*/
initializeOrderedBulkOp(options?: BulkWriteOptions): OrderedBulkOperation;
/**
* An estimated count of matching documents in the db to a filter.
*
* **NOTE:** This method has been deprecated, since it does not provide an accurate count of the documents
* in a collection. To obtain an accurate count of documents in the collection, use {@link Collection#countDocuments| countDocuments}.
* To obtain an estimated count of all documents in the collection, use {@link Collection#estimatedDocumentCount| estimatedDocumentCount}.
*
* @deprecated use {@link Collection#countDocuments| countDocuments} or {@link Collection#estimatedDocumentCount| estimatedDocumentCount} instead
*
* @param filter - The filter for the count.
* @param options - Optional settings for the command
*/
count(filter?: Filter<TSchema>, options?: CountOptions): Promise<number>;
/**
* Returns all search indexes for the current collection.
*
* @param options - The options for the list indexes operation.
*
* @remarks Only available when used against a 7.0+ Atlas cluster.
*/
listSearchIndexes(options?: ListSearchIndexesOptions): ListSearchIndexesCursor;
/**
* Returns all search indexes for the current collection.
*
* @param name - The name of the index to search for. Only indexes with matching index names will be returned.
* @param options - The options for the list indexes operation.
*
* @remarks Only available when used against a 7.0+ Atlas cluster.
*/
listSearchIndexes(name: string, options?: ListSearchIndexesOptions): ListSearchIndexesCursor;
/**
* Creates a single search index for the collection.
*
* @param description - The index description for the new search index.
* @returns A promise that resolves to the name of the new search index.
*
* @remarks Only available when used against a 7.0+ Atlas cluster.
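*
* @example
* A sketch of a basic dynamic-mapping Atlas Search index (the index name is illustrative):
* ```ts
* const indexName = await collection.createSearchIndex({
* name: 'default',
* definition: { mappings: { dynamic: true } }
* });
* ```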
*/
createSearchIndex(description: SearchIndexDescription): Promise<string>;
/**
* Creates multiple search indexes for the current collection.
*
* @param descriptions - An array of `SearchIndexDescription`s for the new search indexes.
* @returns A promise that resolves to an array of the newly created search index names.
*
* @remarks Only available when used against a 7.0+ Atlas cluster.
*/
createSearchIndexes(descriptions: SearchIndexDescription[]): Promise<string[]>;
/**
* Deletes a search index by index name.
*
* @param name - The name of the search index to be deleted.
*
* @remarks Only available when used against a 7.0+ Atlas cluster.
*/
dropSearchIndex(name: string): Promise<void>;
/**
* Updates a search index by replacing the existing index definition with the provided definition.
*
* @param name - The name of the search index to update.
* @param definition - The new search index definition.
*
* @remarks Only available when used against a 7.0+ Atlas cluster.
*/
updateSearchIndex(name: string, definition: Document): Promise<void>;
}
/** @public */
export declare interface CollectionInfo extends Document {
name: string;
type?: string;
options?: Document;
info?: {
readOnly?: false;
uuid?: Binary;
};
idIndex?: Document;
}
/** @public */
export declare interface CollectionOptions extends BSONSerializeOptions, WriteConcernOptions {
/** Specify a read concern for the collection. (only MongoDB 3.2 or higher supported) */
readConcern?: ReadConcernLike;
/** The preferred read preference (ReadPreference.PRIMARY, ReadPreference.PRIMARY_PREFERRED, ReadPreference.SECONDARY, ReadPreference.SECONDARY_PREFERRED, ReadPreference.NEAREST). */
readPreference?: ReadPreferenceLike;
/* Excluded from this release type: timeoutMS */
}
/* Excluded from this release type: CollectionPrivate */
/* Excluded from this release type: COMMAND_FAILED */
/* Excluded from this release type: COMMAND_STARTED */
/* Excluded from this release type: COMMAND_SUCCEEDED */
/**
* An event indicating the failure of a given command
* @public
* @category Event
*/
export declare class CommandFailedEvent {
address: string;
/** Driver generated connection id */
connectionId?: string | number;
/**
* Server generated connection id
* Distinct from the connection id and is returned by the hello or legacy hello response as "connectionId" from the server on 4.2+.
*/
serverConnectionId: bigint | null;
requestId: number;
duration: number;
commandName: string;
failure: Error;
serviceId?: ObjectId;
/* Excluded from this release type: name */
/* Excluded from this release type: __constructor */
get hasServiceId(): boolean;
}
/* Excluded from this release type: CommandOperation */
/** @public */
export declare interface CommandOperationOptions extends OperationOptions, WriteConcernOptions, ExplainOptions {
/** Specify a read concern and level for the collection. (only MongoDB 3.2 or higher supported) */
readConcern?: ReadConcernLike;
/** Collation */
collation?: CollationOptions;
maxTimeMS?: number;
/**
* Comment to apply to the operation.
*
* In server versions pre-4.4, 'comment' must be string. A server
* error will be thrown if any other type is provided.
*
* In server versions 4.4 and above, 'comment' can be any valid BSON type.
*/
comment?: unknown;
/** Should retry failed writes */
retryWrites?: boolean;
dbName?: string;
authdb?: string;
noResponse?: boolean;
}
/* Excluded from this release type: CommandOptions */
/**
* An event indicating the start of a given command
* @public
* @category Event
*/
export declare class CommandStartedEvent {
commandObj?: Document;
requestId: number;
databaseName: string;
commandName: string;
command: Document;
address: string;
/** Driver generated connection id */
connectionId?: string | number;
/**
* Server generated connection id
* Distinct from the connection id and is returned by the hello or legacy hello response as "connectionId"
* from the server on 4.2+.
*/
serverConnectionId: bigint | null;
serviceId?: ObjectId;
/* Excluded from this release type: name */
/* Excluded from this release type: __constructor */
get hasServiceId(): boolean;
}
/**
* An event indicating the success of a given command
* @public
* @category Event
*/
export declare class CommandSucceededEvent {
address: string;
/** Driver generated connection id */
connectionId?: string | number;
/**
* Server generated connection id
* Distinct from the connection id and is returned by the hello or legacy hello response as "connectionId" from the server on 4.2+.
*/
serverConnectionId: bigint | null;
requestId: number;
duration: number;
commandName: string;
reply: unknown;
serviceId?: ObjectId;
/* Excluded from this release type: name */
/* Excluded from this release type: __constructor */
get hasServiceId(): boolean;
}
/** @public */
export declare type CommonEvents = 'newListener' | 'removeListener';
/** @public */
export declare const Compressor: Readonly<{
readonly none: 0;
readonly snappy: 1;
readonly zlib: 2;
readonly zstd: 3;
}>;
/** @public */
export declare type Compressor = (typeof Compressor)[CompressorName];
/** @public */
export declare type CompressorName = keyof typeof Compressor;
/** @public */
export declare type Condition<T> = AlternativeType<T> | FilterOperators<AlternativeType<T>>;
/* Excluded from this release type: Connection */
/* Excluded from this release type: CONNECTION_CHECK_OUT_FAILED */
/* Excluded from this release type: CONNECTION_CHECK_OUT_STARTED */
/* Excluded from this release type: CONNECTION_CHECKED_IN */
/* Excluded from this release type: CONNECTION_CHECKED_OUT */
/* Excluded from this release type: CONNECTION_CLOSED */
/* Excluded from this release type: CONNECTION_CREATED */
/* Excluded from this release type: CONNECTION_POOL_CLEARED */
/* Excluded from this release type: CONNECTION_POOL_CLOSED */
/* Excluded from this release type: CONNECTION_POOL_CREATED */
/* Excluded from this release type: CONNECTION_POOL_READY */
/* Excluded from this release type: CONNECTION_READY */
/**
* An event published when a connection is checked into the connection pool
* @public
* @category Event
*/
export declare class ConnectionCheckedInEvent extends ConnectionPoolMonitoringEvent {
/** The id of the connection */
connectionId: number | '<monitor>';
/* Excluded from this release type: name */
/* Excluded from this release type: __constructor */
}
/**
* An event published when a connection is checked out of the connection pool
* @public
* @category Event
*/
export declare class ConnectionCheckedOutEvent extends ConnectionPoolMonitoringEvent {
/** The id of the connection */
connectionId: number | '<monitor>';
/* Excluded from this release type: name */
/* Excluded from this release type: __constructor */
}
/**
* An event published when a request to check a connection out fails
* @public
* @category Event
*/
export declare class ConnectionCheckOutFailedEvent extends ConnectionPoolMonitoringEvent {
/** The reason the attempt to check out failed */
reason: string;
/* Excluded from this release type: error */
/* Excluded from this release type: name */
/* Excluded from this release type: __constructor */
}
/**
* An event published when a request to check a connection out begins
* @public
* @category Event
*/
export declare class ConnectionCheckOutStartedEvent extends ConnectionPoolMonitoringEvent {
/* Excluded from this release type: name */
/* Excluded from this release type: __constructor */
}
/**
* An event published when a connection is closed
* @public
* @category Event
*/
export declare class ConnectionClosedEvent extends ConnectionPoolMonitoringEvent {
/** The id of the connection */
connectionId: number | '<monitor>';
/** The reason the connection was closed */
reason: string;
serviceId?: ObjectId;
/* Excluded from this release type: name */
/* Excluded from this release type: error */
/* Excluded from this release type: __constructor */
}
/**
* An event published when a connection pool creates a new connection
* @public
* @category Event
*/
export declare class ConnectionCreatedEvent extends ConnectionPoolMonitoringEvent {
/** A monotonically increasing, per-pool id for the newly created connection */
connectionId: number | '<monitor>';