commit 013fe673f3
flow like the river

42435 changed files with 5764238 additions and 0 deletions

VISUALIZACION/node_modules/mongodb/LICENSE.md  (201 lines; generated, vendored, executable file)

@@ -0,0 +1,201 @@
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "{}"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright {yyyy} {name of copyright owner}

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
VISUALIZACION/node_modules/mongodb/README.md  (310 lines; generated, vendored, executable file)

@@ -0,0 +1,310 @@
# MongoDB Node.js Driver

The official [MongoDB](https://www.mongodb.com/) driver for Node.js.

**Upgrading to version 5? Take a look at our [upgrade guide here](https://github.com/mongodb/node-mongodb-native/blob/HEAD/etc/notes/CHANGES_5.0.0.md)!**

## Quick Links

| Site                      | Link                                                                                                                |
| ------------------------- | ------------------------------------------------------------------------------------------------------------------- |
| Documentation             | [www.mongodb.com/docs/drivers/node](https://www.mongodb.com/docs/drivers/node)                                      |
| API Docs                  | [mongodb.github.io/node-mongodb-native](https://mongodb.github.io/node-mongodb-native)                              |
| `npm` package             | [www.npmjs.com/package/mongodb](https://www.npmjs.com/package/mongodb)                                              |
| MongoDB                   | [www.mongodb.com](https://www.mongodb.com)                                                                          |
| MongoDB University        | [learn.mongodb.com](https://learn.mongodb.com/catalog?labels=%5B%22Language%22%5D&values=%5B%22Node.js%22%5D)       |
| MongoDB Developer Center  | [www.mongodb.com/developer](https://www.mongodb.com/developer/languages/javascript/)                                |
| Stack Overflow            | [stackoverflow.com](https://stackoverflow.com/search?q=%28%5Btypescript%5D+or+%5Bjavascript%5D+or+%5Bnode.js%5D%29+and+%5Bmongodb%5D) |
| Source Code               | [github.com/mongodb/node-mongodb-native](https://github.com/mongodb/node-mongodb-native)                            |
| Upgrade to v5             | [etc/notes/CHANGES_5.0.0.md](https://github.com/mongodb/node-mongodb-native/blob/HEAD/etc/notes/CHANGES_5.0.0.md)   |
| Contributing              | [CONTRIBUTING.md](https://github.com/mongodb/node-mongodb-native/blob/HEAD/CONTRIBUTING.md)                         |
| Changelog                 | [HISTORY.md](https://github.com/mongodb/node-mongodb-native/blob/HEAD/HISTORY.md)                                   |

### Bugs / Feature Requests

Think you’ve found a bug? Want to see a new feature in `node-mongodb-native`? Please open a
case in our issue management tool, JIRA:

- Create an account and log in at [jira.mongodb.org](https://jira.mongodb.org).
- Navigate to the NODE project at [jira.mongodb.org/browse/NODE](https://jira.mongodb.org/browse/NODE).
- Click **Create Issue**. Please provide as much information as possible about the issue type and how to reproduce it.

Bug reports in JIRA for all driver projects (i.e. NODE, PYTHON, CSHARP, JAVA) and the
Core Server (i.e. SERVER) project are **public**.

### Support / Feedback

For issues with, questions about, or feedback for the Node.js driver, please look into our [support channels](https://www.mongodb.com/docs/manual/support). Please do not email any of the driver developers directly with issues or questions; you're more likely to get an answer on the [MongoDB Community Forums](https://community.mongodb.com/tags/c/drivers-odms-connectors/7/node-js-driver).

### Change Log

Change history can be found in [`HISTORY.md`](https://github.com/mongodb/node-mongodb-native/blob/HEAD/HISTORY.md).

### Compatibility

For version compatibility matrices, please refer to the following links:

- [MongoDB](https://www.mongodb.com/docs/drivers/node/current/compatibility/#mongodb-compatibility)
- [Node.js](https://www.mongodb.com/docs/drivers/node/current/compatibility/#language-compatibility)

#### TypeScript Version

We recommend using the latest version of TypeScript; however, we currently ensure the driver's public types compile against `typescript@4.1.6`.
This is the lowest TypeScript version guaranteed to work with our driver; older versions may or may not work, so use them at your own risk.
Since TypeScript [does not restrict breaking changes to major versions](https://github.com/Microsoft/TypeScript/wiki/Breaking-Changes), we consider this support best effort.
If you run into any unexpected compiler failures against our supported TypeScript versions, please let us know by filing an issue on our [JIRA](https://jira.mongodb.org/browse/NODE).

## Installation

The recommended way to get started with the Node.js 5.x driver is to use `npm` (the Node Package Manager) to install the dependency in your project.

After you've created your own project using `npm init`, you can run:

```bash
npm install mongodb
# or ...
yarn add mongodb
```

This will download the MongoDB driver and add a dependency entry to your `package.json` file.

If you are a TypeScript user, you will also need the Node.js type definitions to use the driver's definitions:

```sh
npm install -D @types/node
```

## Driver Extensions

The MongoDB driver can optionally be enhanced by the following feature packages:

Maintained by MongoDB:

- Zstd network compression - [@mongodb-js/zstd](https://github.com/mongodb-js/zstd)
- MongoDB field level and queryable encryption - [mongodb-client-encryption](https://github.com/mongodb/libmongocrypt#readme)
- GSSAPI / SSPI / Kerberos authentication - [kerberos](https://github.com/mongodb-js/kerberos)

Some of these packages include native C++ extensions.
Consult the [troubleshooting guide here](https://github.com/mongodb/node-mongodb-native/blob/HEAD/etc/notes/native-extensions.md) if you run into compilation issues.

Third party:

- Snappy network compression - [snappy](https://github.com/Brooooooklyn/snappy)
- AWS authentication - [@aws-sdk/credential-providers](https://github.com/aws/aws-sdk-js-v3/tree/main/packages/credential-providers)
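
Once one of these packages is installed, you opt in through `MongoClient` options and the driver picks up the extension at runtime. A minimal sketch (the connection string is a placeholder) of enabling zstd network compression, assuming `@mongodb-js/zstd` has been installed:

```js
const { MongoClient } = require('mongodb');

// Assumes @mongodb-js/zstd is installed; the driver loads it on demand.
const client = new MongoClient('mongodb://localhost:27017', {
  compressors: ['zstd'] // negotiated with the server and ignored if unsupported there
});
```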
## Quick Start

This guide will show you how to set up a simple application using Node.js and MongoDB. Its scope is only how to set up the driver and perform the simple CRUD operations. For more in-depth coverage, see the [official documentation](https://www.mongodb.com/docs/drivers/node/).

### Create the `package.json` file

First, create a directory where your application will live.

```bash
mkdir myProject
cd myProject
```

Enter the following command and answer the questions to create the initial structure for your new project:

```bash
npm init -y
```

Next, install the driver as a dependency.

```bash
npm install mongodb
```

### Start a MongoDB Server

For complete MongoDB installation instructions, see [the manual](https://www.mongodb.com/docs/manual/installation/).

1. Download the right MongoDB version from [MongoDB](https://www.mongodb.org/downloads)
2. Create a database directory (in this case under **/data**).
3. Install and start a `mongod` process.

```bash
mongod --dbpath=/data
```

You should see the **mongod** process start up and print some status information.

### Connect to MongoDB

Create a new **app.js** file and add the following code to try out some basic CRUD
operations using the MongoDB driver.

Add code to connect to the server and the database **myProject**:

> **NOTE:** Resolving DNS Connection issues
>
> Node.js 18 changed the default DNS resolution ordering from always prioritizing ipv4 to the ordering
> returned by the DNS provider. In some environments, this can result in `localhost` resolving to
> an ipv6 address instead of ipv4 and a consequent failure to connect to the server.
>
> This can be resolved by:
>
> - specifying the ip address family using the MongoClient `family` option (`MongoClient(<uri>, { family: 4 } )`)
> - launching mongod or mongos with the ipv6 flag enabled ([--ipv6 mongod option documentation](https://www.mongodb.com/docs/manual/reference/program/mongod/#std-option-mongod.--ipv6))
> - using a host of `127.0.0.1` in place of localhost
> - specifying the DNS resolution ordering with the `--dns-resolution-order` Node.js command line argument (e.g. `node --dns-resolution-order=ipv4first`)

```js
const { MongoClient } = require('mongodb');
// or as an es module:
// import { MongoClient } from 'mongodb'

// Connection URL
const url = 'mongodb://localhost:27017';
const client = new MongoClient(url);

// Database Name
const dbName = 'myProject';

async function main() {
  // Use connect method to connect to the server
  await client.connect();
  console.log('Connected successfully to server');
  const db = client.db(dbName);
  const collection = db.collection('documents');

  // the following code examples can be pasted here...

  return 'done.';
}

main()
  .then(console.log)
  .catch(console.error)
  .finally(() => client.close());
```
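
If you do hit the `localhost`/IPv6 mismatch described in the note above, here is a minimal sketch of the first and third workarounds:

```js
// Workaround: pin name resolution to the IPv4 address family.
const clientIpv4 = new MongoClient('mongodb://localhost:27017', { family: 4 });

// Workaround: skip DNS entirely by using the IPv4 loopback address.
const clientLoopback = new MongoClient('mongodb://127.0.0.1:27017');
```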
Run your app from the command line with:

```bash
node app.js
```

The application should print **Connected successfully to server** to the console.

### Insert a Document

Add to **app.js** the following snippet, which uses the **insertMany**
method to add three documents to the **documents** collection.

```js
const insertResult = await collection.insertMany([{ a: 1 }, { a: 2 }, { a: 3 }]);
console.log('Inserted documents =>', insertResult);
```

The **insertMany** command returns an object with information about the insert operations.
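
Concretely, the returned object carries an acknowledgement flag, the insert count, and the generated `_id`s; a short sketch of reading those fields (printed values are illustrative):

```js
console.log(insertResult.acknowledged); // true once the write concern is satisfied
console.log(insertResult.insertedCount); // 3
console.log(insertResult.insertedIds); // { '0': ObjectId(...), '1': ObjectId(...), '2': ObjectId(...) }
```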
### Find All Documents

Add a query that returns all the documents.

```js
const findResult = await collection.find({}).toArray();
console.log('Found documents =>', findResult);
```

This query returns all the documents in the **documents** collection.
If you add this below the insertMany example, you'll see the documents you've inserted.

### Find Documents with a Query Filter

Add a query filter to find only documents which meet the query criteria.

```js
const filteredDocs = await collection.find({ a: 3 }).toArray();
console.log('Found documents filtered by { a: 3 } =>', filteredDocs);
```

Only the documents which match `'a' : 3` should be returned.

### Update a document

The following operation updates a document in the **documents** collection.

```js
const updateResult = await collection.updateOne({ a: 3 }, { $set: { b: 1 } });
console.log('Updated documents =>', updateResult);
```

The method updates the first document where the field **a** is equal to **3** by adding a new field **b** set to **1**. `updateResult` contains information about whether there was a matching document to update or not.
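
For example, a small sketch using those fields to distinguish "no matching document" from "matched and modified":

```js
if (updateResult.matchedCount === 0) {
  console.log('No document matched { a: 3 }');
} else {
  console.log(`Matched ${updateResult.matchedCount}, modified ${updateResult.modifiedCount}`);
}
```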
### Remove a document

Remove the document where the field **a** is equal to **3**.

```js
const deleteResult = await collection.deleteMany({ a: 3 });
console.log('Deleted documents =>', deleteResult);
```
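
The returned object reports how many documents were removed; a one-line sketch:

```js
console.log(`Deleted ${deleteResult.deletedCount} document(s)`); // deletedCount is the number removed
```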
### Index a Collection

[Indexes](https://www.mongodb.com/docs/manual/indexes/) can improve your application's
performance. The following function creates an index on the **a** field in the
**documents** collection.

```js
const indexName = await collection.createIndex({ a: 1 });
console.log('index name =', indexName);
```

For more detailed information, see the [indexing strategies page](https://www.mongodb.com/docs/manual/applications/indexes/).

## Error Handling

If you need to filter certain errors from our driver, we have a helpful tree of errors described in [etc/notes/errors.md](https://github.com/mongodb/node-mongodb-native/blob/HEAD/etc/notes/errors.md).

It is our recommendation to use `instanceof` checks on errors and to avoid relying on parsing `error.message` and `error.name` strings in your code.
We guarantee `instanceof` checks will pass according to semver guidelines, but errors may be sub-classed or their messages may change at any time, even in patch releases, as we see fit to increase the helpfulness of the errors.

Any new errors we add to the driver will directly extend an existing error class, and no existing error will be moved to a different parent class outside of a major release.
This means `instanceof` will always be able to accurately capture the errors that our driver throws.

```typescript
const client = new MongoClient(url);
await client.connect();
const collection = client.db().collection('collection');

try {
  await collection.insertOne({ _id: 1 });
  await collection.insertOne({ _id: 1 }); // duplicate key error
} catch (error) {
  if (error instanceof MongoServerError) {
    console.log(`Error worth logging: ${error}`); // special case for some reason
  }
  throw error; // still want to crash
}
```

## Nightly releases

If you need to test with a change from the latest `main` branch, our `mongodb` npm package has nightly versions released under the `nightly` tag.

```sh
npm install mongodb@nightly
```

Nightly versions are published regardless of testing outcome.
This means there could be semantic breakages or partially implemented features.
The nightly build is not suitable for production use.

## Next Steps

- [MongoDB Documentation](https://www.mongodb.com/docs/manual/)
- [MongoDB Node Driver Documentation](https://www.mongodb.com/docs/drivers/node/)
- [Read about Schemas](https://www.mongodb.com/docs/manual/core/data-modeling-introduction/)
- [Star us on GitHub](https://github.com/mongodb/node-mongodb-native)

## License

[Apache 2.0](LICENSE.md)

© 2012-present MongoDB [Contributors](https://github.com/mongodb/node-mongodb-native/blob/HEAD/CONTRIBUTORS.md) \
© 2009-2012 Christian Amor Kvalheim
VISUALIZACION/node_modules/mongodb/etc/prepare.js  (12 lines; generated, vendored, executable file)

@@ -0,0 +1,12 @@
#! /usr/bin/env node
var cp = require('child_process');
var fs = require('fs');
var os = require('os');

// Installed from the repository (TypeScript sources present): build the type definitions.
if (fs.existsSync('src')) {
  cp.spawn('npm', ['run', 'build:dts'], { stdio: 'inherit', shell: os.platform() === 'win32' });
} else {
  // Installed from the published package: the compiled lib/ directory must already exist.
  if (!fs.existsSync('lib')) {
    console.warn('MongoDB: No compiled javascript present, the driver is not installed correctly.');
  }
}
VISUALIZACION/node_modules/mongodb/lib/admin.js  (149 lines; generated, vendored, executable file)

@@ -0,0 +1,149 @@
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.Admin = void 0;
|
||||
const add_user_1 = require("./operations/add_user");
|
||||
const execute_operation_1 = require("./operations/execute_operation");
|
||||
const list_databases_1 = require("./operations/list_databases");
|
||||
const remove_user_1 = require("./operations/remove_user");
|
||||
const run_command_1 = require("./operations/run_command");
|
||||
const validate_collection_1 = require("./operations/validate_collection");
|
||||
/**
|
||||
* The **Admin** class is an internal class that allows convenient access to
|
||||
* the admin functionality and commands for MongoDB.
|
||||
*
|
||||
* **ADMIN Cannot directly be instantiated**
|
||||
* @public
|
||||
*
|
||||
* @example
|
||||
* ```ts
|
||||
* import { MongoClient } from 'mongodb';
|
||||
*
|
||||
* const client = new MongoClient('mongodb://localhost:27017');
|
||||
* const admin = client.db().admin();
|
||||
* const dbInfo = await admin.listDatabases();
|
||||
* for (const db of dbInfo.databases) {
|
||||
* console.log(db.name);
|
||||
* }
|
||||
* ```
|
||||
*/
|
||||
class Admin {
|
||||
/**
|
||||
* Create a new Admin instance
|
||||
* @internal
|
||||
*/
|
||||
constructor(db) {
|
||||
this.s = { db };
|
||||
}
|
||||
/**
|
||||
* Execute a command
|
||||
*
|
||||
* The driver will ensure the following fields are attached to the command sent to the server:
|
||||
* - `lsid` - sourced from an implicit session or options.session
|
||||
* - `$readPreference` - defaults to primary or can be configured by options.readPreference
|
||||
* - `$db` - sourced from the name of this database
|
||||
*
|
||||
* If the client has a serverApi setting:
|
||||
* - `apiVersion`
|
||||
* - `apiStrict`
|
||||
* - `apiDeprecationErrors`
|
||||
*
|
||||
* When in a transaction:
|
||||
* - `readConcern` - sourced from readConcern set on the TransactionOptions
|
||||
* - `writeConcern` - sourced from writeConcern set on the TransactionOptions
|
||||
*
|
||||
* Attaching any of the above fields to the command will have no effect as the driver will overwrite the value.
|
||||
*
|
||||
* @param command - The command to execute
|
||||
* @param options - Optional settings for the command
|
||||
*/
|
||||
async command(command, options) {
|
||||
return (0, execute_operation_1.executeOperation)(this.s.db.client, new run_command_1.RunCommandOperation(this.s.db, command, { dbName: 'admin', ...options }));
|
||||
}
|
||||
/**
|
||||
* Retrieve the server build information
|
||||
*
|
||||
* @param options - Optional settings for the command
|
||||
*/
|
||||
async buildInfo(options) {
|
||||
return this.command({ buildinfo: 1 }, options);
|
||||
}
|
||||
/**
|
||||
* Retrieve the server build information
|
||||
*
|
||||
* @param options - Optional settings for the command
|
||||
*/
|
||||
async serverInfo(options) {
|
||||
return this.command({ buildinfo: 1 }, options);
|
||||
}
|
||||
/**
|
||||
* Retrieve this db's server status.
|
||||
*
|
||||
* @param options - Optional settings for the command
|
||||
*/
|
||||
async serverStatus(options) {
|
||||
return this.command({ serverStatus: 1 }, options);
|
||||
}
|
||||
/**
|
||||
* Ping the MongoDB server and retrieve results
|
||||
*
|
||||
* @param options - Optional settings for the command
|
||||
*/
|
||||
async ping(options) {
|
||||
return this.command({ ping: 1 }, options);
|
||||
}
|
||||
/**
|
||||
* Add a user to the database
|
||||
*
|
||||
* @param username - The username for the new user
|
||||
* @param passwordOrOptions - An optional password for the new user, or the options for the command
|
||||
* @param options - Optional settings for the command
|
||||
* @deprecated Use the createUser command in `db.command()` instead.
|
||||
* @see https://www.mongodb.com/docs/manual/reference/command/createUser/
|
||||
*/
|
||||
async addUser(username, passwordOrOptions, options) {
|
||||
options =
|
||||
options != null && typeof options === 'object'
|
||||
? options
|
||||
: passwordOrOptions != null && typeof passwordOrOptions === 'object'
|
||||
? passwordOrOptions
|
||||
: undefined;
|
||||
const password = typeof passwordOrOptions === 'string' ? passwordOrOptions : undefined;
|
||||
return (0, execute_operation_1.executeOperation)(this.s.db.client, new add_user_1.AddUserOperation(this.s.db, username, password, { dbName: 'admin', ...options }));
|
||||
}
|
||||
/**
|
||||
* Remove a user from a database
|
||||
*
|
||||
* @param username - The username to remove
|
||||
* @param options - Optional settings for the command
|
||||
*/
|
||||
async removeUser(username, options) {
|
||||
return (0, execute_operation_1.executeOperation)(this.s.db.client, new remove_user_1.RemoveUserOperation(this.s.db, username, { dbName: 'admin', ...options }));
|
||||
}
|
||||
/**
|
||||
* Validate an existing collection
|
||||
*
|
||||
* @param collectionName - The name of the collection to validate.
|
||||
* @param options - Optional settings for the command
|
||||
*/
|
||||
async validateCollection(collectionName, options = {}) {
|
||||
return (0, execute_operation_1.executeOperation)(this.s.db.client, new validate_collection_1.ValidateCollectionOperation(this, collectionName, options));
|
||||
}
|
||||
/**
|
||||
* List the available databases
|
||||
*
|
||||
* @param options - Optional settings for the command
|
||||
*/
|
||||
async listDatabases(options) {
|
||||
return (0, execute_operation_1.executeOperation)(this.s.db.client, new list_databases_1.ListDatabasesOperation(this.s.db, options));
|
||||
}
|
||||
/**
|
||||
* Get ReplicaSet status
|
||||
*
|
||||
* @param options - Optional settings for the command
|
||||
*/
|
||||
async replSetGetStatus(options) {
|
||||
return this.command({ replSetGetStatus: 1 }, options);
|
||||
}
|
||||
}
|
||||
exports.Admin = Admin;
|
||||
//# sourceMappingURL=admin.js.map
|
||||
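
For orientation, a minimal usage sketch for the Admin helpers defined above (the connection string is a placeholder and assumes a reachable deployment):

```js
const { MongoClient } = require('mongodb');

async function main() {
  const client = new MongoClient('mongodb://localhost:27017');
  const admin = client.db().admin();
  console.log(await admin.ping()); // { ok: 1 }
  const { databases } = await admin.listDatabases();
  console.log(databases.map(d => d.name));
  await client.close();
}

main().catch(console.error);
```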
VISUALIZACION/node_modules/mongodb/lib/admin.js.map  (1 line; generated, vendored, executable file)

@@ -0,0 +1 @@
{"version":3,"file":"admin.js","sourceRoot":"","sources":["../src/admin.ts"],"names":[],"mappings":";;;AAEA,oDAA8E;AAE9E,sEAAkE;AAClE,gEAIqC;AACrC,0DAAuF;AACvF,0DAAuF;AACvF,0EAG0C;AAO1C;;;;;;;;;;;;;;;;;;GAkBG;AACH,MAAa,KAAK;IAIhB;;;OAGG;IACH,YAAY,EAAM;QAChB,IAAI,CAAC,CAAC,GAAG,EAAE,EAAE,EAAE,CAAC;IAClB,CAAC;IAED;;;;;;;;;;;;;;;;;;;;;OAqBG;IACH,KAAK,CAAC,OAAO,CAAC,OAAiB,EAAE,OAA2B;QAC1D,OAAO,IAAA,oCAAgB,EACrB,IAAI,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,EAChB,IAAI,iCAAmB,CAAC,IAAI,CAAC,CAAC,CAAC,EAAE,EAAE,OAAO,EAAE,EAAE,MAAM,EAAE,OAAO,EAAE,GAAG,OAAO,EAAE,CAAC,CAC7E,CAAC;IACJ,CAAC;IAED;;;;OAIG;IACH,KAAK,CAAC,SAAS,CAAC,OAAiC;QAC/C,OAAO,IAAI,CAAC,OAAO,CAAC,EAAE,SAAS,EAAE,CAAC,EAAE,EAAE,OAAO,CAAC,CAAC;IACjD,CAAC;IAED;;;;OAIG;IACH,KAAK,CAAC,UAAU,CAAC,OAAiC;QAChD,OAAO,IAAI,CAAC,OAAO,CAAC,EAAE,SAAS,EAAE,CAAC,EAAE,EAAE,OAAO,CAAC,CAAC;IACjD,CAAC;IAED;;;;OAIG;IACH,KAAK,CAAC,YAAY,CAAC,OAAiC;QAClD,OAAO,IAAI,CAAC,OAAO,CAAC,EAAE,YAAY,EAAE,CAAC,EAAE,EAAE,OAAO,CAAC,CAAC;IACpD,CAAC;IAED;;;;OAIG;IACH,KAAK,CAAC,IAAI,CAAC,OAAiC;QAC1C,OAAO,IAAI,CAAC,OAAO,CAAC,EAAE,IAAI,EAAE,CAAC,EAAE,EAAE,OAAO,CAAC,CAAC;IAC5C,CAAC;IAED;;;;;;;;OAQG;IACH,KAAK,CAAC,OAAO,CACX,QAAgB,EAChB,iBAA2C,EAC3C,OAAwB;QAExB,OAAO;YACL,OAAO,IAAI,IAAI,IAAI,OAAO,OAAO,KAAK,QAAQ;gBAC5C,CAAC,CAAC,OAAO;gBACT,CAAC,CAAC,iBAAiB,IAAI,IAAI,IAAI,OAAO,iBAAiB,KAAK,QAAQ;oBACpE,CAAC,CAAC,iBAAiB;oBACnB,CAAC,CAAC,SAAS,CAAC;QAChB,MAAM,QAAQ,GAAG,OAAO,iBAAiB,KAAK,QAAQ,CAAC,CAAC,CAAC,iBAAiB,CAAC,CAAC,CAAC,SAAS,CAAC;QACvF,OAAO,IAAA,oCAAgB,EACrB,IAAI,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,EAChB,IAAI,2BAAgB,CAAC,IAAI,CAAC,CAAC,CAAC,EAAE,EAAE,QAAQ,EAAE,QAAQ,EAAE,EAAE,MAAM,EAAE,OAAO,EAAE,GAAG,OAAO,EAAE,CAAC,CACrF,CAAC;IACJ,CAAC;IAED;;;;;OAKG;IACH,KAAK,CAAC,UAAU,CAAC,QAAgB,EAAE,OAA2B;QAC5D,OAAO,IAAA,oCAAgB,EACrB,IAAI,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,EAChB,IAAI,iCAAmB,CAAC,IAAI,CAAC,CAAC,CAAC,EAAE,EAAE,QAAQ,EAAE,EAAE,MAAM,EAAE,OAAO,EAAE,GAAG,OAAO,EAAE,CAAC,CAC9E,CAAC;IACJ,CAAC;IAED;;;;;OAKG;IACH,KAAK,CAAC,kBAAkB,CACtB,cAAsB,EACtB,UAAqC,EAAE;QAEvC,OAAO,IAAA,oCAAgB,EACrB,IAAI,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,EAChB,IAAI,iDAA2B,CAAC,IAAI,EAAE,cAAc,EAAE,OAAO,CAAC,CAC/D,CAAC;IACJ,CAAC;IAED;;;;OAIG;IACH,KAAK,CAAC,aAAa,CAAC,OAA8B;QAChD,OAAO,IAAA,oCAAgB,EAAC,IAAI,CAAC,CAAC,CAAC,EAAE,CAAC,MAAM,EAAE,IAAI,uCAAsB,CAAC,IAAI,CAAC,CAAC,CAAC,EAAE,EAAE,OAAO,CAAC,CAAC,CAAC;IAC5F,CAAC;IAED;;;;OAIG;IACH,KAAK,CAAC,gBAAgB,CAAC,OAAiC;QACtD,OAAO,IAAI,CAAC,OAAO,CAAC,EAAE,gBAAgB,EAAE,CAAC,EAAE,EAAE,OAAO,CAAC,CAAC;IACxD,CAAC;CACF;AAtJD,sBAsJC"}
VISUALIZACION/node_modules/mongodb/lib/bson.js  (61 lines; generated, vendored, executable file)

@@ -0,0 +1,61 @@
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.resolveBSONOptions = exports.pluckBSONSerializeOptions = exports.Timestamp = exports.serialize = exports.ObjectId = exports.MinKey = exports.MaxKey = exports.Long = exports.Int32 = exports.Double = exports.deserialize = exports.Decimal128 = exports.DBRef = exports.Code = exports.calculateObjectSize = exports.BSONType = exports.BSONSymbol = exports.BSONRegExp = exports.BSON = exports.Binary = void 0;
|
||||
var bson_1 = require("bson");
|
||||
Object.defineProperty(exports, "Binary", { enumerable: true, get: function () { return bson_1.Binary; } });
|
||||
Object.defineProperty(exports, "BSON", { enumerable: true, get: function () { return bson_1.BSON; } });
|
||||
Object.defineProperty(exports, "BSONRegExp", { enumerable: true, get: function () { return bson_1.BSONRegExp; } });
|
||||
Object.defineProperty(exports, "BSONSymbol", { enumerable: true, get: function () { return bson_1.BSONSymbol; } });
|
||||
Object.defineProperty(exports, "BSONType", { enumerable: true, get: function () { return bson_1.BSONType; } });
|
||||
Object.defineProperty(exports, "calculateObjectSize", { enumerable: true, get: function () { return bson_1.calculateObjectSize; } });
|
||||
Object.defineProperty(exports, "Code", { enumerable: true, get: function () { return bson_1.Code; } });
|
||||
Object.defineProperty(exports, "DBRef", { enumerable: true, get: function () { return bson_1.DBRef; } });
|
||||
Object.defineProperty(exports, "Decimal128", { enumerable: true, get: function () { return bson_1.Decimal128; } });
|
||||
Object.defineProperty(exports, "deserialize", { enumerable: true, get: function () { return bson_1.deserialize; } });
|
||||
Object.defineProperty(exports, "Double", { enumerable: true, get: function () { return bson_1.Double; } });
|
||||
Object.defineProperty(exports, "Int32", { enumerable: true, get: function () { return bson_1.Int32; } });
|
||||
Object.defineProperty(exports, "Long", { enumerable: true, get: function () { return bson_1.Long; } });
|
||||
Object.defineProperty(exports, "MaxKey", { enumerable: true, get: function () { return bson_1.MaxKey; } });
|
||||
Object.defineProperty(exports, "MinKey", { enumerable: true, get: function () { return bson_1.MinKey; } });
|
||||
Object.defineProperty(exports, "ObjectId", { enumerable: true, get: function () { return bson_1.ObjectId; } });
|
||||
Object.defineProperty(exports, "serialize", { enumerable: true, get: function () { return bson_1.serialize; } });
|
||||
Object.defineProperty(exports, "Timestamp", { enumerable: true, get: function () { return bson_1.Timestamp; } });
|
||||
function pluckBSONSerializeOptions(options) {
|
||||
const { fieldsAsRaw, useBigInt64, promoteValues, promoteBuffers, promoteLongs, serializeFunctions, ignoreUndefined, bsonRegExp, raw, enableUtf8Validation } = options;
|
||||
return {
|
||||
fieldsAsRaw,
|
||||
useBigInt64,
|
||||
promoteValues,
|
||||
promoteBuffers,
|
||||
promoteLongs,
|
||||
serializeFunctions,
|
||||
ignoreUndefined,
|
||||
bsonRegExp,
|
||||
raw,
|
||||
enableUtf8Validation
|
||||
};
|
||||
}
|
||||
exports.pluckBSONSerializeOptions = pluckBSONSerializeOptions;
|
||||
/**
|
||||
* Merge the given BSONSerializeOptions, preferring options over the parent's options, and
|
||||
* substituting defaults for values not set.
|
||||
*
|
||||
* @internal
|
||||
*/
|
||||
function resolveBSONOptions(options, parent) {
|
||||
const parentOptions = parent?.bsonOptions;
|
||||
return {
|
||||
raw: options?.raw ?? parentOptions?.raw ?? false,
|
||||
useBigInt64: options?.useBigInt64 ?? parentOptions?.useBigInt64 ?? false,
|
||||
promoteLongs: options?.promoteLongs ?? parentOptions?.promoteLongs ?? true,
|
||||
promoteValues: options?.promoteValues ?? parentOptions?.promoteValues ?? true,
|
||||
promoteBuffers: options?.promoteBuffers ?? parentOptions?.promoteBuffers ?? false,
|
||||
ignoreUndefined: options?.ignoreUndefined ?? parentOptions?.ignoreUndefined ?? false,
|
||||
bsonRegExp: options?.bsonRegExp ?? parentOptions?.bsonRegExp ?? false,
|
||||
serializeFunctions: options?.serializeFunctions ?? parentOptions?.serializeFunctions ?? false,
|
||||
fieldsAsRaw: options?.fieldsAsRaw ?? parentOptions?.fieldsAsRaw ?? {},
|
||||
enableUtf8Validation: options?.enableUtf8Validation ?? parentOptions?.enableUtf8Validation ?? true
|
||||
};
|
||||
}
|
||||
exports.resolveBSONOptions = resolveBSONOptions;
|
||||
//# sourceMappingURL=bson.js.map
|
||||
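
To make the merge order of resolveBSONOptions concrete, a small sketch; note this is internal API, so the deep require path is an assumption and may be blocked by the package's export map in some versions:

```js
const { resolveBSONOptions } = require('mongodb/lib/bson');

// Hypothetical parent (e.g. a Db) carrying inherited bsonOptions.
const parent = { bsonOptions: { promoteLongs: false, ignoreUndefined: true } };

const resolved = resolveBSONOptions({ ignoreUndefined: false }, parent);
console.log(resolved.ignoreUndefined); // false (the explicit option wins over the parent)
console.log(resolved.promoteLongs); // false (inherited from the parent)
console.log(resolved.raw); // false (neither set, so the default applies)
```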
VISUALIZACION/node_modules/mongodb/lib/bson.js.map  (1 line; generated, vendored, executable file)

@@ -0,0 +1 @@
{"version":3,"file":"bson.js","sourceRoot":"","sources":["../src/bson.ts"],"names":[],"mappings":";;;AAEA,6BAoBc;AAnBZ,8FAAA,MAAM,OAAA;AACN,4FAAA,IAAI,OAAA;AACJ,kGAAA,UAAU,OAAA;AACV,kGAAA,UAAU,OAAA;AACV,gGAAA,QAAQ,OAAA;AACR,2GAAA,mBAAmB,OAAA;AACnB,4FAAA,IAAI,OAAA;AACJ,6FAAA,KAAK,OAAA;AACL,kGAAA,UAAU,OAAA;AACV,mGAAA,WAAW,OAAA;AAEX,8FAAA,MAAM,OAAA;AACN,6FAAA,KAAK,OAAA;AACL,4FAAA,IAAI,OAAA;AACJ,8FAAA,MAAM,OAAA;AACN,8FAAA,MAAM,OAAA;AACN,gGAAA,QAAQ,OAAA;AACR,iGAAA,SAAS,OAAA;AACT,iGAAA,SAAS,OAAA;AA4CX,SAAgB,yBAAyB,CAAC,OAA6B;IACrE,MAAM,EACJ,WAAW,EACX,WAAW,EACX,aAAa,EACb,cAAc,EACd,YAAY,EACZ,kBAAkB,EAClB,eAAe,EACf,UAAU,EACV,GAAG,EACH,oBAAoB,EACrB,GAAG,OAAO,CAAC;IACZ,OAAO;QACL,WAAW;QACX,WAAW;QACX,aAAa;QACb,cAAc;QACd,YAAY;QACZ,kBAAkB;QAClB,eAAe;QACf,UAAU;QACV,GAAG;QACH,oBAAoB;KACrB,CAAC;AACJ,CAAC;AAzBD,8DAyBC;AAED;;;;;GAKG;AACH,SAAgB,kBAAkB,CAChC,OAA8B,EAC9B,MAA+C;IAE/C,MAAM,aAAa,GAAG,MAAM,EAAE,WAAW,CAAC;IAC1C,OAAO;QACL,GAAG,EAAE,OAAO,EAAE,GAAG,IAAI,aAAa,EAAE,GAAG,IAAI,KAAK;QAChD,WAAW,EAAE,OAAO,EAAE,WAAW,IAAI,aAAa,EAAE,WAAW,IAAI,KAAK;QACxE,YAAY,EAAE,OAAO,EAAE,YAAY,IAAI,aAAa,EAAE,YAAY,IAAI,IAAI;QAC1E,aAAa,EAAE,OAAO,EAAE,aAAa,IAAI,aAAa,EAAE,aAAa,IAAI,IAAI;QAC7E,cAAc,EAAE,OAAO,EAAE,cAAc,IAAI,aAAa,EAAE,cAAc,IAAI,KAAK;QACjF,eAAe,EAAE,OAAO,EAAE,eAAe,IAAI,aAAa,EAAE,eAAe,IAAI,KAAK;QACpF,UAAU,EAAE,OAAO,EAAE,UAAU,IAAI,aAAa,EAAE,UAAU,IAAI,KAAK;QACrE,kBAAkB,EAAE,OAAO,EAAE,kBAAkB,IAAI,aAAa,EAAE,kBAAkB,IAAI,KAAK;QAC7F,WAAW,EAAE,OAAO,EAAE,WAAW,IAAI,aAAa,EAAE,WAAW,IAAI,EAAE;QACrE,oBAAoB,EAClB,OAAO,EAAE,oBAAoB,IAAI,aAAa,EAAE,oBAAoB,IAAI,IAAI;KAC/E,CAAC;AACJ,CAAC;AAlBD,gDAkBC"}
VISUALIZACION/node_modules/mongodb/lib/bulk/common.js  (895 lines; generated, vendored, executable file)

@@ -0,0 +1,895 @@
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.BulkOperationBase = exports.FindOperators = exports.MongoBulkWriteError = exports.mergeBatchResults = exports.WriteError = exports.WriteConcernError = exports.BulkWriteResult = exports.Batch = exports.BatchType = void 0;
|
||||
const bson_1 = require("../bson");
|
||||
const error_1 = require("../error");
|
||||
const delete_1 = require("../operations/delete");
|
||||
const execute_operation_1 = require("../operations/execute_operation");
|
||||
const insert_1 = require("../operations/insert");
|
||||
const operation_1 = require("../operations/operation");
|
||||
const update_1 = require("../operations/update");
|
||||
const utils_1 = require("../utils");
|
||||
const write_concern_1 = require("../write_concern");
|
||||
/** @internal */
|
||||
const kServerError = Symbol('serverError');
|
||||
/** @public */
|
||||
exports.BatchType = Object.freeze({
|
||||
INSERT: 1,
|
||||
UPDATE: 2,
|
||||
DELETE: 3
|
||||
});
|
||||
/**
|
||||
* Keeps the state of a unordered batch so we can rewrite the results
|
||||
* correctly after command execution
|
||||
*
|
||||
* @public
|
||||
*/
|
||||
class Batch {
|
||||
constructor(batchType, originalZeroIndex) {
|
||||
this.originalZeroIndex = originalZeroIndex;
|
||||
this.currentIndex = 0;
|
||||
this.originalIndexes = [];
|
||||
this.batchType = batchType;
|
||||
this.operations = [];
|
||||
this.size = 0;
|
||||
this.sizeBytes = 0;
|
||||
}
|
||||
}
|
||||
exports.Batch = Batch;
|
||||
/**
|
||||
* @public
|
||||
* The result of a bulk write.
|
||||
*/
|
||||
class BulkWriteResult {
|
||||
static generateIdMap(ids) {
|
||||
const idMap = {};
|
||||
for (const doc of ids) {
|
||||
idMap[doc.index] = doc._id;
|
||||
}
|
||||
return idMap;
|
||||
}
|
||||
/**
|
||||
* Create a new BulkWriteResult instance
|
||||
* @internal
|
||||
*/
|
||||
constructor(bulkResult) {
|
||||
this.result = bulkResult;
|
||||
this.insertedCount = this.result.nInserted ?? 0;
|
||||
this.matchedCount = this.result.nMatched ?? 0;
|
||||
this.modifiedCount = this.result.nModified ?? 0;
|
||||
this.deletedCount = this.result.nRemoved ?? 0;
|
||||
this.upsertedCount = this.result.upserted.length ?? 0;
|
||||
this.upsertedIds = BulkWriteResult.generateIdMap(this.result.upserted);
|
||||
this.insertedIds = BulkWriteResult.generateIdMap(this.result.insertedIds);
|
||||
Object.defineProperty(this, 'result', { value: this.result, enumerable: false });
|
||||
}
|
||||
/** Evaluates to true if the bulk operation correctly executes */
|
||||
get ok() {
|
||||
return this.result.ok;
|
||||
}
|
||||
/**
|
||||
* The number of inserted documents
|
||||
* @deprecated Use insertedCount instead.
|
||||
*/
|
||||
get nInserted() {
|
||||
return this.result.nInserted;
|
||||
}
|
||||
/**
|
||||
* Number of upserted documents
|
||||
* @deprecated User upsertedCount instead.
|
||||
*/
|
||||
get nUpserted() {
|
||||
return this.result.nUpserted;
|
||||
}
|
||||
/**
|
||||
* Number of matched documents
|
||||
* @deprecated Use matchedCount instead.
|
||||
*/
|
||||
get nMatched() {
|
||||
return this.result.nMatched;
|
||||
}
|
||||
/**
|
||||
* Number of documents updated physically on disk
|
||||
* @deprecated Use modifiedCount instead.
|
||||
*/
|
||||
get nModified() {
|
||||
return this.result.nModified;
|
||||
}
|
||||
/**
|
||||
* Number of removed documents
|
||||
* @deprecated Use deletedCount instead.
|
||||
*/
|
||||
get nRemoved() {
|
||||
return this.result.nRemoved;
|
||||
}
|
||||
/**
|
||||
* Returns an array of all inserted ids
|
||||
* @deprecated Use insertedIds instead.
|
||||
*/
|
||||
getInsertedIds() {
|
||||
return this.result.insertedIds;
|
||||
}
|
||||
/**
|
||||
* Returns an array of all upserted ids
|
||||
* @deprecated Use upsertedIds instead.
|
||||
*/
|
||||
getUpsertedIds() {
|
||||
return this.result.upserted;
|
||||
}
|
||||
/** Returns the upserted id at the given index */
|
||||
getUpsertedIdAt(index) {
|
||||
return this.result.upserted[index];
|
||||
}
|
||||
/** Returns raw internal result */
|
||||
getRawResponse() {
|
||||
return this.result;
|
||||
}
|
||||
/** Returns true if the bulk operation contains a write error */
|
||||
hasWriteErrors() {
|
||||
return this.result.writeErrors.length > 0;
|
||||
}
|
||||
/** Returns the number of write errors off the bulk operation */
|
||||
getWriteErrorCount() {
|
||||
return this.result.writeErrors.length;
|
||||
}
|
||||
/** Returns a specific write error object */
|
||||
getWriteErrorAt(index) {
|
||||
return index < this.result.writeErrors.length ? this.result.writeErrors[index] : undefined;
|
||||
}
|
||||
/** Retrieve all write errors */
|
||||
getWriteErrors() {
|
||||
return this.result.writeErrors;
|
||||
}
|
||||
/** Retrieve the write concern error if one exists */
|
||||
getWriteConcernError() {
|
||||
if (this.result.writeConcernErrors.length === 0) {
|
||||
return;
|
||||
}
|
||||
else if (this.result.writeConcernErrors.length === 1) {
|
||||
// Return the error
|
||||
return this.result.writeConcernErrors[0];
|
||||
}
|
||||
else {
|
||||
// Combine the errors
|
||||
let errmsg = '';
|
||||
for (let i = 0; i < this.result.writeConcernErrors.length; i++) {
|
||||
const err = this.result.writeConcernErrors[i];
|
||||
errmsg = errmsg + err.errmsg;
|
||||
// TODO: Something better
|
||||
if (i === 0)
|
||||
errmsg = errmsg + ' and ';
|
||||
}
|
||||
return new WriteConcernError({ errmsg, code: error_1.MONGODB_ERROR_CODES.WriteConcernFailed });
|
||||
}
|
||||
}
|
||||
toString() {
|
||||
return `BulkWriteResult(${this.result})`;
|
||||
}
|
||||
isOk() {
|
||||
return this.result.ok === 1;
|
||||
}
|
||||
}
|
||||
exports.BulkWriteResult = BulkWriteResult;
|
||||
/**
|
||||
* An error representing a failure by the server to apply the requested write concern to the bulk operation.
|
||||
* @public
|
||||
* @category Error
|
||||
*/
|
||||
class WriteConcernError {
|
||||
constructor(error) {
|
||||
this[kServerError] = error;
|
||||
}
|
||||
/** Write concern error code. */
|
||||
get code() {
|
||||
return this[kServerError].code;
|
||||
}
|
||||
/** Write concern error message. */
|
||||
get errmsg() {
|
||||
return this[kServerError].errmsg;
|
||||
}
|
||||
/** Write concern error info. */
|
||||
get errInfo() {
|
||||
return this[kServerError].errInfo;
|
||||
}
|
||||
toJSON() {
|
||||
return this[kServerError];
|
||||
}
|
||||
toString() {
|
||||
return `WriteConcernError(${this.errmsg})`;
|
||||
}
|
||||
}
|
||||
exports.WriteConcernError = WriteConcernError;
|
||||
/**
|
||||
* An error that occurred during a BulkWrite on the server.
|
||||
* @public
|
||||
* @category Error
|
||||
*/
|
||||
class WriteError {
|
||||
constructor(err) {
|
||||
this.err = err;
|
||||
}
|
||||
/** WriteError code. */
|
||||
get code() {
|
||||
return this.err.code;
|
||||
}
|
||||
/** WriteError original bulk operation index. */
|
||||
get index() {
|
||||
return this.err.index;
|
||||
}
|
||||
/** WriteError message. */
|
||||
get errmsg() {
|
||||
return this.err.errmsg;
|
||||
}
|
||||
/** WriteError details. */
|
||||
get errInfo() {
|
||||
return this.err.errInfo;
|
||||
}
|
||||
/** Returns the underlying operation that caused the error */
|
||||
getOperation() {
|
||||
return this.err.op;
|
||||
}
|
||||
toJSON() {
|
||||
return { code: this.err.code, index: this.err.index, errmsg: this.err.errmsg, op: this.err.op };
|
||||
}
|
||||
toString() {
|
||||
return `WriteError(${JSON.stringify(this.toJSON())})`;
|
||||
}
|
||||
}
|
||||
exports.WriteError = WriteError;
|
||||
/** Merges results into shared data structure */
|
||||
function mergeBatchResults(batch, bulkResult, err, result) {
|
||||
// If we have an error set the result to be the err object
|
||||
if (err) {
|
||||
result = err;
|
||||
}
|
||||
else if (result && result.result) {
|
||||
result = result.result;
|
||||
}
|
||||
if (result == null) {
|
||||
return;
|
||||
}
|
||||
// Do we have a top level error stop processing and return
|
||||
if (result.ok === 0 && bulkResult.ok === 1) {
|
||||
bulkResult.ok = 0;
|
||||
const writeError = {
|
||||
index: 0,
|
||||
code: result.code || 0,
|
||||
errmsg: result.message,
|
||||
errInfo: result.errInfo,
|
||||
op: batch.operations[0]
|
||||
};
|
||||
bulkResult.writeErrors.push(new WriteError(writeError));
|
||||
return;
|
||||
}
|
||||
else if (result.ok === 0 && bulkResult.ok === 0) {
|
||||
return;
|
||||
}
|
||||
// If we have an insert Batch type
|
||||
if (isInsertBatch(batch) && result.n) {
|
||||
bulkResult.nInserted = bulkResult.nInserted + result.n;
|
||||
}
|
||||
// If we have an insert Batch type
|
||||
if (isDeleteBatch(batch) && result.n) {
|
||||
bulkResult.nRemoved = bulkResult.nRemoved + result.n;
|
||||
}
|
||||
let nUpserted = 0;
|
||||
// We have an array of upserted values, we need to rewrite the indexes
|
||||
if (Array.isArray(result.upserted)) {
|
||||
nUpserted = result.upserted.length;
|
||||
for (let i = 0; i < result.upserted.length; i++) {
|
||||
bulkResult.upserted.push({
|
||||
index: result.upserted[i].index + batch.originalZeroIndex,
|
||||
_id: result.upserted[i]._id
|
||||
});
|
||||
}
|
||||
}
|
||||
else if (result.upserted) {
|
||||
nUpserted = 1;
|
||||
bulkResult.upserted.push({
|
||||
index: batch.originalZeroIndex,
|
||||
_id: result.upserted
|
||||
});
|
||||
}
|
||||
// If we have an update Batch type
|
||||
if (isUpdateBatch(batch) && result.n) {
|
||||
const nModified = result.nModified;
|
||||
bulkResult.nUpserted = bulkResult.nUpserted + nUpserted;
|
||||
bulkResult.nMatched = bulkResult.nMatched + (result.n - nUpserted);
|
||||
if (typeof nModified === 'number') {
|
||||
bulkResult.nModified = bulkResult.nModified + nModified;
|
||||
}
|
||||
else {
|
||||
bulkResult.nModified = 0;
|
||||
}
|
||||
}
|
||||
if (Array.isArray(result.writeErrors)) {
|
||||
for (let i = 0; i < result.writeErrors.length; i++) {
|
||||
const writeError = {
|
||||
index: batch.originalIndexes[result.writeErrors[i].index],
|
||||
code: result.writeErrors[i].code,
|
||||
errmsg: result.writeErrors[i].errmsg,
|
||||
errInfo: result.writeErrors[i].errInfo,
|
||||
op: batch.operations[result.writeErrors[i].index]
|
||||
};
|
||||
bulkResult.writeErrors.push(new WriteError(writeError));
|
||||
}
|
||||
}
|
||||
if (result.writeConcernError) {
|
||||
bulkResult.writeConcernErrors.push(new WriteConcernError(result.writeConcernError));
|
||||
}
|
||||
}
|
||||
exports.mergeBatchResults = mergeBatchResults;
|
||||
function executeCommands(bulkOperation, options, callback) {
|
||||
if (bulkOperation.s.batches.length === 0) {
|
||||
return callback(undefined, new BulkWriteResult(bulkOperation.s.bulkResult));
|
||||
}
|
||||
const batch = bulkOperation.s.batches.shift();
|
||||
function resultHandler(err, result) {
|
||||
// Error is a driver related error not a bulk op error, return early
|
||||
if (err && 'message' in err && !(err instanceof error_1.MongoWriteConcernError)) {
|
||||
return callback(new MongoBulkWriteError(err, new BulkWriteResult(bulkOperation.s.bulkResult)));
|
||||
}
|
||||
if (err instanceof error_1.MongoWriteConcernError) {
|
||||
return handleMongoWriteConcernError(batch, bulkOperation.s.bulkResult, err, callback);
|
||||
}
|
||||
// Merge the results together
|
||||
mergeBatchResults(batch, bulkOperation.s.bulkResult, err, result);
|
||||
const writeResult = new BulkWriteResult(bulkOperation.s.bulkResult);
|
||||
if (bulkOperation.handleWriteError(callback, writeResult))
|
||||
return;
|
||||
// Execute the next command in line
|
||||
executeCommands(bulkOperation, options, callback);
|
||||
}
|
||||
const finalOptions = (0, utils_1.resolveOptions)(bulkOperation, {
|
||||
...options,
|
||||
ordered: bulkOperation.isOrdered
|
||||
});
|
||||
if (finalOptions.bypassDocumentValidation !== true) {
|
||||
delete finalOptions.bypassDocumentValidation;
|
||||
}
|
||||
// Set an operationIf if provided
|
||||
if (bulkOperation.operationId) {
|
||||
resultHandler.operationId = bulkOperation.operationId;
|
||||
}
|
||||
// Is the bypassDocumentValidation options specific
|
||||
if (bulkOperation.s.bypassDocumentValidation === true) {
|
||||
finalOptions.bypassDocumentValidation = true;
|
||||
}
|
||||
// Is the checkKeys option disabled
|
||||
if (bulkOperation.s.checkKeys === false) {
|
||||
finalOptions.checkKeys = false;
|
||||
}
|
||||
if (finalOptions.retryWrites) {
|
||||
if (isUpdateBatch(batch)) {
|
||||
finalOptions.retryWrites = finalOptions.retryWrites && !batch.operations.some(op => op.multi);
|
||||
}
|
||||
if (isDeleteBatch(batch)) {
|
||||
finalOptions.retryWrites =
|
||||
finalOptions.retryWrites && !batch.operations.some(op => op.limit === 0);
|
||||
}
|
||||
}
|
||||
try {
|
||||
if (isInsertBatch(batch)) {
|
||||
(0, execute_operation_1.executeOperation)(bulkOperation.s.collection.client, new insert_1.InsertOperation(bulkOperation.s.namespace, batch.operations, finalOptions), resultHandler);
|
||||
}
|
||||
else if (isUpdateBatch(batch)) {
|
||||
(0, execute_operation_1.executeOperation)(bulkOperation.s.collection.client, new update_1.UpdateOperation(bulkOperation.s.namespace, batch.operations, finalOptions), resultHandler);
|
||||
}
|
||||
else if (isDeleteBatch(batch)) {
|
||||
(0, execute_operation_1.executeOperation)(bulkOperation.s.collection.client, new delete_1.DeleteOperation(bulkOperation.s.namespace, batch.operations, finalOptions), resultHandler);
|
||||
}
|
||||
}
|
||||
catch (err) {
|
||||
// Force top level error
|
||||
err.ok = 0;
|
||||
// Merge top level error and return
|
||||
mergeBatchResults(batch, bulkOperation.s.bulkResult, err, undefined);
|
||||
callback();
|
||||
}
|
||||
}
|
||||
function handleMongoWriteConcernError(batch, bulkResult, err, callback) {
    mergeBatchResults(batch, bulkResult, undefined, err.result);
    callback(new MongoBulkWriteError({
        message: err.result?.writeConcernError.errmsg,
        code: err.result?.writeConcernError.result
    }, new BulkWriteResult(bulkResult)));
}
/**
 * An error indicating an unsuccessful Bulk Write
 * @public
 * @category Error
 */
class MongoBulkWriteError extends error_1.MongoServerError {
    /** Creates a new MongoBulkWriteError */
    constructor(error, result) {
        super(error);
        this.writeErrors = [];
        if (error instanceof WriteConcernError)
            this.err = error;
        else if (!(error instanceof Error)) {
            this.message = error.message;
            this.code = error.code;
            this.writeErrors = error.writeErrors ?? [];
        }
        this.result = result;
        Object.assign(this, error);
    }
    get name() {
        return 'MongoBulkWriteError';
    }
    /** Number of documents inserted. */
    get insertedCount() {
        return this.result.insertedCount;
    }
    /** Number of documents matched for update. */
    get matchedCount() {
        return this.result.matchedCount;
    }
    /** Number of documents modified. */
    get modifiedCount() {
        return this.result.modifiedCount;
    }
    /** Number of documents deleted. */
    get deletedCount() {
        return this.result.deletedCount;
    }
    /** Number of documents upserted. */
    get upsertedCount() {
        return this.result.upsertedCount;
    }
    /** Ids generated for inserted documents; the hash key is the index of the originating operation */
    get insertedIds() {
        return this.result.insertedIds;
    }
    /** Ids generated for upserted documents; the hash key is the index of the originating operation */
    get upsertedIds() {
        return this.result.upsertedIds;
    }
}
exports.MongoBulkWriteError = MongoBulkWriteError;
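// A minimal usage sketch (assumes a connected `collection` handle; not part
// of this file): callers can distinguish per-document write failures from
// other driver errors by checking for MongoBulkWriteError.
//
//   try {
//     await collection.bulkWrite([{ insertOne: { document: { _id: 1 } } }]);
//   } catch (error) {
//     if (error instanceof MongoBulkWriteError) {
//       console.log(error.writeErrors, error.result.insertedCount);
//     }
//   }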
/**
 * A builder object that is returned from {@link BulkOperationBase#find}.
 * It is used to build a write operation that involves a query filter.
 *
 * @public
 */
class FindOperators {
    /**
     * Creates a new FindOperators object.
     * @internal
     */
    constructor(bulkOperation) {
        this.bulkOperation = bulkOperation;
    }
    /** Add a multiple update operation to the bulk operation */
    update(updateDocument) {
        const currentOp = buildCurrentOp(this.bulkOperation);
        return this.bulkOperation.addToOperationsList(exports.BatchType.UPDATE, (0, update_1.makeUpdateStatement)(currentOp.selector, updateDocument, {
            ...currentOp,
            multi: true
        }));
    }
    /** Add a single update operation to the bulk operation */
    updateOne(updateDocument) {
        if (!(0, utils_1.hasAtomicOperators)(updateDocument)) {
            throw new error_1.MongoInvalidArgumentError('Update document requires atomic operators');
        }
        const currentOp = buildCurrentOp(this.bulkOperation);
        return this.bulkOperation.addToOperationsList(exports.BatchType.UPDATE, (0, update_1.makeUpdateStatement)(currentOp.selector, updateDocument, { ...currentOp, multi: false }));
    }
    /** Add a replace one operation to the bulk operation */
    replaceOne(replacement) {
        if ((0, utils_1.hasAtomicOperators)(replacement)) {
            throw new error_1.MongoInvalidArgumentError('Replacement document must not use atomic operators');
        }
        const currentOp = buildCurrentOp(this.bulkOperation);
        return this.bulkOperation.addToOperationsList(exports.BatchType.UPDATE, (0, update_1.makeUpdateStatement)(currentOp.selector, replacement, { ...currentOp, multi: false }));
    }
    /** Add a delete one operation to the bulk operation */
    deleteOne() {
        const currentOp = buildCurrentOp(this.bulkOperation);
        return this.bulkOperation.addToOperationsList(exports.BatchType.DELETE, (0, delete_1.makeDeleteStatement)(currentOp.selector, { ...currentOp, limit: 1 }));
    }
    /** Add a delete many operation to the bulk operation */
    delete() {
        const currentOp = buildCurrentOp(this.bulkOperation);
        return this.bulkOperation.addToOperationsList(exports.BatchType.DELETE, (0, delete_1.makeDeleteStatement)(currentOp.selector, { ...currentOp, limit: 0 }));
    }
    /** Upsert modifier for update bulk operation, noting that this operation is an upsert. */
    upsert() {
        if (!this.bulkOperation.s.currentOp) {
            this.bulkOperation.s.currentOp = {};
        }
        this.bulkOperation.s.currentOp.upsert = true;
        return this;
    }
    /** Specifies the collation for the query condition. */
    collation(collation) {
        if (!this.bulkOperation.s.currentOp) {
            this.bulkOperation.s.currentOp = {};
        }
        this.bulkOperation.s.currentOp.collation = collation;
        return this;
    }
    /** Specifies arrayFilters for UpdateOne or UpdateMany bulk operations. */
    arrayFilters(arrayFilters) {
        if (!this.bulkOperation.s.currentOp) {
            this.bulkOperation.s.currentOp = {};
        }
        this.bulkOperation.s.currentOp.arrayFilters = arrayFilters;
        return this;
    }
    /** Specifies hint for the bulk operation. */
    hint(hint) {
        if (!this.bulkOperation.s.currentOp) {
            this.bulkOperation.s.currentOp = {};
        }
        this.bulkOperation.s.currentOp.hint = hint;
        return this;
    }
}
exports.FindOperators = FindOperators;
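// A sketch of the fluent builder above (public bulk API; the filter, index
// hint, and update values are illustrative only):
//
//   const bulk = collection.initializeUnorderedBulkOp();
//   bulk.find({ status: 'active' })
//     .collation({ locale: 'en', strength: 2 })
//     .hint({ status: 1 })
//     .upsert()
//     .updateOne({ $set: { seen: true } });
//   await bulk.execute();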
/**
 * TODO(NODE-4063)
 * BulkWrites merge complexity is implemented in executeCommands
 * This provides a vehicle to treat bulkOperations like any other operation (hence "shim")
 * We would like this logic to simply live inside the BulkWriteOperation class
 * @internal
 */
class BulkWriteShimOperation extends operation_1.AbstractCallbackOperation {
    constructor(bulkOperation, options) {
        super(options);
        this.bulkOperation = bulkOperation;
    }
    executeCallback(server, session, callback) {
        if (this.options.session == null) {
            // An implicit session could have been created by 'executeOperation',
            // so if we stick it on finalOptions here, each bulk operation
            // will use this same session; it'll be passed in the same way
            // an explicit session would be
            this.options.session = session;
        }
        return executeCommands(this.bulkOperation, this.options, callback);
    }
}
/** @public */
class BulkOperationBase {
    /**
     * Create a new OrderedBulkOperation or UnorderedBulkOperation instance
     * @internal
     */
    constructor(collection, options, isOrdered) {
        // determine whether bulkOperation is ordered or unordered
        this.isOrdered = isOrdered;
        const topology = (0, utils_1.getTopology)(collection);
        options = options == null ? {} : options;
        // TODO: Bring in driver information from hello
        // Get the namespace for the write operations
        const namespace = collection.s.namespace;
        // Used to mark operation as executed
        const executed = false;
        // Current item
        const currentOp = undefined;
        // Set max byte size
        const hello = topology.lastHello();
        // If we have autoEncryption on, batch-splitting must be done on 2mb chunks, but single documents
        // over 2mb are still allowed
        const usingAutoEncryption = !!(topology.s.options && topology.s.options.autoEncrypter);
        const maxBsonObjectSize = hello && hello.maxBsonObjectSize ? hello.maxBsonObjectSize : 1024 * 1024 * 16;
        const maxBatchSizeBytes = usingAutoEncryption ? 1024 * 1024 * 2 : maxBsonObjectSize;
        const maxWriteBatchSize = hello && hello.maxWriteBatchSize ? hello.maxWriteBatchSize : 1000;
        // Calculates the largest possible size of an Array key, represented as a BSON string
        // element. This calculation:
        // 1 byte for BSON type
        // # of bytes = length of (string representation of (maxWriteBatchSize - 1))
        // + 1 byte for null terminator
        const maxKeySize = (maxWriteBatchSize - 1).toString(10).length + 2;
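        // Worked example with the fallback above: for maxWriteBatchSize = 1000
        // the largest array index is "999" (3 characters), so maxKeySize is
        // 3 + 2 = 5 bytes of per-operation key overhead.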
        // Final options for retryable writes
        let finalOptions = Object.assign({}, options);
        finalOptions = (0, utils_1.applyRetryableWrites)(finalOptions, collection.s.db);
        // Final results
        const bulkResult = {
            ok: 1,
            writeErrors: [],
            writeConcernErrors: [],
            insertedIds: [],
            nInserted: 0,
            nUpserted: 0,
            nMatched: 0,
            nModified: 0,
            nRemoved: 0,
            upserted: []
        };
        // Internal state
        this.s = {
            // Final result
            bulkResult,
            // Current batch state
            currentBatch: undefined,
            currentIndex: 0,
            // ordered specific
            currentBatchSize: 0,
            currentBatchSizeBytes: 0,
            // unordered specific
            currentInsertBatch: undefined,
            currentUpdateBatch: undefined,
            currentRemoveBatch: undefined,
            batches: [],
            // Write concern
            writeConcern: write_concern_1.WriteConcern.fromOptions(options),
            // Max batch size options
            maxBsonObjectSize,
            maxBatchSizeBytes,
            maxWriteBatchSize,
            maxKeySize,
            // Namespace
            namespace,
            // Topology
            topology,
            // Options
            options: finalOptions,
            // BSON options
            bsonOptions: (0, bson_1.resolveBSONOptions)(options),
            // Current operation
            currentOp,
            // Executed
            executed,
            // Collection
            collection,
            // Fundamental error
            err: undefined,
            // check keys
            checkKeys: typeof options.checkKeys === 'boolean' ? options.checkKeys : false
        };
        // bypass Validation
        if (options.bypassDocumentValidation === true) {
            this.s.bypassDocumentValidation = true;
        }
    }
    /**
     * Add a single insert document to the bulk operation
     *
     * @example
     * ```ts
     * const bulkOp = collection.initializeOrderedBulkOp();
     *
     * // Adds three inserts to the bulkOp.
     * bulkOp
     *   .insert({ a: 1 })
     *   .insert({ b: 2 })
     *   .insert({ c: 3 });
     * await bulkOp.execute();
     * ```
     */
    insert(document) {
        if (document._id == null && !shouldForceServerObjectId(this)) {
            document._id = new bson_1.ObjectId();
        }
        return this.addToOperationsList(exports.BatchType.INSERT, document);
    }
    /**
     * Builds a find operation for an update/updateOne/delete/deleteOne/replaceOne.
     * Returns a builder object used to complete the definition of the operation.
     *
     * @example
     * ```ts
     * const bulkOp = collection.initializeOrderedBulkOp();
     *
     * // Add an updateOne to the bulkOp
     * bulkOp.find({ a: 1 }).updateOne({ $set: { b: 2 } });
     *
     * // Add an updateMany to the bulkOp
     * bulkOp.find({ c: 3 }).update({ $set: { d: 4 } });
     *
     * // Add an upsert
     * bulkOp.find({ e: 5 }).upsert().updateOne({ $set: { f: 6 } });
     *
     * // Add a deletion
     * bulkOp.find({ g: 7 }).deleteOne();
     *
     * // Add a multi deletion
     * bulkOp.find({ h: 8 }).delete();
     *
     * // Add a replaceOne
     * bulkOp.find({ i: 9 }).replaceOne({ writeConcern: { j: 10 } });
     *
     * // Update using a pipeline (requires MongoDB 4.2 or higher)
     * bulkOp.find({ k: 11, y: { $exists: true }, z: { $exists: true } }).updateOne([
     *   { $set: { total: { $sum: ['$y', '$z'] } } }
     * ]);
     *
     * // All of the ops will now be executed
     * await bulkOp.execute();
     * ```
     */
    find(selector) {
        if (!selector) {
            throw new error_1.MongoInvalidArgumentError('Bulk find operation must specify a selector');
        }
        // Save a current selector
        this.s.currentOp = {
            selector: selector
        };
        return new FindOperators(this);
    }
    /** Specifies a raw operation to perform in the bulk write. */
    raw(op) {
        if (op == null || typeof op !== 'object') {
            throw new error_1.MongoInvalidArgumentError('Operation must be an object with an operation key');
        }
        if ('insertOne' in op) {
            const forceServerObjectId = shouldForceServerObjectId(this);
            if (op.insertOne && op.insertOne.document == null) {
                // NOTE: provided for legacy support, but this is a malformed operation
                if (forceServerObjectId !== true && op.insertOne._id == null) {
                    op.insertOne._id = new bson_1.ObjectId();
                }
                return this.addToOperationsList(exports.BatchType.INSERT, op.insertOne);
            }
            if (forceServerObjectId !== true && op.insertOne.document._id == null) {
                op.insertOne.document._id = new bson_1.ObjectId();
            }
            return this.addToOperationsList(exports.BatchType.INSERT, op.insertOne.document);
        }
        if ('replaceOne' in op || 'updateOne' in op || 'updateMany' in op) {
            if ('replaceOne' in op) {
                if ('q' in op.replaceOne) {
                    throw new error_1.MongoInvalidArgumentError('Raw operations are not allowed');
                }
                const updateStatement = (0, update_1.makeUpdateStatement)(op.replaceOne.filter, op.replaceOne.replacement, { ...op.replaceOne, multi: false });
                if ((0, utils_1.hasAtomicOperators)(updateStatement.u)) {
                    throw new error_1.MongoInvalidArgumentError('Replacement document must not use atomic operators');
                }
                return this.addToOperationsList(exports.BatchType.UPDATE, updateStatement);
            }
            if ('updateOne' in op) {
                if ('q' in op.updateOne) {
                    throw new error_1.MongoInvalidArgumentError('Raw operations are not allowed');
                }
                const updateStatement = (0, update_1.makeUpdateStatement)(op.updateOne.filter, op.updateOne.update, {
                    ...op.updateOne,
                    multi: false
                });
                if (!(0, utils_1.hasAtomicOperators)(updateStatement.u)) {
                    throw new error_1.MongoInvalidArgumentError('Update document requires atomic operators');
                }
                return this.addToOperationsList(exports.BatchType.UPDATE, updateStatement);
            }
            if ('updateMany' in op) {
                if ('q' in op.updateMany) {
                    throw new error_1.MongoInvalidArgumentError('Raw operations are not allowed');
                }
                const updateStatement = (0, update_1.makeUpdateStatement)(op.updateMany.filter, op.updateMany.update, {
                    ...op.updateMany,
                    multi: true
                });
                if (!(0, utils_1.hasAtomicOperators)(updateStatement.u)) {
                    throw new error_1.MongoInvalidArgumentError('Update document requires atomic operators');
                }
                return this.addToOperationsList(exports.BatchType.UPDATE, updateStatement);
            }
        }
        if ('deleteOne' in op) {
            if ('q' in op.deleteOne) {
                throw new error_1.MongoInvalidArgumentError('Raw operations are not allowed');
            }
            return this.addToOperationsList(exports.BatchType.DELETE, (0, delete_1.makeDeleteStatement)(op.deleteOne.filter, { ...op.deleteOne, limit: 1 }));
        }
        if ('deleteMany' in op) {
            if ('q' in op.deleteMany) {
                throw new error_1.MongoInvalidArgumentError('Raw operations are not allowed');
            }
            return this.addToOperationsList(exports.BatchType.DELETE, (0, delete_1.makeDeleteStatement)(op.deleteMany.filter, { ...op.deleteMany, limit: 0 }));
        }
        // otherwise an unknown operation was provided
        throw new error_1.MongoInvalidArgumentError('bulkWrite only supports insertOne, updateOne, updateMany, deleteOne, deleteMany');
    }
    get bsonOptions() {
        return this.s.bsonOptions;
    }
    get writeConcern() {
        return this.s.writeConcern;
    }
    get batches() {
        const batches = [...this.s.batches];
        if (this.isOrdered) {
            if (this.s.currentBatch)
                batches.push(this.s.currentBatch);
        }
        else {
            if (this.s.currentInsertBatch)
                batches.push(this.s.currentInsertBatch);
            if (this.s.currentUpdateBatch)
                batches.push(this.s.currentUpdateBatch);
            if (this.s.currentRemoveBatch)
                batches.push(this.s.currentRemoveBatch);
        }
        return batches;
    }
    async execute(options = {}) {
        if (this.s.executed) {
            throw new error_1.MongoBatchReExecutionError();
        }
        const writeConcern = write_concern_1.WriteConcern.fromOptions(options);
        if (writeConcern) {
            this.s.writeConcern = writeConcern;
        }
        // If we have a current batch, push it onto the batch queue
        if (this.isOrdered) {
            if (this.s.currentBatch)
                this.s.batches.push(this.s.currentBatch);
        }
        else {
            if (this.s.currentInsertBatch)
                this.s.batches.push(this.s.currentInsertBatch);
            if (this.s.currentUpdateBatch)
                this.s.batches.push(this.s.currentUpdateBatch);
            if (this.s.currentRemoveBatch)
                this.s.batches.push(this.s.currentRemoveBatch);
        }
        // If we have no operations in the bulk raise an error
        if (this.s.batches.length === 0) {
            throw new error_1.MongoInvalidArgumentError('Invalid BulkOperation, Batch cannot be empty');
        }
        this.s.executed = true;
        const finalOptions = { ...this.s.options, ...options };
        const operation = new BulkWriteShimOperation(this, finalOptions);
        return (0, execute_operation_1.executeOperation)(this.s.collection.client, operation);
    }
    /**
     * Handles the write error before executing commands
     * @internal
     */
    handleWriteError(callback, writeResult) {
        if (this.s.bulkResult.writeErrors.length > 0) {
            const msg = this.s.bulkResult.writeErrors[0].errmsg
                ? this.s.bulkResult.writeErrors[0].errmsg
                : 'write operation failed';
            callback(new MongoBulkWriteError({
                message: msg,
                code: this.s.bulkResult.writeErrors[0].code,
                writeErrors: this.s.bulkResult.writeErrors
            }, writeResult));
            return true;
        }
        const writeConcernError = writeResult.getWriteConcernError();
        if (writeConcernError) {
            callback(new MongoBulkWriteError(writeConcernError, writeResult));
            return true;
        }
        return false;
    }
}
exports.BulkOperationBase = BulkOperationBase;
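// Note the ordered/unordered split: UnorderedBulkOperation overrides
// handleWriteError (see unordered.js below) to return false while batches
// remain queued, so unordered bulks only surface accumulated write errors
// after every batch has been attempted.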
Object.defineProperty(BulkOperationBase.prototype, 'length', {
    enumerable: true,
    get() {
        return this.s.currentIndex;
    }
});
function shouldForceServerObjectId(bulkOperation) {
    if (typeof bulkOperation.s.options.forceServerObjectId === 'boolean') {
        return bulkOperation.s.options.forceServerObjectId;
    }
    if (typeof bulkOperation.s.collection.s.db.options?.forceServerObjectId === 'boolean') {
        return bulkOperation.s.collection.s.db.options?.forceServerObjectId;
    }
    return false;
}
function isInsertBatch(batch) {
    return batch.batchType === exports.BatchType.INSERT;
}
function isUpdateBatch(batch) {
    return batch.batchType === exports.BatchType.UPDATE;
}
function isDeleteBatch(batch) {
    return batch.batchType === exports.BatchType.DELETE;
}
function buildCurrentOp(bulkOp) {
    let { currentOp } = bulkOp.s;
    bulkOp.s.currentOp = undefined;
    if (!currentOp)
        currentOp = {};
    return currentOp;
}
//# sourceMappingURL=common.js.map
1
VISUALIZACION/node_modules/mongodb/lib/bulk/common.js.map
generated
vendored
Executable file
File diff suppressed because one or more lines are too long
67
VISUALIZACION/node_modules/mongodb/lib/bulk/ordered.js
generated
vendored
Executable file
@@ -0,0 +1,67 @@
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.OrderedBulkOperation = void 0;
|
||||
const BSON = require("../bson");
|
||||
const error_1 = require("../error");
|
||||
const common_1 = require("./common");
|
||||
/** @public */
|
||||
class OrderedBulkOperation extends common_1.BulkOperationBase {
|
||||
/** @internal */
|
||||
constructor(collection, options) {
|
||||
super(collection, options, true);
|
||||
}
|
||||
addToOperationsList(batchType, document) {
|
||||
// Get the bsonSize
|
||||
const bsonSize = BSON.calculateObjectSize(document, {
|
||||
checkKeys: false,
|
||||
// Since we don't know what the user selected for BSON options here,
|
||||
// err on the safe side, and check the size with ignoreUndefined: false.
|
||||
ignoreUndefined: false
|
||||
});
|
||||
// Throw error if the doc is bigger than the max BSON size
|
||||
if (bsonSize >= this.s.maxBsonObjectSize)
|
||||
// TODO(NODE-3483): Change this to MongoBSONError
|
||||
throw new error_1.MongoInvalidArgumentError(`Document is larger than the maximum size ${this.s.maxBsonObjectSize}`);
|
||||
// Create a new batch object if we don't have a current one
|
||||
if (this.s.currentBatch == null) {
|
||||
this.s.currentBatch = new common_1.Batch(batchType, this.s.currentIndex);
|
||||
}
|
||||
const maxKeySize = this.s.maxKeySize;
|
||||
// Check if we need to create a new batch
|
||||
if (
|
||||
// New batch if we exceed the max batch op size
|
||||
this.s.currentBatchSize + 1 >= this.s.maxWriteBatchSize ||
|
||||
// New batch if we exceed the maxBatchSizeBytes. Only matters if batch already has a doc,
|
||||
// since we can't sent an empty batch
|
||||
(this.s.currentBatchSize > 0 &&
|
||||
this.s.currentBatchSizeBytes + maxKeySize + bsonSize >= this.s.maxBatchSizeBytes) ||
|
||||
// New batch if the new op does not have the same op type as the current batch
|
||||
this.s.currentBatch.batchType !== batchType) {
|
||||
// Save the batch to the execution stack
|
||||
this.s.batches.push(this.s.currentBatch);
|
||||
// Create a new batch
|
||||
this.s.currentBatch = new common_1.Batch(batchType, this.s.currentIndex);
|
||||
// Reset the current size trackers
|
||||
this.s.currentBatchSize = 0;
|
||||
this.s.currentBatchSizeBytes = 0;
|
||||
}
|
||||
if (batchType === common_1.BatchType.INSERT) {
|
||||
this.s.bulkResult.insertedIds.push({
|
||||
index: this.s.currentIndex,
|
||||
_id: document._id
|
||||
});
|
||||
}
|
||||
// We have an array of documents
|
||||
if (Array.isArray(document)) {
|
||||
throw new error_1.MongoInvalidArgumentError('Operation passed in cannot be an Array');
|
||||
}
|
||||
this.s.currentBatch.originalIndexes.push(this.s.currentIndex);
|
||||
this.s.currentBatch.operations.push(document);
|
||||
this.s.currentBatchSize += 1;
|
||||
this.s.currentBatchSizeBytes += maxKeySize + bsonSize;
|
||||
this.s.currentIndex += 1;
|
||||
return this;
|
||||
}
|
||||
}
|
||||
exports.OrderedBulkOperation = OrderedBulkOperation;
|
||||
//# sourceMappingURL=ordered.js.map
|
||||
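// Sketch of the splitting rules above (illustrative values): in an ordered
// bulk, consecutive ops of one type share a batch and a type change forces a
// new one, so these ops execute as three batches, in order.
//
//   const bulk = collection.initializeOrderedBulkOp();
//   bulk.insert({ a: 1 });                              // batch 1 (inserts)
//   bulk.insert({ a: 2 });                              // batch 1
//   bulk.find({ a: 1 }).updateOne({ $set: { b: 1 } });  // batch 2 (update)
//   bulk.insert({ a: 3 });                              // batch 3 (insert again)
//   await bulk.execute();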
1
VISUALIZACION/node_modules/mongodb/lib/bulk/ordered.js.map
generated
vendored
Executable file
@@ -0,0 +1 @@
{"version":3,"file":"ordered.js","sourceRoot":"","sources":["../../src/bulk/ordered.ts"],"names":[],"mappings":";;;AACA,gCAAgC;AAEhC,oCAAqD;AAGrD,qCAAsF;AAEtF,cAAc;AACd,MAAa,oBAAqB,SAAQ,0BAAiB;IACzD,gBAAgB;IAChB,YAAY,UAAsB,EAAE,OAAyB;QAC3D,KAAK,CAAC,UAAU,EAAE,OAAO,EAAE,IAAI,CAAC,CAAC;IACnC,CAAC;IAED,mBAAmB,CACjB,SAAoB,EACpB,QAAsD;QAEtD,mBAAmB;QACnB,MAAM,QAAQ,GAAG,IAAI,CAAC,mBAAmB,CAAC,QAAQ,EAAE;YAClD,SAAS,EAAE,KAAK;YAChB,oEAAoE;YACpE,wEAAwE;YACxE,eAAe,EAAE,KAAK;SAChB,CAAC,CAAC;QAEV,0DAA0D;QAC1D,IAAI,QAAQ,IAAI,IAAI,CAAC,CAAC,CAAC,iBAAiB;YACtC,iDAAiD;YACjD,MAAM,IAAI,iCAAyB,CACjC,4CAA4C,IAAI,CAAC,CAAC,CAAC,iBAAiB,EAAE,CACvE,CAAC;QAEJ,2DAA2D;QAC3D,IAAI,IAAI,CAAC,CAAC,CAAC,YAAY,IAAI,IAAI,EAAE;YAC/B,IAAI,CAAC,CAAC,CAAC,YAAY,GAAG,IAAI,cAAK,CAAC,SAAS,EAAE,IAAI,CAAC,CAAC,CAAC,YAAY,CAAC,CAAC;SACjE;QAED,MAAM,UAAU,GAAG,IAAI,CAAC,CAAC,CAAC,UAAU,CAAC;QAErC,yCAAyC;QACzC;QACE,+CAA+C;QAC/C,IAAI,CAAC,CAAC,CAAC,gBAAgB,GAAG,CAAC,IAAI,IAAI,CAAC,CAAC,CAAC,iBAAiB;YACvD,yFAAyF;YACzF,qCAAqC;YACrC,CAAC,IAAI,CAAC,CAAC,CAAC,gBAAgB,GAAG,CAAC;gBAC1B,IAAI,CAAC,CAAC,CAAC,qBAAqB,GAAG,UAAU,GAAG,QAAQ,IAAI,IAAI,CAAC,CAAC,CAAC,iBAAiB,CAAC;YACnF,8EAA8E;YAC9E,IAAI,CAAC,CAAC,CAAC,YAAY,CAAC,SAAS,KAAK,SAAS,EAC3C;YACA,wCAAwC;YACxC,IAAI,CAAC,CAAC,CAAC,OAAO,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC,CAAC,YAAY,CAAC,CAAC;YAEzC,qBAAqB;YACrB,IAAI,CAAC,CAAC,CAAC,YAAY,GAAG,IAAI,cAAK,CAAC,SAAS,EAAE,IAAI,CAAC,CAAC,CAAC,YAAY,CAAC,CAAC;YAEhE,kCAAkC;YAClC,IAAI,CAAC,CAAC,CAAC,gBAAgB,GAAG,CAAC,CAAC;YAC5B,IAAI,CAAC,CAAC,CAAC,qBAAqB,GAAG,CAAC,CAAC;SAClC;QAED,IAAI,SAAS,KAAK,kBAAS,CAAC,MAAM,EAAE;YAClC,IAAI,CAAC,CAAC,CAAC,UAAU,CAAC,WAAW,CAAC,IAAI,CAAC;gBACjC,KAAK,EAAE,IAAI,CAAC,CAAC,CAAC,YAAY;gBAC1B,GAAG,EAAG,QAAqB,CAAC,GAAG;aAChC,CAAC,CAAC;SACJ;QAED,gCAAgC;QAChC,IAAI,KAAK,CAAC,OAAO,CAAC,QAAQ,CAAC,EAAE;YAC3B,MAAM,IAAI,iCAAyB,CAAC,wCAAwC,CAAC,CAAC;SAC/E;QAED,IAAI,CAAC,CAAC,CAAC,YAAY,CAAC,eAAe,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC,CAAC,YAAY,CAAC,CAAC;QAC9D,IAAI,CAAC,CAAC,CAAC,YAAY,CAAC,UAAU,CAAC,IAAI,CAAC,QAAQ,CAAC,CAAC;QAC9C,IAAI,CAAC,CAAC,CAAC,gBAAgB,IAAI,CAAC,CAAC;QAC7B,IAAI,CAAC,CAAC,CAAC,qBAAqB,IAAI,UAAU,GAAG,QAAQ,CAAC;QACtD,IAAI,CAAC,CAAC,CAAC,YAAY,IAAI,CAAC,CAAC;QACzB,OAAO,IAAI,CAAC;IACd,CAAC;CACF;AAzED,oDAyEC"}
92
VISUALIZACION/node_modules/mongodb/lib/bulk/unordered.js
generated
vendored
Executable file
@@ -0,0 +1,92 @@
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.UnorderedBulkOperation = void 0;
|
||||
const BSON = require("../bson");
|
||||
const error_1 = require("../error");
|
||||
const common_1 = require("./common");
|
||||
/** @public */
|
||||
class UnorderedBulkOperation extends common_1.BulkOperationBase {
|
||||
/** @internal */
|
||||
constructor(collection, options) {
|
||||
super(collection, options, false);
|
||||
}
|
||||
handleWriteError(callback, writeResult) {
|
||||
if (this.s.batches.length) {
|
||||
return false;
|
||||
}
|
||||
return super.handleWriteError(callback, writeResult);
|
||||
}
|
||||
addToOperationsList(batchType, document) {
|
||||
// Get the bsonSize
|
||||
const bsonSize = BSON.calculateObjectSize(document, {
|
||||
checkKeys: false,
|
||||
// Since we don't know what the user selected for BSON options here,
|
||||
// err on the safe side, and check the size with ignoreUndefined: false.
|
||||
ignoreUndefined: false
|
||||
});
|
||||
// Throw error if the doc is bigger than the max BSON size
|
||||
if (bsonSize >= this.s.maxBsonObjectSize) {
|
||||
// TODO(NODE-3483): Change this to MongoBSONError
|
||||
throw new error_1.MongoInvalidArgumentError(`Document is larger than the maximum size ${this.s.maxBsonObjectSize}`);
|
||||
}
|
||||
// Holds the current batch
|
||||
this.s.currentBatch = undefined;
|
||||
// Get the right type of batch
|
||||
if (batchType === common_1.BatchType.INSERT) {
|
||||
this.s.currentBatch = this.s.currentInsertBatch;
|
||||
}
|
||||
else if (batchType === common_1.BatchType.UPDATE) {
|
||||
this.s.currentBatch = this.s.currentUpdateBatch;
|
||||
}
|
||||
else if (batchType === common_1.BatchType.DELETE) {
|
||||
this.s.currentBatch = this.s.currentRemoveBatch;
|
||||
}
|
||||
const maxKeySize = this.s.maxKeySize;
|
||||
// Create a new batch object if we don't have a current one
|
||||
if (this.s.currentBatch == null) {
|
||||
this.s.currentBatch = new common_1.Batch(batchType, this.s.currentIndex);
|
||||
}
|
||||
// Check if we need to create a new batch
|
||||
if (
|
||||
// New batch if we exceed the max batch op size
|
||||
this.s.currentBatch.size + 1 >= this.s.maxWriteBatchSize ||
|
||||
// New batch if we exceed the maxBatchSizeBytes. Only matters if batch already has a doc,
|
||||
// since we can't sent an empty batch
|
||||
(this.s.currentBatch.size > 0 &&
|
||||
this.s.currentBatch.sizeBytes + maxKeySize + bsonSize >= this.s.maxBatchSizeBytes) ||
|
||||
// New batch if the new op does not have the same op type as the current batch
|
||||
this.s.currentBatch.batchType !== batchType) {
|
||||
// Save the batch to the execution stack
|
||||
this.s.batches.push(this.s.currentBatch);
|
||||
// Create a new batch
|
||||
this.s.currentBatch = new common_1.Batch(batchType, this.s.currentIndex);
|
||||
}
|
||||
// We have an array of documents
|
||||
if (Array.isArray(document)) {
|
||||
throw new error_1.MongoInvalidArgumentError('Operation passed in cannot be an Array');
|
||||
}
|
||||
this.s.currentBatch.operations.push(document);
|
||||
this.s.currentBatch.originalIndexes.push(this.s.currentIndex);
|
||||
this.s.currentIndex = this.s.currentIndex + 1;
|
||||
// Save back the current Batch to the right type
|
||||
if (batchType === common_1.BatchType.INSERT) {
|
||||
this.s.currentInsertBatch = this.s.currentBatch;
|
||||
this.s.bulkResult.insertedIds.push({
|
||||
index: this.s.bulkResult.insertedIds.length,
|
||||
_id: document._id
|
||||
});
|
||||
}
|
||||
else if (batchType === common_1.BatchType.UPDATE) {
|
||||
this.s.currentUpdateBatch = this.s.currentBatch;
|
||||
}
|
||||
else if (batchType === common_1.BatchType.DELETE) {
|
||||
this.s.currentRemoveBatch = this.s.currentBatch;
|
||||
}
|
||||
// Update current batch size
|
||||
this.s.currentBatch.size += 1;
|
||||
this.s.currentBatch.sizeBytes += maxKeySize + bsonSize;
|
||||
return this;
|
||||
}
|
||||
}
|
||||
exports.UnorderedBulkOperation = UnorderedBulkOperation;
|
||||
//# sourceMappingURL=unordered.js.map
|
||||
1
VISUALIZACION/node_modules/mongodb/lib/bulk/unordered.js.map
generated
vendored
Executable file
@@ -0,0 +1 @@
{"version":3,"file":"unordered.js","sourceRoot":"","sources":["../../src/bulk/unordered.ts"],"names":[],"mappings":";;;AACA,gCAAgC;AAEhC,oCAAqD;AAIrD,qCAMkB;AAElB,cAAc;AACd,MAAa,sBAAuB,SAAQ,0BAAiB;IAC3D,gBAAgB;IAChB,YAAY,UAAsB,EAAE,OAAyB;QAC3D,KAAK,CAAC,UAAU,EAAE,OAAO,EAAE,KAAK,CAAC,CAAC;IACpC,CAAC;IAEQ,gBAAgB,CAAC,QAAkB,EAAE,WAA4B;QACxE,IAAI,IAAI,CAAC,CAAC,CAAC,OAAO,CAAC,MAAM,EAAE;YACzB,OAAO,KAAK,CAAC;SACd;QAED,OAAO,KAAK,CAAC,gBAAgB,CAAC,QAAQ,EAAE,WAAW,CAAC,CAAC;IACvD,CAAC;IAED,mBAAmB,CACjB,SAAoB,EACpB,QAAsD;QAEtD,mBAAmB;QACnB,MAAM,QAAQ,GAAG,IAAI,CAAC,mBAAmB,CAAC,QAAQ,EAAE;YAClD,SAAS,EAAE,KAAK;YAEhB,oEAAoE;YACpE,wEAAwE;YACxE,eAAe,EAAE,KAAK;SAChB,CAAC,CAAC;QAEV,0DAA0D;QAC1D,IAAI,QAAQ,IAAI,IAAI,CAAC,CAAC,CAAC,iBAAiB,EAAE;YACxC,iDAAiD;YACjD,MAAM,IAAI,iCAAyB,CACjC,4CAA4C,IAAI,CAAC,CAAC,CAAC,iBAAiB,EAAE,CACvE,CAAC;SACH;QAED,0BAA0B;QAC1B,IAAI,CAAC,CAAC,CAAC,YAAY,GAAG,SAAS,CAAC;QAChC,8BAA8B;QAC9B,IAAI,SAAS,KAAK,kBAAS,CAAC,MAAM,EAAE;YAClC,IAAI,CAAC,CAAC,CAAC,YAAY,GAAG,IAAI,CAAC,CAAC,CAAC,kBAAkB,CAAC;SACjD;aAAM,IAAI,SAAS,KAAK,kBAAS,CAAC,MAAM,EAAE;YACzC,IAAI,CAAC,CAAC,CAAC,YAAY,GAAG,IAAI,CAAC,CAAC,CAAC,kBAAkB,CAAC;SACjD;aAAM,IAAI,SAAS,KAAK,kBAAS,CAAC,MAAM,EAAE;YACzC,IAAI,CAAC,CAAC,CAAC,YAAY,GAAG,IAAI,CAAC,CAAC,CAAC,kBAAkB,CAAC;SACjD;QAED,MAAM,UAAU,GAAG,IAAI,CAAC,CAAC,CAAC,UAAU,CAAC;QAErC,2DAA2D;QAC3D,IAAI,IAAI,CAAC,CAAC,CAAC,YAAY,IAAI,IAAI,EAAE;YAC/B,IAAI,CAAC,CAAC,CAAC,YAAY,GAAG,IAAI,cAAK,CAAC,SAAS,EAAE,IAAI,CAAC,CAAC,CAAC,YAAY,CAAC,CAAC;SACjE;QAED,yCAAyC;QACzC;QACE,+CAA+C;QAC/C,IAAI,CAAC,CAAC,CAAC,YAAY,CAAC,IAAI,GAAG,CAAC,IAAI,IAAI,CAAC,CAAC,CAAC,iBAAiB;YACxD,yFAAyF;YACzF,qCAAqC;YACrC,CAAC,IAAI,CAAC,CAAC,CAAC,YAAY,CAAC,IAAI,GAAG,CAAC;gBAC3B,IAAI,CAAC,CAAC,CAAC,YAAY,CAAC,SAAS,GAAG,UAAU,GAAG,QAAQ,IAAI,IAAI,CAAC,CAAC,CAAC,iBAAiB,CAAC;YACpF,8EAA8E;YAC9E,IAAI,CAAC,CAAC,CAAC,YAAY,CAAC,SAAS,KAAK,SAAS,EAC3C;YACA,wCAAwC;YACxC,IAAI,CAAC,CAAC,CAAC,OAAO,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC,CAAC,YAAY,CAAC,CAAC;YAEzC,qBAAqB;YACrB,IAAI,CAAC,CAAC,CAAC,YAAY,GAAG,IAAI,cAAK,CAAC,SAAS,EAAE,IAAI,CAAC,CAAC,CAAC,YAAY,CAAC,CAAC;SACjE;QAED,gCAAgC;QAChC,IAAI,KAAK,CAAC,OAAO,CAAC,QAAQ,CAAC,EAAE;YAC3B,MAAM,IAAI,iCAAyB,CAAC,wCAAwC,CAAC,CAAC;SAC/E;QAED,IAAI,CAAC,CAAC,CAAC,YAAY,CAAC,UAAU,CAAC,IAAI,CAAC,QAAQ,CAAC,CAAC;QAC9C,IAAI,CAAC,CAAC,CAAC,YAAY,CAAC,eAAe,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC,CAAC,YAAY,CAAC,CAAC;QAC9D,IAAI,CAAC,CAAC,CAAC,YAAY,GAAG,IAAI,CAAC,CAAC,CAAC,YAAY,GAAG,CAAC,CAAC;QAE9C,gDAAgD;QAChD,IAAI,SAAS,KAAK,kBAAS,CAAC,MAAM,EAAE;YAClC,IAAI,CAAC,CAAC,CAAC,kBAAkB,GAAG,IAAI,CAAC,CAAC,CAAC,YAAY,CAAC;YAChD,IAAI,CAAC,CAAC,CAAC,UAAU,CAAC,WAAW,CAAC,IAAI,CAAC;gBACjC,KAAK,EAAE,IAAI,CAAC,CAAC,CAAC,UAAU,CAAC,WAAW,CAAC,MAAM;gBAC3C,GAAG,EAAG,QAAqB,CAAC,GAAG;aAChC,CAAC,CAAC;SACJ;aAAM,IAAI,SAAS,KAAK,kBAAS,CAAC,MAAM,EAAE;YACzC,IAAI,CAAC,CAAC,CAAC,kBAAkB,GAAG,IAAI,CAAC,CAAC,CAAC,YAAY,CAAC;SACjD;aAAM,IAAI,SAAS,KAAK,kBAAS,CAAC,MAAM,EAAE;YACzC,IAAI,CAAC,CAAC,CAAC,kBAAkB,GAAG,IAAI,CAAC,CAAC,CAAC,YAAY,CAAC;SACjD;QAED,4BAA4B;QAC5B,IAAI,CAAC,CAAC,CAAC,YAAY,CAAC,IAAI,IAAI,CAAC,CAAC;QAC9B,IAAI,CAAC,CAAC,CAAC,YAAY,CAAC,SAAS,IAAI,UAAU,GAAG,QAAQ,CAAC;QAEvD,OAAO,IAAI,CAAC;IACd,CAAC;CACF;AAnGD,wDAmGC"}
397
VISUALIZACION/node_modules/mongodb/lib/change_stream.js
generated
vendored
Executable file
@@ -0,0 +1,397 @@
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.ChangeStream = void 0;
|
||||
const collection_1 = require("./collection");
|
||||
const constants_1 = require("./constants");
|
||||
const change_stream_cursor_1 = require("./cursor/change_stream_cursor");
|
||||
const db_1 = require("./db");
|
||||
const error_1 = require("./error");
|
||||
const mongo_client_1 = require("./mongo_client");
|
||||
const mongo_types_1 = require("./mongo_types");
|
||||
const utils_1 = require("./utils");
|
||||
/** @internal */
|
||||
const kCursorStream = Symbol('cursorStream');
|
||||
/** @internal */
|
||||
const kClosed = Symbol('closed');
|
||||
/** @internal */
|
||||
const kMode = Symbol('mode');
|
||||
const CHANGE_STREAM_OPTIONS = [
|
||||
'resumeAfter',
|
||||
'startAfter',
|
||||
'startAtOperationTime',
|
||||
'fullDocument',
|
||||
'fullDocumentBeforeChange',
|
||||
'showExpandedEvents'
|
||||
];
|
||||
const CHANGE_DOMAIN_TYPES = {
|
||||
COLLECTION: Symbol('Collection'),
|
||||
DATABASE: Symbol('Database'),
|
||||
CLUSTER: Symbol('Cluster')
|
||||
};
|
||||
const CHANGE_STREAM_EVENTS = [constants_1.RESUME_TOKEN_CHANGED, constants_1.END, constants_1.CLOSE];
|
||||
const NO_RESUME_TOKEN_ERROR = 'A change stream document has been received that lacks a resume token (_id).';
|
||||
const CHANGESTREAM_CLOSED_ERROR = 'ChangeStream is closed';
|
||||
/**
|
||||
* Creates a new Change Stream instance. Normally created using {@link Collection#watch|Collection.watch()}.
|
||||
* @public
|
||||
*/
|
||||
class ChangeStream extends mongo_types_1.TypedEventEmitter {
|
||||
/**
|
||||
* @internal
|
||||
*
|
||||
* @param parent - The parent object that created this change stream
|
||||
* @param pipeline - An array of {@link https://www.mongodb.com/docs/manual/reference/operator/aggregation-pipeline/|aggregation pipeline stages} through which to pass change stream documents
|
||||
*/
|
||||
constructor(parent, pipeline = [], options = {}) {
|
||||
super();
|
||||
this.pipeline = pipeline;
|
||||
this.options = { ...options };
|
||||
delete this.options.writeConcern;
|
||||
if (parent instanceof collection_1.Collection) {
|
||||
this.type = CHANGE_DOMAIN_TYPES.COLLECTION;
|
||||
}
|
||||
else if (parent instanceof db_1.Db) {
|
||||
this.type = CHANGE_DOMAIN_TYPES.DATABASE;
|
||||
}
|
||||
else if (parent instanceof mongo_client_1.MongoClient) {
|
||||
this.type = CHANGE_DOMAIN_TYPES.CLUSTER;
|
||||
}
|
||||
else {
|
||||
throw new error_1.MongoChangeStreamError('Parent provided to ChangeStream constructor must be an instance of Collection, Db, or MongoClient');
|
||||
}
|
||||
this.parent = parent;
|
||||
this.namespace = parent.s.namespace;
|
||||
if (!this.options.readPreference && parent.readPreference) {
|
||||
this.options.readPreference = parent.readPreference;
|
||||
}
|
||||
// Create contained Change Stream cursor
|
||||
this.cursor = this._createChangeStreamCursor(options);
|
||||
this[kClosed] = false;
|
||||
this[kMode] = false;
|
||||
// Listen for any `change` listeners being added to ChangeStream
|
||||
this.on('newListener', eventName => {
|
||||
if (eventName === 'change' && this.cursor && this.listenerCount('change') === 0) {
|
||||
this._streamEvents(this.cursor);
|
||||
}
|
||||
});
|
||||
this.on('removeListener', eventName => {
|
||||
if (eventName === 'change' && this.listenerCount('change') === 0 && this.cursor) {
|
||||
this[kCursorStream]?.removeAllListeners('data');
|
||||
}
|
||||
});
|
||||
}
|
||||
/** @internal */
|
||||
get cursorStream() {
|
||||
return this[kCursorStream];
|
||||
}
|
||||
/** The cached resume token that is used to resume after the most recently returned change. */
|
||||
get resumeToken() {
|
||||
return this.cursor?.resumeToken;
|
||||
}
|
||||
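    // Sketch (public API; `token` handling is illustrative): the cached token
    // can be persisted and handed back as `resumeAfter` to continue a stream
    // after a process restart.
    //
    //   const stream = collection.watch();
    //   const change = await stream.next();
    //   const token = stream.resumeToken;
    //   // ...later...
    //   const resumed = collection.watch([], { resumeAfter: token });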
    /** Check if there is any document still available in the Change Stream */
    async hasNext() {
        this._setIsIterator();
        // Change streams must resume indefinitely while each resume event succeeds.
        // This loop continues until either a change event is received or until a resume attempt
        // fails.
        // eslint-disable-next-line no-constant-condition
        while (true) {
            try {
                const hasNext = await this.cursor.hasNext();
                return hasNext;
            }
            catch (error) {
                try {
                    await this._processErrorIteratorMode(error);
                }
                catch (error) {
                    try {
                        await this.close();
                    }
                    catch {
                        // We are not concerned with errors from close()
                    }
                    throw error;
                }
            }
        }
    }
    /** Get the next available document from the Change Stream. */
    async next() {
        this._setIsIterator();
        // Change streams must resume indefinitely while each resume event succeeds.
        // This loop continues until either a change event is received or until a resume attempt
        // fails.
        // eslint-disable-next-line no-constant-condition
        while (true) {
            try {
                const change = await this.cursor.next();
                const processedChange = this._processChange(change ?? null);
                return processedChange;
            }
            catch (error) {
                try {
                    await this._processErrorIteratorMode(error);
                }
                catch (error) {
                    try {
                        await this.close();
                    }
                    catch {
                        // We are not concerned with errors from close()
                    }
                    throw error;
                }
            }
        }
    }
    /**
     * Try to get the next available document from the Change Stream's cursor or `null` if an empty batch is returned
     */
    async tryNext() {
        this._setIsIterator();
        // Change streams must resume indefinitely while each resume event succeeds.
        // This loop continues until either a change event is received or until a resume attempt
        // fails.
        // eslint-disable-next-line no-constant-condition
        while (true) {
            try {
                const change = await this.cursor.tryNext();
                return change ?? null;
            }
            catch (error) {
                try {
                    await this._processErrorIteratorMode(error);
                }
                catch (error) {
                    try {
                        await this.close();
                    }
                    catch {
                        // We are not concerned with errors from close()
                    }
                    throw error;
                }
            }
        }
    }
    async *[Symbol.asyncIterator]() {
        if (this.closed) {
            return;
        }
        try {
            // Change streams run indefinitely as long as errors are resumable,
            // so the only loop-breaking condition is if `next()` throws
            while (true) {
                yield await this.next();
            }
        }
        finally {
            try {
                await this.close();
            }
            catch {
                // we're not concerned with errors from close()
            }
        }
    }
    /** Is the cursor closed */
    get closed() {
        return this[kClosed] || this.cursor.closed;
    }
    /** Close the Change Stream */
    async close() {
        this[kClosed] = true;
        const cursor = this.cursor;
        try {
            await cursor.close();
        }
        finally {
            this._endStream();
        }
    }
    /**
     * Return a modified Readable stream including a possible transform method.
     *
     * NOTE: When using a Stream to process change stream events, the stream will
     * NOT automatically resume in the case a resumable error is encountered.
     *
     * @throws MongoChangeStreamError if the underlying cursor or the change stream is closed
     */
    stream(options) {
        if (this.closed) {
            throw new error_1.MongoChangeStreamError(CHANGESTREAM_CLOSED_ERROR);
        }
        this.streamOptions = options;
        return this.cursor.stream(options);
    }
    /** @internal */
    _setIsEmitter() {
        if (this[kMode] === 'iterator') {
            // TODO(NODE-3485): Replace with MongoChangeStreamModeError
            throw new error_1.MongoAPIError('ChangeStream cannot be used as an EventEmitter after being used as an iterator');
        }
        this[kMode] = 'emitter';
    }
    /** @internal */
    _setIsIterator() {
        if (this[kMode] === 'emitter') {
            // TODO(NODE-3485): Replace with MongoChangeStreamModeError
            throw new error_1.MongoAPIError('ChangeStream cannot be used as an iterator after being used as an EventEmitter');
        }
        this[kMode] = 'iterator';
    }
    /**
     * Create a new change stream cursor based on this change stream's configuration
     * @internal
     */
    _createChangeStreamCursor(options) {
        const changeStreamStageOptions = (0, utils_1.filterOptions)(options, CHANGE_STREAM_OPTIONS);
        if (this.type === CHANGE_DOMAIN_TYPES.CLUSTER) {
            changeStreamStageOptions.allChangesForCluster = true;
        }
        const pipeline = [{ $changeStream: changeStreamStageOptions }, ...this.pipeline];
        const client = this.type === CHANGE_DOMAIN_TYPES.CLUSTER
            ? this.parent
            : this.type === CHANGE_DOMAIN_TYPES.DATABASE
                ? this.parent.client
                : this.type === CHANGE_DOMAIN_TYPES.COLLECTION
                    ? this.parent.client
                    : null;
        if (client == null) {
            // This should never happen because of the assertion in the constructor
            throw new error_1.MongoRuntimeError(`Changestream type should only be one of cluster, database, collection. Found ${this.type.toString()}`);
        }
        const changeStreamCursor = new change_stream_cursor_1.ChangeStreamCursor(client, this.namespace, pipeline, options);
        for (const event of CHANGE_STREAM_EVENTS) {
            changeStreamCursor.on(event, e => this.emit(event, e));
        }
        if (this.listenerCount(ChangeStream.CHANGE) > 0) {
            this._streamEvents(changeStreamCursor);
        }
        return changeStreamCursor;
    }
    /** @internal */
    _closeEmitterModeWithError(error) {
        this.emit(ChangeStream.ERROR, error);
        this.close().catch(() => null);
    }
    /** @internal */
    _streamEvents(cursor) {
        this._setIsEmitter();
        const stream = this[kCursorStream] ?? cursor.stream();
        this[kCursorStream] = stream;
        stream.on('data', change => {
            try {
                const processedChange = this._processChange(change);
                this.emit(ChangeStream.CHANGE, processedChange);
            }
            catch (error) {
                this.emit(ChangeStream.ERROR, error);
            }
        });
        stream.on('error', error => this._processErrorStreamMode(error));
    }
    /** @internal */
    _endStream() {
        const cursorStream = this[kCursorStream];
        if (cursorStream) {
            ['data', 'close', 'end', 'error'].forEach(event => cursorStream.removeAllListeners(event));
            cursorStream.destroy();
        }
        this[kCursorStream] = undefined;
    }
    /** @internal */
    _processChange(change) {
        if (this[kClosed]) {
            // TODO(NODE-3485): Replace with MongoChangeStreamClosedError
            throw new error_1.MongoAPIError(CHANGESTREAM_CLOSED_ERROR);
        }
        // a null change means the cursor has been notified, implicitly closing the change stream
        if (change == null) {
            // TODO(NODE-3485): Replace with MongoChangeStreamClosedError
            throw new error_1.MongoRuntimeError(CHANGESTREAM_CLOSED_ERROR);
        }
        if (change && !change._id) {
            throw new error_1.MongoChangeStreamError(NO_RESUME_TOKEN_ERROR);
        }
        // cache the resume token
        this.cursor.cacheResumeToken(change._id);
        // wipe the startAtOperationTime if there was one so that there won't be a conflict
        // between resumeToken and startAtOperationTime if we need to reconnect the cursor
        this.options.startAtOperationTime = undefined;
        return change;
    }
    /** @internal */
    _processErrorStreamMode(changeStreamError) {
        // If the change stream has been closed explicitly, do not process error.
        if (this[kClosed])
            return;
        if ((0, error_1.isResumableError)(changeStreamError, this.cursor.maxWireVersion)) {
            this._endStream();
            this.cursor.close().catch(() => null);
            const topology = (0, utils_1.getTopology)(this.parent);
            topology.selectServer(this.cursor.readPreference, {}, serverSelectionError => {
                if (serverSelectionError)
                    return this._closeEmitterModeWithError(changeStreamError);
                this.cursor = this._createChangeStreamCursor(this.cursor.resumeOptions);
            });
        }
        else {
            this._closeEmitterModeWithError(changeStreamError);
        }
    }
    /** @internal */
    async _processErrorIteratorMode(changeStreamError) {
        if (this[kClosed]) {
            // TODO(NODE-3485): Replace with MongoChangeStreamClosedError
            throw new error_1.MongoAPIError(CHANGESTREAM_CLOSED_ERROR);
        }
        if (!(0, error_1.isResumableError)(changeStreamError, this.cursor.maxWireVersion)) {
            try {
                await this.close();
            }
            catch {
                // ignore errors from close
            }
            throw changeStreamError;
        }
        await this.cursor.close().catch(() => null);
        const topology = (0, utils_1.getTopology)(this.parent);
        try {
            await topology.selectServerAsync(this.cursor.readPreference, {});
            this.cursor = this._createChangeStreamCursor(this.cursor.resumeOptions);
        }
        catch {
            // if the topology can't reconnect, close the stream
            await this.close();
            throw changeStreamError;
        }
    }
}
/** @event */
ChangeStream.RESPONSE = constants_1.RESPONSE;
/** @event */
ChangeStream.MORE = constants_1.MORE;
/** @event */
ChangeStream.INIT = constants_1.INIT;
/** @event */
ChangeStream.CLOSE = constants_1.CLOSE;
/**
 * Fired for each new matching change in the specified namespace. Attaching a `change`
 * event listener to a Change Stream will switch the stream into flowing mode. Data will
 * then be passed as soon as it is available.
 * @event
 */
ChangeStream.CHANGE = constants_1.CHANGE;
/** @event */
ChangeStream.END = constants_1.END;
/** @event */
ChangeStream.ERROR = constants_1.ERROR;
/**
 * Emitted each time the change stream stores a new resume token.
 * @event
 */
ChangeStream.RESUME_TOKEN_CHANGED = constants_1.RESUME_TOKEN_CHANGED;
exports.ChangeStream = ChangeStream;
//# sourceMappingURL=change_stream.js.map
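// Sketch of the two consumption modes guarded by _setIsIterator and
// _setIsEmitter above (public API; the $match stage is illustrative). Mixing
// modes on one instance throws a MongoAPIError.
//
//   // Iterator mode
//   const stream = collection.watch([{ $match: { operationType: 'insert' } }]);
//   for await (const change of stream) {
//     console.log(change.fullDocument);
//   }
//
//   // Emitter mode (flowing)
//   collection.watch().on('change', change => console.log(change));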
1
VISUALIZACION/node_modules/mongodb/lib/change_stream.js.map
generated
vendored
Executable file
File diff suppressed because one or more lines are too long
47
VISUALIZACION/node_modules/mongodb/lib/cmap/auth/auth_provider.js
generated
vendored
Executable file
@@ -0,0 +1,47 @@
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.AuthProvider = exports.AuthContext = void 0;
|
||||
const error_1 = require("../../error");
|
||||
/**
|
||||
* Context used during authentication
|
||||
* @internal
|
||||
*/
|
||||
class AuthContext {
|
||||
constructor(connection, credentials, options) {
|
||||
/** If the context is for reauthentication. */
|
||||
this.reauthenticating = false;
|
||||
this.connection = connection;
|
||||
this.credentials = credentials;
|
||||
this.options = options;
|
||||
}
|
||||
}
|
||||
exports.AuthContext = AuthContext;
|
||||
class AuthProvider {
|
||||
/**
|
||||
* Prepare the handshake document before the initial handshake.
|
||||
*
|
||||
* @param handshakeDoc - The document used for the initial handshake on a connection
|
||||
* @param authContext - Context for authentication flow
|
||||
*/
|
||||
async prepare(handshakeDoc, _authContext) {
|
||||
return handshakeDoc;
|
||||
}
|
||||
/**
|
||||
* Reauthenticate.
|
||||
* @param context - The shared auth context.
|
||||
*/
|
||||
async reauth(context) {
|
||||
if (context.reauthenticating) {
|
||||
throw new error_1.MongoRuntimeError('Reauthentication already in progress.');
|
||||
}
|
||||
try {
|
||||
context.reauthenticating = true;
|
||||
await this.auth(context);
|
||||
}
|
||||
finally {
|
||||
context.reauthenticating = false;
|
||||
}
|
||||
}
|
||||
}
|
||||
exports.AuthProvider = AuthProvider;
|
||||
//# sourceMappingURL=auth_provider.js.map
|
||||
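// Sketch of the extension point (hypothetical subclass; auth() is the method
// each concrete mechanism implements and reauth() wraps above):
//
//   class ExampleProvider extends AuthProvider {
//     async auth(authContext) {
//       const { connection, credentials } = authContext;
//       // ...run the mechanism's conversation against `connection`...
//     }
//   }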
1
VISUALIZACION/node_modules/mongodb/lib/cmap/auth/auth_provider.js.map
generated
vendored
Executable file
@@ -0,0 +1 @@
{"version":3,"file":"auth_provider.js","sourceRoot":"","sources":["../../../src/cmap/auth/auth_provider.ts"],"names":[],"mappings":";;;AACA,uCAAgD;AAKhD;;;GAGG;AACH,MAAa,WAAW;IAetB,YACE,UAAsB,EACtB,WAAyC,EACzC,OAA0B;QAb5B,8CAA8C;QAC9C,qBAAgB,GAAG,KAAK,CAAC;QAcvB,IAAI,CAAC,UAAU,GAAG,UAAU,CAAC;QAC7B,IAAI,CAAC,WAAW,GAAG,WAAW,CAAC;QAC/B,IAAI,CAAC,OAAO,GAAG,OAAO,CAAC;IACzB,CAAC;CACF;AAxBD,kCAwBC;AAED,MAAsB,YAAY;IAChC;;;;;OAKG;IACH,KAAK,CAAC,OAAO,CACX,YAA+B,EAC/B,YAAyB;QAEzB,OAAO,YAAY,CAAC;IACtB,CAAC;IASD;;;OAGG;IACH,KAAK,CAAC,MAAM,CAAC,OAAoB;QAC/B,IAAI,OAAO,CAAC,gBAAgB,EAAE;YAC5B,MAAM,IAAI,yBAAiB,CAAC,uCAAuC,CAAC,CAAC;SACtE;QACD,IAAI;YACF,OAAO,CAAC,gBAAgB,GAAG,IAAI,CAAC;YAChC,MAAM,IAAI,CAAC,IAAI,CAAC,OAAO,CAAC,CAAC;SAC1B;gBAAS;YACR,OAAO,CAAC,gBAAgB,GAAG,KAAK,CAAC;SAClC;IACH,CAAC;CACF;AApCD,oCAoCC"}
153
VISUALIZACION/node_modules/mongodb/lib/cmap/auth/gssapi.js
generated
vendored
Executable file
@@ -0,0 +1,153 @@
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.resolveCname = exports.performGSSAPICanonicalizeHostName = exports.GSSAPI = exports.GSSAPICanonicalizationValue = void 0;
|
||||
const dns = require("dns");
|
||||
const deps_1 = require("../../deps");
|
||||
const error_1 = require("../../error");
|
||||
const utils_1 = require("../../utils");
|
||||
const auth_provider_1 = require("./auth_provider");
|
||||
/** @public */
|
||||
exports.GSSAPICanonicalizationValue = Object.freeze({
|
||||
on: true,
|
||||
off: false,
|
||||
none: 'none',
|
||||
forward: 'forward',
|
||||
forwardAndReverse: 'forwardAndReverse'
|
||||
});
|
||||
async function externalCommand(connection, command) {
|
||||
return connection.commandAsync((0, utils_1.ns)('$external.$cmd'), command, undefined);
|
||||
}
|
||||
let krb;
|
||||
class GSSAPI extends auth_provider_1.AuthProvider {
|
||||
async auth(authContext) {
|
||||
const { connection, credentials } = authContext;
|
||||
if (credentials == null) {
|
||||
throw new error_1.MongoMissingCredentialsError('Credentials required for GSSAPI authentication');
|
||||
}
|
||||
const { username } = credentials;
|
||||
const client = await makeKerberosClient(authContext);
|
||||
const payload = await client.step('');
|
||||
const saslStartResponse = await externalCommand(connection, saslStart(payload));
|
||||
const negotiatedPayload = await negotiate(client, 10, saslStartResponse.payload);
|
||||
const saslContinueResponse = await externalCommand(connection, saslContinue(negotiatedPayload, saslStartResponse.conversationId));
|
||||
const finalizePayload = await finalize(client, username, saslContinueResponse.payload);
|
||||
await externalCommand(connection, {
|
||||
saslContinue: 1,
|
||||
conversationId: saslContinueResponse.conversationId,
|
||||
payload: finalizePayload
|
||||
});
|
||||
}
|
||||
}
|
||||
exports.GSSAPI = GSSAPI;
|
||||
async function makeKerberosClient(authContext) {
    const { hostAddress } = authContext.options;
    const { credentials } = authContext;
    if (!hostAddress || typeof hostAddress.host !== 'string' || !credentials) {
        throw new error_1.MongoInvalidArgumentError('Connection must have host and port and credentials defined.');
    }
    loadKrb();
    if ('kModuleError' in krb) {
        throw krb['kModuleError'];
    }
    const { initializeClient } = krb;
    const { username, password } = credentials;
    const mechanismProperties = credentials.mechanismProperties;
    const serviceName = mechanismProperties.SERVICE_NAME ?? 'mongodb';
    const host = await performGSSAPICanonicalizeHostName(hostAddress.host, mechanismProperties);
    const initOptions = {};
    if (password != null) {
        // TODO(NODE-5139): These do not match the typescript options in initializeClient
        Object.assign(initOptions, { user: username, password: password });
    }
    const spnHost = mechanismProperties.SERVICE_HOST ?? host;
    let spn = `${serviceName}${process.platform === 'win32' ? '/' : '@'}${spnHost}`;
    if ('SERVICE_REALM' in mechanismProperties) {
        spn = `${spn}@${mechanismProperties.SERVICE_REALM}`;
    }
    return initializeClient(spn, initOptions);
}
function saslStart(payload) {
    return {
        saslStart: 1,
        mechanism: 'GSSAPI',
        payload,
        autoAuthorize: 1
    };
}
function saslContinue(payload, conversationId) {
    return {
        saslContinue: 1,
        conversationId,
        payload
    };
}
async function negotiate(client, retries, payload) {
    try {
        const response = await client.step(payload);
        return response || '';
    }
    catch (error) {
        if (retries === 0) {
            // Retries exhausted, raise error
            throw error;
        }
        // Adjust number of retries and call step again
        return negotiate(client, retries - 1, payload);
    }
}
async function finalize(client, user, payload) {
    // GSS Client Unwrap
    const response = await client.unwrap(payload);
    return client.wrap(response || '', { user });
}
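// Worked example of the SPN assembly above (hypothetical values): with
// serviceName 'mongodb', canonicalized host 'db1.example.com' and
// SERVICE_REALM 'EXAMPLE.COM', spn becomes 'mongodb@db1.example.com@EXAMPLE.COM'
// on non-Windows platforms and 'mongodb/db1.example.com@EXAMPLE.COM' on win32.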
async function performGSSAPICanonicalizeHostName(host, mechanismProperties) {
    const mode = mechanismProperties.CANONICALIZE_HOST_NAME;
    if (!mode || mode === exports.GSSAPICanonicalizationValue.none) {
        return host;
    }
    // If forward and reverse or true
    if (mode === exports.GSSAPICanonicalizationValue.on ||
        mode === exports.GSSAPICanonicalizationValue.forwardAndReverse) {
        // Perform the lookup of the IP address.
        const { address } = await dns.promises.lookup(host);
        try {
            // Perform a reverse ptr lookup on the IP address.
            const results = await dns.promises.resolvePtr(address);
            // If the ptr did not error but had no results, return the host.
            return results.length > 0 ? results[0] : host;
        }
        catch (error) {
            // This can error as ptr records may not exist for all IPs. In this case
            // fall back to a cname lookup, as dns.lookup() does not return the
            // cname.
            return resolveCname(host);
        }
    }
    else {
        // The case for forward is just to resolve the cname, as dns.lookup()
        // will not return it.
        return resolveCname(host);
    }
}
exports.performGSSAPICanonicalizeHostName = performGSSAPICanonicalizeHostName;
async function resolveCname(host) {
    // Attempt to resolve the host name
    try {
        const results = await dns.promises.resolveCname(host);
        // Get the first resolved host id
        return results.length > 0 ? results[0] : host;
    }
    catch {
        return host;
    }
}
exports.resolveCname = resolveCname;
/**
|
||||
* Load the Kerberos library.
|
||||
*/
|
||||
function loadKrb() {
|
||||
if (!krb) {
|
||||
krb = (0, deps_1.getKerberos)();
|
||||
}
|
||||
}
|
||||
//# sourceMappingURL=gssapi.js.map
|
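A stand-alone sketch of the SPN construction above; the host and realm values are illustrative, and buildSpn is a hypothetical helper, not part of the driver:

function buildSpn(serviceName, spnHost, serviceRealm) {
    // Mirrors the spn assembly above: '/' separator on win32, '@' elsewhere.
    let spn = `${serviceName}${process.platform === 'win32' ? '/' : '@'}${spnHost}`;
    if (serviceRealm != null) {
        spn = `${spn}@${serviceRealm}`;
    }
    return spn;
}
buildSpn('mongodb', 'db1.example.com');                // 'mongodb@db1.example.com' on non-Windows
buildSpn('mongodb', 'db1.example.com', 'EXAMPLE.COM'); // 'mongodb@db1.example.com@EXAMPLE.COM'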
1
VISUALIZACION/node_modules/mongodb/lib/cmap/auth/gssapi.js.map
generated
vendored
Executable file
File diff suppressed because one or more lines are too long
177
VISUALIZACION/node_modules/mongodb/lib/cmap/auth/mongo_credentials.js
generated
vendored
Executable file
@ -0,0 +1,177 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.MongoCredentials = exports.DEFAULT_ALLOWED_HOSTS = void 0;
const error_1 = require("../../error");
const gssapi_1 = require("./gssapi");
const providers_1 = require("./providers");
// https://github.com/mongodb/specifications/blob/master/source/auth/auth.rst
function getDefaultAuthMechanism(hello) {
    if (hello) {
        // If hello contains saslSupportedMechs, use scram-sha-256
        // if it is available, else scram-sha-1
        if (Array.isArray(hello.saslSupportedMechs)) {
            return hello.saslSupportedMechs.includes(providers_1.AuthMechanism.MONGODB_SCRAM_SHA256)
                ? providers_1.AuthMechanism.MONGODB_SCRAM_SHA256
                : providers_1.AuthMechanism.MONGODB_SCRAM_SHA1;
        }
        // Fallback to legacy selection method. If wire version >= 3, use scram-sha-1
        if (hello.maxWireVersion >= 3) {
            return providers_1.AuthMechanism.MONGODB_SCRAM_SHA1;
        }
    }
    // Default for wireprotocol < 3
    return providers_1.AuthMechanism.MONGODB_CR;
}
const ALLOWED_PROVIDER_NAMES = ['aws', 'azure'];
const ALLOWED_HOSTS_ERROR = 'Auth mechanism property ALLOWED_HOSTS must be an array of strings.';
/** @internal */
exports.DEFAULT_ALLOWED_HOSTS = [
    '*.mongodb.net',
    '*.mongodb-dev.net',
    '*.mongodbgov.net',
    'localhost',
    '127.0.0.1',
    '::1'
];
/** Error for when the token audience is missing in the environment. */
const TOKEN_AUDIENCE_MISSING_ERROR = 'TOKEN_AUDIENCE must be set in the auth mechanism properties when PROVIDER_NAME is azure.';
/**
 * A representation of the credentials used by MongoDB
 * @public
 */
class MongoCredentials {
    constructor(options) {
        this.username = options.username ?? '';
        this.password = options.password;
        this.source = options.source;
        if (!this.source && options.db) {
            this.source = options.db;
        }
        this.mechanism = options.mechanism || providers_1.AuthMechanism.MONGODB_DEFAULT;
        this.mechanismProperties = options.mechanismProperties || {};
        if (this.mechanism.match(/MONGODB-AWS/i)) {
            if (!this.username && process.env.AWS_ACCESS_KEY_ID) {
                this.username = process.env.AWS_ACCESS_KEY_ID;
            }
            if (!this.password && process.env.AWS_SECRET_ACCESS_KEY) {
                this.password = process.env.AWS_SECRET_ACCESS_KEY;
            }
            if (this.mechanismProperties.AWS_SESSION_TOKEN == null &&
                process.env.AWS_SESSION_TOKEN != null) {
                this.mechanismProperties = {
                    ...this.mechanismProperties,
                    AWS_SESSION_TOKEN: process.env.AWS_SESSION_TOKEN
                };
            }
        }
        if (this.mechanism === providers_1.AuthMechanism.MONGODB_OIDC && !this.mechanismProperties.ALLOWED_HOSTS) {
            this.mechanismProperties = {
                ...this.mechanismProperties,
                ALLOWED_HOSTS: exports.DEFAULT_ALLOWED_HOSTS
            };
        }
        Object.freeze(this.mechanismProperties);
        Object.freeze(this);
    }
    /** Determines if two MongoCredentials objects are equivalent */
    equals(other) {
        return (this.mechanism === other.mechanism &&
            this.username === other.username &&
            this.password === other.password &&
            this.source === other.source);
    }
    /**
     * If the authentication mechanism is set to "default", resolves the authMechanism
     * based on the server version and server supported sasl mechanisms.
     *
     * @param hello - A hello response from the server
     */
    resolveAuthMechanism(hello) {
        // If the mechanism is not "default", then it does not need to be resolved
        if (this.mechanism.match(/DEFAULT/i)) {
            return new MongoCredentials({
                username: this.username,
                password: this.password,
                source: this.source,
                mechanism: getDefaultAuthMechanism(hello),
                mechanismProperties: this.mechanismProperties
            });
        }
        return this;
    }
    validate() {
        if ((this.mechanism === providers_1.AuthMechanism.MONGODB_GSSAPI ||
            this.mechanism === providers_1.AuthMechanism.MONGODB_CR ||
            this.mechanism === providers_1.AuthMechanism.MONGODB_PLAIN ||
            this.mechanism === providers_1.AuthMechanism.MONGODB_SCRAM_SHA1 ||
            this.mechanism === providers_1.AuthMechanism.MONGODB_SCRAM_SHA256) &&
            !this.username) {
            throw new error_1.MongoMissingCredentialsError(`Username required for mechanism '${this.mechanism}'`);
        }
        if (this.mechanism === providers_1.AuthMechanism.MONGODB_OIDC) {
            if (this.username && this.mechanismProperties.PROVIDER_NAME) {
                throw new error_1.MongoInvalidArgumentError(`username and PROVIDER_NAME may not be used together for mechanism '${this.mechanism}'.`);
            }
            if (this.mechanismProperties.PROVIDER_NAME === 'azure' &&
                !this.mechanismProperties.TOKEN_AUDIENCE) {
                throw new error_1.MongoAzureError(TOKEN_AUDIENCE_MISSING_ERROR);
            }
            if (this.mechanismProperties.PROVIDER_NAME &&
                !ALLOWED_PROVIDER_NAMES.includes(this.mechanismProperties.PROVIDER_NAME)) {
                throw new error_1.MongoInvalidArgumentError(`Currently only a PROVIDER_NAME in ${ALLOWED_PROVIDER_NAMES.join(',')} is supported for mechanism '${this.mechanism}'.`);
            }
            if (this.mechanismProperties.REFRESH_TOKEN_CALLBACK &&
                !this.mechanismProperties.REQUEST_TOKEN_CALLBACK) {
                throw new error_1.MongoInvalidArgumentError(`A REQUEST_TOKEN_CALLBACK must be provided when using a REFRESH_TOKEN_CALLBACK for mechanism '${this.mechanism}'`);
            }
            if (!this.mechanismProperties.PROVIDER_NAME &&
                !this.mechanismProperties.REQUEST_TOKEN_CALLBACK) {
                throw new error_1.MongoInvalidArgumentError(`Either a PROVIDER_NAME or a REQUEST_TOKEN_CALLBACK must be specified for mechanism '${this.mechanism}'.`);
            }
            if (this.mechanismProperties.ALLOWED_HOSTS) {
                const hosts = this.mechanismProperties.ALLOWED_HOSTS;
                if (!Array.isArray(hosts)) {
                    throw new error_1.MongoInvalidArgumentError(ALLOWED_HOSTS_ERROR);
                }
                for (const host of hosts) {
                    if (typeof host !== 'string') {
                        throw new error_1.MongoInvalidArgumentError(ALLOWED_HOSTS_ERROR);
                    }
                }
            }
        }
        if (providers_1.AUTH_MECHS_AUTH_SRC_EXTERNAL.has(this.mechanism)) {
            if (this.source != null && this.source !== '$external') {
                // TODO(NODE-3485): Replace this with a MongoAuthValidationError
                throw new error_1.MongoAPIError(`Invalid source '${this.source}' for mechanism '${this.mechanism}' specified.`);
            }
        }
        if (this.mechanism === providers_1.AuthMechanism.MONGODB_PLAIN && this.source == null) {
            // TODO(NODE-3485): Replace this with a MongoAuthValidationError
            throw new error_1.MongoAPIError('PLAIN Authentication Mechanism needs an auth source');
        }
        if (this.mechanism === providers_1.AuthMechanism.MONGODB_X509 && this.password != null) {
            if (this.password === '') {
                Reflect.set(this, 'password', undefined);
                return;
            }
            // TODO(NODE-3485): Replace this with a MongoAuthValidationError
            throw new error_1.MongoAPIError(`Password not allowed for mechanism MONGODB-X509`);
        }
        const canonicalization = this.mechanismProperties.CANONICALIZE_HOST_NAME ?? false;
        if (!Object.values(gssapi_1.GSSAPICanonicalizationValue).includes(canonicalization)) {
            throw new error_1.MongoAPIError(`Invalid CANONICALIZE_HOST_NAME value: ${canonicalization}`);
        }
    }
    static merge(creds, options) {
        return new MongoCredentials({
            username: options.username ?? creds?.username ?? '',
            password: options.password ?? creds?.password ?? '',
            mechanism: options.mechanism ?? creds?.mechanism ?? providers_1.AuthMechanism.MONGODB_DEFAULT,
            mechanismProperties: options.mechanismProperties ?? creds?.mechanismProperties ?? {},
            source: options.source ?? options.db ?? creds?.source ?? 'admin'
        });
    }
}
exports.MongoCredentials = MongoCredentials;
//# sourceMappingURL=mongo_credentials.js.map
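A usage sketch for the class above, with illustrative values; 'DEFAULT' is the string behind AuthMechanism.MONGODB_DEFAULT, and the hello document stands in for a real server response:

const { MongoCredentials } = require('./mongo_credentials');
const creds = new MongoCredentials({
    username: 'app_user',
    password: 'app_pass',
    source: 'admin',
    mechanism: 'DEFAULT'
});
creds.validate(); // throws on inconsistent options; passes here
// The server advertises SCRAM-SHA-256, so the DEFAULT mechanism resolves to it.
const resolved = creds.resolveAuthMechanism({ saslSupportedMechs: ['SCRAM-SHA-256'] });
// resolved.mechanism === 'SCRAM-SHA-256'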
1
VISUALIZACION/node_modules/mongodb/lib/cmap/auth/mongo_credentials.js.map
generated
vendored
Executable file
File diff suppressed because one or more lines are too long
35
VISUALIZACION/node_modules/mongodb/lib/cmap/auth/mongocr.js
generated
vendored
Executable file
@ -0,0 +1,35 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.MongoCR = void 0;
const crypto = require("crypto");
const error_1 = require("../../error");
const utils_1 = require("../../utils");
const auth_provider_1 = require("./auth_provider");
class MongoCR extends auth_provider_1.AuthProvider {
    async auth(authContext) {
        const { connection, credentials } = authContext;
        if (!credentials) {
            throw new error_1.MongoMissingCredentialsError('AuthContext must provide credentials.');
        }
        const { username, password, source } = credentials;
        const { nonce } = await connection.commandAsync((0, utils_1.ns)(`${source}.$cmd`), { getnonce: 1 }, undefined);
        const hashPassword = crypto
            .createHash('md5')
            .update(`${username}:mongo:${password}`, 'utf8')
            .digest('hex');
        // Final key
        const key = crypto
            .createHash('md5')
            .update(`${nonce}${username}${hashPassword}`, 'utf8')
            .digest('hex');
        const authenticateCommand = {
            authenticate: 1,
            user: username,
            nonce,
            key
        };
        await connection.commandAsync((0, utils_1.ns)(`${source}.$cmd`), authenticateCommand, undefined);
    }
}
exports.MongoCR = MongoCR;
//# sourceMappingURL=mongocr.js.map
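The legacy MONGODB-CR proof above is two chained md5 digests; a stand-alone sketch (the nonce is illustrative, and mongoCRKey is a hypothetical helper, not part of the driver):

const crypto = require('crypto');
// Reproduces the key derivation from MongoCR.auth() above.
function mongoCRKey(username, password, nonce) {
    const hashPassword = crypto
        .createHash('md5')
        .update(`${username}:mongo:${password}`, 'utf8')
        .digest('hex');
    return crypto
        .createHash('md5')
        .update(`${nonce}${username}${hashPassword}`, 'utf8')
        .digest('hex');
}
mongoCRKey('app_user', 'app_pass', '2375531c32080ae8'); // hex digest sent as `key`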
1
VISUALIZACION/node_modules/mongodb/lib/cmap/auth/mongocr.js.map
generated
vendored
Executable file
File diff suppressed because one or more lines are too long
186
VISUALIZACION/node_modules/mongodb/lib/cmap/auth/mongodb_aws.js
generated
vendored
Executable file
@ -0,0 +1,186 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.MongoDBAWS = void 0;
const crypto = require("crypto");
const util_1 = require("util");
const BSON = require("../../bson");
const deps_1 = require("../../deps");
const error_1 = require("../../error");
const utils_1 = require("../../utils");
const auth_provider_1 = require("./auth_provider");
const mongo_credentials_1 = require("./mongo_credentials");
const providers_1 = require("./providers");
const ASCII_N = 110;
const AWS_RELATIVE_URI = 'http://169.254.170.2';
const AWS_EC2_URI = 'http://169.254.169.254';
const AWS_EC2_PATH = '/latest/meta-data/iam/security-credentials';
const bsonOptions = {
    useBigInt64: false,
    promoteLongs: true,
    promoteValues: true,
    promoteBuffers: false,
    bsonRegExp: false
};
class MongoDBAWS extends auth_provider_1.AuthProvider {
    constructor() {
        super();
        this.randomBytesAsync = (0, util_1.promisify)(crypto.randomBytes);
    }
    async auth(authContext) {
        const { connection } = authContext;
        if (!authContext.credentials) {
            throw new error_1.MongoMissingCredentialsError('AuthContext must provide credentials.');
        }
        if ('kModuleError' in deps_1.aws4) {
            throw deps_1.aws4['kModuleError'];
        }
        const { sign } = deps_1.aws4;
        if ((0, utils_1.maxWireVersion)(connection) < 9) {
            throw new error_1.MongoCompatibilityError('MONGODB-AWS authentication requires MongoDB version 4.4 or later');
        }
        if (!authContext.credentials.username) {
            authContext.credentials = await makeTempCredentials(authContext.credentials);
        }
        const { credentials } = authContext;
        const accessKeyId = credentials.username;
        const secretAccessKey = credentials.password;
        const sessionToken = credentials.mechanismProperties.AWS_SESSION_TOKEN;
        // If all three defined, include sessionToken, else include username and pass, else no credentials
        const awsCredentials = accessKeyId && secretAccessKey && sessionToken
            ? { accessKeyId, secretAccessKey, sessionToken }
            : accessKeyId && secretAccessKey
                ? { accessKeyId, secretAccessKey }
                : undefined;
        const db = credentials.source;
        const nonce = await this.randomBytesAsync(32);
        const saslStart = {
            saslStart: 1,
            mechanism: 'MONGODB-AWS',
            payload: BSON.serialize({ r: nonce, p: ASCII_N }, bsonOptions)
        };
        const saslStartResponse = await connection.commandAsync((0, utils_1.ns)(`${db}.$cmd`), saslStart, undefined);
        const serverResponse = BSON.deserialize(saslStartResponse.payload.buffer, bsonOptions);
        const host = serverResponse.h;
        const serverNonce = serverResponse.s.buffer;
        if (serverNonce.length !== 64) {
            // TODO(NODE-3483)
            throw new error_1.MongoRuntimeError(`Invalid server nonce length ${serverNonce.length}, expected 64`);
        }
        if (!utils_1.ByteUtils.equals(serverNonce.subarray(0, nonce.byteLength), nonce)) {
            // throw because the serverNonce's leading 32 bytes must equal the client nonce's 32 bytes
            // https://github.com/mongodb/specifications/blob/875446db44aade414011731840831f38a6c668df/source/auth/auth.rst#id11
            // TODO(NODE-3483)
            throw new error_1.MongoRuntimeError('Server nonce does not begin with client nonce');
        }
        if (host.length < 1 || host.length > 255 || host.indexOf('..') !== -1) {
            // TODO(NODE-3483)
            throw new error_1.MongoRuntimeError(`Server returned an invalid host: "${host}"`);
        }
        const body = 'Action=GetCallerIdentity&Version=2011-06-15';
        const options = sign({
            method: 'POST',
            host,
            region: deriveRegion(serverResponse.h),
            service: 'sts',
            headers: {
                'Content-Type': 'application/x-www-form-urlencoded',
                'Content-Length': body.length,
                'X-MongoDB-Server-Nonce': utils_1.ByteUtils.toBase64(serverNonce),
                'X-MongoDB-GS2-CB-Flag': 'n'
            },
            path: '/',
            body
        }, awsCredentials);
        const payload = {
            a: options.headers.Authorization,
            d: options.headers['X-Amz-Date']
        };
        if (sessionToken) {
            payload.t = sessionToken;
        }
        const saslContinue = {
            saslContinue: 1,
            conversationId: 1,
            payload: BSON.serialize(payload, bsonOptions)
        };
        await connection.commandAsync((0, utils_1.ns)(`${db}.$cmd`), saslContinue, undefined);
    }
}
exports.MongoDBAWS = MongoDBAWS;
async function makeTempCredentials(credentials) {
    function makeMongoCredentialsFromAWSTemp(creds) {
        if (!creds.AccessKeyId || !creds.SecretAccessKey || !creds.Token) {
            throw new error_1.MongoMissingCredentialsError('Could not obtain temporary MONGODB-AWS credentials');
        }
        return new mongo_credentials_1.MongoCredentials({
            username: creds.AccessKeyId,
            password: creds.SecretAccessKey,
            source: credentials.source,
            mechanism: providers_1.AuthMechanism.MONGODB_AWS,
            mechanismProperties: {
                AWS_SESSION_TOKEN: creds.Token
            }
        });
    }
    const credentialProvider = (0, deps_1.getAwsCredentialProvider)();
    // Check if the AWS credential provider from the SDK is present. If not,
    // use the old method.
    if ('kModuleError' in credentialProvider) {
        // If the environment variable AWS_CONTAINER_CREDENTIALS_RELATIVE_URI
        // is set then drivers MUST assume that it was set by an AWS ECS agent
        if (process.env.AWS_CONTAINER_CREDENTIALS_RELATIVE_URI) {
            return makeMongoCredentialsFromAWSTemp(await (0, utils_1.request)(`${AWS_RELATIVE_URI}${process.env.AWS_CONTAINER_CREDENTIALS_RELATIVE_URI}`));
        }
        // Otherwise assume we are on an EC2 instance
        // get a token
        const token = await (0, utils_1.request)(`${AWS_EC2_URI}/latest/api/token`, {
            method: 'PUT',
            json: false,
            headers: { 'X-aws-ec2-metadata-token-ttl-seconds': 30 }
        });
        // get role name
        const roleName = await (0, utils_1.request)(`${AWS_EC2_URI}/${AWS_EC2_PATH}`, {
            json: false,
            headers: { 'X-aws-ec2-metadata-token': token }
        });
        // get temp credentials
        const creds = await (0, utils_1.request)(`${AWS_EC2_URI}/${AWS_EC2_PATH}/${roleName}`, {
            headers: { 'X-aws-ec2-metadata-token': token }
        });
        return makeMongoCredentialsFromAWSTemp(creds);
    }
    else {
        /*
         * Creates a credential provider that will attempt to find credentials from the
         * following sources (listed in order of precedence):
         *
         * - Environment variables exposed via process.env
         * - SSO credentials from token cache
         * - Web identity token credentials
         * - Shared credentials and config ini files
         * - The EC2/ECS Instance Metadata Service
         */
        const { fromNodeProviderChain } = credentialProvider;
        const provider = fromNodeProviderChain();
        try {
            const creds = await provider();
            return makeMongoCredentialsFromAWSTemp({
                AccessKeyId: creds.accessKeyId,
                SecretAccessKey: creds.secretAccessKey,
                Token: creds.sessionToken,
                Expiration: creds.expiration
            });
        }
        catch (error) {
            throw new error_1.MongoAWSError(error.message);
        }
    }
}
function deriveRegion(host) {
    const parts = host.split('.');
    if (parts.length === 1 || parts[1] === 'amazonaws') {
        return 'us-east-1';
    }
    return parts[1];
}
//# sourceMappingURL=mongodb_aws.js.map
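deriveRegion above is module-private; its host parsing is easy to illustrate (hosts are examples):

deriveRegion('sts.amazonaws.com');           // 'us-east-1' — second label is 'amazonaws'
deriveRegion('sts.us-west-2.amazonaws.com'); // 'us-west-2' — second label names the region
deriveRegion('localhost');                   // 'us-east-1' — single-label fallback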
1
VISUALIZACION/node_modules/mongodb/lib/cmap/auth/mongodb_aws.js.map
generated
vendored
Executable file
File diff suppressed because one or more lines are too long
68
VISUALIZACION/node_modules/mongodb/lib/cmap/auth/mongodb_oidc.js
generated
vendored
Executable file
@ -0,0 +1,68 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.MongoDBOIDC = exports.OIDC_WORKFLOWS = void 0;
const error_1 = require("../../error");
const auth_provider_1 = require("./auth_provider");
const aws_service_workflow_1 = require("./mongodb_oidc/aws_service_workflow");
const azure_service_workflow_1 = require("./mongodb_oidc/azure_service_workflow");
const callback_workflow_1 = require("./mongodb_oidc/callback_workflow");
/** Error when credentials are missing. */
const MISSING_CREDENTIALS_ERROR = 'AuthContext must provide credentials.';
/** @internal */
exports.OIDC_WORKFLOWS = new Map();
exports.OIDC_WORKFLOWS.set('callback', new callback_workflow_1.CallbackWorkflow());
exports.OIDC_WORKFLOWS.set('aws', new aws_service_workflow_1.AwsServiceWorkflow());
exports.OIDC_WORKFLOWS.set('azure', new azure_service_workflow_1.AzureServiceWorkflow());
/**
 * OIDC auth provider.
 * @experimental
 */
class MongoDBOIDC extends auth_provider_1.AuthProvider {
    /**
     * Instantiate the auth provider.
     */
    constructor() {
        super();
    }
    /**
     * Authenticate using OIDC
     */
    async auth(authContext) {
        const { connection, reauthenticating, response } = authContext;
        const credentials = getCredentials(authContext);
        const workflow = getWorkflow(credentials);
        await workflow.execute(connection, credentials, reauthenticating, response);
    }
    /**
     * Add the speculative auth for the initial handshake.
     */
    async prepare(handshakeDoc, authContext) {
        const credentials = getCredentials(authContext);
        const workflow = getWorkflow(credentials);
        const result = await workflow.speculativeAuth(credentials);
        return { ...handshakeDoc, ...result };
    }
}
exports.MongoDBOIDC = MongoDBOIDC;
/**
 * Get credentials from the auth context, throwing if they do not exist.
 */
function getCredentials(authContext) {
    const { credentials } = authContext;
    if (!credentials) {
        throw new error_1.MongoMissingCredentialsError(MISSING_CREDENTIALS_ERROR);
    }
    return credentials;
}
/**
 * Gets either a device workflow or callback workflow.
 */
function getWorkflow(credentials) {
    const providerName = credentials.mechanismProperties.PROVIDER_NAME;
    const workflow = exports.OIDC_WORKFLOWS.get(providerName || 'callback');
    if (!workflow) {
        throw new error_1.MongoInvalidArgumentError(`Could not load workflow for provider ${credentials.mechanismProperties.PROVIDER_NAME}`);
    }
    return workflow;
}
//# sourceMappingURL=mongodb_oidc.js.map
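A sketch of the PROVIDER_NAME dispatch performed by getWorkflow above (the providerName value is illustrative):

const { OIDC_WORKFLOWS } = require('./mongodb_oidc');
OIDC_WORKFLOWS.get('aws');   // AwsServiceWorkflow — device workflow
OIDC_WORKFLOWS.get('azure'); // AzureServiceWorkflow — device workflow
const providerName = undefined; // no PROVIDER_NAME in mechanismProperties
OIDC_WORKFLOWS.get(providerName || 'callback'); // CallbackWorkflow fallback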
1
VISUALIZACION/node_modules/mongodb/lib/cmap/auth/mongodb_oidc.js.map
generated
vendored
Executable file
File diff suppressed because one or more lines are too long
30
VISUALIZACION/node_modules/mongodb/lib/cmap/auth/mongodb_oidc/aws_service_workflow.js
generated
vendored
Executable file
@ -0,0 +1,30 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.AwsServiceWorkflow = void 0;
const fs = require("fs");
const error_1 = require("../../../error");
const service_workflow_1 = require("./service_workflow");
/** Error for when the token is missing in the environment. */
const TOKEN_MISSING_ERROR = 'AWS_WEB_IDENTITY_TOKEN_FILE must be set in the environment.';
/**
 * Device workflow implementation for AWS.
 *
 * @internal
 */
class AwsServiceWorkflow extends service_workflow_1.ServiceWorkflow {
    constructor() {
        super();
    }
    /**
     * Get the token from the environment.
     */
    async getToken() {
        const tokenFile = process.env.AWS_WEB_IDENTITY_TOKEN_FILE;
        if (!tokenFile) {
            throw new error_1.MongoAWSError(TOKEN_MISSING_ERROR);
        }
        return fs.promises.readFile(tokenFile, 'utf8');
    }
}
exports.AwsServiceWorkflow = AwsServiceWorkflow;
//# sourceMappingURL=aws_service_workflow.js.map
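A usage sketch for the workflow above; the token file path is illustrative, not prescribed by the driver:

const { AwsServiceWorkflow } = require('./aws_service_workflow');
process.env.AWS_WEB_IDENTITY_TOKEN_FILE = '/var/run/secrets/tokens/oidc-token'; // hypothetical path
new AwsServiceWorkflow().getToken().then(token => {
    // token is the raw file contents, forwarded as the OIDC JWT
});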
1
VISUALIZACION/node_modules/mongodb/lib/cmap/auth/mongodb_oidc/aws_service_workflow.js.map
generated
vendored
Executable file
File diff suppressed because one or more lines are too long
73
VISUALIZACION/node_modules/mongodb/lib/cmap/auth/mongodb_oidc/azure_service_workflow.js
generated
vendored
Executable file
@ -0,0 +1,73 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.AzureServiceWorkflow = void 0;
const error_1 = require("../../../error");
const utils_1 = require("../../../utils");
const azure_token_cache_1 = require("./azure_token_cache");
const service_workflow_1 = require("./service_workflow");
/** Base URL for getting Azure tokens. */
const AZURE_BASE_URL = 'http://169.254.169.254/metadata/identity/oauth2/token?api-version=2018-02-01';
/** Azure request headers. */
const AZURE_HEADERS = Object.freeze({ Metadata: 'true', Accept: 'application/json' });
/** Invalid endpoint result error. */
const ENDPOINT_RESULT_ERROR = 'Azure endpoint did not return a value with only access_token and expires_in properties';
/** Error for when the token audience is missing in the environment. */
const TOKEN_AUDIENCE_MISSING_ERROR = 'TOKEN_AUDIENCE must be set in the auth mechanism properties when PROVIDER_NAME is azure.';
/**
 * Device workflow implementation for Azure.
 *
 * @internal
 */
class AzureServiceWorkflow extends service_workflow_1.ServiceWorkflow {
    constructor() {
        super(...arguments);
        this.cache = new azure_token_cache_1.AzureTokenCache();
    }
    /**
     * Get the token from the environment.
     */
    async getToken(credentials) {
        const tokenAudience = credentials?.mechanismProperties.TOKEN_AUDIENCE;
        if (!tokenAudience) {
            throw new error_1.MongoAzureError(TOKEN_AUDIENCE_MISSING_ERROR);
        }
        let token;
        const entry = this.cache.getEntry(tokenAudience);
        if (entry?.isValid()) {
            token = entry.token;
        }
        else {
            this.cache.deleteEntry(tokenAudience);
            const response = await getAzureTokenData(tokenAudience);
            if (!isEndpointResultValid(response)) {
                throw new error_1.MongoAzureError(ENDPOINT_RESULT_ERROR);
            }
            this.cache.addEntry(tokenAudience, response);
            token = response.access_token;
        }
        return token;
    }
}
exports.AzureServiceWorkflow = AzureServiceWorkflow;
/**
 * Hit the Azure endpoint to get the token data.
 */
async function getAzureTokenData(tokenAudience) {
    const url = `${AZURE_BASE_URL}&resource=${tokenAudience}`;
    const data = await (0, utils_1.request)(url, {
        json: true,
        headers: AZURE_HEADERS
    });
    return data;
}
/**
 * Determines if a result returned from the endpoint is valid.
 * This means the result is not nullish, contains the access_token required field
 * and the expires_in required field.
 */
function isEndpointResultValid(token) {
    if (token == null || typeof token !== 'object')
        return false;
    return 'access_token' in token && 'expires_in' in token;
}
//# sourceMappingURL=azure_service_workflow.js.map
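isEndpointResultValid above is module-private; its acceptance rule in examples (token values illustrative):

isEndpointResultValid({ access_token: 'abc', expires_in: 3600 }); // true — both fields present
isEndpointResultValid({ access_token: 'abc' });                   // false — expires_in missing
isEndpointResultValid(null);                                      // false — nullish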
1
VISUALIZACION/node_modules/mongodb/lib/cmap/auth/mongodb_oidc/azure_service_workflow.js.map
generated
vendored
Executable file
File diff suppressed because one or more lines are too long
49
VISUALIZACION/node_modules/mongodb/lib/cmap/auth/mongodb_oidc/azure_token_cache.js
generated
vendored
Executable file
@ -0,0 +1,49 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.AzureTokenCache = exports.AzureTokenEntry = void 0;
const cache_1 = require("./cache");
/** @internal */
class AzureTokenEntry extends cache_1.ExpiringCacheEntry {
    /**
     * Instantiate the entry.
     */
    constructor(token, expiration) {
        super(expiration);
        this.token = token;
    }
}
exports.AzureTokenEntry = AzureTokenEntry;
/**
 * A cache of access tokens from Azure.
 * @internal
 */
class AzureTokenCache extends cache_1.Cache {
    /**
     * Add an entry to the cache.
     */
    addEntry(tokenAudience, token) {
        const entry = new AzureTokenEntry(token.access_token, token.expires_in);
        this.entries.set(tokenAudience, entry);
        return entry;
    }
    /**
     * Create a cache key.
     */
    cacheKey(tokenAudience) {
        return tokenAudience;
    }
    /**
     * Delete an entry from the cache.
     */
    deleteEntry(tokenAudience) {
        this.entries.delete(tokenAudience);
    }
    /**
     * Get an Azure token entry from the cache.
     */
    getEntry(tokenAudience) {
        return this.entries.get(tokenAudience);
    }
}
exports.AzureTokenCache = AzureTokenCache;
//# sourceMappingURL=azure_token_cache.js.map
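A round-trip sketch for the cache above (audience and token values are illustrative):

const { AzureTokenCache } = require('./azure_token_cache');
const cache = new AzureTokenCache();
// Token payload shaped like the Azure IMDS response.
cache.addEntry('https://example.audience', { access_token: 'abc', expires_in: 3600 });
cache.getEntry('https://example.audience')?.isValid(); // true while > 5 minutes remain
cache.deleteEntry('https://example.audience');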
1
VISUALIZACION/node_modules/mongodb/lib/cmap/auth/mongodb_oidc/azure_token_cache.js.map
generated
vendored
Executable file
File diff suppressed because one or more lines are too long
55
VISUALIZACION/node_modules/mongodb/lib/cmap/auth/mongodb_oidc/cache.js
generated
vendored
Executable file
@ -0,0 +1,55 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.Cache = exports.ExpiringCacheEntry = void 0;
/* 5 minutes in milliseconds */
const EXPIRATION_BUFFER_MS = 300000;
/**
 * An entry in a cache that can expire in a certain amount of time.
 */
class ExpiringCacheEntry {
    /**
     * Create a new expiring token entry.
     */
    constructor(expiration) {
        this.expiration = this.expirationTime(expiration);
    }
    /**
     * The entry is still valid if it expires more than
     * 5 minutes from now.
     */
    isValid() {
        return this.expiration - Date.now() > EXPIRATION_BUFFER_MS;
    }
    /**
     * Get an expiration time in milliseconds past epoch.
     */
    expirationTime(expiresInSeconds) {
        return Date.now() + expiresInSeconds * 1000;
    }
}
exports.ExpiringCacheEntry = ExpiringCacheEntry;
/**
 * Base class for OIDC caches.
 */
class Cache {
    /**
     * Create a new cache.
     */
    constructor() {
        this.entries = new Map();
    }
    /**
     * Clear the cache.
     */
    clear() {
        this.entries.clear();
    }
    /**
     * Create a cache key from the address and username.
     */
    hashedCacheKey(address, username, callbackHash) {
        return JSON.stringify([address, username, callbackHash]);
    }
}
exports.Cache = Cache;
//# sourceMappingURL=cache.js.map
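The validity window above is plain arithmetic: an entry built from expires_in seconds stays valid while more than 300000 ms remain before expiration. A sketch (ExpiringCacheEntry is abstract in the TypeScript source, so it is subclassed here only for illustration):

const { ExpiringCacheEntry } = require('./cache');
class Entry extends ExpiringCacheEntry {
}
new Entry(3600).isValid(); // true — 3600000 ms remain, well past the 300000 ms buffer
new Entry(120).isValid();  // false — only 120000 ms remain, inside the buffer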
1
VISUALIZACION/node_modules/mongodb/lib/cmap/auth/mongodb_oidc/cache.js.map
generated
vendored
Executable file
File diff suppressed because one or more lines are too long
89
VISUALIZACION/node_modules/mongodb/lib/cmap/auth/mongodb_oidc/callback_lock_cache.js
generated
vendored
Executable file
@ -0,0 +1,89 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.CallbackLockCache = void 0;
const error_1 = require("../../../error");
const cache_1 = require("./cache");
/** Error message for when request callback is missing. */
const REQUEST_CALLBACK_REQUIRED_ERROR = 'Auth mechanism property REQUEST_TOKEN_CALLBACK is required.';
/* Counter for function "hashes". */
let FN_HASH_COUNTER = 0;
/* No function present function */
const NO_FUNCTION = async () => ({ accessToken: 'test' });
/* The map of function hashes */
const FN_HASHES = new WeakMap();
/* Put the no function hash in the map. */
FN_HASHES.set(NO_FUNCTION, FN_HASH_COUNTER);
/**
 * A cache of request and refresh callbacks per server/user.
 */
class CallbackLockCache extends cache_1.Cache {
    /**
     * Get the callbacks for the connection and credentials. If an entry does not
     * exist a new one will get set.
     */
    getEntry(connection, credentials) {
        const requestCallback = credentials.mechanismProperties.REQUEST_TOKEN_CALLBACK;
        const refreshCallback = credentials.mechanismProperties.REFRESH_TOKEN_CALLBACK;
        if (!requestCallback) {
            throw new error_1.MongoInvalidArgumentError(REQUEST_CALLBACK_REQUIRED_ERROR);
        }
        const callbackHash = hashFunctions(requestCallback, refreshCallback);
        const key = this.cacheKey(connection.address, credentials.username, callbackHash);
        const entry = this.entries.get(key);
        if (entry) {
            return entry;
        }
        return this.addEntry(key, callbackHash, requestCallback, refreshCallback);
    }
    /**
     * Set locked callbacks for a connection and credentials.
     */
    addEntry(key, callbackHash, requestCallback, refreshCallback) {
        const entry = {
            requestCallback: withLock(requestCallback),
            refreshCallback: refreshCallback ? withLock(refreshCallback) : undefined,
            callbackHash: callbackHash
        };
        this.entries.set(key, entry);
        return entry;
    }
    /**
     * Create a cache key from the address and username.
     */
    cacheKey(address, username, callbackHash) {
        return this.hashedCacheKey(address, username, callbackHash);
    }
}
exports.CallbackLockCache = CallbackLockCache;
/**
 * Ensure the callback is only executed one at a time.
 */
function withLock(callback) {
    let lock = Promise.resolve();
    return async (info, context) => {
        await lock;
        lock = lock.then(() => callback(info, context));
        return lock;
    };
}
/**
 * Get the hash string for the request and refresh functions.
 */
function hashFunctions(requestFn, refreshFn) {
    let requestHash = FN_HASHES.get(requestFn);
    let refreshHash = FN_HASHES.get(refreshFn ?? NO_FUNCTION);
    if (requestHash == null) {
        // Create a new one for the function and put it in the map.
        FN_HASH_COUNTER++;
        requestHash = FN_HASH_COUNTER;
        FN_HASHES.set(requestFn, FN_HASH_COUNTER);
    }
    if (refreshHash == null && refreshFn) {
        // Create a new one for the function and put it in the map.
        FN_HASH_COUNTER++;
        refreshHash = FN_HASH_COUNTER;
        FN_HASHES.set(refreshFn, FN_HASH_COUNTER);
    }
    return `${requestHash}-${refreshHash}`;
}
//# sourceMappingURL=callback_lock_cache.js.map
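withLock above chains every call onto one promise so user callbacks never overlap; a self-contained sketch of the effect (the counter and timings are illustrative):

function withLock(callback) {
    let lock = Promise.resolve();
    return async (...args) => {
        await lock;
        lock = lock.then(() => callback(...args));
        return lock;
    };
}
let running = 0;
const locked = withLock(async () => {
    if (++running > 1) throw new Error('callbacks overlapped');
    await new Promise(resolve => setTimeout(resolve, 10));
    running--;
});
// Three concurrent calls execute strictly one after another.
Promise.all([locked(), locked(), locked()]);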
1
VISUALIZACION/node_modules/mongodb/lib/cmap/auth/mongodb_oidc/callback_lock_cache.js.map
generated
vendored
Executable file
File diff suppressed because one or more lines are too long
204
VISUALIZACION/node_modules/mongodb/lib/cmap/auth/mongodb_oidc/callback_workflow.js
generated
vendored
Executable file
@ -0,0 +1,204 @@
|||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.CallbackWorkflow = void 0;
|
||||
const bson_1 = require("bson");
|
||||
const error_1 = require("../../../error");
|
||||
const utils_1 = require("../../../utils");
|
||||
const providers_1 = require("../providers");
|
||||
const callback_lock_cache_1 = require("./callback_lock_cache");
|
||||
const token_entry_cache_1 = require("./token_entry_cache");
|
||||
/** The current version of OIDC implementation. */
|
||||
const OIDC_VERSION = 0;
|
||||
/** 5 minutes in seconds */
|
||||
const TIMEOUT_S = 300;
|
||||
/** Properties allowed on results of callbacks. */
|
||||
const RESULT_PROPERTIES = ['accessToken', 'expiresInSeconds', 'refreshToken'];
|
||||
/** Error message when the callback result is invalid. */
|
||||
const CALLBACK_RESULT_ERROR = 'User provided OIDC callbacks must return a valid object with an accessToken.';
|
||||
/**
|
||||
* OIDC implementation of a callback based workflow.
|
||||
* @internal
|
||||
*/
|
||||
class CallbackWorkflow {
|
||||
/**
|
||||
* Instantiate the workflow
|
||||
*/
|
||||
constructor() {
|
||||
this.cache = new token_entry_cache_1.TokenEntryCache();
|
||||
this.callbackCache = new callback_lock_cache_1.CallbackLockCache();
|
||||
}
|
||||
/**
|
||||
* Get the document to add for speculative authentication. This also needs
|
||||
* to add a db field from the credentials source.
|
||||
*/
|
||||
async speculativeAuth(credentials) {
|
||||
const document = startCommandDocument(credentials);
|
||||
document.db = credentials.source;
|
||||
return { speculativeAuthenticate: document };
|
||||
}
|
||||
/**
|
||||
* Execute the OIDC callback workflow.
|
||||
*/
|
||||
async execute(connection, credentials, reauthenticating, response) {
|
||||
// Get the callbacks with locks from the callback lock cache.
|
||||
const { requestCallback, refreshCallback, callbackHash } = this.callbackCache.getEntry(connection, credentials);
|
||||
// Look for an existing entry in the cache.
|
||||
const entry = this.cache.getEntry(connection.address, credentials.username, callbackHash);
|
||||
let result;
|
||||
if (entry) {
|
||||
// Reauthentication cannot use a token from the cache since the server has
|
||||
// stated it is invalid by the request for reauthentication.
|
||||
if (entry.isValid() && !reauthenticating) {
|
||||
// Presence of a valid cache entry means we can skip to the finishing step.
|
||||
result = await this.finishAuthentication(connection, credentials, entry.tokenResult, response?.speculativeAuthenticate?.conversationId);
|
||||
}
|
||||
else {
|
||||
// Presence of an expired cache entry means we must fetch a new one and
|
||||
// then execute the final step.
|
||||
const tokenResult = await this.fetchAccessToken(connection, credentials, entry.serverInfo, reauthenticating, callbackHash, requestCallback, refreshCallback);
|
||||
try {
|
||||
                    result = await this.finishAuthentication(connection, credentials, tokenResult, reauthenticating ? undefined : response?.speculativeAuthenticate?.conversationId);
                }
                catch (error) {
                    // If we are reauthenticating and this errors with reauthentication
                    // required, we need to do the entire process over again and clear
                    // the cache entry.
                    if (reauthenticating &&
                        error instanceof error_1.MongoError &&
                        error.code === error_1.MONGODB_ERROR_CODES.Reauthenticate) {
                        this.cache.deleteEntry(connection.address, credentials.username, callbackHash);
                        result = await this.execute(connection, credentials, reauthenticating);
                    }
                    else {
                        throw error;
                    }
                }
            }
        }
        else {
            // Having no entry in the cache requires us to do all authentication steps
            // from start to finish, including getting a fresh token for the cache.
            const startDocument = await this.startAuthentication(connection, credentials, reauthenticating, response);
            const conversationId = startDocument.conversationId;
            const serverResult = bson_1.BSON.deserialize(startDocument.payload.buffer);
            const tokenResult = await this.fetchAccessToken(connection, credentials, serverResult, reauthenticating, callbackHash, requestCallback, refreshCallback);
            result = await this.finishAuthentication(connection, credentials, tokenResult, conversationId);
        }
        return result;
    }
    /**
     * Starts the callback authentication process. If there is a speculative
     * authentication document from the initial handshake, then we will use that
     * value to get the issuer, otherwise we will send the saslStart command.
     */
    async startAuthentication(connection, credentials, reauthenticating, response) {
        let result;
        if (!reauthenticating && response?.speculativeAuthenticate) {
            result = response.speculativeAuthenticate;
        }
        else {
            result = await connection.commandAsync((0, utils_1.ns)(credentials.source), startCommandDocument(credentials), undefined);
        }
        return result;
    }
    /**
     * Finishes the callback authentication process.
     */
    async finishAuthentication(connection, credentials, tokenResult, conversationId) {
        const result = await connection.commandAsync((0, utils_1.ns)(credentials.source), finishCommandDocument(tokenResult.accessToken, conversationId), undefined);
        return result;
    }
    /**
     * Fetches an access token using either the request or refresh callbacks and
     * puts it in the cache.
     */
    async fetchAccessToken(connection, credentials, serverInfo, reauthenticating, callbackHash, requestCallback, refreshCallback) {
        // Get the token from the cache.
        const entry = this.cache.getEntry(connection.address, credentials.username, callbackHash);
        let result;
        const context = { timeoutSeconds: TIMEOUT_S, version: OIDC_VERSION };
        // Check if there's a token in the cache.
        if (entry) {
            // If the cache entry is valid, return the token result.
            if (entry.isValid() && !reauthenticating) {
                return entry.tokenResult;
            }
            // If the cache entry is not valid, remove it from the cache and first attempt
            // to use the refresh callback to get a new token. If no refresh callback
            // exists, then fall back to the request callback.
            if (refreshCallback) {
                context.refreshToken = entry.tokenResult.refreshToken;
                result = await refreshCallback(serverInfo, context);
            }
            else {
                result = await requestCallback(serverInfo, context);
            }
        }
        else {
            // With no token in the cache we use the request callback.
            result = await requestCallback(serverInfo, context);
        }
        // Validate that the result returned by the callback is acceptable. If it is not
        // we must clear the token result from the cache.
        if (isCallbackResultInvalid(result)) {
            this.cache.deleteEntry(connection.address, credentials.username, callbackHash);
            throw new error_1.MongoMissingCredentialsError(CALLBACK_RESULT_ERROR);
        }
        // Clean up expired entries in the cache.
        this.cache.deleteExpiredEntries();
        // Put the new entry into the cache.
        this.cache.addEntry(connection.address, credentials.username || '', callbackHash, result, serverInfo);
        return result;
    }
}
exports.CallbackWorkflow = CallbackWorkflow;
/**
 * Generate the finishing command document for authentication. Will be a
 * saslStart or saslContinue depending on the presence of a conversation id.
 */
function finishCommandDocument(token, conversationId) {
    if (conversationId != null && typeof conversationId === 'number') {
        return {
            saslContinue: 1,
            conversationId: conversationId,
            payload: new bson_1.Binary(bson_1.BSON.serialize({ jwt: token }))
        };
    }
    // saslContinue requires a conversationId in the command to be valid, so in this
    // case the server allows "step two" to actually be a saslStart with the token
    // as the jwt, since the use of the cached value has no correlating conversation
    // on the particular connection.
    return {
        saslStart: 1,
        mechanism: providers_1.AuthMechanism.MONGODB_OIDC,
        payload: new bson_1.Binary(bson_1.BSON.serialize({ jwt: token }))
    };
}
/**
 * Determines if a result returned from a request or refresh callback
 * function is invalid. A result is invalid when it is nullish, is missing
 * the required accessToken field, or contains extra fields.
 */
function isCallbackResultInvalid(tokenResult) {
    if (tokenResult == null || typeof tokenResult !== 'object')
        return true;
    if (!('accessToken' in tokenResult))
        return true;
    return !Object.getOwnPropertyNames(tokenResult).every(prop => RESULT_PROPERTIES.includes(prop));
}
/**
 * Generate the saslStart command document.
 */
function startCommandDocument(credentials) {
    const payload = {};
    if (credentials.username) {
        payload.n = credentials.username;
    }
    return {
        saslStart: 1,
        autoAuthorize: 1,
        mechanism: providers_1.AuthMechanism.MONGODB_OIDC,
        payload: new bson_1.Binary(bson_1.BSON.serialize(payload))
    };
}
//# sourceMappingURL=callback_workflow.js.map
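Editor's note: the request and refresh callbacks consumed by this workflow are supplied by the application. A minimal sketch of wiring them up is below; treat the authMechanismProperties key names and the fetchTokenFromIdp() helper as assumptions (they are not shown in this diff), and note the returned object may only carry the fields the RESULT_PROPERTIES whitelist accepts.

// Sketch only: fetchTokenFromIdp() is hypothetical, and the property names
// (REQUEST_TOKEN_CALLBACK / REFRESH_TOKEN_CALLBACK) are assumed for this
// driver generation's experimental OIDC support.
const { MongoClient } = require('mongodb');

async function requestTokenCallback(serverInfo, context) {
    // serverInfo carries issuer details from the server's saslStart reply;
    // context.timeoutSeconds and context.version come from the workflow above.
    const token = await fetchTokenFromIdp(serverInfo, context.timeoutSeconds);
    // Shape must pass isCallbackResultInvalid(): accessToken is required and
    // any unknown extra field would invalidate the result.
    return { accessToken: token.jwt, expiresInSeconds: token.ttl, refreshToken: token.refresh };
}

const client = new MongoClient('mongodb://host/?authMechanism=MONGODB-OIDC', {
    authMechanismProperties: { REQUEST_TOKEN_CALLBACK: requestTokenCallback }
});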
1
VISUALIZACION/node_modules/mongodb/lib/cmap/auth/mongodb_oidc/callback_workflow.js.map
generated
vendored
Executable file
File diff suppressed because one or more lines are too long
43
VISUALIZACION/node_modules/mongodb/lib/cmap/auth/mongodb_oidc/service_workflow.js
generated
vendored
Executable file
@@ -0,0 +1,43 @@
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.commandDocument = exports.ServiceWorkflow = void 0;
|
||||
const bson_1 = require("bson");
|
||||
const utils_1 = require("../../../utils");
|
||||
const providers_1 = require("../providers");
|
||||
/**
|
||||
* Common behaviour for OIDC device workflows.
|
||||
* @internal
|
||||
*/
|
||||
class ServiceWorkflow {
|
||||
/**
|
||||
* Execute the workflow. Looks for AWS_WEB_IDENTITY_TOKEN_FILE in the environment
|
||||
* and then attempts to read the token from that path.
|
||||
*/
|
||||
async execute(connection, credentials) {
|
||||
const token = await this.getToken(credentials);
|
||||
const command = commandDocument(token);
|
||||
return connection.commandAsync((0, utils_1.ns)(credentials.source), command, undefined);
|
||||
}
|
||||
/**
|
||||
* Get the document to add for speculative authentication.
|
||||
*/
|
||||
async speculativeAuth(credentials) {
|
||||
const token = await this.getToken(credentials);
|
||||
const document = commandDocument(token);
|
||||
document.db = credentials.source;
|
||||
return { speculativeAuthenticate: document };
|
||||
}
|
||||
}
|
||||
exports.ServiceWorkflow = ServiceWorkflow;
|
||||
/**
|
||||
* Create the saslStart command document.
|
||||
*/
|
||||
function commandDocument(token) {
|
||||
return {
|
||||
saslStart: 1,
|
||||
mechanism: providers_1.AuthMechanism.MONGODB_OIDC,
|
||||
payload: bson_1.BSON.serialize({ jwt: token })
|
||||
};
|
||||
}
|
||||
exports.commandDocument = commandDocument;
|
||||
//# sourceMappingURL=service_workflow.js.map
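Editor's note: ServiceWorkflow is abstract; subclasses supply getToken(). A sketch of a concrete workflow that resolves the token exactly as the execute() docstring describes is below. The class name is illustrative; the driver ships its own AWS device workflow.

// Illustrative subclass, not the driver's shipped implementation.
const fs = require('fs/promises');

class AwsFileTokenWorkflow extends ServiceWorkflow {
    // Read the JWT from the path named in AWS_WEB_IDENTITY_TOKEN_FILE.
    async getToken() {
        const tokenFile = process.env.AWS_WEB_IDENTITY_TOKEN_FILE;
        if (tokenFile == null) {
            throw new Error('AWS_WEB_IDENTITY_TOKEN_FILE must be set');
        }
        return (await fs.readFile(tokenFile, 'utf8')).trim();
    }
}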
1
VISUALIZACION/node_modules/mongodb/lib/cmap/auth/mongodb_oidc/service_workflow.js.map
generated
vendored
Executable file
@@ -0,0 +1 @@
{"version":3,"file":"service_workflow.js","sourceRoot":"","sources":["../../../../src/cmap/auth/mongodb_oidc/service_workflow.ts"],"names":[],"mappings":";;;AAAA,+BAA2C;AAE3C,0CAAoC;AAIpC,4CAA6C;AAE7C;;;GAGG;AACH,MAAsB,eAAe;IACnC;;;OAGG;IACH,KAAK,CAAC,OAAO,CAAC,UAAsB,EAAE,WAA6B;QACjE,MAAM,KAAK,GAAG,MAAM,IAAI,CAAC,QAAQ,CAAC,WAAW,CAAC,CAAC;QAC/C,MAAM,OAAO,GAAG,eAAe,CAAC,KAAK,CAAC,CAAC;QACvC,OAAO,UAAU,CAAC,YAAY,CAAC,IAAA,UAAE,EAAC,WAAW,CAAC,MAAM,CAAC,EAAE,OAAO,EAAE,SAAS,CAAC,CAAC;IAC7E,CAAC;IAED;;OAEG;IACH,KAAK,CAAC,eAAe,CAAC,WAA6B;QACjD,MAAM,KAAK,GAAG,MAAM,IAAI,CAAC,QAAQ,CAAC,WAAW,CAAC,CAAC;QAC/C,MAAM,QAAQ,GAAG,eAAe,CAAC,KAAK,CAAC,CAAC;QACxC,QAAQ,CAAC,EAAE,GAAG,WAAW,CAAC,MAAM,CAAC;QACjC,OAAO,EAAE,uBAAuB,EAAE,QAAQ,EAAE,CAAC;IAC/C,CAAC;CAMF;AAzBD,0CAyBC;AAED;;GAEG;AACH,SAAgB,eAAe,CAAC,KAAa;IAC3C,OAAO;QACL,SAAS,EAAE,CAAC;QACZ,SAAS,EAAE,yBAAa,CAAC,YAAY;QACrC,OAAO,EAAE,WAAI,CAAC,SAAS,CAAC,EAAE,GAAG,EAAE,KAAK,EAAE,CAAC;KACxC,CAAC;AACJ,CAAC;AAND,0CAMC"}
62
VISUALIZACION/node_modules/mongodb/lib/cmap/auth/mongodb_oidc/token_entry_cache.js
generated
vendored
Executable file
@@ -0,0 +1,62 @@
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.TokenEntryCache = exports.TokenEntry = void 0;
|
||||
const cache_1 = require("./cache");
|
||||
/* Default expiration is now for when no expiration provided */
|
||||
const DEFAULT_EXPIRATION_SECS = 0;
|
||||
/** @internal */
|
||||
class TokenEntry extends cache_1.ExpiringCacheEntry {
|
||||
/**
|
||||
* Instantiate the entry.
|
||||
*/
|
||||
constructor(tokenResult, serverInfo, expiration) {
|
||||
super(expiration);
|
||||
this.tokenResult = tokenResult;
|
||||
this.serverInfo = serverInfo;
|
||||
}
|
||||
}
|
||||
exports.TokenEntry = TokenEntry;
|
||||
/**
|
||||
* Cache of OIDC token entries.
|
||||
* @internal
|
||||
*/
|
||||
class TokenEntryCache extends cache_1.Cache {
|
||||
/**
|
||||
* Set an entry in the token cache.
|
||||
*/
|
||||
addEntry(address, username, callbackHash, tokenResult, serverInfo) {
|
||||
const entry = new TokenEntry(tokenResult, serverInfo, tokenResult.expiresInSeconds ?? DEFAULT_EXPIRATION_SECS);
|
||||
this.entries.set(this.cacheKey(address, username, callbackHash), entry);
|
||||
return entry;
|
||||
}
|
||||
/**
|
||||
* Delete an entry from the cache.
|
||||
*/
|
||||
deleteEntry(address, username, callbackHash) {
|
||||
this.entries.delete(this.cacheKey(address, username, callbackHash));
|
||||
}
|
||||
/**
|
||||
* Get an entry from the cache.
|
||||
*/
|
||||
getEntry(address, username, callbackHash) {
|
||||
return this.entries.get(this.cacheKey(address, username, callbackHash));
|
||||
}
|
||||
/**
|
||||
* Delete all expired entries from the cache.
|
||||
*/
|
||||
deleteExpiredEntries() {
|
||||
for (const [key, entry] of this.entries) {
|
||||
if (!entry.isValid()) {
|
||||
this.entries.delete(key);
|
||||
}
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Create a cache key from the address and username.
|
||||
*/
|
||||
cacheKey(address, username, callbackHash) {
|
||||
return this.hashedCacheKey(address, username, callbackHash);
|
||||
}
|
||||
}
|
||||
exports.TokenEntryCache = TokenEntryCache;
|
||||
//# sourceMappingURL=token_entry_cache.js.map
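Editor's note: a short sketch of the lifecycle fetchAccessToken() drives through this cache. The address, username, and hash values are illustrative only, and requiring the module by relative path assumes code living inside the same package.

// Lifecycle sketch; all literal values below are illustrative.
const { TokenEntryCache } = require('./token_entry_cache');

const cache = new TokenEntryCache();
const hash = 'hash-of-request-and-refresh-callbacks';

// Without expiresInSeconds the entry gets DEFAULT_EXPIRATION_SECS = 0,
// so it is effectively born expired and forces a refetch on next use.
cache.addEntry('db.example.com:27017', 'app-user', hash, { accessToken: 'jwt...' }, {});

const entry = cache.getEntry('db.example.com:27017', 'app-user', hash);
if (!entry || !entry.isValid()) {
    cache.deleteExpiredEntries(); // the same sweep fetchAccessToken() runs before re-caching
}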
1
VISUALIZACION/node_modules/mongodb/lib/cmap/auth/mongodb_oidc/token_entry_cache.js.map
generated
vendored
Executable file
@@ -0,0 +1 @@
{"version":3,"file":"token_entry_cache.js","sourceRoot":"","sources":["../../../../src/cmap/auth/mongodb_oidc/token_entry_cache.ts"],"names":[],"mappings":";;;AACA,mCAAoD;AAEpD,+DAA+D;AAC/D,MAAM,uBAAuB,GAAG,CAAC,CAAC;AAElC,gBAAgB;AAChB,MAAa,UAAW,SAAQ,0BAAkB;IAIhD;;OAEG;IACH,YAAY,WAA8B,EAAE,UAAyB,EAAE,UAAkB;QACvF,KAAK,CAAC,UAAU,CAAC,CAAC;QAClB,IAAI,CAAC,WAAW,GAAG,WAAW,CAAC;QAC/B,IAAI,CAAC,UAAU,GAAG,UAAU,CAAC;IAC/B,CAAC;CACF;AAZD,gCAYC;AAED;;;GAGG;AACH,MAAa,eAAgB,SAAQ,aAAiB;IACpD;;OAEG;IACH,QAAQ,CACN,OAAe,EACf,QAAgB,EAChB,YAAoB,EACpB,WAA8B,EAC9B,UAAyB;QAEzB,MAAM,KAAK,GAAG,IAAI,UAAU,CAC1B,WAAW,EACX,UAAU,EACV,WAAW,CAAC,gBAAgB,IAAI,uBAAuB,CACxD,CAAC;QACF,IAAI,CAAC,OAAO,CAAC,GAAG,CAAC,IAAI,CAAC,QAAQ,CAAC,OAAO,EAAE,QAAQ,EAAE,YAAY,CAAC,EAAE,KAAK,CAAC,CAAC;QACxE,OAAO,KAAK,CAAC;IACf,CAAC;IAED;;OAEG;IACH,WAAW,CAAC,OAAe,EAAE,QAAgB,EAAE,YAAoB;QACjE,IAAI,CAAC,OAAO,CAAC,MAAM,CAAC,IAAI,CAAC,QAAQ,CAAC,OAAO,EAAE,QAAQ,EAAE,YAAY,CAAC,CAAC,CAAC;IACtE,CAAC;IAED;;OAEG;IACH,QAAQ,CAAC,OAAe,EAAE,QAAgB,EAAE,YAAoB;QAC9D,OAAO,IAAI,CAAC,OAAO,CAAC,GAAG,CAAC,IAAI,CAAC,QAAQ,CAAC,OAAO,EAAE,QAAQ,EAAE,YAAY,CAAC,CAAC,CAAC;IAC1E,CAAC;IAED;;OAEG;IACH,oBAAoB;QAClB,KAAK,MAAM,CAAC,GAAG,EAAE,KAAK,CAAC,IAAI,IAAI,CAAC,OAAO,EAAE;YACvC,IAAI,CAAC,KAAK,CAAC,OAAO,EAAE,EAAE;gBACpB,IAAI,CAAC,OAAO,CAAC,MAAM,CAAC,GAAG,CAAC,CAAC;aAC1B;SACF;IACH,CAAC;IAED;;OAEG;IACH,QAAQ,CAAC,OAAe,EAAE,QAAgB,EAAE,YAAoB;QAC9D,OAAO,IAAI,CAAC,cAAc,CAAC,OAAO,EAAE,QAAQ,EAAE,YAAY,CAAC,CAAC;IAC9D,CAAC;CACF;AAnDD,0CAmDC"}
26
VISUALIZACION/node_modules/mongodb/lib/cmap/auth/plain.js
generated
vendored
Executable file
@@ -0,0 +1,26 @@
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.Plain = void 0;
|
||||
const bson_1 = require("../../bson");
|
||||
const error_1 = require("../../error");
|
||||
const utils_1 = require("../../utils");
|
||||
const auth_provider_1 = require("./auth_provider");
|
||||
class Plain extends auth_provider_1.AuthProvider {
|
||||
async auth(authContext) {
|
||||
const { connection, credentials } = authContext;
|
||||
if (!credentials) {
|
||||
throw new error_1.MongoMissingCredentialsError('AuthContext must provide credentials.');
|
||||
}
|
||||
const { username, password } = credentials;
|
||||
const payload = new bson_1.Binary(Buffer.from(`\x00${username}\x00${password}`));
|
||||
const command = {
|
||||
saslStart: 1,
|
||||
mechanism: 'PLAIN',
|
||||
payload: payload,
|
||||
autoAuthorize: 1
|
||||
};
|
||||
await connection.commandAsync((0, utils_1.ns)('$external.$cmd'), command, undefined);
|
||||
}
|
||||
}
|
||||
exports.Plain = Plain;
|
||||
//# sourceMappingURL=plain.js.map
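Editor's note: the payload built above is the RFC 4616 PLAIN message, `authzid NUL authcid NUL passwd`, with an empty authorization identity. A standalone sketch of the byte layout, using illustrative credentials:

// RFC 4616 layout: [authzid] \x00 authcid \x00 passwd; the driver leaves authzid empty.
const username = 'app-user'; // illustrative credentials
const password = 's3cret';
const payload = Buffer.from(`\x00${username}\x00${password}`);
console.log(payload.length === 1 + username.length + 1 + password.length); // true for ASCII input
console.log(payload.toString('base64')); // base64 view of the bytes carried inside the BSON Binary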
1
VISUALIZACION/node_modules/mongodb/lib/cmap/auth/plain.js.map
generated
vendored
Executable file
@@ -0,0 +1 @@
{"version":3,"file":"plain.js","sourceRoot":"","sources":["../../../src/cmap/auth/plain.ts"],"names":[],"mappings":";;;AAAA,qCAAoC;AACpC,uCAA2D;AAC3D,uCAAiC;AACjC,mDAAiE;AAEjE,MAAa,KAAM,SAAQ,4BAAY;IAC5B,KAAK,CAAC,IAAI,CAAC,WAAwB;QAC1C,MAAM,EAAE,UAAU,EAAE,WAAW,EAAE,GAAG,WAAW,CAAC;QAChD,IAAI,CAAC,WAAW,EAAE;YAChB,MAAM,IAAI,oCAA4B,CAAC,uCAAuC,CAAC,CAAC;SACjF;QAED,MAAM,EAAE,QAAQ,EAAE,QAAQ,EAAE,GAAG,WAAW,CAAC;QAE3C,MAAM,OAAO,GAAG,IAAI,aAAM,CAAC,MAAM,CAAC,IAAI,CAAC,OAAO,QAAQ,OAAO,QAAQ,EAAE,CAAC,CAAC,CAAC;QAC1E,MAAM,OAAO,GAAG;YACd,SAAS,EAAE,CAAC;YACZ,SAAS,EAAE,OAAO;YAClB,OAAO,EAAE,OAAO;YAChB,aAAa,EAAE,CAAC;SACjB,CAAC;QAEF,MAAM,UAAU,CAAC,YAAY,CAAC,IAAA,UAAE,EAAC,gBAAgB,CAAC,EAAE,OAAO,EAAE,SAAS,CAAC,CAAC;IAC1E,CAAC;CACF;AAnBD,sBAmBC"}
24
VISUALIZACION/node_modules/mongodb/lib/cmap/auth/providers.js
generated
vendored
Executable file
@@ -0,0 +1,24 @@
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.AUTH_MECHS_AUTH_SRC_EXTERNAL = exports.AuthMechanism = void 0;
|
||||
/** @public */
|
||||
exports.AuthMechanism = Object.freeze({
|
||||
MONGODB_AWS: 'MONGODB-AWS',
|
||||
MONGODB_CR: 'MONGODB-CR',
|
||||
MONGODB_DEFAULT: 'DEFAULT',
|
||||
MONGODB_GSSAPI: 'GSSAPI',
|
||||
MONGODB_PLAIN: 'PLAIN',
|
||||
MONGODB_SCRAM_SHA1: 'SCRAM-SHA-1',
|
||||
MONGODB_SCRAM_SHA256: 'SCRAM-SHA-256',
|
||||
MONGODB_X509: 'MONGODB-X509',
|
||||
/** @experimental */
|
||||
MONGODB_OIDC: 'MONGODB-OIDC'
|
||||
});
|
||||
/** @internal */
|
||||
exports.AUTH_MECHS_AUTH_SRC_EXTERNAL = new Set([
|
||||
exports.AuthMechanism.MONGODB_GSSAPI,
|
||||
exports.AuthMechanism.MONGODB_AWS,
|
||||
exports.AuthMechanism.MONGODB_OIDC,
|
||||
exports.AuthMechanism.MONGODB_X509
|
||||
]);
|
||||
//# sourceMappingURL=providers.js.map
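Editor's note: the set above marks mechanisms whose credentials live outside MongoDB (Kerberos, AWS, OIDC, X.509), so they authenticate against $external. A sketch of how a consumer might apply it; defaultAuthSource() is a hypothetical helper and the fallback-to-admin rule is an assumption, not quoted from this file.

const { AuthMechanism, AUTH_MECHS_AUTH_SRC_EXTERNAL } = require('./providers');

// Hypothetical helper: pick the auth source for a given mechanism.
function defaultAuthSource(mechanism, dbName) {
    return AUTH_MECHS_AUTH_SRC_EXTERNAL.has(mechanism) ? '$external' : dbName ?? 'admin';
}

console.log(defaultAuthSource(AuthMechanism.MONGODB_X509));                // "$external"
console.log(defaultAuthSource(AuthMechanism.MONGODB_SCRAM_SHA256, 'app')); // "app"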
1
VISUALIZACION/node_modules/mongodb/lib/cmap/auth/providers.js.map
generated
vendored
Executable file
@@ -0,0 +1 @@
{"version":3,"file":"providers.js","sourceRoot":"","sources":["../../../src/cmap/auth/providers.ts"],"names":[],"mappings":";;;AAAA,cAAc;AACD,QAAA,aAAa,GAAG,MAAM,CAAC,MAAM,CAAC;IACzC,WAAW,EAAE,aAAa;IAC1B,UAAU,EAAE,YAAY;IACxB,eAAe,EAAE,SAAS;IAC1B,cAAc,EAAE,QAAQ;IACxB,aAAa,EAAE,OAAO;IACtB,kBAAkB,EAAE,aAAa;IACjC,oBAAoB,EAAE,eAAe;IACrC,YAAY,EAAE,cAAc;IAC5B,oBAAoB;IACpB,YAAY,EAAE,cAAc;CACpB,CAAC,CAAC;AAKZ,gBAAgB;AACH,QAAA,4BAA4B,GAAG,IAAI,GAAG,CAAgB;IACjE,qBAAa,CAAC,cAAc;IAC5B,qBAAa,CAAC,WAAW;IACzB,qBAAa,CAAC,YAAY;IAC1B,qBAAa,CAAC,YAAY;CAC3B,CAAC,CAAC"}
262
VISUALIZACION/node_modules/mongodb/lib/cmap/auth/scram.js
generated
vendored
Executable file
@@ -0,0 +1,262 @@
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.ScramSHA256 = exports.ScramSHA1 = void 0;
|
||||
const crypto = require("crypto");
|
||||
const util_1 = require("util");
|
||||
const bson_1 = require("../../bson");
|
||||
const deps_1 = require("../../deps");
|
||||
const error_1 = require("../../error");
|
||||
const utils_1 = require("../../utils");
|
||||
const auth_provider_1 = require("./auth_provider");
|
||||
const providers_1 = require("./providers");
|
||||
class ScramSHA extends auth_provider_1.AuthProvider {
|
||||
constructor(cryptoMethod) {
|
||||
super();
|
||||
this.cryptoMethod = cryptoMethod || 'sha1';
|
||||
this.randomBytesAsync = (0, util_1.promisify)(crypto.randomBytes);
|
||||
}
|
||||
async prepare(handshakeDoc, authContext) {
|
||||
const cryptoMethod = this.cryptoMethod;
|
||||
const credentials = authContext.credentials;
|
||||
if (!credentials) {
|
||||
throw new error_1.MongoMissingCredentialsError('AuthContext must provide credentials.');
|
||||
}
|
||||
if (cryptoMethod === 'sha256' &&
|
||||
('kModuleError' in deps_1.saslprep || typeof deps_1.saslprep !== 'function')) {
|
||||
(0, utils_1.emitWarning)('Warning: no saslprep library specified. Passwords will not be sanitized');
|
||||
}
|
||||
const nonce = await this.randomBytesAsync(24);
|
||||
// store the nonce for later use
|
||||
authContext.nonce = nonce;
|
||||
const request = {
|
||||
...handshakeDoc,
|
||||
speculativeAuthenticate: {
|
||||
...makeFirstMessage(cryptoMethod, credentials, nonce),
|
||||
db: credentials.source
|
||||
}
|
||||
};
|
||||
return request;
|
||||
}
|
||||
async auth(authContext) {
|
||||
const { reauthenticating, response } = authContext;
|
||||
if (response?.speculativeAuthenticate && !reauthenticating) {
|
||||
return continueScramConversation(this.cryptoMethod, response.speculativeAuthenticate, authContext);
|
||||
}
|
||||
return executeScram(this.cryptoMethod, authContext);
|
||||
}
|
||||
}
|
||||
function cleanUsername(username) {
|
||||
return username.replace('=', '=3D').replace(',', '=2C');
|
||||
}
|
||||
function clientFirstMessageBare(username, nonce) {
|
||||
// NOTE: This is done b/c Javascript uses UTF-16, but the server is hashing in UTF-8.
|
||||
// Since the username is not sasl-prep-d, we need to do this here.
|
||||
return Buffer.concat([
|
||||
Buffer.from('n=', 'utf8'),
|
||||
Buffer.from(username, 'utf8'),
|
||||
Buffer.from(',r=', 'utf8'),
|
||||
Buffer.from(nonce.toString('base64'), 'utf8')
|
||||
]);
|
||||
}
|
||||
function makeFirstMessage(cryptoMethod, credentials, nonce) {
|
||||
const username = cleanUsername(credentials.username);
|
||||
const mechanism = cryptoMethod === 'sha1' ? providers_1.AuthMechanism.MONGODB_SCRAM_SHA1 : providers_1.AuthMechanism.MONGODB_SCRAM_SHA256;
|
||||
// NOTE: This is done b/c Javascript uses UTF-16, but the server is hashing in UTF-8.
|
||||
// Since the username is not sasl-prep-d, we need to do this here.
|
||||
return {
|
||||
saslStart: 1,
|
||||
mechanism,
|
||||
payload: new bson_1.Binary(Buffer.concat([Buffer.from('n,,', 'utf8'), clientFirstMessageBare(username, nonce)])),
|
||||
autoAuthorize: 1,
|
||||
options: { skipEmptyExchange: true }
|
||||
};
|
||||
}
|
||||
async function executeScram(cryptoMethod, authContext) {
|
||||
const { connection, credentials } = authContext;
|
||||
if (!credentials) {
|
||||
throw new error_1.MongoMissingCredentialsError('AuthContext must provide credentials.');
|
||||
}
|
||||
if (!authContext.nonce) {
|
||||
throw new error_1.MongoInvalidArgumentError('AuthContext must contain a valid nonce property');
|
||||
}
|
||||
const nonce = authContext.nonce;
|
||||
const db = credentials.source;
|
||||
const saslStartCmd = makeFirstMessage(cryptoMethod, credentials, nonce);
|
||||
const response = await connection.commandAsync((0, utils_1.ns)(`${db}.$cmd`), saslStartCmd, undefined);
|
||||
await continueScramConversation(cryptoMethod, response, authContext);
|
||||
}
|
||||
async function continueScramConversation(cryptoMethod, response, authContext) {
|
||||
const connection = authContext.connection;
|
||||
const credentials = authContext.credentials;
|
||||
if (!credentials) {
|
||||
throw new error_1.MongoMissingCredentialsError('AuthContext must provide credentials.');
|
||||
}
|
||||
if (!authContext.nonce) {
|
||||
throw new error_1.MongoInvalidArgumentError('Unable to continue SCRAM without valid nonce');
|
||||
}
|
||||
const nonce = authContext.nonce;
|
||||
const db = credentials.source;
|
||||
const username = cleanUsername(credentials.username);
|
||||
const password = credentials.password;
|
||||
let processedPassword;
|
||||
if (cryptoMethod === 'sha256') {
|
||||
processedPassword =
|
||||
'kModuleError' in deps_1.saslprep || typeof deps_1.saslprep !== 'function' ? password : (0, deps_1.saslprep)(password);
|
||||
}
|
||||
else {
|
||||
processedPassword = passwordDigest(username, password);
|
||||
}
|
||||
const payload = Buffer.isBuffer(response.payload)
|
||||
? new bson_1.Binary(response.payload)
|
||||
: response.payload;
|
||||
const dict = parsePayload(payload.value());
|
||||
const iterations = parseInt(dict.i, 10);
|
||||
if (iterations && iterations < 4096) {
|
||||
// TODO(NODE-3483)
|
||||
throw new error_1.MongoRuntimeError(`Server returned an invalid iteration count ${iterations}`);
|
||||
}
|
||||
const salt = dict.s;
|
||||
const rnonce = dict.r;
|
||||
if (rnonce.startsWith('nonce')) {
|
||||
// TODO(NODE-3483)
|
||||
throw new error_1.MongoRuntimeError(`Server returned an invalid nonce: ${rnonce}`);
|
||||
}
|
||||
// Set up start of proof
|
||||
const withoutProof = `c=biws,r=${rnonce}`;
|
||||
const saltedPassword = HI(processedPassword, Buffer.from(salt, 'base64'), iterations, cryptoMethod);
|
||||
const clientKey = HMAC(cryptoMethod, saltedPassword, 'Client Key');
|
||||
const serverKey = HMAC(cryptoMethod, saltedPassword, 'Server Key');
|
||||
const storedKey = H(cryptoMethod, clientKey);
|
||||
const authMessage = [clientFirstMessageBare(username, nonce), payload.value(), withoutProof].join(',');
|
||||
const clientSignature = HMAC(cryptoMethod, storedKey, authMessage);
|
||||
const clientProof = `p=${xor(clientKey, clientSignature)}`;
|
||||
const clientFinal = [withoutProof, clientProof].join(',');
|
||||
const serverSignature = HMAC(cryptoMethod, serverKey, authMessage);
|
||||
const saslContinueCmd = {
|
||||
saslContinue: 1,
|
||||
conversationId: response.conversationId,
|
||||
payload: new bson_1.Binary(Buffer.from(clientFinal))
|
||||
};
|
||||
const r = await connection.commandAsync((0, utils_1.ns)(`${db}.$cmd`), saslContinueCmd, undefined);
|
||||
const parsedResponse = parsePayload(r.payload.value());
|
||||
if (!compareDigest(Buffer.from(parsedResponse.v, 'base64'), serverSignature)) {
|
||||
throw new error_1.MongoRuntimeError('Server returned an invalid signature');
|
||||
}
|
||||
if (r.done !== false) {
|
||||
// If the server sends r.done === true we can save one RTT
|
||||
return;
|
||||
}
|
||||
const retrySaslContinueCmd = {
|
||||
saslContinue: 1,
|
||||
conversationId: r.conversationId,
|
||||
payload: Buffer.alloc(0)
|
||||
};
|
||||
await connection.commandAsync((0, utils_1.ns)(`${db}.$cmd`), retrySaslContinueCmd, undefined);
|
||||
}
|
||||
function parsePayload(payload) {
|
||||
const dict = {};
|
||||
const parts = payload.split(',');
|
||||
for (let i = 0; i < parts.length; i++) {
|
||||
const valueParts = parts[i].split('=');
|
||||
dict[valueParts[0]] = valueParts[1];
|
||||
}
|
||||
return dict;
|
||||
}
|
||||
function passwordDigest(username, password) {
|
||||
if (typeof username !== 'string') {
|
||||
throw new error_1.MongoInvalidArgumentError('Username must be a string');
|
||||
}
|
||||
if (typeof password !== 'string') {
|
||||
throw new error_1.MongoInvalidArgumentError('Password must be a string');
|
||||
}
|
||||
if (password.length === 0) {
|
||||
throw new error_1.MongoInvalidArgumentError('Password cannot be empty');
|
||||
}
|
||||
let md5;
|
||||
try {
|
||||
md5 = crypto.createHash('md5');
|
||||
}
|
||||
catch (err) {
|
||||
if (crypto.getFips()) {
|
||||
// This error is (slightly) more helpful than what comes from OpenSSL directly, e.g.
|
||||
// 'Error: error:060800C8:digital envelope routines:EVP_DigestInit_ex:disabled for FIPS'
|
||||
throw new Error('Auth mechanism SCRAM-SHA-1 is not supported in FIPS mode');
|
||||
}
|
||||
throw err;
|
||||
}
|
||||
md5.update(`${username}:mongo:${password}`, 'utf8');
|
||||
return md5.digest('hex');
|
||||
}
|
||||
// XOR two buffers
|
||||
function xor(a, b) {
|
||||
if (!Buffer.isBuffer(a)) {
|
||||
a = Buffer.from(a);
|
||||
}
|
||||
if (!Buffer.isBuffer(b)) {
|
||||
b = Buffer.from(b);
|
||||
}
|
||||
const length = Math.max(a.length, b.length);
|
||||
const res = [];
|
||||
for (let i = 0; i < length; i += 1) {
|
||||
res.push(a[i] ^ b[i]);
|
||||
}
|
||||
return Buffer.from(res).toString('base64');
|
||||
}
|
||||
function H(method, text) {
|
||||
return crypto.createHash(method).update(text).digest();
|
||||
}
|
||||
function HMAC(method, key, text) {
|
||||
return crypto.createHmac(method, key).update(text).digest();
|
||||
}
|
||||
let _hiCache = {};
|
||||
let _hiCacheCount = 0;
|
||||
function _hiCachePurge() {
|
||||
_hiCache = {};
|
||||
_hiCacheCount = 0;
|
||||
}
|
||||
const hiLengthMap = {
|
||||
sha256: 32,
|
||||
sha1: 20
|
||||
};
|
||||
function HI(data, salt, iterations, cryptoMethod) {
|
||||
// omit the work if already generated
|
||||
const key = [data, salt.toString('base64'), iterations].join('_');
|
||||
if (_hiCache[key] != null) {
|
||||
return _hiCache[key];
|
||||
}
|
||||
// generate the salt
|
||||
const saltedData = crypto.pbkdf2Sync(data, salt, iterations, hiLengthMap[cryptoMethod], cryptoMethod);
|
||||
// cache a copy to speed up the next lookup, but prevent unbounded cache growth
|
||||
if (_hiCacheCount >= 200) {
|
||||
_hiCachePurge();
|
||||
}
|
||||
_hiCache[key] = saltedData;
|
||||
_hiCacheCount += 1;
|
||||
return saltedData;
|
||||
}
|
||||
function compareDigest(lhs, rhs) {
|
||||
if (lhs.length !== rhs.length) {
|
||||
return false;
|
||||
}
|
||||
if (typeof crypto.timingSafeEqual === 'function') {
|
||||
return crypto.timingSafeEqual(lhs, rhs);
|
||||
}
|
||||
let result = 0;
|
||||
for (let i = 0; i < lhs.length; i++) {
|
||||
result |= lhs[i] ^ rhs[i];
|
||||
}
|
||||
return result === 0;
|
||||
}
|
||||
class ScramSHA1 extends ScramSHA {
|
||||
constructor() {
|
||||
super('sha1');
|
||||
}
|
||||
}
|
||||
exports.ScramSHA1 = ScramSHA1;
|
||||
class ScramSHA256 extends ScramSHA {
|
||||
constructor() {
|
||||
super('sha256');
|
||||
}
|
||||
}
|
||||
exports.ScramSHA256 = ScramSHA256;
|
||||
//# sourceMappingURL=scram.js.map
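Editor's note: the proof computed in continueScramConversation() follows RFC 5802. A standalone sketch of the same derivation with Node's crypto module; every input below is an illustrative fixture, not a real credential exchange.

// RFC 5802 client-proof derivation, mirroring the pipeline above (SHA-256 branch).
const crypto = require('crypto');

const password = 'processed-password';              // after saslprep / passwordDigest
const salt = Buffer.from('c2FsdHNhbHQ=', 'base64'); // server-provided salt (fixture)
const iterations = 4096;                            // minimum the driver accepts
const authMessage = 'n=user,r=cn,r=cnsn,s=c2FsdHNhbHQ=,i=4096,c=biws,r=cnsn'; // fixture

const saltedPassword = crypto.pbkdf2Sync(password, salt, iterations, 32, 'sha256');
const clientKey = crypto.createHmac('sha256', saltedPassword).update('Client Key').digest();
const storedKey = crypto.createHash('sha256').update(clientKey).digest();
const clientSignature = crypto.createHmac('sha256', storedKey).update(authMessage).digest();
// ClientProof = ClientKey XOR ClientSignature, sent base64-encoded as "p=..."
const clientProof = Buffer.from(clientKey.map((byte, i) => byte ^ clientSignature[i])).toString('base64');
console.log(`p=${clientProof}`);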
1
VISUALIZACION/node_modules/mongodb/lib/cmap/auth/scram.js.map
generated
vendored
Executable file
File diff suppressed because one or more lines are too long
36
VISUALIZACION/node_modules/mongodb/lib/cmap/auth/x509.js
generated
vendored
Executable file
@@ -0,0 +1,36 @@
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.X509 = void 0;
|
||||
const error_1 = require("../../error");
|
||||
const utils_1 = require("../../utils");
|
||||
const auth_provider_1 = require("./auth_provider");
|
||||
class X509 extends auth_provider_1.AuthProvider {
|
||||
async prepare(handshakeDoc, authContext) {
|
||||
const { credentials } = authContext;
|
||||
if (!credentials) {
|
||||
throw new error_1.MongoMissingCredentialsError('AuthContext must provide credentials.');
|
||||
}
|
||||
return { ...handshakeDoc, speculativeAuthenticate: x509AuthenticateCommand(credentials) };
|
||||
}
|
||||
async auth(authContext) {
|
||||
const connection = authContext.connection;
|
||||
const credentials = authContext.credentials;
|
||||
if (!credentials) {
|
||||
throw new error_1.MongoMissingCredentialsError('AuthContext must provide credentials.');
|
||||
}
|
||||
const response = authContext.response;
|
||||
if (response?.speculativeAuthenticate) {
|
||||
return;
|
||||
}
|
||||
await connection.commandAsync((0, utils_1.ns)('$external.$cmd'), x509AuthenticateCommand(credentials), undefined);
|
||||
}
|
||||
}
|
||||
exports.X509 = X509;
|
||||
function x509AuthenticateCommand(credentials) {
|
||||
const command = { authenticate: 1, mechanism: 'MONGODB-X509' };
|
||||
if (credentials.username) {
|
||||
command.user = credentials.username;
|
||||
}
|
||||
return command;
|
||||
}
|
||||
//# sourceMappingURL=x509.js.map
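Editor's note: a sketch of the client-side setup that exercises this provider. tlsCertificateKeyFile and tlsCAFile are real driver options; the host and certificate paths are placeholders.

// Connecting with X.509: the username is normally derived from the client
// certificate subject, so it can usually be omitted.
const { MongoClient } = require('mongodb');

const client = new MongoClient('mongodb://db.example.com:27017/?authMechanism=MONGODB-X509&tls=true', {
    tlsCertificateKeyFile: '/path/to/client.pem', // placeholder path
    tlsCAFile: '/path/to/ca.pem'                  // placeholder path
});
// During the handshake, prepare() above piggybacks { authenticate: 1,
// mechanism: 'MONGODB-X509' } as speculativeAuthenticate, which usually makes
// the separate auth() round trip unnecessary.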
1
VISUALIZACION/node_modules/mongodb/lib/cmap/auth/x509.js.map
generated
vendored
Executable file
@@ -0,0 +1 @@
{"version":3,"file":"x509.js","sourceRoot":"","sources":["../../../src/cmap/auth/x509.ts"],"names":[],"mappings":";;;AACA,uCAA2D;AAC3D,uCAAiC;AAEjC,mDAAiE;AAGjE,MAAa,IAAK,SAAQ,4BAAY;IAC3B,KAAK,CAAC,OAAO,CACpB,YAA+B,EAC/B,WAAwB;QAExB,MAAM,EAAE,WAAW,EAAE,GAAG,WAAW,CAAC;QACpC,IAAI,CAAC,WAAW,EAAE;YAChB,MAAM,IAAI,oCAA4B,CAAC,uCAAuC,CAAC,CAAC;SACjF;QACD,OAAO,EAAE,GAAG,YAAY,EAAE,uBAAuB,EAAE,uBAAuB,CAAC,WAAW,CAAC,EAAE,CAAC;IAC5F,CAAC;IAEQ,KAAK,CAAC,IAAI,CAAC,WAAwB;QAC1C,MAAM,UAAU,GAAG,WAAW,CAAC,UAAU,CAAC;QAC1C,MAAM,WAAW,GAAG,WAAW,CAAC,WAAW,CAAC;QAC5C,IAAI,CAAC,WAAW,EAAE;YAChB,MAAM,IAAI,oCAA4B,CAAC,uCAAuC,CAAC,CAAC;SACjF;QACD,MAAM,QAAQ,GAAG,WAAW,CAAC,QAAQ,CAAC;QAEtC,IAAI,QAAQ,EAAE,uBAAuB,EAAE;YACrC,OAAO;SACR;QAED,MAAM,UAAU,CAAC,YAAY,CAC3B,IAAA,UAAE,EAAC,gBAAgB,CAAC,EACpB,uBAAuB,CAAC,WAAW,CAAC,EACpC,SAAS,CACV,CAAC;IACJ,CAAC;CACF;AA9BD,oBA8BC;AAED,SAAS,uBAAuB,CAAC,WAA6B;IAC5D,MAAM,OAAO,GAAa,EAAE,YAAY,EAAE,CAAC,EAAE,SAAS,EAAE,cAAc,EAAE,CAAC;IACzE,IAAI,WAAW,CAAC,QAAQ,EAAE;QACxB,OAAO,CAAC,IAAI,GAAG,WAAW,CAAC,QAAQ,CAAC;KACrC;IAED,OAAO,OAAO,CAAC;AACjB,CAAC"}
251
VISUALIZACION/node_modules/mongodb/lib/cmap/command_monitoring_events.js
generated
vendored
Executable file
@@ -0,0 +1,251 @@
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.SENSITIVE_COMMANDS = exports.CommandFailedEvent = exports.CommandSucceededEvent = exports.CommandStartedEvent = void 0;
|
||||
const constants_1 = require("../constants");
|
||||
const utils_1 = require("../utils");
|
||||
const commands_1 = require("./commands");
|
||||
/**
|
||||
* An event indicating the start of a given command
|
||||
* @public
|
||||
* @category Event
|
||||
*/
|
||||
class CommandStartedEvent {
|
||||
/**
|
||||
* Create a started event
|
||||
*
|
||||
* @internal
|
||||
* @param pool - the pool that originated the command
|
||||
* @param command - the command
|
||||
*/
|
||||
constructor(connection, command) {
|
||||
/** @internal */
|
||||
this.name = constants_1.COMMAND_STARTED;
|
||||
const cmd = extractCommand(command);
|
||||
const commandName = extractCommandName(cmd);
|
||||
const { address, connectionId, serviceId } = extractConnectionDetails(connection);
|
||||
// TODO: remove in major revision, this is not spec behavior
|
||||
if (exports.SENSITIVE_COMMANDS.has(commandName)) {
|
||||
this.commandObj = {};
|
||||
this.commandObj[commandName] = true;
|
||||
}
|
||||
this.address = address;
|
||||
this.connectionId = connectionId;
|
||||
this.serviceId = serviceId;
|
||||
this.requestId = command.requestId;
|
||||
this.databaseName = databaseName(command);
|
||||
this.commandName = commandName;
|
||||
this.command = maybeRedact(commandName, cmd, cmd);
|
||||
}
|
||||
/* @internal */
|
||||
get hasServiceId() {
|
||||
return !!this.serviceId;
|
||||
}
|
||||
}
|
||||
exports.CommandStartedEvent = CommandStartedEvent;
|
||||
/**
|
||||
* An event indicating the success of a given command
|
||||
* @public
|
||||
* @category Event
|
||||
*/
|
||||
class CommandSucceededEvent {
|
||||
/**
|
||||
* Create a succeeded event
|
||||
*
|
||||
* @internal
|
||||
* @param pool - the pool that originated the command
|
||||
* @param command - the command
|
||||
* @param reply - the reply for this command from the server
|
||||
* @param started - a high resolution tuple timestamp of when the command was first sent, to calculate duration
|
||||
*/
|
||||
constructor(connection, command, reply, started) {
|
||||
/** @internal */
|
||||
this.name = constants_1.COMMAND_SUCCEEDED;
|
||||
const cmd = extractCommand(command);
|
||||
const commandName = extractCommandName(cmd);
|
||||
const { address, connectionId, serviceId } = extractConnectionDetails(connection);
|
||||
this.address = address;
|
||||
this.connectionId = connectionId;
|
||||
this.serviceId = serviceId;
|
||||
this.requestId = command.requestId;
|
||||
this.commandName = commandName;
|
||||
this.duration = (0, utils_1.calculateDurationInMs)(started);
|
||||
this.reply = maybeRedact(commandName, cmd, extractReply(command, reply));
|
||||
}
|
||||
/* @internal */
|
||||
get hasServiceId() {
|
||||
return !!this.serviceId;
|
||||
}
|
||||
}
|
||||
exports.CommandSucceededEvent = CommandSucceededEvent;
|
||||
/**
|
||||
* An event indicating the failure of a given command
|
||||
* @public
|
||||
* @category Event
|
||||
*/
|
||||
class CommandFailedEvent {
|
||||
/**
|
||||
* Create a failure event
|
||||
*
|
||||
* @internal
|
||||
* @param pool - the pool that originated the command
|
||||
* @param command - the command
|
||||
* @param error - the generated error or a server error response
|
||||
* @param started - a high resolution tuple timestamp of when the command was first sent, to calculate duration
|
||||
*/
|
||||
constructor(connection, command, error, started) {
|
||||
/** @internal */
|
||||
this.name = constants_1.COMMAND_FAILED;
|
||||
const cmd = extractCommand(command);
|
||||
const commandName = extractCommandName(cmd);
|
||||
const { address, connectionId, serviceId } = extractConnectionDetails(connection);
|
||||
this.address = address;
|
||||
this.connectionId = connectionId;
|
||||
this.serviceId = serviceId;
|
||||
this.requestId = command.requestId;
|
||||
this.commandName = commandName;
|
||||
this.duration = (0, utils_1.calculateDurationInMs)(started);
|
||||
this.failure = maybeRedact(commandName, cmd, error);
|
||||
}
|
||||
/* @internal */
|
||||
get hasServiceId() {
|
||||
return !!this.serviceId;
|
||||
}
|
||||
}
|
||||
exports.CommandFailedEvent = CommandFailedEvent;
|
||||
/**
|
||||
* Commands that we want to redact because of the sensitive nature of their contents
|
||||
* @internal
|
||||
*/
|
||||
exports.SENSITIVE_COMMANDS = new Set([
|
||||
'authenticate',
|
||||
'saslStart',
|
||||
'saslContinue',
|
||||
'getnonce',
|
||||
'createUser',
|
||||
'updateUser',
|
||||
'copydbgetnonce',
|
||||
'copydbsaslstart',
|
||||
'copydb'
|
||||
]);
|
||||
const HELLO_COMMANDS = new Set(['hello', constants_1.LEGACY_HELLO_COMMAND, constants_1.LEGACY_HELLO_COMMAND_CAMEL_CASE]);
|
||||
// helper methods
|
||||
const extractCommandName = (commandDoc) => Object.keys(commandDoc)[0];
|
||||
const namespace = (command) => command.ns;
|
||||
const databaseName = (command) => command.ns.split('.')[0];
|
||||
const collectionName = (command) => command.ns.split('.')[1];
|
||||
const maybeRedact = (commandName, commandDoc, result) => exports.SENSITIVE_COMMANDS.has(commandName) ||
|
||||
(HELLO_COMMANDS.has(commandName) && commandDoc.speculativeAuthenticate)
|
||||
? {}
|
||||
: result;
|
||||
const LEGACY_FIND_QUERY_MAP = {
|
||||
$query: 'filter',
|
||||
$orderby: 'sort',
|
||||
$hint: 'hint',
|
||||
$comment: 'comment',
|
||||
$maxScan: 'maxScan',
|
||||
$max: 'max',
|
||||
$min: 'min',
|
||||
$returnKey: 'returnKey',
|
||||
$showDiskLoc: 'showRecordId',
|
||||
$maxTimeMS: 'maxTimeMS',
|
||||
$snapshot: 'snapshot'
|
||||
};
|
||||
const LEGACY_FIND_OPTIONS_MAP = {
|
||||
numberToSkip: 'skip',
|
||||
numberToReturn: 'batchSize',
|
||||
returnFieldSelector: 'projection'
|
||||
};
|
||||
const OP_QUERY_KEYS = [
|
||||
'tailable',
|
||||
'oplogReplay',
|
||||
'noCursorTimeout',
|
||||
'awaitData',
|
||||
'partial',
|
||||
'exhaust'
|
||||
];
|
||||
/** Extract the actual command from the query, possibly up-converting if it's a legacy format */
|
||||
function extractCommand(command) {
|
||||
if (command instanceof commands_1.Msg) {
|
||||
return (0, utils_1.deepCopy)(command.command);
|
||||
}
|
||||
if (command.query?.$query) {
|
||||
let result;
|
||||
if (command.ns === 'admin.$cmd') {
|
||||
// up-convert legacy command
|
||||
result = Object.assign({}, command.query.$query);
|
||||
}
|
||||
else {
|
||||
// up-convert legacy find command
|
||||
result = { find: collectionName(command) };
|
||||
Object.keys(LEGACY_FIND_QUERY_MAP).forEach(key => {
|
||||
if (command.query[key] != null) {
|
||||
result[LEGACY_FIND_QUERY_MAP[key]] = (0, utils_1.deepCopy)(command.query[key]);
|
||||
}
|
||||
});
|
||||
}
|
||||
Object.keys(LEGACY_FIND_OPTIONS_MAP).forEach(key => {
|
||||
const legacyKey = key;
|
||||
if (command[legacyKey] != null) {
|
||||
result[LEGACY_FIND_OPTIONS_MAP[legacyKey]] = (0, utils_1.deepCopy)(command[legacyKey]);
|
||||
}
|
||||
});
|
||||
OP_QUERY_KEYS.forEach(key => {
|
||||
if (command[key]) {
|
||||
result[key] = command[key];
|
||||
}
|
||||
});
|
||||
if (command.pre32Limit != null) {
|
||||
result.limit = command.pre32Limit;
|
||||
}
|
||||
if (command.query.$explain) {
|
||||
return { explain: result };
|
||||
}
|
||||
return result;
|
||||
}
|
||||
const clonedQuery = {};
|
||||
const clonedCommand = {};
|
||||
if (command.query) {
|
||||
for (const k in command.query) {
|
||||
clonedQuery[k] = (0, utils_1.deepCopy)(command.query[k]);
|
||||
}
|
||||
clonedCommand.query = clonedQuery;
|
||||
}
|
||||
for (const k in command) {
|
||||
if (k === 'query')
|
||||
continue;
|
||||
clonedCommand[k] = (0, utils_1.deepCopy)(command[k]);
|
||||
}
|
||||
return command.query ? clonedQuery : clonedCommand;
|
||||
}
|
||||
function extractReply(command, reply) {
|
||||
if (!reply) {
|
||||
return reply;
|
||||
}
|
||||
if (command instanceof commands_1.Msg) {
|
||||
return (0, utils_1.deepCopy)(reply.result ? reply.result : reply);
|
||||
}
|
||||
// is this a legacy find command?
|
||||
if (command.query && command.query.$query != null) {
|
||||
return {
|
||||
ok: 1,
|
||||
cursor: {
|
||||
id: (0, utils_1.deepCopy)(reply.cursorId),
|
||||
ns: namespace(command),
|
||||
firstBatch: (0, utils_1.deepCopy)(reply.documents)
|
||||
}
|
||||
};
|
||||
}
|
||||
return (0, utils_1.deepCopy)(reply.result ? reply.result : reply);
|
||||
}
|
||||
function extractConnectionDetails(connection) {
|
||||
let connectionId;
|
||||
if ('id' in connection) {
|
||||
connectionId = connection.id;
|
||||
}
|
||||
return {
|
||||
address: connection.address,
|
||||
serviceId: connection.serviceId,
|
||||
connectionId
|
||||
};
|
||||
}
|
||||
//# sourceMappingURL=command_monitoring_events.js.map
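Editor's note: these events reach applications through the driver's monitorCommands option and the commandStarted / commandSucceeded / commandFailed client events. A short sketch; the connection string is a placeholder.

// Listening for the events defined above.
const { MongoClient } = require('mongodb');

const client = new MongoClient('mongodb://localhost:27017', { monitorCommands: true });

client.on('commandStarted', event => {
    // For saslStart, saslContinue, etc. the command is redacted to {} by maybeRedact().
    console.log(`${event.commandName} -> ${event.address}`, event.command);
});
client.on('commandSucceeded', event => console.log(`${event.commandName} ok in ${event.duration}ms`));
client.on('commandFailed', event => console.error(`${event.commandName} failed:`, event.failure));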
1
VISUALIZACION/node_modules/mongodb/lib/cmap/command_monitoring_events.js.map
generated
vendored
Executable file
File diff suppressed because one or more lines are too long
487
VISUALIZACION/node_modules/mongodb/lib/cmap/commands.js
generated
vendored
Executable file
@@ -0,0 +1,487 @@
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.BinMsg = exports.Msg = exports.Response = exports.Query = void 0;
|
||||
const BSON = require("../bson");
|
||||
const error_1 = require("../error");
|
||||
const read_preference_1 = require("../read_preference");
|
||||
const utils_1 = require("../utils");
|
||||
const constants_1 = require("./wire_protocol/constants");
|
||||
// Incrementing request id
|
||||
let _requestId = 0;
|
||||
// Query flags
|
||||
const OPTS_TAILABLE_CURSOR = 2;
|
||||
const OPTS_SECONDARY = 4;
|
||||
const OPTS_OPLOG_REPLAY = 8;
|
||||
const OPTS_NO_CURSOR_TIMEOUT = 16;
|
||||
const OPTS_AWAIT_DATA = 32;
|
||||
const OPTS_EXHAUST = 64;
|
||||
const OPTS_PARTIAL = 128;
|
||||
// Response flags
|
||||
const CURSOR_NOT_FOUND = 1;
|
||||
const QUERY_FAILURE = 2;
|
||||
const SHARD_CONFIG_STALE = 4;
|
||||
const AWAIT_CAPABLE = 8;
|
||||
/**************************************************************
|
||||
* QUERY
|
||||
**************************************************************/
|
||||
/** @internal */
|
||||
class Query {
|
||||
constructor(ns, query, options) {
|
||||
// Basic options needed to be passed in
|
||||
// TODO(NODE-3483): Replace with MongoCommandError
|
||||
if (ns == null)
|
||||
throw new error_1.MongoRuntimeError('Namespace must be specified for query');
|
||||
// TODO(NODE-3483): Replace with MongoCommandError
|
||||
if (query == null)
|
||||
throw new error_1.MongoRuntimeError('A query document must be specified for query');
|
||||
// Validate that we are not passing 0x00 in the collection name
|
||||
if (ns.indexOf('\x00') !== -1) {
|
||||
// TODO(NODE-3483): Use MongoNamespace static method
|
||||
throw new error_1.MongoRuntimeError('Namespace cannot contain a null character');
|
||||
}
|
||||
// Basic options
|
||||
this.ns = ns;
|
||||
this.query = query;
|
||||
// Additional options
|
||||
this.numberToSkip = options.numberToSkip || 0;
|
||||
this.numberToReturn = options.numberToReturn || 0;
|
||||
this.returnFieldSelector = options.returnFieldSelector || undefined;
|
||||
this.requestId = Query.getRequestId();
|
||||
// special case for pre-3.2 find commands, delete ASAP
|
||||
this.pre32Limit = options.pre32Limit;
|
||||
// Serialization option
|
||||
this.serializeFunctions =
|
||||
typeof options.serializeFunctions === 'boolean' ? options.serializeFunctions : false;
|
||||
this.ignoreUndefined =
|
||||
typeof options.ignoreUndefined === 'boolean' ? options.ignoreUndefined : false;
|
||||
this.maxBsonSize = options.maxBsonSize || 1024 * 1024 * 16;
|
||||
this.checkKeys = typeof options.checkKeys === 'boolean' ? options.checkKeys : false;
|
||||
this.batchSize = this.numberToReturn;
|
||||
// Flags
|
||||
this.tailable = false;
|
||||
this.secondaryOk = typeof options.secondaryOk === 'boolean' ? options.secondaryOk : false;
|
||||
this.oplogReplay = false;
|
||||
this.noCursorTimeout = false;
|
||||
this.awaitData = false;
|
||||
this.exhaust = false;
|
||||
this.partial = false;
|
||||
}
|
||||
/** Assign next request Id. */
|
||||
incRequestId() {
|
||||
this.requestId = _requestId++;
|
||||
}
|
||||
/** Peek next request Id. */
|
||||
nextRequestId() {
|
||||
return _requestId + 1;
|
||||
}
|
||||
/** Increment then return next request Id. */
|
||||
static getRequestId() {
|
||||
return ++_requestId;
|
||||
}
|
||||
// Uses a single allocated buffer for the process, avoiding multiple memory allocations
|
||||
toBin() {
|
||||
const buffers = [];
|
||||
let projection = null;
|
||||
// Set up the flags
|
||||
let flags = 0;
|
||||
if (this.tailable) {
|
||||
flags |= OPTS_TAILABLE_CURSOR;
|
||||
}
|
||||
if (this.secondaryOk) {
|
||||
flags |= OPTS_SECONDARY;
|
||||
}
|
||||
if (this.oplogReplay) {
|
||||
flags |= OPTS_OPLOG_REPLAY;
|
||||
}
|
||||
if (this.noCursorTimeout) {
|
||||
flags |= OPTS_NO_CURSOR_TIMEOUT;
|
||||
}
|
||||
if (this.awaitData) {
|
||||
flags |= OPTS_AWAIT_DATA;
|
||||
}
|
||||
if (this.exhaust) {
|
||||
flags |= OPTS_EXHAUST;
|
||||
}
|
||||
if (this.partial) {
|
||||
flags |= OPTS_PARTIAL;
|
||||
}
|
||||
// If batchSize is different to this.numberToReturn
|
||||
if (this.batchSize !== this.numberToReturn)
|
||||
this.numberToReturn = this.batchSize;
|
||||
// Allocate write protocol header buffer
|
||||
const header = Buffer.alloc(4 * 4 + // Header
|
||||
4 + // Flags
|
||||
Buffer.byteLength(this.ns) +
|
||||
1 + // namespace
|
||||
4 + // numberToSkip
|
||||
4 // numberToReturn
|
||||
);
|
||||
// Add header to buffers
|
||||
buffers.push(header);
|
||||
// Serialize the query
|
||||
const query = BSON.serialize(this.query, {
|
||||
checkKeys: this.checkKeys,
|
||||
serializeFunctions: this.serializeFunctions,
|
||||
ignoreUndefined: this.ignoreUndefined
|
||||
});
|
||||
// Add query document
|
||||
buffers.push(query);
|
||||
if (this.returnFieldSelector && Object.keys(this.returnFieldSelector).length > 0) {
|
||||
// Serialize the projection document
|
||||
projection = BSON.serialize(this.returnFieldSelector, {
|
||||
checkKeys: this.checkKeys,
|
||||
serializeFunctions: this.serializeFunctions,
|
||||
ignoreUndefined: this.ignoreUndefined
|
||||
});
|
||||
// Add projection document
|
||||
buffers.push(projection);
|
||||
}
|
||||
// Total message size
|
||||
const totalLength = header.length + query.length + (projection ? projection.length : 0);
|
||||
// Set up the index
|
||||
let index = 4;
|
||||
// Write total document length
|
||||
header[3] = (totalLength >> 24) & 0xff;
|
||||
header[2] = (totalLength >> 16) & 0xff;
|
||||
header[1] = (totalLength >> 8) & 0xff;
|
||||
header[0] = totalLength & 0xff;
|
||||
// Write header information requestId
|
||||
header[index + 3] = (this.requestId >> 24) & 0xff;
|
||||
header[index + 2] = (this.requestId >> 16) & 0xff;
|
||||
header[index + 1] = (this.requestId >> 8) & 0xff;
|
||||
header[index] = this.requestId & 0xff;
|
||||
index = index + 4;
|
||||
// Write header information responseTo
|
||||
header[index + 3] = (0 >> 24) & 0xff;
|
||||
header[index + 2] = (0 >> 16) & 0xff;
|
||||
header[index + 1] = (0 >> 8) & 0xff;
|
||||
header[index] = 0 & 0xff;
|
||||
index = index + 4;
|
||||
// Write header information OP_QUERY
|
||||
header[index + 3] = (constants_1.OP_QUERY >> 24) & 0xff;
|
||||
header[index + 2] = (constants_1.OP_QUERY >> 16) & 0xff;
|
||||
header[index + 1] = (constants_1.OP_QUERY >> 8) & 0xff;
|
||||
header[index] = constants_1.OP_QUERY & 0xff;
|
||||
index = index + 4;
|
||||
// Write header information flags
|
||||
header[index + 3] = (flags >> 24) & 0xff;
|
||||
header[index + 2] = (flags >> 16) & 0xff;
|
||||
header[index + 1] = (flags >> 8) & 0xff;
|
||||
header[index] = flags & 0xff;
|
||||
index = index + 4;
|
||||
// Write collection name
|
||||
index = index + header.write(this.ns, index, 'utf8') + 1;
|
||||
header[index - 1] = 0;
|
||||
// Write header information flags numberToSkip
|
||||
header[index + 3] = (this.numberToSkip >> 24) & 0xff;
|
||||
header[index + 2] = (this.numberToSkip >> 16) & 0xff;
|
||||
header[index + 1] = (this.numberToSkip >> 8) & 0xff;
|
||||
header[index] = this.numberToSkip & 0xff;
|
||||
index = index + 4;
|
||||
// Write header information flags numberToReturn
|
||||
header[index + 3] = (this.numberToReturn >> 24) & 0xff;
|
||||
header[index + 2] = (this.numberToReturn >> 16) & 0xff;
|
||||
header[index + 1] = (this.numberToReturn >> 8) & 0xff;
|
||||
header[index] = this.numberToReturn & 0xff;
|
||||
index = index + 4;
|
||||
// Return the buffers
|
||||
return buffers;
|
||||
}
|
||||
}
|
||||
exports.Query = Query;
|
||||
/** @internal */
|
||||
class Response {
|
||||
constructor(message, msgHeader, msgBody, opts) {
|
||||
this.documents = new Array(0);
|
||||
this.parsed = false;
|
||||
this.raw = message;
|
||||
this.data = msgBody;
|
||||
this.opts = opts ?? {
|
||||
useBigInt64: false,
|
||||
promoteLongs: true,
|
||||
promoteValues: true,
|
||||
promoteBuffers: false,
|
||||
bsonRegExp: false
|
||||
};
|
||||
// Read the message header
|
||||
this.length = msgHeader.length;
|
||||
this.requestId = msgHeader.requestId;
|
||||
this.responseTo = msgHeader.responseTo;
|
||||
this.opCode = msgHeader.opCode;
|
||||
this.fromCompressed = msgHeader.fromCompressed;
|
||||
// Flag values
|
||||
this.useBigInt64 = typeof this.opts.useBigInt64 === 'boolean' ? this.opts.useBigInt64 : false;
|
||||
this.promoteLongs = typeof this.opts.promoteLongs === 'boolean' ? this.opts.promoteLongs : true;
|
||||
this.promoteValues =
|
||||
typeof this.opts.promoteValues === 'boolean' ? this.opts.promoteValues : true;
|
||||
this.promoteBuffers =
|
||||
typeof this.opts.promoteBuffers === 'boolean' ? this.opts.promoteBuffers : false;
|
||||
this.bsonRegExp = typeof this.opts.bsonRegExp === 'boolean' ? this.opts.bsonRegExp : false;
|
||||
}
|
||||
isParsed() {
|
||||
return this.parsed;
|
||||
}
|
||||
parse(options) {
|
||||
// Don't parse again if not needed
|
||||
if (this.parsed)
|
||||
return;
|
||||
options = options ?? {};
|
||||
// Allow the return of raw documents instead of parsing
|
||||
const raw = options.raw || false;
|
||||
const documentsReturnedIn = options.documentsReturnedIn || null;
|
||||
const useBigInt64 = options.useBigInt64 ?? this.opts.useBigInt64;
|
||||
const promoteLongs = options.promoteLongs ?? this.opts.promoteLongs;
|
||||
const promoteValues = options.promoteValues ?? this.opts.promoteValues;
|
||||
const promoteBuffers = options.promoteBuffers ?? this.opts.promoteBuffers;
|
||||
const bsonRegExp = options.bsonRegExp ?? this.opts.bsonRegExp;
|
||||
let bsonSize;
|
||||
// Set up the options
|
||||
const _options = {
|
||||
useBigInt64,
|
||||
promoteLongs,
|
||||
promoteValues,
|
||||
promoteBuffers,
|
||||
bsonRegExp
|
||||
};
|
||||
// Position within OP_REPLY at which documents start
|
||||
// (See https://www.mongodb.com/docs/manual/reference/mongodb-wire-protocol/#wire-op-reply)
|
||||
this.index = 20;
|
||||
// Read the message body
|
||||
this.responseFlags = this.data.readInt32LE(0);
|
||||
this.cursorId = new BSON.Long(this.data.readInt32LE(4), this.data.readInt32LE(8));
|
||||
this.startingFrom = this.data.readInt32LE(12);
|
||||
this.numberReturned = this.data.readInt32LE(16);
|
||||
// Preallocate document array
|
||||
this.documents = new Array(this.numberReturned);
|
||||
this.cursorNotFound = (this.responseFlags & CURSOR_NOT_FOUND) !== 0;
|
||||
this.queryFailure = (this.responseFlags & QUERY_FAILURE) !== 0;
|
||||
this.shardConfigStale = (this.responseFlags & SHARD_CONFIG_STALE) !== 0;
|
||||
this.awaitCapable = (this.responseFlags & AWAIT_CAPABLE) !== 0;
|
||||
// Parse Body
|
||||
for (let i = 0; i < this.numberReturned; i++) {
|
||||
bsonSize =
|
||||
this.data[this.index] |
|
||||
(this.data[this.index + 1] << 8) |
|
||||
(this.data[this.index + 2] << 16) |
|
||||
(this.data[this.index + 3] << 24);
|
||||
// If we have raw results specified slice the return document
|
||||
if (raw) {
|
||||
this.documents[i] = this.data.slice(this.index, this.index + bsonSize);
|
||||
}
|
||||
else {
|
||||
this.documents[i] = BSON.deserialize(this.data.slice(this.index, this.index + bsonSize), _options);
|
||||
}
|
||||
// Adjust the index
|
||||
this.index = this.index + bsonSize;
|
||||
}
|
||||
if (this.documents.length === 1 && documentsReturnedIn != null && raw) {
|
||||
const fieldsAsRaw = {};
|
||||
fieldsAsRaw[documentsReturnedIn] = true;
|
||||
_options.fieldsAsRaw = fieldsAsRaw;
|
||||
const doc = BSON.deserialize(this.documents[0], _options);
|
||||
this.documents = [doc];
|
||||
}
|
||||
// Set parsed
|
||||
this.parsed = true;
|
||||
}
|
||||
}
|
||||
exports.Response = Response;
|
||||
// Implementation of OP_MSG spec:
|
||||
// https://github.com/mongodb/specifications/blob/master/source/message/OP_MSG.rst
|
||||
//
|
||||
// struct Section {
|
||||
// uint8 payloadType;
|
||||
// union payload {
|
||||
// document document; // payloadType == 0
|
||||
// struct sequence { // payloadType == 1
|
||||
// int32 size;
|
||||
// cstring identifier;
|
||||
// document* documents;
|
||||
// };
|
||||
// };
|
||||
// };
|
||||
// struct OP_MSG {
|
||||
// struct MsgHeader {
|
||||
// int32 messageLength;
|
||||
// int32 requestID;
|
||||
// int32 responseTo;
|
||||
// int32 opCode = 2013;
|
||||
// };
|
||||
// uint32 flagBits;
|
||||
// Section+ sections;
|
||||
// [uint32 checksum;]
|
||||
// };
|
||||
// Msg Flags
|
||||
const OPTS_CHECKSUM_PRESENT = 1;
|
||||
const OPTS_MORE_TO_COME = 2;
|
||||
const OPTS_EXHAUST_ALLOWED = 1 << 16;
|
||||
/** @internal */
|
||||
class Msg {
|
||||
constructor(ns, command, options) {
|
||||
// Basic options needed to be passed in
|
||||
if (command == null)
|
||||
throw new error_1.MongoInvalidArgumentError('Query document must be specified for query');
|
||||
// Basic options
|
||||
this.ns = ns;
|
||||
this.command = command;
|
||||
this.command.$db = (0, utils_1.databaseNamespace)(ns);
|
||||
if (options.readPreference && options.readPreference.mode !== read_preference_1.ReadPreference.PRIMARY) {
|
||||
this.command.$readPreference = options.readPreference.toJSON();
|
||||
}
|
||||
// Ensure empty options
|
||||
this.options = options ?? {};
|
||||
// Additional options
|
||||
this.requestId = options.requestId ? options.requestId : Msg.getRequestId();
|
||||
// Serialization option
|
||||
this.serializeFunctions =
|
||||
typeof options.serializeFunctions === 'boolean' ? options.serializeFunctions : false;
|
||||
this.ignoreUndefined =
|
||||
typeof options.ignoreUndefined === 'boolean' ? options.ignoreUndefined : false;
|
||||
this.checkKeys = typeof options.checkKeys === 'boolean' ? options.checkKeys : false;
|
||||
this.maxBsonSize = options.maxBsonSize || 1024 * 1024 * 16;
|
||||
// flags
|
||||
this.checksumPresent = false;
|
||||
this.moreToCome = options.moreToCome || false;
|
||||
this.exhaustAllowed =
|
||||
typeof options.exhaustAllowed === 'boolean' ? options.exhaustAllowed : false;
|
||||
}
|
||||
toBin() {
|
||||
const buffers = [];
|
||||
let flags = 0;
|
||||
if (this.checksumPresent) {
|
||||
flags |= OPTS_CHECKSUM_PRESENT;
|
||||
}
|
||||
if (this.moreToCome) {
|
||||
flags |= OPTS_MORE_TO_COME;
|
||||
}
|
||||
if (this.exhaustAllowed) {
|
||||
flags |= OPTS_EXHAUST_ALLOWED;
|
||||
}
|
||||
const header = Buffer.alloc(4 * 4 + // Header
|
||||
4 // Flags
|
||||
);
|
||||
buffers.push(header);
|
||||
let totalLength = header.length;
|
||||
const command = this.command;
|
||||
totalLength += this.makeDocumentSegment(buffers, command);
|
||||
header.writeInt32LE(totalLength, 0); // messageLength
|
||||
header.writeInt32LE(this.requestId, 4); // requestID
|
||||
header.writeInt32LE(0, 8); // responseTo
|
||||
header.writeInt32LE(constants_1.OP_MSG, 12); // opCode
|
||||
header.writeUInt32LE(flags, 16); // flags
|
||||
return buffers;
|
||||
}
|
||||
makeDocumentSegment(buffers, document) {
|
||||
const payloadTypeBuffer = Buffer.alloc(1);
|
||||
payloadTypeBuffer[0] = 0;
|
||||
const documentBuffer = this.serializeBson(document);
|
||||
buffers.push(payloadTypeBuffer);
|
||||
buffers.push(documentBuffer);
|
||||
return payloadTypeBuffer.length + documentBuffer.length;
|
||||
}
|
||||
serializeBson(document) {
|
||||
return BSON.serialize(document, {
|
||||
checkKeys: this.checkKeys,
|
||||
serializeFunctions: this.serializeFunctions,
|
||||
ignoreUndefined: this.ignoreUndefined
|
||||
});
|
||||
}
|
||||
static getRequestId() {
|
||||
_requestId = (_requestId + 1) & 0x7fffffff;
|
||||
return _requestId;
|
||||
}
|
||||
}
|
||||
exports.Msg = Msg;
/** @internal */
class BinMsg {
    constructor(message, msgHeader, msgBody, opts) {
        this.parsed = false;
        this.raw = message;
        this.data = msgBody;
        this.opts = opts ?? {
            useBigInt64: false,
            promoteLongs: true,
            promoteValues: true,
            promoteBuffers: false,
            bsonRegExp: false
        };
        // Read the message header
        this.length = msgHeader.length;
        this.requestId = msgHeader.requestId;
        this.responseTo = msgHeader.responseTo;
        this.opCode = msgHeader.opCode;
        this.fromCompressed = msgHeader.fromCompressed;
        // Read response flags
        this.responseFlags = msgBody.readInt32LE(0);
        this.checksumPresent = (this.responseFlags & OPTS_CHECKSUM_PRESENT) !== 0;
        this.moreToCome = (this.responseFlags & OPTS_MORE_TO_COME) !== 0;
        this.exhaustAllowed = (this.responseFlags & OPTS_EXHAUST_ALLOWED) !== 0;
        this.useBigInt64 = typeof this.opts.useBigInt64 === 'boolean' ? this.opts.useBigInt64 : false;
        this.promoteLongs = typeof this.opts.promoteLongs === 'boolean' ? this.opts.promoteLongs : true;
        this.promoteValues =
            typeof this.opts.promoteValues === 'boolean' ? this.opts.promoteValues : true;
        this.promoteBuffers =
            typeof this.opts.promoteBuffers === 'boolean' ? this.opts.promoteBuffers : false;
        this.bsonRegExp = typeof this.opts.bsonRegExp === 'boolean' ? this.opts.bsonRegExp : false;
        this.documents = [];
    }
    isParsed() {
        return this.parsed;
    }
    parse(options) {
        // Don't parse again if not needed
        if (this.parsed)
            return;
        options = options ?? {};
        this.index = 4;
        // Allow the return of raw documents instead of parsing
        const raw = options.raw || false;
        const documentsReturnedIn = options.documentsReturnedIn || null;
        const useBigInt64 = options.useBigInt64 ?? this.opts.useBigInt64;
        const promoteLongs = options.promoteLongs ?? this.opts.promoteLongs;
        const promoteValues = options.promoteValues ?? this.opts.promoteValues;
        const promoteBuffers = options.promoteBuffers ?? this.opts.promoteBuffers;
        const bsonRegExp = options.bsonRegExp ?? this.opts.bsonRegExp;
        const validation = this.parseBsonSerializationOptions(options);
        // Set up the options
        const bsonOptions = {
            useBigInt64,
            promoteLongs,
            promoteValues,
            promoteBuffers,
            bsonRegExp,
            validation
            // Due to the strictness of the BSON libraries validation option we need this cast
        };
        while (this.index < this.data.length) {
            const payloadType = this.data.readUInt8(this.index++);
            if (payloadType === 0) {
                const bsonSize = this.data.readUInt32LE(this.index);
                const bin = this.data.slice(this.index, this.index + bsonSize);
                this.documents.push(raw ? bin : BSON.deserialize(bin, bsonOptions));
                this.index += bsonSize;
            }
            else if (payloadType === 1) {
                // It was decided that no driver makes use of payload type 1
                // TODO(NODE-3483): Replace with MongoDeprecationError
                throw new error_1.MongoRuntimeError('OP_MSG Payload Type 1 detected unsupported protocol');
            }
        }
        if (this.documents.length === 1 && documentsReturnedIn != null && raw) {
            const fieldsAsRaw = {};
            fieldsAsRaw[documentsReturnedIn] = true;
            bsonOptions.fieldsAsRaw = fieldsAsRaw;
            const doc = BSON.deserialize(this.documents[0], bsonOptions);
            this.documents = [doc];
        }
        this.parsed = true;
    }
    parseBsonSerializationOptions({ enableUtf8Validation }) {
        if (enableUtf8Validation === false) {
            return { utf8: false };
        }
        return { utf8: { writeErrors: false } };
    }
}
exports.BinMsg = BinMsg;
//# sourceMappingURL=commands.js.map
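The parse() loop above walks OP_MSG sections: one byte of payload type followed
by a BSON document whose own leading little-endian int32 doubles as the section
length. A minimal framing sketch, assuming only the bson package (the variable
names are illustrative, not part of the driver):

    const BSON = require('bson');
    const doc = BSON.serialize({ ok: 1 });
    const section = Buffer.concat([Buffer.from([0]), doc]); // payload type 0
    // section.readUInt32LE(1) === doc.length, which is why parse() can slice
    // this.index .. this.index + bsonSize without a separate length prefix.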
1
VISUALIZACION/node_modules/mongodb/lib/cmap/commands.js.map
generated
vendored
Executable file
File diff suppressed because one or more lines are too long
378
VISUALIZACION/node_modules/mongodb/lib/cmap/connect.js
generated
vendored
Executable file
@@ -0,0 +1,378 @@
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.LEGAL_TCP_SOCKET_OPTIONS = exports.LEGAL_TLS_SOCKET_OPTIONS = exports.prepareHandshakeDocument = exports.connect = exports.AUTH_PROVIDERS = void 0;
|
||||
const net = require("net");
|
||||
const socks_1 = require("socks");
|
||||
const tls = require("tls");
|
||||
const constants_1 = require("../constants");
|
||||
const error_1 = require("../error");
|
||||
const utils_1 = require("../utils");
|
||||
const auth_provider_1 = require("./auth/auth_provider");
|
||||
const gssapi_1 = require("./auth/gssapi");
|
||||
const mongocr_1 = require("./auth/mongocr");
|
||||
const mongodb_aws_1 = require("./auth/mongodb_aws");
|
||||
const mongodb_oidc_1 = require("./auth/mongodb_oidc");
|
||||
const plain_1 = require("./auth/plain");
|
||||
const providers_1 = require("./auth/providers");
|
||||
const scram_1 = require("./auth/scram");
|
||||
const x509_1 = require("./auth/x509");
|
||||
const connection_1 = require("./connection");
|
||||
const constants_2 = require("./wire_protocol/constants");
|
||||
/** @internal */
|
||||
exports.AUTH_PROVIDERS = new Map([
|
||||
[providers_1.AuthMechanism.MONGODB_AWS, new mongodb_aws_1.MongoDBAWS()],
|
||||
[providers_1.AuthMechanism.MONGODB_CR, new mongocr_1.MongoCR()],
|
||||
[providers_1.AuthMechanism.MONGODB_GSSAPI, new gssapi_1.GSSAPI()],
|
||||
[providers_1.AuthMechanism.MONGODB_OIDC, new mongodb_oidc_1.MongoDBOIDC()],
|
||||
[providers_1.AuthMechanism.MONGODB_PLAIN, new plain_1.Plain()],
|
||||
[providers_1.AuthMechanism.MONGODB_SCRAM_SHA1, new scram_1.ScramSHA1()],
|
||||
[providers_1.AuthMechanism.MONGODB_SCRAM_SHA256, new scram_1.ScramSHA256()],
|
||||
[providers_1.AuthMechanism.MONGODB_X509, new x509_1.X509()]
|
||||
]);
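// The map above is keyed by AuthMechanism value, so resolving a provider is a
// plain Map lookup; a sketch of the pattern used throughout this file (the
// string shown is illustrative of what AuthMechanism.MONGODB_SCRAM_SHA256
// resolves to):
//   const provider = exports.AUTH_PROVIDERS.get('SCRAM-SHA-256');
//   if (!provider) throw new Error('unsupported mechanism');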
function connect(options, callback) {
    makeConnection({ ...options, existingSocket: undefined }, (err, socket) => {
        if (err || !socket) {
            return callback(err);
        }
        let ConnectionType = options.connectionType ?? connection_1.Connection;
        if (options.autoEncrypter) {
            ConnectionType = connection_1.CryptoConnection;
        }
        const connection = new ConnectionType(socket, options);
        performInitialHandshake(connection, options).then(() => callback(undefined, connection), error => {
            connection.destroy({ force: false });
            callback(error);
        });
    });
}
exports.connect = connect;
function checkSupportedServer(hello, options) {
    const maxWireVersion = Number(hello.maxWireVersion);
    const minWireVersion = Number(hello.minWireVersion);
    const serverVersionHighEnough = !Number.isNaN(maxWireVersion) && maxWireVersion >= constants_2.MIN_SUPPORTED_WIRE_VERSION;
    const serverVersionLowEnough = !Number.isNaN(minWireVersion) && minWireVersion <= constants_2.MAX_SUPPORTED_WIRE_VERSION;
    if (serverVersionHighEnough) {
        if (serverVersionLowEnough) {
            return null;
        }
        const message = `Server at ${options.hostAddress} reports minimum wire version ${JSON.stringify(hello.minWireVersion)}, but this version of the Node.js Driver requires at most ${constants_2.MAX_SUPPORTED_WIRE_VERSION} (MongoDB ${constants_2.MAX_SUPPORTED_SERVER_VERSION})`;
        return new error_1.MongoCompatibilityError(message);
    }
    const message = `Server at ${options.hostAddress} reports maximum wire version ${JSON.stringify(hello.maxWireVersion) ?? 0}, but this version of the Node.js Driver requires at least ${constants_2.MIN_SUPPORTED_WIRE_VERSION} (MongoDB ${constants_2.MIN_SUPPORTED_SERVER_VERSION})`;
    return new error_1.MongoCompatibilityError(message);
}
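// A worked example of the two-sided range check above, assuming a driver whose
// supported window is wire versions 6 (MongoDB 3.6) through some MAX >= 6: a
// hello of { minWireVersion: 0, maxWireVersion: 6 } passes both branches and
// returns null; { maxWireVersion: 5 } fails the "high enough" branch; and
// { minWireVersion: MAX + 1 } fails the "low enough" branch, each producing a
// MongoCompatibilityError instead of a silently incompatible connection.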
async function performInitialHandshake(conn, options) {
    const credentials = options.credentials;
    if (credentials) {
        if (!(credentials.mechanism === providers_1.AuthMechanism.MONGODB_DEFAULT) &&
            !exports.AUTH_PROVIDERS.get(credentials.mechanism)) {
            throw new error_1.MongoInvalidArgumentError(`AuthMechanism '${credentials.mechanism}' not supported`);
        }
    }
    const authContext = new auth_provider_1.AuthContext(conn, credentials, options);
    conn.authContext = authContext;
    const handshakeDoc = await prepareHandshakeDocument(authContext);
    // @ts-expect-error: TODO(NODE-5141): The options need to be filtered properly, Connection options differ from Command options
    const handshakeOptions = { ...options };
    if (typeof options.connectTimeoutMS === 'number') {
        // The handshake technically is a monitoring check, so its socket timeout should be connectTimeoutMS
        handshakeOptions.socketTimeoutMS = options.connectTimeoutMS;
    }
    const start = new Date().getTime();
    const response = await conn.commandAsync((0, utils_1.ns)('admin.$cmd'), handshakeDoc, handshakeOptions);
    if (!('isWritablePrimary' in response)) {
        // Provide hello-style response document.
        response.isWritablePrimary = response[constants_1.LEGACY_HELLO_COMMAND];
    }
    if (response.helloOk) {
        conn.helloOk = true;
    }
    const supportedServerErr = checkSupportedServer(response, options);
    if (supportedServerErr) {
        throw supportedServerErr;
    }
    if (options.loadBalanced) {
        if (!response.serviceId) {
            throw new error_1.MongoCompatibilityError('Driver attempted to initialize in load balancing mode, ' +
                'but the server does not support this mode.');
        }
    }
    // NOTE: This is metadata attached to the connection while porting away from
    // handshake being done in the `Server` class. Likely, it should be
    // relocated, or at very least restructured.
    conn.hello = response;
    conn.lastHelloMS = new Date().getTime() - start;
    if (!response.arbiterOnly && credentials) {
        // store the response on auth context
        authContext.response = response;
        const resolvedCredentials = credentials.resolveAuthMechanism(response);
        const provider = exports.AUTH_PROVIDERS.get(resolvedCredentials.mechanism);
        if (!provider) {
            throw new error_1.MongoInvalidArgumentError(`No AuthProvider for ${resolvedCredentials.mechanism} defined.`);
        }
        try {
            await provider.auth(authContext);
        }
        catch (error) {
            if (error instanceof error_1.MongoError) {
                error.addErrorLabel(error_1.MongoErrorLabel.HandshakeError);
                if ((0, error_1.needsRetryableWriteLabel)(error, response.maxWireVersion)) {
                    error.addErrorLabel(error_1.MongoErrorLabel.RetryableWriteError);
                }
            }
            throw error;
        }
    }
}
/**
 * @internal
 *
 * This function is only exposed for testing purposes.
 */
async function prepareHandshakeDocument(authContext) {
    const options = authContext.options;
    const compressors = options.compressors ? options.compressors : [];
    const { serverApi } = authContext.connection;
    const handshakeDoc = {
        [serverApi?.version ? 'hello' : constants_1.LEGACY_HELLO_COMMAND]: 1,
        helloOk: true,
        client: options.metadata,
        compression: compressors
    };
    if (options.loadBalanced === true) {
        handshakeDoc.loadBalanced = true;
    }
    const credentials = authContext.credentials;
    if (credentials) {
        if (credentials.mechanism === providers_1.AuthMechanism.MONGODB_DEFAULT && credentials.username) {
            handshakeDoc.saslSupportedMechs = `${credentials.source}.${credentials.username}`;
            const provider = exports.AUTH_PROVIDERS.get(providers_1.AuthMechanism.MONGODB_SCRAM_SHA256);
            if (!provider) {
                // This auth mechanism is always present.
                throw new error_1.MongoInvalidArgumentError(`No AuthProvider for ${providers_1.AuthMechanism.MONGODB_SCRAM_SHA256} defined.`);
            }
            return provider.prepare(handshakeDoc, authContext);
        }
        const provider = exports.AUTH_PROVIDERS.get(credentials.mechanism);
        if (!provider) {
            throw new error_1.MongoInvalidArgumentError(`No AuthProvider for ${credentials.mechanism} defined.`);
        }
        return provider.prepare(handshakeDoc, authContext);
    }
    return handshakeDoc;
}
exports.prepareHandshakeDocument = prepareHandshakeDocument;
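// For reference, a handshake document produced above for an unauthenticated,
// uncompressed client with no pinned serverApi looks roughly like this (field
// values illustrative; LEGACY_HELLO_COMMAND is the legacy "ismaster"-style
// command name):
//   { ismaster: 1, helloOk: true, client: { driver: {...}, os: {...} }, compression: [] }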
/** @public */
exports.LEGAL_TLS_SOCKET_OPTIONS = [
    'ALPNProtocols',
    'ca',
    'cert',
    'checkServerIdentity',
    'ciphers',
    'crl',
    'ecdhCurve',
    'key',
    'minDHSize',
    'passphrase',
    'pfx',
    'rejectUnauthorized',
    'secureContext',
    'secureProtocol',
    'servername',
    'session'
];
/** @public */
exports.LEGAL_TCP_SOCKET_OPTIONS = [
    'family',
    'hints',
    'localAddress',
    'localPort',
    'lookup'
];
function parseConnectOptions(options) {
    const hostAddress = options.hostAddress;
    if (!hostAddress)
        throw new error_1.MongoInvalidArgumentError('Option "hostAddress" is required');
    const result = {};
    for (const name of exports.LEGAL_TCP_SOCKET_OPTIONS) {
        if (options[name] != null) {
            result[name] = options[name];
        }
    }
    if (typeof hostAddress.socketPath === 'string') {
        result.path = hostAddress.socketPath;
        return result;
    }
    else if (typeof hostAddress.host === 'string') {
        result.host = hostAddress.host;
        result.port = hostAddress.port;
        return result;
    }
    else {
        // This should never happen since we set up HostAddresses
        // But if we don't throw here the socket could hang until timeout
        // TODO(NODE-3483)
        throw new error_1.MongoRuntimeError(`Unexpected HostAddress ${JSON.stringify(hostAddress)}`);
    }
}
function parseSslOptions(options) {
    const result = parseConnectOptions(options);
    // Merge in valid SSL options
    for (const name of exports.LEGAL_TLS_SOCKET_OPTIONS) {
        if (options[name] != null) {
            result[name] = options[name];
        }
    }
    if (options.existingSocket) {
        result.socket = options.existingSocket;
    }
    // Set default sni servername to be the same as host
    if (result.servername == null && result.host && !net.isIP(result.host)) {
        result.servername = result.host;
    }
    return result;
}
const SOCKET_ERROR_EVENT_LIST = ['error', 'close', 'timeout', 'parseError'];
const SOCKET_ERROR_EVENTS = new Set(SOCKET_ERROR_EVENT_LIST);
function makeConnection(options, _callback) {
    const useTLS = options.tls ?? false;
    const keepAlive = options.keepAlive ?? true;
    const socketTimeoutMS = options.socketTimeoutMS ?? Reflect.get(options, 'socketTimeout') ?? 0;
    const noDelay = options.noDelay ?? true;
    const connectTimeoutMS = options.connectTimeoutMS ?? 30000;
    const rejectUnauthorized = options.rejectUnauthorized ?? true;
    const keepAliveInitialDelay = ((options.keepAliveInitialDelay ?? 120000) > socketTimeoutMS
        ? Math.round(socketTimeoutMS / 2)
        : options.keepAliveInitialDelay) ?? 120000;
    const existingSocket = options.existingSocket;
    let socket;
    const callback = function (err, ret) {
        if (err && socket) {
            socket.destroy();
        }
        _callback(err, ret);
    };
    if (options.proxyHost != null) {
        // Currently, only Socks5 is supported.
        return makeSocks5Connection({
            ...options,
            connectTimeoutMS // Should always be present for Socks5
        }, callback);
    }
    if (useTLS) {
        const tlsSocket = tls.connect(parseSslOptions(options));
        if (typeof tlsSocket.disableRenegotiation === 'function') {
            tlsSocket.disableRenegotiation();
        }
        socket = tlsSocket;
    }
    else if (existingSocket) {
        // In the TLS case, parseSslOptions() sets options.socket to existingSocket,
        // so we only need to handle the non-TLS case here (where existingSocket
        // gives us all we need out of the box).
        socket = existingSocket;
    }
    else {
        socket = net.createConnection(parseConnectOptions(options));
    }
    socket.setKeepAlive(keepAlive, keepAliveInitialDelay);
    socket.setTimeout(connectTimeoutMS);
    socket.setNoDelay(noDelay);
    const connectEvent = useTLS ? 'secureConnect' : 'connect';
    let cancellationHandler;
    function errorHandler(eventName) {
        return (err) => {
            SOCKET_ERROR_EVENTS.forEach(event => socket.removeAllListeners(event));
            if (cancellationHandler && options.cancellationToken) {
                options.cancellationToken.removeListener('cancel', cancellationHandler);
            }
            socket.removeListener(connectEvent, connectHandler);
            callback(connectionFailureError(eventName, err));
        };
    }
    function connectHandler() {
        SOCKET_ERROR_EVENTS.forEach(event => socket.removeAllListeners(event));
        if (cancellationHandler && options.cancellationToken) {
            options.cancellationToken.removeListener('cancel', cancellationHandler);
        }
        if ('authorizationError' in socket) {
            if (socket.authorizationError && rejectUnauthorized) {
                // TODO(NODE-5192): wrap this with a MongoError subclass
                return callback(socket.authorizationError);
            }
        }
        socket.setTimeout(0);
        callback(undefined, socket);
    }
    SOCKET_ERROR_EVENTS.forEach(event => socket.once(event, errorHandler(event)));
    if (options.cancellationToken) {
        cancellationHandler = errorHandler('cancel');
        options.cancellationToken.once('cancel', cancellationHandler);
    }
    if (existingSocket) {
        process.nextTick(connectHandler);
    }
    else {
        socket.once(connectEvent, connectHandler);
    }
}
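// Worked example of the keepAliveInitialDelay clamp above, assuming default
// inputs: with socketTimeoutMS = 0, the default 120000 is greater than 0, so
// the delay becomes Math.round(0 / 2) = 0; with socketTimeoutMS = 500000 the
// comparison 120000 > 500000 is false, the branch yields the (undefined)
// keepAliveInitialDelay option, and `?? 120000` restores the default.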
function makeSocks5Connection(options, callback) {
    const hostAddress = utils_1.HostAddress.fromHostPort(options.proxyHost ?? '', // proxyHost is guaranteed to be set here
    options.proxyPort ?? 1080);
    // First, connect to the proxy server itself:
    makeConnection({
        ...options,
        hostAddress,
        tls: false,
        proxyHost: undefined
    }, (err, rawSocket) => {
        if (err) {
            return callback(err);
        }
        const destination = parseConnectOptions(options);
        if (typeof destination.host !== 'string' || typeof destination.port !== 'number') {
            return callback(new error_1.MongoInvalidArgumentError('Can only make Socks5 connections to TCP hosts'));
        }
        // Then, establish the Socks5 proxy connection:
        socks_1.SocksClient.createConnection({
            existing_socket: rawSocket,
            timeout: options.connectTimeoutMS,
            command: 'connect',
            destination: {
                host: destination.host,
                port: destination.port
            },
            proxy: {
                // host and port are ignored because we pass existing_socket
                host: 'iLoveJavaScript',
                port: 0,
                type: 5,
                userId: options.proxyUsername || undefined,
                password: options.proxyPassword || undefined
            }
        }).then(({ socket }) => {
            // Finally, now treat the resulting duplex stream as the
            // socket over which we send and receive wire protocol messages:
            makeConnection({
                ...options,
                existingSocket: socket,
                proxyHost: undefined
            }, callback);
        }, error => callback(connectionFailureError('error', error)));
    });
}
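// A sketch of the options that route a connection through the Socks5 path
// above (hostnames and ports illustrative only):
//   makeConnection({ hostAddress: utils_1.HostAddress.fromString('db.example:27017'),
//                    proxyHost: 'proxy.example', proxyPort: 1080 }, callback);
// Step 1 dials proxy.example:1080 directly, step 2 issues the Socks5 CONNECT to
// db.example:27017 over that socket, and step 3 re-enters makeConnection with
// the proxied duplex stream as existingSocket (wrapping it in TLS if requested).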
function connectionFailureError(type, err) {
    switch (type) {
        case 'error':
            return new error_1.MongoNetworkError(err);
        case 'timeout':
            return new error_1.MongoNetworkTimeoutError('connection timed out');
        case 'close':
            return new error_1.MongoNetworkError('connection closed');
        case 'cancel':
            return new error_1.MongoNetworkError('connection establishment was cancelled');
        default:
            return new error_1.MongoNetworkError('unknown network error');
    }
}
//# sourceMappingURL=connect.js.map
1
VISUALIZACION/node_modules/mongodb/lib/cmap/connect.js.map
generated
vendored
Executable file
File diff suppressed because one or more lines are too long
492
VISUALIZACION/node_modules/mongodb/lib/cmap/connection.js
generated
vendored
Executable file
@@ -0,0 +1,492 @@
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.hasSessionSupport = exports.CryptoConnection = exports.Connection = void 0;
|
||||
const timers_1 = require("timers");
|
||||
const util_1 = require("util");
|
||||
const constants_1 = require("../constants");
|
||||
const error_1 = require("../error");
|
||||
const mongo_types_1 = require("../mongo_types");
|
||||
const sessions_1 = require("../sessions");
|
||||
const utils_1 = require("../utils");
|
||||
const command_monitoring_events_1 = require("./command_monitoring_events");
|
||||
const commands_1 = require("./commands");
|
||||
const message_stream_1 = require("./message_stream");
|
||||
const stream_description_1 = require("./stream_description");
|
||||
const shared_1 = require("./wire_protocol/shared");
|
||||
/** @internal */
|
||||
const kStream = Symbol('stream');
|
||||
/** @internal */
|
||||
const kQueue = Symbol('queue');
|
||||
/** @internal */
|
||||
const kMessageStream = Symbol('messageStream');
|
||||
/** @internal */
|
||||
const kGeneration = Symbol('generation');
|
||||
/** @internal */
|
||||
const kLastUseTime = Symbol('lastUseTime');
|
||||
/** @internal */
|
||||
const kClusterTime = Symbol('clusterTime');
|
||||
/** @internal */
|
||||
const kDescription = Symbol('description');
|
||||
/** @internal */
|
||||
const kHello = Symbol('hello');
|
||||
/** @internal */
|
||||
const kAutoEncrypter = Symbol('autoEncrypter');
|
||||
/** @internal */
|
||||
const kDelayedTimeoutId = Symbol('delayedTimeoutId');
|
||||
const INVALID_QUEUE_SIZE = 'Connection internal queue contains more than 1 operation description';
|
||||
/** @internal */
|
||||
class Connection extends mongo_types_1.TypedEventEmitter {
|
||||
constructor(stream, options) {
|
||||
super();
|
||||
this.commandAsync = (0, util_1.promisify)((ns, cmd, options, callback) => this.command(ns, cmd, options, callback));
|
||||
this.id = options.id;
|
||||
this.address = streamIdentifier(stream, options);
|
||||
this.socketTimeoutMS = options.socketTimeoutMS ?? 0;
|
||||
this.monitorCommands = options.monitorCommands;
|
||||
this.serverApi = options.serverApi;
|
||||
this.closed = false;
|
||||
this[kHello] = null;
|
||||
this[kClusterTime] = null;
|
||||
this[kDescription] = new stream_description_1.StreamDescription(this.address, options);
|
||||
this[kGeneration] = options.generation;
|
||||
this[kLastUseTime] = (0, utils_1.now)();
|
||||
// setup parser stream and message handling
|
||||
this[kQueue] = new Map();
|
||||
this[kMessageStream] = new message_stream_1.MessageStream({
|
||||
...options,
|
||||
maxBsonMessageSize: this.hello?.maxBsonMessageSize
|
||||
});
|
||||
this[kStream] = stream;
|
||||
this[kDelayedTimeoutId] = null;
|
||||
this[kMessageStream].on('message', message => this.onMessage(message));
|
||||
this[kMessageStream].on('error', error => this.onError(error));
|
||||
this[kStream].on('close', () => this.onClose());
|
||||
this[kStream].on('timeout', () => this.onTimeout());
|
||||
this[kStream].on('error', () => {
|
||||
/* ignore errors, listen to `close` instead */
|
||||
});
|
||||
// hook the message stream up to the passed in stream
|
||||
this[kStream].pipe(this[kMessageStream]);
|
||||
this[kMessageStream].pipe(this[kStream]);
|
||||
}
|
||||
get description() {
|
||||
return this[kDescription];
|
||||
}
|
||||
get hello() {
|
||||
return this[kHello];
|
||||
}
|
||||
// the `connect` method stores the result of the handshake hello on the connection
|
||||
set hello(response) {
|
||||
this[kDescription].receiveResponse(response);
|
||||
this[kDescription] = Object.freeze(this[kDescription]);
|
||||
// TODO: remove this, and only use the `StreamDescription` in the future
|
||||
this[kHello] = response;
|
||||
}
|
||||
    // Set whether the message stream is for a monitoring connection.
    set isMonitoringConnection(value) {
        this[kMessageStream].isMonitoringConnection = value;
    }
    get isMonitoringConnection() {
        return this[kMessageStream].isMonitoringConnection;
    }
    get serviceId() {
        return this.hello?.serviceId;
    }
    get loadBalanced() {
        return this.description.loadBalanced;
    }
    get generation() {
        return this[kGeneration] || 0;
    }
    set generation(generation) {
        this[kGeneration] = generation;
    }
    get idleTime() {
        return (0, utils_1.calculateDurationInMs)(this[kLastUseTime]);
    }
    get clusterTime() {
        return this[kClusterTime];
    }
    get stream() {
        return this[kStream];
    }
    markAvailable() {
        this[kLastUseTime] = (0, utils_1.now)();
    }
    onError(error) {
        this.cleanup(true, error);
    }
    onClose() {
        const message = `connection ${this.id} to ${this.address} closed`;
        this.cleanup(true, new error_1.MongoNetworkError(message));
    }
    onTimeout() {
        this[kDelayedTimeoutId] = (0, timers_1.setTimeout)(() => {
            const message = `connection ${this.id} to ${this.address} timed out`;
            const beforeHandshake = this.hello == null;
            this.cleanup(true, new error_1.MongoNetworkTimeoutError(message, { beforeHandshake }));
        }, 1).unref(); // No need for this timer to hold the event loop open
    }
    onMessage(message) {
        const delayedTimeoutId = this[kDelayedTimeoutId];
        if (delayedTimeoutId != null) {
            (0, timers_1.clearTimeout)(delayedTimeoutId);
            this[kDelayedTimeoutId] = null;
        }
        const socketTimeoutMS = this[kStream].timeout ?? 0;
        this[kStream].setTimeout(0);
        // always emit the message, in case we are streaming
        this.emit('message', message);
        let operationDescription = this[kQueue].get(message.responseTo);
        if (!operationDescription && this.isMonitoringConnection) {
            // This is how we recover when the initial hello's requestId is not
            // the responseTo when hello responses have been skipped:
            // First check if the map is of invalid size
            if (this[kQueue].size > 1) {
                this.cleanup(true, new error_1.MongoRuntimeError(INVALID_QUEUE_SIZE));
            }
            else {
                // Get the first orphaned operation description.
                const entry = this[kQueue].entries().next();
                if (entry.value != null) {
                    const [requestId, orphaned] = entry.value;
                    // If the orphaned operation description exists then set it.
                    operationDescription = orphaned;
                    // Remove the entry with the bad request id from the queue.
                    this[kQueue].delete(requestId);
                }
            }
        }
        if (!operationDescription) {
            return;
        }
        const callback = operationDescription.cb;
        // SERVER-45775: For exhaust responses we should be able to use the same requestId to
        // track response, however the server currently synthetically produces remote requests
        // making the `responseTo` change on each response
        this[kQueue].delete(message.responseTo);
        if ('moreToCome' in message && message.moreToCome) {
            // If the operation description check above does find an orphaned
            // description and sets the operationDescription then this line will put one
            // back in the queue with the correct requestId and will resolve not being able
            // to find the next one via the responseTo of the next streaming hello.
            this[kQueue].set(message.requestId, operationDescription);
            this[kStream].setTimeout(socketTimeoutMS);
        }
        try {
            // Pass in the entire description because it has BSON parsing options
            message.parse(operationDescription);
        }
        catch (err) {
            // If this error is generated by our own code, it will already have the correct class applied
            // if it is not, then it is coming from a catastrophic data parse failure or the BSON library
            // in either case, it should not be wrapped
            callback(err);
            return;
        }
        if (message.documents[0]) {
            const document = message.documents[0];
            const session = operationDescription.session;
            if (session) {
                (0, sessions_1.updateSessionFromResponse)(session, document);
            }
            if (document.$clusterTime) {
                this[kClusterTime] = document.$clusterTime;
                this.emit(Connection.CLUSTER_TIME_RECEIVED, document.$clusterTime);
            }
            if (document.writeConcernError) {
                callback(new error_1.MongoWriteConcernError(document.writeConcernError, document), document);
                return;
            }
            if (document.ok === 0 || document.$err || document.errmsg || document.code) {
                callback(new error_1.MongoServerError(document));
                return;
            }
        }
        callback(undefined, message.documents[0]);
    }
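    // Request/response correlation in onMessage() above is a Map keyed by the
    // requestId the driver sent; a reply's responseTo must match it. A minimal
    // sketch of the happy path, assuming a standalone Map (ids illustrative):
    //   queue.set(42, description);               // command written as requestId 42
    //   const desc = queue.get(reply.responseTo); // server echoes 42 in responseTo
    //   queue.delete(42);                         // one reply per request, unless moreToCome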
    destroy(options, callback) {
        if (this.closed) {
            process.nextTick(() => callback?.());
            return;
        }
        if (typeof callback === 'function') {
            this.once('close', () => process.nextTick(() => callback()));
        }
        // load balanced mode requires that these listeners remain on the connection
        // after cleanup on timeouts, errors or close so we remove them before calling
        // cleanup.
        this.removeAllListeners(Connection.PINNED);
        this.removeAllListeners(Connection.UNPINNED);
        const message = `connection ${this.id} to ${this.address} closed`;
        this.cleanup(options.force, new error_1.MongoNetworkError(message));
    }
    /**
     * A method that cleans up the connection. When `force` is true, this method
     * forcibly destroys the socket.
     *
     * If an error is provided, any in-flight operations will be closed with the error.
     *
     * This method does nothing if the connection is already closed.
     */
    cleanup(force, error) {
        if (this.closed) {
            return;
        }
        this.closed = true;
        const completeCleanup = () => {
            for (const op of this[kQueue].values()) {
                op.cb(error);
            }
            this[kQueue].clear();
            this.emit(Connection.CLOSE);
        };
        this[kStream].removeAllListeners();
        this[kMessageStream].removeAllListeners();
        this[kMessageStream].destroy();
        if (force) {
            this[kStream].destroy();
            completeCleanup();
            return;
        }
        if (!this[kStream].writableEnded) {
            this[kStream].end(() => {
                this[kStream].destroy();
                completeCleanup();
            });
        }
        else {
            completeCleanup();
        }
    }
    command(ns, command, options, callback) {
        let cmd = { ...command };
        const readPreference = (0, shared_1.getReadPreference)(options);
        const shouldUseOpMsg = supportsOpMsg(this);
        const session = options?.session;
        let clusterTime = this.clusterTime;
        if (this.serverApi) {
            const { version, strict, deprecationErrors } = this.serverApi;
            cmd.apiVersion = version;
            if (strict != null)
                cmd.apiStrict = strict;
            if (deprecationErrors != null)
                cmd.apiDeprecationErrors = deprecationErrors;
        }
        if (hasSessionSupport(this) && session) {
            if (session.clusterTime &&
                clusterTime &&
                session.clusterTime.clusterTime.greaterThan(clusterTime.clusterTime)) {
                clusterTime = session.clusterTime;
            }
            const err = (0, sessions_1.applySession)(session, cmd, options);
            if (err) {
                return callback(err);
            }
        }
        else if (session?.explicit) {
            return callback(new error_1.MongoCompatibilityError('Current topology does not support sessions'));
        }
        // if we have a known cluster time, gossip it
        if (clusterTime) {
            cmd.$clusterTime = clusterTime;
        }
        if ((0, shared_1.isSharded)(this) && !shouldUseOpMsg && readPreference && readPreference.mode !== 'primary') {
            cmd = {
                $query: cmd,
                $readPreference: readPreference.toJSON()
            };
        }
        const commandOptions = Object.assign({
            numberToSkip: 0,
            numberToReturn: -1,
            checkKeys: false,
            // This value is not overridable
            secondaryOk: readPreference.secondaryOk()
        }, options);
        const cmdNs = `${ns.db}.$cmd`;
        const message = shouldUseOpMsg
            ? new commands_1.Msg(cmdNs, cmd, commandOptions)
            : new commands_1.Query(cmdNs, cmd, commandOptions);
        try {
            write(this, message, commandOptions, callback);
        }
        catch (err) {
            callback(err);
        }
    }
}
/** @event */
Connection.COMMAND_STARTED = constants_1.COMMAND_STARTED;
/** @event */
Connection.COMMAND_SUCCEEDED = constants_1.COMMAND_SUCCEEDED;
/** @event */
Connection.COMMAND_FAILED = constants_1.COMMAND_FAILED;
/** @event */
Connection.CLUSTER_TIME_RECEIVED = constants_1.CLUSTER_TIME_RECEIVED;
/** @event */
Connection.CLOSE = constants_1.CLOSE;
/** @event */
Connection.MESSAGE = constants_1.MESSAGE;
/** @event */
Connection.PINNED = constants_1.PINNED;
/** @event */
Connection.UNPINNED = constants_1.UNPINNED;
exports.Connection = Connection;
/** @internal */
class CryptoConnection extends Connection {
    constructor(stream, options) {
        super(stream, options);
        this[kAutoEncrypter] = options.autoEncrypter;
    }
    /** @internal @override */
    command(ns, cmd, options, callback) {
        const autoEncrypter = this[kAutoEncrypter];
        if (!autoEncrypter) {
            return callback(new error_1.MongoMissingDependencyError('No AutoEncrypter available for encryption'));
        }
        const serverWireVersion = (0, utils_1.maxWireVersion)(this);
        if (serverWireVersion === 0) {
            // This means the initial handshake hasn't happened yet
            return super.command(ns, cmd, options, callback);
        }
        if (serverWireVersion < 8) {
            callback(new error_1.MongoCompatibilityError('Auto-encryption requires a minimum MongoDB version of 4.2'));
            return;
        }
        // Save sort or indexKeys based on the command being run
        // the encrypt API serializes our JS objects to BSON to pass to the native code layer
        // and then deserializes the encrypted result, the protocol level components
        // of the command (ex. sort) are then converted to JS objects potentially losing
        // important key order information. These fields are never encrypted so we can save the values
        // from before the encryption and replace them after encryption has been performed
        const sort = cmd.find || cmd.findAndModify ? cmd.sort : null;
        const indexKeys = cmd.createIndexes
            ? cmd.indexes.map((index) => index.key)
            : null;
        autoEncrypter.encrypt(ns.toString(), cmd, options, (err, encrypted) => {
            if (err || encrypted == null) {
                callback(err, null);
                return;
            }
            // Replace the saved values
            if (sort != null && (cmd.find || cmd.findAndModify)) {
                encrypted.sort = sort;
            }
            if (indexKeys != null && cmd.createIndexes) {
                for (const [offset, index] of indexKeys.entries()) {
                    encrypted.indexes[offset].key = index;
                }
            }
            super.command(ns, encrypted, options, (err, response) => {
                if (err || response == null) {
                    callback(err, response);
                    return;
                }
                autoEncrypter.decrypt(response, options, callback);
            });
        });
    }
}
exports.CryptoConnection = CryptoConnection;
/** @internal */
function hasSessionSupport(conn) {
    const description = conn.description;
    return description.logicalSessionTimeoutMinutes != null;
}
exports.hasSessionSupport = hasSessionSupport;
function supportsOpMsg(conn) {
    const description = conn.description;
    if (description == null) {
        return false;
    }
    return (0, utils_1.maxWireVersion)(conn) >= 6 && !description.__nodejs_mock_server__;
}
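// supportsOpMsg() gates the wire format: maxWireVersion >= 6 corresponds to
// MongoDB 3.6+, where OP_MSG superseded the legacy OP_QUERY command path. For
// example, a description reporting maxWireVersion 17 (roughly MongoDB 6.0)
// makes command() above build a commands_1.Msg, while 5 or lower falls back to
// commands_1.Query.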
function streamIdentifier(stream, options) {
    if (options.proxyHost) {
        // If proxy options are specified, the properties of `stream` itself
        // will not accurately reflect what endpoint this is connected to.
        return options.hostAddress.toString();
    }
    const { remoteAddress, remotePort } = stream;
    if (typeof remoteAddress === 'string' && typeof remotePort === 'number') {
        return utils_1.HostAddress.fromHostPort(remoteAddress, remotePort).toString();
    }
    return (0, utils_1.uuidV4)().toString('hex');
}
function write(conn, command, options, callback) {
    options = options ?? {};
    const operationDescription = {
        requestId: command.requestId,
        cb: callback,
        session: options.session,
        noResponse: typeof options.noResponse === 'boolean' ? options.noResponse : false,
        documentsReturnedIn: options.documentsReturnedIn,
        // for BSON parsing
        useBigInt64: typeof options.useBigInt64 === 'boolean' ? options.useBigInt64 : false,
        promoteLongs: typeof options.promoteLongs === 'boolean' ? options.promoteLongs : true,
        promoteValues: typeof options.promoteValues === 'boolean' ? options.promoteValues : true,
        promoteBuffers: typeof options.promoteBuffers === 'boolean' ? options.promoteBuffers : false,
        bsonRegExp: typeof options.bsonRegExp === 'boolean' ? options.bsonRegExp : false,
        enableUtf8Validation: typeof options.enableUtf8Validation === 'boolean' ? options.enableUtf8Validation : true,
        raw: typeof options.raw === 'boolean' ? options.raw : false,
        started: 0
    };
    if (conn[kDescription] && conn[kDescription].compressor) {
        operationDescription.agreedCompressor = conn[kDescription].compressor;
        if (conn[kDescription].zlibCompressionLevel) {
            operationDescription.zlibCompressionLevel = conn[kDescription].zlibCompressionLevel;
        }
    }
    if (typeof options.socketTimeoutMS === 'number') {
        conn[kStream].setTimeout(options.socketTimeoutMS);
    }
    else if (conn.socketTimeoutMS !== 0) {
        conn[kStream].setTimeout(conn.socketTimeoutMS);
    }
    // if command monitoring is enabled we need to modify the callback here
    if (conn.monitorCommands) {
        conn.emit(Connection.COMMAND_STARTED, new command_monitoring_events_1.CommandStartedEvent(conn, command));
        operationDescription.started = (0, utils_1.now)();
        operationDescription.cb = (err, reply) => {
            // Command monitoring spec states that if ok is 1, then we must always emit
            // a command succeeded event, even if there's an error. Write concern errors
            // will have an ok: 1 in their reply.
            if (err && reply?.ok !== 1) {
                conn.emit(Connection.COMMAND_FAILED, new command_monitoring_events_1.CommandFailedEvent(conn, command, err, operationDescription.started));
            }
            else {
                if (reply && (reply.ok === 0 || reply.$err)) {
                    conn.emit(Connection.COMMAND_FAILED, new command_monitoring_events_1.CommandFailedEvent(conn, command, reply, operationDescription.started));
                }
                else {
                    conn.emit(Connection.COMMAND_SUCCEEDED, new command_monitoring_events_1.CommandSucceededEvent(conn, command, reply, operationDescription.started));
                }
            }
            if (typeof callback === 'function') {
                // Since we're passing through the reply with the write concern error now, we
                // need it not to be provided to the original callback in this case so
                // retryability does not get tricked into thinking the command actually
                // succeeded.
                callback(err, err instanceof error_1.MongoWriteConcernError ? undefined : reply);
            }
        };
    }
    if (!operationDescription.noResponse) {
        conn[kQueue].set(operationDescription.requestId, operationDescription);
    }
    try {
        conn[kMessageStream].writeCommand(command, operationDescription);
    }
    catch (e) {
        if (!operationDescription.noResponse) {
            conn[kQueue].delete(operationDescription.requestId);
            operationDescription.cb(e);
            return;
        }
    }
    if (operationDescription.noResponse) {
        operationDescription.cb();
    }
}
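// When monitorCommands is enabled, the wrapped callback above fans each reply
// out into the driver's public command monitoring events; a minimal consumer
// sketch against a MongoClient:
//   client.on('commandStarted', e => console.log(e.commandName, e.requestId));
//   client.on('commandSucceeded', e => console.log(e.commandName, e.duration));
//   client.on('commandFailed', e => console.log(e.commandName, e.failure));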
//# sourceMappingURL=connection.js.map
1
VISUALIZACION/node_modules/mongodb/lib/cmap/connection.js.map
generated
vendored
Executable file
File diff suppressed because one or more lines are too long
637
VISUALIZACION/node_modules/mongodb/lib/cmap/connection_pool.js
generated
vendored
Executable file
@@ -0,0 +1,637 @@
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.ConnectionPool = exports.PoolState = void 0;
|
||||
const timers_1 = require("timers");
|
||||
const constants_1 = require("../constants");
|
||||
const error_1 = require("../error");
|
||||
const mongo_types_1 = require("../mongo_types");
|
||||
const utils_1 = require("../utils");
|
||||
const connect_1 = require("./connect");
|
||||
const connection_1 = require("./connection");
|
||||
const connection_pool_events_1 = require("./connection_pool_events");
|
||||
const errors_1 = require("./errors");
|
||||
const metrics_1 = require("./metrics");
|
||||
/** @internal */
|
||||
const kServer = Symbol('server');
|
||||
/** @internal */
|
||||
const kConnections = Symbol('connections');
|
||||
/** @internal */
|
||||
const kPending = Symbol('pending');
|
||||
/** @internal */
|
||||
const kCheckedOut = Symbol('checkedOut');
|
||||
/** @internal */
|
||||
const kMinPoolSizeTimer = Symbol('minPoolSizeTimer');
|
||||
/** @internal */
|
||||
const kGeneration = Symbol('generation');
|
||||
/** @internal */
|
||||
const kServiceGenerations = Symbol('serviceGenerations');
|
||||
/** @internal */
|
||||
const kConnectionCounter = Symbol('connectionCounter');
|
||||
/** @internal */
|
||||
const kCancellationToken = Symbol('cancellationToken');
|
||||
/** @internal */
|
||||
const kWaitQueue = Symbol('waitQueue');
|
||||
/** @internal */
|
||||
const kCancelled = Symbol('cancelled');
|
||||
/** @internal */
|
||||
const kMetrics = Symbol('metrics');
|
||||
/** @internal */
|
||||
const kProcessingWaitQueue = Symbol('processingWaitQueue');
|
||||
/** @internal */
|
||||
const kPoolState = Symbol('poolState');
|
||||
/** @internal */
|
||||
exports.PoolState = Object.freeze({
|
||||
paused: 'paused',
|
||||
ready: 'ready',
|
||||
closed: 'closed'
|
||||
});
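// The pool is a small state machine over these three states: it is constructed
// paused, ready() moves paused -> ready, clear() moves ready -> paused while
// bumping the generation, and close() is terminal. For example, an error that
// clears the pool leaves it paused until a later successful heartbeat calls
// ready() again.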
/**
 * A pool of connections which dynamically resizes, and emits events related to pool activity
 * @internal
 */
class ConnectionPool extends mongo_types_1.TypedEventEmitter {
    constructor(server, options) {
        super();
        this.options = Object.freeze({
            ...options,
            connectionType: connection_1.Connection,
            maxPoolSize: options.maxPoolSize ?? 100,
            minPoolSize: options.minPoolSize ?? 0,
            maxConnecting: options.maxConnecting ?? 2,
            maxIdleTimeMS: options.maxIdleTimeMS ?? 0,
            waitQueueTimeoutMS: options.waitQueueTimeoutMS ?? 0,
            minPoolSizeCheckFrequencyMS: options.minPoolSizeCheckFrequencyMS ?? 100,
            autoEncrypter: options.autoEncrypter,
            metadata: options.metadata
        });
        if (this.options.minPoolSize > this.options.maxPoolSize) {
            throw new error_1.MongoInvalidArgumentError('Connection pool minimum size must not be greater than maximum pool size');
        }
        this[kPoolState] = exports.PoolState.paused;
        this[kServer] = server;
        this[kConnections] = new utils_1.List();
        this[kPending] = 0;
        this[kCheckedOut] = new Set();
        this[kMinPoolSizeTimer] = undefined;
        this[kGeneration] = 0;
        this[kServiceGenerations] = new Map();
        this[kConnectionCounter] = (0, utils_1.makeCounter)(1);
        this[kCancellationToken] = new mongo_types_1.CancellationToken();
        this[kCancellationToken].setMaxListeners(Infinity);
        this[kWaitQueue] = new utils_1.List();
        this[kMetrics] = new metrics_1.ConnectionPoolMetrics();
        this[kProcessingWaitQueue] = false;
        this.mongoLogger = this[kServer].topology.client.mongoLogger;
        this.component = 'connection';
        process.nextTick(() => {
            this.emitAndLog(ConnectionPool.CONNECTION_POOL_CREATED, new connection_pool_events_1.ConnectionPoolCreatedEvent(this));
        });
    }
    /** The address of the endpoint the pool is connected to */
    get address() {
        return this.options.hostAddress.toString();
    }
    /**
     * Check if the pool has been closed
     *
     * TODO(NODE-3263): We can remove this property once shell no longer needs it
     */
    get closed() {
        return this[kPoolState] === exports.PoolState.closed;
    }
    /** An integer representing the SDAM generation of the pool */
    get generation() {
        return this[kGeneration];
    }
    /** An integer expressing how many total connections (available + pending + in use) the pool currently has */
    get totalConnectionCount() {
        return (this.availableConnectionCount + this.pendingConnectionCount + this.currentCheckedOutCount);
    }
    /** An integer expressing how many connections are currently available in the pool. */
    get availableConnectionCount() {
        return this[kConnections].length;
    }
    get pendingConnectionCount() {
        return this[kPending];
    }
    get currentCheckedOutCount() {
        return this[kCheckedOut].size;
    }
    get waitQueueSize() {
        return this[kWaitQueue].length;
    }
    get loadBalanced() {
        return this.options.loadBalanced;
    }
    get serviceGenerations() {
        return this[kServiceGenerations];
    }
    get serverError() {
        return this[kServer].description.error;
    }
    /**
     * This is exposed ONLY for use in mongosh, to enable
     * killing all connections if a user quits the shell with
     * operations in progress.
     *
     * This property may be removed as a part of NODE-3263.
     */
    get checkedOutConnections() {
        return this[kCheckedOut];
    }
    /**
     * Get the metrics information for the pool when a wait queue timeout occurs.
     */
    waitQueueErrorMetrics() {
        return this[kMetrics].info(this.options.maxPoolSize);
    }
    /**
     * Set the pool state to "ready"
     */
    ready() {
        if (this[kPoolState] !== exports.PoolState.paused) {
            return;
        }
        this[kPoolState] = exports.PoolState.ready;
        this.emitAndLog(ConnectionPool.CONNECTION_POOL_READY, new connection_pool_events_1.ConnectionPoolReadyEvent(this));
        (0, timers_1.clearTimeout)(this[kMinPoolSizeTimer]);
        this.ensureMinPoolSize();
    }
    /**
     * Check a connection out of this pool. The connection will continue to be tracked, but no reference to it
     * will be held by the pool. This means that if a connection is checked out it MUST be checked back in or
     * explicitly destroyed by the new owner.
     */
    checkOut(callback) {
        this.emitAndLog(ConnectionPool.CONNECTION_CHECK_OUT_STARTED, new connection_pool_events_1.ConnectionCheckOutStartedEvent(this));
        const waitQueueMember = { callback };
        const waitQueueTimeoutMS = this.options.waitQueueTimeoutMS;
        if (waitQueueTimeoutMS) {
            waitQueueMember.timer = (0, timers_1.setTimeout)(() => {
                waitQueueMember[kCancelled] = true;
                waitQueueMember.timer = undefined;
                this.emitAndLog(ConnectionPool.CONNECTION_CHECK_OUT_FAILED, new connection_pool_events_1.ConnectionCheckOutFailedEvent(this, 'timeout'));
                waitQueueMember.callback(new errors_1.WaitQueueTimeoutError(this.loadBalanced
                    ? this.waitQueueErrorMetrics()
                    : 'Timed out while checking out a connection from connection pool', this.address));
            }, waitQueueTimeoutMS);
        }
        this[kWaitQueue].push(waitQueueMember);
        process.nextTick(() => this.processWaitQueue());
    }
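    // checkOut() resolves asynchronously by design: the member joins the wait
    // queue and processWaitQueue() pairs it with a connection on a later tick.
    // A minimal caller sketch, assuming waitQueueTimeoutMS was configured
    // (handle/use are placeholder callbacks, not driver APIs):
    //   pool.checkOut((err, conn) => {
    //       if (err) return handle(err); // WaitQueueTimeoutError after the timeout
    //       use(conn, () => pool.checkIn(conn)); // MUST be checked back in or destroyed
    //   });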
    /**
     * Check a connection into the pool.
     *
     * @param connection - The connection to check in
     */
    checkIn(connection) {
        if (!this[kCheckedOut].has(connection)) {
            return;
        }
        const poolClosed = this.closed;
        const stale = this.connectionIsStale(connection);
        const willDestroy = !!(poolClosed || stale || connection.closed);
        if (!willDestroy) {
            connection.markAvailable();
            this[kConnections].unshift(connection);
        }
        this[kCheckedOut].delete(connection);
        this.emitAndLog(ConnectionPool.CONNECTION_CHECKED_IN, new connection_pool_events_1.ConnectionCheckedInEvent(this, connection));
        if (willDestroy) {
            const reason = connection.closed ? 'error' : poolClosed ? 'poolClosed' : 'stale';
            this.destroyConnection(connection, reason);
        }
        process.nextTick(() => this.processWaitQueue());
    }
    /**
     * Clear the pool
     *
     * Pool reset is handled by incrementing the pool's generation count. Any existing connection of a
     * previous generation will eventually be pruned during subsequent checkouts.
     */
    clear(options = {}) {
        if (this.closed) {
            return;
        }
        // handle load balanced case
        if (this.loadBalanced) {
            const { serviceId } = options;
            if (!serviceId) {
                throw new error_1.MongoRuntimeError('ConnectionPool.clear() called in load balanced mode with no serviceId.');
            }
            const sid = serviceId.toHexString();
            const generation = this.serviceGenerations.get(sid);
            // Only need to worry if the generation exists, since it should
            // always be there but typescript needs the check.
            if (generation == null) {
                throw new error_1.MongoRuntimeError('Service generations are required in load balancer mode.');
            }
            else {
                // Increment the generation for the service id.
                this.serviceGenerations.set(sid, generation + 1);
            }
            this.emitAndLog(ConnectionPool.CONNECTION_POOL_CLEARED, new connection_pool_events_1.ConnectionPoolClearedEvent(this, { serviceId }));
            return;
        }
        // handle non load-balanced case
        const interruptInUseConnections = options.interruptInUseConnections ?? false;
        const oldGeneration = this[kGeneration];
        this[kGeneration] += 1;
        const alreadyPaused = this[kPoolState] === exports.PoolState.paused;
        this[kPoolState] = exports.PoolState.paused;
        this.clearMinPoolSizeTimer();
        if (!alreadyPaused) {
            this.emitAndLog(ConnectionPool.CONNECTION_POOL_CLEARED, new connection_pool_events_1.ConnectionPoolClearedEvent(this, {
                interruptInUseConnections
            }));
        }
        if (interruptInUseConnections) {
            process.nextTick(() => this.interruptInUseConnections(oldGeneration));
        }
        this.processWaitQueue();
    }
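    // Worked example of generation-based invalidation: with this[kGeneration]
    // at 3, every pooled connection was stamped with generation 3 at creation.
    // clear() bumps the pool to 4, so a later checkIn() sees
    // connectionIsStale() compare 3 !== 4 and destroys the connection rather
    // than returning it to the available list.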
    /**
     * Closes all stale in-use connections in the pool with a resumable PoolClearedOnNetworkError.
     *
     * Only connections where `connection.generation <= minGeneration` are killed.
     */
    interruptInUseConnections(minGeneration) {
        for (const connection of this[kCheckedOut]) {
            if (connection.generation <= minGeneration) {
                this.checkIn(connection);
                connection.onError(new errors_1.PoolClearedOnNetworkError(this));
            }
        }
    }
    close(_options, _cb) {
        let options = _options;
        const callback = (_cb ?? _options);
        if (typeof options === 'function') {
            options = {};
        }
        options = Object.assign({ force: false }, options);
        if (this.closed) {
            return callback();
        }
        // immediately cancel any in-flight connections
        this[kCancellationToken].emit('cancel');
        // end the connection counter
        if (typeof this[kConnectionCounter].return === 'function') {
            this[kConnectionCounter].return(undefined);
        }
        this[kPoolState] = exports.PoolState.closed;
        this.clearMinPoolSizeTimer();
        this.processWaitQueue();
        (0, utils_1.eachAsync)(this[kConnections].toArray(), (conn, cb) => {
            this.emitAndLog(ConnectionPool.CONNECTION_CLOSED, new connection_pool_events_1.ConnectionClosedEvent(this, conn, 'poolClosed'));
            conn.destroy({ force: !!options.force }, cb);
        }, err => {
            this[kConnections].clear();
            this.emitAndLog(ConnectionPool.CONNECTION_POOL_CLOSED, new connection_pool_events_1.ConnectionPoolClosedEvent(this));
            callback(err);
        });
    }
    /**
     * Runs a lambda with an implicitly checked out connection, checking that connection back in when the lambda
     * has completed by calling back.
     *
     * NOTE: please note the required signature of `fn`
     *
     * @remarks When in load balancer mode, connections can be pinned to cursors or transactions.
     * In these cases we pass the connection in to this method to ensure it is used and a new
     * connection is not checked out.
     *
     * @param conn - A pinned connection for use in load balancing mode.
     * @param fn - A function which operates on a managed connection
     * @param callback - The original callback
     */
    withConnection(conn, fn, callback) {
        if (conn) {
            // use the provided connection, and do _not_ check it in after execution
            fn(undefined, conn, (fnErr, result) => {
                if (fnErr) {
                    return this.withReauthentication(fnErr, conn, fn, callback);
                }
                callback(undefined, result);
            });
            return;
        }
        this.checkOut((err, conn) => {
            // don't callback with `err` here, we might want to act upon it inside `fn`
            fn(err, conn, (fnErr, result) => {
                if (fnErr) {
                    if (conn) {
                        this.withReauthentication(fnErr, conn, fn, callback);
                    }
                    else {
                        callback(fnErr);
                    }
                }
                else {
                    callback(undefined, result);
                }
                if (conn) {
                    this.checkIn(conn);
                }
            });
        });
    }
    withReauthentication(fnErr, conn, fn, callback) {
        if (fnErr instanceof error_1.MongoError && fnErr.code === error_1.MONGODB_ERROR_CODES.Reauthenticate) {
            this.reauthenticate(conn, fn, (error, res) => {
                if (error) {
                    return callback(error);
                }
                callback(undefined, res);
            });
        }
        else {
            callback(fnErr);
        }
    }
    /**
     * Reauthenticate on the same connection and then retry the operation.
     */
    reauthenticate(connection, fn, callback) {
        const authContext = connection.authContext;
        if (!authContext) {
            return callback(new error_1.MongoRuntimeError('No auth context found on connection.'));
        }
        const credentials = authContext.credentials;
        if (!credentials) {
            return callback(new error_1.MongoMissingCredentialsError('Connection is missing credentials when asked to reauthenticate'));
        }
        const resolvedCredentials = credentials.resolveAuthMechanism(connection.hello || undefined);
        const provider = connect_1.AUTH_PROVIDERS.get(resolvedCredentials.mechanism);
        if (!provider) {
            return callback(new error_1.MongoMissingCredentialsError(`Reauthenticate failed due to no auth provider for ${credentials.mechanism}`));
        }
        provider.reauth(authContext).then(() => {
            fn(undefined, connection, (fnErr, fnResult) => {
                if (fnErr) {
                    return callback(fnErr);
                }
                callback(undefined, fnResult);
            });
        }, error => callback(error));
    }
    /** Clear the min pool size timer */
    clearMinPoolSizeTimer() {
        const minPoolSizeTimer = this[kMinPoolSizeTimer];
        if (minPoolSizeTimer) {
            (0, timers_1.clearTimeout)(minPoolSizeTimer);
        }
    }
    destroyConnection(connection, reason) {
        this.emitAndLog(ConnectionPool.CONNECTION_CLOSED, new connection_pool_events_1.ConnectionClosedEvent(this, connection, reason));
        // destroy the connection
        process.nextTick(() => connection.destroy({ force: false }));
    }
    connectionIsStale(connection) {
        const serviceId = connection.serviceId;
        if (this.loadBalanced && serviceId) {
            const sid = serviceId.toHexString();
            const generation = this.serviceGenerations.get(sid);
            return connection.generation !== generation;
        }
        return connection.generation !== this[kGeneration];
    }
    connectionIsIdle(connection) {
        return !!(this.options.maxIdleTimeMS && connection.idleTime > this.options.maxIdleTimeMS);
    }
    /**
     * Destroys a connection if the connection is perished.
     *
     * @returns `true` if the connection was destroyed, `false` otherwise.
     */
    destroyConnectionIfPerished(connection) {
        const isStale = this.connectionIsStale(connection);
        const isIdle = this.connectionIsIdle(connection);
        if (!isStale && !isIdle && !connection.closed) {
            return false;
        }
        const reason = connection.closed ? 'error' : isStale ? 'stale' : 'idle';
        this.destroyConnection(connection, reason);
        return true;
    }
    createConnection(callback) {
        const connectOptions = {
            ...this.options,
            id: this[kConnectionCounter].next().value,
            generation: this[kGeneration],
            cancellationToken: this[kCancellationToken]
        };
        this[kPending]++;
        // This is our version of a "virtual" no-I/O connection as the spec requires
        this.emitAndLog(ConnectionPool.CONNECTION_CREATED, new connection_pool_events_1.ConnectionCreatedEvent(this, { id: connectOptions.id }));
        (0, connect_1.connect)(connectOptions, (err, connection) => {
            if (err || !connection) {
                this[kPending]--;
                this.emitAndLog(ConnectionPool.CONNECTION_CLOSED, new connection_pool_events_1.ConnectionClosedEvent(this, { id: connectOptions.id, serviceId: undefined }, 'error',
                // TODO(NODE-5192): Remove this cast
                err));
                if (err instanceof error_1.MongoNetworkError || err instanceof error_1.MongoServerError) {
                    err.connectionGeneration = connectOptions.generation;
                }
                callback(err ?? new error_1.MongoRuntimeError('Connection creation failed without error'));
                return;
            }
            // The pool might have closed since we started trying to create a connection
            if (this[kPoolState] !== exports.PoolState.ready) {
                this[kPending]--;
                connection.destroy({ force: true });
                callback(this.closed ? new errors_1.PoolClosedError(this) : new errors_1.PoolClearedError(this));
                return;
            }
            // forward all events from the connection to the pool
            for (const event of [...constants_1.APM_EVENTS, connection_1.Connection.CLUSTER_TIME_RECEIVED]) {
                connection.on(event, (e) => this.emit(event, e));
            }
            if (this.loadBalanced) {
                connection.on(connection_1.Connection.PINNED, pinType => this[kMetrics].markPinned(pinType));
                connection.on(connection_1.Connection.UNPINNED, pinType => this[kMetrics].markUnpinned(pinType));
                const serviceId = connection.serviceId;
                if (serviceId) {
                    let generation;
                    const sid = serviceId.toHexString();
                    if ((generation = this.serviceGenerations.get(sid))) {
                        connection.generation = generation;
                    }
                    else {
                        this.serviceGenerations.set(sid, 0);
                        connection.generation = 0;
                    }
                }
            }
            connection.markAvailable();
            this.emitAndLog(ConnectionPool.CONNECTION_READY, new connection_pool_events_1.ConnectionReadyEvent(this, connection));
            this[kPending]--;
            callback(undefined, connection);
            return;
        });
    }
    ensureMinPoolSize() {
        const minPoolSize = this.options.minPoolSize;
        if (this[kPoolState] !== exports.PoolState.ready || minPoolSize === 0) {
            return;
        }
        this[kConnections].prune(connection => this.destroyConnectionIfPerished(connection));
        if (this.totalConnectionCount < minPoolSize &&
            this.pendingConnectionCount < this.options.maxConnecting) {
            // NOTE: ensureMinPoolSize should not try to get all the pending
            // connection permits because that potentially delays the availability of
            // the connection to a checkout request
            this.createConnection((err, connection) => {
                if (err) {
                    this[kServer].handleError(err);
                }
                if (!err && connection) {
                    this[kConnections].push(connection);
                    process.nextTick(() => this.processWaitQueue());
                }
                if (this[kPoolState] === exports.PoolState.ready) {
                    (0, timers_1.clearTimeout)(this[kMinPoolSizeTimer]);
                    this[kMinPoolSizeTimer] = (0, timers_1.setTimeout)(() => this.ensureMinPoolSize(), this.options.minPoolSizeCheckFrequencyMS);
                }
            });
        }
        else {
            (0, timers_1.clearTimeout)(this[kMinPoolSizeTimer]);
            this[kMinPoolSizeTimer] = (0, timers_1.setTimeout)(() => this.ensureMinPoolSize(), this.options.minPoolSizeCheckFrequencyMS);
        }
    }
    processWaitQueue() {
        if (this[kProcessingWaitQueue]) {
            return;
        }
|
||||
this[kProcessingWaitQueue] = true;
|
||||
while (this.waitQueueSize) {
|
||||
const waitQueueMember = this[kWaitQueue].first();
|
||||
if (!waitQueueMember) {
|
||||
this[kWaitQueue].shift();
|
||||
continue;
|
||||
}
|
||||
if (waitQueueMember[kCancelled]) {
|
||||
this[kWaitQueue].shift();
|
||||
continue;
|
||||
}
|
||||
if (this[kPoolState] !== exports.PoolState.ready) {
|
||||
const reason = this.closed ? 'poolClosed' : 'connectionError';
|
||||
const error = this.closed ? new errors_1.PoolClosedError(this) : new errors_1.PoolClearedError(this);
|
||||
this.emitAndLog(ConnectionPool.CONNECTION_CHECK_OUT_FAILED, new connection_pool_events_1.ConnectionCheckOutFailedEvent(this, reason, error));
|
||||
if (waitQueueMember.timer) {
|
||||
(0, timers_1.clearTimeout)(waitQueueMember.timer);
|
||||
}
|
||||
this[kWaitQueue].shift();
|
||||
waitQueueMember.callback(error);
|
||||
continue;
|
||||
}
|
||||
if (!this.availableConnectionCount) {
|
||||
break;
|
||||
}
|
||||
const connection = this[kConnections].shift();
|
||||
if (!connection) {
|
||||
break;
|
||||
}
|
||||
if (!this.destroyConnectionIfPerished(connection)) {
|
||||
this[kCheckedOut].add(connection);
|
||||
this.emitAndLog(ConnectionPool.CONNECTION_CHECKED_OUT, new connection_pool_events_1.ConnectionCheckedOutEvent(this, connection));
|
||||
if (waitQueueMember.timer) {
|
||||
(0, timers_1.clearTimeout)(waitQueueMember.timer);
|
||||
}
|
||||
this[kWaitQueue].shift();
|
||||
waitQueueMember.callback(undefined, connection);
|
||||
}
|
||||
}
|
||||
const { maxPoolSize, maxConnecting } = this.options;
|
||||
while (this.waitQueueSize > 0 &&
|
||||
this.pendingConnectionCount < maxConnecting &&
|
||||
(maxPoolSize === 0 || this.totalConnectionCount < maxPoolSize)) {
|
||||
const waitQueueMember = this[kWaitQueue].shift();
|
||||
if (!waitQueueMember || waitQueueMember[kCancelled]) {
|
||||
continue;
|
||||
}
|
||||
this.createConnection((err, connection) => {
|
||||
if (waitQueueMember[kCancelled]) {
|
||||
if (!err && connection) {
|
||||
this[kConnections].push(connection);
|
||||
}
|
||||
}
|
||||
else {
|
||||
if (err) {
|
||||
this.emitAndLog(ConnectionPool.CONNECTION_CHECK_OUT_FAILED,
|
||||
// TODO(NODE-5192): Remove this cast
|
||||
new connection_pool_events_1.ConnectionCheckOutFailedEvent(this, 'connectionError', err));
|
||||
}
|
||||
else if (connection) {
|
||||
this[kCheckedOut].add(connection);
|
||||
this.emitAndLog(ConnectionPool.CONNECTION_CHECKED_OUT, new connection_pool_events_1.ConnectionCheckedOutEvent(this, connection));
|
||||
}
|
||||
if (waitQueueMember.timer) {
|
||||
(0, timers_1.clearTimeout)(waitQueueMember.timer);
|
||||
}
|
||||
waitQueueMember.callback(err, connection);
|
||||
}
|
||||
process.nextTick(() => this.processWaitQueue());
|
||||
});
|
||||
}
|
||||
this[kProcessingWaitQueue] = false;
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Emitted when the connection pool is created.
|
||||
* @event
|
||||
*/
|
||||
ConnectionPool.CONNECTION_POOL_CREATED = constants_1.CONNECTION_POOL_CREATED;
|
||||
/**
|
||||
* Emitted once when the connection pool is closed
|
||||
* @event
|
||||
*/
|
||||
ConnectionPool.CONNECTION_POOL_CLOSED = constants_1.CONNECTION_POOL_CLOSED;
|
||||
/**
|
||||
* Emitted each time the connection pool is cleared and it's generation incremented
|
||||
* @event
|
||||
*/
|
||||
ConnectionPool.CONNECTION_POOL_CLEARED = constants_1.CONNECTION_POOL_CLEARED;
|
||||
/**
|
||||
* Emitted each time the connection pool is marked ready
|
||||
* @event
|
||||
*/
|
||||
ConnectionPool.CONNECTION_POOL_READY = constants_1.CONNECTION_POOL_READY;
|
||||
/**
|
||||
* Emitted when a connection is created.
|
||||
* @event
|
||||
*/
|
||||
ConnectionPool.CONNECTION_CREATED = constants_1.CONNECTION_CREATED;
|
||||
/**
|
||||
* Emitted when a connection becomes established, and is ready to use
|
||||
* @event
|
||||
*/
|
||||
ConnectionPool.CONNECTION_READY = constants_1.CONNECTION_READY;
|
||||
/**
|
||||
* Emitted when a connection is closed
|
||||
* @event
|
||||
*/
|
||||
ConnectionPool.CONNECTION_CLOSED = constants_1.CONNECTION_CLOSED;
|
||||
/**
|
||||
* Emitted when an attempt to check out a connection begins
|
||||
* @event
|
||||
*/
|
||||
ConnectionPool.CONNECTION_CHECK_OUT_STARTED = constants_1.CONNECTION_CHECK_OUT_STARTED;
|
||||
/**
|
||||
* Emitted when an attempt to check out a connection fails
|
||||
* @event
|
||||
*/
|
||||
ConnectionPool.CONNECTION_CHECK_OUT_FAILED = constants_1.CONNECTION_CHECK_OUT_FAILED;
|
||||
/**
|
||||
* Emitted each time a connection is successfully checked out of the connection pool
|
||||
* @event
|
||||
*/
|
||||
ConnectionPool.CONNECTION_CHECKED_OUT = constants_1.CONNECTION_CHECKED_OUT;
|
||||
/**
|
||||
* Emitted each time a connection is successfully checked into the connection pool
|
||||
* @event
|
||||
*/
|
||||
ConnectionPool.CONNECTION_CHECKED_IN = constants_1.CONNECTION_CHECKED_IN;
|
||||
exports.ConnectionPool = ConnectionPool;
|
||||
//# sourceMappingURL=connection_pool.js.map
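Editor's note, not part of this commit: the pool re-emits these CMAP events and the driver surfaces them on MongoClient, so pool activity can be observed with ordinary listeners. A minimal sketch; the connection string and log statements are placeholders:

// Event name strings correspond to the static constants defined above.
const { MongoClient } = require('mongodb');

const client = new MongoClient('mongodb://localhost:27017', { maxPoolSize: 10 });

client.on('connectionPoolCreated', ev => console.log('pool created for', ev.address));
client.on('connectionCheckedOut', ev => console.log('checked out', ev.connectionId));
client.on('connectionCheckedIn', ev => console.log('checked in', ev.connectionId));
client.on('connectionPoolCleared', ev => console.log('pool cleared', ev.serviceId ?? ''));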
1
VISUALIZACION/node_modules/mongodb/lib/cmap/connection_pool.js.map
generated
vendored
Executable file
File diff suppressed because one or more lines are too long
191
VISUALIZACION/node_modules/mongodb/lib/cmap/connection_pool_events.js
generated
vendored
Executable file
@@ -0,0 +1,191 @@
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.ConnectionPoolClearedEvent = exports.ConnectionCheckedInEvent = exports.ConnectionCheckedOutEvent = exports.ConnectionCheckOutFailedEvent = exports.ConnectionCheckOutStartedEvent = exports.ConnectionClosedEvent = exports.ConnectionReadyEvent = exports.ConnectionCreatedEvent = exports.ConnectionPoolClosedEvent = exports.ConnectionPoolReadyEvent = exports.ConnectionPoolCreatedEvent = exports.ConnectionPoolMonitoringEvent = void 0;
|
||||
const constants_1 = require("../constants");
|
||||
/**
|
||||
* The base export class for all monitoring events published from the connection pool
|
||||
* @public
|
||||
* @category Event
|
||||
*/
|
||||
class ConnectionPoolMonitoringEvent {
|
||||
/** @internal */
|
||||
constructor(pool) {
|
||||
this.time = new Date();
|
||||
this.address = pool.address;
|
||||
}
|
||||
}
|
||||
exports.ConnectionPoolMonitoringEvent = ConnectionPoolMonitoringEvent;
|
||||
/**
|
||||
* An event published when a connection pool is created
|
||||
* @public
|
||||
* @category Event
|
||||
*/
|
||||
class ConnectionPoolCreatedEvent extends ConnectionPoolMonitoringEvent {
|
||||
/** @internal */
|
||||
constructor(pool) {
|
||||
super(pool);
|
||||
/** @internal */
|
||||
this.name = constants_1.CONNECTION_POOL_CREATED;
|
||||
if (pool.options.credentials != null) {
|
||||
// Intentionally remove credentials: NODE-5460
|
||||
this.options = { ...pool.options, credentials: {} };
|
||||
}
|
||||
else {
|
||||
this.options = pool.options;
|
||||
}
|
||||
}
|
||||
}
|
||||
exports.ConnectionPoolCreatedEvent = ConnectionPoolCreatedEvent;
|
||||
/**
|
||||
* An event published when a connection pool is ready
|
||||
* @public
|
||||
* @category Event
|
||||
*/
|
||||
class ConnectionPoolReadyEvent extends ConnectionPoolMonitoringEvent {
|
||||
/** @internal */
|
||||
constructor(pool) {
|
||||
super(pool);
|
||||
/** @internal */
|
||||
this.name = constants_1.CONNECTION_POOL_READY;
|
||||
}
|
||||
}
|
||||
exports.ConnectionPoolReadyEvent = ConnectionPoolReadyEvent;
|
||||
/**
|
||||
* An event published when a connection pool is closed
|
||||
* @public
|
||||
* @category Event
|
||||
*/
|
||||
class ConnectionPoolClosedEvent extends ConnectionPoolMonitoringEvent {
|
||||
/** @internal */
|
||||
constructor(pool) {
|
||||
super(pool);
|
||||
/** @internal */
|
||||
this.name = constants_1.CONNECTION_POOL_CLOSED;
|
||||
}
|
||||
}
|
||||
exports.ConnectionPoolClosedEvent = ConnectionPoolClosedEvent;
|
||||
/**
|
||||
* An event published when a connection pool creates a new connection
|
||||
* @public
|
||||
* @category Event
|
||||
*/
|
||||
class ConnectionCreatedEvent extends ConnectionPoolMonitoringEvent {
|
||||
/** @internal */
|
||||
constructor(pool, connection) {
|
||||
super(pool);
|
||||
/** @internal */
|
||||
this.name = constants_1.CONNECTION_CREATED;
|
||||
this.connectionId = connection.id;
|
||||
}
|
||||
}
|
||||
exports.ConnectionCreatedEvent = ConnectionCreatedEvent;
|
||||
/**
|
||||
* An event published when a connection is ready for use
|
||||
* @public
|
||||
* @category Event
|
||||
*/
|
||||
class ConnectionReadyEvent extends ConnectionPoolMonitoringEvent {
|
||||
/** @internal */
|
||||
constructor(pool, connection) {
|
||||
super(pool);
|
||||
/** @internal */
|
||||
this.name = constants_1.CONNECTION_READY;
|
||||
this.connectionId = connection.id;
|
||||
}
|
||||
}
|
||||
exports.ConnectionReadyEvent = ConnectionReadyEvent;
|
||||
/**
|
||||
* An event published when a connection is closed
|
||||
* @public
|
||||
* @category Event
|
||||
*/
|
||||
class ConnectionClosedEvent extends ConnectionPoolMonitoringEvent {
|
||||
/** @internal */
|
||||
constructor(pool, connection, reason, error) {
|
||||
super(pool);
|
||||
/** @internal */
|
||||
this.name = constants_1.CONNECTION_CLOSED;
|
||||
this.connectionId = connection.id;
|
||||
this.reason = reason;
|
||||
this.serviceId = connection.serviceId;
|
||||
this.error = error ?? null;
|
||||
}
|
||||
}
|
||||
exports.ConnectionClosedEvent = ConnectionClosedEvent;
|
||||
/**
|
||||
* An event published when a request to check a connection out begins
|
||||
* @public
|
||||
* @category Event
|
||||
*/
|
||||
class ConnectionCheckOutStartedEvent extends ConnectionPoolMonitoringEvent {
|
||||
/** @internal */
|
||||
constructor(pool) {
|
||||
super(pool);
|
||||
/** @internal */
|
||||
this.name = constants_1.CONNECTION_CHECK_OUT_STARTED;
|
||||
}
|
||||
}
|
||||
exports.ConnectionCheckOutStartedEvent = ConnectionCheckOutStartedEvent;
|
||||
/**
|
||||
* An event published when a request to check a connection out fails
|
||||
* @public
|
||||
* @category Event
|
||||
*/
|
||||
class ConnectionCheckOutFailedEvent extends ConnectionPoolMonitoringEvent {
|
||||
/** @internal */
|
||||
constructor(pool, reason, error) {
|
||||
super(pool);
|
||||
/** @internal */
|
||||
this.name = constants_1.CONNECTION_CHECK_OUT_FAILED;
|
||||
this.reason = reason;
|
||||
this.error = error;
|
||||
}
|
||||
}
|
||||
exports.ConnectionCheckOutFailedEvent = ConnectionCheckOutFailedEvent;
|
||||
/**
|
||||
* An event published when a connection is checked out of the connection pool
|
||||
* @public
|
||||
* @category Event
|
||||
*/
|
||||
class ConnectionCheckedOutEvent extends ConnectionPoolMonitoringEvent {
|
||||
/** @internal */
|
||||
constructor(pool, connection) {
|
||||
super(pool);
|
||||
/** @internal */
|
||||
this.name = constants_1.CONNECTION_CHECKED_OUT;
|
||||
this.connectionId = connection.id;
|
||||
}
|
||||
}
|
||||
exports.ConnectionCheckedOutEvent = ConnectionCheckedOutEvent;
|
||||
/**
|
||||
* An event published when a connection is checked into the connection pool
|
||||
* @public
|
||||
* @category Event
|
||||
*/
|
||||
class ConnectionCheckedInEvent extends ConnectionPoolMonitoringEvent {
|
||||
/** @internal */
|
||||
constructor(pool, connection) {
|
||||
super(pool);
|
||||
/** @internal */
|
||||
this.name = constants_1.CONNECTION_CHECKED_IN;
|
||||
this.connectionId = connection.id;
|
||||
}
|
||||
}
|
||||
exports.ConnectionCheckedInEvent = ConnectionCheckedInEvent;
|
||||
/**
|
||||
* An event published when a connection pool is cleared
|
||||
* @public
|
||||
* @category Event
|
||||
*/
|
||||
class ConnectionPoolClearedEvent extends ConnectionPoolMonitoringEvent {
|
||||
/** @internal */
|
||||
constructor(pool, options = {}) {
|
||||
super(pool);
|
||||
/** @internal */
|
||||
this.name = constants_1.CONNECTION_POOL_CLEARED;
|
||||
this.serviceId = options.serviceId;
|
||||
this.interruptInUseConnections = options.interruptInUseConnections;
|
||||
}
|
||||
}
|
||||
exports.ConnectionPoolClearedEvent = ConnectionPoolClearedEvent;
|
||||
//# sourceMappingURL=connection_pool_events.js.map
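Editor's note, an illustrative helper (not from this commit): because every event class above extends ConnectionPoolMonitoringEvent, a single formatter can handle all of them.

// `time` and `address` are set by the base class constructor above;
// connection-level events additionally carry `connectionId`.
function formatPoolEvent(event) {
    const base = `[${event.time.toISOString()}] ${event.address} ${event.name}`;
    return 'connectionId' in event ? `${base} conn#${event.connectionId}` : base;
}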
1
VISUALIZACION/node_modules/mongodb/lib/cmap/connection_pool_events.js.map
generated
vendored
Executable file
File diff suppressed because one or more lines are too long
64
VISUALIZACION/node_modules/mongodb/lib/cmap/errors.js
generated
vendored
Executable file
@@ -0,0 +1,64 @@
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.WaitQueueTimeoutError = exports.PoolClearedOnNetworkError = exports.PoolClearedError = exports.PoolClosedError = void 0;
|
||||
const error_1 = require("../error");
|
||||
/**
|
||||
* An error indicating a connection pool is closed
|
||||
* @category Error
|
||||
*/
|
||||
class PoolClosedError extends error_1.MongoDriverError {
|
||||
constructor(pool) {
|
||||
super('Attempted to check out a connection from closed connection pool');
|
||||
this.address = pool.address;
|
||||
}
|
||||
get name() {
|
||||
return 'MongoPoolClosedError';
|
||||
}
|
||||
}
|
||||
exports.PoolClosedError = PoolClosedError;
|
||||
/**
|
||||
* An error indicating a connection pool is currently paused
|
||||
* @category Error
|
||||
*/
|
||||
class PoolClearedError extends error_1.MongoNetworkError {
|
||||
constructor(pool, message) {
|
||||
const errorMessage = message
|
||||
? message
|
||||
: `Connection pool for ${pool.address} was cleared because another operation failed with: "${pool.serverError?.message}"`;
|
||||
super(errorMessage);
|
||||
this.address = pool.address;
|
||||
this.addErrorLabel(error_1.MongoErrorLabel.RetryableWriteError);
|
||||
}
|
||||
get name() {
|
||||
return 'MongoPoolClearedError';
|
||||
}
|
||||
}
|
||||
exports.PoolClearedError = PoolClearedError;
|
||||
/**
|
||||
* An error indicating that a connection pool has been cleared after the monitor for that server timed out.
|
||||
* @category Error
|
||||
*/
|
||||
class PoolClearedOnNetworkError extends PoolClearedError {
|
||||
constructor(pool) {
|
||||
super(pool, `Connection to ${pool.address} interrupted due to server monitor timeout`);
|
||||
}
|
||||
get name() {
|
||||
return 'PoolClearedOnNetworkError';
|
||||
}
|
||||
}
|
||||
exports.PoolClearedOnNetworkError = PoolClearedOnNetworkError;
|
||||
/**
|
||||
* An error thrown when a request to check out a connection times out
|
||||
* @category Error
|
||||
*/
|
||||
class WaitQueueTimeoutError extends error_1.MongoDriverError {
|
||||
constructor(message, address) {
|
||||
super(message);
|
||||
this.address = address;
|
||||
}
|
||||
get name() {
|
||||
return 'MongoWaitQueueTimeoutError';
|
||||
}
|
||||
}
|
||||
exports.WaitQueueTimeoutError = WaitQueueTimeoutError;
|
||||
//# sourceMappingURL=errors.js.map
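Editor's note, an illustrative sketch (not from this commit; the require path assumes this vendored layout): PoolClearedError tags itself with the RetryableWriteError label in its constructor, which is what retry logic can key off of.

const { PoolClearedError } = require('mongodb/lib/cmap/errors');

function isRetryablePoolError(error) {
    // hasErrorLabel is inherited from MongoError.
    return error instanceof PoolClearedError && error.hasErrorLabel('RetryableWriteError');
}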
1
VISUALIZACION/node_modules/mongodb/lib/cmap/errors.js.map
generated
vendored
Executable file
File diff suppressed because one or more lines are too long
173
VISUALIZACION/node_modules/mongodb/lib/cmap/handshake/client_metadata.js
generated
vendored
Executable file
@@ -0,0 +1,173 @@
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.getFAASEnv = exports.makeClientMetadata = exports.LimitedSizeDocument = void 0;
|
||||
const os = require("os");
|
||||
const process = require("process");
|
||||
const bson_1 = require("../../bson");
|
||||
const error_1 = require("../../error");
|
||||
// eslint-disable-next-line @typescript-eslint/no-var-requires
|
||||
const NODE_DRIVER_VERSION = require('../../../package.json').version;
|
||||
/** @internal */
|
||||
class LimitedSizeDocument {
|
||||
constructor(maxSize) {
|
||||
this.maxSize = maxSize;
|
||||
this.document = new Map();
|
||||
/** BSON overhead: Int32 + Null byte */
|
||||
this.documentSize = 5;
|
||||
}
|
||||
/** Only adds key/value if the bsonByteLength is less than MAX_SIZE */
|
||||
ifItFitsItSits(key, value) {
|
||||
// The BSON byteLength of the new element is the same as serializing it to its own document
|
||||
// subtracting the document size int32 and the null terminator.
|
||||
const newElementSize = bson_1.BSON.serialize(new Map().set(key, value)).byteLength - 5;
|
||||
if (newElementSize + this.documentSize > this.maxSize) {
|
||||
return false;
|
||||
}
|
||||
this.documentSize += newElementSize;
|
||||
this.document.set(key, value);
|
||||
return true;
|
||||
}
|
||||
toObject() {
|
||||
return bson_1.BSON.deserialize(bson_1.BSON.serialize(this.document), {
|
||||
promoteLongs: false,
|
||||
promoteBuffers: false,
|
||||
promoteValues: false,
|
||||
useBigInt64: false
|
||||
});
|
||||
}
|
||||
}
|
||||
exports.LimitedSizeDocument = LimitedSizeDocument;
|
||||
/**
|
||||
* From the specs:
|
||||
* Implementors SHOULD cumulatively update fields in the following order until the document is under the size limit:
|
||||
* 1. Omit fields from `env` except `env.name`.
|
||||
* 2. Omit fields from `os` except `os.type`.
|
||||
* 3. Omit the `env` document entirely.
|
||||
* 4. Truncate `platform`. -- special we do not truncate this field
|
||||
*/
|
||||
function makeClientMetadata(options) {
|
||||
const metadataDocument = new LimitedSizeDocument(512);
|
||||
const { appName = '' } = options;
|
||||
// Add app name first, it must be sent
|
||||
if (appName.length > 0) {
|
||||
const name = Buffer.byteLength(appName, 'utf8') <= 128
|
||||
? options.appName
|
||||
: Buffer.from(appName, 'utf8').subarray(0, 128).toString('utf8');
|
||||
metadataDocument.ifItFitsItSits('application', { name });
|
||||
}
|
||||
const { name = '', version = '', platform = '' } = options.driverInfo;
|
||||
const driverInfo = {
|
||||
name: name.length > 0 ? `nodejs|${name}` : 'nodejs',
|
||||
version: version.length > 0 ? `${NODE_DRIVER_VERSION}|${version}` : NODE_DRIVER_VERSION
|
||||
};
|
||||
if (!metadataDocument.ifItFitsItSits('driver', driverInfo)) {
|
||||
throw new error_1.MongoInvalidArgumentError('Unable to include driverInfo name and version, metadata cannot exceed 512 bytes');
|
||||
}
|
||||
let runtimeInfo = getRuntimeInfo();
|
||||
if (platform.length > 0) {
|
||||
runtimeInfo = `${runtimeInfo}|${platform}`;
|
||||
}
|
||||
if (!metadataDocument.ifItFitsItSits('platform', runtimeInfo)) {
|
||||
throw new error_1.MongoInvalidArgumentError('Unable to include driverInfo platform, metadata cannot exceed 512 bytes');
|
||||
}
|
||||
// Note: order matters, os.type is last so it will be removed last if we're at maxSize
|
||||
const osInfo = new Map()
|
||||
.set('name', process.platform)
|
||||
.set('architecture', process.arch)
|
||||
.set('version', os.release())
|
||||
.set('type', os.type());
|
||||
if (!metadataDocument.ifItFitsItSits('os', osInfo)) {
|
||||
for (const key of osInfo.keys()) {
|
||||
osInfo.delete(key);
|
||||
if (osInfo.size === 0)
|
||||
break;
|
||||
if (metadataDocument.ifItFitsItSits('os', osInfo))
|
||||
break;
|
||||
}
|
||||
}
|
||||
const faasEnv = getFAASEnv();
|
||||
if (faasEnv != null) {
|
||||
if (!metadataDocument.ifItFitsItSits('env', faasEnv)) {
|
||||
for (const key of faasEnv.keys()) {
|
||||
faasEnv.delete(key);
|
||||
if (faasEnv.size === 0)
|
||||
break;
|
||||
if (metadataDocument.ifItFitsItSits('env', faasEnv))
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
return metadataDocument.toObject();
|
||||
}
|
||||
exports.makeClientMetadata = makeClientMetadata;
|
||||
/**
|
||||
* Collects FaaS metadata.
|
||||
* - `name` MUST be the last key in the Map returned.
|
||||
*/
|
||||
function getFAASEnv() {
|
||||
const { AWS_EXECUTION_ENV = '', AWS_LAMBDA_RUNTIME_API = '', FUNCTIONS_WORKER_RUNTIME = '', K_SERVICE = '', FUNCTION_NAME = '', VERCEL = '', AWS_LAMBDA_FUNCTION_MEMORY_SIZE = '', AWS_REGION = '', FUNCTION_MEMORY_MB = '', FUNCTION_REGION = '', FUNCTION_TIMEOUT_SEC = '', VERCEL_REGION = '' } = process.env;
|
||||
const isAWSFaaS = AWS_EXECUTION_ENV.startsWith('AWS_Lambda_') || AWS_LAMBDA_RUNTIME_API.length > 0;
|
||||
const isAzureFaaS = FUNCTIONS_WORKER_RUNTIME.length > 0;
|
||||
const isGCPFaaS = K_SERVICE.length > 0 || FUNCTION_NAME.length > 0;
|
||||
const isVercelFaaS = VERCEL.length > 0;
|
||||
// Note: order matters, name must always be the last key
|
||||
const faasEnv = new Map();
|
||||
// When isVercelFaaS is true so is isAWSFaaS; Vercel inherits the AWS env
|
||||
if (isVercelFaaS && !(isAzureFaaS || isGCPFaaS)) {
|
||||
if (VERCEL_REGION.length > 0) {
|
||||
faasEnv.set('region', VERCEL_REGION);
|
||||
}
|
||||
faasEnv.set('name', 'vercel');
|
||||
return faasEnv;
|
||||
}
|
||||
if (isAWSFaaS && !(isAzureFaaS || isGCPFaaS || isVercelFaaS)) {
|
||||
if (AWS_REGION.length > 0) {
|
||||
faasEnv.set('region', AWS_REGION);
|
||||
}
|
||||
if (AWS_LAMBDA_FUNCTION_MEMORY_SIZE.length > 0 &&
|
||||
Number.isInteger(+AWS_LAMBDA_FUNCTION_MEMORY_SIZE)) {
|
||||
faasEnv.set('memory_mb', new bson_1.Int32(AWS_LAMBDA_FUNCTION_MEMORY_SIZE));
|
||||
}
|
||||
faasEnv.set('name', 'aws.lambda');
|
||||
return faasEnv;
|
||||
}
|
||||
if (isAzureFaaS && !(isGCPFaaS || isAWSFaaS || isVercelFaaS)) {
|
||||
faasEnv.set('name', 'azure.func');
|
||||
return faasEnv;
|
||||
}
|
||||
if (isGCPFaaS && !(isAzureFaaS || isAWSFaaS || isVercelFaaS)) {
|
||||
if (FUNCTION_REGION.length > 0) {
|
||||
faasEnv.set('region', FUNCTION_REGION);
|
||||
}
|
||||
if (FUNCTION_MEMORY_MB.length > 0 && Number.isInteger(+FUNCTION_MEMORY_MB)) {
|
||||
faasEnv.set('memory_mb', new bson_1.Int32(FUNCTION_MEMORY_MB));
|
||||
}
|
||||
if (FUNCTION_TIMEOUT_SEC.length > 0 && Number.isInteger(+FUNCTION_TIMEOUT_SEC)) {
|
||||
faasEnv.set('timeout_sec', new bson_1.Int32(FUNCTION_TIMEOUT_SEC));
|
||||
}
|
||||
faasEnv.set('name', 'gcp.func');
|
||||
return faasEnv;
|
||||
}
|
||||
return null;
|
||||
}
|
||||
exports.getFAASEnv = getFAASEnv;
|
||||
/**
|
||||
* @internal
|
||||
* Get current JavaScript runtime platform
|
||||
*
|
||||
* NOTE: The version information fetching is intentionally written defensively
|
||||
* to avoid having a released driver version that becomes incompatible
|
||||
* with a future change to these global objects.
|
||||
*/
|
||||
function getRuntimeInfo() {
|
||||
if ('Deno' in globalThis) {
|
||||
const version = typeof Deno?.version?.deno === 'string' ? Deno?.version?.deno : '0.0.0-unknown';
|
||||
return `Deno v${version}, ${os.endianness()}`;
|
||||
}
|
||||
if ('Bun' in globalThis) {
|
||||
const version = typeof Bun?.version === 'string' ? Bun?.version : '0.0.0-unknown';
|
||||
return `Bun v${version}, ${os.endianness()}`;
|
||||
}
|
||||
return `Node.js ${process.version}, ${os.endianness()}`;
|
||||
}
|
||||
//# sourceMappingURL=client_metadata.js.map
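Editor's note, a sketch of the 512-byte budget behavior implemented above (not from this commit; the require path assumes this vendored layout). Elements are only admitted while the running BSON byte length stays under the cap.

const { LimitedSizeDocument } = require('mongodb/lib/cmap/handshake/client_metadata');

const doc = new LimitedSizeDocument(512);
console.log(doc.ifItFitsItSits('driver', { name: 'nodejs', version: '6.x' })); // true: well under budget
console.log(doc.ifItFitsItSits('padding', 'x'.repeat(600)));                   // false: would exceed 512 bytes
console.log(doc.toObject()); // only the fields that fit are serialized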
1
VISUALIZACION/node_modules/mongodb/lib/cmap/handshake/client_metadata.js.map
generated
vendored
Executable file
File diff suppressed because one or more lines are too long
156
VISUALIZACION/node_modules/mongodb/lib/cmap/message_stream.js
generated
vendored
Executable file
@@ -0,0 +1,156 @@
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.MessageStream = void 0;
|
||||
const stream_1 = require("stream");
|
||||
const error_1 = require("../error");
|
||||
const utils_1 = require("../utils");
|
||||
const commands_1 = require("./commands");
|
||||
const compression_1 = require("./wire_protocol/compression");
|
||||
const constants_1 = require("./wire_protocol/constants");
|
||||
const MESSAGE_HEADER_SIZE = 16;
|
||||
const COMPRESSION_DETAILS_SIZE = 9; // originalOpcode + uncompressedSize, compressorID
|
||||
const kDefaultMaxBsonMessageSize = 1024 * 1024 * 16 * 4;
|
||||
/** @internal */
|
||||
const kBuffer = Symbol('buffer');
|
||||
/**
|
||||
* A duplex stream that is capable of reading and writing raw wire protocol messages, with
|
||||
* support for optional compression
|
||||
* @internal
|
||||
*/
|
||||
class MessageStream extends stream_1.Duplex {
|
||||
constructor(options = {}) {
|
||||
super(options);
|
||||
/** @internal */
|
||||
this.isMonitoringConnection = false;
|
||||
this.maxBsonMessageSize = options.maxBsonMessageSize || kDefaultMaxBsonMessageSize;
|
||||
this[kBuffer] = new utils_1.BufferPool();
|
||||
}
|
||||
get buffer() {
|
||||
return this[kBuffer];
|
||||
}
|
||||
_write(chunk, _, callback) {
|
||||
this[kBuffer].append(chunk);
|
||||
processIncomingData(this, callback);
|
||||
}
|
||||
_read( /* size */) {
|
||||
// NOTE: This implementation is empty because we explicitly push data to be read
|
||||
// when `writeMessage` is called.
|
||||
return;
|
||||
}
|
||||
writeCommand(command, operationDescription) {
|
||||
const agreedCompressor = operationDescription.agreedCompressor ?? 'none';
|
||||
if (agreedCompressor === 'none' || !canCompress(command)) {
|
||||
const data = command.toBin();
|
||||
this.push(Array.isArray(data) ? Buffer.concat(data) : data);
|
||||
return;
|
||||
}
|
||||
// otherwise, compress the message
|
||||
const concatenatedOriginalCommandBuffer = Buffer.concat(command.toBin());
|
||||
const messageToBeCompressed = concatenatedOriginalCommandBuffer.slice(MESSAGE_HEADER_SIZE);
|
||||
// Extract information needed for OP_COMPRESSED from the uncompressed message
|
||||
const originalCommandOpCode = concatenatedOriginalCommandBuffer.readInt32LE(12);
|
||||
const options = {
|
||||
agreedCompressor,
|
||||
zlibCompressionLevel: operationDescription.zlibCompressionLevel ?? 0
|
||||
};
|
||||
// Compress the message body
|
||||
(0, compression_1.compress)(options, messageToBeCompressed).then(compressedMessage => {
|
||||
// Create the msgHeader of OP_COMPRESSED
|
||||
const msgHeader = Buffer.alloc(MESSAGE_HEADER_SIZE);
|
||||
msgHeader.writeInt32LE(MESSAGE_HEADER_SIZE + COMPRESSION_DETAILS_SIZE + compressedMessage.length, 0); // messageLength
|
||||
msgHeader.writeInt32LE(command.requestId, 4); // requestID
|
||||
msgHeader.writeInt32LE(0, 8); // responseTo (zero)
|
||||
msgHeader.writeInt32LE(constants_1.OP_COMPRESSED, 12); // opCode
|
||||
// Create the compression details of OP_COMPRESSED
|
||||
const compressionDetails = Buffer.alloc(COMPRESSION_DETAILS_SIZE);
|
||||
compressionDetails.writeInt32LE(originalCommandOpCode, 0); // originalOpcode
|
||||
compressionDetails.writeInt32LE(messageToBeCompressed.length, 4); // Size of the uncompressed compressedMessage, excluding the MsgHeader
|
||||
compressionDetails.writeUInt8(compression_1.Compressor[agreedCompressor], 8); // compressorID
|
||||
this.push(Buffer.concat([msgHeader, compressionDetails, compressedMessage]));
|
||||
}, error => {
|
||||
operationDescription.cb(error);
|
||||
});
|
||||
}
|
||||
}
|
||||
exports.MessageStream = MessageStream;
|
||||
// Return whether a command contains an uncompressible command term
|
||||
// Will return true if command contains no uncompressible command terms
|
||||
function canCompress(command) {
|
||||
const commandDoc = command instanceof commands_1.Msg ? command.command : command.query;
|
||||
const commandName = Object.keys(commandDoc)[0];
|
||||
return !compression_1.uncompressibleCommands.has(commandName);
|
||||
}
|
||||
function processIncomingData(stream, callback) {
|
||||
const buffer = stream[kBuffer];
|
||||
const sizeOfMessage = buffer.getInt32();
|
||||
if (sizeOfMessage == null) {
|
||||
return callback();
|
||||
}
|
||||
if (sizeOfMessage < 0) {
|
||||
return callback(new error_1.MongoParseError(`Invalid message size: ${sizeOfMessage}`));
|
||||
}
|
||||
if (sizeOfMessage > stream.maxBsonMessageSize) {
|
||||
return callback(new error_1.MongoParseError(`Invalid message size: ${sizeOfMessage}, max allowed: ${stream.maxBsonMessageSize}`));
|
||||
}
|
||||
if (sizeOfMessage > buffer.length) {
|
||||
return callback();
|
||||
}
|
||||
const message = buffer.read(sizeOfMessage);
|
||||
const messageHeader = {
|
||||
length: message.readInt32LE(0),
|
||||
requestId: message.readInt32LE(4),
|
||||
responseTo: message.readInt32LE(8),
|
||||
opCode: message.readInt32LE(12)
|
||||
};
|
||||
const monitorHasAnotherHello = () => {
|
||||
if (stream.isMonitoringConnection) {
|
||||
// Can we read the next message size?
|
||||
const sizeOfMessage = buffer.getInt32();
|
||||
if (sizeOfMessage != null && sizeOfMessage <= buffer.length) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
};
|
||||
let ResponseType = messageHeader.opCode === constants_1.OP_MSG ? commands_1.BinMsg : commands_1.Response;
|
||||
if (messageHeader.opCode !== constants_1.OP_COMPRESSED) {
|
||||
const messageBody = message.subarray(MESSAGE_HEADER_SIZE);
|
||||
// If we are a monitoring connection message stream and
|
||||
// there is more in the buffer that can be read, skip processing since we
|
||||
// want the last hello command response that is in the buffer.
|
||||
if (monitorHasAnotherHello()) {
|
||||
return processIncomingData(stream, callback);
|
||||
}
|
||||
stream.emit('message', new ResponseType(message, messageHeader, messageBody));
|
||||
if (buffer.length >= 4) {
|
||||
return processIncomingData(stream, callback);
|
||||
}
|
||||
return callback();
|
||||
}
|
||||
messageHeader.fromCompressed = true;
|
||||
messageHeader.opCode = message.readInt32LE(MESSAGE_HEADER_SIZE);
|
||||
messageHeader.length = message.readInt32LE(MESSAGE_HEADER_SIZE + 4);
|
||||
const compressorID = message[MESSAGE_HEADER_SIZE + 8];
|
||||
const compressedBuffer = message.slice(MESSAGE_HEADER_SIZE + 9);
|
||||
// recalculate based on wrapped opcode
|
||||
ResponseType = messageHeader.opCode === constants_1.OP_MSG ? commands_1.BinMsg : commands_1.Response;
|
||||
(0, compression_1.decompress)(compressorID, compressedBuffer).then(messageBody => {
|
||||
if (messageBody.length !== messageHeader.length) {
|
||||
return callback(new error_1.MongoDecompressionError('Message body and message header must be the same length'));
|
||||
}
|
||||
// If we are a monitoring connection message stream and
|
||||
// there is more in the buffer that can be read, skip processing since we
|
||||
// want the last hello command response that is in the buffer.
|
||||
if (monitorHasAnotherHello()) {
|
||||
return processIncomingData(stream, callback);
|
||||
}
|
||||
stream.emit('message', new ResponseType(message, messageHeader, messageBody));
|
||||
if (buffer.length >= 4) {
|
||||
return processIncomingData(stream, callback);
|
||||
}
|
||||
return callback();
|
||||
}, error => {
|
||||
return callback(error);
|
||||
});
|
||||
}
|
||||
//# sourceMappingURL=message_stream.js.map
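Editor's note, a minimal standalone sketch (not from this commit) mirroring the header parsing in processIncomingData above: every wire message starts with four little-endian int32 fields.

const MESSAGE_HEADER_SIZE = 16;

function parseMessageHeader(message) {
    return {
        length: message.readInt32LE(0),     // total message size in bytes
        requestId: message.readInt32LE(4),  // client-assigned id
        responseTo: message.readInt32LE(8), // requestId this message replies to
        opCode: message.readInt32LE(12)     // e.g. OP_MSG (2013) or OP_COMPRESSED (2012)
    };
}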
1
VISUALIZACION/node_modules/mongodb/lib/cmap/message_stream.js.map
generated
vendored
Executable file
File diff suppressed because one or more lines are too long
62
VISUALIZACION/node_modules/mongodb/lib/cmap/metrics.js
generated
vendored
Executable file
@@ -0,0 +1,62 @@
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.ConnectionPoolMetrics = void 0;
|
||||
/** @internal */
|
||||
class ConnectionPoolMetrics {
|
||||
constructor() {
|
||||
this.txnConnections = 0;
|
||||
this.cursorConnections = 0;
|
||||
this.otherConnections = 0;
|
||||
}
|
||||
/**
|
||||
* Mark a connection as pinned for a specific operation.
|
||||
*/
|
||||
markPinned(pinType) {
|
||||
if (pinType === ConnectionPoolMetrics.TXN) {
|
||||
this.txnConnections += 1;
|
||||
}
|
||||
else if (pinType === ConnectionPoolMetrics.CURSOR) {
|
||||
this.cursorConnections += 1;
|
||||
}
|
||||
else {
|
||||
this.otherConnections += 1;
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Unmark a connection as pinned for an operation.
|
||||
*/
|
||||
markUnpinned(pinType) {
|
||||
if (pinType === ConnectionPoolMetrics.TXN) {
|
||||
this.txnConnections -= 1;
|
||||
}
|
||||
else if (pinType === ConnectionPoolMetrics.CURSOR) {
|
||||
this.cursorConnections -= 1;
|
||||
}
|
||||
else {
|
||||
this.otherConnections -= 1;
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Return information about the cmap metrics as a string.
|
||||
*/
|
||||
info(maxPoolSize) {
|
||||
return ('Timed out while checking out a connection from connection pool: ' +
|
||||
`maxPoolSize: ${maxPoolSize}, ` +
|
||||
`connections in use by cursors: ${this.cursorConnections}, ` +
|
||||
`connections in use by transactions: ${this.txnConnections}, ` +
|
||||
`connections in use by other operations: ${this.otherConnections}`);
|
||||
}
|
||||
/**
|
||||
* Reset the metrics to the initial values.
|
||||
*/
|
||||
reset() {
|
||||
this.txnConnections = 0;
|
||||
this.cursorConnections = 0;
|
||||
this.otherConnections = 0;
|
||||
}
|
||||
}
|
||||
ConnectionPoolMetrics.TXN = 'txn';
|
||||
ConnectionPoolMetrics.CURSOR = 'cursor';
|
||||
ConnectionPoolMetrics.OTHER = 'other';
|
||||
exports.ConnectionPoolMetrics = ConnectionPoolMetrics;
|
||||
//# sourceMappingURL=metrics.js.map
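Editor's note, illustrative use of the pin counters above (not from this commit; the class is @internal and the require path assumes this vendored layout):

const { ConnectionPoolMetrics } = require('mongodb/lib/cmap/metrics');

const metrics = new ConnectionPoolMetrics();
metrics.markPinned(ConnectionPoolMetrics.TXN);    // a transaction pins a connection
metrics.markPinned(ConnectionPoolMetrics.CURSOR); // so does an open cursor
metrics.markUnpinned(ConnectionPoolMetrics.TXN);  // transaction finished
console.log(metrics.info(100)); // summary string used in wait-queue timeout errors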
1
VISUALIZACION/node_modules/mongodb/lib/cmap/metrics.js.map
generated
vendored
Executable file
File diff suppressed because one or more lines are too long
51
VISUALIZACION/node_modules/mongodb/lib/cmap/stream_description.js
generated
vendored
Executable file
@@ -0,0 +1,51 @@
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.StreamDescription = void 0;
|
||||
const common_1 = require("../sdam/common");
|
||||
const server_description_1 = require("../sdam/server_description");
|
||||
const RESPONSE_FIELDS = [
|
||||
'minWireVersion',
|
||||
'maxWireVersion',
|
||||
'maxBsonObjectSize',
|
||||
'maxMessageSizeBytes',
|
||||
'maxWriteBatchSize',
|
||||
'logicalSessionTimeoutMinutes'
|
||||
];
|
||||
/** @public */
|
||||
class StreamDescription {
|
||||
constructor(address, options) {
|
||||
this.address = address;
|
||||
this.type = common_1.ServerType.Unknown;
|
||||
this.minWireVersion = undefined;
|
||||
this.maxWireVersion = undefined;
|
||||
this.maxBsonObjectSize = 16777216;
|
||||
this.maxMessageSizeBytes = 48000000;
|
||||
this.maxWriteBatchSize = 100000;
|
||||
this.logicalSessionTimeoutMinutes = options?.logicalSessionTimeoutMinutes;
|
||||
this.loadBalanced = !!options?.loadBalanced;
|
||||
this.compressors =
|
||||
options && options.compressors && Array.isArray(options.compressors)
|
||||
? options.compressors
|
||||
: [];
|
||||
}
|
||||
receiveResponse(response) {
|
||||
if (response == null) {
|
||||
return;
|
||||
}
|
||||
this.type = (0, server_description_1.parseServerType)(response);
|
||||
for (const field of RESPONSE_FIELDS) {
|
||||
if (response[field] != null) {
|
||||
this[field] = response[field];
|
||||
}
|
||||
// testing case
|
||||
if ('__nodejs_mock_server__' in response) {
|
||||
this.__nodejs_mock_server__ = response['__nodejs_mock_server__'];
|
||||
}
|
||||
}
|
||||
if (response.compression) {
|
||||
this.compressor = this.compressors.filter(c => response.compression?.includes(c))[0];
|
||||
}
|
||||
}
|
||||
}
|
||||
exports.StreamDescription = StreamDescription;
|
||||
//# sourceMappingURL=stream_description.js.map
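Editor's note, a sketch (not from this commit; require path and response values are made up): feeding a mock hello response into a StreamDescription updates the whitelisted RESPONSE_FIELDS and negotiates a compressor.

const { StreamDescription } = require('mongodb/lib/cmap/stream_description');

const description = new StreamDescription('localhost:27017', { compressors: ['zstd', 'zlib'] });
description.receiveResponse({
    ok: 1,
    maxWireVersion: 21,
    compression: ['zlib'] // compressors the server supports
});
console.log(description.maxWireVersion); // 21
console.log(description.compressor);     // 'zlib' -- first client compressor the server also supports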
1
VISUALIZACION/node_modules/mongodb/lib/cmap/stream_description.js.map
generated
vendored
Executable file
File diff suppressed because one or more lines are too long
107
VISUALIZACION/node_modules/mongodb/lib/cmap/wire_protocol/compression.js
generated
vendored
Executable file
@@ -0,0 +1,107 @@
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.decompress = exports.compress = exports.uncompressibleCommands = exports.Compressor = void 0;
|
||||
const util_1 = require("util");
|
||||
const zlib = require("zlib");
|
||||
const constants_1 = require("../../constants");
|
||||
const deps_1 = require("../../deps");
|
||||
const error_1 = require("../../error");
|
||||
/** @public */
|
||||
exports.Compressor = Object.freeze({
|
||||
none: 0,
|
||||
snappy: 1,
|
||||
zlib: 2,
|
||||
zstd: 3
|
||||
});
|
||||
exports.uncompressibleCommands = new Set([
|
||||
constants_1.LEGACY_HELLO_COMMAND,
|
||||
'saslStart',
|
||||
'saslContinue',
|
||||
'getnonce',
|
||||
'authenticate',
|
||||
'createUser',
|
||||
'updateUser',
|
||||
'copydbSaslStart',
|
||||
'copydbgetnonce',
|
||||
'copydb'
|
||||
]);
|
||||
const ZSTD_COMPRESSION_LEVEL = 3;
|
||||
const zlibInflate = (0, util_1.promisify)(zlib.inflate.bind(zlib));
|
||||
const zlibDeflate = (0, util_1.promisify)(zlib.deflate.bind(zlib));
|
||||
let zstd;
|
||||
let Snappy = null;
|
||||
function loadSnappy() {
|
||||
if (Snappy == null) {
|
||||
const snappyImport = (0, deps_1.getSnappy)();
|
||||
if ('kModuleError' in snappyImport) {
|
||||
throw snappyImport.kModuleError;
|
||||
}
|
||||
Snappy = snappyImport;
|
||||
}
|
||||
return Snappy;
|
||||
}
|
||||
// Facilitate compressing a message using an agreed compressor
|
||||
async function compress(options, dataToBeCompressed) {
|
||||
const zlibOptions = {};
|
||||
switch (options.agreedCompressor) {
|
||||
case 'snappy': {
|
||||
Snappy ?? (Snappy = loadSnappy());
|
||||
return Snappy.compress(dataToBeCompressed);
|
||||
}
|
||||
case 'zstd': {
|
||||
loadZstd();
|
||||
if ('kModuleError' in zstd) {
|
||||
throw zstd['kModuleError'];
|
||||
}
|
||||
return zstd.compress(dataToBeCompressed, ZSTD_COMPRESSION_LEVEL);
|
||||
}
|
||||
case 'zlib': {
|
||||
if (options.zlibCompressionLevel) {
|
||||
zlibOptions.level = options.zlibCompressionLevel;
|
||||
}
|
||||
return zlibDeflate(dataToBeCompressed, zlibOptions);
|
||||
}
|
||||
default: {
|
||||
throw new error_1.MongoInvalidArgumentError(`Unknown compressor ${options.agreedCompressor} failed to compress`);
|
||||
}
|
||||
}
|
||||
}
|
||||
exports.compress = compress;
|
||||
// Decompress a message using the given compressor
|
||||
async function decompress(compressorID, compressedData) {
|
||||
if (compressorID !== exports.Compressor.snappy &&
|
||||
compressorID !== exports.Compressor.zstd &&
|
||||
compressorID !== exports.Compressor.zlib &&
|
||||
compressorID !== exports.Compressor.none) {
|
||||
throw new error_1.MongoDecompressionError(`Server sent message compressed using an unsupported compressor. (Received compressor ID ${compressorID})`);
|
||||
}
|
||||
switch (compressorID) {
|
||||
case exports.Compressor.snappy: {
|
||||
Snappy ?? (Snappy = loadSnappy());
|
||||
return Snappy.uncompress(compressedData, { asBuffer: true });
|
||||
}
|
||||
case exports.Compressor.zstd: {
|
||||
loadZstd();
|
||||
if ('kModuleError' in zstd) {
|
||||
throw zstd['kModuleError'];
|
||||
}
|
||||
return zstd.decompress(compressedData);
|
||||
}
|
||||
case exports.Compressor.zlib: {
|
||||
return zlibInflate(compressedData);
|
||||
}
|
||||
default: {
|
||||
return compressedData;
|
||||
}
|
||||
}
|
||||
}
|
||||
exports.decompress = decompress;
|
||||
/**
|
||||
* Load ZStandard if it is not already set.
|
||||
*/
|
||||
function loadZstd() {
|
||||
if (!zstd) {
|
||||
zstd = (0, deps_1.getZstdLibrary)();
|
||||
}
|
||||
}
|
||||
//# sourceMappingURL=compression.js.map
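Editor's note, a round-trip sketch for the helpers above (not from this commit; the require path assumes this vendored layout). zlib needs no optional native dependency, unlike snappy/zstd, which are loaded lazily on first use.

const { compress, decompress, Compressor } = require('mongodb/lib/cmap/wire_protocol/compression');

async function roundTrip() {
    const payload = Buffer.from('{"hello":"world"}');
    const compressed = await compress({ agreedCompressor: 'zlib', zlibCompressionLevel: 6 }, payload);
    const restored = await decompress(Compressor.zlib, compressed);
    console.log(restored.equals(payload)); // true
}

roundTrip().catch(console.error);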
1
VISUALIZACION/node_modules/mongodb/lib/cmap/wire_protocol/compression.js.map
generated
vendored
Executable file
File diff suppressed because one or more lines are too long
17
VISUALIZACION/node_modules/mongodb/lib/cmap/wire_protocol/constants.js
generated
vendored
Executable file
@@ -0,0 +1,17 @@
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.OP_MSG = exports.OP_COMPRESSED = exports.OP_DELETE = exports.OP_QUERY = exports.OP_INSERT = exports.OP_UPDATE = exports.OP_REPLY = exports.MIN_SUPPORTED_QE_SERVER_VERSION = exports.MIN_SUPPORTED_QE_WIRE_VERSION = exports.MAX_SUPPORTED_WIRE_VERSION = exports.MIN_SUPPORTED_WIRE_VERSION = exports.MAX_SUPPORTED_SERVER_VERSION = exports.MIN_SUPPORTED_SERVER_VERSION = void 0;
|
||||
exports.MIN_SUPPORTED_SERVER_VERSION = '3.6';
|
||||
exports.MAX_SUPPORTED_SERVER_VERSION = '7.0';
|
||||
exports.MIN_SUPPORTED_WIRE_VERSION = 6;
|
||||
exports.MAX_SUPPORTED_WIRE_VERSION = 21;
|
||||
exports.MIN_SUPPORTED_QE_WIRE_VERSION = 21;
|
||||
exports.MIN_SUPPORTED_QE_SERVER_VERSION = '7.0';
|
||||
exports.OP_REPLY = 1;
|
||||
exports.OP_UPDATE = 2001;
|
||||
exports.OP_INSERT = 2002;
|
||||
exports.OP_QUERY = 2004;
|
||||
exports.OP_DELETE = 2006;
|
||||
exports.OP_COMPRESSED = 2012;
|
||||
exports.OP_MSG = 2013;
|
||||
//# sourceMappingURL=constants.js.map
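Editor's note, an illustrative compatibility check in the style the driver performs during the handshake (not from this commit; the require path assumes this vendored layout): a server is usable only if its wire-version range overlaps the driver's.

const { MIN_SUPPORTED_WIRE_VERSION, MAX_SUPPORTED_WIRE_VERSION } = require('mongodb/lib/cmap/wire_protocol/constants');

function serverIsSupported(hello) {
    // hello.minWireVersion / hello.maxWireVersion come from the server's hello response
    return hello.maxWireVersion >= MIN_SUPPORTED_WIRE_VERSION &&
        hello.minWireVersion <= MAX_SUPPORTED_WIRE_VERSION;
}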
1
VISUALIZACION/node_modules/mongodb/lib/cmap/wire_protocol/constants.js.map
generated
vendored
Executable file
File diff suppressed because one or more lines are too long
40
VISUALIZACION/node_modules/mongodb/lib/cmap/wire_protocol/shared.js
generated
vendored
Executable file
@@ -0,0 +1,40 @@
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.isSharded = exports.getReadPreference = void 0;
|
||||
const error_1 = require("../../error");
|
||||
const read_preference_1 = require("../../read_preference");
|
||||
const common_1 = require("../../sdam/common");
|
||||
const topology_description_1 = require("../../sdam/topology_description");
|
||||
function getReadPreference(options) {
|
||||
// Default to command version of the readPreference
|
||||
let readPreference = options?.readPreference ?? read_preference_1.ReadPreference.primary;
|
||||
// If we have an option readPreference override the command one
|
||||
if (options?.readPreference) {
|
||||
readPreference = options.readPreference;
|
||||
}
|
||||
if (typeof readPreference === 'string') {
|
||||
readPreference = read_preference_1.ReadPreference.fromString(readPreference);
|
||||
}
|
||||
if (!(readPreference instanceof read_preference_1.ReadPreference)) {
|
||||
throw new error_1.MongoInvalidArgumentError('Option "readPreference" must be a ReadPreference instance');
|
||||
}
|
||||
return readPreference;
|
||||
}
|
||||
exports.getReadPreference = getReadPreference;
|
||||
function isSharded(topologyOrServer) {
|
||||
if (topologyOrServer == null) {
|
||||
return false;
|
||||
}
|
||||
if (topologyOrServer.description && topologyOrServer.description.type === common_1.ServerType.Mongos) {
|
||||
return true;
|
||||
}
|
||||
// NOTE: This is incredibly inefficient, and should be removed once command construction
|
||||
// happens based on `Server` not `Topology`.
|
||||
if (topologyOrServer.description && topologyOrServer.description instanceof topology_description_1.TopologyDescription) {
|
||||
const servers = Array.from(topologyOrServer.description.servers.values());
|
||||
return servers.some((server) => server.type === common_1.ServerType.Mongos);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
exports.isSharded = isSharded;
|
||||
//# sourceMappingURL=shared.js.map
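
getReadPreference above accepts either a ReadPreference instance or a mode string and always hands back an instance. A minimal sketch of the same normalization using the public ReadPreference export (illustrative only, not part of the vendored file):

const { ReadPreference } = require('mongodb');

// Mode strings are upgraded to instances, matching the fromString branch above.
const fromString = ReadPreference.fromString('secondaryPreferred');
console.log(fromString instanceof ReadPreference); // true
console.log(fromString.mode); // 'secondaryPreferred'

// Instances pass through unchanged, so callers may supply either form.
const explicit = new ReadPreference('nearest');
console.log(explicit.mode); // 'nearest'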
1
VISUALIZACION/node_modules/mongodb/lib/cmap/wire_protocol/shared.js.map
generated
vendored
Executable file
@@ -0,0 +1 @@
{"version":3,"file":"shared.js","sourceRoot":"","sources":["../../../src/cmap/wire_protocol/shared.ts"],"names":[],"mappings":";;;AAAA,uCAAwD;AAExD,2DAAuD;AACvD,8CAA+C;AAI/C,0EAAsE;AAOtE,SAAgB,iBAAiB,CAAC,OAA8B;IAC9D,mDAAmD;IACnD,IAAI,cAAc,GAAG,OAAO,EAAE,cAAc,IAAI,gCAAc,CAAC,OAAO,CAAC;IACvE,+DAA+D;IAC/D,IAAI,OAAO,EAAE,cAAc,EAAE;QAC3B,cAAc,GAAG,OAAO,CAAC,cAAc,CAAC;KACzC;IAED,IAAI,OAAO,cAAc,KAAK,QAAQ,EAAE;QACtC,cAAc,GAAG,gCAAc,CAAC,UAAU,CAAC,cAAc,CAAC,CAAC;KAC5D;IAED,IAAI,CAAC,CAAC,cAAc,YAAY,gCAAc,CAAC,EAAE;QAC/C,MAAM,IAAI,iCAAyB,CACjC,2DAA2D,CAC5D,CAAC;KACH;IAED,OAAO,cAAc,CAAC;AACxB,CAAC;AAnBD,8CAmBC;AAED,SAAgB,SAAS,CAAC,gBAAiD;IACzE,IAAI,gBAAgB,IAAI,IAAI,EAAE;QAC5B,OAAO,KAAK,CAAC;KACd;IAED,IAAI,gBAAgB,CAAC,WAAW,IAAI,gBAAgB,CAAC,WAAW,CAAC,IAAI,KAAK,mBAAU,CAAC,MAAM,EAAE;QAC3F,OAAO,IAAI,CAAC;KACb;IAED,wFAAwF;IACxF,kDAAkD;IAClD,IAAI,gBAAgB,CAAC,WAAW,IAAI,gBAAgB,CAAC,WAAW,YAAY,0CAAmB,EAAE;QAC/F,MAAM,OAAO,GAAwB,KAAK,CAAC,IAAI,CAAC,gBAAgB,CAAC,WAAW,CAAC,OAAO,CAAC,MAAM,EAAE,CAAC,CAAC;QAC/F,OAAO,OAAO,CAAC,IAAI,CAAC,CAAC,MAAyB,EAAE,EAAE,CAAC,MAAM,CAAC,IAAI,KAAK,mBAAU,CAAC,MAAM,CAAC,CAAC;KACvF;IAED,OAAO,KAAK,CAAC;AACf,CAAC;AAjBD,8BAiBC"}
626
VISUALIZACION/node_modules/mongodb/lib/collection.js
generated
vendored
Executable file
@@ -0,0 +1,626 @@
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.Collection = void 0;
|
||||
const bson_1 = require("./bson");
|
||||
const ordered_1 = require("./bulk/ordered");
|
||||
const unordered_1 = require("./bulk/unordered");
|
||||
const change_stream_1 = require("./change_stream");
|
||||
const aggregation_cursor_1 = require("./cursor/aggregation_cursor");
|
||||
const find_cursor_1 = require("./cursor/find_cursor");
|
||||
const list_indexes_cursor_1 = require("./cursor/list_indexes_cursor");
|
||||
const list_search_indexes_cursor_1 = require("./cursor/list_search_indexes_cursor");
|
||||
const error_1 = require("./error");
|
||||
const bulk_write_1 = require("./operations/bulk_write");
|
||||
const count_1 = require("./operations/count");
|
||||
const count_documents_1 = require("./operations/count_documents");
|
||||
const delete_1 = require("./operations/delete");
|
||||
const distinct_1 = require("./operations/distinct");
|
||||
const drop_1 = require("./operations/drop");
|
||||
const estimated_document_count_1 = require("./operations/estimated_document_count");
|
||||
const execute_operation_1 = require("./operations/execute_operation");
|
||||
const find_and_modify_1 = require("./operations/find_and_modify");
|
||||
const indexes_1 = require("./operations/indexes");
|
||||
const insert_1 = require("./operations/insert");
|
||||
const is_capped_1 = require("./operations/is_capped");
|
||||
const options_operation_1 = require("./operations/options_operation");
|
||||
const rename_1 = require("./operations/rename");
|
||||
const create_1 = require("./operations/search_indexes/create");
|
||||
const drop_2 = require("./operations/search_indexes/drop");
|
||||
const update_1 = require("./operations/search_indexes/update");
|
||||
const stats_1 = require("./operations/stats");
|
||||
const update_2 = require("./operations/update");
|
||||
const read_concern_1 = require("./read_concern");
|
||||
const read_preference_1 = require("./read_preference");
|
||||
const utils_1 = require("./utils");
|
||||
const write_concern_1 = require("./write_concern");
|
||||
/**
|
||||
* The **Collection** class is an internal class that embodies a MongoDB collection
|
||||
* allowing for insert/find/update/delete and other command operation on that MongoDB collection.
|
||||
*
|
||||
* **COLLECTION Cannot directly be instantiated**
|
||||
* @public
|
||||
*
|
||||
* @example
|
||||
* ```ts
|
||||
* import { MongoClient } from 'mongodb';
|
||||
*
|
||||
* interface Pet {
|
||||
* name: string;
|
||||
* kind: 'dog' | 'cat' | 'fish';
|
||||
* }
|
||||
*
|
||||
* const client = new MongoClient('mongodb://localhost:27017');
|
||||
* const pets = client.db().collection<Pet>('pets');
|
||||
*
|
||||
* const petCursor = pets.find();
|
||||
*
|
||||
* for await (const pet of petCursor) {
|
||||
* console.log(`${pet.name} is a ${pet.kind}!`);
|
||||
* }
|
||||
* ```
|
||||
*/
|
||||
class Collection {
|
||||
/**
|
||||
* Create a new Collection instance
|
||||
* @internal
|
||||
*/
|
||||
constructor(db, name, options) {
|
||||
(0, utils_1.checkCollectionName)(name);
|
||||
// Internal state
|
||||
this.s = {
|
||||
db,
|
||||
options,
|
||||
namespace: new utils_1.MongoDBCollectionNamespace(db.databaseName, name),
|
||||
pkFactory: db.options?.pkFactory ?? utils_1.DEFAULT_PK_FACTORY,
|
||||
readPreference: read_preference_1.ReadPreference.fromOptions(options),
|
||||
bsonOptions: (0, bson_1.resolveBSONOptions)(options, db),
|
||||
readConcern: read_concern_1.ReadConcern.fromOptions(options),
|
||||
writeConcern: write_concern_1.WriteConcern.fromOptions(options)
|
||||
};
|
||||
this.client = db.client;
|
||||
}
|
||||
/**
|
||||
* The name of the database this collection belongs to
|
||||
*/
|
||||
get dbName() {
|
||||
return this.s.namespace.db;
|
||||
}
|
||||
/**
|
||||
* The name of this collection
|
||||
*/
|
||||
get collectionName() {
|
||||
return this.s.namespace.collection;
|
||||
}
|
||||
/**
|
||||
* The namespace of this collection, in the format `${this.dbName}.${this.collectionName}`
|
||||
*/
|
||||
get namespace() {
|
||||
return this.fullNamespace.toString();
|
||||
}
|
||||
/**
|
||||
* @internal
|
||||
*
|
||||
* The `MongoDBNamespace` for the collection.
|
||||
*/
|
||||
get fullNamespace() {
|
||||
return this.s.namespace;
|
||||
}
|
||||
/**
|
||||
* The current readConcern of the collection. If not explicitly defined for
|
||||
* this collection, will be inherited from the parent DB
|
||||
*/
|
||||
get readConcern() {
|
||||
if (this.s.readConcern == null) {
|
||||
return this.s.db.readConcern;
|
||||
}
|
||||
return this.s.readConcern;
|
||||
}
|
||||
/**
|
||||
* The current readPreference of the collection. If not explicitly defined for
|
||||
* this collection, will be inherited from the parent DB
|
||||
*/
|
||||
get readPreference() {
|
||||
if (this.s.readPreference == null) {
|
||||
return this.s.db.readPreference;
|
||||
}
|
||||
return this.s.readPreference;
|
||||
}
|
||||
get bsonOptions() {
|
||||
return this.s.bsonOptions;
|
||||
}
|
||||
/**
|
||||
* The current writeConcern of the collection. If not explicitly defined for
|
||||
* this collection, will be inherited from the parent DB
|
||||
*/
|
||||
get writeConcern() {
|
||||
if (this.s.writeConcern == null) {
|
||||
return this.s.db.writeConcern;
|
||||
}
|
||||
return this.s.writeConcern;
|
||||
}
|
||||
/** The current index hint for the collection */
|
||||
get hint() {
|
||||
return this.s.collectionHint;
|
||||
}
|
||||
set hint(v) {
|
||||
this.s.collectionHint = (0, utils_1.normalizeHintField)(v);
|
||||
}
|
||||
/**
|
||||
* Inserts a single document into MongoDB. If documents passed in do not contain the **_id** field,
|
||||
* one will be added to each of the documents missing it by the driver, mutating the document. This behavior
|
||||
* can be overridden by setting the **forceServerObjectId** flag.
|
||||
*
|
||||
* @param doc - The document to insert
|
||||
* @param options - Optional settings for the command
|
||||
*/
|
||||
async insertOne(doc, options) {
|
||||
return (0, execute_operation_1.executeOperation)(this.client, new insert_1.InsertOneOperation(this, doc, (0, utils_1.resolveOptions)(this, options)));
|
||||
}
|
||||
/**
|
||||
* Inserts an array of documents into MongoDB. If documents passed in do not contain the **_id** field,
|
||||
* one will be added to each of the documents missing it by the driver, mutating the document. This behavior
|
||||
* can be overridden by setting the **forceServerObjectId** flag.
|
||||
*
|
||||
* @param docs - The documents to insert
|
||||
* @param options - Optional settings for the command
|
||||
*/
|
||||
async insertMany(docs, options) {
|
||||
return (0, execute_operation_1.executeOperation)(this.client, new insert_1.InsertManyOperation(this, docs, (0, utils_1.resolveOptions)(this, options ?? { ordered: true })));
|
||||
}
|
||||
/**
|
||||
* Perform a bulkWrite operation without a fluent API
|
||||
*
|
||||
* Legal operation types are
|
||||
* - `insertOne`
|
||||
* - `replaceOne`
|
||||
* - `updateOne`
|
||||
* - `updateMany`
|
||||
* - `deleteOne`
|
||||
* - `deleteMany`
|
||||
*
|
||||
* If documents passed in do not contain the **_id** field,
|
||||
* one will be added to each of the documents missing it by the driver, mutating the document. This behavior
|
||||
* can be overridden by setting the **forceServerObjectId** flag.
|
||||
*
|
||||
* @param operations - Bulk operations to perform
|
||||
* @param options - Optional settings for the command
|
||||
* @throws MongoDriverError if operations is not an array
|
||||
*/
|
||||
async bulkWrite(operations, options) {
|
||||
if (!Array.isArray(operations)) {
|
||||
throw new error_1.MongoInvalidArgumentError('Argument "operations" must be an array of documents');
|
||||
}
|
||||
return (0, execute_operation_1.executeOperation)(this.client, new bulk_write_1.BulkWriteOperation(this, operations, (0, utils_1.resolveOptions)(this, options ?? { ordered: true })));
|
||||
}
|
||||
/**
|
||||
* Update a single document in a collection
|
||||
*
|
||||
* @param filter - The filter used to select the document to update
|
||||
* @param update - The update operations to be applied to the document
|
||||
* @param options - Optional settings for the command
|
||||
*/
|
||||
async updateOne(filter, update, options) {
|
||||
return (0, execute_operation_1.executeOperation)(this.client, new update_2.UpdateOneOperation(this, filter, update, (0, utils_1.resolveOptions)(this, options)));
|
||||
}
|
||||
/**
|
||||
* Replace a document in a collection with another document
|
||||
*
|
||||
* @param filter - The filter used to select the document to replace
|
||||
* @param replacement - The Document that replaces the matching document
|
||||
* @param options - Optional settings for the command
|
||||
*/
|
||||
async replaceOne(filter, replacement, options) {
|
||||
return (0, execute_operation_1.executeOperation)(this.client, new update_2.ReplaceOneOperation(this, filter, replacement, (0, utils_1.resolveOptions)(this, options)));
|
||||
}
|
||||
/**
|
||||
* Update multiple documents in a collection
|
||||
*
|
||||
* @param filter - The filter used to select the documents to update
|
||||
* @param update - The update operations to be applied to the documents
|
||||
* @param options - Optional settings for the command
|
||||
*/
|
||||
async updateMany(filter, update, options) {
|
||||
return (0, execute_operation_1.executeOperation)(this.client, new update_2.UpdateManyOperation(this, filter, update, (0, utils_1.resolveOptions)(this, options)));
|
||||
}
|
||||
/**
|
||||
* Delete a document from a collection
|
||||
*
|
||||
* @param filter - The filter used to select the document to remove
|
||||
* @param options - Optional settings for the command
|
||||
*/
|
||||
async deleteOne(filter = {}, options = {}) {
|
||||
return (0, execute_operation_1.executeOperation)(this.client, new delete_1.DeleteOneOperation(this, filter, (0, utils_1.resolveOptions)(this, options)));
|
||||
}
|
||||
/**
|
||||
* Delete multiple documents from a collection
|
||||
*
|
||||
* @param filter - The filter used to select the documents to remove
|
||||
* @param options - Optional settings for the command
|
||||
*/
|
||||
async deleteMany(filter = {}, options = {}) {
|
||||
return (0, execute_operation_1.executeOperation)(this.client, new delete_1.DeleteManyOperation(this, filter, (0, utils_1.resolveOptions)(this, options)));
|
||||
}
|
||||
/**
|
||||
* Rename the collection.
|
||||
*
|
||||
* @remarks
|
||||
* This operation does not inherit options from the Db or MongoClient.
|
||||
*
|
||||
* @param newName - New name of of the collection.
|
||||
* @param options - Optional settings for the command
|
||||
*/
|
||||
async rename(newName, options) {
|
||||
// Intentionally, we do not inherit options from parent for this operation.
|
||||
return (0, execute_operation_1.executeOperation)(this.client, new rename_1.RenameOperation(this, newName, {
|
||||
...options,
|
||||
readPreference: read_preference_1.ReadPreference.PRIMARY
|
||||
}));
|
||||
}
|
||||
/**
|
||||
* Drop the collection from the database, removing it permanently. New accesses will create a new collection.
|
||||
*
|
||||
* @param options - Optional settings for the command
|
||||
*/
|
||||
async drop(options) {
|
||||
return (0, execute_operation_1.executeOperation)(this.client, new drop_1.DropCollectionOperation(this.s.db, this.collectionName, options));
|
||||
}
|
||||
async findOne(filter = {}, options = {}) {
|
||||
return this.find(filter, options).limit(-1).batchSize(1).next();
|
||||
}
|
||||
find(filter = {}, options = {}) {
|
||||
return new find_cursor_1.FindCursor(this.client, this.s.namespace, filter, (0, utils_1.resolveOptions)(this, options));
|
||||
}
|
||||
/**
|
||||
* Returns the options of the collection.
|
||||
*
|
||||
* @param options - Optional settings for the command
|
||||
*/
|
||||
async options(options) {
|
||||
return (0, execute_operation_1.executeOperation)(this.client, new options_operation_1.OptionsOperation(this, (0, utils_1.resolveOptions)(this, options)));
|
||||
}
|
||||
/**
|
||||
* Returns if the collection is a capped collection
|
||||
*
|
||||
* @param options - Optional settings for the command
|
||||
*/
|
||||
async isCapped(options) {
|
||||
return (0, execute_operation_1.executeOperation)(this.client, new is_capped_1.IsCappedOperation(this, (0, utils_1.resolveOptions)(this, options)));
|
||||
}
|
||||
/**
|
||||
* Creates an index on the db and collection collection.
|
||||
*
|
||||
* @param indexSpec - The field name or index specification to create an index for
|
||||
* @param options - Optional settings for the command
|
||||
*
|
||||
* @example
|
||||
* ```ts
|
||||
* const collection = client.db('foo').collection('bar');
|
||||
*
|
||||
* await collection.createIndex({ a: 1, b: -1 });
|
||||
*
|
||||
* // Alternate syntax for { c: 1, d: -1 } that ensures order of indexes
|
||||
* await collection.createIndex([ [c, 1], [d, -1] ]);
|
||||
*
|
||||
* // Equivalent to { e: 1 }
|
||||
* await collection.createIndex('e');
|
||||
*
|
||||
* // Equivalent to { f: 1, g: 1 }
|
||||
* await collection.createIndex(['f', 'g'])
|
||||
*
|
||||
* // Equivalent to { h: 1, i: -1 }
|
||||
* await collection.createIndex([ { h: 1 }, { i: -1 } ]);
|
||||
*
|
||||
* // Equivalent to { j: 1, k: -1, l: 2d }
|
||||
* await collection.createIndex(['j', ['k', -1], { l: '2d' }])
|
||||
* ```
|
||||
*/
|
||||
async createIndex(indexSpec, options) {
|
||||
return (0, execute_operation_1.executeOperation)(this.client, new indexes_1.CreateIndexOperation(this, this.collectionName, indexSpec, (0, utils_1.resolveOptions)(this, options)));
|
||||
}
|
||||
/**
|
||||
* Creates multiple indexes in the collection, this method is only supported for
|
||||
* MongoDB 2.6 or higher. Earlier version of MongoDB will throw a command not supported
|
||||
* error.
|
||||
*
|
||||
* **Note**: Unlike {@link Collection#createIndex| createIndex}, this function takes in raw index specifications.
|
||||
* Index specifications are defined {@link https://www.mongodb.com/docs/manual/reference/command/createIndexes/| here}.
|
||||
*
|
||||
* @param indexSpecs - An array of index specifications to be created
|
||||
* @param options - Optional settings for the command
|
||||
*
|
||||
* @example
|
||||
* ```ts
|
||||
* const collection = client.db('foo').collection('bar');
|
||||
* await collection.createIndexes([
|
||||
* // Simple index on field fizz
|
||||
* {
|
||||
* key: { fizz: 1 },
|
||||
* }
|
||||
* // wildcard index
|
||||
* {
|
||||
* key: { '$**': 1 }
|
||||
* },
|
||||
* // named index on darmok and jalad
|
||||
* {
|
||||
* key: { darmok: 1, jalad: -1 }
|
||||
* name: 'tanagra'
|
||||
* }
|
||||
* ]);
|
||||
* ```
|
||||
*/
|
||||
async createIndexes(indexSpecs, options) {
|
||||
return (0, execute_operation_1.executeOperation)(this.client, new indexes_1.CreateIndexesOperation(this, this.collectionName, indexSpecs, (0, utils_1.resolveOptions)(this, { ...options, maxTimeMS: undefined })));
|
||||
}
|
||||
/**
|
||||
* Drops an index from this collection.
|
||||
*
|
||||
* @param indexName - Name of the index to drop.
|
||||
* @param options - Optional settings for the command
|
||||
*/
|
||||
async dropIndex(indexName, options) {
|
||||
return (0, execute_operation_1.executeOperation)(this.client, new indexes_1.DropIndexOperation(this, indexName, {
|
||||
...(0, utils_1.resolveOptions)(this, options),
|
||||
readPreference: read_preference_1.ReadPreference.primary
|
||||
}));
|
||||
}
|
||||
/**
|
||||
* Drops all indexes from this collection.
|
||||
*
|
||||
* @param options - Optional settings for the command
|
||||
*/
|
||||
async dropIndexes(options) {
|
||||
return (0, execute_operation_1.executeOperation)(this.client, new indexes_1.DropIndexesOperation(this, (0, utils_1.resolveOptions)(this, options)));
|
||||
}
|
||||
/**
|
||||
* Get the list of all indexes information for the collection.
|
||||
*
|
||||
* @param options - Optional settings for the command
|
||||
*/
|
||||
listIndexes(options) {
|
||||
return new list_indexes_cursor_1.ListIndexesCursor(this, (0, utils_1.resolveOptions)(this, options));
|
||||
}
|
||||
/**
|
||||
* Checks if one or more indexes exist on the collection, fails on first non-existing index
|
||||
*
|
||||
* @param indexes - One or more index names to check.
|
||||
* @param options - Optional settings for the command
|
||||
*/
|
||||
async indexExists(indexes, options) {
|
||||
return (0, execute_operation_1.executeOperation)(this.client, new indexes_1.IndexExistsOperation(this, indexes, (0, utils_1.resolveOptions)(this, options)));
|
||||
}
|
||||
/**
|
||||
* Retrieves this collections index info.
|
||||
*
|
||||
* @param options - Optional settings for the command
|
||||
*/
|
||||
async indexInformation(options) {
|
||||
return (0, execute_operation_1.executeOperation)(this.client, new indexes_1.IndexInformationOperation(this.s.db, this.collectionName, (0, utils_1.resolveOptions)(this, options)));
|
||||
}
|
||||
/**
|
||||
* Gets an estimate of the count of documents in a collection using collection metadata.
|
||||
* This will always run a count command on all server versions.
|
||||
*
|
||||
* due to an oversight in versions 5.0.0-5.0.8 of MongoDB, the count command,
|
||||
* which estimatedDocumentCount uses in its implementation, was not included in v1 of
|
||||
* the Stable API, and so users of the Stable API with estimatedDocumentCount are
|
||||
* recommended to upgrade their server version to 5.0.9+ or set apiStrict: false to avoid
|
||||
* encountering errors.
|
||||
*
|
||||
* @see {@link https://www.mongodb.com/docs/manual/reference/command/count/#behavior|Count: Behavior}
|
||||
* @param options - Optional settings for the command
|
||||
*/
|
||||
async estimatedDocumentCount(options) {
|
||||
return (0, execute_operation_1.executeOperation)(this.client, new estimated_document_count_1.EstimatedDocumentCountOperation(this, (0, utils_1.resolveOptions)(this, options)));
|
||||
}
|
||||
/**
|
||||
* Gets the number of documents matching the filter.
|
||||
* For a fast count of the total documents in a collection see {@link Collection#estimatedDocumentCount| estimatedDocumentCount}.
|
||||
* **Note**: When migrating from {@link Collection#count| count} to {@link Collection#countDocuments| countDocuments}
|
||||
* the following query operators must be replaced:
|
||||
*
|
||||
* | Operator | Replacement |
|
||||
* | -------- | ----------- |
|
||||
* | `$where` | [`$expr`][1] |
|
||||
* | `$near` | [`$geoWithin`][2] with [`$center`][3] |
|
||||
* | `$nearSphere` | [`$geoWithin`][2] with [`$centerSphere`][4] |
|
||||
*
|
||||
* [1]: https://www.mongodb.com/docs/manual/reference/operator/query/expr/
|
||||
* [2]: https://www.mongodb.com/docs/manual/reference/operator/query/geoWithin/
|
||||
* [3]: https://www.mongodb.com/docs/manual/reference/operator/query/center/#op._S_center
|
||||
* [4]: https://www.mongodb.com/docs/manual/reference/operator/query/centerSphere/#op._S_centerSphere
|
||||
*
|
||||
* @param filter - The filter for the count
|
||||
* @param options - Optional settings for the command
|
||||
*
|
||||
* @see https://www.mongodb.com/docs/manual/reference/operator/query/expr/
|
||||
* @see https://www.mongodb.com/docs/manual/reference/operator/query/geoWithin/
|
||||
* @see https://www.mongodb.com/docs/manual/reference/operator/query/center/#op._S_center
|
||||
* @see https://www.mongodb.com/docs/manual/reference/operator/query/centerSphere/#op._S_centerSphere
|
||||
*/
|
||||
async countDocuments(filter = {}, options = {}) {
|
||||
return (0, execute_operation_1.executeOperation)(this.client, new count_documents_1.CountDocumentsOperation(this, filter, (0, utils_1.resolveOptions)(this, options)));
|
||||
}
|
||||
async distinct(key, filter = {}, options = {}) {
|
||||
return (0, execute_operation_1.executeOperation)(this.client, new distinct_1.DistinctOperation(this, key, filter, (0, utils_1.resolveOptions)(this, options)));
|
||||
}
|
||||
/**
|
||||
* Retrieve all the indexes on the collection.
|
||||
*
|
||||
* @param options - Optional settings for the command
|
||||
*/
|
||||
async indexes(options) {
|
||||
return (0, execute_operation_1.executeOperation)(this.client, new indexes_1.IndexesOperation(this, (0, utils_1.resolveOptions)(this, options)));
|
||||
}
|
||||
/**
|
||||
* Get all the collection statistics.
|
||||
*
|
||||
* @deprecated the `collStats` operation will be removed in the next major release. Please
|
||||
* use an aggregation pipeline with the [`$collStats`](https://www.mongodb.com/docs/manual/reference/operator/aggregation/collStats/) stage instead
|
||||
*
|
||||
* @param options - Optional settings for the command
|
||||
*/
|
||||
async stats(options) {
|
||||
return (0, execute_operation_1.executeOperation)(this.client, new stats_1.CollStatsOperation(this, options));
|
||||
}
|
||||
async findOneAndDelete(filter, options) {
|
||||
return (0, execute_operation_1.executeOperation)(this.client, new find_and_modify_1.FindOneAndDeleteOperation(this, filter, (0, utils_1.resolveOptions)(this, options)));
|
||||
}
|
||||
async findOneAndReplace(filter, replacement, options) {
|
||||
return (0, execute_operation_1.executeOperation)(this.client, new find_and_modify_1.FindOneAndReplaceOperation(this, filter, replacement, (0, utils_1.resolveOptions)(this, options)));
|
||||
}
|
||||
async findOneAndUpdate(filter, update, options) {
|
||||
return (0, execute_operation_1.executeOperation)(this.client, new find_and_modify_1.FindOneAndUpdateOperation(this, filter, update, (0, utils_1.resolveOptions)(this, options)));
|
||||
}
|
||||
/**
|
||||
* Execute an aggregation framework pipeline against the collection, needs MongoDB \>= 2.2
|
||||
*
|
||||
* @param pipeline - An array of aggregation pipelines to execute
|
||||
* @param options - Optional settings for the command
|
||||
*/
|
||||
aggregate(pipeline = [], options) {
|
||||
if (!Array.isArray(pipeline)) {
|
||||
throw new error_1.MongoInvalidArgumentError('Argument "pipeline" must be an array of aggregation stages');
|
||||
}
|
||||
return new aggregation_cursor_1.AggregationCursor(this.client, this.s.namespace, pipeline, (0, utils_1.resolveOptions)(this, options));
|
||||
}
|
||||
/**
|
||||
* Create a new Change Stream, watching for new changes (insertions, updates, replacements, deletions, and invalidations) in this collection.
|
||||
*
|
||||
* @remarks
|
||||
* watch() accepts two generic arguments for distinct use cases:
|
||||
* - The first is to override the schema that may be defined for this specific collection
|
||||
* - The second is to override the shape of the change stream document entirely, if it is not provided the type will default to ChangeStreamDocument of the first argument
|
||||
* @example
|
||||
* By just providing the first argument I can type the change to be `ChangeStreamDocument<{ _id: number }>`
|
||||
* ```ts
|
||||
* collection.watch<{ _id: number }>()
|
||||
* .on('change', change => console.log(change._id.toFixed(4)));
|
||||
* ```
|
||||
*
|
||||
* @example
|
||||
* Passing a second argument provides a way to reflect the type changes caused by an advanced pipeline.
|
||||
* Here, we are using a pipeline to have MongoDB filter for insert changes only and add a comment.
|
||||
* No need start from scratch on the ChangeStreamInsertDocument type!
|
||||
* By using an intersection we can save time and ensure defaults remain the same type!
|
||||
* ```ts
|
||||
* collection
|
||||
* .watch<Schema, ChangeStreamInsertDocument<Schema> & { comment: string }>([
|
||||
* { $addFields: { comment: 'big changes' } },
|
||||
* { $match: { operationType: 'insert' } }
|
||||
* ])
|
||||
* .on('change', change => {
|
||||
* change.comment.startsWith('big');
|
||||
* change.operationType === 'insert';
|
||||
* // No need to narrow in code because the generics did that for us!
|
||||
* expectType<Schema>(change.fullDocument);
|
||||
* });
|
||||
* ```
|
||||
*
|
||||
* @param pipeline - An array of {@link https://www.mongodb.com/docs/manual/reference/operator/aggregation-pipeline/|aggregation pipeline stages} through which to pass change stream documents. This allows for filtering (using $match) and manipulating the change stream documents.
|
||||
* @param options - Optional settings for the command
|
||||
* @typeParam TLocal - Type of the data being detected by the change stream
|
||||
* @typeParam TChange - Type of the whole change stream document emitted
|
||||
*/
|
||||
watch(pipeline = [], options = {}) {
|
||||
// Allow optionally not specifying a pipeline
|
||||
if (!Array.isArray(pipeline)) {
|
||||
options = pipeline;
|
||||
pipeline = [];
|
||||
}
|
||||
return new change_stream_1.ChangeStream(this, pipeline, (0, utils_1.resolveOptions)(this, options));
|
||||
}
|
||||
/**
|
||||
* Initiate an Out of order batch write operation. All operations will be buffered into insert/update/remove commands executed out of order.
|
||||
*
|
||||
* @throws MongoNotConnectedError
|
||||
* @remarks
|
||||
* **NOTE:** MongoClient must be connected prior to calling this method due to a known limitation in this legacy implementation.
|
||||
* However, `collection.bulkWrite()` provides an equivalent API that does not require prior connecting.
|
||||
*/
|
||||
initializeUnorderedBulkOp(options) {
|
||||
return new unordered_1.UnorderedBulkOperation(this, (0, utils_1.resolveOptions)(this, options));
|
||||
}
|
||||
/**
|
||||
* Initiate an In order bulk write operation. Operations will be serially executed in the order they are added, creating a new operation for each switch in types.
|
||||
*
|
||||
* @throws MongoNotConnectedError
|
||||
* @remarks
|
||||
* **NOTE:** MongoClient must be connected prior to calling this method due to a known limitation in this legacy implementation.
|
||||
* However, `collection.bulkWrite()` provides an equivalent API that does not require prior connecting.
|
||||
*/
|
||||
initializeOrderedBulkOp(options) {
|
||||
return new ordered_1.OrderedBulkOperation(this, (0, utils_1.resolveOptions)(this, options));
|
||||
}
|
||||
/**
|
||||
* An estimated count of matching documents in the db to a filter.
|
||||
*
|
||||
* **NOTE:** This method has been deprecated, since it does not provide an accurate count of the documents
|
||||
* in a collection. To obtain an accurate count of documents in the collection, use {@link Collection#countDocuments| countDocuments}.
|
||||
* To obtain an estimated count of all documents in the collection, use {@link Collection#estimatedDocumentCount| estimatedDocumentCount}.
|
||||
*
|
||||
* @deprecated use {@link Collection#countDocuments| countDocuments} or {@link Collection#estimatedDocumentCount| estimatedDocumentCount} instead
|
||||
*
|
||||
* @param filter - The filter for the count.
|
||||
* @param options - Optional settings for the command
|
||||
*/
|
||||
async count(filter = {}, options = {}) {
|
||||
return (0, execute_operation_1.executeOperation)(this.client, new count_1.CountOperation(this.fullNamespace, filter, (0, utils_1.resolveOptions)(this, options)));
|
||||
}
|
||||
listSearchIndexes(indexNameOrOptions, options) {
|
||||
options =
|
||||
typeof indexNameOrOptions === 'object' ? indexNameOrOptions : options == null ? {} : options;
|
||||
const indexName = indexNameOrOptions == null
|
||||
? null
|
||||
: typeof indexNameOrOptions === 'object'
|
||||
? null
|
||||
: indexNameOrOptions;
|
||||
return new list_search_indexes_cursor_1.ListSearchIndexesCursor(this, indexName, options);
|
||||
}
|
||||
/**
|
||||
* Creates a single search index for the collection.
|
||||
*
|
||||
* @param description - The index description for the new search index.
|
||||
* @returns A promise that resolves to the name of the new search index.
|
||||
*
|
||||
* @remarks Only available when used against a 7.0+ Atlas cluster.
|
||||
*/
|
||||
async createSearchIndex(description) {
|
||||
const [index] = await this.createSearchIndexes([description]);
|
||||
return index;
|
||||
}
|
||||
/**
|
||||
* Creates multiple search indexes for the current collection.
|
||||
*
|
||||
* @param descriptions - An array of `SearchIndexDescription`s for the new search indexes.
|
||||
* @returns A promise that resolves to an array of the newly created search index names.
|
||||
*
|
||||
* @remarks Only available when used against a 7.0+ Atlas cluster.
|
||||
* @returns
|
||||
*/
|
||||
async createSearchIndexes(descriptions) {
|
||||
return (0, execute_operation_1.executeOperation)(this.client, new create_1.CreateSearchIndexesOperation(this, descriptions));
|
||||
}
|
||||
/**
|
||||
* Deletes a search index by index name.
|
||||
*
|
||||
* @param name - The name of the search index to be deleted.
|
||||
*
|
||||
* @remarks Only available when used against a 7.0+ Atlas cluster.
|
||||
*/
|
||||
async dropSearchIndex(name) {
|
||||
return (0, execute_operation_1.executeOperation)(this.client, new drop_2.DropSearchIndexOperation(this, name));
|
||||
}
|
||||
/**
|
||||
* Updates a search index by replacing the existing index definition with the provided definition.
|
||||
*
|
||||
* @param name - The name of the search index to update.
|
||||
* @param definition - The new search index definition.
|
||||
*
|
||||
* @remarks Only available when used against a 7.0+ Atlas cluster.
|
||||
*/
|
||||
async updateSearchIndex(name, definition) {
|
||||
return (0, execute_operation_1.executeOperation)(this.client, new update_1.UpdateSearchIndexOperation(this, name, definition));
|
||||
}
|
||||
}
|
||||
exports.Collection = Collection;
|
||||
//# sourceMappingURL=collection.js.map
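
The Collection methods above compose into the usual CRUD flow. A minimal, hypothetical usage sketch (not part of the vendored file; the connection string and data are illustrative only):

const { MongoClient } = require('mongodb');

async function main() {
    const client = new MongoClient('mongodb://localhost:27017');
    const pets = client.db('demo').collection('pets');

    // insertOne mutates the passed document, assigning _id when it is absent
    // (see the insertOne JSDoc above).
    const doc = { name: 'Luna', kind: 'cat' };
    await pets.insertOne(doc);
    console.log(doc._id); // ObjectId added by the driver

    // bulkWrite accepts the operation types listed in its JSDoc above.
    await pets.bulkWrite([
        { insertOne: { document: { name: 'Rex', kind: 'dog' } } },
        { updateOne: { filter: { name: 'Luna' }, update: { $set: { kind: 'cat' } } } }
    ]);

    // countDocuments runs an aggregation, so $where must be replaced with
    // $expr, per the migration table in the countDocuments JSDoc.
    const count = await pets.countDocuments({ $expr: { $gt: [{ $strLenCP: '$name' }, 2] } });
    console.log(count);

    await client.close();
}

main().catch(console.error);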
1
VISUALIZACION/node_modules/mongodb/lib/collection.js.map
generated
vendored
Executable file
File diff suppressed because one or more lines are too long
1103
VISUALIZACION/node_modules/mongodb/lib/connection_string.js
generated
vendored
Executable file
File diff suppressed because it is too large
1
VISUALIZACION/node_modules/mongodb/lib/connection_string.js.map
generated
vendored
Executable file
File diff suppressed because one or more lines are too long
142
VISUALIZACION/node_modules/mongodb/lib/constants.js
generated
vendored
Executable file
@@ -0,0 +1,142 @@
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.TOPOLOGY_EVENTS = exports.CMAP_EVENTS = exports.HEARTBEAT_EVENTS = exports.RESUME_TOKEN_CHANGED = exports.END = exports.CHANGE = exports.INIT = exports.MORE = exports.RESPONSE = exports.SERVER_HEARTBEAT_FAILED = exports.SERVER_HEARTBEAT_SUCCEEDED = exports.SERVER_HEARTBEAT_STARTED = exports.COMMAND_FAILED = exports.COMMAND_SUCCEEDED = exports.COMMAND_STARTED = exports.CLUSTER_TIME_RECEIVED = exports.CONNECTION_CHECKED_IN = exports.CONNECTION_CHECKED_OUT = exports.CONNECTION_CHECK_OUT_FAILED = exports.CONNECTION_CHECK_OUT_STARTED = exports.CONNECTION_CLOSED = exports.CONNECTION_READY = exports.CONNECTION_CREATED = exports.CONNECTION_POOL_READY = exports.CONNECTION_POOL_CLEARED = exports.CONNECTION_POOL_CLOSED = exports.CONNECTION_POOL_CREATED = exports.TOPOLOGY_DESCRIPTION_CHANGED = exports.TOPOLOGY_CLOSED = exports.TOPOLOGY_OPENING = exports.SERVER_DESCRIPTION_CHANGED = exports.SERVER_CLOSED = exports.SERVER_OPENING = exports.DESCRIPTION_RECEIVED = exports.UNPINNED = exports.PINNED = exports.MESSAGE = exports.ENDED = exports.CLOSED = exports.CONNECT = exports.OPEN = exports.CLOSE = exports.TIMEOUT = exports.ERROR = exports.SYSTEM_JS_COLLECTION = exports.SYSTEM_COMMAND_COLLECTION = exports.SYSTEM_USER_COLLECTION = exports.SYSTEM_PROFILE_COLLECTION = exports.SYSTEM_INDEX_COLLECTION = exports.SYSTEM_NAMESPACE_COLLECTION = void 0;
|
||||
exports.LEGACY_HELLO_COMMAND_CAMEL_CASE = exports.LEGACY_HELLO_COMMAND = exports.MONGO_CLIENT_EVENTS = exports.LOCAL_SERVER_EVENTS = exports.SERVER_RELAY_EVENTS = exports.APM_EVENTS = void 0;
|
||||
exports.SYSTEM_NAMESPACE_COLLECTION = 'system.namespaces';
|
||||
exports.SYSTEM_INDEX_COLLECTION = 'system.indexes';
|
||||
exports.SYSTEM_PROFILE_COLLECTION = 'system.profile';
|
||||
exports.SYSTEM_USER_COLLECTION = 'system.users';
|
||||
exports.SYSTEM_COMMAND_COLLECTION = '$cmd';
|
||||
exports.SYSTEM_JS_COLLECTION = 'system.js';
|
||||
// events
|
||||
exports.ERROR = 'error';
|
||||
exports.TIMEOUT = 'timeout';
|
||||
exports.CLOSE = 'close';
|
||||
exports.OPEN = 'open';
|
||||
exports.CONNECT = 'connect';
|
||||
exports.CLOSED = 'closed';
|
||||
exports.ENDED = 'ended';
|
||||
exports.MESSAGE = 'message';
|
||||
exports.PINNED = 'pinned';
|
||||
exports.UNPINNED = 'unpinned';
|
||||
exports.DESCRIPTION_RECEIVED = 'descriptionReceived';
|
||||
exports.SERVER_OPENING = 'serverOpening';
|
||||
exports.SERVER_CLOSED = 'serverClosed';
|
||||
exports.SERVER_DESCRIPTION_CHANGED = 'serverDescriptionChanged';
|
||||
exports.TOPOLOGY_OPENING = 'topologyOpening';
|
||||
exports.TOPOLOGY_CLOSED = 'topologyClosed';
|
||||
exports.TOPOLOGY_DESCRIPTION_CHANGED = 'topologyDescriptionChanged';
|
||||
/** @internal */
|
||||
exports.CONNECTION_POOL_CREATED = 'connectionPoolCreated';
|
||||
/** @internal */
|
||||
exports.CONNECTION_POOL_CLOSED = 'connectionPoolClosed';
|
||||
/** @internal */
|
||||
exports.CONNECTION_POOL_CLEARED = 'connectionPoolCleared';
|
||||
/** @internal */
|
||||
exports.CONNECTION_POOL_READY = 'connectionPoolReady';
|
||||
/** @internal */
|
||||
exports.CONNECTION_CREATED = 'connectionCreated';
|
||||
/** @internal */
|
||||
exports.CONNECTION_READY = 'connectionReady';
|
||||
/** @internal */
|
||||
exports.CONNECTION_CLOSED = 'connectionClosed';
|
||||
/** @internal */
|
||||
exports.CONNECTION_CHECK_OUT_STARTED = 'connectionCheckOutStarted';
|
||||
/** @internal */
|
||||
exports.CONNECTION_CHECK_OUT_FAILED = 'connectionCheckOutFailed';
|
||||
/** @internal */
|
||||
exports.CONNECTION_CHECKED_OUT = 'connectionCheckedOut';
|
||||
/** @internal */
|
||||
exports.CONNECTION_CHECKED_IN = 'connectionCheckedIn';
|
||||
exports.CLUSTER_TIME_RECEIVED = 'clusterTimeReceived';
|
||||
exports.COMMAND_STARTED = 'commandStarted';
|
||||
exports.COMMAND_SUCCEEDED = 'commandSucceeded';
|
||||
exports.COMMAND_FAILED = 'commandFailed';
|
||||
exports.SERVER_HEARTBEAT_STARTED = 'serverHeartbeatStarted';
|
||||
exports.SERVER_HEARTBEAT_SUCCEEDED = 'serverHeartbeatSucceeded';
|
||||
exports.SERVER_HEARTBEAT_FAILED = 'serverHeartbeatFailed';
|
||||
exports.RESPONSE = 'response';
|
||||
exports.MORE = 'more';
|
||||
exports.INIT = 'init';
|
||||
exports.CHANGE = 'change';
|
||||
exports.END = 'end';
|
||||
exports.RESUME_TOKEN_CHANGED = 'resumeTokenChanged';
|
||||
/** @public */
|
||||
exports.HEARTBEAT_EVENTS = Object.freeze([
|
||||
exports.SERVER_HEARTBEAT_STARTED,
|
||||
exports.SERVER_HEARTBEAT_SUCCEEDED,
|
||||
exports.SERVER_HEARTBEAT_FAILED
|
||||
]);
|
||||
/** @public */
|
||||
exports.CMAP_EVENTS = Object.freeze([
|
||||
exports.CONNECTION_POOL_CREATED,
|
||||
exports.CONNECTION_POOL_READY,
|
||||
exports.CONNECTION_POOL_CLEARED,
|
||||
exports.CONNECTION_POOL_CLOSED,
|
||||
exports.CONNECTION_CREATED,
|
||||
exports.CONNECTION_READY,
|
||||
exports.CONNECTION_CLOSED,
|
||||
exports.CONNECTION_CHECK_OUT_STARTED,
|
||||
exports.CONNECTION_CHECK_OUT_FAILED,
|
||||
exports.CONNECTION_CHECKED_OUT,
|
||||
exports.CONNECTION_CHECKED_IN
|
||||
]);
|
||||
/** @public */
|
||||
exports.TOPOLOGY_EVENTS = Object.freeze([
|
||||
exports.SERVER_OPENING,
|
||||
exports.SERVER_CLOSED,
|
||||
exports.SERVER_DESCRIPTION_CHANGED,
|
||||
exports.TOPOLOGY_OPENING,
|
||||
exports.TOPOLOGY_CLOSED,
|
||||
exports.TOPOLOGY_DESCRIPTION_CHANGED,
|
||||
exports.ERROR,
|
||||
exports.TIMEOUT,
|
||||
exports.CLOSE
|
||||
]);
|
||||
/** @public */
|
||||
exports.APM_EVENTS = Object.freeze([
|
||||
exports.COMMAND_STARTED,
|
||||
exports.COMMAND_SUCCEEDED,
|
||||
exports.COMMAND_FAILED
|
||||
]);
|
||||
/**
|
||||
* All events that we relay to the `Topology`
|
||||
* @internal
|
||||
*/
|
||||
exports.SERVER_RELAY_EVENTS = Object.freeze([
|
||||
exports.SERVER_HEARTBEAT_STARTED,
|
||||
exports.SERVER_HEARTBEAT_SUCCEEDED,
|
||||
exports.SERVER_HEARTBEAT_FAILED,
|
||||
exports.COMMAND_STARTED,
|
||||
exports.COMMAND_SUCCEEDED,
|
||||
exports.COMMAND_FAILED,
|
||||
...exports.CMAP_EVENTS
|
||||
]);
|
||||
/**
|
||||
* All events we listen to from `Server` instances, but do not forward to the client
|
||||
* @internal
|
||||
*/
|
||||
exports.LOCAL_SERVER_EVENTS = Object.freeze([
|
||||
exports.CONNECT,
|
||||
exports.DESCRIPTION_RECEIVED,
|
||||
exports.CLOSED,
|
||||
exports.ENDED
|
||||
]);
|
||||
/** @public */
|
||||
exports.MONGO_CLIENT_EVENTS = Object.freeze([
|
||||
...exports.CMAP_EVENTS,
|
||||
...exports.APM_EVENTS,
|
||||
...exports.TOPOLOGY_EVENTS,
|
||||
...exports.HEARTBEAT_EVENTS
|
||||
]);
|
||||
/**
|
||||
* @internal
|
||||
* The legacy hello command that was deprecated in MongoDB 5.0.
|
||||
*/
|
||||
exports.LEGACY_HELLO_COMMAND = 'ismaster';
|
||||
/**
|
||||
* @internal
|
||||
* The legacy hello command that was deprecated in MongoDB 5.0.
|
||||
*/
|
||||
exports.LEGACY_HELLO_COMMAND_CAMEL_CASE = 'isMaster';
|
||||
//# sourceMappingURL=constants.js.map
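
MONGO_CLIENT_EVENTS above aggregates the CMAP, APM, topology, and heartbeat event names into one frozen list. A minimal sketch (not part of the vendored file) of attaching listeners with it; the deep require path assumes the package permits importing from lib/ directly, and command events additionally require the real monitorCommands option:

const { MongoClient } = require('mongodb');
const { MONGO_CLIENT_EVENTS, COMMAND_STARTED } = require('mongodb/lib/constants');

const client = new MongoClient('mongodb://localhost:27017', { monitorCommands: true });

// One loop attaches a catch-all listener for every client-visible event name.
for (const eventName of MONGO_CLIENT_EVENTS) {
    client.on(eventName, event => console.log(eventName, event));
}

// Individual names work too; COMMAND_STARTED is the string 'commandStarted'.
client.on(COMMAND_STARTED, event => console.log(event.commandName));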
1
VISUALIZACION/node_modules/mongodb/lib/constants.js.map
generated
vendored
Executable file
@@ -0,0 +1 @@
{"version":3,"file":"constants.js","sourceRoot":"","sources":["../src/constants.ts"],"names":[],"mappings":";;;;AAAa,QAAA,2BAA2B,GAAG,mBAAmB,CAAC;AAClD,QAAA,uBAAuB,GAAG,gBAAgB,CAAC;AAC3C,QAAA,yBAAyB,GAAG,gBAAgB,CAAC;AAC7C,QAAA,sBAAsB,GAAG,cAAc,CAAC;AACxC,QAAA,yBAAyB,GAAG,MAAM,CAAC;AACnC,QAAA,oBAAoB,GAAG,WAAW,CAAC;AAEhD,SAAS;AACI,QAAA,KAAK,GAAG,OAAgB,CAAC;AACzB,QAAA,OAAO,GAAG,SAAkB,CAAC;AAC7B,QAAA,KAAK,GAAG,OAAgB,CAAC;AACzB,QAAA,IAAI,GAAG,MAAe,CAAC;AACvB,QAAA,OAAO,GAAG,SAAkB,CAAC;AAC7B,QAAA,MAAM,GAAG,QAAiB,CAAC;AAC3B,QAAA,KAAK,GAAG,OAAgB,CAAC;AACzB,QAAA,OAAO,GAAG,SAAkB,CAAC;AAC7B,QAAA,MAAM,GAAG,QAAiB,CAAC;AAC3B,QAAA,QAAQ,GAAG,UAAmB,CAAC;AAC/B,QAAA,oBAAoB,GAAG,qBAAqB,CAAC;AAC7C,QAAA,cAAc,GAAG,eAAwB,CAAC;AAC1C,QAAA,aAAa,GAAG,cAAuB,CAAC;AACxC,QAAA,0BAA0B,GAAG,0BAAmC,CAAC;AACjE,QAAA,gBAAgB,GAAG,iBAA0B,CAAC;AAC9C,QAAA,eAAe,GAAG,gBAAyB,CAAC;AAC5C,QAAA,4BAA4B,GAAG,4BAAqC,CAAC;AAClF,gBAAgB;AACH,QAAA,uBAAuB,GAAG,uBAAgC,CAAC;AACxE,gBAAgB;AACH,QAAA,sBAAsB,GAAG,sBAA+B,CAAC;AACtE,gBAAgB;AACH,QAAA,uBAAuB,GAAG,uBAAgC,CAAC;AACxE,gBAAgB;AACH,QAAA,qBAAqB,GAAG,qBAA8B,CAAC;AACpE,gBAAgB;AACH,QAAA,kBAAkB,GAAG,mBAA4B,CAAC;AAC/D,gBAAgB;AACH,QAAA,gBAAgB,GAAG,iBAA0B,CAAC;AAC3D,gBAAgB;AACH,QAAA,iBAAiB,GAAG,kBAA2B,CAAC;AAC7D,gBAAgB;AACH,QAAA,4BAA4B,GAAG,2BAAoC,CAAC;AACjF,gBAAgB;AACH,QAAA,2BAA2B,GAAG,0BAAmC,CAAC;AAC/E,gBAAgB;AACH,QAAA,sBAAsB,GAAG,sBAA+B,CAAC;AACtE,gBAAgB;AACH,QAAA,qBAAqB,GAAG,qBAA8B,CAAC;AACvD,QAAA,qBAAqB,GAAG,qBAA8B,CAAC;AACvD,QAAA,eAAe,GAAG,gBAAyB,CAAC;AAC5C,QAAA,iBAAiB,GAAG,kBAA2B,CAAC;AAChD,QAAA,cAAc,GAAG,eAAwB,CAAC;AAC1C,QAAA,wBAAwB,GAAG,wBAAiC,CAAC;AAC7D,QAAA,0BAA0B,GAAG,0BAAmC,CAAC;AACjE,QAAA,uBAAuB,GAAG,uBAAgC,CAAC;AAC3D,QAAA,QAAQ,GAAG,UAAmB,CAAC;AAC/B,QAAA,IAAI,GAAG,MAAe,CAAC;AACvB,QAAA,IAAI,GAAG,MAAe,CAAC;AACvB,QAAA,MAAM,GAAG,QAAiB,CAAC;AAC3B,QAAA,GAAG,GAAG,KAAc,CAAC;AACrB,QAAA,oBAAoB,GAAG,oBAA6B,CAAC;AAElE,cAAc;AACD,QAAA,gBAAgB,GAAG,MAAM,CAAC,MAAM,CAAC;IAC5C,gCAAwB;IACxB,kCAA0B;IAC1B,+BAAuB;CACf,CAAC,CAAC;AAEZ,cAAc;AACD,QAAA,WAAW,GAAG,MAAM,CAAC,MAAM,CAAC;IACvC,+BAAuB;IACvB,6BAAqB;IACrB,+BAAuB;IACvB,8BAAsB;IACtB,0BAAkB;IAClB,wBAAgB;IAChB,yBAAiB;IACjB,oCAA4B;IAC5B,mCAA2B;IAC3B,8BAAsB;IACtB,6BAAqB;CACb,CAAC,CAAC;AAEZ,cAAc;AACD,QAAA,eAAe,GAAG,MAAM,CAAC,MAAM,CAAC;IAC3C,sBAAc;IACd,qBAAa;IACb,kCAA0B;IAC1B,wBAAgB;IAChB,uBAAe;IACf,oCAA4B;IAC5B,aAAK;IACL,eAAO;IACP,aAAK;CACG,CAAC,CAAC;AAEZ,cAAc;AACD,QAAA,UAAU,GAAG,MAAM,CAAC,MAAM,CAAC;IACtC,uBAAe;IACf,yBAAiB;IACjB,sBAAc;CACN,CAAC,CAAC;AAEZ;;;GAGG;AACU,QAAA,mBAAmB,GAAG,MAAM,CAAC,MAAM,CAAC;IAC/C,gCAAwB;IACxB,kCAA0B;IAC1B,+BAAuB;IACvB,uBAAe;IACf,yBAAiB;IACjB,sBAAc;IACd,GAAG,mBAAW;CACN,CAAC,CAAC;AAEZ;;;GAGG;AACU,QAAA,mBAAmB,GAAG,MAAM,CAAC,MAAM,CAAC;IAC/C,eAAO;IACP,4BAAoB;IACpB,cAAM;IACN,aAAK;CACG,CAAC,CAAC;AAEZ,cAAc;AACD,QAAA,mBAAmB,GAAG,MAAM,CAAC,MAAM,CAAC;IAC/C,GAAG,mBAAW;IACd,GAAG,kBAAU;IACb,GAAG,uBAAe;IAClB,GAAG,wBAAgB;CACX,CAAC,CAAC;AAEZ;;;GAGG;AACU,QAAA,oBAAoB,GAAG,UAAU,CAAC;AAE/C;;;GAGG;AACU,QAAA,+BAA+B,GAAG,UAAU,CAAC"}
705
VISUALIZACION/node_modules/mongodb/lib/cursor/abstract_cursor.js
generated
vendored
Executable file
@@ -0,0 +1,705 @@
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.assertUninitialized = exports.AbstractCursor = exports.CURSOR_FLAGS = void 0;
|
||||
const stream_1 = require("stream");
|
||||
const util_1 = require("util");
|
||||
const bson_1 = require("../bson");
|
||||
const error_1 = require("../error");
|
||||
const mongo_types_1 = require("../mongo_types");
|
||||
const execute_operation_1 = require("../operations/execute_operation");
|
||||
const get_more_1 = require("../operations/get_more");
|
||||
const kill_cursors_1 = require("../operations/kill_cursors");
|
||||
const read_concern_1 = require("../read_concern");
|
||||
const read_preference_1 = require("../read_preference");
|
||||
const sessions_1 = require("../sessions");
|
||||
const utils_1 = require("../utils");
|
||||
/** @internal */
|
||||
const kId = Symbol('id');
|
||||
/** @internal */
|
||||
const kDocuments = Symbol('documents');
|
||||
/** @internal */
|
||||
const kServer = Symbol('server');
|
||||
/** @internal */
|
||||
const kNamespace = Symbol('namespace');
|
||||
/** @internal */
|
||||
const kClient = Symbol('client');
|
||||
/** @internal */
|
||||
const kSession = Symbol('session');
|
||||
/** @internal */
|
||||
const kOptions = Symbol('options');
|
||||
/** @internal */
|
||||
const kTransform = Symbol('transform');
|
||||
/** @internal */
|
||||
const kInitialized = Symbol('initialized');
|
||||
/** @internal */
|
||||
const kClosed = Symbol('closed');
|
||||
/** @internal */
|
||||
const kKilled = Symbol('killed');
|
||||
/** @internal */
|
||||
const kInit = Symbol('kInit');
|
||||
/** @public */
|
||||
exports.CURSOR_FLAGS = [
|
||||
'tailable',
|
||||
'oplogReplay',
|
||||
'noCursorTimeout',
|
||||
'awaitData',
|
||||
'exhaust',
|
||||
'partial'
|
||||
];
|
||||
/** @public */
|
||||
class AbstractCursor extends mongo_types_1.TypedEventEmitter {
|
||||
/** @internal */
|
||||
constructor(client, namespace, options = {}) {
|
||||
super();
|
||||
if (!client.s.isMongoClient) {
|
||||
throw new error_1.MongoRuntimeError('Cursor must be constructed with MongoClient');
|
||||
}
|
||||
this[kClient] = client;
|
||||
this[kNamespace] = namespace;
|
||||
this[kId] = null;
|
||||
this[kDocuments] = new utils_1.List();
|
||||
this[kInitialized] = false;
|
||||
this[kClosed] = false;
|
||||
this[kKilled] = false;
|
||||
this[kOptions] = {
|
||||
readPreference: options.readPreference && options.readPreference instanceof read_preference_1.ReadPreference
|
||||
? options.readPreference
|
||||
: read_preference_1.ReadPreference.primary,
|
||||
...(0, bson_1.pluckBSONSerializeOptions)(options)
|
||||
};
|
||||
const readConcern = read_concern_1.ReadConcern.fromOptions(options);
|
||||
if (readConcern) {
|
||||
this[kOptions].readConcern = readConcern;
|
||||
}
|
||||
if (typeof options.batchSize === 'number') {
|
||||
this[kOptions].batchSize = options.batchSize;
|
||||
}
|
||||
// we check for undefined specifically here to allow falsy values
|
||||
// eslint-disable-next-line no-restricted-syntax
|
||||
if (options.comment !== undefined) {
|
||||
this[kOptions].comment = options.comment;
|
||||
}
|
||||
if (typeof options.maxTimeMS === 'number') {
|
||||
this[kOptions].maxTimeMS = options.maxTimeMS;
|
||||
}
|
||||
if (typeof options.maxAwaitTimeMS === 'number') {
|
||||
this[kOptions].maxAwaitTimeMS = options.maxAwaitTimeMS;
|
||||
}
|
||||
if (options.session instanceof sessions_1.ClientSession) {
|
||||
this[kSession] = options.session;
|
||||
}
|
||||
else {
|
||||
this[kSession] = this[kClient].startSession({ owner: this, explicit: false });
|
||||
}
|
||||
}
|
||||
get id() {
|
||||
return this[kId] ?? undefined;
|
||||
}
|
||||
/** @internal */
|
||||
get client() {
|
||||
return this[kClient];
|
||||
}
|
||||
/** @internal */
|
||||
get server() {
|
||||
return this[kServer];
|
||||
}
|
||||
get namespace() {
|
||||
return this[kNamespace];
|
||||
}
|
||||
get readPreference() {
|
||||
return this[kOptions].readPreference;
|
||||
}
|
||||
get readConcern() {
|
||||
return this[kOptions].readConcern;
|
||||
}
|
||||
/** @internal */
|
||||
get session() {
|
||||
return this[kSession];
|
||||
}
|
||||
set session(clientSession) {
|
||||
this[kSession] = clientSession;
|
||||
}
|
||||
/** @internal */
|
||||
get cursorOptions() {
|
||||
return this[kOptions];
|
||||
}
|
||||
get closed() {
|
||||
return this[kClosed];
|
||||
}
|
||||
get killed() {
|
||||
return this[kKilled];
|
||||
}
|
||||
get loadBalanced() {
|
||||
return !!this[kClient].topology?.loadBalanced;
|
||||
}
|
||||
/** Returns current buffered documents length */
|
||||
bufferedCount() {
|
||||
return this[kDocuments].length;
|
||||
}
|
||||
/** Returns current buffered documents */
|
||||
readBufferedDocuments(number) {
|
||||
const bufferedDocs = [];
|
||||
const documentsToRead = Math.min(number ?? this[kDocuments].length, this[kDocuments].length);
|
||||
for (let count = 0; count < documentsToRead; count++) {
|
||||
const document = this[kDocuments].shift();
|
||||
if (document != null) {
|
||||
bufferedDocs.push(document);
|
||||
}
|
||||
}
|
||||
return bufferedDocs;
|
||||
}
|
||||
async *[Symbol.asyncIterator]() {
|
||||
if (this.closed) {
|
||||
return;
|
||||
}
|
||||
try {
|
||||
while (true) {
|
||||
const document = await this.next();
|
||||
// Intentional strict null check, because users can map cursors to falsey values.
|
||||
// We allow mapping to all values except for null.
|
||||
// eslint-disable-next-line no-restricted-syntax
|
||||
if (document === null) {
|
||||
if (!this.closed) {
|
||||
const message = 'Cursor returned a `null` document, but the cursor is not exhausted. Mapping documents to `null` is not supported in the cursor transform.';
|
||||
await cleanupCursorAsync(this, { needsToEmitClosed: true }).catch(() => null);
|
||||
throw new error_1.MongoAPIError(message);
|
||||
}
|
||||
break;
|
||||
}
|
||||
yield document;
|
||||
if (this[kId] === bson_1.Long.ZERO) {
|
||||
// Cursor exhausted
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
finally {
|
||||
// Only close the cursor if it has not already been closed. This finally clause handles
|
||||
// the case when a user would break out of a for await of loop early.
|
||||
if (!this.closed) {
|
||||
await this.close().catch(() => null);
|
||||
}
|
||||
}
|
||||
}
|
||||
stream(options) {
|
||||
if (options?.transform) {
|
||||
const transform = options.transform;
|
||||
const readable = new ReadableCursorStream(this);
|
||||
return readable.pipe(new stream_1.Transform({
|
||||
objectMode: true,
|
||||
highWaterMark: 1,
|
||||
transform(chunk, _, callback) {
|
||||
try {
|
||||
const transformed = transform(chunk);
|
||||
callback(undefined, transformed);
|
||||
}
|
||||
catch (err) {
|
||||
callback(err);
|
||||
}
|
||||
}
|
||||
}));
|
||||
}
|
||||
return new ReadableCursorStream(this);
|
||||
}
|
||||
async hasNext() {
|
||||
if (this[kId] === bson_1.Long.ZERO) {
|
||||
return false;
|
||||
}
|
||||
if (this[kDocuments].length !== 0) {
|
||||
return true;
|
||||
}
|
||||
const doc = await next(this, { blocking: true, transform: false });
|
||||
if (doc) {
|
||||
this[kDocuments].unshift(doc);
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
/** Get the next available document from the cursor, returns null if no more documents are available. */
|
||||
async next() {
|
||||
if (this[kId] === bson_1.Long.ZERO) {
|
||||
throw new error_1.MongoCursorExhaustedError();
|
||||
}
|
||||
return next(this, { blocking: true, transform: true });
|
||||
}
|
||||
/**
|
||||
* Try to get the next available document from the cursor or `null` if an empty batch is returned
|
||||
*/
|
||||
async tryNext() {
|
||||
if (this[kId] === bson_1.Long.ZERO) {
|
||||
throw new error_1.MongoCursorExhaustedError();
|
||||
}
|
||||
return next(this, { blocking: false, transform: true });
|
||||
}
|
||||
/**
|
||||
* Iterates over all the documents for this cursor using the iterator, callback pattern.
|
||||
*
|
||||
* If the iterator returns `false`, iteration will stop.
|
||||
*
|
||||
* @param iterator - The iteration callback.
|
||||
* @deprecated - Will be removed in a future release. Use for await...of instead.
|
||||
*/
|
||||
async forEach(iterator) {
|
||||
if (typeof iterator !== 'function') {
|
||||
throw new error_1.MongoInvalidArgumentError('Argument "iterator" must be a function');
|
||||
}
|
||||
for await (const document of this) {
|
||||
const result = iterator(document);
|
||||
if (result === false) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
async close() {
|
||||
const needsToEmitClosed = !this[kClosed];
|
||||
this[kClosed] = true;
|
||||
await cleanupCursorAsync(this, { needsToEmitClosed });
|
||||
}
|
||||
/**
|
||||
* Returns an array of documents. The caller is responsible for making sure that there
|
||||
* is enough memory to store the results. Note that the array only contains partial
|
||||
* results when this cursor had been previously accessed. In that case,
|
||||
* cursor.rewind() can be used to reset the cursor.
|
||||
*/
|
||||
async toArray() {
|
||||
const array = [];
|
||||
for await (const document of this) {
|
||||
array.push(document);
|
||||
}
|
||||
return array;
|
||||
}
|
||||
/**
|
||||
* Add a cursor flag to the cursor
|
||||
*
|
||||
* @param flag - The flag to set, must be one of following ['tailable', 'oplogReplay', 'noCursorTimeout', 'awaitData', 'partial' -.
|
||||
* @param value - The flag boolean value.
|
||||
*/
|
||||
addCursorFlag(flag, value) {
|
||||
assertUninitialized(this);
|
||||
if (!exports.CURSOR_FLAGS.includes(flag)) {
|
||||
throw new error_1.MongoInvalidArgumentError(`Flag ${flag} is not one of ${exports.CURSOR_FLAGS}`);
|
||||
}
|
||||
if (typeof value !== 'boolean') {
|
||||
throw new error_1.MongoInvalidArgumentError(`Flag ${flag} must be a boolean value`);
|
||||
}
|
||||
this[kOptions][flag] = value;
|
||||
return this;
|
||||
}
|
||||
/**
|
||||
* Map all documents using the provided function
|
||||
* If there is a transform set on the cursor, that will be called first and the result passed to
|
||||
* this function's transform.
|
||||
*
|
||||
* @remarks
|
||||
*
|
||||
* **Note** Cursors use `null` internally to indicate that there are no more documents in the cursor. Providing a mapping
|
||||
* function that maps values to `null` will result in the cursor closing itself before it has finished iterating
|
||||
* all documents. This will **not** result in a memory leak, just surprising behavior. For example:
|
||||
*
|
||||
* ```typescript
|
||||
* const cursor = collection.find({});
|
||||
* cursor.map(() => null);
|
||||
*
|
||||
* const documents = await cursor.toArray();
|
||||
* // documents is always [], regardless of how many documents are in the collection.
|
||||
* ```
|
||||
*
|
||||
* Other falsey values are allowed:
|
||||
*
|
||||
* ```typescript
|
||||
* const cursor = collection.find({});
|
||||
* cursor.map(() => '');
|
||||
*
|
||||
* const documents = await cursor.toArray();
|
||||
* // documents is now an array of empty strings
|
||||
* ```
|
||||
*
|
||||
* **Note for Typescript Users:** adding a transform changes the return type of the iteration of this cursor,
|
||||
* it **does not** return a new instance of a cursor. This means when calling map,
|
||||
* you should always assign the result to a new variable in order to get a correctly typed cursor variable.
|
||||
* Take note of the following example:
|
||||
*
|
||||
* @example
|
||||
* ```typescript
|
||||
* const cursor: FindCursor<Document> = coll.find();
|
||||
* const mappedCursor: FindCursor<number> = cursor.map(doc => Object.keys(doc).length);
|
||||
* const keyCounts: number[] = await mappedCursor.toArray(); // cursor.toArray() still returns Document[]
|
||||
* ```
|
||||
* @param transform - The mapping transformation method.
|
||||
*/
|
||||
map(transform) {
|
||||
assertUninitialized(this);
|
||||
const oldTransform = this[kTransform]; // TODO(NODE-3283): Improve transform typing
|
||||
if (oldTransform) {
|
||||
this[kTransform] = doc => {
|
||||
return transform(oldTransform(doc));
|
||||
};
|
||||
}
|
||||
else {
|
||||
this[kTransform] = transform;
|
||||
}
|
||||
return this;
|
||||
}
|
||||
/**
|
||||
* Set the ReadPreference for the cursor.
|
||||
*
|
||||
* @param readPreference - The new read preference for the cursor.
|
||||
*/
|
||||
withReadPreference(readPreference) {
|
||||
assertUninitialized(this);
|
||||
if (readPreference instanceof read_preference_1.ReadPreference) {
|
||||
this[kOptions].readPreference = readPreference;
|
||||
}
|
||||
else if (typeof readPreference === 'string') {
|
||||
this[kOptions].readPreference = read_preference_1.ReadPreference.fromString(readPreference);
|
||||
}
|
||||
else {
|
||||
throw new error_1.MongoInvalidArgumentError(`Invalid read preference: ${readPreference}`);
|
||||
}
|
||||
return this;
|
||||
}
|
||||
/**
|
||||
* Set the ReadPreference for the cursor.
|
||||
*
|
||||
* @param readPreference - The new read preference for the cursor.
|
||||
*/
|
||||
    withReadConcern(readConcern) {
        assertUninitialized(this);
        const resolvedReadConcern = read_concern_1.ReadConcern.fromOptions({ readConcern });
        if (resolvedReadConcern) {
            this[kOptions].readConcern = resolvedReadConcern;
        }
        return this;
    }
    /**
     * Set a maxTimeMS on the cursor query, allowing for hard timeout limits on queries (Only supported on MongoDB 2.6 or higher)
     *
     * @param value - Number of milliseconds to wait before aborting the query.
     */
    maxTimeMS(value) {
        assertUninitialized(this);
        if (typeof value !== 'number') {
            throw new error_1.MongoInvalidArgumentError('Argument for maxTimeMS must be a number');
        }
        this[kOptions].maxTimeMS = value;
        return this;
    }
    /**
     * Set the batch size for the cursor.
     *
     * @param value - The number of documents to return per batch. See {@link https://www.mongodb.com/docs/manual/reference/command/find/|find command documentation}.
     */
    batchSize(value) {
        assertUninitialized(this);
        if (this[kOptions].tailable) {
            throw new error_1.MongoTailableCursorError('Tailable cursor does not support batchSize');
        }
        if (typeof value !== 'number') {
            throw new error_1.MongoInvalidArgumentError('Operation "batchSize" requires an integer');
        }
        this[kOptions].batchSize = value;
        return this;
    }
    /**
     * Rewind this cursor to its uninitialized state. Any options that are present on the cursor will
     * remain in effect. Iterating this cursor will cause new queries to be sent to the server, even
     * if the resultant data has already been retrieved by this cursor.
     */
    rewind() {
        if (!this[kInitialized]) {
            return;
        }
        this[kId] = null;
        this[kDocuments].clear();
        this[kClosed] = false;
        this[kKilled] = false;
        this[kInitialized] = false;
        const session = this[kSession];
        if (session) {
            // We only want to end this session if we created it, and it hasn't ended yet
            if (session.explicit === false) {
                if (!session.hasEnded) {
                    session.endSession().catch(() => null);
                }
                this[kSession] = this.client.startSession({ owner: this, explicit: false });
            }
        }
    }
    /** @internal */
    _getMore(batchSize, callback) {
        // eslint-disable-next-line @typescript-eslint/no-non-null-assertion
        const getMoreOperation = new get_more_1.GetMoreOperation(this[kNamespace], this[kId], this[kServer], {
            ...this[kOptions],
            session: this[kSession],
            batchSize
        });
        (0, execute_operation_1.executeOperation)(this[kClient], getMoreOperation, callback);
    }
    /**
     * @internal
     *
     * This function is exposed for the unified test runner's createChangeStream
     * operation. We cannot refactor to use the abstract _initialize method without
     * a significant refactor.
     */
    [kInit](callback) {
        this._initialize(this[kSession], (error, state) => {
            if (state) {
                const response = state.response;
                this[kServer] = state.server;
                if (response.cursor) {
                    // TODO(NODE-2674): Preserve int64 sent from MongoDB
                    this[kId] =
                        typeof response.cursor.id === 'number'
                            ? bson_1.Long.fromNumber(response.cursor.id)
                            : typeof response.cursor.id === 'bigint'
                                ? bson_1.Long.fromBigInt(response.cursor.id)
                                : response.cursor.id;
                    if (response.cursor.ns) {
                        this[kNamespace] = (0, utils_1.ns)(response.cursor.ns);
                    }
                    this[kDocuments].pushMany(response.cursor.firstBatch);
                }
                // When server responses return without a cursor document, we close this cursor
                // and return the raw server response. This is often the case for explain commands
                // for example
                if (this[kId] == null) {
                    this[kId] = bson_1.Long.ZERO;
                    // TODO(NODE-3286): ExecutionResult needs to accept a generic parameter
                    this[kDocuments].push(state.response);
                }
            }
            // the cursor is now initialized, even if an error occurred or it is dead
            this[kInitialized] = true;
            if (error) {
                return cleanupCursor(this, { error }, () => callback(error, undefined));
            }
            if (cursorIsDead(this)) {
                return cleanupCursor(this, undefined, () => callback());
            }
            callback();
        });
    }
}
/** @event */
AbstractCursor.CLOSE = 'close';
exports.AbstractCursor = AbstractCursor;
/**
 * @param cursor - the cursor on which to call `next`
 * @param blocking - a boolean indicating whether or not the cursor should `block` until data
 * is available. Generally, this flag is set to `false` because if the getMore returns no documents,
 * the cursor has been exhausted. In certain scenarios (ChangeStreams, tailable await cursors and
 * `tryNext`, for example) blocking is necessary because a getMore returning no documents does
 * not indicate the end of the cursor.
 * @param transform - if true, the cursor's transform function is applied to the result document (if the transform exists)
 * @returns the next document in the cursor, or `null`. When `blocking` is `true`, a `null` document means
 * the cursor has been exhausted. Otherwise, it means that there is no document available in the cursor's buffer.
 */
async function next(cursor, { blocking, transform }) {
    const cursorId = cursor[kId];
    if (cursor.closed) {
        return null;
    }
    if (cursor[kDocuments].length !== 0) {
        const doc = cursor[kDocuments].shift();
        if (doc != null && transform && cursor[kTransform]) {
            try {
                return cursor[kTransform](doc);
            }
            catch (error) {
                await cleanupCursorAsync(cursor, { error, needsToEmitClosed: true }).catch(() => {
                    // `cleanupCursorAsync` should never throw, but if it does we want to throw the original
                    // error instead.
                });
                throw error;
            }
        }
        return doc;
    }
    if (cursorId == null) {
        // All cursors must operate within a session, one must be made implicitly if not explicitly provided
        const init = (0, util_1.promisify)(cb => cursor[kInit](cb));
        await init();
        return next(cursor, { blocking, transform });
    }
    if (cursorIsDead(cursor)) {
        // if the cursor is dead, we clean it up
        // cleanupCursorAsync should never throw, but if it does it indicates a bug in the driver
        // and we should surface the error
        await cleanupCursorAsync(cursor, {});
        return null;
    }
    // otherwise need to call getMore
    const batchSize = cursor[kOptions].batchSize || 1000;
    const getMore = (0, util_1.promisify)((batchSize, cb) => cursor._getMore(batchSize, cb));
    let response;
    try {
        response = await getMore(batchSize);
    }
    catch (error) {
        if (error) {
            await cleanupCursorAsync(cursor, { error }).catch(() => {
                // `cleanupCursorAsync` should never throw, but if it does we want to throw the original
                // error instead.
            });
            throw error;
        }
    }
    if (response) {
        const cursorId = typeof response.cursor.id === 'number'
            ? bson_1.Long.fromNumber(response.cursor.id)
            : typeof response.cursor.id === 'bigint'
                ? bson_1.Long.fromBigInt(response.cursor.id)
                : response.cursor.id;
        cursor[kDocuments].pushMany(response.cursor.nextBatch);
        cursor[kId] = cursorId;
    }
    if (cursorIsDead(cursor)) {
        // If we successfully received a response from a cursor BUT the cursor indicates that it is exhausted,
        // we intentionally clean up the cursor to release its session back into the pool before the cursor
        // is iterated. This prevents a cursor that is exhausted on the server from holding
        // onto a session indefinitely until the AbstractCursor is iterated.
        //
        // cleanupCursorAsync should never throw, but if it does it indicates a bug in the driver
        // and we should surface the error
        await cleanupCursorAsync(cursor, {});
    }
    if (cursor[kDocuments].length === 0 && blocking === false) {
        return null;
    }
    return next(cursor, { blocking, transform });
}
function cursorIsDead(cursor) {
    const cursorId = cursor[kId];
    return !!cursorId && cursorId.isZero();
}
const cleanupCursorAsync = (0, util_1.promisify)(cleanupCursor);
function cleanupCursor(cursor, options, callback) {
    const cursorId = cursor[kId];
    const cursorNs = cursor[kNamespace];
    const server = cursor[kServer];
    const session = cursor[kSession];
    const error = options?.error;
    // Cursors only emit closed events once the client-side cursor has been exhausted fully or there
    // was an error. Notably, when the server returns a cursor id of 0 and a non-empty batch, we
    // cleanup the cursor but don't emit a `close` event.
    const needsToEmitClosed = options?.needsToEmitClosed ?? cursor[kDocuments].length === 0;
    if (error) {
        if (cursor.loadBalanced && error instanceof error_1.MongoNetworkError) {
            return completeCleanup();
        }
    }
    if (cursorId == null || server == null || cursorId.isZero() || cursorNs == null) {
        if (needsToEmitClosed) {
            cursor[kClosed] = true;
            cursor[kId] = bson_1.Long.ZERO;
            cursor.emit(AbstractCursor.CLOSE);
        }
        if (session) {
            if (session.owner === cursor) {
                session.endSession({ error }).finally(() => {
                    callback();
                });
                return;
            }
            if (!session.inTransaction()) {
                (0, sessions_1.maybeClearPinnedConnection)(session, { error });
            }
        }
        return callback();
    }
    function completeCleanup() {
        if (session) {
            if (session.owner === cursor) {
                session.endSession({ error }).finally(() => {
                    cursor.emit(AbstractCursor.CLOSE);
                    callback();
                });
                return;
            }
            if (!session.inTransaction()) {
                (0, sessions_1.maybeClearPinnedConnection)(session, { error });
            }
        }
        cursor.emit(AbstractCursor.CLOSE);
        return callback();
    }
    cursor[kKilled] = true;
    if (session.hasEnded) {
        return completeCleanup();
    }
    (0, execute_operation_1.executeOperation)(cursor[kClient], new kill_cursors_1.KillCursorsOperation(cursorId, cursorNs, server, { session }))
        .catch(() => null)
        .finally(completeCleanup);
}
/** @internal */
function assertUninitialized(cursor) {
    if (cursor[kInitialized]) {
        throw new error_1.MongoCursorInUseError();
    }
}
exports.assertUninitialized = assertUninitialized;
class ReadableCursorStream extends stream_1.Readable {
    constructor(cursor) {
        super({
            objectMode: true,
            autoDestroy: false,
            highWaterMark: 1
        });
        this._readInProgress = false;
        this._cursor = cursor;
    }
    // eslint-disable-next-line @typescript-eslint/no-unused-vars
    _read(size) {
        if (!this._readInProgress) {
            this._readInProgress = true;
            this._readNext();
        }
    }
    _destroy(error, callback) {
        this._cursor.close().then(() => callback(error), closeError => callback(closeError));
    }
    _readNext() {
        next(this._cursor, { blocking: true, transform: true }).then(result => {
            if (result == null) {
                this.push(null);
            }
            else if (this.destroyed) {
                this._cursor.close().catch(() => null);
            }
            else {
                if (this.push(result)) {
                    return this._readNext();
                }
                this._readInProgress = false;
            }
        }, err => {
            // NOTE: This is questionable, but we have a test backing the behavior. It seems the
            // desired behavior is that a stream ends cleanly when a user explicitly closes
            // a client during iteration. Alternatively, we could do the "right" thing and
            // propagate the error message by removing this special case.
            if (err.message.match(/server is closed/)) {
                this._cursor.close().catch(() => null);
                return this.push(null);
            }
            // NOTE: This is also perhaps questionable. The rationale here is that these errors tend
            // to be "operation was interrupted", where a cursor has been closed but there is an
            // active getMore in-flight. This used to check if the cursor was killed, but once that
            // check moved into cleanup, legitimate errors would not destroy the stream. There are
            // change stream tests that specifically exercise these cases.
            if (err.message.match(/operation was interrupted/)) {
                return this.push(null);
            }
            // NOTE: The two above checks on the message of the error will cause a null to be pushed
            // to the stream, thus closing the stream before the destroy call happens. This means
            // that either of those error messages on a change stream will not get a proper
            // 'error' event to be emitted (the error passed to destroy). Change stream resumability
            // relies on that error event to be emitted to create its new cursor and thus was not
            // working on 4.4 servers because the error emitted on failover was "interrupted at
            // shutdown" while on 5.0+ it is "The server is in quiesce mode and will shut down".
            // See NODE-4475.
            return this.destroy(err);
        });
    }
}
//# sourceMappingURL=abstract_cursor.js.map
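The rewind and cleanup paths above are easiest to see from the public API. A minimal usage sketch, assuming a reachable local deployment and a `test.docs` collection (both placeholders, not from this commit):

```typescript
// Illustrative only: re-running a query with rewind(), per abstract_cursor.js above.
import { MongoClient } from 'mongodb';

async function main() {
  const client = await MongoClient.connect('mongodb://localhost:27017'); // placeholder URI
  const cursor = client.db('test').collection('docs').find({});
  const first = await cursor.toArray();  // fully iterates; the cursor is now exhausted
  cursor.rewind();                       // back to the uninitialized state, options kept
  const second = await cursor.toArray(); // issues a brand-new query to the server
  console.log(first.length === second.length); // true for an unchanged collection
  await client.close();
}

main().catch(console.error);
```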
1
VISUALIZACION/node_modules/mongodb/lib/cursor/abstract_cursor.js.map
generated
vendored
Executable file
File diff suppressed because one or more lines are too long
168
VISUALIZACION/node_modules/mongodb/lib/cursor/aggregation_cursor.js
generated
vendored
Executable file
@ -0,0 +1,168 @@
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.AggregationCursor = void 0;
|
||||
const aggregate_1 = require("../operations/aggregate");
|
||||
const execute_operation_1 = require("../operations/execute_operation");
|
||||
const utils_1 = require("../utils");
|
||||
const abstract_cursor_1 = require("./abstract_cursor");
|
||||
/** @internal */
|
||||
const kPipeline = Symbol('pipeline');
|
||||
/** @internal */
|
||||
const kOptions = Symbol('options');
|
||||
/**
 * The **AggregationCursor** class is an internal class that embodies an aggregation cursor on MongoDB,
 * allowing for iteration over the results returned from the underlying query. It supports one-by-one
 * document iteration, conversion to an array, and iteration as a Node.js stream.
 * @public
 */
class AggregationCursor extends abstract_cursor_1.AbstractCursor {
    /** @internal */
    constructor(client, namespace, pipeline = [], options = {}) {
        super(client, namespace, options);
        this[kPipeline] = pipeline;
        this[kOptions] = options;
    }
    get pipeline() {
        return this[kPipeline];
    }
    clone() {
        const clonedOptions = (0, utils_1.mergeOptions)({}, this[kOptions]);
        delete clonedOptions.session;
        return new AggregationCursor(this.client, this.namespace, this[kPipeline], {
            ...clonedOptions
        });
    }
    map(transform) {
        return super.map(transform);
    }
    /** @internal */
    _initialize(session, callback) {
        const aggregateOperation = new aggregate_1.AggregateOperation(this.namespace, this[kPipeline], {
            ...this[kOptions],
            ...this.cursorOptions,
            session
        });
        (0, execute_operation_1.executeOperation)(this.client, aggregateOperation, (err, response) => {
            if (err || response == null)
                return callback(err);
            // TODO: NODE-2882
            callback(undefined, { server: aggregateOperation.server, session, response });
        });
    }
    /** Execute the explain for the cursor */
    async explain(verbosity) {
        return (0, execute_operation_1.executeOperation)(this.client, new aggregate_1.AggregateOperation(this.namespace, this[kPipeline], {
            ...this[kOptions],
            ...this.cursorOptions,
            explain: verbosity ?? true
        }));
    }
    /** Add a group stage to the aggregation pipeline */
    group($group) {
        (0, abstract_cursor_1.assertUninitialized)(this);
        this[kPipeline].push({ $group });
        return this;
    }
    /** Add a limit stage to the aggregation pipeline */
    limit($limit) {
        (0, abstract_cursor_1.assertUninitialized)(this);
        this[kPipeline].push({ $limit });
        return this;
    }
    /** Add a match stage to the aggregation pipeline */
    match($match) {
        (0, abstract_cursor_1.assertUninitialized)(this);
        this[kPipeline].push({ $match });
        return this;
    }
    /** Add an out stage to the aggregation pipeline */
    out($out) {
        (0, abstract_cursor_1.assertUninitialized)(this);
        this[kPipeline].push({ $out });
        return this;
    }
    /**
     * Add a project stage to the aggregation pipeline
     *
     * @remarks
     * In order to strictly type this function you must provide an interface
     * that represents the effect of your projection on the result documents.
     *
     * By default chaining a projection to your cursor changes the returned type to the generic {@link Document} type.
     * You should specify a parameterized type to have assertions on your final results.
     *
     * @example
     * ```typescript
     * // Best way
     * const docs: AggregationCursor<{ a: number }> = cursor.project<{ a: number }>({ _id: 0, a: true });
     * // Flexible way
     * const docs: AggregationCursor<Document> = cursor.project({ _id: 0, a: true });
     * ```
     *
     * @remarks
     * In order to strictly type this function you must provide an interface
     * that represents the effect of your projection on the result documents.
     *
     * **Note for Typescript Users:** adding a transform changes the return type of the iteration of this cursor,
     * it **does not** return a new instance of a cursor. This means when calling project,
     * you should always assign the result to a new variable in order to get a correctly typed cursor variable.
     * Take note of the following example:
     *
     * @example
     * ```typescript
     * const cursor: AggregationCursor<{ a: number; b: string }> = coll.aggregate([]);
     * const projectCursor = cursor.project<{ a: number }>({ _id: 0, a: true });
     * const aPropOnlyArray: {a: number}[] = await projectCursor.toArray();
     *
     * // or always use chaining and save the final cursor
     *
     * const cursor = coll.aggregate().project<{ a: string }>({
     *   _id: 0,
     *   a: { $convert: { input: '$a', to: 'string' }
     * }});
     * ```
     */
    project($project) {
        (0, abstract_cursor_1.assertUninitialized)(this);
        this[kPipeline].push({ $project });
        return this;
    }
    /** Add a lookup stage to the aggregation pipeline */
    lookup($lookup) {
        (0, abstract_cursor_1.assertUninitialized)(this);
        this[kPipeline].push({ $lookup });
        return this;
    }
    /** Add a redact stage to the aggregation pipeline */
    redact($redact) {
        (0, abstract_cursor_1.assertUninitialized)(this);
        this[kPipeline].push({ $redact });
        return this;
    }
    /** Add a skip stage to the aggregation pipeline */
    skip($skip) {
        (0, abstract_cursor_1.assertUninitialized)(this);
        this[kPipeline].push({ $skip });
        return this;
    }
    /** Add a sort stage to the aggregation pipeline */
    sort($sort) {
        (0, abstract_cursor_1.assertUninitialized)(this);
        this[kPipeline].push({ $sort });
        return this;
    }
    /** Add an unwind stage to the aggregation pipeline */
    unwind($unwind) {
        (0, abstract_cursor_1.assertUninitialized)(this);
        this[kPipeline].push({ $unwind });
        return this;
    }
    /** Add a geoNear stage to the aggregation pipeline */
    geoNear($geoNear) {
        (0, abstract_cursor_1.assertUninitialized)(this);
        this[kPipeline].push({ $geoNear });
        return this;
    }
}
exports.AggregationCursor = AggregationCursor;
//# sourceMappingURL=aggregation_cursor.js.map
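The fluent stage builders above just push onto the internal pipeline while the cursor is still uninitialized, so they chain freely. A hypothetical chain, assuming a connected `Db` instance `db` and made-up collection/field names:

```typescript
// Illustrative sketch of the builder methods in aggregation_cursor.js above.
// 'orders', 'customerId' and 'amount' are placeholder names.
const cursor = db.collection('orders')
  .aggregate([])
  .match({ status: 'shipped' })   // pushes { $match: ... } onto the pipeline
  .group({ _id: '$customerId', total: { $sum: '$amount' } })
  .sort({ total: -1 })
  .limit(10);

console.log(cursor.pipeline); // the four stages accumulated before initialization
```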
1
VISUALIZACION/node_modules/mongodb/lib/cursor/aggregation_cursor.js.map
generated
vendored
Executable file
@ -0,0 +1 @@
{"version":3,"file":"aggregation_cursor.js","sourceRoot":"","sources":["../../src/cursor/aggregation_cursor.ts"],"names":[],"mappings":";;;AAGA,uDAAoF;AACpF,uEAAyF;AAIzF,oCAAwC;AAExC,uDAAwE;AAKxE,gBAAgB;AAChB,MAAM,SAAS,GAAG,MAAM,CAAC,UAAU,CAAC,CAAC;AACrC,gBAAgB;AAChB,MAAM,QAAQ,GAAG,MAAM,CAAC,SAAS,CAAC,CAAC;AAEnC;;;;;;GAMG;AACH,MAAa,iBAAiC,SAAQ,gCAAuB;IAM3E,gBAAgB;IAChB,YACE,MAAmB,EACnB,SAA2B,EAC3B,WAAuB,EAAE,EACzB,UAA4B,EAAE;QAE9B,KAAK,CAAC,MAAM,EAAE,SAAS,EAAE,OAAO,CAAC,CAAC;QAElC,IAAI,CAAC,SAAS,CAAC,GAAG,QAAQ,CAAC;QAC3B,IAAI,CAAC,QAAQ,CAAC,GAAG,OAAO,CAAC;IAC3B,CAAC;IAED,IAAI,QAAQ;QACV,OAAO,IAAI,CAAC,SAAS,CAAC,CAAC;IACzB,CAAC;IAED,KAAK;QACH,MAAM,aAAa,GAAG,IAAA,oBAAY,EAAC,EAAE,EAAE,IAAI,CAAC,QAAQ,CAAC,CAAC,CAAC;QACvD,OAAO,aAAa,CAAC,OAAO,CAAC;QAC7B,OAAO,IAAI,iBAAiB,CAAC,IAAI,CAAC,MAAM,EAAE,IAAI,CAAC,SAAS,EAAE,IAAI,CAAC,SAAS,CAAC,EAAE;YACzE,GAAG,aAAa;SACjB,CAAC,CAAC;IACL,CAAC;IAEQ,GAAG,CAAI,SAA8B;QAC5C,OAAO,KAAK,CAAC,GAAG,CAAC,SAAS,CAAyB,CAAC;IACtD,CAAC;IAED,gBAAgB;IAChB,WAAW,CAAC,OAAsB,EAAE,QAAmC;QACrE,MAAM,kBAAkB,GAAG,IAAI,8BAAkB,CAAC,IAAI,CAAC,SAAS,EAAE,IAAI,CAAC,SAAS,CAAC,EAAE;YACjF,GAAG,IAAI,CAAC,QAAQ,CAAC;YACjB,GAAG,IAAI,CAAC,aAAa;YACrB,OAAO;SACR,CAAC,CAAC;QAEH,IAAA,oCAAgB,EAAC,IAAI,CAAC,MAAM,EAAE,kBAAkB,EAAE,CAAC,GAAG,EAAE,QAAQ,EAAE,EAAE;YAClE,IAAI,GAAG,IAAI,QAAQ,IAAI,IAAI;gBAAE,OAAO,QAAQ,CAAC,GAAG,CAAC,CAAC;YAElD,kBAAkB;YAClB,QAAQ,CAAC,SAAS,EAAE,EAAE,MAAM,EAAE,kBAAkB,CAAC,MAAM,EAAE,OAAO,EAAE,QAAQ,EAAE,CAAC,CAAC;QAChF,CAAC,CAAC,CAAC;IACL,CAAC;IAED,yCAAyC;IACzC,KAAK,CAAC,OAAO,CAAC,SAAgC;QAC5C,OAAO,IAAA,oCAAgB,EACrB,IAAI,CAAC,MAAM,EACX,IAAI,8BAAkB,CAAC,IAAI,CAAC,SAAS,EAAE,IAAI,CAAC,SAAS,CAAC,EAAE;YACtD,GAAG,IAAI,CAAC,QAAQ,CAAC;YACjB,GAAG,IAAI,CAAC,aAAa;YACrB,OAAO,EAAE,SAAS,IAAI,IAAI;SAC3B,CAAC,CACH,CAAC;IACJ,CAAC;IAID,KAAK,CAAC,MAAgB;QACpB,IAAA,qCAAmB,EAAC,IAAI,CAAC,CAAC;QAC1B,IAAI,CAAC,SAAS,CAAC,CAAC,IAAI,CAAC,EAAE,MAAM,EAAE,CAAC,CAAC;QACjC,OAAO,IAAI,CAAC;IACd,CAAC;IAED,oDAAoD;IACpD,KAAK,CAAC,MAAc;QAClB,IAAA,qCAAmB,EAAC,IAAI,CAAC,CAAC;QAC1B,IAAI,CAAC,SAAS,CAAC,CAAC,IAAI,CAAC,EAAE,MAAM,EAAE,CAAC,CAAC;QACjC,OAAO,IAAI,CAAC;IACd,CAAC;IAED,oDAAoD;IACpD,KAAK,CAAC,MAAgB;QACpB,IAAA,qCAAmB,EAAC,IAAI,CAAC,CAAC;QAC1B,IAAI,CAAC,SAAS,CAAC,CAAC,IAAI,CAAC,EAAE,MAAM,EAAE,CAAC,CAAC;QACjC,OAAO,IAAI,CAAC;IACd,CAAC;IAED,mDAAmD;IACnD,GAAG,CAAC,IAA2C;QAC7C,IAAA,qCAAmB,EAAC,IAAI,CAAC,CAAC;QAC1B,IAAI,CAAC,SAAS,CAAC,CAAC,IAAI,CAAC,EAAE,IAAI,EAAE,CAAC,CAAC;QAC/B,OAAO,IAAI,CAAC;IACd,CAAC;IAED;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;OAwCG;IACH,OAAO,CAAgC,QAAkB;QACvD,IAAA,qCAAmB,EAAC,IAAI,CAAC,CAAC;QAC1B,IAAI,CAAC,SAAS,CAAC,CAAC,IAAI,CAAC,EAAE,QAAQ,EAAE,CAAC,CAAC;QACnC,OAAO,IAAuC,CAAC;IACjD,CAAC;IAED,qDAAqD;IACrD,MAAM,CAAC,OAAiB;QACtB,IAAA,qCAAmB,EAAC,IAAI,CAAC,CAAC;QAC1B,IAAI,CAAC,SAAS,CAAC,CAAC,IAAI,CAAC,EAAE,OAAO,EAAE,CAAC,CAAC;QAClC,OAAO,IAAI,CAAC;IACd,CAAC;IAED,qDAAqD;IACrD,MAAM,CAAC,OAAiB;QACtB,IAAA,qCAAmB,EAAC,IAAI,CAAC,CAAC;QAC1B,IAAI,CAAC,SAAS,CAAC,CAAC,IAAI,CAAC,EAAE,OAAO,EAAE,CAAC,CAAC;QAClC,OAAO,IAAI,CAAC;IACd,CAAC;IAED,mDAAmD;IACnD,IAAI,CAAC,KAAa;QAChB,IAAA,qCAAmB,EAAC,IAAI,CAAC,CAAC;QAC1B,IAAI,CAAC,SAAS,CAAC,CAAC,IAAI,CAAC,EAAE,KAAK,EAAE,CAAC,CAAC;QAChC,OAAO,IAAI,CAAC;IACd,CAAC;IAED,mDAAmD;IACnD,IAAI,CAAC,KAAW;QACd,IAAA,qCAAmB,EAAC,IAAI,CAAC,CAAC;QAC1B,IAAI,CAAC,SAAS,CAAC,CAAC,IAAI,CAAC,EAAE,KAAK,EAAE,CAAC,CAAC;QAChC,OAAO,IAAI,CAAC;IACd,CAAC;IAED,qDAAqD;IACrD,MAAM,CAAC,OAA0B;QAC/B,IAAA,qCAAmB,EAAC,IAAI,CAAC,CAAC;QAC1B,IAAI,CAAC,SAAS,CAAC,CAAC,IAAI,CAAC,EAAE,OAAO,EAAE,CAAC,CAAC;QAClC,OAAO,IAAI,CAAC;IACd,CAAC;IAED,sDAAsD;IACtD,OAAO,CAAC,QAAkB;QACxB,IAAA,qCAAmB,EAAC,IAAI,CAAC,CAAC;QAC1B,IAAI,CAAC,SAAS,CAAC
,CAAC,IAAI,CAAC,EAAE,QAAQ,EAAE,CAAC,CAAC;QACnC,OAAO,IAAI,CAAC;IACd,CAAC;CACF;AApLD,8CAoLC"}
115
VISUALIZACION/node_modules/mongodb/lib/cursor/change_stream_cursor.js
generated
vendored
Executable file
@ -0,0 +1,115 @@
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.ChangeStreamCursor = void 0;
|
||||
const change_stream_1 = require("../change_stream");
|
||||
const constants_1 = require("../constants");
|
||||
const aggregate_1 = require("../operations/aggregate");
|
||||
const execute_operation_1 = require("../operations/execute_operation");
|
||||
const utils_1 = require("../utils");
|
||||
const abstract_cursor_1 = require("./abstract_cursor");
|
||||
/** @internal */
|
||||
class ChangeStreamCursor extends abstract_cursor_1.AbstractCursor {
|
||||
constructor(client, namespace, pipeline = [], options = {}) {
|
||||
super(client, namespace, options);
|
||||
this.pipeline = pipeline;
|
||||
this.options = options;
|
||||
this._resumeToken = null;
|
||||
this.startAtOperationTime = options.startAtOperationTime;
|
||||
if (options.startAfter) {
|
||||
this.resumeToken = options.startAfter;
|
||||
}
|
||||
else if (options.resumeAfter) {
|
||||
this.resumeToken = options.resumeAfter;
|
||||
}
|
||||
}
|
||||
set resumeToken(token) {
|
||||
this._resumeToken = token;
|
||||
this.emit(change_stream_1.ChangeStream.RESUME_TOKEN_CHANGED, token);
|
||||
}
|
||||
get resumeToken() {
|
||||
return this._resumeToken;
|
||||
}
|
||||
get resumeOptions() {
|
||||
const options = {
|
||||
...this.options
|
||||
};
|
||||
for (const key of ['resumeAfter', 'startAfter', 'startAtOperationTime']) {
|
||||
delete options[key];
|
||||
}
|
||||
if (this.resumeToken != null) {
|
||||
if (this.options.startAfter && !this.hasReceived) {
|
||||
options.startAfter = this.resumeToken;
|
||||
}
|
||||
else {
|
||||
options.resumeAfter = this.resumeToken;
|
||||
}
|
||||
}
|
||||
else if (this.startAtOperationTime != null && (0, utils_1.maxWireVersion)(this.server) >= 7) {
|
||||
options.startAtOperationTime = this.startAtOperationTime;
|
||||
}
|
||||
return options;
|
||||
}
|
||||
cacheResumeToken(resumeToken) {
|
||||
if (this.bufferedCount() === 0 && this.postBatchResumeToken) {
|
||||
this.resumeToken = this.postBatchResumeToken;
|
||||
}
|
||||
else {
|
||||
this.resumeToken = resumeToken;
|
||||
}
|
||||
this.hasReceived = true;
|
||||
}
|
||||
_processBatch(response) {
|
||||
const cursor = response.cursor;
|
||||
if (cursor.postBatchResumeToken) {
|
||||
this.postBatchResumeToken = response.cursor.postBatchResumeToken;
|
||||
const batch = 'firstBatch' in response.cursor ? response.cursor.firstBatch : response.cursor.nextBatch;
|
||||
if (batch.length === 0) {
|
||||
this.resumeToken = cursor.postBatchResumeToken;
|
||||
}
|
||||
}
|
||||
}
|
||||
clone() {
|
||||
return new ChangeStreamCursor(this.client, this.namespace, this.pipeline, {
|
||||
...this.cursorOptions
|
||||
});
|
||||
}
|
||||
_initialize(session, callback) {
|
||||
const aggregateOperation = new aggregate_1.AggregateOperation(this.namespace, this.pipeline, {
|
||||
...this.cursorOptions,
|
||||
...this.options,
|
||||
session
|
||||
});
|
||||
(0, execute_operation_1.executeOperation)(session.client, aggregateOperation, (err, response) => {
|
||||
if (err || response == null) {
|
||||
return callback(err);
|
||||
}
|
||||
const server = aggregateOperation.server;
|
||||
this.maxWireVersion = (0, utils_1.maxWireVersion)(server);
|
||||
if (this.startAtOperationTime == null &&
|
||||
this.resumeAfter == null &&
|
||||
this.startAfter == null &&
|
||||
this.maxWireVersion >= 7) {
|
||||
this.startAtOperationTime = response.operationTime;
|
||||
}
|
||||
this._processBatch(response);
|
||||
this.emit(constants_1.INIT, response);
|
||||
this.emit(constants_1.RESPONSE);
|
||||
// TODO: NODE-2882
|
||||
callback(undefined, { server, session, response });
|
||||
});
|
||||
}
|
||||
_getMore(batchSize, callback) {
|
||||
super._getMore(batchSize, (err, response) => {
|
||||
if (err) {
|
||||
return callback(err);
|
||||
}
|
||||
this.maxWireVersion = (0, utils_1.maxWireVersion)(this.server);
|
||||
this._processBatch(response);
|
||||
this.emit(change_stream_1.ChangeStream.MORE, response);
|
||||
this.emit(change_stream_1.ChangeStream.RESPONSE);
|
||||
callback(err, response);
|
||||
});
|
||||
}
|
||||
}
|
||||
exports.ChangeStreamCursor = ChangeStreamCursor;
|
||||
//# sourceMappingURL=change_stream_cursor.js.map
|
||||
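The `resumeOptions` getter above is the core of resumability: `startAfter` is only replayed until the first event has been received (`hasReceived`), after which the cached token is sent as `resumeAfter`. A sketch from the public surface, assuming a `db` handle and a placeholder collection name:

```typescript
// Illustrative: the public ChangeStream sits on top of the ChangeStreamCursor
// above and exposes the token that cacheResumeToken() stored.
const changeStream = db.collection('orders').watch([]);

changeStream.on('change', event => {
  // event._id is the resume token; the cursor caches it for reconnects
  console.log('latest resume token:', changeStream.resumeToken);
});
```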
1
VISUALIZACION/node_modules/mongodb/lib/cursor/change_stream_cursor.js.map
generated
vendored
Executable file
@ -0,0 +1 @@
{"version":3,"file":"change_stream_cursor.js","sourceRoot":"","sources":["../../src/cursor/change_stream_cursor.ts"],"names":[],"mappings":";;;AACA,oDAM0B;AAC1B,4CAA8C;AAG9C,uDAA6D;AAE7D,uEAAyF;AAEzF,oCAAgF;AAChF,uDAA+E;AAwB/E,gBAAgB;AAChB,MAAa,kBAGX,SAAQ,gCAA2C;IAkBnD,YACE,MAAmB,EACnB,SAA2B,EAC3B,WAAuB,EAAE,EACzB,UAAqC,EAAE;QAEvC,KAAK,CAAC,MAAM,EAAE,SAAS,EAAE,OAAO,CAAC,CAAC;QAElC,IAAI,CAAC,QAAQ,GAAG,QAAQ,CAAC;QACzB,IAAI,CAAC,OAAO,GAAG,OAAO,CAAC;QACvB,IAAI,CAAC,YAAY,GAAG,IAAI,CAAC;QACzB,IAAI,CAAC,oBAAoB,GAAG,OAAO,CAAC,oBAAoB,CAAC;QAEzD,IAAI,OAAO,CAAC,UAAU,EAAE;YACtB,IAAI,CAAC,WAAW,GAAG,OAAO,CAAC,UAAU,CAAC;SACvC;aAAM,IAAI,OAAO,CAAC,WAAW,EAAE;YAC9B,IAAI,CAAC,WAAW,GAAG,OAAO,CAAC,WAAW,CAAC;SACxC;IACH,CAAC;IAED,IAAI,WAAW,CAAC,KAAkB;QAChC,IAAI,CAAC,YAAY,GAAG,KAAK,CAAC;QAC1B,IAAI,CAAC,IAAI,CAAC,4BAAY,CAAC,oBAAoB,EAAE,KAAK,CAAC,CAAC;IACtD,CAAC;IAED,IAAI,WAAW;QACb,OAAO,IAAI,CAAC,YAAY,CAAC;IAC3B,CAAC;IAED,IAAI,aAAa;QACf,MAAM,OAAO,GAA8B;YACzC,GAAG,IAAI,CAAC,OAAO;SAChB,CAAC;QAEF,KAAK,MAAM,GAAG,IAAI,CAAC,aAAa,EAAE,YAAY,EAAE,sBAAsB,CAAU,EAAE;YAChF,OAAO,OAAO,CAAC,GAAG,CAAC,CAAC;SACrB;QAED,IAAI,IAAI,CAAC,WAAW,IAAI,IAAI,EAAE;YAC5B,IAAI,IAAI,CAAC,OAAO,CAAC,UAAU,IAAI,CAAC,IAAI,CAAC,WAAW,EAAE;gBAChD,OAAO,CAAC,UAAU,GAAG,IAAI,CAAC,WAAW,CAAC;aACvC;iBAAM;gBACL,OAAO,CAAC,WAAW,GAAG,IAAI,CAAC,WAAW,CAAC;aACxC;SACF;aAAM,IAAI,IAAI,CAAC,oBAAoB,IAAI,IAAI,IAAI,IAAA,sBAAc,EAAC,IAAI,CAAC,MAAM,CAAC,IAAI,CAAC,EAAE;YAChF,OAAO,CAAC,oBAAoB,GAAG,IAAI,CAAC,oBAAoB,CAAC;SAC1D;QAED,OAAO,OAAO,CAAC;IACjB,CAAC;IAED,gBAAgB,CAAC,WAAwB;QACvC,IAAI,IAAI,CAAC,aAAa,EAAE,KAAK,CAAC,IAAI,IAAI,CAAC,oBAAoB,EAAE;YAC3D,IAAI,CAAC,WAAW,GAAG,IAAI,CAAC,oBAAoB,CAAC;SAC9C;aAAM;YACL,IAAI,CAAC,WAAW,GAAG,WAAW,CAAC;SAChC;QACD,IAAI,CAAC,WAAW,GAAG,IAAI,CAAC;IAC1B,CAAC;IAED,aAAa,CAAC,QAAiD;QAC7D,MAAM,MAAM,GAAG,QAAQ,CAAC,MAAM,CAAC;QAC/B,IAAI,MAAM,CAAC,oBAAoB,EAAE;YAC/B,IAAI,CAAC,oBAAoB,GAAG,QAAQ,CAAC,MAAM,CAAC,oBAAoB,CAAC;YAEjE,MAAM,KAAK,GACT,YAAY,IAAI,QAAQ,CAAC,MAAM,CAAC,CAAC,CAAC,QAAQ,CAAC,MAAM,CAAC,UAAU,CAAC,CAAC,CAAC,QAAQ,CAAC,MAAM,CAAC,SAAS,CAAC;YAC3F,IAAI,KAAK,CAAC,MAAM,KAAK,CAAC,EAAE;gBACtB,IAAI,CAAC,WAAW,GAAG,MAAM,CAAC,oBAAoB,CAAC;aAChD;SACF;IACH,CAAC;IAED,KAAK;QACH,OAAO,IAAI,kBAAkB,CAAC,IAAI,CAAC,MAAM,EAAE,IAAI,CAAC,SAAS,EAAE,IAAI,CAAC,QAAQ,EAAE;YACxE,GAAG,IAAI,CAAC,aAAa;SACtB,CAAC,CAAC;IACL,CAAC;IAED,WAAW,CAAC,OAAsB,EAAE,QAAmC;QACrE,MAAM,kBAAkB,GAAG,IAAI,8BAAkB,CAAC,IAAI,CAAC,SAAS,EAAE,IAAI,CAAC,QAAQ,EAAE;YAC/E,GAAG,IAAI,CAAC,aAAa;YACrB,GAAG,IAAI,CAAC,OAAO;YACf,OAAO;SACR,CAAC,CAAC;QAEH,IAAA,oCAAgB,EACd,OAAO,CAAC,MAAM,EACd,kBAAkB,EAClB,CAAC,GAAG,EAAE,QAAQ,EAAE,EAAE;YAChB,IAAI,GAAG,IAAI,QAAQ,IAAI,IAAI,EAAE;gBAC3B,OAAO,QAAQ,CAAC,GAAG,CAAC,CAAC;aACtB;YAED,MAAM,MAAM,GAAG,kBAAkB,CAAC,MAAM,CAAC;YACzC,IAAI,CAAC,cAAc,GAAG,IAAA,sBAAc,EAAC,MAAM,CAAC,CAAC;YAE7C,IACE,IAAI,CAAC,oBAAoB,IAAI,IAAI;gBACjC,IAAI,CAAC,WAAW,IAAI,IAAI;gBACxB,IAAI,CAAC,UAAU,IAAI,IAAI;gBACvB,IAAI,CAAC,cAAc,IAAI,CAAC,EACxB;gBACA,IAAI,CAAC,oBAAoB,GAAG,QAAQ,CAAC,aAAa,CAAC;aACpD;YAED,IAAI,CAAC,aAAa,CAAC,QAAQ,CAAC,CAAC;YAE7B,IAAI,CAAC,IAAI,CAAC,gBAAI,EAAE,QAAQ,CAAC,CAAC;YAC1B,IAAI,CAAC,IAAI,CAAC,oBAAQ,CAAC,CAAC;YAEpB,kBAAkB;YAClB,QAAQ,CAAC,SAAS,EAAE,EAAE,MAAM,EAAE,OAAO,EAAE,QAAQ,EAAE,CAAC,CAAC;QACrD,CAAC,CACF,CAAC;IACJ,CAAC;IAEQ,QAAQ,CAAC,SAAiB,EAAE,QAAkB;QACrD,KAAK,CAAC,QAAQ,CAAC,SAAS,EAAE,CAAC,GAAG,EAAE,QAAQ,EAAE,EAAE;YAC1C,IAAI,GAAG,EAAE;gBACP,OAAO,QAAQ,CAAC,GAAG,CAAC,CAAC;aACtB;YAED,IAAI,CAAC,cAAc,GAAG,IAAA,sBAAc,EAAC,IAAI,CAAC,MAAM,CAAC,CAAC;YAClD,IAAI,CAAC,aAAa,CAAC,QAAqE,CAAC,CAAC;YAE1F,IAAI,CAAC,IAAI,CAAC,4BAAY,CAAC,IAAI,EAAE,QAAQ,CAAC,CAAC;YACvC,IAAI,CAAC,IAAI,CAAC,4BAAY,CAAC,QAAQ,CAAC,CAAC;Y
ACjC,QAAQ,CAAC,GAAG,EAAE,QAAQ,CAAC,CAAC;QAC1B,CAAC,CAAC,CAAC;IACL,CAAC;CACF;AAxJD,gDAwJC"}
381
VISUALIZACION/node_modules/mongodb/lib/cursor/find_cursor.js
generated
vendored
Executable file
@ -0,0 +1,381 @@
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.FindCursor = exports.FLAGS = void 0;
|
||||
const error_1 = require("../error");
|
||||
const count_1 = require("../operations/count");
|
||||
const execute_operation_1 = require("../operations/execute_operation");
|
||||
const find_1 = require("../operations/find");
|
||||
const sort_1 = require("../sort");
|
||||
const utils_1 = require("../utils");
|
||||
const abstract_cursor_1 = require("./abstract_cursor");
|
||||
/** @internal */
|
||||
const kFilter = Symbol('filter');
|
||||
/** @internal */
|
||||
const kNumReturned = Symbol('numReturned');
|
||||
/** @internal */
|
||||
const kBuiltOptions = Symbol('builtOptions');
|
||||
/** @public Flags allowed for cursor */
|
||||
exports.FLAGS = [
|
||||
'tailable',
|
||||
'oplogReplay',
|
||||
'noCursorTimeout',
|
||||
'awaitData',
|
||||
'exhaust',
|
||||
'partial'
|
||||
];
|
||||
/** @public */
|
||||
class FindCursor extends abstract_cursor_1.AbstractCursor {
|
||||
/** @internal */
|
||||
constructor(client, namespace, filter = {}, options = {}) {
|
||||
super(client, namespace, options);
|
||||
this[kFilter] = filter;
|
||||
this[kBuiltOptions] = options;
|
||||
if (options.sort != null) {
|
||||
this[kBuiltOptions].sort = (0, sort_1.formatSort)(options.sort);
|
||||
}
|
||||
}
|
||||
clone() {
|
||||
const clonedOptions = (0, utils_1.mergeOptions)({}, this[kBuiltOptions]);
|
||||
delete clonedOptions.session;
|
||||
return new FindCursor(this.client, this.namespace, this[kFilter], {
|
||||
...clonedOptions
|
||||
});
|
||||
}
|
||||
map(transform) {
|
||||
return super.map(transform);
|
||||
}
|
||||
/** @internal */
|
||||
_initialize(session, callback) {
|
||||
const findOperation = new find_1.FindOperation(undefined, this.namespace, this[kFilter], {
|
||||
...this[kBuiltOptions],
|
||||
...this.cursorOptions,
|
||||
session
|
||||
});
|
||||
(0, execute_operation_1.executeOperation)(this.client, findOperation, (err, response) => {
|
||||
if (err || response == null)
|
||||
return callback(err);
|
||||
// TODO: We only need this for legacy queries that do not support `limit`, maybe
|
||||
// the value should only be saved in those cases.
|
||||
if (response.cursor) {
|
||||
this[kNumReturned] = response.cursor.firstBatch.length;
|
||||
}
|
||||
else {
|
||||
this[kNumReturned] = response.documents ? response.documents.length : 0;
|
||||
}
|
||||
// TODO: NODE-2882
|
||||
callback(undefined, { server: findOperation.server, session, response });
|
||||
});
|
||||
}
|
||||
/** @internal */
|
||||
_getMore(batchSize, callback) {
|
||||
// NOTE: this is to support client provided limits in pre-command servers
|
||||
const numReturned = this[kNumReturned];
|
||||
if (numReturned) {
|
||||
const limit = this[kBuiltOptions].limit;
|
||||
batchSize =
|
||||
limit && limit > 0 && numReturned + batchSize > limit ? limit - numReturned : batchSize;
|
||||
if (batchSize <= 0) {
|
||||
this.close().finally(() => callback());
|
||||
return;
|
||||
}
|
||||
}
|
||||
super._getMore(batchSize, (err, response) => {
|
||||
if (err)
|
||||
return callback(err);
|
||||
// TODO: wrap this in some logic to prevent it from happening if we don't need this support
|
||||
if (response) {
|
||||
this[kNumReturned] = this[kNumReturned] + response.cursor.nextBatch.length;
|
||||
}
|
||||
callback(undefined, response);
|
||||
});
|
||||
}
|
||||
/**
|
||||
* Get the count of documents for this cursor
|
||||
* @deprecated Use `collection.estimatedDocumentCount` or `collection.countDocuments` instead
|
||||
*/
|
||||
async count(options) {
|
||||
(0, utils_1.emitWarningOnce)('cursor.count is deprecated and will be removed in the next major version, please use `collection.estimatedDocumentCount` or `collection.countDocuments` instead ');
|
||||
if (typeof options === 'boolean') {
|
||||
throw new error_1.MongoInvalidArgumentError('Invalid first parameter to count');
|
||||
}
|
||||
return (0, execute_operation_1.executeOperation)(this.client, new count_1.CountOperation(this.namespace, this[kFilter], {
|
||||
...this[kBuiltOptions],
|
||||
...this.cursorOptions,
|
||||
...options
|
||||
}));
|
||||
}
|
||||
/** Execute the explain for the cursor */
|
||||
async explain(verbosity) {
|
||||
return (0, execute_operation_1.executeOperation)(this.client, new find_1.FindOperation(undefined, this.namespace, this[kFilter], {
|
||||
...this[kBuiltOptions],
|
||||
...this.cursorOptions,
|
||||
explain: verbosity ?? true
|
||||
}));
|
||||
}
|
||||
/** Set the cursor query */
|
||||
filter(filter) {
|
||||
(0, abstract_cursor_1.assertUninitialized)(this);
|
||||
this[kFilter] = filter;
|
||||
return this;
|
||||
}
|
||||
/**
|
||||
* Set the cursor hint
|
||||
*
|
||||
* @param hint - If specified, then the query system will only consider plans using the hinted index.
|
||||
*/
|
||||
hint(hint) {
|
||||
(0, abstract_cursor_1.assertUninitialized)(this);
|
||||
this[kBuiltOptions].hint = hint;
|
||||
return this;
|
||||
}
|
||||
/**
|
||||
* Set the cursor min
|
||||
*
|
||||
* @param min - Specify a $min value to specify the inclusive lower bound for a specific index in order to constrain the results of find(). The $min specifies the lower bound for all keys of a specific index in order.
|
||||
*/
|
||||
min(min) {
|
||||
(0, abstract_cursor_1.assertUninitialized)(this);
|
||||
this[kBuiltOptions].min = min;
|
||||
return this;
|
||||
}
|
||||
/**
|
||||
* Set the cursor max
|
||||
*
|
||||
* @param max - Specify a $max value to specify the exclusive upper bound for a specific index in order to constrain the results of find(). The $max specifies the upper bound for all keys of a specific index in order.
|
||||
*/
|
||||
max(max) {
|
||||
(0, abstract_cursor_1.assertUninitialized)(this);
|
||||
this[kBuiltOptions].max = max;
|
||||
return this;
|
||||
}
|
||||
/**
|
||||
* Set the cursor returnKey.
|
||||
* If set to true, modifies the cursor to only return the index field or fields for the results of the query, rather than documents.
|
||||
* If set to true and the query does not use an index to perform the read operation, the returned documents will not contain any fields.
|
||||
*
|
||||
* @param value - the returnKey value.
|
||||
*/
|
||||
returnKey(value) {
|
||||
(0, abstract_cursor_1.assertUninitialized)(this);
|
||||
this[kBuiltOptions].returnKey = value;
|
||||
return this;
|
||||
}
|
||||
/**
|
||||
* Modifies the output of a query by adding a field $recordId to matching documents. $recordId is the internal key which uniquely identifies a document in a collection.
|
||||
*
|
||||
     * @param value - The $showDiskLoc option has now been deprecated and replaced with the showRecordId field. $showDiskLoc will still be accepted for OP_QUERY style find.
     */
    showRecordId(value) {
        (0, abstract_cursor_1.assertUninitialized)(this);
        this[kBuiltOptions].showRecordId = value;
        return this;
    }
    /**
     * Add a query modifier to the cursor query
     *
     * @param name - The query modifier (must start with $, such as $orderby etc)
     * @param value - The modifier value.
     */
    addQueryModifier(name, value) {
        (0, abstract_cursor_1.assertUninitialized)(this);
        if (name[0] !== '$') {
            throw new error_1.MongoInvalidArgumentError(`${name} is not a valid query modifier`);
        }
        // Strip off the $
        const field = name.substr(1);
        // NOTE: consider some TS magic for this
        switch (field) {
            case 'comment':
                this[kBuiltOptions].comment = value;
                break;
            case 'explain':
                this[kBuiltOptions].explain = value;
                break;
            case 'hint':
                this[kBuiltOptions].hint = value;
                break;
            case 'max':
                this[kBuiltOptions].max = value;
                break;
            case 'maxTimeMS':
                this[kBuiltOptions].maxTimeMS = value;
                break;
            case 'min':
                this[kBuiltOptions].min = value;
                break;
            case 'orderby':
                this[kBuiltOptions].sort = (0, sort_1.formatSort)(value);
                break;
            case 'query':
                this[kFilter] = value;
                break;
            case 'returnKey':
                this[kBuiltOptions].returnKey = value;
                break;
            case 'showDiskLoc':
                this[kBuiltOptions].showRecordId = value;
                break;
            default:
                throw new error_1.MongoInvalidArgumentError(`Invalid query modifier: ${name}`);
        }
        return this;
    }
    /**
     * Add a comment to the cursor query allowing for tracking the comment in the log.
     *
     * @param value - The comment attached to this query.
     */
    comment(value) {
        (0, abstract_cursor_1.assertUninitialized)(this);
        this[kBuiltOptions].comment = value;
        return this;
    }
    /**
     * Set a maxAwaitTimeMS on a tailable cursor query, allowing customization of the timeout value for the awaitData option (only supported on MongoDB 3.2 or higher; ignored otherwise)
     *
     * @param value - Number of milliseconds to wait before aborting the tailed query.
     */
    maxAwaitTimeMS(value) {
        (0, abstract_cursor_1.assertUninitialized)(this);
        if (typeof value !== 'number') {
            throw new error_1.MongoInvalidArgumentError('Argument for maxAwaitTimeMS must be a number');
        }
        this[kBuiltOptions].maxAwaitTimeMS = value;
        return this;
    }
    /**
     * Set a maxTimeMS on the cursor query, allowing for hard timeout limits on queries (Only supported on MongoDB 2.6 or higher)
     *
     * @param value - Number of milliseconds to wait before aborting the query.
     */
    maxTimeMS(value) {
        (0, abstract_cursor_1.assertUninitialized)(this);
        if (typeof value !== 'number') {
            throw new error_1.MongoInvalidArgumentError('Argument for maxTimeMS must be a number');
        }
        this[kBuiltOptions].maxTimeMS = value;
        return this;
    }
    /**
     * Add a projection to the cursor query
     *
     * @remarks
     * In order to strictly type this function you must provide an interface
     * that represents the effect of your projection on the result documents.
     *
     * By default chaining a projection to your cursor changes the returned type to the generic
     * {@link Document} type.
     * You should specify a parameterized type to have assertions on your final results.
     *
     * @example
     * ```typescript
     * // Best way
     * const docs: FindCursor<{ a: number }> = cursor.project<{ a: number }>({ _id: 0, a: true });
     * // Flexible way
     * const docs: FindCursor<Document> = cursor.project({ _id: 0, a: true });
     * ```
     *
     * @remarks
     *
     * **Note for Typescript Users:** adding a transform changes the return type of the iteration of this cursor,
     * it **does not** return a new instance of a cursor. This means when calling project,
     * you should always assign the result to a new variable in order to get a correctly typed cursor variable.
     * Take note of the following example:
     *
     * @example
     * ```typescript
     * const cursor: FindCursor<{ a: number; b: string }> = coll.find();
     * const projectCursor = cursor.project<{ a: number }>({ _id: 0, a: true });
     * const aPropOnlyArray: {a: number}[] = await projectCursor.toArray();
     *
     * // or always use chaining and save the final cursor
     *
     * const cursor = coll.find().project<{ a: string }>({
     *   _id: 0,
     *   a: { $convert: { input: '$a', to: 'string' }
     * }});
     * ```
     */
    project(value) {
        (0, abstract_cursor_1.assertUninitialized)(this);
        this[kBuiltOptions].projection = value;
        return this;
    }
    /**
     * Sets the sort order of the cursor query.
     *
     * @param sort - The key or keys set for the sort.
     * @param direction - The direction of the sorting (1 or -1).
     */
    sort(sort, direction) {
        (0, abstract_cursor_1.assertUninitialized)(this);
        if (this[kBuiltOptions].tailable) {
            throw new error_1.MongoTailableCursorError('Tailable cursor does not support sorting');
        }
        this[kBuiltOptions].sort = (0, sort_1.formatSort)(sort, direction);
        return this;
    }
    /**
     * Allows disk use for blocking sort operations exceeding 100MB memory. (MongoDB 3.2 or higher)
     *
     * @remarks
     * {@link https://www.mongodb.com/docs/manual/reference/command/find/#find-cmd-allowdiskuse | find command allowDiskUse documentation}
     */
    allowDiskUse(allow = true) {
        (0, abstract_cursor_1.assertUninitialized)(this);
        if (!this[kBuiltOptions].sort) {
            throw new error_1.MongoInvalidArgumentError('Option "allowDiskUse" requires a sort specification');
        }
        // As of 6.0 the default is true. This allows users to get back to the old behavior.
        if (!allow) {
            this[kBuiltOptions].allowDiskUse = false;
            return this;
        }
        this[kBuiltOptions].allowDiskUse = true;
        return this;
    }
    /**
     * Set the collation options for the cursor.
     *
     * @param value - The cursor collation options (MongoDB 3.4 or higher) settings for update operation (see 3.4 documentation for available fields).
     */
    collation(value) {
        (0, abstract_cursor_1.assertUninitialized)(this);
        this[kBuiltOptions].collation = value;
        return this;
    }
    /**
     * Set the limit for the cursor.
     *
     * @param value - The limit for the cursor query.
     */
    limit(value) {
        (0, abstract_cursor_1.assertUninitialized)(this);
        if (this[kBuiltOptions].tailable) {
            throw new error_1.MongoTailableCursorError('Tailable cursor does not support limit');
        }
        if (typeof value !== 'number') {
            throw new error_1.MongoInvalidArgumentError('Operation "limit" requires an integer');
        }
        this[kBuiltOptions].limit = value;
        return this;
    }
    /**
     * Set the skip for the cursor.
     *
     * @param value - The skip for the cursor query.
     */
    skip(value) {
        (0, abstract_cursor_1.assertUninitialized)(this);
        if (this[kBuiltOptions].tailable) {
            throw new error_1.MongoTailableCursorError('Tailable cursor does not support skip');
        }
        if (typeof value !== 'number') {
            throw new error_1.MongoInvalidArgumentError('Operation "skip" requires an integer');
        }
        this[kBuiltOptions].skip = value;
        return this;
    }
}
exports.FindCursor = FindCursor;
//# sourceMappingURL=find_cursor.js.map
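Each builder above records its option in `kBuiltOptions` before the query is sent, so the calls compose in any order. A hypothetical query, with invented collection and field names:

```typescript
// Illustrative sketch of the FindCursor builders above; 'users', 'age' and
// 'name' are placeholder names.
const adults = db.collection('users')
  .find({ age: { $gte: 18 } })
  .sort({ age: 1 })   // stored via formatSort()
  .skip(20)
  .limit(10)
  .project<{ name: string }>({ _id: 0, name: 1 });

const page = await adults.toArray();
```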
1
VISUALIZACION/node_modules/mongodb/lib/cursor/find_cursor.js.map
generated
vendored
Executable file
File diff suppressed because one or more lines are too long
37
VISUALIZACION/node_modules/mongodb/lib/cursor/list_collections_cursor.js
generated
vendored
Executable file
@ -0,0 +1,37 @@
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.ListCollectionsCursor = void 0;
|
||||
const execute_operation_1 = require("../operations/execute_operation");
|
||||
const list_collections_1 = require("../operations/list_collections");
|
||||
const abstract_cursor_1 = require("./abstract_cursor");
|
||||
/** @public */
|
||||
class ListCollectionsCursor extends abstract_cursor_1.AbstractCursor {
|
||||
constructor(db, filter, options) {
|
||||
super(db.client, db.s.namespace, options);
|
||||
this.parent = db;
|
||||
this.filter = filter;
|
||||
this.options = options;
|
||||
}
|
||||
clone() {
|
||||
return new ListCollectionsCursor(this.parent, this.filter, {
|
||||
...this.options,
|
||||
...this.cursorOptions
|
||||
});
|
||||
}
|
||||
/** @internal */
|
||||
_initialize(session, callback) {
|
||||
const operation = new list_collections_1.ListCollectionsOperation(this.parent, this.filter, {
|
||||
...this.cursorOptions,
|
||||
...this.options,
|
||||
session
|
||||
});
|
||||
(0, execute_operation_1.executeOperation)(this.parent.client, operation, (err, response) => {
|
||||
if (err || response == null)
|
||||
return callback(err);
|
||||
// TODO: NODE-2882
|
||||
callback(undefined, { server: operation.server, session, response });
|
||||
});
|
||||
}
|
||||
}
|
||||
exports.ListCollectionsCursor = ListCollectionsCursor;
|
||||
//# sourceMappingURL=list_collections_cursor.js.map
|
||||
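A short usage sketch for this cursor via its public entry point, `db.listCollections()`; the filter shown here is invented:

```typescript
// Illustrative: db.listCollections() returns a ListCollectionsCursor.
const collections = await db
  .listCollections({ name: { $regex: /^audit_/ } }) // optional filter (placeholder)
  .toArray();
console.log(collections.map(c => c.name));
```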
1
VISUALIZACION/node_modules/mongodb/lib/cursor/list_collections_cursor.js.map
generated
vendored
Executable file
@ -0,0 +1 @@
{"version":3,"file":"list_collections_cursor.js","sourceRoot":"","sources":["../../src/cursor/list_collections_cursor.ts"],"names":[],"mappings":";;;AAEA,uEAAyF;AACzF,qEAIwC;AAGxC,uDAAmD;AAEnD,cAAc;AACd,MAAa,qBAIX,SAAQ,gCAAiB;IAKzB,YAAY,EAAM,EAAE,MAAgB,EAAE,OAAgC;QACpE,KAAK,CAAC,EAAE,CAAC,MAAM,EAAE,EAAE,CAAC,CAAC,CAAC,SAAS,EAAE,OAAO,CAAC,CAAC;QAC1C,IAAI,CAAC,MAAM,GAAG,EAAE,CAAC;QACjB,IAAI,CAAC,MAAM,GAAG,MAAM,CAAC;QACrB,IAAI,CAAC,OAAO,GAAG,OAAO,CAAC;IACzB,CAAC;IAED,KAAK;QACH,OAAO,IAAI,qBAAqB,CAAC,IAAI,CAAC,MAAM,EAAE,IAAI,CAAC,MAAM,EAAE;YACzD,GAAG,IAAI,CAAC,OAAO;YACf,GAAG,IAAI,CAAC,aAAa;SACtB,CAAC,CAAC;IACL,CAAC;IAED,gBAAgB;IAChB,WAAW,CAAC,OAAkC,EAAE,QAAmC;QACjF,MAAM,SAAS,GAAG,IAAI,2CAAwB,CAAC,IAAI,CAAC,MAAM,EAAE,IAAI,CAAC,MAAM,EAAE;YACvE,GAAG,IAAI,CAAC,aAAa;YACrB,GAAG,IAAI,CAAC,OAAO;YACf,OAAO;SACR,CAAC,CAAC;QAEH,IAAA,oCAAgB,EAAC,IAAI,CAAC,MAAM,CAAC,MAAM,EAAE,SAAS,EAAE,CAAC,GAAG,EAAE,QAAQ,EAAE,EAAE;YAChE,IAAI,GAAG,IAAI,QAAQ,IAAI,IAAI;gBAAE,OAAO,QAAQ,CAAC,GAAG,CAAC,CAAC;YAElD,kBAAkB;YAClB,QAAQ,CAAC,SAAS,EAAE,EAAE,MAAM,EAAE,SAAS,CAAC,MAAM,EAAE,OAAO,EAAE,QAAQ,EAAE,CAAC,CAAC;QACvE,CAAC,CAAC,CAAC;IACL,CAAC;CACF;AAtCD,sDAsCC"}
36
VISUALIZACION/node_modules/mongodb/lib/cursor/list_indexes_cursor.js
generated
vendored
Executable file
@ -0,0 +1,36 @@
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.ListIndexesCursor = void 0;
|
||||
const execute_operation_1 = require("../operations/execute_operation");
|
||||
const indexes_1 = require("../operations/indexes");
|
||||
const abstract_cursor_1 = require("./abstract_cursor");
|
||||
/** @public */
|
||||
class ListIndexesCursor extends abstract_cursor_1.AbstractCursor {
|
||||
constructor(collection, options) {
|
||||
super(collection.client, collection.s.namespace, options);
|
||||
this.parent = collection;
|
||||
this.options = options;
|
||||
}
|
||||
clone() {
|
||||
return new ListIndexesCursor(this.parent, {
|
||||
...this.options,
|
||||
...this.cursorOptions
|
||||
});
|
||||
}
|
||||
/** @internal */
|
||||
_initialize(session, callback) {
|
||||
const operation = new indexes_1.ListIndexesOperation(this.parent, {
|
||||
...this.cursorOptions,
|
||||
...this.options,
|
||||
session
|
||||
});
|
||||
(0, execute_operation_1.executeOperation)(this.parent.client, operation, (err, response) => {
|
||||
if (err || response == null)
|
||||
return callback(err);
|
||||
// TODO: NODE-2882
|
||||
callback(undefined, { server: operation.server, session, response });
|
||||
});
|
||||
}
|
||||
}
|
||||
exports.ListIndexesCursor = ListIndexesCursor;
|
||||
//# sourceMappingURL=list_indexes_cursor.js.map
|
||||
1
VISUALIZACION/node_modules/mongodb/lib/cursor/list_indexes_cursor.js.map
generated
vendored
Executable file
@ -0,0 +1 @@
{"version":3,"file":"list_indexes_cursor.js","sourceRoot":"","sources":["../../src/cursor/list_indexes_cursor.ts"],"names":[],"mappings":";;;AACA,uEAAyF;AACzF,mDAAsF;AAGtF,uDAAmD;AAEnD,cAAc;AACd,MAAa,iBAAkB,SAAQ,gCAAc;IAInD,YAAY,UAAsB,EAAE,OAA4B;QAC9D,KAAK,CAAC,UAAU,CAAC,MAAM,EAAE,UAAU,CAAC,CAAC,CAAC,SAAS,EAAE,OAAO,CAAC,CAAC;QAC1D,IAAI,CAAC,MAAM,GAAG,UAAU,CAAC;QACzB,IAAI,CAAC,OAAO,GAAG,OAAO,CAAC;IACzB,CAAC;IAED,KAAK;QACH,OAAO,IAAI,iBAAiB,CAAC,IAAI,CAAC,MAAM,EAAE;YACxC,GAAG,IAAI,CAAC,OAAO;YACf,GAAG,IAAI,CAAC,aAAa;SACtB,CAAC,CAAC;IACL,CAAC;IAED,gBAAgB;IAChB,WAAW,CAAC,OAAkC,EAAE,QAAmC;QACjF,MAAM,SAAS,GAAG,IAAI,8BAAoB,CAAC,IAAI,CAAC,MAAM,EAAE;YACtD,GAAG,IAAI,CAAC,aAAa;YACrB,GAAG,IAAI,CAAC,OAAO;YACf,OAAO;SACR,CAAC,CAAC;QAEH,IAAA,oCAAgB,EAAC,IAAI,CAAC,MAAM,CAAC,MAAM,EAAE,SAAS,EAAE,CAAC,GAAG,EAAE,QAAQ,EAAE,EAAE;YAChE,IAAI,GAAG,IAAI,QAAQ,IAAI,IAAI;gBAAE,OAAO,QAAQ,CAAC,GAAG,CAAC,CAAC;YAElD,kBAAkB;YAClB,QAAQ,CAAC,SAAS,EAAE,EAAE,MAAM,EAAE,SAAS,CAAC,MAAM,EAAE,OAAO,EAAE,QAAQ,EAAE,CAAC,CAAC;QACvE,CAAC,CAAC,CAAC;IACL,CAAC;CACF;AAhCD,8CAgCC"}
14
VISUALIZACION/node_modules/mongodb/lib/cursor/list_search_indexes_cursor.js
generated
vendored
Executable file
@ -0,0 +1,14 @@
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.ListSearchIndexesCursor = void 0;
|
||||
const aggregation_cursor_1 = require("./aggregation_cursor");
|
||||
/** @public */
|
||||
class ListSearchIndexesCursor extends aggregation_cursor_1.AggregationCursor {
|
||||
/** @internal */
|
||||
constructor({ fullNamespace: ns, client }, name, options = {}) {
|
||||
const pipeline = name == null ? [{ $listSearchIndexes: {} }] : [{ $listSearchIndexes: { name } }];
|
||||
super(client, ns, pipeline, options);
|
||||
}
|
||||
}
|
||||
exports.ListSearchIndexesCursor = ListSearchIndexesCursor;
|
||||
//# sourceMappingURL=list_search_indexes_cursor.js.map
|
||||
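This cursor is simply an `AggregationCursor` over a `$listSearchIndexes` stage; passing a name narrows the listing to one index. A sketch, assuming a `collection` handle and a made-up index name:

```typescript
// Illustrative: collection.listSearchIndexes() builds the pipeline above.
const allIndexes = await collection.listSearchIndexes().toArray();
const one = await collection.listSearchIndexes('default').toArray(); // name filter (placeholder)
```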
1
VISUALIZACION/node_modules/mongodb/lib/cursor/list_search_indexes_cursor.js.map
generated
vendored
Executable file
@ -0,0 +1 @@
{"version":3,"file":"list_search_indexes_cursor.js","sourceRoot":"","sources":["../../src/cursor/list_search_indexes_cursor.ts"],"names":[],"mappings":";;;AAEA,6DAAyD;AAKzD,cAAc;AACd,MAAa,uBAAwB,SAAQ,sCAAmC;IAC9E,gBAAgB;IAChB,YACE,EAAE,aAAa,EAAE,EAAE,EAAE,MAAM,EAAc,EACzC,IAAmB,EACnB,UAAoC,EAAE;QAEtC,MAAM,QAAQ,GACZ,IAAI,IAAI,IAAI,CAAC,CAAC,CAAC,CAAC,EAAE,kBAAkB,EAAE,EAAE,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,kBAAkB,EAAE,EAAE,IAAI,EAAE,EAAE,CAAC,CAAC;QACnF,KAAK,CAAC,MAAM,EAAE,EAAE,EAAE,QAAQ,EAAE,OAAO,CAAC,CAAC;IACvC,CAAC;CACF;AAXD,0DAWC"}
94
VISUALIZACION/node_modules/mongodb/lib/cursor/run_command_cursor.js
generated
vendored
Executable file
@ -0,0 +1,94 @@
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.RunCommandCursor = void 0;
|
||||
const error_1 = require("../error");
|
||||
const execute_operation_1 = require("../operations/execute_operation");
|
||||
const get_more_1 = require("../operations/get_more");
|
||||
const run_command_1 = require("../operations/run_command");
|
||||
const utils_1 = require("../utils");
|
||||
const abstract_cursor_1 = require("./abstract_cursor");
|
||||
/** @public */
|
||||
class RunCommandCursor extends abstract_cursor_1.AbstractCursor {
|
||||
/**
|
||||
* Controls the `getMore.comment` field
|
||||
* @param comment - any BSON value
|
||||
*/
|
||||
setComment(comment) {
|
||||
this.getMoreOptions.comment = comment;
|
||||
return this;
|
||||
}
|
||||
/**
|
||||
* Controls the `getMore.maxTimeMS` field. Only valid when cursor is tailable await
|
||||
* @param maxTimeMS - the number of milliseconds to wait for new data
|
||||
*/
|
||||
setMaxTimeMS(maxTimeMS) {
|
||||
this.getMoreOptions.maxAwaitTimeMS = maxTimeMS;
|
||||
return this;
|
||||
}
|
||||
/**
|
||||
* Controls the `getMore.batchSize` field
|
||||
* @param maxTimeMS - the number documents to return in the `nextBatch`
|
||||
*/
|
||||
setBatchSize(batchSize) {
|
||||
this.getMoreOptions.batchSize = batchSize;
|
||||
return this;
|
||||
}
|
||||
/** Unsupported for RunCommandCursor */
|
||||
clone() {
|
||||
throw new error_1.MongoAPIError('Clone not supported, create a new cursor with db.runCursorCommand');
|
||||
}
|
||||
/** Unsupported for RunCommandCursor: readConcern must be configured directly on command document */
|
||||
withReadConcern(_) {
|
||||
throw new error_1.MongoAPIError('RunCommandCursor does not support readConcern it must be attached to the command being run');
|
||||
}
|
||||
/** Unsupported for RunCommandCursor: various cursor flags must be configured directly on command document */
|
||||
addCursorFlag(_, __) {
|
||||
throw new error_1.MongoAPIError('RunCommandCursor does not support cursor flags, they must be attached to the command being run');
|
||||
}
|
||||
/** Unsupported for RunCommandCursor: maxTimeMS must be configured directly on command document */
|
||||
maxTimeMS(_) {
|
||||
throw new error_1.MongoAPIError('maxTimeMS must be configured on the command document directly, to configure getMore.maxTimeMS use cursor.setMaxTimeMS()');
|
||||
}
|
||||
/** Unsupported for RunCommandCursor: batchSize must be configured directly on command document */
|
||||
batchSize(_) {
|
||||
throw new error_1.MongoAPIError('batchSize must be configured on the command document directly, to configure getMore.batchSize use cursor.setBatchSize()');
|
||||
}
|
||||
/** @internal */
|
||||
constructor(db, command, options = {}) {
|
||||
super(db.client, (0, utils_1.ns)(db.namespace), options);
|
||||
this.getMoreOptions = {};
|
||||
this.db = db;
|
||||
this.command = Object.freeze({ ...command });
|
||||
}
|
||||
/** @internal */
|
||||
_initialize(session, callback) {
|
||||
const operation = new run_command_1.RunCommandOperation(this.db, this.command, {
|
||||
...this.cursorOptions,
|
||||
session: session,
|
||||
readPreference: this.cursorOptions.readPreference
|
||||
});
|
||||
(0, execute_operation_1.executeOperation)(this.client, operation).then(response => {
|
||||
if (response.cursor == null) {
|
||||
callback(new error_1.MongoUnexpectedServerResponseError('Expected server to respond with cursor'));
|
||||
return;
|
||||
}
|
||||
callback(undefined, {
|
||||
server: operation.server,
|
||||
session,
|
||||
response
|
||||
});
|
||||
}, err => callback(err));
|
||||
}
|
||||
/** @internal */
|
||||
_getMore(_batchSize, callback) {
|
||||
// eslint-disable-next-line @typescript-eslint/no-non-null-assertion
|
||||
const getMoreOperation = new get_more_1.GetMoreOperation(this.namespace, this.id, this.server, {
|
||||
...this.cursorOptions,
|
||||
session: this.session,
|
||||
...this.getMoreOptions
|
||||
});
|
||||
(0, execute_operation_1.executeOperation)(this.client, getMoreOperation, callback);
|
||||
}
|
||||
}
|
||||
exports.RunCommandCursor = RunCommandCursor;
|
||||
//# sourceMappingURL=run_command_cursor.js.map
|
||||
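A hedged usage sketch of the above: `db.runCursorCommand()` is the public entry point that constructs a RunCommandCursor (the connection string, database, and `events` collection are illustrative assumptions). Note the design split the class enforces: the initial command document is frozen in the constructor, so the `set*` methods can only shape the follow-up `getMore` commands, and anything that belongs on the first command must go in the command document itself.

// Sketch of driving RunCommandCursor via db.runCursorCommand(); the
// connection string, database, and collection names are assumptions.
const { MongoClient } = require("mongodb");

async function main() {
    const client = new MongoClient("mongodb://localhost:27017");
    const db = client.db("app");
    try {
        const cursor = db
            .runCursorCommand({ find: "events", filter: {}, batchSize: 10 })
            .setBatchSize(100)     // applies to getMore.batchSize only
            .setComment("audit");  // propagated as getMore.comment
        for await (const doc of cursor) {
            console.log(doc);
        }
    } finally {
        await client.close();
    }
}
main();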
Some files were not shown because too many files have changed in this diff.