From 8d7ef8c53c5735a596bc68511584d99d5105611f Mon Sep 17 00:00:00 2001 From: branberry Date: Thu, 26 Sep 2024 11:43:21 -0500 Subject: [PATCH 1/7] Persistence module code --- snooty-cache/src/index.ts | 12 +++++++----- snooty-cache/src/persistence.ts | 28 ++++++++++++++++++++++++++++ 2 files changed, 35 insertions(+), 5 deletions(-) create mode 100644 snooty-cache/src/persistence.ts diff --git a/snooty-cache/src/index.ts b/snooty-cache/src/index.ts index 5ab16507e..9e411375b 100644 --- a/snooty-cache/src/index.ts +++ b/snooty-cache/src/index.ts @@ -1,11 +1,11 @@ // Documentation: https://sdk.netlify.com +import { readdir } from 'node:fs'; +import { promisify } from 'node:util'; import { NetlifyIntegration } from '@netlify/sdk'; -import { readdir } from 'fs'; - -import { promisify } from 'util'; import { checkForNewSnootyVersion } from './snooty-frontend-version-check'; +import { downloadPersistenceModule } from './persistence'; const readdirAsync = promisify(readdir); @@ -32,6 +32,8 @@ integration.addBuildEventHandler( await Promise.all(cacheFiles.map((cacheFile) => cache.restore(cacheFile))); await checkForNewSnootyVersion(run); + + await downloadPersistenceModule(run.command); }, ); @@ -77,10 +79,10 @@ integration.addBuildEventHandler( let errorCount = 0; let warningCount = 0; - logsSplit.forEach((row) => { + for (const row of logsSplit) { if (row.includes('ERROR')) errorCount += 1; if (row.includes('WARNING')) warningCount += 1; - }); + } status.show({ title: `Snooty Parser Logs - Errors: ${errorCount} | Warnings: ${warningCount}`, diff --git a/snooty-cache/src/persistence.ts b/snooty-cache/src/persistence.ts new file mode 100644 index 000000000..fb8fae74b --- /dev/null +++ b/snooty-cache/src/persistence.ts @@ -0,0 +1,28 @@ +import type { NetlifyPluginUtils } from '@netlify/build'; +import { existsSync } from 'node:fs'; + +type CliCommand = NetlifyPluginUtils['run']['command']; + +export async function downloadPersistenceModule( + command: CliCommand, +): Promise<void> { + const isModuleDownloaded = existsSync(`${process.cwd()}/docs-worker-pool`); + + if (isModuleDownloaded) return; + + await command( + 'git clone --depth 1 --filter=tree:0 https://github.com/mongodb/docs-worker-pool.git --sparse', + ); + + await command('git sparse-checkout set --no-cone modules/persistence', { + cwd: `${process.cwd()}/docs-worker-pool`, + }); + + await command('npm ci', { + cwd: `${process.cwd()}/docs-worker-pool/modules/persistence`, + }); + + await command('npm run build', { + cwd: `${process.cwd()}/docs-worker-pool/modules/persistence`, + }); +} From a0a52d51365a587f71be4fd870a5f016b3cd58c6 Mon Sep 17 00:00:00 2001 From: branberry Date: Fri, 27 Sep 2024 09:52:12 -0500 Subject: [PATCH 2/7] add a bunch of formatter changes --- git-changed-files/src/index.ts | 2 +- persistence-module/src/index.ts | 4 ++-- redoc/src/atlas.ts | 4 ++-- redoc/src/build-pages.ts | 6 +++--- redoc/src/utils/fs-async.ts | 4 ++-- search-manifest/src/generateManifest/document.ts | 2 +- search-manifest/src/generateManifest/manifestEntry.ts | 2 +- .../src/uploadToAtlas/deleteStaleProperties.ts | 2 +- search-manifest/src/uploadToAtlas/getProperties.ts | 10 +++++----- .../tests/integration/uploadManifest.test.ts | 4 ++-- search-manifest/tests/unit/getProperties.test.ts | 4 ++-- search-manifest/tests/unit/utils.test.ts | 2 +- snooty-cache/src/snooty-frontend-version-check.ts | 8 ++++---- 13 files changed, 27 insertions(+), 27 deletions(-) diff --git a/git-changed-files/src/index.ts b/git-changed-files/src/index.ts index 5abd03082..cb2cce1ec
100644 --- a/git-changed-files/src/index.ts +++ b/git-changed-files/src/index.ts @@ -18,7 +18,7 @@ integration.addBuildEventHandler('onSuccess', ({ utils: { status, git } }) => { if (markdownList.length !== 0) { status.show({ - title: `URLs to Changed Files`, + title: 'URLs to Changed Files', summary: markdownList.join('\n'), }); } diff --git a/persistence-module/src/index.ts b/persistence-module/src/index.ts index 181c6e01c..4e76aa589 100644 --- a/persistence-module/src/index.ts +++ b/persistence-module/src/index.ts @@ -1,8 +1,8 @@ // Documentation: https://sdk.netlify.com import { NetlifyIntegration } from '@netlify/sdk'; import { deserialize } from 'bson'; -import { readdir, readFile, existsSync } from 'fs'; -import { promisify } from 'util'; +import { readdir, readFile, existsSync } from 'node:fs'; +import { promisify } from 'node:util'; import { type Page, updatePages } from './update-pages'; const readdirAsync = promisify(readdir); diff --git a/redoc/src/atlas.ts b/redoc/src/atlas.ts index 7b229a8c2..9b4161bcf 100644 --- a/redoc/src/atlas.ts +++ b/redoc/src/atlas.ts @@ -124,7 +124,7 @@ function ensureSavedVersionDataMatches( ) { throw new Error(`Last successful build data does not include necessary version data:\n Version requested: ${apiVersion}${ - resourceVersion ? ` - ${resourceVersion}` : `` + resourceVersion ? ` - ${resourceVersion}` : '' }`); } } @@ -144,7 +144,7 @@ function createFetchGitHash() { return gitHash; } catch (e) { console.error(e); - throw new Error(`Unsuccessful git hash fetch`); + throw new Error('Unsuccessful git hash fetch'); } }, resetGitHashCache: () => { diff --git a/redoc/src/build-pages.ts b/redoc/src/build-pages.ts index 11eef6528..bf7e59ac7 100644 --- a/redoc/src/build-pages.ts +++ b/redoc/src/build-pages.ts @@ -18,7 +18,7 @@ const COLLECTION_NAME = 'oas_files'; const OAS_FILE_SERVER = 'https://mongodb-mms-build-server.s3.amazonaws.com/openapi/'; -export const normalizePath = (path: string) => path.replace(/\/+/g, `/`); +export const normalizePath = (path: string) => path.replace(/\/+/g, '/'); export const normalizeUrl = (url: string) => { const urlObject = new URL(url); urlObject.pathname = normalizePath(urlObject.pathname); @@ -112,7 +112,7 @@ const createFetchGitHash = () => { return gitHash; } catch (e) { console.error(e); - throw new Error(`Unsuccessful git hash fetch`); + throw new Error('Unsuccessful git hash fetch'); } }, resetGitHashCache: () => { @@ -236,7 +236,7 @@ export async function buildOpenAPIPages( } // If all builds successful, persist git hash and version data in db - if (isSuccessfulBuild && sourceType == 'atlas') { + if (isSuccessfulBuild && sourceType === 'atlas') { try { const gitHash = await fetchGitHash(); const versions = await fetchVersionData(gitHash, OAS_FILE_SERVER); diff --git a/redoc/src/utils/fs-async.ts b/redoc/src/utils/fs-async.ts index e685b7e53..8fb869e8e 100644 --- a/redoc/src/utils/fs-async.ts +++ b/redoc/src/utils/fs-async.ts @@ -1,5 +1,5 @@ -import { writeFile, readFile } from 'fs'; -import { promisify } from 'util'; +import { writeFile, readFile } from 'node:fs'; +import { promisify } from 'node:util'; export const readFileAsync = promisify(readFile); export const writeFileAsync = promisify(writeFile); diff --git a/search-manifest/src/generateManifest/document.ts b/search-manifest/src/generateManifest/document.ts index f0c61607b..5b5549a7b 100644 --- a/search-manifest/src/generateManifest/document.ts +++ b/search-manifest/src/generateManifest/document.ts @@ -1,7 +1,7 @@ import { JSONPath } from 
"jsonpath-plus"; import { Facet } from "./createFacets"; import { ManifestEntry } from "./manifestEntry"; -import { BSON } from "bson"; +import type { BSON } from "bson"; export class Document { //Return indexing data from a page's JSON-formatted AST for search purposes diff --git a/search-manifest/src/generateManifest/manifestEntry.ts b/search-manifest/src/generateManifest/manifestEntry.ts index 6e4243e31..7254aee7b 100644 --- a/search-manifest/src/generateManifest/manifestEntry.ts +++ b/search-manifest/src/generateManifest/manifestEntry.ts @@ -1,4 +1,4 @@ -import { Facet } from "./createFacets"; +import type { Facet } from "./createFacets"; //change this to an interface export class ManifestEntry { diff --git a/search-manifest/src/uploadToAtlas/deleteStaleProperties.ts b/search-manifest/src/uploadToAtlas/deleteStaleProperties.ts index a485468ab..ed8c4b27d 100644 --- a/search-manifest/src/uploadToAtlas/deleteStaleProperties.ts +++ b/search-manifest/src/uploadToAtlas/deleteStaleProperties.ts @@ -1,5 +1,5 @@ import { db, teardown } from "./searchConnector"; -import { DatabaseDocument } from "./types"; +import type { DatabaseDocument } from "./types"; const ATLAS_SEARCH_URI = `mongodb+srv://${process.env.MONGO_ATLAS_USERNAME}:${process.env.MONGO_ATLAS_PASSWORD}@${process.env.MONGO_ATLAS_SEARCH_HOST}/?retryWrites=true&w=majority`; diff --git a/search-manifest/src/uploadToAtlas/getProperties.ts b/search-manifest/src/uploadToAtlas/getProperties.ts index 4ca629049..f86519045 100644 --- a/search-manifest/src/uploadToAtlas/getProperties.ts +++ b/search-manifest/src/uploadToAtlas/getProperties.ts @@ -1,6 +1,6 @@ -import { Collection, Db, Document, WithId } from "mongodb"; +import { type Collection, type Db, Document, WithId } from "mongodb"; import { db, teardown } from "./searchConnector"; -import { +import type { BranchEntry, DatabaseDocument, DocsetsDocument, @@ -34,9 +34,9 @@ const getProperties = async (branchName: string) => { let dbSession: Db; let repos_branches: Collection; let docsets: Collection; - let url: string = ""; - let searchProperty: string = ""; - let includeInGlobalSearch: boolean = false; + let url = ""; + let searchProperty = ""; + let includeInGlobalSearch = false; let repo: ReposBranchesDocument | null; let docsetRepo: DocsetsDocument | null; let version: string; diff --git a/search-manifest/tests/integration/uploadManifest.test.ts b/search-manifest/tests/integration/uploadManifest.test.ts index 719bf2c80..cc0943b42 100644 --- a/search-manifest/tests/integration/uploadManifest.test.ts +++ b/search-manifest/tests/integration/uploadManifest.test.ts @@ -11,7 +11,7 @@ import { uploadManifest } from "../../src/uploadToAtlas/uploadManifest"; import { Manifest } from "../../src/generateManifest/manifest"; import nodeManifest from "../resources/s3Manifests/node-current.json"; import { mockDb, insert, removeDocuments } from "../utils/mockDB"; -import { DatabaseDocument } from "../../src/uploadToAtlas/types"; +import type { DatabaseDocument } from "../../src/uploadToAtlas/types"; import { getManifest } from "../utils/getManifest"; import { hash } from "node:crypto"; import { generateHash } from "../../src/uploadToAtlas/utils"; @@ -108,7 +108,7 @@ describe( afterEach(async () => { await removeDocuments("documents"); }); - let manifest1: Manifest = new Manifest( + const manifest1: Manifest = new Manifest( nodeManifest.url, nodeManifest.includeInGlobalSearch ); diff --git a/search-manifest/tests/unit/getProperties.test.ts b/search-manifest/tests/unit/getProperties.test.ts index 
e6798a8da..9831d412f 100644 --- a/search-manifest/tests/unit/getProperties.test.ts +++ b/search-manifest/tests/unit/getProperties.test.ts @@ -20,8 +20,8 @@ import { import repos_branches from '../resources/mockCollections/repos-branches.json'; //simulate the docsets collection in an object import docsets from "../resources/mockCollections/docsets.json"; -import * as mongodb from "mongodb"; -import { BranchEntry, DatabaseDocument } from "../../src/uploadToAtlas/types"; +import type * as mongodb from "mongodb"; +import type { BranchEntry, DatabaseDocument } from "../../src/uploadToAtlas/types"; import { Manifest } from "../../src/generateManifest/manifest"; import { getManifest } from "../utils/getManifest"; import { uploadManifest } from "../../src/uploadToAtlas/uploadManifest"; diff --git a/search-manifest/tests/unit/utils.test.ts b/search-manifest/tests/unit/utils.test.ts index c50a891d9..4496523b7 100644 --- a/search-manifest/tests/unit/utils.test.ts +++ b/search-manifest/tests/unit/utils.test.ts @@ -2,7 +2,7 @@ import { joinUrl } from '../../src/uploadToAtlas/utils'; import { expect, it } from 'vitest'; //test joinUrl util -it("correctly joins base URLs with slugs", function () { +it("correctly joins base URLs with slugs", () => { expect(joinUrl({ base: "https://example.com//", path: "//foo/" })).toEqual( "https://example.com/foo/" ); diff --git a/snooty-cache/src/snooty-frontend-version-check.ts b/snooty-cache/src/snooty-frontend-version-check.ts index 367a98cd4..aa137efc9 100644 --- a/snooty-cache/src/snooty-frontend-version-check.ts +++ b/snooty-cache/src/snooty-frontend-version-check.ts @@ -1,10 +1,10 @@ import type { NetlifyPluginUtils } from '@netlify/build'; import axios from 'axios'; -import { createHash } from 'crypto'; -import { existsSync } from 'fs'; +import { createHash } from 'node:crypto'; +import { existsSync } from 'node:fs'; -import { readFile } from 'fs'; -import { promisify } from 'util'; +import { readFile } from 'node:fs'; +import { promisify } from 'node:util'; const readFileAsync = promisify(readFile); From 4449587c24c36231932a57aa404a924d7a011441 Mon Sep 17 00:00:00 2001 From: branberry Date: Fri, 27 Sep 2024 09:53:36 -0500 Subject: [PATCH 3/7] fix linting errors --- .../tests/unit/getProperties.test.ts | 360 +++++++++--------- 1 file changed, 181 insertions(+), 179 deletions(-) diff --git a/search-manifest/tests/unit/getProperties.test.ts b/search-manifest/tests/unit/getProperties.test.ts index 9831d412f..06aa4797e 100644 --- a/search-manifest/tests/unit/getProperties.test.ts +++ b/search-manifest/tests/unit/getProperties.test.ts @@ -8,8 +8,8 @@ import { afterAll, } from 'vitest'; import getProperties, { - getBranch, -} from "../../src/uploadToAtlas/getProperties"; + getBranch, +} from '../../src/uploadToAtlas/getProperties'; import { mockDb, teardownMockDbClient, @@ -19,44 +19,47 @@ import { // simulate the repos_branches collection in an object import repos_branches from '../resources/mockCollections/repos-branches.json'; //simulate the docsets collection in an object -import docsets from "../resources/mockCollections/docsets.json"; -import type * as mongodb from "mongodb"; -import type { BranchEntry, DatabaseDocument } from "../../src/uploadToAtlas/types"; -import { Manifest } from "../../src/generateManifest/manifest"; -import { getManifest } from "../utils/getManifest"; -import { uploadManifest } from "../../src/uploadToAtlas/uploadManifest"; -import { afterEach } from "node:test"; - -const BRANCH_NAME_MASTER = "master"; -const BRANCH_NAME_BETA = "beta";
-const BRANCH_NAME_GIBBERISH = "gibberish"; +import docsets from '../resources/mockCollections/docsets.json'; +import type * as mongodb from 'mongodb'; +import type { + BranchEntry, + DatabaseDocument, +} from '../../src/uploadToAtlas/types'; +import { Manifest } from '../../src/generateManifest/manifest'; +import { getManifest } from '../utils/getManifest'; +import { uploadManifest } from '../../src/uploadToAtlas/uploadManifest'; +import { afterEach } from 'node:test'; + +const BRANCH_NAME_MASTER = 'master'; +const BRANCH_NAME_BETA = 'beta'; +const BRANCH_NAME_GIBBERISH = 'gibberish'; let db: mongodb.Db; -const DOCS_COMPASS_NAME = "docs-compass"; -const DOCS_CLOUD_NAME = "cloud-docs"; -const DOCS_APP_SERVICES_NAME = "docs-app-services"; -const DOCS_MONGODB_INTERNAL_NAME = "docs-mongodb-internal"; +const DOCS_COMPASS_NAME = 'docs-compass'; +const DOCS_CLOUD_NAME = 'cloud-docs'; +const DOCS_APP_SERVICES_NAME = 'docs-app-services'; +const DOCS_MONGODB_INTERNAL_NAME = 'docs-mongodb-internal'; beforeAll(async () => { - db = await mockDb(); + db = await mockDb(); - await insert(db, "repos_branches", repos_branches); - await insert(db, "docsets", docsets); + await insert(db, 'repos_branches', repos_branches); + await insert(db, 'docsets', docsets); }); //mock repos_branches database beforeEach(async () => { - vi.mock("../../src/uploadToAtlas/searchConnector", async () => { - const { mockDb, teardownMockDbClient } = await import("../utils/mockDB"); - return { - teardown: teardownMockDbClient, - db: async () => { - //mock db of repos_branches - db = await mockDb(); - return db; - }, - }; - }); + vi.mock('../../src/uploadToAtlas/searchConnector', async () => { + const { mockDb, teardownMockDbClient } = await import('../utils/mockDB'); + return { + teardown: teardownMockDbClient, + db: async () => { + //mock db of repos_branches + db = await mockDb(); + return db; + }, + }; + }); }); afterAll(async () => { @@ -65,37 +68,37 @@ afterAll(async () => { await teardownMockDbClient(); }); -describe("given an array of branches and a branch name, the correct output is returned", () => { - //mock branches object - const branches: Array<BranchEntry> = repos_branches[1].branches; - test("given a branch name that exists in the branches array, the correct branch object is returned", () => { - expect(getBranch(branches, BRANCH_NAME_MASTER)).toEqual({ - gitBranchName: "master", - isStableBranch: true, - urlSlug: "current", - active: true, - }); - }); - - test("given a branch name that exists with different capitalization than in the branches array, the correct branch object is still returned", () => { - expect(getBranch(branches, "MASTER")).toEqual({ - gitBranchName: "master", - isStableBranch: true, - urlSlug: "current", - active: true, - }); - }); - - test("given a branch name that doesn't exist in the branches array, an error is thrown", () => { - expect(() => getBranch(branches, BRANCH_NAME_GIBBERISH)).toThrowError( - new Error(`Branch ${BRANCH_NAME_GIBBERISH} not found in branches object`) - ); - }); - test("given a branch name and an empty branches array, an error is thrown", () => { - expect(() => getBranch([], BRANCH_NAME_MASTER)).toThrowError( - `Branch ${BRANCH_NAME_MASTER} not found in branches object` - ); - }); +describe('given an array of branches and a branch name, the correct output is returned', () => { + //mock branches object + const branches: Array<BranchEntry> = repos_branches[1].branches; + test('given a branch name that exists in the branches array, the correct branch object is returned', () => { +
expect(getBranch(branches, BRANCH_NAME_MASTER)).toEqual({ + gitBranchName: 'master', + isStableBranch: true, + urlSlug: 'current', + active: true, + }); + }); + + test('given a branch name that exists with different capitalization than in the branches array, the correct branch object is still returned', () => { + expect(getBranch(branches, 'MASTER')).toEqual({ + gitBranchName: 'master', + isStableBranch: true, + urlSlug: 'current', + active: true, + }); + }); + + test("given a branch name that doesn't exist in the branches array, an error is thrown", () => { + expect(() => getBranch(branches, BRANCH_NAME_GIBBERISH)).toThrowError( + new Error(`Branch ${BRANCH_NAME_GIBBERISH} not found in branches object`), + ); + }); + test('given a branch name and an empty branches array, an error is thrown', () => { + expect(() => getBranch([], BRANCH_NAME_MASTER)).toThrowError( + `Branch ${BRANCH_NAME_MASTER} not found in branches object`, + ); + }); }); //two tests for a repo with multiple branches, one test for a repo with only one branch @@ -114,124 +117,123 @@ describe('Given a branchname, get the properties associated with it from repos_b ); }); - test(`correct properties are retrieved for branch ${BRANCH_NAME_MASTER} of repoName ${DOCS_CLOUD_NAME}`, async () => { - //define expected properties object for master branch of cloud-docs repo - process.env.REPO_NAME = DOCS_CLOUD_NAME; - const cloudDocsMasterProperties = { - searchProperty: "atlas-master", - url: "http://mongodb.com/docs/atlas/", - includeInGlobalSearch: true, - }; - - expect(await getProperties(BRANCH_NAME_MASTER)).toEqual( - cloudDocsMasterProperties - ); - }); + test(`correct properties are retrieved for branch ${BRANCH_NAME_MASTER} of repoName ${DOCS_CLOUD_NAME}`, async () => { + //define expected properties object for master branch of cloud-docs repo + process.env.REPO_NAME = DOCS_CLOUD_NAME; + const cloudDocsMasterProperties = { + searchProperty: 'atlas-master', + url: 'http://mongodb.com/docs/atlas/', + includeInGlobalSearch: true, + }; + + expect(await getProperties(BRANCH_NAME_MASTER)).toEqual( + cloudDocsMasterProperties, + ); + }); }); describe( - "GetProperties behaves as expected for stale properties", - () => { - afterEach(async () => { - console.log(await removeDocuments("documents")); - }); - - test("getting properties for an inactive branch with no existing documents executes correctly and does not change db document count", async () => { - //populate db with manifests - db = await mockDb(); - const manifest1 = await getManifest("mms-master"); - await uploadManifest(manifest1, "mms-docs-stable"); - //reopen connection to db - await mockDb(); - //check number of documents initially in db - const documentCount = await db - .collection("documents") - .countDocuments(); - - //getProperties for beta doesn't change number of documents in collection - process.env.repo_name = "docs-compass"; - await expect(getProperties(BRANCH_NAME_BETA)).rejects.toThrow(); - await mockDb(); - expect( - await db.collection("documents").countDocuments() - ).toEqual(documentCount); - }); - - test("non prod-deployable repo throws and doesn't return properties", async () => { - process.env.REPO_NAME = DOCS_MONGODB_INTERNAL_NAME; - await expect(getProperties("v5.0")).rejects.toThrow( - `Search manifest should not be generated for repo ${process.env.REPO_NAME}. Removing all associated manifests` - ); - }); - - test(`no properties are retrieved for branch on repo ${DOCS_APP_SERVICES_NAME} without a "search" field. 
`, async () => { - process.env.REPO_NAME = DOCS_MONGODB_INTERNAL_NAME; - await expect(getProperties(BRANCH_NAME_MASTER)).rejects.toThrow(); - }); - - test("repo with no search categoryTitle removes all old documents with search properties beginning with that project name", async () => { - db = await mockDb(); - - //add documents for project from two diff branches to search DB - const manifest1 = await getManifest("mms-master"); - - await uploadManifest(manifest1, "mms-docs-stable"); - await mockDb(); - - const manifest2 = await getManifest("mms-v1.3"); - await uploadManifest(manifest2, "mms-docs-v1.3"); - - await mockDb(); - - //trying to get properties for repo removes those older documents - process.env.REPO_NAME = "mms-docs"; - const documentCount = await db - .collection("documents") - .countDocuments(); - await expect(getProperties(BRANCH_NAME_MASTER)).rejects.toThrow(); - //throws - //no return type - - await mockDb(); - const documentCount2 = await db - .collection("documents") - .countDocuments(); - expect(documentCount2).toEqual( - documentCount - manifest1.documents.length - manifest2.documents.length - ); - }); - - test("getting properties for an inactive branch removes all old documents with that exact project-version searchProperty", async () => { - //add documents for project from two diff branches to DB-- docs-compass master and beta - db = await mockDb(); - //add documents for project from two diff branches to search DB - const manifest1 = await getManifest("compass-master"); - - await uploadManifest(manifest1, "compass-current"); - await mockDb(); - - const manifest2 = await getManifest("compass-beta"); - await uploadManifest(manifest2, "compass-upcoming"); - await mockDb(); - - //trying to get properties for repo removes only the older documents from that specific branch, beta - let documentCount; - let documentCount2; - //trying to get properties for repo removes those older documents - - process.env.REPO_NAME = "docs-compass"; - documentCount = await db - .collection("documents") - .countDocuments(); - await expect(getProperties(BRANCH_NAME_BETA)).rejects.toThrow(); - await mockDb(); - documentCount2 = await db - .collection("documents") - .countDocuments(); - expect(documentCount2).toEqual( - documentCount - manifest2.documents.length - ); - }); - }, - { timeout: 10000 } + 'GetProperties behaves as expected for stale properties', + () => { + afterEach(async () => { + console.log(await removeDocuments('documents')); + }); + + test('getting properties for an inactive branch with no existing documents executes correctly and does not change db document count', async () => { + //populate db with manifests + db = await mockDb(); + const manifest1 = await getManifest('mms-master'); + await uploadManifest(manifest1, 'mms-docs-stable'); + //reopen connection to db + await mockDb(); + //check number of documents initially in db + const documentCount = await db + .collection('documents') + .countDocuments(); + + //getProperties for beta doesn't change number of documents in collection + process.env.repo_name = 'docs-compass'; + await expect(getProperties(BRANCH_NAME_BETA)).rejects.toThrow(); + await mockDb(); + expect( + await db.collection('documents').countDocuments(), + ).toEqual(documentCount); + }); + + test("non prod-deployable repo throws and doesn't return properties", async () => { + process.env.REPO_NAME = DOCS_MONGODB_INTERNAL_NAME; + await expect(getProperties('v5.0')).rejects.toThrow( + `Search manifest should not be generated for repo ${process.env.REPO_NAME}.
Removing all associated manifests`, + ); + }); + + test(`no properties are retrieved for branch on repo ${DOCS_APP_SERVICES_NAME} without a "search" field. `, async () => { + process.env.REPO_NAME = DOCS_MONGODB_INTERNAL_NAME; + await expect(getProperties(BRANCH_NAME_MASTER)).rejects.toThrow(); + }); + + test('repo with no search categoryTitle removes all old documents with search properties beginning with that project name', async () => { + db = await mockDb(); + + //add documents for project from two diff branches to search DB + const manifest1 = await getManifest('mms-master'); + + await uploadManifest(manifest1, 'mms-docs-stable'); + await mockDb(); + + const manifest2 = await getManifest('mms-v1.3'); + await uploadManifest(manifest2, 'mms-docs-v1.3'); + + await mockDb(); + + //trying to get properties for repo removes those older documents + process.env.REPO_NAME = 'mms-docs'; + const documentCount = await db + .collection('documents') + .countDocuments(); + await expect(getProperties(BRANCH_NAME_MASTER)).rejects.toThrow(); + //throws + //no return type + + await mockDb(); + const documentCount2 = await db + .collection('documents') + .countDocuments(); + expect(documentCount2).toEqual( + documentCount - manifest1.documents.length - manifest2.documents.length, + ); + }); + + test('getting properties for an inactive branch removes all old documents with that exact project-version searchProperty', async () => { + //add documents for project from two diff branches to DB-- docs-compass master and beta + db = await mockDb(); + //add documents for project from two diff branches to search DB + const manifest1 = await getManifest('compass-master'); + + await uploadManifest(manifest1, 'compass-current'); + await mockDb(); + + const manifest2 = await getManifest('compass-beta'); + await uploadManifest(manifest2, 'compass-upcoming'); + await mockDb(); + + //trying to get properties for repo removes only the older documents from that specific branch, beta + + process.env.REPO_NAME = 'docs-compass'; + const documentCount = await db + .collection('documents') + .countDocuments(); + await expect(getProperties(BRANCH_NAME_BETA)).rejects.toThrow(); + await mockDb(); + const documentCount2 = await db + .collection('documents') + .countDocuments(); + expect(documentCount2).toEqual( + documentCount - manifest2.documents.length, + ); + }); + }, + { timeout: 10000 }, ); From 06c0d1dd2a15a4db9cfd40acb6f0cb0ab385d698 Mon Sep 17 00:00:00 2001 From: branberry Date: Fri, 27 Sep 2024 10:04:24 -0500 Subject: [PATCH 4/7] Small refactor --- biome.json | 2 +- snooty-cache/src/index.ts | 2 +- snooty-cache/src/persistence.ts | 12 +++++------- 3 files changed, 7 insertions(+), 9 deletions(-) diff --git a/biome.json b/biome.json index d6f187d7d..abfe04616 100644 --- a/biome.json +++ b/biome.json @@ -7,7 +7,7 @@ }, "files": { "ignoreUnknown": false, - "ignore": ["*/.ntli/*"] + "ignore": ["*/.ntli/*", "*/node_modules/*"] }, "formatter": { "enabled": true, diff --git a/snooty-cache/src/index.ts b/snooty-cache/src/index.ts index 9e411375b..5695d7e6c 100644 --- a/snooty-cache/src/index.ts +++ b/snooty-cache/src/index.ts @@ -33,7 +33,7 @@ integration.addBuildEventHandler( await checkForNewSnootyVersion(run); - await downloadPersistenceModule(run.command); + await downloadPersistenceModule(run); }, ); diff --git a/snooty-cache/src/persistence.ts b/snooty-cache/src/persistence.ts index fb8fae74b..b3adc02fe 100644 --- a/snooty-cache/src/persistence.ts +++
b/snooty-cache/src/persistence.ts @@ -1,28 +1,26 @@ import type { NetlifyPluginUtils } from '@netlify/build'; import { existsSync } from 'node:fs'; -type CliCommand = NetlifyPluginUtils['run']['command']; - export async function downloadPersistenceModule( - command: CliCommand, + run: NetlifyPluginUtils['run'], ): Promise<void> { const isModuleDownloaded = existsSync(`${process.cwd()}/docs-worker-pool`); if (isModuleDownloaded) return; - await command( + await run.command( 'git clone --depth 1 --filter=tree:0 https://github.com/mongodb/docs-worker-pool.git --sparse', ); - await command('git sparse-checkout set --no-cone modules/persistence', { + await run.command('git sparse-checkout set --no-cone modules/persistence', { cwd: `${process.cwd()}/docs-worker-pool`, }); - await command('npm ci', { + await run.command('npm ci', { cwd: `${process.cwd()}/docs-worker-pool/modules/persistence`, }); - await command('npm run build', { + await run.command('npm run build', { cwd: `${process.cwd()}/docs-worker-pool/modules/persistence`, }); } From cd2223f11141d9dfe342dc0168bbf0d1c772478d Mon Sep 17 00:00:00 2001 From: branberry Date: Fri, 27 Sep 2024 10:07:56 -0500 Subject: [PATCH 5/7] Refactor cwd to use constants --- snooty-cache/src/persistence.ts | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/snooty-cache/src/persistence.ts b/snooty-cache/src/persistence.ts index b3adc02fe..8a83ae004 100644 --- a/snooty-cache/src/persistence.ts +++ b/snooty-cache/src/persistence.ts @@ -1,10 +1,13 @@ import type { NetlifyPluginUtils } from '@netlify/build'; import { existsSync } from 'node:fs'; +const WORKER_POOL_PATH = `${process.cwd()}/docs-worker-pool`; +const PERSISTENCE_PATH = `${WORKER_POOL_PATH}/modules/persistence`; + export async function downloadPersistenceModule( run: NetlifyPluginUtils['run'], ): Promise<void> { - const isModuleDownloaded = existsSync(`${process.cwd()}/docs-worker-pool`); + const isModuleDownloaded = existsSync(WORKER_POOL_PATH); if (isModuleDownloaded) return; @@ -13,14 +16,14 @@ export async function downloadPersistenceModule( ); await run.command('git sparse-checkout set --no-cone modules/persistence', { - cwd: `${process.cwd()}/docs-worker-pool`, + cwd: WORKER_POOL_PATH, }); await run.command('npm ci', { - cwd: `${process.cwd()}/docs-worker-pool/modules/persistence`, + cwd: PERSISTENCE_PATH, }); await run.command('npm run build', { - cwd: `${process.cwd()}/docs-worker-pool/modules/persistence`, + cwd: PERSISTENCE_PATH, }); } From e18011b092569b7b1b09684e4f7a0b975a94133a Mon Sep 17 00:00:00 2001 From: branberry Date: Fri, 27 Sep 2024 10:18:27 -0500 Subject: [PATCH 6/7] Change formatting style --- biome.json | 59 +- git-changed-files/package.json | 42 +- git-changed-files/src/index.ts | 70 +- git-changed-files/tests/index.test.ts | 60 +- git-changed-files/tsconfig.json | 16 +- git-changed-files/vitest.config.ts | 8 +- package.json | 28 +- .../.netlify/functions/manifest.json | 28 +- persistence-module/.netlify/state.json | 2 +- persistence-module/.vscode/launch.json | 28 +- persistence-module/package.json | 46 +- persistence-module/src/connector.ts | 22 +- persistence-module/src/db-operations.ts | 28 +- persistence-module/src/index.ts | 48 +- persistence-module/src/update-pages.ts | 394 +- persistence-module/test/index.test.ts | 4 +- persistence-module/test/update-pages.test.ts | 204 +- persistence-module/test/utils/mockDb.ts | 26 +- persistence-module/tsconfig.json | 22 +- persistence-module/vitest.config.ts | 6 +- redoc/.netlify/functions/manifest.json | 28 +-
redoc/.netlify/state.json | 2 +- redoc/package.json | 42 +- redoc/src/atlas.ts | 236 +- redoc/src/build-pages.ts | 406 +- redoc/src/index.ts | 90 +- redoc/src/utils/db.ts | 66 +- redoc/tsconfig.json | 22 +- .../src/generateManifest/createFacets.ts | 38 +- .../src/generateManifest/document.ts | 472 +- .../src/generateManifest/manifest.ts | 10 +- .../src/generateManifest/manifestEntry.ts | 2 +- search-manifest/src/index.ts | 62 +- search-manifest/src/types.ts | 4 +- .../src/uploadToAtlas/deleteStale.ts | 68 +- .../uploadToAtlas/deleteStaleProperties.ts | 12 +- .../src/uploadToAtlas/getProperties.ts | 32 +- .../src/uploadToAtlas/searchConnector.ts | 2 +- .../src/uploadToAtlas/uploadManifest.ts | 34 +- search-manifest/src/uploadToS3/connectToS3.ts | 6 +- .../src/uploadToS3/uploadManifest.ts | 10 +- search-manifest/src/utils.ts | 16 +- .../tests/integration/deleteStale.test.ts | 4 +- .../tests/integration/uploadToAtlas.test.ts | 338 +- .../tests/integration/uploadToS3.test.ts | 28 +- .../resources/mockCollections/docsets.json | 96 +- .../mockCollections/repos-branches.json | 173 +- .../s3Manifests/kotlin-upcoming.json | 9610 ++++++++--------- .../resources/s3Manifests/node-current.json | 7856 +++++++------- search-manifest/tests/snapshots/index.test.ts | 4 +- .../tests/unit/getProperties.test.ts | 382 +- search-manifest/tests/unit/index.test.ts | 144 +- search-manifest/tests/unit/utils.test.ts | 14 +- search-manifest/tests/utils/mockDB.ts | 14 +- search-manifest/tsconfig.json | 22 +- search-manifest/vitest.config.ts | 8 +- snooty-cache/package.json | 42 +- snooty-cache/src/index.ts | 126 +- snooty-cache/src/persistence.ts | 30 +- .../src/snooty-frontend-version-check.ts | 102 +- snooty-cache/tsconfig.json | 22 +- 61 files changed, 10909 insertions(+), 10907 deletions(-) diff --git a/biome.json b/biome.json index abfe04616..bceb6c984 100644 --- a/biome.json +++ b/biome.json @@ -1,31 +1,32 @@ { - "$schema": "https://biomejs.dev/schemas/1.9.1/schema.json", - "vcs": { - "enabled": false, - "clientKind": "git", - "useIgnoreFile": true - }, - "files": { - "ignoreUnknown": false, - "ignore": ["*/.ntli/*", "*/node_modules/*"] - }, - "formatter": { - "enabled": true, - "indentStyle": "tab" - }, - "organizeImports": { - "enabled": true - }, - "linter": { - "enabled": true, - "rules": { - "recommended": true - } - }, - "javascript": { - "formatter": { - "quoteStyle": "single", - "trailingCommas": "all" - } - } + "$schema": "https://biomejs.dev/schemas/1.9.1/schema.json", + "vcs": { + "enabled": false, + "clientKind": "git", + "useIgnoreFile": true + }, + "files": { + "ignoreUnknown": false, + "ignore": ["*/.ntli/*", "*/node_modules/*"] + }, + "formatter": { + "enabled": true, + "indentStyle": "space", + "indentWidth": 2 + }, + "organizeImports": { + "enabled": true + }, + "linter": { + "enabled": true, + "rules": { + "recommended": true + } + }, + "javascript": { + "formatter": { + "quoteStyle": "single", + "trailingCommas": "all" + } + } } diff --git a/git-changed-files/package.json b/git-changed-files/package.json index 0e7938705..f0ddf8aa7 100644 --- a/git-changed-files/package.json +++ b/git-changed-files/package.json @@ -1,23 +1,23 @@ { - "name": "git-changed-files", - "version": "0.0.1", - "main": "src/index.ts", - "type": "module", - "scripts": { - "build": "netlify-integration build -a", - "dev": "netlify-integration dev -a", - "preview": "netlify-integration preview", - "test": "vitest" - }, - "dependencies": { - "@netlify/sdk": "^1.60.2-pr-1468.3", - "axios": "^1.7.7", - "typescript": "^5.4.5" - 
}, - "devDependencies": { - "@netlify/build": "^29.50.2", - "@types/node": "^20.14.9", - "execa": "^6.1.0", - "vitest": "^2.1.0" - } + "name": "git-changed-files", + "version": "0.0.1", + "main": "src/index.ts", + "type": "module", + "scripts": { + "build": "netlify-integration build -a", + "dev": "netlify-integration dev -a", + "preview": "netlify-integration preview", + "test": "vitest" + }, + "dependencies": { + "@netlify/sdk": "^1.60.2-pr-1468.3", + "axios": "^1.7.7", + "typescript": "^5.4.5" + }, + "devDependencies": { + "@netlify/build": "^29.50.2", + "@types/node": "^20.14.9", + "execa": "^6.1.0", + "vitest": "^2.1.0" + } } diff --git a/git-changed-files/src/index.ts b/git-changed-files/src/index.ts index cb2cce1ec..40cf84e81 100644 --- a/git-changed-files/src/index.ts +++ b/git-changed-files/src/index.ts @@ -3,25 +3,25 @@ import { NetlifyIntegration } from '@netlify/sdk'; const integration = new NetlifyIntegration(); integration.addBuildEventHandler('onSuccess', ({ utils: { status, git } }) => { - console.log('Checking if any files changed on git -----'); - console.log('Modified files:', git.modifiedFiles); - - if (!process.env.DEPLOY_PRIME_URL) { - console.error('ERROR! process.env.DEPLOY_PRIME_URL is not defined.'); - return; - } - - const markdownList = createMarkdown( - git.modifiedFiles, - process.env.DEPLOY_PRIME_URL, - ); - - if (markdownList.length !== 0) { - status.show({ - title: 'URLs to Changed Files', - summary: markdownList.join('\n'), - }); - } + console.log('Checking if any files changed on git -----'); + console.log('Modified files:', git.modifiedFiles); + + if (!process.env.DEPLOY_PRIME_URL) { + console.error('ERROR! process.env.DEPLOY_PRIME_URL is not defined.'); + return; + } + + const markdownList = createMarkdown( + git.modifiedFiles, + process.env.DEPLOY_PRIME_URL, + ); + + if (markdownList.length !== 0) { + status.show({ + title: 'URLs to Changed Files', + summary: markdownList.join('\n'), + }); + } }); /** @@ -33,28 +33,28 @@ integration.addBuildEventHandler('onSuccess', ({ utils: { status, git } }) => { * @returns string[] */ export function createMarkdown( - modifiedFiles: readonly string[], - netlifyURL: string, + modifiedFiles: readonly string[], + netlifyURL: string, ): string[] { - const IGNORED_DIRS = new Set(['includes', 'images', 'examples']); + const IGNORED_DIRS = new Set(['includes', 'images', 'examples']); - const markdownList = []; - for (const modifiedFile of modifiedFiles) { - const modifiedFilePath = modifiedFile.split('/'); + const markdownList = []; + for (const modifiedFile of modifiedFiles) { + const modifiedFilePath = modifiedFile.split('/'); - // check if this is equal to 'source' - const isSourceDir = modifiedFilePath[0] === 'source'; + // check if this is equal to 'source' + const isSourceDir = modifiedFilePath[0] === 'source'; - // check if this is equal to either images, includes, or examples - const isNonIgnoredDir = !IGNORED_DIRS.has(modifiedFilePath[1]); + // check if this is equal to either images, includes, or examples + const isNonIgnoredDir = !IGNORED_DIRS.has(modifiedFilePath[1]); - if (isSourceDir && isNonIgnoredDir) { - const shortform = modifiedFile.replace('source', '').replace('.txt', ''); - markdownList.push(`[${modifiedFile}](${netlifyURL + shortform})`); - } - } + if (isSourceDir && isNonIgnoredDir) { + const shortform = modifiedFile.replace('source', '').replace('.txt', ''); + markdownList.push(`[${modifiedFile}](${netlifyURL + shortform})`); + } + } - return markdownList; + return markdownList; } export { 
integration }; diff --git a/git-changed-files/tests/index.test.ts b/git-changed-files/tests/index.test.ts index f804a973c..0dd7cda41 100644 --- a/git-changed-files/tests/index.test.ts +++ b/git-changed-files/tests/index.test.ts @@ -2,37 +2,37 @@ import { expect, test, describe } from 'vitest'; import { createMarkdown } from '../src/index'; describe('Test displaying URLs for changed files in source directory ', () => { - test('It displays files only changed in source', () => { - const exampleModifiedFiles = [ - 'source/legacy.txt', - 'source/trial.txt', - 'new.html', - ]; - const netlifyURL = 'testing.com'; - const output = [ - '[source/legacy.txt](testing.com/legacy)', - '[source/trial.txt](testing.com/trial)', - ]; + test('It displays files only changed in source', () => { + const exampleModifiedFiles = [ + 'source/legacy.txt', + 'source/trial.txt', + 'new.html', + ]; + const netlifyURL = 'testing.com'; + const output = [ + '[source/legacy.txt](testing.com/legacy)', + '[source/trial.txt](testing.com/trial)', + ]; - expect(createMarkdown(exampleModifiedFiles, netlifyURL)).toStrictEqual( - output, - ); - }); + expect(createMarkdown(exampleModifiedFiles, netlifyURL)).toStrictEqual( + output, + ); + }); - test('Test displaying URLs for changed files in source directory except for images', () => { - const exampleModifiedFiles = [ - 'source/legacy.txt', - 'source/trial.txt', - 'source/images/picture.png', - ]; - const netlifyURL = 'testing.com'; - const output = [ - '[source/legacy.txt](testing.com/legacy)', - '[source/trial.txt](testing.com/trial)', - ]; + test('Test displaying URLs for changed files in source directory except for images', () => { + const exampleModifiedFiles = [ + 'source/legacy.txt', + 'source/trial.txt', + 'source/images/picture.png', + ]; + const netlifyURL = 'testing.com'; + const output = [ + '[source/legacy.txt](testing.com/legacy)', + '[source/trial.txt](testing.com/trial)', + ]; - expect(createMarkdown(exampleModifiedFiles, netlifyURL)).toStrictEqual( - output, - ); - }); + expect(createMarkdown(exampleModifiedFiles, netlifyURL)).toStrictEqual( + output, + ); + }); }); diff --git a/git-changed-files/tsconfig.json b/git-changed-files/tsconfig.json index e7a464905..45617c086 100644 --- a/git-changed-files/tsconfig.json +++ b/git-changed-files/tsconfig.json @@ -1,10 +1,10 @@ { - "compilerOptions": { - "target": "ES2022", - "module": "ES2022", - "moduleResolution": "bundler", - "strict": true, - "rootDir": "." - }, - "exclude": ["node_modules", "dist"] + "compilerOptions": { + "target": "ES2022", + "module": "ES2022", + "moduleResolution": "bundler", + "strict": true, + "rootDir": "." 
+ }, + "exclude": ["node_modules", "dist"] } diff --git a/git-changed-files/vitest.config.ts b/git-changed-files/vitest.config.ts index 7fff64e77..60de9efb2 100644 --- a/git-changed-files/vitest.config.ts +++ b/git-changed-files/vitest.config.ts @@ -1,8 +1,8 @@ import { defineConfig } from 'vitest/config'; export default defineConfig({ - test: { - name: 'test-suite', - root: './tests', - }, + test: { + name: 'test-suite', + root: './tests', + }, }); diff --git a/package.json b/package.json index 1e76a3955..dcffeb449 100644 --- a/package.json +++ b/package.json @@ -1,16 +1,16 @@ { - "name": "netlify-integrations", - "description": "", - "version": "1.0.0", - "main": "index.js", - "scripts": { - "test": "echo \"Error: no test specified\" && exit 1", - "lint": "biome ci" - }, - "keywords": [], - "author": "", - "license": "ISC", - "devDependencies": { - "@biomejs/biome": "1.9.1" - } + "name": "netlify-integrations", + "description": "", + "version": "1.0.0", + "main": "index.js", + "scripts": { + "test": "echo \"Error: no test specified\" && exit 1", + "lint": "biome ci" + }, + "keywords": [], + "author": "", + "license": "ISC", + "devDependencies": { + "@biomejs/biome": "1.9.1" + } } diff --git a/persistence-module/.netlify/functions/manifest.json b/persistence-module/.netlify/functions/manifest.json index a032e0b51..1e152b90c 100644 --- a/persistence-module/.netlify/functions/manifest.json +++ b/persistence-module/.netlify/functions/manifest.json @@ -1,16 +1,16 @@ { - "functions": [ - { - "bundler": "esbuild", - "buildData": { "runtimeAPIVersion": 1 }, - "mainFile": "/Users/brandonly/Documents/gitrepos/chatbot-netlify-integration/.ntli/site/netlify/functions/handler.js", - "name": "handler", - "priority": 10, - "path": "/Users/brandonly/Documents/gitrepos/chatbot-netlify-integration/.netlify/functions/handler.zip", - "runtime": "js" - } - ], - "system": { "arch": "arm64", "platform": "darwin" }, - "timestamp": 1719950065771, - "version": 1 + "functions": [ + { + "bundler": "esbuild", + "buildData": { "runtimeAPIVersion": 1 }, + "mainFile": "/Users/brandonly/Documents/gitrepos/chatbot-netlify-integration/.ntli/site/netlify/functions/handler.js", + "name": "handler", + "priority": 10, + "path": "/Users/brandonly/Documents/gitrepos/chatbot-netlify-integration/.netlify/functions/handler.zip", + "runtime": "js" + } + ], + "system": { "arch": "arm64", "platform": "darwin" }, + "timestamp": 1719950065771, + "version": 1 } diff --git a/persistence-module/.netlify/state.json b/persistence-module/.netlify/state.json index 45d2975b0..19e76d89c 100644 --- a/persistence-module/.netlify/state.json +++ b/persistence-module/.netlify/state.json @@ -1,3 +1,3 @@ { - "siteId": "956d0b2c-d0f1-4c7e-bd4c-f27c0542b80f" + "siteId": "956d0b2c-d0f1-4c7e-bd4c-f27c0542b80f" } diff --git a/persistence-module/.vscode/launch.json b/persistence-module/.vscode/launch.json index 219b4bc28..289eb6673 100644 --- a/persistence-module/.vscode/launch.json +++ b/persistence-module/.vscode/launch.json @@ -1,16 +1,16 @@ { - // Use IntelliSense to learn about possible attributes. - // Hover to view descriptions of existing attributes. - // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387 - "version": "0.2.0", - "configurations": [ - { - "type": "node", - "request": "launch", - "name": "Launch Program", - "skipFiles": ["/**"], - "program": "${workspaceFolder}/src/index.ts", - "outFiles": ["${workspaceFolder}/**/*.js"] - } - ] + // Use IntelliSense to learn about possible attributes. 
+ // Hover to view descriptions of existing attributes. + // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387 + "version": "0.2.0", + "configurations": [ + { + "type": "node", + "request": "launch", + "name": "Launch Program", + "skipFiles": ["/**"], + "program": "${workspaceFolder}/src/index.ts", + "outFiles": ["${workspaceFolder}/**/*.js"] + } + ] } diff --git a/persistence-module/package.json b/persistence-module/package.json index e204723b1..1b94a2660 100644 --- a/persistence-module/package.json +++ b/persistence-module/package.json @@ -1,25 +1,25 @@ { - "name": "chatbot-netlify-integration", - "version": "0.0.1", - "main": "src/index.ts", - "type": "module", - "scripts": { - "build": "netlify-integration build -a", - "dev": "netlify-integration dev -a", - "preview": "netlify-integration preview", - "test": "vitest" - }, - "dependencies": { - "@netlify/sdk": "^1.60.2-pr-1468.3", - "bson": "^6.8.0", - "mongodb": "^6.8.0", - "typescript": "^5.5.2" - }, - "devDependencies": { - "@netlify/build": "^29.50.2", - "@types/node": "^20.14.9", - "execa": "^6.1.0", - "mongodb-memory-server": "^9.4.0", - "vitest": "^2.0.2" - } + "name": "chatbot-netlify-integration", + "version": "0.0.1", + "main": "src/index.ts", + "type": "module", + "scripts": { + "build": "netlify-integration build -a", + "dev": "netlify-integration dev -a", + "preview": "netlify-integration preview", + "test": "vitest" + }, + "dependencies": { + "@netlify/sdk": "^1.60.2-pr-1468.3", + "bson": "^6.8.0", + "mongodb": "^6.8.0", + "typescript": "^5.5.2" + }, + "devDependencies": { + "@netlify/build": "^29.50.2", + "@types/node": "^20.14.9", + "execa": "^6.1.0", + "mongodb-memory-server": "^9.4.0", + "vitest": "^2.0.2" + } } diff --git a/persistence-module/src/connector.ts b/persistence-module/src/connector.ts index b801dea61..3008335a4 100644 --- a/persistence-module/src/connector.ts +++ b/persistence-module/src/connector.ts @@ -11,7 +11,7 @@ const atlasURL = `mongodb+srv://${process.env.MONGO_ATLAS_USERNAME}:${process.en const client = new mongodb.MongoClient(atlasURL); export const teardown = async () => { - await client.close(); + await client.close(); }; const SNOOTY_DB_NAME = 'snooty_dotcomstg'; @@ -20,14 +20,14 @@ const SNOOTY_DB_NAME = 'snooty_dotcomstg'; let dbInstance: Db; // Handles memoization of db object, and initial connection logic if needs to be initialized export const db = async () => { - if (!dbInstance) { - try { - await client.connect(); - dbInstance = client.db(SNOOTY_DB_NAME); - } catch (error) { - console.error(`Error at db client connection: ${error}`); - throw error; - } - } - return dbInstance; + if (!dbInstance) { + try { + await client.connect(); + dbInstance = client.db(SNOOTY_DB_NAME); + } catch (error) { + console.error(`Error at db client connection: ${error}`); + throw error; + } + } + return dbInstance; }; diff --git a/persistence-module/src/db-operations.ts b/persistence-module/src/db-operations.ts index 550ca576e..aea25d3fe 100644 --- a/persistence-module/src/db-operations.ts +++ b/persistence-module/src/db-operations.ts @@ -2,19 +2,19 @@ import type * as mongodb from 'mongodb'; import { db } from './connector'; export const bulkWrite = async ( - operations: mongodb.AnyBulkWriteOperation[], - collection: string, + operations: mongodb.AnyBulkWriteOperation[], + collection: string, ) => { - const dbSession = await db(); - try { - if (!operations || !operations.length) { - return; - } - return dbSession - .collection(collection) - .bulkWrite(operations, { ordered: false 
}); - } catch (error) { - console.error(`Error at bulk write time for ${collection}: ${error}`); - throw error; - } + const dbSession = await db(); + try { + if (!operations || !operations.length) { + return; + } + return dbSession + .collection(collection) + .bulkWrite(operations, { ordered: false }); + } catch (error) { + console.error(`Error at bulk write time for ${collection}: ${error}`); + throw error; + } }; diff --git a/persistence-module/src/index.ts b/persistence-module/src/index.ts index 4e76aa589..8bcf50a40 100644 --- a/persistence-module/src/index.ts +++ b/persistence-module/src/index.ts @@ -12,38 +12,38 @@ const integration = new NetlifyIntegration(); const ZIP_PATH = `${process.cwd()}/bundle/documents`; integration.addBuildEventHandler( - 'onSuccess', - async ({ utils: { run, git } }) => { - /** - * Minor note that persistence module also handles merging of ToCs for embedded products - */ - console.log('=========== Chatbot Data Upload Integration ================'); + 'onSuccess', + async ({ utils: { run, git } }) => { + /** + * Minor note that persistence module also handles merging of ToCs for embedded products + */ + console.log('=========== Chatbot Data Upload Integration ================'); - const bundleDirExists = existsSync(`${process.cwd()}/bundle`); + const bundleDirExists = existsSync(`${process.cwd()}/bundle`); - if (!bundleDirExists) await run.command('unzip -o bundle.zip -d bundle'); + if (!bundleDirExists) await run.command('unzip -o bundle.zip -d bundle'); - const zipContents = await readdirAsync(ZIP_PATH, { - recursive: true, - }); + const zipContents = await readdirAsync(ZIP_PATH, { + recursive: true, + }); - const bsonPages = zipContents.filter((fileName) => { - const splitFile = fileName.toString().split('.'); + const bsonPages = zipContents.filter((fileName) => { + const splitFile = fileName.toString().split('.'); - return splitFile[splitFile.length - 1] === 'bson'; - }); + return splitFile[splitFile.length - 1] === 'bson'; + }); - const pageAstObjects = await Promise.all( - bsonPages.map(async (bsonFileName) => { - const rawData = await readFileAsync(`${ZIP_PATH}/${bsonFileName}`); + const pageAstObjects = await Promise.all( + bsonPages.map(async (bsonFileName) => { + const rawData = await readFileAsync(`${ZIP_PATH}/${bsonFileName}`); - return deserialize(rawData) as Page; - }), - ); + return deserialize(rawData) as Page; + }), + ); - await updatePages(pageAstObjects, 'updated_documents'); - console.log('=========== Chatbot Data Upload Integration ================'); - }, + await updatePages(pageAstObjects, 'updated_documents'); + console.log('=========== Chatbot Data Upload Integration ================'); + }, ); export { integration }; diff --git a/persistence-module/src/update-pages.ts b/persistence-module/src/update-pages.ts index 77b7fa016..065995157 100644 --- a/persistence-module/src/update-pages.ts +++ b/persistence-module/src/update-pages.ts @@ -4,53 +4,53 @@ import { db } from './connector'; import { bulkWrite } from './db-operations'; interface PreviousPageMapping { - [key: string]: { - ast: PageAst; - static_assets: StaticAsset[]; - }; + [key: string]: { + ast: PageAst; + static_assets: StaticAsset[]; + }; } export interface StaticAsset { - checksum: string; - key: string; - updated_at?: Date; + checksum: string; + key: string; + updated_at?: Date; } export interface PageAst { - [key: string]: unknown; - type: string; - position: Record; - children: PageAst[]; - fileid: string; - options: Record; + [key: string]: unknown; + type: string; + 
position: Record; + children: PageAst[]; + fileid: string; + options: Record; } export interface Page { - page_id: string; - filename: string; - ast: PageAst; - source: string; - static_assets: StaticAsset[]; - github_username: string; + page_id: string; + filename: string; + ast: PageAst; + source: string; + static_assets: StaticAsset[]; + github_username: string; } export interface UpdatedPage extends Page { - created_at: Date; - updated_at: Date; - deleted: boolean; + created_at: Date; + updated_at: Date; + deleted: boolean; } export const GITHUB_USER = 'docs-builder-bot'; export const createPageAstMapping = async (docsCursor: FindCursor) => { - // Create mapping for page id and its AST - const mapping: PreviousPageMapping = {}; - // Create set of all page ids. To be used for tracking unseen pages in the current build - const pageIds = new Set(); - for await (const doc of docsCursor) { - mapping[doc.page_id] = { - ast: doc.ast, - static_assets: doc.static_assets, - }; - pageIds.add(doc.page_id); - } - return { mapping, pageIds }; + // Create mapping for page id and its AST + const mapping: PreviousPageMapping = {}; + // Create set of all page ids. To be used for tracking unseen pages in the current build + const pageIds = new Set(); + for await (const doc of docsCursor) { + mapping[doc.page_id] = { + ast: doc.ast, + static_assets: doc.static_assets, + }; + pageIds.add(doc.page_id); + } + return { mapping, pageIds }; }; /** @@ -62,34 +62,34 @@ export const createPageAstMapping = async (docsCursor: FindCursor) => { * @param collection - The collection to perform the find query on */ const findPrevPageDocs = async ( - pageIdPrefix: string, - collection: string, - githubUser: string, + pageIdPrefix: string, + collection: string, + githubUser: string, ) => { - const dbSession = await db(); - const findQuery = { - page_id: { $regex: new RegExp(`^${pageIdPrefix}/`) }, - github_username: githubUser, - deleted: false, - }; - const projection = { - _id: 0, - page_id: 1, - ast: 1, - static_assets: 1, - }; + const dbSession = await db(); + const findQuery = { + page_id: { $regex: new RegExp(`^${pageIdPrefix}/`) }, + github_username: githubUser, + deleted: false, + }; + const projection = { + _id: 0, + page_id: 1, + ast: 1, + static_assets: 1, + }; - try { - return dbSession - .collection(collection) - .find(findQuery) - .project(projection); - } catch (error) { - console.error( - `Error trying to find previous page documents using prefix ${pageIdPrefix} in ${collection}}: ${error}`, - ); - throw error; - } + try { + return dbSession + .collection(collection) + .find(findQuery) + .project(projection); + } catch (error) { + console.error( + `Error trying to find previous page documents using prefix ${pageIdPrefix} in ${collection}}: ${error}`, + ); + throw error; + } }; /** @@ -101,73 +101,73 @@ const findPrevPageDocs = async ( * @param collection */ export const updatePages = async (pages: Page[], collection: string) => { - if (pages.length === 0) { - return; - } + if (pages.length === 0) { + return; + } - try { - const updateTime = new Date(); - // Find all pages that share the same project name + branch. 
Expects page IDs - // to include these two properties after parse - const pageIdPrefix = pages[0].page_id.split('/').slice(0, 3).join('/'); - const previousPagesCursor = await findPrevPageDocs( - pageIdPrefix, - collection, - GITHUB_USER, - ); - const { mapping: prevPageDocsMapping, pageIds: prevPageIds } = - await createPageAstMapping(previousPagesCursor); + try { + const updateTime = new Date(); + // Find all pages that share the same project name + branch. Expects page IDs + // to include these two properties after parse + const pageIdPrefix = pages[0].page_id.split('/').slice(0, 3).join('/'); + const previousPagesCursor = await findPrevPageDocs( + pageIdPrefix, + collection, + GITHUB_USER, + ); + const { mapping: prevPageDocsMapping, pageIds: prevPageIds } = + await createPageAstMapping(previousPagesCursor); - const operations = [ - ...checkForPageDiffs({ - prevPageDocsMapping, - prevPageIds, - currentPages: pages, - updateTime, - }), - ...markUnseenPagesAsDeleted({ prevPageIds, updateTime }), - ]; + const operations = [ + ...checkForPageDiffs({ + prevPageDocsMapping, + prevPageIds, + currentPages: pages, + updateTime, + }), + ...markUnseenPagesAsDeleted({ prevPageIds, updateTime }), + ]; - if (operations.length > 0) { - await bulkWrite(operations, collection); - } - } catch (error) { - console.error(`Error when trying to update pages: ${error}`); - throw error; - } + if (operations.length > 0) { + await bulkWrite(operations, collection); + } + } catch (error) { + console.error(`Error when trying to update pages: ${error}`); + throw error; + } }; interface MarkUnseenPagesAsDeletedParams { - updateTime: Date; - prevPageIds: Set; + updateTime: Date; + prevPageIds: Set; } function markUnseenPagesAsDeleted({ - prevPageIds, - updateTime, + prevPageIds, + updateTime, }: MarkUnseenPagesAsDeletedParams) { - const operations: AnyBulkWriteOperation[] = []; - prevPageIds.forEach((unseenPageId) => { - const operation = { - updateOne: { - filter: { page_id: unseenPageId, github_username: GITHUB_USER }, - update: { - $set: { - deleted: true, - updated_at: updateTime, - }, - }, - }, - }; - operations.push(operation); - }); - return operations; + const operations: AnyBulkWriteOperation[] = []; + prevPageIds.forEach((unseenPageId) => { + const operation = { + updateOne: { + filter: { page_id: unseenPageId, github_username: GITHUB_USER }, + update: { + $set: { + deleted: true, + updated_at: updateTime, + }, + }, + }, + }; + operations.push(operation); + }); + return operations; } interface CheckForPageDiffsParams { - currentPages: Page[]; - updateTime: Date; - prevPageDocsMapping: PreviousPageMapping; - prevPageIds: Set; + currentPages: Page[]; + updateTime: Date; + prevPageDocsMapping: PreviousPageMapping; + prevPageIds: Set; } /** * Compares the ASTs of the current pages with the previous pages. 
/** * Compares the ASTs of the current pages with the previous pages. New update @@ -175,55 +175,55 @@ interface CheckForPageDiffsParams { * removed from `prevPageIds` to signal that the previous page has been "seen" */ export function checkForPageDiffs({ - currentPages, - updateTime, - prevPageDocsMapping, - prevPageIds, + currentPages, + updateTime, + prevPageDocsMapping, + prevPageIds, }: CheckForPageDiffsParams) { - const operations: AnyBulkWriteOperation[] = []; - currentPages.forEach((page) => { - // Filter out rst (non-page) files - if (!page.filename.endsWith('.txt')) { - return; - } + const operations: AnyBulkWriteOperation[] = []; + currentPages.forEach((page) => { + // Filter out rst (non-page) files + if (!page.filename.endsWith('.txt')) { + return; + }
- const currentPageId = page.page_id; - prevPageIds.delete(currentPageId); - const prevPageData = prevPageDocsMapping[currentPageId]; + const currentPageId = page.page_id; + prevPageIds.delete(currentPageId); + const prevPageData = prevPageDocsMapping[currentPageId];
- // Update the document if page's current AST is different from previous build's. - // New pages should always count as having a "different" AST - if (isEqual(page.ast, prevPageData?.ast)) return; - const operation = { - updateOne: { - filter: { - page_id: currentPageId, - github_username: page.github_username, - }, - update: { - $set: { - page_id: currentPageId, - filename: page.filename, - ast: page.ast, - static_assets: findUpdatedAssets( - page.static_assets, - updateTime, - prevPageData?.static_assets, - ), - updated_at: updateTime, - deleted: false, - // Track the last build ID to update the content - }, - $setOnInsert: { - created_at: updateTime, - }, - }, - upsert: true, - }, - }; - operations.push(operation); - }); - return operations; + // Update the document if page's current AST is different from previous build's. + // New pages should always count as having a "different" AST + if (isEqual(page.ast, prevPageData?.ast)) return; + const operation = { + updateOne: { + filter: { + page_id: currentPageId, + github_username: page.github_username, + }, + update: { + $set: { + page_id: currentPageId, + filename: page.filename, + ast: page.ast, + static_assets: findUpdatedAssets( + page.static_assets, + updateTime, + prevPageData?.static_assets, + ), + updated_at: updateTime, + deleted: false, + // Track the last build ID to update the content + }, + $setOnInsert: { + created_at: updateTime, + }, + }, + upsert: true, + }, + }; + operations.push(operation); + }); + return operations; }
/** @@ -243,44 +243,44 @@ export function checkForPageDiffs({ * @param prevPageAssets */ function findUpdatedAssets( - currentPageAssets: StaticAsset[], - updateTime: Date, - prevPageAssets?: StaticAsset[], + currentPageAssets: StaticAsset[], + updateTime: Date, + prevPageAssets?: StaticAsset[], ) { - const updatedAssets: StaticAsset[] = []; - if ( - currentPageAssets && - currentPageAssets.length === 0 && - prevPageAssets && - prevPageAssets.length === 0 - ) { - return updatedAssets; - } + const updatedAssets: StaticAsset[] = []; + if ( + currentPageAssets && + currentPageAssets.length === 0 && + prevPageAssets && + prevPageAssets.length === 0 + ) { + return updatedAssets; + }
- const prevAssetMapping: Record<string, { key: string; updated_at: Date }> = - {}; - if (prevPageAssets) { - prevPageAssets.forEach((asset) => { - prevAssetMapping[asset.checksum] = { - key: asset.key, - updated_at: asset.updated_at ?? updateTime, - }; - }); - } + const prevAssetMapping: Record<string, { key: string; updated_at: Date }> = + {}; + if (prevPageAssets) { + prevPageAssets.forEach((asset) => { + prevAssetMapping[asset.checksum] = { + key: asset.key, + updated_at: asset.updated_at ?? updateTime, + }; + }); + }
- currentPageAssets.forEach(({ checksum, key }) => { - const prevAsset = prevAssetMapping[checksum]; - // Edge case: check to ensure previous asset exists with the same checksum, - // but different key/filename. This can happen if an image is renamed - const isSame = prevAsset && prevAsset.key === key; - // Most common case: no change in asset; we keep the updated time the same - const timeOfUpdate = isSame ? prevAsset.updated_at : updateTime; - updatedAssets.push({ - checksum, - key, - updated_at: timeOfUpdate, - }); - }); + currentPageAssets.forEach(({ checksum, key }) => { + const prevAsset = prevAssetMapping[checksum]; + // Edge case: check to ensure previous asset exists with the same checksum, + // but different key/filename. This can happen if an image is renamed + const isSame = prevAsset && prevAsset.key === key; + // Most common case: no change in asset; we keep the updated time the same + const timeOfUpdate = isSame ? prevAsset.updated_at : updateTime; + updatedAssets.push({ + checksum, + key, + updated_at: timeOfUpdate, + }); + });
- return updatedAssets; + return updatedAssets; }
diff --git a/persistence-module/test/index.test.ts b/persistence-module/test/index.test.ts index 8893bc962..9b81429bf 100644 --- a/persistence-module/test/index.test.ts +++ b/persistence-module/test/index.test.ts @@ -1,8 +1,8 @@ import { expect, test } from 'vitest'; function sum(a: number, b: number) { - return a + b; + return a + b; } test('adds 1 + 2 to equal 3', () => { - expect(sum(1, 2)).toBe(3); + expect(sum(1, 2)).toBe(3); });
diff --git a/persistence-module/test/update-pages.test.ts b/persistence-module/test/update-pages.test.ts index 70398d8ab..8c15cb3c6 100644 --- a/persistence-module/test/update-pages.test.ts +++ b/persistence-module/test/update-pages.test.ts @@ -1,128 +1,128 @@ import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'; import { - GITHUB_USER, - type Page, - type UpdatedPage, - updatePages, + GITHUB_USER, + type Page, + type UpdatedPage, + updatePages, } from '../src/update-pages'; import { getMockDb } from './utils/mockDb'; const COLLECTION_NAME = 'updated_documents';
beforeEach(async () => { - vi.mock('../src/connector', async () => { - const { getMockDb, teardownMockDbClient } = await import('./utils/mockDb');
- return { - teardown: teardownMockDbClient, - db: async () => { - const db = await getMockDb(); - return db; - }, - }; - }); + vi.mock('../src/connector', async () => { + const { getMockDb, teardownMockDbClient } = await import('./utils/mockDb');
+ return { + teardown: teardownMockDbClient, + db: async () => { + const db = await getMockDb(); + return db; + }, + }; + }); });
afterEach(async () => { - const { teardownMockDbClient } = await import('./utils/mockDb');
- await teardownMockDbClient(); + const { teardownMockDbClient } = await import('./utils/mockDb');
+ await teardownMockDbClient(); });
describe('Update Pages Unit Tests', () => { - it('inserts a new document', async () => { - const testPages: Page[] = [ - { - page_id: 'page0.txt', - filename: 'page0.txt', - github_username: GITHUB_USER, - source: '', - ast: { - type: 'root', - fileid: 'page0.txt', - options: {}, - children: [], - foo: 'foo', - bar: { foo: 'foo' }, - position: { - start: { - line: { - $numberInt: '0', - }, - }, - }, - }, -
static_assets: [], - }, - ]; - await updatePages(testPages, COLLECTION_NAME); + it('inserts a new document', async () => { + const testPages: Page[] = [ + { + page_id: 'page0.txt', + filename: 'page0.txt', + github_username: GITHUB_USER, + source: '', + ast: { + type: 'root', + fileid: 'page0.txt', + options: {}, + children: [], + foo: 'foo', + bar: { foo: 'foo' }, + position: { + start: { + line: { + $numberInt: '0', + }, + }, + }, + }, + static_assets: [], + }, + ]; + await updatePages(testPages, COLLECTION_NAME); - const db = await getMockDb(); - const updatedDocuments = db.collection(COLLECTION_NAME); - const documentsCursor = updatedDocuments.find({}); - const documents = []; - for await (const doc of documentsCursor) { - documents.push(doc); - } + const db = await getMockDb(); + const updatedDocuments = db.collection(COLLECTION_NAME); + const documentsCursor = updatedDocuments.find({}); + const documents = []; + for await (const doc of documentsCursor) { + documents.push(doc); + } - expect(documents.length).toEqual(1); - const document = documents[0]; + expect(documents.length).toEqual(1); + const document = documents[0]; - expect(document.created_at.getTime()).toEqual( - document.updated_at.getTime(), - ); - }); - it('updates the original document and provides a new time stamp', async () => { - const testPages: Page[] = [ - { - page_id: 'page0.txt', - filename: 'page0.txt', - github_username: GITHUB_USER, - source: '', - ast: { - type: 'root', - fileid: 'page0.txt', - options: {}, - children: [], - foo: 'foo', - bar: { foo: 'foo' }, - position: { - start: { - line: { - $numberInt: '0', - }, - }, - }, - }, - static_assets: [], - }, - ]; - const NUM_RUNS = 2; - for (let i = 0; i < NUM_RUNS; i++) { - await updatePages(testPages, COLLECTION_NAME); + expect(document.created_at.getTime()).toEqual( + document.updated_at.getTime(), + ); + }); + it('updates the original document and provides a new time stamp', async () => { + const testPages: Page[] = [ + { + page_id: 'page0.txt', + filename: 'page0.txt', + github_username: GITHUB_USER, + source: '', + ast: { + type: 'root', + fileid: 'page0.txt', + options: {}, + children: [], + foo: 'foo', + bar: { foo: 'foo' }, + position: { + start: { + line: { + $numberInt: '0', + }, + }, + }, + }, + static_assets: [], + }, + ]; + const NUM_RUNS = 2; + for (let i = 0; i < NUM_RUNS; i++) { + await updatePages(testPages, COLLECTION_NAME); - let now = new Date().getTime(); + let now = new Date().getTime(); - const oneSecLater = now + 1000; + const oneSecLater = now + 1000; - while (now < oneSecLater) { - now = new Date().getTime(); - } - } + while (now < oneSecLater) { + now = new Date().getTime(); + } + } - const db = await getMockDb(); - const updatedDocuments = db.collection(COLLECTION_NAME); - const documentsCursor = updatedDocuments.find({}); - const documents = []; - for await (const doc of documentsCursor) { - documents.push(doc); - } + const db = await getMockDb(); + const updatedDocuments = db.collection(COLLECTION_NAME); + const documentsCursor = updatedDocuments.find({}); + const documents = []; + for await (const doc of documentsCursor) { + documents.push(doc); + } - expect(documents.length).toEqual(1); - const document = documents[0]; + expect(documents.length).toEqual(1); + const document = documents[0]; - expect(document.created_at.getTime()).toBeLessThan( - document.updated_at.getTime(), - ); - }); + expect(document.created_at.getTime()).toBeLessThan( + document.updated_at.getTime(), + ); + }); }); diff --git 
a/persistence-module/test/utils/mockDb.ts b/persistence-module/test/utils/mockDb.ts index c928dd1ed..080553b09 100644 --- a/persistence-module/test/utils/mockDb.ts +++ b/persistence-module/test/utils/mockDb.ts @@ -4,25 +4,25 @@ import * as mongodb from 'mongodb'; let client: mongodb.MongoClient; export async function teardownMockDbClient() { - if (!client) return; + if (!client) return; - await client.close(); + await client.close(); } export async function getMockDbClient() { - if (client) { - await client.connect(); - return client; - } - const mongod = await MongoMemoryServer.create(); + if (client) { + await client.connect(); + return client; + } + const mongod = await MongoMemoryServer.create(); - const uri = mongod.getUri(); - client = new mongodb.MongoClient(uri); - await client.connect(); - return client; + const uri = mongod.getUri(); + client = new mongodb.MongoClient(uri); + await client.connect(); + return client; } export async function getMockDb() { - const client = await getMockDbClient(); - return client.db('test_db'); + const client = await getMockDbClient(); + return client.db('test_db'); } diff --git a/persistence-module/tsconfig.json b/persistence-module/tsconfig.json index 5a3eebc48..cfd7d3953 100644 --- a/persistence-module/tsconfig.json +++ b/persistence-module/tsconfig.json @@ -1,13 +1,13 @@ { - "compilerOptions": { - "target": "ES2022", - "module": "ES2022", - "moduleResolution": "bundler", - "strict": true, - "rootDir": ".", - "paths": { - "bson": ["./node_modules/bson/src/"] - } - }, - "exclude": ["node_modules", "dist"] + "compilerOptions": { + "target": "ES2022", + "module": "ES2022", + "moduleResolution": "bundler", + "strict": true, + "rootDir": ".", + "paths": { + "bson": ["./node_modules/bson/src/"] + } + }, + "exclude": ["node_modules", "dist"] } diff --git a/persistence-module/vitest.config.ts b/persistence-module/vitest.config.ts index a766fca83..824db0fc4 100644 --- a/persistence-module/vitest.config.ts +++ b/persistence-module/vitest.config.ts @@ -1,7 +1,7 @@ import { defineConfig } from 'vite'; export default defineConfig({ - test: { - name: 'test-suite', - }, + test: { + name: 'test-suite', + }, }); diff --git a/redoc/.netlify/functions/manifest.json b/redoc/.netlify/functions/manifest.json index 67da80b3a..4686ffac7 100644 --- a/redoc/.netlify/functions/manifest.json +++ b/redoc/.netlify/functions/manifest.json @@ -1,16 +1,16 @@ { - "functions": [ - { - "bundler": "esbuild", - "buildData": { "runtimeAPIVersion": 1 }, - "mainFile": "/Users/brandonly/Documents/gitrepos/redoc-integration/.ntli/site/netlify/functions/handler.js", - "name": "handler", - "priority": 10, - "path": "/Users/brandonly/Documents/gitrepos/redoc-integration/.netlify/functions/handler.zip", - "runtime": "js" - } - ], - "system": { "arch": "arm64", "platform": "darwin" }, - "timestamp": 1720541102438, - "version": 1 + "functions": [ + { + "bundler": "esbuild", + "buildData": { "runtimeAPIVersion": 1 }, + "mainFile": "/Users/brandonly/Documents/gitrepos/redoc-integration/.ntli/site/netlify/functions/handler.js", + "name": "handler", + "priority": 10, + "path": "/Users/brandonly/Documents/gitrepos/redoc-integration/.netlify/functions/handler.zip", + "runtime": "js" + } + ], + "system": { "arch": "arm64", "platform": "darwin" }, + "timestamp": 1720541102438, + "version": 1 } diff --git a/redoc/.netlify/state.json b/redoc/.netlify/state.json index 300137a43..e2cd053c9 100644 --- a/redoc/.netlify/state.json +++ b/redoc/.netlify/state.json @@ -1,3 +1,3 @@ { - "siteId": 
"07fb77ec-7e92-414c-adf4-0f9af0770b20" + "siteId": "07fb77ec-7e92-414c-adf4-0f9af0770b20" } diff --git a/redoc/package.json b/redoc/package.json index b1da077f3..ebde7b6e3 100644 --- a/redoc/package.json +++ b/redoc/package.json @@ -1,23 +1,23 @@ { - "name": "redoc-integration", - "version": "0.0.1", - "main": "src/index.ts", - "type": "module", - "scripts": { - "build": "netlify-integration build -a", - "dev": "netlify-integration dev -a", - "preview": "netlify-integration preview" - }, - "dependencies": { - "@netlify/sdk": "^1.60.2-pr-1468.3", - "bson": "^6.8.0", - "mongodb": "^6.8.0", - "node-fetch": "^3.3.2", - "typescript": "^5.5.3" - }, - "devDependencies": { - "@netlify/build": "^29.50.5", - "@types/node": "^20.14.10", - "execa": "^6.1.0" - } + "name": "redoc-integration", + "version": "0.0.1", + "main": "src/index.ts", + "type": "module", + "scripts": { + "build": "netlify-integration build -a", + "dev": "netlify-integration dev -a", + "preview": "netlify-integration preview" + }, + "dependencies": { + "@netlify/sdk": "^1.60.2-pr-1468.3", + "bson": "^6.8.0", + "mongodb": "^6.8.0", + "node-fetch": "^3.3.2", + "typescript": "^5.5.3" + }, + "devDependencies": { + "@netlify/build": "^29.50.5", + "@types/node": "^20.14.10", + "execa": "^6.1.0" + } } diff --git a/redoc/src/atlas.ts b/redoc/src/atlas.ts index 9b4161bcf..fac9851e6 100644 --- a/redoc/src/atlas.ts +++ b/redoc/src/atlas.ts @@ -3,154 +3,154 @@ import { COLLECTION_NAME, db } from './utils/db'; const env = process.env.SNOOTY_ENV ?? ''; const OAS_FILE_SERVER = - env === 'dotcomprd' - ? 'https://mongodb-mms-prod-build-server.s3.amazonaws.com/openapi/' - : 'https://mongodb-mms-build-server.s3.amazonaws.com/openapi/'; + env === 'dotcomprd' + ? 'https://mongodb-mms-prod-build-server.s3.amazonaws.com/openapi/' + : 'https://mongodb-mms-build-server.s3.amazonaws.com/openapi/'; const GIT_HASH_URL = - env === 'dotcomprd' - ? 'https://cloud.mongodb.com/version' - : 'https://cloud-dev.mongodb.com/version'; + env === 'dotcomprd' + ? 
'https://cloud.mongodb.com/version' + : 'https://cloud-dev.mongodb.com/version';
export interface OASFile { - api: string; - fileContent: string; - gitHash: string; - lastUpdated: string; - versions: VersionData; + api: string; + fileContent: string; + gitHash: string; + lastUpdated: string; + versions: VersionData; }
export type OASFilePartial = Pick<OASFile, 'gitHash' | 'versions'>;
export const findLastSavedVersionData = async (apiKeyword: string) => { - const dbSession = await db(); - try { - const projection = { gitHash: 1, versions: 1 }; - const filter = { api: apiKeyword }; - const oasFilesCollection = dbSession.collection(COLLECTION_NAME); - return oasFilesCollection.findOne(filter, { - projection, - }); - } catch (error) { - console.error(`Error fetching latest git hash for API: ${apiKeyword}.`); - throw error; - } + const dbSession = await db(); + try { + const projection = { gitHash: 1, versions: 1 }; + const filter = { api: apiKeyword }; + const oasFilesCollection = dbSession.collection(COLLECTION_NAME); + return oasFilesCollection.findOne(filter, { + projection, + }); + } catch (error) { + console.error(`Error fetching latest git hash for API: ${apiKeyword}.`); + throw error; + } };
interface AtlasSpecUrlParams { - apiKeyword: string; - apiVersion?: string; - resourceVersion?: string; - latestResourceVersion?: string; + apiKeyword: string; + apiVersion?: string; + resourceVersion?: string; + latestResourceVersion?: string; }
export const getAtlasSpecUrl = async ({ - apiKeyword, - apiVersion, - resourceVersion, - latestResourceVersion, + apiKeyword, + apiVersion, + resourceVersion, + latestResourceVersion, }: AtlasSpecUrlParams) => { - // Currently, the only expected API fetched programmatically is the Cloud Admin API, - // but it's possible to have more in the future with varying processes. - const keywords = ['cloud']; - if (!keywords.includes(apiKeyword)) { - throw new Error(`${apiKeyword} is not a supported API for building.`); - } -
- const versionExtension = `${ - apiVersion ? `-v${apiVersion.split('.')[0]}` : '' - }${ - apiVersion && resourceVersion - ? `-${resourceVersion}` - : apiVersion && latestResourceVersion && !resourceVersion - ? `-${latestResourceVersion}` - : '' - }`; -
- let oasFileURL; - let successfulGitHash = true; -
- try { - const gitHash = await fetchGitHash(); - oasFileURL = `${OAS_FILE_SERVER}${gitHash}${versionExtension}.json`; -
- // Sometimes the latest git hash might not have a fully available spec file yet. - // If this is the case, we should default to using the last successfully saved - // hash in our database. - await fetchTextData(oasFileURL, `Error fetching data from ${oasFileURL}`); - } catch (e) { - const unsuccessfulOasFileURL = oasFileURL; - successfulGitHash = false; -
- const res = await findLastSavedVersionData(apiKeyword); - if (res) { - ensureSavedVersionDataMatches(res.versions, apiVersion, resourceVersion); - oasFileURL = `${OAS_FILE_SERVER}${res.gitHash}${versionExtension}.json`; - console.log(`Error occurred fetching from newest OAS spec at ${unsuccessfulOasFileURL}.\n + // Currently, the only expected API fetched programmatically is the Cloud Admin API, + // but it's possible to have more in the future with varying processes. + const keywords = ['cloud']; + if (!keywords.includes(apiKeyword)) { + throw new Error(`${apiKeyword} is not a supported API for building.`); + } +
+ const versionExtension = `${ + apiVersion ? `-v${apiVersion.split('.')[0]}` : '' + }${ + apiVersion && resourceVersion + ?
`-${resourceVersion}` + : apiVersion && latestResourceVersion && !resourceVersion + ? `-${latestResourceVersion}` + : '' + }`; + + let oasFileURL; + let successfulGitHash = true; + + try { + const gitHash = await fetchGitHash(); + oasFileURL = `${OAS_FILE_SERVER}${gitHash}${versionExtension}.json`; + + // Sometimes the latest git hash might not have a fully available spec file yet. + // If this is the case, we should default to using the last successfully saved + // hash in our database. + await fetchTextData(oasFileURL, `Error fetching data from ${oasFileURL}`); + } catch (e) { + const unsuccessfulOasFileURL = oasFileURL; + successfulGitHash = false; + + const res = await findLastSavedVersionData(apiKeyword); + if (res) { + ensureSavedVersionDataMatches(res.versions, apiVersion, resourceVersion); + oasFileURL = `${OAS_FILE_SERVER}${res.gitHash}${versionExtension}.json`; + console.log(`Error occurred fetching from newest OAS spec at ${unsuccessfulOasFileURL}.\n This error is a rare but expected result of upload timing between gitHashes and specs.\n If you see this error multiple times, let the DOP team know!\n\n Using last successfully fetched OAS spec at ${oasFileURL}!`); - } else { - throw new Error(`Could not find a saved hash for API: ${apiKeyword}`); - } - } - - return { - oasFileURL, - successfulGitHash, - }; + } else { + throw new Error(`Could not find a saved hash for API: ${apiKeyword}`); + } + } + + return { + oasFileURL, + successfulGitHash, + }; }; const fetchTextData = async (url: string, errMsg: string) => { - const res = await fetch(url); - if (!res.ok) { - // Error should be caught when creating pages. - throw new Error(`${errMsg}; ${res.statusText}`); - } - return res.text(); + const res = await fetch(url); + if (!res.ok) { + // Error should be caught when creating pages. + throw new Error(`${errMsg}; ${res.statusText}`); + } + return res.text(); }; export interface VersionData { - [k: string]: string[]; + [k: string]: string[]; } function ensureSavedVersionDataMatches( - versions: VersionData, - apiVersion?: string, - resourceVersion?: string, + versions: VersionData, + apiVersion?: string, + resourceVersion?: string, ) { - // Check that requested versions are included in saved version data - if (apiVersion) { - if ( - !versions.major.includes(apiVersion) || - (resourceVersion && !versions[apiVersion].includes(resourceVersion)) - ) { - throw new Error(`Last successful build data does not include necessary version data:\n + // Check that requested versions are included in saved version data + if (apiVersion) { + if ( + !versions.major.includes(apiVersion) || + (resourceVersion && !versions[apiVersion].includes(resourceVersion)) + ) { + throw new Error(`Last successful build data does not include necessary version data:\n Version requested: ${apiVersion}${ - resourceVersion ? ` - ${resourceVersion}` : '' - }`); - } - } + resourceVersion ? 
` - ${resourceVersion}` : '' + }`); + } + } } function createFetchGitHash() { - let gitHashCache: string; - return { - fetchGitHash: async () => { - if (gitHashCache) return gitHashCache; - try { - const gitHash = await fetchTextData( - GIT_HASH_URL, - 'Could not find current version or git hash', - ); - gitHashCache = gitHash; - return gitHash; - } catch (e) { - console.error(e); - throw new Error('Unsuccessful git hash fetch'); - } - }, - resetGitHashCache: () => { - gitHashCache = ''; - }, - }; + let gitHashCache: string; + return { + fetchGitHash: async () => { + if (gitHashCache) return gitHashCache; + try { + const gitHash = await fetchTextData( + GIT_HASH_URL, + 'Could not find current version or git hash', + ); + gitHashCache = gitHash; + return gitHash; + } catch (e) { + console.error(e); + throw new Error('Unsuccessful git hash fetch'); + } + }, + resetGitHashCache: () => { + gitHashCache = ''; + }, + }; } const { fetchGitHash, resetGitHashCache } = createFetchGitHash(); diff --git a/redoc/src/build-pages.ts b/redoc/src/build-pages.ts index bf7e59ac7..319479884 100644 --- a/redoc/src/build-pages.ts +++ b/redoc/src/build-pages.ts @@ -5,246 +5,246 @@ import { writeFileAsync } from './utils/fs-async'; import { db } from './utils/db'; export interface RedocVersionOptions { - active: { - apiVersion: string; - resourceVersion: string; - }; - rootUrl: string; - resourceVersions: string[]; + active: { + apiVersion: string; + resourceVersion: string; + }; + rootUrl: string; + resourceVersions: string[]; } const GIT_HASH_URL = 'https://cloud-dev.mongodb.com/version'; const COLLECTION_NAME = 'oas_files'; const OAS_FILE_SERVER = - 'https://mongodb-mms-build-server.s3.amazonaws.com/openapi/'; + 'https://mongodb-mms-build-server.s3.amazonaws.com/openapi/'; export const normalizePath = (path: string) => path.replace(/\/+/g, '/'); export const normalizeUrl = (url: string) => { - const urlObject = new URL(url); - urlObject.pathname = normalizePath(urlObject.pathname); - return urlObject.href; + const urlObject = new URL(url); + urlObject.pathname = normalizePath(urlObject.pathname); + return urlObject.href; }; interface GetOASpecParams { - sourceType: string; - source: string; - output: string; - pageSlug: string; - siteUrl: string; - siteTitle: string; - resourceVersions?: string[]; - apiVersion?: string; - resourceVersion?: string; + sourceType: string; + source: string; + output: string; + pageSlug: string; + siteUrl: string; + siteTitle: string; + resourceVersions?: string[]; + apiVersion?: string; + resourceVersion?: string; } export async function getBuildOasSpecCommand({ - source, - sourceType, - pageSlug, - output, - siteUrl, - siteTitle, - apiVersion, - resourceVersion, + source, + sourceType, + pageSlug, + output, + siteUrl, + siteTitle, + apiVersion, + resourceVersion, }: GetOASpecParams) { - try { - let spec = ''; - let isSuccessfulBuild = true; - - if (sourceType === 'url') { - spec = source; - } else if (sourceType === 'local') { - const localFilePath = `source${source}`; - spec = localFilePath; - } else if (sourceType === 'atlas') { - const { oasFileURL, successfulGitHash } = await getAtlasSpecUrl({ - apiKeyword: source, - apiVersion, - resourceVersion, - }); - - spec = oasFileURL; - isSuccessfulBuild = successfulGitHash; - } else { - throw new Error( - `Unsupported source type "${sourceType}" for ${pageSlug}`, - ); - } - - const path = `${output}/${pageSlug}/index.html`; - const finalFilename = normalizePath(path); - await writeFileAsync( - `${process.cwd()}/options.json`, - 
JSON.stringify({ siteUrl, siteTitle, ignoreIncompatibleTypes: true }), - ); - return `node ${process.cwd()}/redoc/cli/index.js build ${spec} --output ${finalFilename} --options ${process.cwd()}/options.json`; - } catch (e) { - console.error(e); - return ''; - } + try { + let spec = ''; + let isSuccessfulBuild = true; +
+ if (sourceType === 'url') { + spec = source; + } else if (sourceType === 'local') { + const localFilePath = `source${source}`; + spec = localFilePath; + } else if (sourceType === 'atlas') { + const { oasFileURL, successfulGitHash } = await getAtlasSpecUrl({ + apiKeyword: source, + apiVersion, + resourceVersion, + }); +
+ spec = oasFileURL; + isSuccessfulBuild = successfulGitHash; + } else { + throw new Error( + `Unsupported source type "${sourceType}" for ${pageSlug}`, + ); + } +
+ const path = `${output}/${pageSlug}/index.html`; + const finalFilename = normalizePath(path); + await writeFileAsync( + `${process.cwd()}/options.json`, + JSON.stringify({ siteUrl, siteTitle, ignoreIncompatibleTypes: true }), + ); + return `node ${process.cwd()}/redoc/cli/index.js build ${spec} --output ${finalFilename} --options ${process.cwd()}/options.json`; + } catch (e) { + console.error(e); + return ''; + } }
interface PageBuilderOptions { - siteTitle: string; - siteUrl: string; + siteTitle: string; + siteUrl: string; }
const fetchTextData = async (url: string, errMsg: string) => { - const res = await fetch(url); - if (!res.ok) { - // Error should be caught when creating pages. - throw new Error(`${errMsg}; ${res.statusText}`); - } - return res.text(); + const res = await fetch(url); + if (!res.ok) { + // Error should be caught when creating pages. + throw new Error(`${errMsg}; ${res.statusText}`); + } + return res.text(); };
const createFetchGitHash = () => { - let gitHashCache: string; - return { - fetchGitHash: async () => { - if (gitHashCache) return gitHashCache; - try { - const gitHash = await fetchTextData( - GIT_HASH_URL, - 'Could not find current version or git hash', - ); - gitHashCache = gitHash; - return gitHash; - } catch (e) { - console.error(e); - throw new Error('Unsuccessful git hash fetch'); - } - }, - resetGitHashCache: () => { - gitHashCache = ''; - }, - }; + let gitHashCache: string; + return { + fetchGitHash: async () => { + if (gitHashCache) return gitHashCache; + try { + const gitHash = await fetchTextData( + GIT_HASH_URL, + 'Could not find current version or git hash', + ); + gitHashCache = gitHash; + return gitHash; + } catch (e) { + console.error(e); + throw new Error('Unsuccessful git hash fetch'); + } + }, + resetGitHashCache: () => { + gitHashCache = ''; + }, + }; };
const { fetchGitHash, resetGitHashCache } = createFetchGitHash();
export const fetchVersionData = async (gitHash: string, serverURL: string) => { - const versionUrl = `${serverURL}${gitHash}-api-versions.json`; - const res = await fetch(versionUrl); - const { versions } = await res.json(); - return versions; + const versionUrl = `${serverURL}${gitHash}-api-versions.json`; + const res = await fetch(versionUrl); + const { versions } = await res.json(); + return versions; };
export interface OASFile { - api: string; - fileContent: string; - gitHash: string; - lastUpdated: string; - versions: Record<string, string[]>; + api: string; + fileContent: string; + gitHash: string; + lastUpdated: string; + versions: Record<string, string[]>; }
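`fetchVersionData` above expects the build server to serve `${gitHash}-api-versions.json` with a `versions` field in the same `Record<string, string[]>` shape stored on `OASFile`: a `major` list plus one resource-version list per major version (the shape `ensureSavedVersionDataMatches` in atlas.ts checks against). A hedged sketch of such a payload and a minimal runtime guard; the sample values are invented:

// Invented sample of a `<gitHash>-api-versions.json` body.
const sample = {
  versions: {
    major: ['2.0'],
    '2.0': ['2024-05-30'],
  } as Record<string, string[]>,
};

// Minimal guard before trusting a fetched payload.
function hasVersions(value: unknown): value is { versions: Record<string, string[]> } {
  return (
    typeof value === 'object' &&
    value !== null &&
    typeof (value as { versions?: unknown }).versions === 'object'
  );
}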
export const saveSuccessfulBuildVersionData = async ( - apiKeyword: string, - gitHash: string, - versionData: Record<string, string[]>, + apiKeyword: string, + gitHash: string, + versionData: Record<string, string[]>, ) => { - const dbSession = await db(); - try { - const query = { - api: apiKeyword, - }; - const update = { - $set: { - gitHash: gitHash, - versions: versionData, - lastUpdated: new Date().toISOString(), - }, - }; - const options = { - upsert: true, - }; -
- const oasFilesCollection = dbSession.collection(COLLECTION_NAME); - await oasFilesCollection.updateOne(query, update, options); - } catch (error) { - console.error( - `Error updating latest git hash and versions for API: ${apiKeyword}.`, - ); - throw error; - } + const dbSession = await db(); + try { + const query = { + api: apiKeyword, + }; + const update = { + $set: { + gitHash: gitHash, + versions: versionData, + lastUpdated: new Date().toISOString(), + }, + }; + const options = { + upsert: true, + }; +
+ const oasFilesCollection = dbSession.collection(COLLECTION_NAME); + await oasFilesCollection.updateOne(query, update, options); + } catch (error) { + console.error( + `Error updating latest git hash and versions for API: ${apiKeyword}.`, + ); + throw error; + } };
export async function buildOpenAPIPages( - entries: [string, OASPageMetadata][], - { siteUrl, siteTitle }: PageBuilderOptions, - run: NetlifyPluginUtils['run'], + entries: [string, OASPageMetadata][], + { siteUrl, siteTitle }: PageBuilderOptions, + run: NetlifyPluginUtils['run'], ) { - for (const [pageSlug, data] of entries) { - const { - source_type: sourceType, - source, - api_version: apiVersion, - resource_versions: resourceVersions, - } = data; -
- let isSuccessfulBuild = true; -
- if (resourceVersions) { - const isRunSuccessfulArray = await Promise.all( - resourceVersions.map(async (resourceVersion) => { - // if a resource versions array is provided, then we can loop through the resourceVersions array and call the getOASpec - // for each minor version - try { - const command = await getBuildOasSpecCommand({ - source, - sourceType, - output: `${process.cwd()}/snooty/public`, - pageSlug, - siteUrl, - siteTitle, - apiVersion, - resourceVersions, - resourceVersion, - }); -
- await run.command(command); -
- return true; - } catch (e) { - console.error('an error occurred', e); -
- return false; - } - }), - ); - isSuccessfulBuild = isRunSuccessfulArray.every( - (isSuccessful) => isSuccessful, - ); - } -
- try { - const command = await getBuildOasSpecCommand({ - source, - sourceType, - output: `${process.cwd()}/snooty/public`, - pageSlug, - siteUrl, - siteTitle, - apiVersion, - }); - await run.command(command); -
- isSuccessfulBuild = true; - } catch (e) { - console.error('an error occurred', e); -
- isSuccessfulBuild = false; - } -
- // If all builds successful, persist git hash and version data in db - if (isSuccessfulBuild && sourceType === 'atlas') { - try { - const gitHash = await fetchGitHash(); - const versions = await fetchVersionData(gitHash, OAS_FILE_SERVER); - await saveSuccessfulBuildVersionData(source, gitHash, versions); - } catch (e) { - console.error(e); - } - } - resetGitHashCache(); - } + for (const [pageSlug, data] of entries) { + const { + source_type: sourceType, + source, + api_version: apiVersion, + resource_versions: resourceVersions, + } = data; +
+ let isSuccessfulBuild = true; +
+ if (resourceVersions) { + const isRunSuccessfulArray = await Promise.all( + resourceVersions.map(async (resourceVersion) => { + // if a resource versions array is provided, then we can loop through the resourceVersions array and call the getOASpec + // for each minor version + try { + const command = await getBuildOasSpecCommand({ + source, + sourceType, + output: `${process.cwd()}/snooty/public`, + pageSlug, + siteUrl, + siteTitle, + apiVersion, + resourceVersions, + resourceVersion, + }); +
+ await run.command(command); +
+ return true; + } catch (e) { + console.error('an error occurred', e); +
+ return false; + } + }), + ); + isSuccessfulBuild = isRunSuccessfulArray.every( + (isSuccessful) => isSuccessful, + ); + } +
+ try { + const command = await getBuildOasSpecCommand({ + source, + sourceType, + output: `${process.cwd()}/snooty/public`, + pageSlug, + siteUrl, + siteTitle, + apiVersion, + }); + await run.command(command); +
+ isSuccessfulBuild = true; + } catch (e) { + console.error('an error occurred', e); +
+ isSuccessfulBuild = false; + } +
+ // If all builds successful, persist git hash and version data in db + if (isSuccessfulBuild && sourceType === 'atlas') { + try { + const gitHash = await fetchGitHash(); + const versions = await fetchVersionData(gitHash, OAS_FILE_SERVER); + await saveSuccessfulBuildVersionData(source, gitHash, versions); + } catch (e) { + console.error(e); + } + } + resetGitHashCache(); + } }
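Each command string produced by `getBuildOasSpecCommand` above is executed through the build's `run.command` utility, with an empty string signaling a failed build. A rough usage sketch under those assumptions; the API keyword and site title are placeholders:

import type { NetlifyPluginUtils } from '@netlify/build';

// Rough sketch: build one OpenAPI page outside the main loop.
async function buildOnePage(run: NetlifyPluginUtils['run'], pageSlug: string) {
  const command = await getBuildOasSpecCommand({
    source: 'cloud', // placeholder API keyword
    sourceType: 'atlas',
    output: `${process.cwd()}/snooty/public`,
    pageSlug,
    siteUrl: process.env.DEPLOY_PRIME_URL || '',
    siteTitle: 'Example Docs Site', // placeholder
  });
  if (!command) return false; // getBuildOasSpecCommand returns '' on error
  await run.command(command);
  return true;
}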
diff --git a/redoc/src/index.ts b/redoc/src/index.ts index a39e30da3..fc0fdad5f 100644 --- a/redoc/src/index.ts +++ b/redoc/src/index.ts @@ -8,71 +8,71 @@ const BUNDLE_PATH = `${process.cwd()}/bundle`; const REDOC_CLI_VERSION = '1.2.3';
export interface OASPageMetadata { - source_type: string; - source: string; - api_version?: string; - resource_versions?: string[]; + source_type: string; + source: string; + api_version?: string; + resource_versions?: string[]; }
export type OASPagesMetadata = Record<string, OASPageMetadata>;
// handle installing redoc cli if it's not already installed integration.addBuildEventHandler( - 'onPreBuild', - async ({ utils: { run, cache } }) => { - console.log('Running redoc prebuild'); - const hasRedoc = await cache.has('redoc'); -
- if (hasRedoc) { - console.log('Restoring redoc from cache'); - cache.restore('redoc'); - return; - } -
- await run.command( - `git clone -b @dop/redoc-cli@${REDOC_CLI_VERSION} --depth 1 https://github.com/mongodb-forks/redoc.git redoc`, - ); -
- await run.command('npm ci --prefix cli/ --omit=dev', { - cwd: `${process.cwd()}/redoc`, - }); -
- await cache.save('redoc'); - }, + 'onPreBuild', + async ({ utils: { run, cache } }) => { + console.log('Running redoc prebuild'); + const hasRedoc = await cache.has('redoc'); +
+ if (hasRedoc) { + console.log('Restoring redoc from cache'); + cache.restore('redoc'); + return; + } +
+ await run.command( + `git clone -b @dop/redoc-cli@${REDOC_CLI_VERSION} --depth 1 https://github.com/mongodb-forks/redoc.git redoc`, + ); +
+ await run.command('npm ci --prefix cli/ --omit=dev', { + cwd: `${process.cwd()}/redoc`, + }); +
+ await cache.save('redoc'); + }, );
// handle building the redoc pages integration.addBuildEventHandler('onPostBuild', async ({ utils: { run } }) => { - console.log('=========== Redoc Integration Begin ================'); - await run.command('unzip -o bundle.zip -d bundle'); + console.log('=========== Redoc Integration Begin ================'); + await run.command('unzip -o bundle.zip -d bundle');
- const siteBson = await readFileAsync(`${BUNDLE_PATH}/site.bson`); + const siteBson = await readFileAsync(`${BUNDLE_PATH}/site.bson`);
- const buildMetadata = deserialize(siteBson); - const siteTitle: string = buildMetadata.title; - const openapiPages: OASPagesMetadata | undefined = - buildMetadata.openapi_pages; + const buildMetadata = deserialize(siteBson); + const siteTitle: string = buildMetadata.title; + const openapiPages: OASPagesMetadata |
undefined = + buildMetadata.openapi_pages; - if (!openapiPages) { - console.log('No OpenAPI pages found'); - return; - } + if (!openapiPages) { + console.log('No OpenAPI pages found'); + return; + } - const openapiPagesEntries = Object.entries(openapiPages); - const siteUrl = process.env.DEPLOY_PRIME_URL || ''; + const openapiPagesEntries = Object.entries(openapiPages); + const siteUrl = process.env.DEPLOY_PRIME_URL || ''; - await buildOpenAPIPages(openapiPagesEntries, { siteTitle, siteUrl }, run); + await buildOpenAPIPages(openapiPagesEntries, { siteTitle, siteUrl }, run); - console.log('=========== Redoc Integration End ================'); + console.log('=========== Redoc Integration End ================'); }); // cache redoc integration.addBuildEventHandler('onSuccess', async ({ utils: { cache } }) => { - const hasRedoc = await cache.has('redoc'); - if (!hasRedoc) { - console.log('saving redoc to cache'); - await cache.save('redoc'); - } + const hasRedoc = await cache.has('redoc'); + if (!hasRedoc) { + console.log('saving redoc to cache'); + await cache.save('redoc'); + } }); export { integration }; diff --git a/redoc/src/utils/db.ts b/redoc/src/utils/db.ts index d40431581..f41addae2 100644 --- a/redoc/src/utils/db.ts +++ b/redoc/src/utils/db.ts @@ -3,11 +3,11 @@ import { MongoClient, type Db } from 'mongodb'; export const COLLECTION_NAME = 'oas_files'; const getAtlasURL = () => { - const isHostLocal = process.env.DB_HOST?.includes('localhost'); - if (isHostLocal) { - return `mongodb://${process.env.MONGO_ATLAS_HOST}/?retryWrites=true&w=majority`; - } - return `mongodb+srv://${process.env.MONGO_ATLAS_USERNAME}:${process.env.MONGO_ATLAS_PASSWORD}@${process.env.MONGO_ATLAS_HOST}/?retryWrites=true&w=majority`; + const isHostLocal = process.env.DB_HOST?.includes('localhost'); + if (isHostLocal) { + return `mongodb://${process.env.MONGO_ATLAS_HOST}/?retryWrites=true&w=majority`; + } + return `mongodb+srv://${process.env.MONGO_ATLAS_USERNAME}:${process.env.MONGO_ATLAS_PASSWORD}@${process.env.MONGO_ATLAS_HOST}/?retryWrites=true&w=majority`; }; const atlasURL = getAtlasURL(); @@ -16,38 +16,38 @@ const client = new MongoClient(atlasURL); let dbInstance: Db; export const teardown = async () => { - await client.close(); + await client.close(); }; const getDbName = () => { - const env = process.env.SNOOTY_ENV ?? ''; + const env = process.env.SNOOTY_ENV ?? 
''; - switch (env) { - // Autobuilder's prd env - case 'production': - return 'snooty_prod'; - case 'dotcomprd': - return 'snooty_dotcomprd'; - // Autobuilder's pre-prd env - case 'staging': - return 'snooty_stage'; - case 'dotcomstg': - return 'snooty_dotcomstg'; - default: - // snooty_dotcomprd.oas_files should be guaranteed to have the latest data - return 'snooty_dotcomprd'; - } + switch (env) { + // Autobuilder's prd env + case 'production': + return 'snooty_prod'; + case 'dotcomprd': + return 'snooty_dotcomprd'; + // Autobuilder's pre-prd env + case 'staging': + return 'snooty_stage'; + case 'dotcomstg': + return 'snooty_dotcomstg'; + default: + // snooty_dotcomprd.oas_files should be guaranteed to have the latest data + return 'snooty_dotcomprd'; + } }; export const db = async () => { - if (!dbInstance) { - try { - await client.connect(); - const dbName = getDbName(); - dbInstance = client.db(dbName); - } catch (error) { - console.error(`Error at db client connection: ${error}`); - throw error; - } - } - return dbInstance; + if (!dbInstance) { + try { + await client.connect(); + const dbName = getDbName(); + dbInstance = client.db(dbName); + } catch (error) { + console.error(`Error at db client connection: ${error}`); + throw error; + } + } + return dbInstance; }; diff --git a/redoc/tsconfig.json b/redoc/tsconfig.json index 5a3eebc48..cfd7d3953 100644 --- a/redoc/tsconfig.json +++ b/redoc/tsconfig.json @@ -1,13 +1,13 @@ { - "compilerOptions": { - "target": "ES2022", - "module": "ES2022", - "moduleResolution": "bundler", - "strict": true, - "rootDir": ".", - "paths": { - "bson": ["./node_modules/bson/src/"] - } - }, - "exclude": ["node_modules", "dist"] + "compilerOptions": { + "target": "ES2022", + "module": "ES2022", + "moduleResolution": "bundler", + "strict": true, + "rootDir": ".", + "paths": { + "bson": ["./node_modules/bson/src/"] + } + }, + "exclude": ["node_modules", "dist"] } diff --git a/search-manifest/src/generateManifest/createFacets.ts b/search-manifest/src/generateManifest/createFacets.ts index 0777da562..1f7ca54cb 100644 --- a/search-manifest/src/generateManifest/createFacets.ts +++ b/search-manifest/src/generateManifest/createFacets.ts @@ -1,25 +1,25 @@ import { NetlifyIntegration } from '@netlify/sdk'; export class Facet { - category: any; - value: any; - subFacets: any; + category: any; + value: any; + subFacets: any; - constructor(category: string, value: string, subFacets: []) { - this.category = category; - this.value = value; - this.subFacets = []; + constructor(category: string, value: string, subFacets: []) { + this.category = category; + this.value = value; + this.subFacets = []; - if (subFacets) { - for (const subFacet of subFacets) { - this.subFacets.push( - new Facet( - subFacet['category'], - subFacet['value'], - subFacet['sub_facets'] ?? [], - ), - ); - } - } - } + if (subFacets) { + for (const subFacet of subFacets) { + this.subFacets.push( + new Facet( + subFacet['category'], + subFacet['value'], + subFacet['sub_facets'] ?? 
[], + ), + ); + } + } + } } diff --git a/search-manifest/src/generateManifest/document.ts b/search-manifest/src/generateManifest/document.ts index 5b5549a7b..8f8c5cd23 100644 --- a/search-manifest/src/generateManifest/document.ts +++ b/search-manifest/src/generateManifest/document.ts @@ -1,256 +1,256 @@ -import { JSONPath } from "jsonpath-plus"; -import { Facet } from "./createFacets"; -import { ManifestEntry } from "./manifestEntry"; -import type { BSON } from "bson"; +import { JSONPath } from 'jsonpath-plus'; +import { Facet } from './createFacets'; +import { ManifestEntry } from './manifestEntry'; +import type { BSON } from 'bson'; export class Document { - //Return indexing data from a page's JSON-formatted AST for search purposes - tree: any; - robots: any; - keywords: any; - description: any; - paragraphs: string; - code: { lang: string; value: any }[]; - title: any; - headings: any; - slug: string; - preview?: string; - facets: any; - noIndex: any; - reasons: any; + //Return indexing data from a page's JSON-formatted AST for search purposes + tree: any; + robots: any; + keywords: any; + description: any; + paragraphs: string; + code: { lang: string; value: any }[]; + title: any; + headings: any; + slug: string; + preview?: string; + facets: any; + noIndex: any; + reasons: any; constructor(doc: BSON.Document) { this.tree = doc; - //find metadata - [this.robots, this.keywords, this.description] = this.findMetadata(); - //find paragraphs - this.paragraphs = this.findParagraphs(); - //find code - this.code = this.findCode(); - - //find title, headings - [this.title, this.headings] = this.findHeadings(); - - //derive slug - this.slug = this.deriveSlug(); - - //derive preview - this.preview = this.derivePreview(); - - //derive facets - this.facets = deriveFacets(this.tree); - - //noindex, reasons - [this.noIndex, this.reasons] = this.getNoIndex(); - } - - findMetadata() { - let robots = true; //can be set in the rst if the page is supposed to be crawled - let keywords: string | null = null; //keywords is an optional list of strings - let description: string | null = null; //this can be optional?? - - const results = JSONPath({ - path: "$..children[?(@.name=='meta')]..options", - json: this.tree, - }); - if (results.length) { - if (results.length > 1) - console.log( - "length of results is greater than one, it's: " + results.length, - ); - const val = results[0]; - //check if robots, set to false if no robots - if ('robots' in val && (val.robots == 'None' || val.robots == 'noindex')) - robots = false; + //find metadata + [this.robots, this.keywords, this.description] = this.findMetadata(); + //find paragraphs + this.paragraphs = this.findParagraphs(); + //find code + this.code = this.findCode(); + + //find title, headings + [this.title, this.headings] = this.findHeadings(); + + //derive slug + this.slug = this.deriveSlug(); + + //derive preview + this.preview = this.derivePreview(); + + //derive facets + this.facets = deriveFacets(this.tree); + + //noindex, reasons + [this.noIndex, this.reasons] = this.getNoIndex(); + } + + findMetadata() { + let robots = true; //can be set in the rst if the page is supposed to be crawled + let keywords: string | null = null; //keywords is an optional list of strings + let description: string | null = null; //this can be optional?? 
+ + const results = JSONPath({ + path: "$..children[?(@.name=='meta')]..options", + json: this.tree, + }); + if (results.length) { + if (results.length > 1) + console.log( + "length of results is greater than one, it's: " + results.length, + ); + const val = results[0]; + //check if robots, set to false if no robots + if ('robots' in val && (val.robots == 'None' || val.robots == 'noindex')) + robots = false; keywords = val?.keywords; description = val?.description; } - return [robots, keywords, description]; - } - - findParagraphs() { - let paragraphs = ''; - - const results = JSONPath({ - path: "$..children[?(@.type=='paragraph')]..value", - json: this.tree, - }); - - for (const r of results) { - paragraphs += ' ' + r; - } - return paragraphs.trim(); - } - - findCode() { - const results = JSONPath({ - path: "$..children[?(@.type=='code')]", - json: this.tree, - }); - - const codeContents = []; - for (const r of results) { - const lang = r.lang ?? null; - codeContents.push({ lang: lang, value: r.value }); - } - return codeContents; - } - - findHeadings() { - const headings: string[] = []; - let title = ''; - // Get the children of headings nodes - - const results = JSONPath({ - path: "$..children[?(@.type=='heading')].children", - json: this.tree, - }); - - //no heading nodes found?? page doesn't have title, or headings - if (!results.length) return [title, headings]; - - for (const r of results) { - const heading = []; - const parts = JSONPath({ - path: '$..value', - json: r, - }); - - //add a check in case there is no parts found - for (const part of parts) { - // add a check in case there is no value field found - heading.push(part); - } - headings.push(heading.join()); - } - - title = headings.shift() ?? ''; - return [title, headings]; - } - - deriveSlug() { - let pageId = this.tree['filename']?.split('.')[0]; - if (pageId == 'index') pageId = ''; - return pageId; - } - - derivePreview() { - //set preview to the meta description if one is specified - - if (this.description) return this.description; - - // Set preview to the paragraph value that's a child of a 'target' element - // (for reference pages that lead with a target definition) - - let results = JSONPath({ - path: "$..children[?(@.type=='target')].children[?(@.type=='paragraph')]", - json: this.tree, - }); - - if (!results.length) { - // Otherwise attempt to set preview to the first content paragraph on the page, - // excluding admonitions. 
- results = JSONPath({ - path: "$..children[?(@.type=='section')].children[?(@.type=='paragraph')]", - json: this.tree, - }); - } - - if (results.length) { - const strList = []; - - //get value in results - const first = JSONPath({ - path: '$..value', - json: results[0], - }); - - for (const f of first) { - strList.push(f); - } - return strList.join(''); - } - - //else, give up and don't provide a preview - return null; - } - - getNoIndex() { - //determining indexability - - let noIndex = false; - const reasons: string[] = []; - - //if :robots: None in metadata, do not index - if (!this.robots) { - noIndex = true; - reasons.push('robots=None or robots=noindex in meta directive'); - } - - //if page has no title, do not index - if (!this.title) { - noIndex = true; - reasons.push('This page has no headings'); - } - - return [noIndex, reasons]; - } - - exportAsManifestDocument = () => { - // Generate the manifest dictionary entry from the AST source + return [robots, keywords, description]; + } + + findParagraphs() { + let paragraphs = ''; + + const results = JSONPath({ + path: "$..children[?(@.type=='paragraph')]..value", + json: this.tree, + }); + + for (const r of results) { + paragraphs += ' ' + r; + } + return paragraphs.trim(); + } + + findCode() { + const results = JSONPath({ + path: "$..children[?(@.type=='code')]", + json: this.tree, + }); + + const codeContents = []; + for (const r of results) { + const lang = r.lang ?? null; + codeContents.push({ lang: lang, value: r.value }); + } + return codeContents; + } + + findHeadings() { + const headings: string[] = []; + let title = ''; + // Get the children of headings nodes + + const results = JSONPath({ + path: "$..children[?(@.type=='heading')].children", + json: this.tree, + }); + + //no heading nodes found?? page doesn't have title, or headings + if (!results.length) return [title, headings]; + + for (const r of results) { + const heading = []; + const parts = JSONPath({ + path: '$..value', + json: r, + }); + + //add a check in case there is no parts found + for (const part of parts) { + // add a check in case there is no value field found + heading.push(part); + } + headings.push(heading.join()); + } + + title = headings.shift() ?? ''; + return [title, headings]; + } + + deriveSlug() { + let pageId = this.tree['filename']?.split('.')[0]; + if (pageId == 'index') pageId = ''; + return pageId; + } + + derivePreview() { + //set preview to the meta description if one is specified + + if (this.description) return this.description; + + // Set preview to the paragraph value that's a child of a 'target' element + // (for reference pages that lead with a target definition) + + let results = JSONPath({ + path: "$..children[?(@.type=='target')].children[?(@.type=='paragraph')]", + json: this.tree, + }); + + if (!results.length) { + // Otherwise attempt to set preview to the first content paragraph on the page, + // excluding admonitions. 
+ results = JSONPath({ + path: "$..children[?(@.type=='section')].children[?(@.type=='paragraph')]", + json: this.tree, + }); + } + + if (results.length) { + const strList = []; + + //get value in results + const first = JSONPath({ + path: '$..value', + json: results[0], + }); + + for (const f of first) { + strList.push(f); + } + return strList.join(''); + } + + //else, give up and don't provide a preview + return null; + } + + getNoIndex() { + //determining indexability + + let noIndex = false; + const reasons: string[] = []; + + //if :robots: None in metadata, do not index + if (!this.robots) { + noIndex = true; + reasons.push('robots=None or robots=noindex in meta directive'); + } + + //if page has no title, do not index + if (!this.title) { + noIndex = true; + reasons.push('This page has no headings'); + } + + return [noIndex, reasons]; + } + + exportAsManifestDocument = () => { + // Generate the manifest dictionary entry from the AST source if (this.noIndex) { - console.info("Refusing to index"); + console.info('Refusing to index'); return; } - const document = new ManifestEntry({ - slug: this.slug, - title: this.title, - headings: this.headings, - paragraphs: this.paragraphs, - code: this.code, - preview: this.preview, - keywords: this.keywords, - facets: this.facets, - }); - - return document; - }; + const document = new ManifestEntry({ + slug: this.slug, + title: this.title, + headings: this.headings, + paragraphs: this.paragraphs, + code: this.code, + preview: this.preview, + keywords: this.keywords, + facets: this.facets, + }); + + return document; + }; } const deriveFacets = (tree: any) => { - //Format facets for ManifestEntry from bson entry tree['facets'] if it exists - - const insertKeyVals = (facet: any, prefix = '') => { - const key = prefix + facet.category; - documentFacets[key] = documentFacets[key] ?? []; - documentFacets[key].push(facet.value); - - if (!facet.subFacets) return; - - for (const subFacet of facet.subFacets) { - insertKeyVals(subFacet, key + '>' + facet.value + '>'); - } - }; - - const createFacet = (facetEntry: any) => { - const facet = new Facet( - facetEntry.category, - facetEntry.value, - facetEntry.sub_facets, - ); - insertKeyVals(facet); - }; - - const documentFacets: any = {}; - if (tree['facets']) { - for (const facetEntry of tree['facets']) { - createFacet(facetEntry); - } - } - return documentFacets; + //Format facets for ManifestEntry from bson entry tree['facets'] if it exists + + const insertKeyVals = (facet: any, prefix = '') => { + const key = prefix + facet.category; + documentFacets[key] = documentFacets[key] ?? 
[]; + documentFacets[key].push(facet.value); + + if (!facet.subFacets) return; + + for (const subFacet of facet.subFacets) { + insertKeyVals(subFacet, key + '>' + facet.value + '>'); + } + }; + + const createFacet = (facetEntry: any) => { + const facet = new Facet( + facetEntry.category, + facetEntry.value, + facetEntry.sub_facets, + ); + insertKeyVals(facet); + }; + + const documentFacets: any = {}; + if (tree['facets']) { + for (const facetEntry of tree['facets']) { + createFacet(facetEntry); + } + } + return documentFacets; }; diff --git a/search-manifest/src/generateManifest/manifest.ts b/search-manifest/src/generateManifest/manifest.ts index 7476f6e1a..a96b4df54 100644 --- a/search-manifest/src/generateManifest/manifest.ts +++ b/search-manifest/src/generateManifest/manifest.ts @@ -5,11 +5,11 @@ export class Manifest { global: boolean; documents: ManifestEntry[]; - constructor(url = '', includeInGlobalSearch = false) { - this.url = url; - this.documents = []; - this.global = includeInGlobalSearch; - } + constructor(url = '', includeInGlobalSearch = false) { + this.url = url; + this.documents = []; + this.global = includeInGlobalSearch; + } addDocument(document: ManifestEntry) { //Add a document to the manifest diff --git a/search-manifest/src/generateManifest/manifestEntry.ts b/search-manifest/src/generateManifest/manifestEntry.ts index 7254aee7b..a75d037aa 100644 --- a/search-manifest/src/generateManifest/manifestEntry.ts +++ b/search-manifest/src/generateManifest/manifestEntry.ts @@ -1,4 +1,4 @@ -import type { Facet } from "./createFacets"; +import type { Facet } from './createFacets'; //change this to an interface export class ManifestEntry { diff --git a/search-manifest/src/index.ts b/search-manifest/src/index.ts index 94189fb91..fa74ca26d 100644 --- a/search-manifest/src/index.ts +++ b/search-manifest/src/index.ts @@ -1,16 +1,16 @@ // Documentation: https://sdk.netlify.com -import { NetlifyIntegration } from "@netlify/sdk"; -import { Manifest } from "./generateManifest/manifest"; -import { promisify } from "util"; -import { BSON } from "bson"; -import { Document } from "./generateManifest/document"; -import { uploadManifest } from "./uploadToAtlas/uploadManifest"; - -import { readdir, readFileSync } from "fs"; -import getProperties from "./uploadToAtlas/getProperties"; -import { uploadManifestToS3 } from "./uploadToS3/uploadManifest"; -import { teardown } from "./uploadToAtlas/searchConnector"; -import { s3UploadParams } from "./types"; +import { NetlifyIntegration } from '@netlify/sdk'; +import { Manifest } from './generateManifest/manifest'; +import { promisify } from 'util'; +import { BSON } from 'bson'; +import { Document } from './generateManifest/document'; +import { uploadManifest } from './uploadToAtlas/uploadManifest'; + +import { readdir, readFileSync } from 'fs'; +import getProperties from './uploadToAtlas/getProperties'; +import { uploadManifestToS3 } from './uploadToS3/uploadManifest'; +import { teardown } from './uploadToAtlas/searchConnector'; +import { s3UploadParams } from './types'; const readdirAsync = promisify(readdir); @@ -19,21 +19,21 @@ const integration = new NetlifyIntegration(); export const generateManifest = async () => { // create Manifest object const manifest = new Manifest(); - console.log("=========== generating manifests ================"); + console.log('=========== generating manifests ================'); //go into documents directory and get list of file entries - const entries = await readdirAsync("documents", { recursive: true }); + const 
entries = await readdirAsync('documents', { recursive: true }); const mappedEntries = entries.filter((fileName) => { return ( - fileName.includes(".bson") && - !fileName.includes("images") && - !fileName.includes("includes") && - !fileName.includes("sharedinclude") + fileName.includes('.bson') && + !fileName.includes('images') && + !fileName.includes('includes') && + !fileName.includes('sharedinclude') ); }); - process.chdir("documents"); + process.chdir('documents'); for (const entry of mappedEntries) { //each file is read and decoded const decoded = BSON.deserialize(readFileSync(`${entry}`)); @@ -48,17 +48,17 @@ export const generateManifest = async () => { //Return indexing data from a page's AST for search purposes. integration.addBuildEventHandler( - "onSuccess", + 'onSuccess', async ({ utils: { run }, netlifyConfig }) => { // Get content repo zipfile in AST representation. - await run.command("unzip -o bundle.zip"); - const branch = netlifyConfig.build?.environment["BRANCH"]; + await run.command('unzip -o bundle.zip'); + const branch = netlifyConfig.build?.environment['BRANCH']; //use export function for uploading to S3 const manifest = await generateManifest(); - console.log("=========== finished generating manifests ================"); + console.log('=========== finished generating manifests ================'); const { searchProperty, projectName, @@ -72,12 +72,12 @@ integration.addBuildEventHandler( } = await getProperties(branch); //uploads manifests to S3 - console.log("=========== Uploading Manifests to S3================="); + console.log('=========== Uploading Manifests to S3================='); //upload manifests to S3 const uploadParams: s3UploadParams = { - bucket: "docs-search-indexes-test", + bucket: 'docs-search-indexes-test', //TODO: change this values based on environments - prefix: "search-indexes/ab-testing", + prefix: 'search-indexes/ab-testing', fileName: `${projectName}-${branch}.json`, manifest: manifest.export(), }; @@ -85,22 +85,22 @@ integration.addBuildEventHandler( const s3Status = await uploadManifestToS3(uploadParams); console.log(`S3 upload status: ${JSON.stringify(s3Status)}`); - console.log("=========== Finished Uploading to S3 ================"); + console.log('=========== Finished Uploading to S3 ================'); try { manifest.url = url; manifest.global = includeInGlobalSearch; //uploads manifests to atlas - console.log("=========== Uploading Manifests ================="); + console.log('=========== Uploading Manifests ================='); await uploadManifest(manifest, searchProperty); - console.log("=========== Manifests uploaded to Atlas ================="); + console.log('=========== Manifests uploaded to Atlas ================='); } catch (e) { - console.log("Manifest could not be uploaded", e); + console.log('Manifest could not be uploaded', e); } finally { teardown(); } - } + }, ); export { integration }; diff --git a/search-manifest/src/types.ts b/search-manifest/src/types.ts index 73bdb896e..91c68ce33 100644 --- a/search-manifest/src/types.ts +++ b/search-manifest/src/types.ts @@ -1,5 +1,5 @@ -import type { WithId } from "mongodb"; -import type { ManifestEntry } from "./generateManifest/manifestEntry"; +import type { WithId } from 'mongodb'; +import type { ManifestEntry } from './generateManifest/manifestEntry'; export type RefreshInfo = { deleted: number; diff --git a/search-manifest/src/uploadToAtlas/deleteStale.ts b/search-manifest/src/uploadToAtlas/deleteStale.ts index 85ad833ad..b0fa2edc0 100644 --- 
a/search-manifest/src/uploadToAtlas/deleteStale.ts +++ b/search-manifest/src/uploadToAtlas/deleteStale.ts @@ -1,41 +1,41 @@ export const deleteStaleDocuments = async ( - searchProperty: string, - manifestRevisionId: string, + searchProperty: string, + manifestRevisionId: string, ) => { - console.debug(`Removing old documents`); - return { - deleteMany: { - filter: { - searchProperty: searchProperty, - manifestRevisionId: { $ne: manifestRevisionId }, - }, - }, - }; - // const deleteResult = await collection.deleteMany( - // { - // searchProperty: searchProperty, - // manifestRevisionId: { $ne: manifestRevisionId }, - // }, - // { session } - // ); - // status.deleted += - // deleteResult.deletedCount === undefined ? 0 : deleteResult.deletedCount; - // console.debug( - // `Removed ${deleteResult.deletedCount} entries from ${collection.collectionName}` - // ); + console.debug(`Removing old documents`); + return { + deleteMany: { + filter: { + searchProperty: searchProperty, + manifestRevisionId: { $ne: manifestRevisionId }, + }, + }, + }; + // const deleteResult = await collection.deleteMany( + // { + // searchProperty: searchProperty, + // manifestRevisionId: { $ne: manifestRevisionId }, + // }, + // { session } + // ); + // status.deleted += + // deleteResult.deletedCount === undefined ? 0 : deleteResult.deletedCount; + // console.debug( + // `Removed ${deleteResult.deletedCount} entries from ${collection.collectionName}` + // ); }; export const deleteStaleProperties = async ( - searchProperty: string, - manifestRevisionId: string, + searchProperty: string, + manifestRevisionId: string, ) => { - console.debug(`Removing old documents`); - return { - deleteMany: { - filter: { - searchProperty: searchProperty, - manifestRevisionId: { $ne: manifestRevisionId }, - }, - }, - }; + console.debug(`Removing old documents`); + return { + deleteMany: { + filter: { + searchProperty: searchProperty, + manifestRevisionId: { $ne: manifestRevisionId }, + }, + }, + }; }; diff --git a/search-manifest/src/uploadToAtlas/deleteStaleProperties.ts b/search-manifest/src/uploadToAtlas/deleteStaleProperties.ts index 35b1d4cba..49547dd6e 100644 --- a/search-manifest/src/uploadToAtlas/deleteStaleProperties.ts +++ b/search-manifest/src/uploadToAtlas/deleteStaleProperties.ts @@ -7,10 +7,10 @@ const ATLAS_SEARCH_URI = `mongodb+srv://${process.env.MONGO_ATLAS_USERNAME}:${pr const SEARCH_DB_NAME = `${process.env.MONGO_ATLAS_SEARCH_DB_NAME}`; export const deleteStaleProperties = async (searchProperty: string) => { - const dbSession = await db({ uri: ATLAS_SEARCH_URI, dbName: SEARCH_DB_NAME }); - const documentsColl = dbSession.collection('documents'); - console.debug('Removing old documents'); - const query = { searchProperty: { $regex: searchProperty } }; - const status = await documentsColl?.deleteMany(query); - return status; + const dbSession = await db({ uri: ATLAS_SEARCH_URI, dbName: SEARCH_DB_NAME }); + const documentsColl = dbSession.collection('documents'); + console.debug('Removing old documents'); + const query = { searchProperty: { $regex: searchProperty } }; + const status = await documentsColl?.deleteMany(query); + return status; }; diff --git a/search-manifest/src/uploadToAtlas/getProperties.ts b/search-manifest/src/uploadToAtlas/getProperties.ts index bd99aaa80..a5786b8aa 100644 --- a/search-manifest/src/uploadToAtlas/getProperties.ts +++ b/search-manifest/src/uploadToAtlas/getProperties.ts @@ -1,13 +1,13 @@ -import { type Collection, type Db, Document, WithId } from "mongodb"; -import { db, teardown } from 
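// NOTE: illustrative sketch, not part of the diff. deleteStaleDocuments above now
// returns a bulkWrite operation description instead of executing the delete itself;
// a caller such as uploadManifest can combine it with its upserts like this
// (the function and variable names here are hypothetical):
import type { AnyBulkWriteOperation, Collection } from 'mongodb';
import type { DatabaseDocument } from '../types';
import { deleteStaleDocuments } from './deleteStale';

async function writeManifestOps(
  documentsColl: Collection<DatabaseDocument>,
  upserts: AnyBulkWriteOperation<DatabaseDocument>[],
  searchProperty: string,
  manifestRevisionId: string,
) {
  const operations = [
    ...upserts,
    await deleteStaleDocuments(searchProperty, manifestRevisionId),
  ];
  // ordered: true keeps the upserts ahead of the stale-document cleanup
  return documentsColl.bulkWrite(operations, { ordered: true });
}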
"./searchConnector"; +import { type Collection, type Db, Document, WithId } from 'mongodb'; +import { db, teardown } from './searchConnector'; import type { BranchEntry, DatabaseDocument, DocsetsDocument, ReposBranchesDocument, -} from "../types"; -import { assertTrailingSlash } from "../utils"; -import { deleteStaleProperties } from "./deleteStaleProperties"; +} from '../types'; +import { assertTrailingSlash } from '../utils'; +import { deleteStaleProperties } from './deleteStaleProperties'; // helper function to find the associated branch export const getBranch = (branches: Array, branchName: string) => { @@ -27,15 +27,15 @@ const getProperties = async (branchName: string) => { //check that an environment variable for repo name was set if (!REPO_NAME) { throw new Error( - "No repo name supplied as environment variable, manifest cannot be uploaded to Atlas Search.Documents collection " + 'No repo name supplied as environment variable, manifest cannot be uploaded to Atlas Search.Documents collection ', ); } let dbSession: Db; let repos_branches: Collection; let docsets: Collection; - let url = ""; - let searchProperty = ""; + let url = ''; + let searchProperty = ''; let includeInGlobalSearch = false; let repo: ReposBranchesDocument | null; let docsetRepo: DocsetsDocument | null; @@ -44,8 +44,8 @@ const getProperties = async (branchName: string) => { try { //connect to database and get repos_branches, docsets collections dbSession = await db({ uri: ATLAS_CLUSTER0_URI, dbName: SNOOTY_DB_NAME }); - repos_branches = dbSession.collection("repos_branches"); - docsets = dbSession.collection("docsets"); + repos_branches = dbSession.collection('repos_branches'); + docsets = dbSession.collection('docsets'); } catch (e) { throw new Error(`issue starting session for Snooty Pool Database ${e}`); } @@ -68,8 +68,8 @@ const getProperties = async (branchName: string) => { if (!repo) { throw new Error( `Could not get repos_branches entry for repo ${REPO_NAME}, ${repo}, ${JSON.stringify( - query - )}` + query, + )}`, ); } } catch (e) { @@ -85,7 +85,7 @@ const getProperties = async (branchName: string) => { if (docsetRepo) { //TODO: change based on environment url = assertTrailingSlash( - docsetRepo.url?.dotcomprd + docsetRepo.prefix.dotcomprd + docsetRepo.url?.dotcomprd + docsetRepo.prefix.dotcomprd, ); } } catch (e) { @@ -96,7 +96,7 @@ const getProperties = async (branchName: string) => { try { const { isStableBranch, gitBranchName, active, urlSlug } = getBranch( repo.branches, - branchName + branchName, ); includeInGlobalSearch = isStableBranch; version = urlSlug || gitBranchName; @@ -110,13 +110,13 @@ const getProperties = async (branchName: string) => { // deletestaleproperties here for ALL manifests beginning with this repo? or just for this project-version searchproperty await deleteStaleProperties(project); throw new Error( - `Search manifest should not be generated for repo ${REPO_NAME}. Removing all associated manifests` + `Search manifest should not be generated for repo ${REPO_NAME}. Removing all associated manifests`, ); } if (!active) { deleteStaleProperties(searchProperty); throw new Error( - `Search manifest should not be generated for inactive version ${version} of repo ${REPO_NAME}. Removing all associated manifests` + `Search manifest should not be generated for inactive version ${version} of repo ${REPO_NAME}. 
Removing all associated manifests`, ); } } catch (e) { diff --git a/search-manifest/src/uploadToAtlas/searchConnector.ts b/search-manifest/src/uploadToAtlas/searchConnector.ts index 00d9beb7e..bf816d5e6 100644 --- a/search-manifest/src/uploadToAtlas/searchConnector.ts +++ b/search-manifest/src/uploadToAtlas/searchConnector.ts @@ -8,7 +8,7 @@ let dbInstance: Db; let client: mongodb.MongoClient; export const teardown = async () => { - await client.close(); + await client.close(); }; // Handles memoization of db object, and initial connection logic if needs to be initialized diff --git a/search-manifest/src/uploadToAtlas/uploadManifest.ts b/search-manifest/src/uploadToAtlas/uploadManifest.ts index a0bd36783..97827e3b4 100644 --- a/search-manifest/src/uploadToAtlas/uploadManifest.ts +++ b/search-manifest/src/uploadToAtlas/uploadManifest.ts @@ -1,8 +1,8 @@ -import type { Manifest } from "../generateManifest/manifest"; -import { db, teardown } from "./searchConnector"; -import assert from "assert"; -import type { RefreshInfo, DatabaseDocument } from "../types"; -import { generateHash, joinUrl } from "../utils"; +import type { Manifest } from '../generateManifest/manifest'; +import { db, teardown } from './searchConnector'; +import assert from 'assert'; +import type { RefreshInfo, DatabaseDocument } from '../types'; +import { generateHash, joinUrl } from '../utils'; const ATLAS_SEARCH_URI = `mongodb+srv://${process.env.MONGO_ATLAS_USERNAME}:${process.env.MONGO_ATLAS_PASSWORD}@${process.env.MONGO_ATLAS_SEARCH_HOST}/?retryWrites=true&w=majority`; @@ -13,14 +13,14 @@ const composeUpserts = async ( manifest: Manifest, searchProperty: string, lastModified: Date, - hash: string + hash: string, ) => { const documents = manifest.documents; return documents.map((document) => { - assert.strictEqual(typeof document.slug, "string"); - assert.ok(document.slug || document.slug === ""); + assert.strictEqual(typeof document.slug, 'string'); + assert.ok(document.slug || document.slug === ''); - document.strippedSlug = document.slug.replaceAll("/", ""); + document.strippedSlug = document.slug.replaceAll('/', ''); const newDocument: DatabaseDocument = { ...document, @@ -46,11 +46,11 @@ const composeUpserts = async ( export const uploadManifest = async ( manifest: Manifest, - searchProperty: string + searchProperty: string, ) => { //check that manifest documents exist if (!manifest?.documents?.length) { - return Promise.reject(new Error("Invalid manifest")); + return Promise.reject(new Error('Invalid manifest')); } //start a session let documentsColl; @@ -59,9 +59,9 @@ export const uploadManifest = async ( uri: ATLAS_SEARCH_URI, dbName: SEARCH_DB_NAME, }); - documentsColl = dbSession.collection("documents"); + documentsColl = dbSession.collection('documents'); } catch (e) { - console.error("issue starting session for Search Database", e); + console.error('issue starting session for Search Database', e); } const status: RefreshInfo = { deleted: 0, @@ -80,7 +80,7 @@ export const uploadManifest = async ( manifest, searchProperty, lastModified, - hash + hash, ); const operations = [...upserts]; @@ -88,8 +88,8 @@ export const uploadManifest = async ( //check property types console.info(`Starting transaction`); - assert.strictEqual(typeof manifest.global, "boolean"); - assert.strictEqual(typeof hash, "string"); + assert.strictEqual(typeof manifest.global, 'boolean'); + assert.strictEqual(typeof hash, 'string'); assert.ok(hash); try { @@ -108,7 +108,7 @@ export const uploadManifest = async ( return status; } catch (e) { 
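// NOTE (illustrative, not part of the diff): composeUpserts above normalizes each
// document before the bulk write. A slug such as 'fundamentals/aggregation' becomes
// strippedSlug 'fundamentalsaggregation' via document.slug.replaceAll('/', ''), and
// the assert calls reject any document whose slug is missing or not a string. The
// surrounding try/catch reports the bulkWrite outcome through the RefreshInfo
// status object (upserted, modified, and deleted counts).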
throw new Error(
-      `Error writing upserts to Search.documents collection with error ${e}`
+      `Error writing upserts to Search.documents collection with error ${e}`,
     );
   } finally {
     await teardown();
diff --git a/search-manifest/src/uploadToS3/connectToS3.ts b/search-manifest/src/uploadToS3/connectToS3.ts
index d7dd6bda1..84a04f7ef 100644
--- a/search-manifest/src/uploadToS3/connectToS3.ts
+++ b/search-manifest/src/uploadToS3/connectToS3.ts
@@ -1,18 +1,18 @@
-import { S3Client } from "@aws-sdk/client-s3";
+import { S3Client } from '@aws-sdk/client-s3';
 const AWS_SECRET_ACCESS_KEY = process.env.AWS_S3_SECRET_ACCESS_KEY;
 const AWS_ACCESS_KEY_ID = process.env.AWS_S3_ACCESS_KEY_ID;
 export const connectToS3 = (): S3Client => {
   if (!AWS_SECRET_ACCESS_KEY || !AWS_ACCESS_KEY_ID) {
-    throw new Error("credentials not found");
+    throw new Error('credentials not found');
   }
   const client = new S3Client({
     credentials: {
       accessKeyId: AWS_ACCESS_KEY_ID,
       secretAccessKey: AWS_SECRET_ACCESS_KEY,
     },
-    region: "us-east-2",
+    region: 'us-east-2',
   });
   return client;
 };
diff --git a/search-manifest/src/uploadToS3/uploadManifest.ts b/search-manifest/src/uploadToS3/uploadManifest.ts
index 0921b4f79..56806c956 100644
--- a/search-manifest/src/uploadToS3/uploadManifest.ts
+++ b/search-manifest/src/uploadToS3/uploadManifest.ts
@@ -1,11 +1,11 @@
-import { PutObjectCommand, S3Client } from "@aws-sdk/client-s3";
-import { assertTrailingSlash } from "../utils";
-import { connectToS3 } from "./connectToS3";
-import { s3UploadParams } from "../types";
+import { PutObjectCommand, S3Client } from '@aws-sdk/client-s3';
+import { assertTrailingSlash } from '../utils';
+import { connectToS3 } from './connectToS3';
+import { s3UploadParams } from '../types';
 const upload = async (
   client: S3Client,
-  params: { Bucket: string; Key: string; Body: string }
+  params: { Bucket: string; Key: string; Body: string },
 ) => {
   try {
     const command = new PutObjectCommand(params);
diff --git a/search-manifest/src/utils.ts b/search-manifest/src/utils.ts
index c494157b7..446b85664 100644
--- a/search-manifest/src/utils.ts
+++ b/search-manifest/src/utils.ts
@@ -1,19 +1,19 @@
 import crypto from 'crypto';
 export function generateHash(data: string): Promise<string> {
-  const hash = crypto.createHash('sha256');
+  const hash = crypto.createHash('sha256');
   return new Promise((resolve) => {
-    hash.on("readable", () => {
+    hash.on('readable', () => {
       const data = hash.read();
       if (data) {
-        resolve(data.toString("hex"));
+        resolve(data.toString('hex'));
       }
     });
-    hash.write(data);
-    hash.end();
-  });
+    hash.write(data);
+    hash.end();
+  });
 }
 export function joinUrl({
@@ -23,9 +23,9 @@ export function joinUrl({
   base: string;
   path: string;
 }): string {
-  return base.replace(/\/*$/, "/") + path.replace(/^\/*/, "");
+  return base.replace(/\/*$/, '/') + path.replace(/^\/*/, '');
 }
 export function assertTrailingSlash(path: string): string {
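// NOTE (illustrative, not part of the diff): quick usage sketch for the utils above.
//   await generateHash('docs-node-master');  // sha256 hex digest of the input
//   joinUrl({ base: 'https://mongodb.com/docs/', path: '/drivers/node/' });
//   // => 'https://mongodb.com/docs/drivers/node/' (duplicate slashes collapsed)
//   assertTrailingSlash('docs/atlas');       // => 'docs/atlas/'
-  return path.endsWith('/') ? 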
path : `${path}/`; } diff --git a/search-manifest/tests/integration/deleteStale.test.ts b/search-manifest/tests/integration/deleteStale.test.ts index 7bca6afcf..31bc78aa2 100644 --- a/search-manifest/tests/integration/deleteStale.test.ts +++ b/search-manifest/tests/integration/deleteStale.test.ts @@ -2,8 +2,8 @@ import { describe, expect, test, it, vi } from 'vitest'; function sum(a: number, b: number) { - return a + b; + return a + b; } test('dummy test', () => { - expect(sum(1, 2)).toBe(3); + expect(sum(1, 2)).toBe(3); }); diff --git a/search-manifest/tests/integration/uploadToAtlas.test.ts b/search-manifest/tests/integration/uploadToAtlas.test.ts index 33c48e4ce..93e8814f4 100644 --- a/search-manifest/tests/integration/uploadToAtlas.test.ts +++ b/search-manifest/tests/integration/uploadToAtlas.test.ts @@ -1,11 +1,11 @@ import { - afterAll, - beforeEach, - afterEach, - describe, - expect, - test, - vi, + afterAll, + beforeEach, + afterEach, + describe, + expect, + test, + vi, } from 'vitest'; import { uploadManifest } from '../../src/uploadToAtlas/uploadManifest'; import { Manifest } from '../../src/generateManifest/manifest'; @@ -19,183 +19,183 @@ const PROPERTY_NAME = 'dummyName'; //teardown connections beforeEach(async () => { - vi.mock('../../src/uploadToAtlas/searchConnector', async () => { - const { mockDb, teardownMockDbClient } = await import('../utils/mockDB'); - return { - teardown: teardownMockDbClient, - db: async () => { - const db = await mockDb(); - return db; - }, - }; - }); + vi.mock('../../src/uploadToAtlas/searchConnector', async () => { + const { mockDb, teardownMockDbClient } = await import('../utils/mockDB'); + return { + teardown: teardownMockDbClient, + db: async () => { + const db = await mockDb(); + return db; + }, + }; + }); }); const checkCollection = async () => { - const db = await mockDb(); - const documentCount = await db - .collection('documents') - .estimatedDocumentCount(); - expect(documentCount).toEqual(0); + const db = await mockDb(); + const documentCount = await db + .collection('documents') + .estimatedDocumentCount(); + expect(documentCount).toEqual(0); }; afterAll(async () => { - //teardown db instance - const { teardownMockDbClient } = await import('../utils/mockDB'); - await teardownMockDbClient(); + //teardown db instance + const { teardownMockDbClient } = await import('../utils/mockDB'); + await teardownMockDbClient(); }); // given empty manifest, test that it doesn't run describe("Upload manifest doesn't work for invalid manifests", () => { - let manifest: Manifest; - - test('throws an error for an empty manifest', async () => { - expect( - async () => await uploadManifest(manifest, PROPERTY_NAME), - ).rejects.toThrowError(); - }); - - test('throws an error for a manifest with 0 documents', async () => { - manifest = new Manifest('', true); - expect( - async () => await uploadManifest(manifest, PROPERTY_NAME), - ).rejects.toThrowError(); - }); + let manifest: Manifest; + + test('throws an error for an empty manifest', async () => { + expect( + async () => await uploadManifest(manifest, PROPERTY_NAME), + ).rejects.toThrowError(); + }); + + test('throws an error for a manifest with 0 documents', async () => { + manifest = new Manifest('', true); + expect( + async () => await uploadManifest(manifest, PROPERTY_NAME), + ).rejects.toThrowError(); + }); }); // given manifests, test that it uploads said manifests describe('Upload manifest uploads to Atlas db', () => { - afterEach(async () => { - await removeDocuments('documents'); - }); - let 
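// NOTE: illustrative sketch, not part of the diff. The vi.mock call above swaps the
// real Atlas connector for helpers in tests/utils/mockDB, which this patch does not
// show. A plausible shape for that file, assuming mongodb-memory-server, would be:
import { MongoMemoryServer } from 'mongodb-memory-server';
import { type Db, MongoClient } from 'mongodb';

let server: MongoMemoryServer | undefined;
let client: MongoClient | undefined;

export async function mockDb(): Promise<Db> {
  server = server ?? (await MongoMemoryServer.create());
  client = client ?? new MongoClient(server.getUri());
  await client.connect();
  return client.db('search-test'); // database name is hypothetical
}

export async function teardownMockDbClient(): Promise<void> {
  await client?.close();
  await server?.stop();
}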
manifest: Manifest; - - test('constant nodeManifest uploads correct number of documents', async () => { - manifest = new Manifest( - nodeManifest.url, - nodeManifest.includeInGlobalSearch, - ); - manifest.documents = nodeManifest.documents; - - await uploadManifest(manifest, PROPERTY_NAME); - - //check that manifests have been uploaded - const db = await mockDb(); - const documents = db.collection('documents'); - //count number of documents in collection - expect(await documents.countDocuments()).toEqual(manifest.documents.length); - }); - - test('Generated node manifest uploads correct number of documents', async () => { - //get new manifest - manifest = await getManifest('node'); - - // upload manifest - const status = await uploadManifest(manifest, PROPERTY_NAME); - expect(status.upserted).toEqual(manifest.documents.length); - - //check that manifests have been uploaded - const db = await mockDb(); - const documents = db.collection('documents'); - expect(await documents.countDocuments()).toEqual(manifest.documents.length); - }); + afterEach(async () => { + await removeDocuments('documents'); + }); + let manifest: Manifest; + + test('constant nodeManifest uploads correct number of documents', async () => { + manifest = new Manifest( + nodeManifest.url, + nodeManifest.includeInGlobalSearch, + ); + manifest.documents = nodeManifest.documents; + + await uploadManifest(manifest, PROPERTY_NAME); + + //check that manifests have been uploaded + const db = await mockDb(); + const documents = db.collection('documents'); + //count number of documents in collection + expect(await documents.countDocuments()).toEqual(manifest.documents.length); + }); + + test('Generated node manifest uploads correct number of documents', async () => { + //get new manifest + manifest = await getManifest('node'); + + // upload manifest + const status = await uploadManifest(manifest, PROPERTY_NAME); + expect(status.upserted).toEqual(manifest.documents.length); + + //check that manifests have been uploaded + const db = await mockDb(); + const documents = db.collection('documents'); + expect(await documents.countDocuments()).toEqual(manifest.documents.length); + }); }); describe( - 'Upload manifest uploads to Atlas db and updates existing manifests correctly ', - async () => { - afterEach(async () => { - await removeDocuments('documents'); - }); - const manifest1: Manifest = new Manifest( - nodeManifest.url, - nodeManifest.includeInGlobalSearch, - ); - manifest1.documents = nodeManifest.documents; - const db = await mockDb(); - const documents = db.collection('documents'); - const kotlinManifest = await getManifest('kotlin'); - - test('nodeManifest uploads all documents', async () => { - await checkCollection(); - const status1 = await uploadManifest(manifest1, PROPERTY_NAME); - expect(status1.upserted).toEqual(manifest1.documents.length); - //reopen connection to count current num of documents in collection - await mockDb(); - - expect(await documents.countDocuments()).toEqual( - manifest1.documents.length, - ); - - //re upload the same manifest - const status2 = await uploadManifest(manifest1, PROPERTY_NAME); - expect(status2.upserted).toEqual(0); - }); - - test('two separate manifests uplodaded uploads correct number of entries', async () => { - //find a way to check that there are no documents in the collection yet - await mockDb(); - const status = await uploadManifest(manifest1, PROPERTY_NAME); - await mockDb(); - expect(await documents.countDocuments()).toEqual( - manifest1.documents.length, - ); - const status1 = 
await uploadManifest(kotlinManifest, 'docs-kotlin');
-    expect(status1.upserted).toEqual(kotlinManifest.documents.length);
-
-    //reopen connection to count current num of documents in collection
-    await mockDb();
-    expect(await documents.countDocuments()).toEqual(
-      kotlinManifest.documents.length + manifest1.documents.length,
-    );
-  });
-
-  test('stale documents from same search property are removed', async () => {
-    //upload documents
-    const db = await mockDb();
-    const status = await uploadManifest(manifest1, PROPERTY_NAME);
-    await mockDb();
-    const status1 = await uploadManifest(kotlinManifest, 'docs-kotlin');
-    //reopen connection to count current num of documents in collection
-    await mockDb();
-    expect(await documents.countDocuments()).toEqual(
-      kotlinManifest.documents.length + manifest1.documents.length,
-    );
-
-    //insert entries with random slugs
-    await mockDb();
-    const dummyHash = generateHash('dummyManifest');
-    const dummyDate = new Date();
-    const dummyDocs = [
-      {
-        manifestRevisionId: dummyHash,
-        lastModified: dummyDate,
-        searchProperty: PROPERTY_NAME,
-        slug: 'dummySlug1',
-      },
-      {
-        manifestRevisionId: dummyHash,
-        lastModified: dummyDate,
-        searchProperty: PROPERTY_NAME,
-        slug: 'dummySlug2',
-      },
-    ];
-
-    insert(db, 'documents', dummyDocs);
-    //upload node documents again
-    await mockDb();
-
-    const status3 = await uploadManifest(manifest1, PROPERTY_NAME);
-    expect(status3.deleted).toEqual(dummyDocs.length);
-    expect(status3.modified).toEqual(manifest1.documents.length);
-    //check all documents have current hash, time
-    await mockDb();
-    const empty = await db.collection('documents').findOne({
-      searchProperty: PROPERTY_NAME,
-      manifestRevisionId: dummyHash,
-    });
-    expect(empty).toBe(null);
-  });
-  },
-  { timeout: 10000 },
+  'Upload manifest uploads to Atlas db and updates existing manifests correctly',
+  async () => {
+    afterEach(async () => {
+      await removeDocuments('documents');
+    });
+    const manifest1: Manifest = new Manifest(
+      nodeManifest.url,
+      nodeManifest.includeInGlobalSearch,
+    );
+    manifest1.documents = nodeManifest.documents;
+    const db = await mockDb();
+    const documents = db.collection('documents');
+    const kotlinManifest = await getManifest('kotlin');
+
+    test('nodeManifest uploads all documents', async () => {
+      await checkCollection();
+      const status1 = await uploadManifest(manifest1, PROPERTY_NAME);
+      expect(status1.upserted).toEqual(manifest1.documents.length);
+      //reopen connection to count current num of documents in collection
+      await mockDb();
+
+      expect(await documents.countDocuments()).toEqual(
+        manifest1.documents.length,
+      );
+
+      //re-upload the same manifest
+      const status2 = await uploadManifest(manifest1, PROPERTY_NAME);
+      expect(status2.upserted).toEqual(0);
+    });
+
+    test('two separate manifests upload the correct number of entries', async () => {
+      //find a way to check that there are no documents in the collection yet
+      await mockDb();
+      const status = await uploadManifest(manifest1, PROPERTY_NAME);
+      await mockDb();
+      expect(await documents.countDocuments()).toEqual(
+        manifest1.documents.length,
+      );
+      const status1 = await uploadManifest(kotlinManifest, 'docs-kotlin');
+      expect(status1.upserted).toEqual(kotlinManifest.documents.length);
+
+      //reopen connection to count current num of documents in collection
+      await mockDb();
+      expect(await documents.countDocuments()).toEqual(
+        kotlinManifest.documents.length + manifest1.documents.length,
+      );
+    });
+
+    test('stale documents from same search property are removed', 
async () => { + //upload documents + const db = await mockDb(); + const status = await uploadManifest(manifest1, PROPERTY_NAME); + await mockDb(); + const status1 = await uploadManifest(kotlinManifest, 'docs-kotlin'); + //reopen connection to count current num of documents in collection + await mockDb(); + expect(await documents.countDocuments()).toEqual( + kotlinManifest.documents.length + manifest1.documents.length, + ); + + //insert entries with random slugs + await mockDb(); + const dummyHash = generateHash('dummyManifest'); + const dummyDate = new Date(); + const dummyDocs = [ + { + manifestRevisionId: dummyHash, + lastModified: dummyDate, + searchProperty: PROPERTY_NAME, + slug: 'dummySlug1', + }, + { + manifestRevisionId: dummyHash, + lastModified: dummyDate, + searchProperty: PROPERTY_NAME, + slug: 'dummySlug2', + }, + ]; + + insert(db, 'documents', dummyDocs); + //upload node documents again + await mockDb(); + + const status3 = await uploadManifest(manifest1, PROPERTY_NAME); + expect(status3.deleted).toEqual(dummyDocs.length); + expect(status3.modified).toEqual(manifest1.documents.length); + //check all documents have current hash, time + await mockDb(); + const empty = await db.collection('documents').findOne({ + searchProperty: PROPERTY_NAME, + manifestRevisionId: dummyHash, + }); + expect(empty).toBe(null); + }); + }, + { timeout: 10000 }, ); diff --git a/search-manifest/tests/integration/uploadToS3.test.ts b/search-manifest/tests/integration/uploadToS3.test.ts index b569f847c..e6053a792 100644 --- a/search-manifest/tests/integration/uploadToS3.test.ts +++ b/search-manifest/tests/integration/uploadToS3.test.ts @@ -1,34 +1,34 @@ -import { beforeEach, describe, expect, test, vi } from "vitest"; +import { beforeEach, describe, expect, test, vi } from 'vitest'; import { PutObjectCommand, PutObjectCommandOutput, S3Client, -} from "@aws-sdk/client-s3"; -import { mockClient } from "aws-sdk-client-mock"; -import { getManifest } from "../utils/getManifest"; -import { uploadManifestToS3 } from "../../src/uploadToS3/uploadManifest"; +} from '@aws-sdk/client-s3'; +import { mockClient } from 'aws-sdk-client-mock'; +import { getManifest } from '../utils/getManifest'; +import { uploadManifestToS3 } from '../../src/uploadToS3/uploadManifest'; -const MANIFEST = await getManifest("node"); +const MANIFEST = await getManifest('node'); const PROJECT_NAME = `node`; const BRANCH = `master`; const output: PutObjectCommandOutput = { $metadata: { httpStatusCode: 200, - requestId: "MPCZN4GMCM56ZCQT", + requestId: 'MPCZN4GMCM56ZCQT', extendedRequestId: - "iMY6089hIWIjGiAJbiGfHooJfUCjUbKd7s12b7xo3p+U2SBRLHVNOPfWLi1/LbpHRhD5R65V7Lw=", + 'iMY6089hIWIjGiAJbiGfHooJfUCjUbKd7s12b7xo3p+U2SBRLHVNOPfWLi1/LbpHRhD5R65V7Lw=', attempts: 1, totalRetryDelay: 0, }, ETag: '"7af17bccdfeee6b7550e235c098a01d3"', - ServerSideEncryption: "AES256", + ServerSideEncryption: 'AES256', }; beforeEach(async () => { const s3Mock = mockClient(S3Client); s3Mock.on(PutObjectCommand).resolves(output); - vi.mock("../../src/uploadToS3/connectToS3.ts", async () => { + vi.mock('../../src/uploadToS3/connectToS3.ts', async () => { return { connectToS3: () => { return new S3Client({}); @@ -37,15 +37,15 @@ beforeEach(async () => { }); }); -describe("upload manifest to S3 behaves as expected", () => { +describe('upload manifest to S3 behaves as expected', () => { const uploadParams = { - bucket: "docs-search-indexes-test", - prefix: "search-indexes/ab-testing", + bucket: 'docs-search-indexes-test', + prefix: 'search-indexes/ab-testing', fileName: 
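// NOTE: illustrative sketch, not part of the diff. uploadManifestToS3's body is
// elided above; given its imports and the upload() helper, it plausibly assembles
// the PutObject parameters like this (the key composition is an assumption):
import { PutObjectCommand, S3Client } from '@aws-sdk/client-s3';

async function uploadSketch(
  client: S3Client,
  params: { bucket: string; prefix: string; fileName: string; manifest: string },
) {
  const command = new PutObjectCommand({
    Bucket: params.bucket,
    // e.g. 'search-indexes/ab-testing/node-master.json'
    Key: `${params.prefix}/${params.fileName}`,
    Body: params.manifest,
  });
  return client.send(command);
}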
`${PROJECT_NAME}-${BRANCH}.json`, manifest: MANIFEST.export(), }; - test("given sufficient parameters, upload to S3 resolves", async () => { + test('given sufficient parameters, upload to S3 resolves', async () => { expect(uploadManifestToS3(uploadParams)).resolves.toStrictEqual(output); }); }); diff --git a/search-manifest/tests/resources/mockCollections/docsets.json b/search-manifest/tests/resources/mockCollections/docsets.json index 6e07b60a5..265325d62 100644 --- a/search-manifest/tests/resources/mockCollections/docsets.json +++ b/search-manifest/tests/resources/mockCollections/docsets.json @@ -1,50 +1,50 @@ [ - { - "project": "atlas-app-services", - "prefix": { - "stg": "atlas/app-services", - "prd": "atlas/app-services", - "dotcomstg": "docs/atlas/app-services", - "dotcomprd": "docs/atlas/app-services" - }, - "url": { - "dev": "https://docs-atlas-staging.mongodb.com", - "stg": "https://docs-atlas-staging.mongodb.com", - "prd": "https://docs.atlas.mongodb.com", - "dotcomprd": "http://mongodb.com/", - "dotcomstg": "https://mongodbcom-cdn.website.staging.corp.mongodb.com/" - } - }, - { - "project": "compass", - "prefix": { - "stg": "compass", - "prd": "compass", - "dotcomstg": "docs/compass", - "dotcomprd": "docs/compass" - }, - "url": { - "dev": "https://docs-mongodborg-staging.corp.mongodb.com", - "stg": "https://docs-mongodborg-staging.corp.mongodb.com", - "prd": "https://docs.mongodb.com", - "dotcomprd": "http://mongodb.com/", - "dotcomstg": "https://mongodbcom-cdn.website.staging.corp.mongodb.com/" - } - }, - { - "project": "cloud-docs", - "prefix": { - "stg": "", - "prd": "", - "dotcomstg": "docs/atlas", - "dotcomprd": "docs/atlas" - }, - "url": { - "dev": "https://docs-atlas-staging.mongodb.com", - "stg": "https://docs-atlas-staging.mongodb.com", - "prd": "https://docs.atlas.mongodb.com", - "dotcomprd": "http://mongodb.com/", - "dotcomstg": "https://mongodbcom-cdn.website.staging.corp.mongodb.com/" - } - } + { + "project": "atlas-app-services", + "prefix": { + "stg": "atlas/app-services", + "prd": "atlas/app-services", + "dotcomstg": "docs/atlas/app-services", + "dotcomprd": "docs/atlas/app-services" + }, + "url": { + "dev": "https://docs-atlas-staging.mongodb.com", + "stg": "https://docs-atlas-staging.mongodb.com", + "prd": "https://docs.atlas.mongodb.com", + "dotcomprd": "http://mongodb.com/", + "dotcomstg": "https://mongodbcom-cdn.website.staging.corp.mongodb.com/" + } + }, + { + "project": "compass", + "prefix": { + "stg": "compass", + "prd": "compass", + "dotcomstg": "docs/compass", + "dotcomprd": "docs/compass" + }, + "url": { + "dev": "https://docs-mongodborg-staging.corp.mongodb.com", + "stg": "https://docs-mongodborg-staging.corp.mongodb.com", + "prd": "https://docs.mongodb.com", + "dotcomprd": "http://mongodb.com/", + "dotcomstg": "https://mongodbcom-cdn.website.staging.corp.mongodb.com/" + } + }, + { + "project": "cloud-docs", + "prefix": { + "stg": "", + "prd": "", + "dotcomstg": "docs/atlas", + "dotcomprd": "docs/atlas" + }, + "url": { + "dev": "https://docs-atlas-staging.mongodb.com", + "stg": "https://docs-atlas-staging.mongodb.com", + "prd": "https://docs.atlas.mongodb.com", + "dotcomprd": "http://mongodb.com/", + "dotcomstg": "https://mongodbcom-cdn.website.staging.corp.mongodb.com/" + } + } ] diff --git a/search-manifest/tests/resources/mockCollections/repos-branches.json b/search-manifest/tests/resources/mockCollections/repos-branches.json index 57ea8e42e..9aa43d1be 100644 --- a/search-manifest/tests/resources/mockCollections/repos-branches.json +++ 
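// NOTE (illustrative, not part of the diff): the docsets fixture above feeds the url
// composition in getProperties earlier in this patch. For the cloud-docs entry:
//   assertTrailingSlash(url.dotcomprd + prefix.dotcomprd)
//   // 'http://mongodb.com/' + 'docs/atlas' => 'http://mongodb.com/docs/atlas/'
// The join only avoids a doubled slash because prefix.dotcomprd has no leading
// slash; joinUrl in src/utils.ts is the defensive alternative.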
b/search-manifest/tests/resources/mockCollections/repos-branches.json @@ -1,90 +1,91 @@ - -[ { - "repoName": "docs-app-services", - "branches": [ - { - "gitBranchName": "master", - "urlSlug" : "", - "isStableBranch": true, - "active": true - }], - "project": "atlas-app-services", - "internalOnly": false, - "prodDeployable": true - }, - { - "repoName": "docs-compass", - "branches": [ - { - "gitBranchName": "master", - "isStableBranch": true, - "urlSlug": "current", - "active": true - }, - { - "gitBranchName": "beta", - "urlSlug": "upcoming", - "isStableBranch": false, - "active": false - } - ], - "project": "compass", - "search": { - "categoryTitle": "Compass" - }, - "internalOnly": false, - "prodDeployable": true - }, - { - "repoName": "cloud-docs", - "branches": [ - { - "gitBranchName": "master", - "urlSlug": "", - "isStableBranch": true, - "active": true - } - ], - "project": "cloud-docs", - "search": { - "categoryName": "atlas", - "categoryTitle": "Atlas" - }, - "internalOnly": false, - "prodDeployable": true +[ + { + "repoName": "docs-app-services", + "branches": [ + { + "gitBranchName": "master", + "urlSlug": "", + "isStableBranch": true, + "active": true + } + ], + "project": "atlas-app-services", + "internalOnly": false, + "prodDeployable": true + }, + { + "repoName": "docs-compass", + "branches": [ + { + "gitBranchName": "master", + "isStableBranch": true, + "urlSlug": "current", + "active": true + }, + { + "gitBranchName": "beta", + "urlSlug": "upcoming", + "isStableBranch": false, + "active": false + } + ], + "project": "compass", + "search": { + "categoryTitle": "Compass" }, - { - "repoName": "docs-mongodb-internal", - "branches": [ - { - "gitBranchName": "v5.0", - "active": true, - "urlSlug": "stable", - "isStableBranch": false - } - ], - "project": "docs", - "internalOnly": true, - "prodDeployable": false + "internalOnly": false, + "prodDeployable": true + }, + { + "repoName": "cloud-docs", + "branches": [ + { + "gitBranchName": "master", + "urlSlug": "", + "isStableBranch": true, + "active": true + } + ], + "project": "cloud-docs", + "search": { + "categoryName": "atlas", + "categoryTitle": "Atlas" }, + "internalOnly": false, + "prodDeployable": true + }, + { + "repoName": "docs-mongodb-internal", + "branches": [ + { + "gitBranchName": "v5.0", + "active": true, + "urlSlug": "stable", + "isStableBranch": false + } + ], + "project": "docs", + "internalOnly": true, + "prodDeployable": false + }, + { + "repoName": "mms-docs", + "branches": [ + { + "gitBranchName": "master", + "active": true, + "urlSlug": "stable", + "isStableBranch": true + }, { - "repoName": "mms-docs", - "branches": [ - { - "gitBranchName": "master", - "active": true, - "urlSlug": "stable", - "isStableBranch": true - }, - { - "gitBranchName": "v1.3", - "active": true, - "urlSlug": "v1.3", - "isStableBranch": false - } - ], - "project": "mms-docs", - "internalOnly": true, - "prodDeployable": false - } + "gitBranchName": "v1.3", + "active": true, + "urlSlug": "v1.3", + "isStableBranch": false + } + ], + "project": "mms-docs", + "internalOnly": true, + "prodDeployable": false + } ] diff --git a/search-manifest/tests/resources/s3Manifests/kotlin-upcoming.json b/search-manifest/tests/resources/s3Manifests/kotlin-upcoming.json index 9fecfc8ba..c746a0511 100644 --- a/search-manifest/tests/resources/s3Manifests/kotlin-upcoming.json +++ b/search-manifest/tests/resources/s3Manifests/kotlin-upcoming.json @@ -1,4807 +1,4807 @@ { - "url": "http://mongodb.com/docs/drivers/kotlin/coroutine/upcoming", - 
"includeInGlobalSearch": false, - "documents": [ - { - "slug": "api-documentation", - "title": "API Documentation", - "headings": [], - "paragraphs": "BSON kotlinx.serialization -\nclasses for encoding and decoding between Kotlin data classes and the BSON data\nformat using kotlinx.serialization . Core - classes that\ncontain essential driver functionality. Kotlin Coroutine Driver -\nclasses for the current driver API using coroutines. Kotlin Sync Driver -\nclasses for the current synchronous driver API.", - "code": [], - "preview": null, - "tags": null, - "facets": { - "target_product": ["drivers"], - "programming_language": ["kotlin"] - } - }, - { - "slug": "compatibility", - "title": "Compatibility", - "headings": [ - "MongoDB Compatibility", - "Compatibility Table Legend", - "Language Compatibility" - ], - "paragraphs": "The following compatibility table specifies the recommended version or versions\nof the MongoDB Kotlin Driver for use with a specific version of MongoDB. The first column lists the driver version. MongoDB ensures compatibility between the MongoDB Server and the drivers\nfor three years after the server version's end of life (EOL) date. To learn\nmore about the MongoDB release and EOL dates, see\n MongoDB Software Lifecycle Schedules . Icon Explanation \u2713 All features are supported. \u229b The Driver version will work with the MongoDB version, but not all\nnew MongoDB features are supported. No mark The Driver version is not tested with the MongoDB version. Kotlin Driver Version MongoDB 8.0 MongoDB 7.0 MongoDB 6.0 MongoDB 5.0 MongoDB 4.4 MongoDB 4.2 MongoDB 4.0 MongoDB 3.6 MongoDB 3.4 MongoDB 3.2 MongoDB 3.0 MongoDB 2.6 5.2 \u2713 \u2713 \u2713 \u2713 \u2713 \u2713 \u2713 \u2713 5.1 \u229b \u2713 \u2713 \u2713 \u2713 \u2713 \u2713 \u2713 5.0 \u229b \u2713 \u2713 \u2713 \u2713 \u2713 \u2713 \u2713 4.11 \u229b \u2713 \u2713 \u2713 \u2713 \u2713 \u2713 \u2713 4.10 \u229b \u2713 \u2713 \u2713 \u2713 \u2713 \u2713 \u2713 The following compatibility table specifies the recommended version or versions\nof the MongoDB Kotlin Driver for use with a specific version of Kotlin. The first column lists the driver version. For more information on how to read the compatibility tables, see our guide on\n MongoDB Compatibility Tables . Kotlin Driver Version Kotlin 1.8 5.1 \u2713 5.0 \u2713 4.11 \u2713 4.10 \u2713", - "code": [], - "preview": "The following compatibility table specifies the recommended version or versions\nof the MongoDB Kotlin Driver for use with a specific version of MongoDB.", - "tags": null, - "facets": { - "target_product": ["drivers"], - "programming_language": ["kotlin"] - } - }, - { - "slug": "connection-troubleshooting", - "title": "Connection Troubleshooting", - "headings": [ - "Connection Error", - "Check Connection String", - "Configure Firewall", - "Authentication Error", - "Check Connection String", - "Verify User Is in Authentication Database", - "Error Sending Message", - "Check Connection String", - "Verify User Is in Authentication Database", - "Configure Firewall", - "Check the Number of Connections", - "Timeout Error", - "Set maxConnectionTimeoutMS", - "Set maxConnectionLifeTime and maxConnectionIdleTime", - "Check the Number of Connections", - "Additional Tips", - "Get Log Information for TLS/SSL" - ], - "paragraphs": "This page offers potential solutions to issues you might see when\nconnecting to a MongoDB instance or replica set while using the\nMongoDB Kotlin Driver. This page lists only connection issues. 
If you are having any other issues\nwith MongoDB, consider the following resources: The Frequently Asked Questions (FAQ) for the Kotlin driver The Issues & Help topic for information about\nreporting bugs, contributing to the driver, and additional resources The MongoDB Community Forums for\nquestions, discussions, or general technical support The following error message is a general message indicating that the driver\ncannot connect to a server on the specified hostname or port: If you receive this error, try the following methods to resolve the issue. Verify that the hostname and port number in the connection string are both\naccurate. In the sample error message, the hostname is 127.0.0.1 and the\nport is 27017 . The default port value for a MongoDB instance is\n 27017 , but you can configure MongoDB to communicate on another port. Assuming that your MongoDB deployment uses the default port, verify that your\nfirewall has port 27017 open. If your deployment is using a different port,\nverify that port is open in your firewall. Do not open ports in your firewall unless you are sure that is the port used\nby your MongoDB instance. The Kotlin driver can fail to connect to a MongoDB instance if\nthe authorization is not configured correctly. This often results in an error\nmessage similar to the following: If you receive this error, try the following methods to resolve the issue. An invalid connection string is the most common cause of authentication\nissues when attempting to connect to MongoDB. If your connection string contains a username and password, ensure that they\nare in the correct format. If your MongoDB deployment is on MongoDB Atlas, you can check your connection\nstring by using the Atlas Connection Example .\nMake sure to replace the connection string in the example with yours. When connecting to a replica set, you should include all of the hosts\nin the replica set in your connection string. Separate each of the hosts\nin the connection string with a comma. This enables the driver to establish a\nconnection if one of the hosts is unreachable. For more information about using connection strings with the Kotlin driver,\nsee Connection URI in the Connection Guide. If the username or password includes any of the following characters, they\nmust be percent encoded : To successfully authenticate a connection by using a username and password,\nthe username must be defined in the authentication database. The default\nauthentication database is the admin database. To use a different database\nfor authentication, specify the authSource in the connection string. The\nfollowing example instructs the driver to use users as the authentication\ndatabase: When you send a request through the driver and it is unable to send the command,\nit often displays the following general error message: If you receive this error, try the following methods to resolve the issue. Verify that the connection string in\nyour app is accurate. This is described under Connection Error \nand Authentication Error . The user needs to be recognized in your\nauthentication database. This is described under Authentication\nError . The firewall needs to have an open port for communicating with the MongoDB\ninstance. This is described under Connection Error . Each MongoClient instance supports a maximum number of concurrent open\nconnections in its connection pool. The configuration parameter maxPoolSize \ndefines this value and is set to 100 by default. 
If there are already a\nnumber of open connections equal to maxPoolSize , the server waits until\na connection becomes available. If this wait time exceeds the maxIdleTimeMS \nvalue, the driver responds with an error. Sometimes when you send messages through the driver to the server, the messages\ntake a while to respond. When this happens, you might receive an error message\nsimilar to one of the following error messages: If you receive one of these errors, try the following methods to resolve the\nissue. The maxConnectionTimeoutMS option indicates the amount of time the\nKotlin driver waits for a connection before timing out. The default\nvalue is 10000 . You can increase this value or set it to 0 if\nyou want the driver to never timeout. Consider setting maxConnectionLifeTime and\n maxConnectionIdleTime . These parameters configure how long a connection\ncan be maintained with a MongoDB instance. For more information about these\nparameters, see Connection Pool Settings . You might have too many open connections. The solution to this is described\nunder Error Sending Message . While not related to a specific error message, this section includes\nadditional information that can be useful when attempting to troubleshoot\nconnection issues. When using TLS/SSL, you can use the -Djavax.net.debug=all system property\nto view additional log statements. This can help when attempting to debug any\nconnection issues. See the Oracle guide to debugging TLS/SSL connections \nfor more information.", - "code": [ - { - "lang": "none", - "value": "Error: couldn't connect to server 127.0.0.1:27017" - }, - { - "lang": "none", - "value": "Command failed with error 18 (AuthenticationFailed): 'Authentication failed.' on server localhost:27017." - }, - { - "lang": "none", - "value": ": / ? # [ ] @" - }, - { - "lang": "kotlin", - "value": "val mongoClient =\nMongoClient.create(\"mongodb://:@:/?authSource=users\")" - }, - { - "lang": "none", - "value": "com.mongodb.MongoSocketWriteException: Exception sending message" - }, - { - "lang": "none", - "value": "Timed out after 30000 ms while waiting for a server that matches ReadPreferenceServerSelector{readPreference=primary}." - }, - { - "lang": "none", - "value": "No server chosen by ReadPreferenceServerSelector{readPreference=primary} from cluster description" - } - ], - "preview": "This page offers potential solutions to issues you might see when\nconnecting to a MongoDB instance or replica set while using the\nMongoDB Kotlin Driver.", - "tags": null, - "facets": { - "target_product": ["drivers"], - "programming_language": ["kotlin"] - } - }, - { - "slug": "faq", - "title": "FAQ", - "headings": [ - "Why Am I Having Problems Connecting to a MongoDB Instance?", - "How is the Kotlin Driver Different from KMongo?", - "What is the Difference Between the Kotlin Driver and the Kotlin SDK?", - "How Does Connection Pooling Work in the Kotlin Driver?" - ], - "paragraphs": "On this page, you can find frequently asked questions and their corresponding answers. If you can't find an answer to your question on this page, see the\n Issues & Help page for information on how to report issues. If you have trouble connecting to a MongoDB deployment, see\nthe Connection Troubleshooting Guide \nfor possible solutions. The Kotlin driver is the official MongoDB driver for Kotlin. It is\ndeveloped by the MongoDB team and provides a native API for Kotlin\napplications to connect to MongoDB and work with data. It is implemented\nby wrapping the MongoDB Java driver . 
The Kotlin driver was developed in collaboration with the creator of KMongo,\nJulien Buret, to give users an officially-supported driver. The official Kotlin driver and KMongo have generally similar APIs.\nNotable similarities between the Kotlin driver and KMongo include: Although the official Kotlin driver and KMongo are similar, there are some\nkey differences: For more detailed information, see Migrate from KMongo . KMongo is a popular community-developed library\nfor working with MongoDB from Kotlin applications.\nIt is a wrapper around the Java driver that was created prior to the creation of\nthe official Kotlin driver to serve the needs of the Kotlin community. As of July 2023, KMongo has been marked as deprecated. Support for synchronous and coroutine-based operations Support using data classes to represent MongoDB documents Support KotlinX serialization Support for MongoDB CRUD APIs and aggregation The official driver does not have built-in support for reactor ,\n rxjava2 , Jackson ,\nor GSON . The official driver does not support MongoDB shell commands. The official driver supports type-safe queries with the Builders API,\nwhereas KMongo uses infix functions and property references for\ntype-safe queries. MongoDB supports both mobile and server-side development in Kotlin. If\nyou are developing a mobile application for Android or Kotlin\nMultiplatform (KMP), you can use the MongoDB\nAtlas Device Kotlin SDK to access Atlas App Services and\nto manage your Realm data. The Kotlin driver supports server-side development by providing a\ncomplete library for building idiomatic Kotlin applications. You can\nlearn how to develop asynchronous applications in this documentation for\nthe Kotlin Coroutine Driver, or you can view the Kotlin Sync\nDriver documentation to learn more about synchronous\nprogramming. Every MongoClient instance has a built-in connection pool for each server\nin your MongoDB topology. Connection pools open sockets on demand to\nsupport concurrent MongoDB operations in your multi-threaded application. The maximum size of each connection pool is set by the maxPoolSize option, which\ndefaults to 100 . If the number of in-use connections to a server reaches\nthe value of maxPoolSize , the next request to that server will wait\nuntil a connection becomes available. Each MongoClient instance opens two additional sockets per server in your\nMongoDB topology for monitoring the server's state. For example, a client connected to a 3-node replica set opens 6\nmonitoring sockets. It also opens as many sockets as needed to support\nan application's threads on each server, up to\nthe value of maxPoolSize . If maxPoolSize is 100 and the\napplication only uses the primary (the default), then only the primary\nconnection pool grows and there can be at most 106 total connections. If the\napplication uses a read preference to query the\nsecondary nodes, their pools also grow and there can be 306 total connections. Additionally, connection pools are rate-limited such that each connection pool\ncan only create, at maximum, the value of maxConnecting connections\nin parallel at any time. Any additional thread stops waiting in the\nfollowing cases: You can set the minimum number of concurrent connections to\neach server with the minPoolSize option, which defaults to 0 .\nThe connection pool will be initialized with this number of sockets. 
If\nsockets are closed due to any network errors, causing the total number\nof sockets (both in use and idle) to drop below the minimum, more\nsockets are opened until the minimum is reached. You can set the maximum number of milliseconds that a connection can\nremain idle in the pool before being removed and replaced with\nthe maxIdleTimeMS option, which defaults to 0 (no limit). The following default configuration for a MongoClient works for most\napplications: Create a client once for each process, and reuse it for all\noperations. It is a common mistake to create a new client for each\nrequest, which is very inefficient. To support high numbers of concurrent MongoDB operations\nwithin one process, you can increase maxPoolSize . Once the pool\nreaches its maximum size, additional threads wait for sockets\nto become available. The driver does not limit the number of threads that\ncan wait for sockets to become available, and it is the application's\nresponsibility to limit the size of its pool to bound queuing\nduring a load spike. Threads wait for the amount of time specified in\nthe waitQueueTimeoutMS option, which defaults to 120000 (120 seconds). A thread that waits more than the length of time defined by\n waitQueueTimeoutMS for a socket raises a connection error. Use this\noption if it is more important to bound the duration of operations\nduring a load spike than it is to complete every operation. When MongoClient.close() is called by any thread, the driver\ncloses all idle sockets and closes all sockets that are in\nuse as they are returned to the pool. To learn more about connecting to MongoDB, see the Connection\nGuide . One of the existing threads finishes creating a connection, or\nan existing connection is checked back into the pool. The driver's ability to reuse existing connections improves due to\nrate-limits on connection creation.", - "code": [ - { - "lang": "kotlin", - "value": "val client = MongoClient(\"\")" - } - ], - "preview": "On this page, you can find frequently asked questions and their corresponding answers.", - "tags": "troubleshooting, question, support", - "facets": { - "genre": ["reference"], - "target_product": ["drivers"], - "programming_language": ["kotlin"] - } - }, - { - "slug": "fundamentals/aggregation-expression-operations", - "title": "Aggregation Expression Operations", - "headings": [ - "Overview", - "How to Use Operations", - "Constructor Methods", - "Operations", - "Arithmetic Operations", - "Array Operations", - "Boolean Operations", - "Comparison Operations", - "Conditional Operations", - "Convenience Operations", - "Conversion Operations", - "Date Operations", - "Document Operations", - "Map Operations", - "String Operations", - "Type-Checking Operations" - ], - "paragraphs": "In this guide, you can learn how to use the MongoDB Kotlin Driver to construct\nexpressions for use in aggregation pipelines. You can perform\nexpression operations with discoverable, typesafe Java methods rather\nthan BSON documents. Because these methods follow the fluent interface\npattern, you can chain aggregation operations together to create code\nthat is both more compact and more naturally readable. The operations in this guide use methods from the\n com.mongodb.client.model.mql package.\nThese methods provide an idiomatic way to use the Query API,\nthe mechanism by which the driver interacts with a MongoDB deployment. To learn more\nabout the Query API, see the Server manual documentation . 
The examples in this guide assume that you include the following imports\nin your code: To access document fields in an expression, you need to reference the\ncurrent document being processed by the aggregation pipeline. Use the\n current() method to refer to this document. To access the value of a\nfield, you must use the appropriately typed method, such as\n getString() or getDate() . When you specify the type for a field,\nyou ensure that the driver provides only those methods which are\ncompatible with that type. The following code shows how to reference a\nstring field called name : To specify a value in an operation, pass it to the of() constructor method to\nconvert it to a valid type. The following code shows how to reference a\nvalue of 1.0 : To create an operation, chain a method to your field or value reference.\nYou can build more complex operations by chaining additional methods. The following example creates an operation to find patients in New\nMexico who have visited the doctor\u2019s office at least once. The operation\nperforms the following actions: The and() method links these operations so that the pipeline stage\nmatches only documents that meet both criteria. While some aggregation stages, such as group() , accept operations\ndirectly, other stages expect that you first include your operation in a\nmethod such as computed() or expr() . These methods, which take\nvalues of type TExpression , allow you to use your expressions in\ncertain aggregations. To complete your aggregation pipeline stage, include your expression\nin an aggregates builder method. The following list provides examples of\nhow to include your expression in common aggregates builder methods: To learn more about these methods, see the\n Aggregation guide . The examples use the listOf() method to create a list of\naggregation stages. This list is passed to the aggregate() method of\n MongoCollection . Checks if the size of the visitDates array is greater than 0 \nby using the gt() method Checks if the state field value is \u201cNew Mexico\u201d by using the\n eq() method match(expr()) project(fields(computed(\"\", ))) group() You can use these constructor methods to define values for use in Kotlin aggregation\nexpressions. Refer to any of the sections in Operations for examples using these methods. Method Description current() References the current document being processed by the aggregation pipeline. currentAsMap() References the current document being processed by the aggregation pipeline as a map value. Returns an MqlValue type corresponding to the provided primitive. ofArray() Returns an array of MqlValue types corresponding to the provided array of primitives. ofEntry() Returns an entry value. ofMap() Returns an empty map value. ofNull() Returns the null value as exists in the Query API. When you provide a value to one of these methods, the driver treats\nit literally. For example, of(\"$x\") represents the string value\n \"$x\" , rather than a field named x . The following sections provide information and examples for\naggregation expression operations available in the driver.\nThe operations are categorized by purpose and functionality. Each section has a table that describes aggregation methods\navailable in the driver and corresponding expression operators in the\nQuery API. The method names link to API documentation and the\naggregation pipeline operator names link to descriptions and examples in\nthe Server manual documentation. 
While each method is effectively\nequivalent to the corresponding Query API expression, they may differ in\nexpected parameters and implementation. The driver generates a Query API expression that may be different\nfrom the Query API expression provided in each example. However,\nboth expressions will produce the same aggregation result. The driver does not provide methods for all aggregation pipeline operators in\nthe Query API. If you need to use an unsupported operation in an\naggregation, you must define the entire expression using the BSON Document \ntype. To learn more about the Document type, see Documents . You can perform an arithmetic operation on a value of type MqlInteger or\n MqlNumber using the methods described in this section. Suppose you have weather data for a specific year that includes the\nprecipitation measurement (in inches) for each day. You want find the average\nprecipitation, in millimeters, for each month. The multiply() operator multiplies the precipitation field by\n 25.4 to convert the value to millimeters. The avg() accumulator method\nreturns the average as the avgPrecipMM field. The group() method\ngroups the values by month given in each document's date field. The following code shows the pipeline for this aggregation: The following code provides an equivalent aggregation pipeline in the\nQuery API: Method Aggregation Pipeline Operator $abs $add divide() $divide $multiply round() $round $subtract You can perform an array operation on a value of type MqlArray \nusing the methods described in this section. Suppose you have a collection of movies, each of which contains an array\nof nested documents for upcoming showtimes. Each nested document\ncontains an array that represents the total number of seats in the\ntheater, where the first array entry is the number of premium seats and\nthe second entry is the number of regular seats. Each nested document\nalso contains the number of tickets that have already been bought for\nthe showtime. A document in this collection might resemble the\nfollowing: The filter() method displays only the results matching the provided\npredicate. In this case, the predicate uses sum() to calculate the\ntotal number of seats and compares that value to the number of ticketsBought \nwith lt() . The project() method stores these filtered results as a new\n availableShowtimes array. The following code shows the pipeline for this aggregation: The following code provides an equivalent aggregation pipeline in\nthe Query API: Method Aggregation Pipeline Operator all() $allElementsTrue any() $anyElementTrue concat() $concatArrays concatArrays() $concatArrays contains() $in distinct() $setUnion elementAt() $arrayElemAt filter() $filter first() $first joinStrings() $concat last() $last map() $map max() $max maxN() $maxN min() $min minN() $minN multiply() $multiply size() $size slice() $slice sum() $sum union() $setUnion unionArrays() $setUnion You must specify the type of the array that you retrieve with the\n getArray() method if you need to work with the values of the\narray as their specific type. In this example, we specify that the seats array contains values\nof type MqlDocument so that we can extract nested fields from\neach array entry. To improve readability, the previous example assigns intermediary values to\nthe totalSeats and isAvailable variables. If you don't pull\nout these intermediary values into variables, the code still produces\nequivalent results. 
You can perform a boolean operation on a value of type MqlBoolean \nusing the methods described in this section. Suppose you want to classify very low or high weather temperature\nreadings (in degrees Fahrenheit) as extreme. The or() operator checks to see if temperatures are extreme by comparing\nthe temperature field to predefined values with lt() and gt() .\nThe project() method records this result in the extremeTemp field. The following code shows the pipeline for this aggregation: The following code provides an equivalent aggregation pipeline in\nthe Query API: Method Aggregation Pipeline Operator and() $and not() $not or() $or You can perform a comparison operation on a value of type MqlValue \nusing the methods described in this section. The following example shows a pipeline that matches all the documents\nwhere the location field has the value \"California\" : The following code provides an equivalent aggregation pipeline in\nthe Query API: The cond() method is similar to the ternary operator in Java and you\nshould use it for simple branches based on a boolean value. You should use\nthe switchOn() methods for more complex comparisons such as performing\npattern matching on the value type or other arbitrary checks on the value. Method Aggregation Pipeline Operator eq() $eq gt() $gt gte() $gte lt() $lt lte() $lte max() $max min() $min ne() $ne You can perform a conditional operation using the methods described in\nthis section. Suppose you have a collection of customers with their membership information.\nOriginally, customers were either members or not. Over time, membership levels\nwere introduced and used the same field. The information stored in this field\ncan be one of a few different types, and you want to create a standardized value\nindicating their membership level. The switchOn() method checks each clause in order. If the value matches the\ntype indicated by the clause, that clause determines the string value\ncorresponding to the membership level. If the original value is a string, it\nrepresents the membership level and that value is used. If the data type is a\nboolean, it returns either Gold or Guest for the membership level. If\nthe data type is an array, it returns the last string in the array, which\ncorresponds to the most recent membership level. If the member field is an\nunknown type, the switchOn() method provides a default value of Guest . The following code shows the pipeline for this aggregation: The following code provides an equivalent aggregation pipeline in\nthe Query API: Method Aggregation Pipeline Operator cond() $cond switchOn() $switch You can apply custom functions to values of type\n MqlValue using the methods described in this section. To improve readability and allow for code reuse, you can move redundant\ncode into static methods. However, it is not possible to directly chain\nstatic methods in Kotlin. The passTo() method lets you chain values\ninto custom static methods. Suppose you need to determine how a class is performing against some\nbenchmarks. You want to find the average final grade for each class and\ncompare it against the benchmark values. The following custom method gradeAverage() takes an array of documents and\nthe name of an integer field shared across those documents. It calculates the\naverage of that field across all the elements in the provided array.
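As a minimal sketch of the chaining problem passTo() solves (the clamp() helper below is hypothetical, not part of this example):

    // Hypothetical static-style helper that bounds a number to [0, 100]
    fun clamp(n: MqlNumber): MqlNumber = n.max(of(0)).min(of(100))

    // Calling the helper directly nests the call and breaks the left-to-right chain:
    val clamped = clamp(current().getInteger("score"))

    // passNumberTo() keeps the fluent chain instead:
    val clampedFluent = current().getInteger("score").passNumberTo { n -> clamp(n) }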
The evaluate() method compares a provided value to\ntwo provided range limits and generates a response string based on\nhow the values compare: The passArrayTo() method takes all of the students and calculates the\naverage score by using the gradeAverage() method. Then, the\n passNumberTo() method uses the evaluate() method to determine how the\nclasses are performing. This example stores the result as the evaluation \nfield using the project() method. The following code shows the pipeline for this aggregation: The following code provides an equivalent aggregation pipeline in\nthe Query API: Method Aggregation Pipeline Operator passTo() No corresponding operator One advantage of using the passTo() method is that you can reuse\nyour custom methods for other aggregations. You could\nuse the gradeAverage() method to find the average of grades for\ngroups of students filtered by, for example, entry year or district, not just their\nclass. You could use the evaluate() method to evaluate, for\nexample, an individual student's performance, or an entire school's or\ndistrict's performance. You can perform a conversion operation to convert between certain MqlValue \ntypes using the methods described in this section. Suppose you have a collection of student data that includes\ntheir graduation years, which are stored as strings. You want to\ncalculate the year of their five-year reunion and store this value in a\nnew field. The parseInteger() method converts the graduationYear to an integer\nso that add() can calculate the reunion year. The addFields() method\nstores this result as a new reunionYear field. The following code shows the pipeline for this aggregation: The following code provides an equivalent aggregation pipeline in\nthe Query API: Method Aggregation Pipeline Operator asDocument() No corresponding operator asMap() No corresponding operator asString() for MqlDate $dateToString asString() for MqlValue $toString millisecondsAsDate() $toDate parseDate() $dateFromString parseInteger() $toInt You can perform a date operation on a value of type MqlDate \nusing the methods described in this section. Suppose you have data about package deliveries and need to match\ndeliveries that occurred on any Monday in the \"America/New_York\" time\nzone. If the deliveryDate field contains any string values representing\nvalid dates, such as \"2018-01-15T16:00:00Z\" or \"Jan 15, 2018, 12:00\nPM EST\" , you can use the parseDate() method to convert the strings\ninto date types. The dayOfWeek() method then converts each parsed date to a number\nrepresenting its day of the week, evaluated in the time zone given by the\n \"America/New_York\" parameter. The eq() method compares this value to\n 2 , which corresponds to Monday in that numbering. The following code shows the pipeline for this aggregation: The following code provides an equivalent aggregation pipeline in\nthe Query API: Method Aggregation Pipeline Operator dayOfMonth() $dayOfMonth dayOfWeek() $dayOfWeek dayOfYear() $dayOfYear hour() $hour millisecond() $millisecond minute() $minute month() $month second() $second week() $week year() $year You can perform a document operation on a value of type MqlDocument \nusing the methods described in this section. Suppose you have a collection of legacy customer data that includes\naddresses as child documents under the mailing.address field. You want\nto find all the customers who currently live in Washington state.
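A minimal sketch of the expression this scenario calls for, using the document operations described next (the field names follow the sample document below):

    // Does the nested mailing.address.state field equal "WA"?
    val livesInWA = current()
        .getDocument("mailing.address")
        .getString("state")
        .eq(of("WA"))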
A\ndocument in this collection might resemble the following: The getDocument() method retrieves the mailing.address field as a\ndocument so the nested state field can be retrieved with the\n getString() method. The eq() method checks if the value of the\n state field is \"WA\" . The following code shows the pipeline for this aggregation: The following code provides an equivalent aggregation pipeline in\nthe Query API: Method Aggregation Pipeline Operator getField() $getField hasField() No corresponding operator merge() $mergeObjects setField() $setField unsetField() $unsetField You can perform a map operation on a value of either type MqlMap or\n MqlEntry using the methods described in this section. Suppose you have a collection of inventory data where each document represents\nan individual item you're responsible for supplying. Each document contains a\nfield that is a map of all your warehouses and how many copies they currently\nhave in their inventory of the item. You want to determine the total number of\ncopies of items you have across all of your warehouses. A document in this\ncollection might resemble the following: The entries() method returns the map entries in the warehouses \nfield as an array. The sum() method calculates the total value of items\nbased on the values in the array retrieved with the getValue() method.\nThis example stores the result as the new totalInventory field using the\n project() method. The following code shows the pipeline for this aggregation: The following code provides an equivalent aggregation pipeline in\nthe Query API: You should represent data as a map if the data maps\narbitrary keys such as dates or item IDs to values. Method Aggregation Pipeline Operator entries() $objectToArray get() No corresponding operator getKey() No corresponding operator getValue() No corresponding operator has() No corresponding operator merge() No corresponding operator set() No corresponding operator setKey() No corresponding operator setValue() No corresponding operator unset() No corresponding operator You can perform a string operation on a value of type MqlString \nusing the methods described in this section. Suppose you need to generate lowercase usernames for employees of a\ncompany from the employees' last names and employee IDs. The append() method combines the lastName and employeeID fields into\na single username, while the toLower() method makes the entire username\nlowercase. This example stores the result as a new username field using\nthe project() method. The following code shows the pipeline for this aggregation: The following code provides an equivalent aggregation pipeline in\nthe Query API: Method Aggregation Pipeline Operator append() $concat length() $strLenCP lengthBytes() $strLenBytes substr() $substrCP substrBytes() $substrBytes toLower() $toLower toUpper() $toUpper You can perform a type-check operation on a value of type MqlValue \nusing the methods described in this section. These methods do not return boolean values. Instead, you provide a default value\nthat matches the type specified by the method. If the checked value\nmatches the method type, the checked value is returned. Otherwise, the supplied\ndefault value is returned. If you want to program branching logic based on the\ndata type, see switchOn() . Suppose you have a collection of rating data. An early version of the review\nschema allowed users to submit negative reviews without a star rating.
You want to\nconvert any of these negative reviews without a star rating to have the minimum\nvalue of 1 star. The isNumberOr() method returns either the value of rating , or\na value of 1 if rating is not a number or is null. The\n project() method returns this value as a new numericalRating field. The following code shows the pipeline for this aggregation: The following code provides an equivalent aggregation pipeline in\nthe Query API: Method Aggregation Pipeline Operator isArrayOr() No corresponding operator isBooleanOr() No corresponding operator isDateOr() No corresponding operator isDocumentOr() No corresponding operator isIntegerOr() No corresponding operator isMapOr() No corresponding operator isNumberOr() No corresponding operator isStringOr() No corresponding operator", - "code": [ - { - "lang": "kotlin", - "value": "import com.mongodb.client.model.Aggregates\nimport com.mongodb.client.model.Accumulators\nimport com.mongodb.client.model.Projections\nimport com.mongodb.client.model.Filters\nimport com.mongodb.client.model.mql.MqlValues" - }, - { - "lang": "kotlin", - "value": "current().getString(\"name\")" - }, - { - "lang": "kotlin", - "value": "of(1.0)" - }, - { - "lang": "kotlin", - "value": "current()\n .getArray(\"visitDates\")\n .size()\n .gt(of(0))\n .and(current()\n .getString(\"state\")\n .eq(of(\"New Mexico\")))" - }, - { - "lang": "javascript", - "value": "[ { $group: {\n _id: { $month: \"$date\" },\n avgPrecipMM: {\n $avg: { $multiply: [\"$precipitation\", 25.4] } }\n} } ]" - }, - { - "lang": "kotlin", - "value": "val month = current().getDate(\"date\").month(of(\"UTC\"))\nval precip = current().getInteger(\"precipitation\")\n\nlistOf(\n Aggregates.group(\n month,\n Accumulators.avg(\"avgPrecipMM\", precip.multiply(25.4))\n))\n" - }, - { - "lang": "json", - "value": "{\n \"_id\": ...,\n \"movie\": \"Hamlet\",\n \"showtimes\": [\n {\n \"date\": \"May 14, 2023, 12:00 PM\",\n \"seats\": [ 20, 80 ],\n \"ticketsBought\": 100\n },\n {\n \"date\": \"May 20, 2023, 08:00 PM\",\n \"seats\": [ 10, 40 ],\n \"ticketsBought\": 34\n }]\n}" - }, - { - "lang": "javascript", - "value": "[ { $project: {\n availableShowtimes: {\n $filter: {\n input: \"$showtimes\",\n as: \"showtime\",\n cond: { $lt: [ \"$$showtime.ticketsBought\", { $sum: \"$$showtime.seats\" } ] }\n } }\n} } ]" - }, - { - "lang": "kotlin", - "value": "val showtimes = current().getArray<MqlDocument>(\"showtimes\")\n\nlistOf(\n Aggregates.project(\n Projections.fields(\n Projections.computed(\"availableShowtimes\", showtimes\n .filter { showtime ->\n val seats = showtime.getArray<MqlNumber>(\"seats\")\n val totalSeats = seats.sum { n -> n }\n val ticketsBought = showtime.getInteger(\"ticketsBought\")\n val isAvailable = ticketsBought.lt(totalSeats)\n isAvailable\n })\n)))\n" - }, - { - "lang": "javascript", - "value": "[ { $project: {\n extremeTemp: { $or: [ { $lt: [\"$temperature\", 10] },\n { $gt: [\"$temperature\", 95] } ] }\n} } ]" - }, - { - "lang": "kotlin", - "value": "val temperature = current().getInteger(\"temperature\")\n\nlistOf(\n Aggregates.project(\n Projections.fields(\n Projections.computed(\"extremeTemp\", temperature\n .lt(of(10))\n .or(temperature.gt(of(95))))\n)))\n" - }, - { - "lang": "javascript", - "value": "[ { $match: { location: { $eq: \"California\" } } } ]" - }, - { - "lang": "kotlin", - "value": "val location = current().getString(\"location\")\n\nlistOf(\n Aggregates.match(\n Filters.expr(location.eq(of(\"California\")))\n))\n" - }, - { - "lang": "javascript", - "value": "[ { $project: {\n membershipLevel: {\n 
$switch: {\n branches: [\n { case: { $eq: [ { $type: \"$member\" }, \"string\" ] }, then: \"$member\" },\n { case: { $eq: [ { $type: \"$member\" }, \"bool\" ] }, then: { $cond: {\n if: \"$member\",\n then: \"Gold\",\n else: \"Guest\" } } },\n { case: { $eq: [ { $type: \"$member\" }, \"array\" ] }, then: { $last: \"$member\" } }\n ],\n default: \"Guest\" } }\n} } ]" - }, - { - "lang": "kotlin", - "value": "val member = current().getField(\"member\")\n\nlistOf(\n Aggregates.project(\n Projections.fields(\n Projections.computed(\"membershipLevel\",\n member.switchOn{field -> field\n .isString{s-> s}\n .isBoolean{b -> b.cond(of(\"Gold\"), of(\"Guest\"))}\n .isArray { a -> a.last()}\n .defaults{ d -> of(\"Guest\")}})\n)))\n" - }, - { - "lang": "javascript", - "value": "[ { $project: {\n evaluation: { $switch: {\n branches: [\n { case: { $lte: [ { $avg: \"$students.finalGrade\" }, 70 ] },\n then: \"Needs improvement\"\n },\n { case: { $lte: [ { $avg: \"$students.finalGrade\" }, 85 ] },\n then: \"Meets expectations\"\n }\n ],\n default: \"Exceeds expectations\" } }\n} } ]" - }, - { - "lang": "kotlin", - "value": "fun gradeAverage(students: MqlArray<MqlDocument>, fieldName: String): MqlNumber {\n val sum = students.sum{ student -> student.getInteger(fieldName) }\n val avg = sum.divide(students.size())\n return avg\n}\n\nfun evaluate(grade: MqlNumber, cutoff1: MqlNumber, cutoff2: MqlNumber): MqlString {\n val message = grade.switchOn{ on -> on\n .lte(cutoff1) { g -> of(\"Needs improvement\") }\n .lte(cutoff2) { g -> of(\"Meets expectations\") }\n .defaults{g -> of(\"Exceeds expectations\")}}\n return message\n}\n" - }, - { - "lang": "kotlin", - "value": "val students = current().getArray<MqlDocument>(\"students\")\n\nlistOf(\n Aggregates.project(\n Projections.fields(\n Projections.computed(\"evaluation\", students\n .passArrayTo { s -> gradeAverage(s, \"finalGrade\") }\n .passNumberTo { grade -> evaluate(grade, of(70), of(85)) })\n)))\n" - }, - { - "lang": "javascript", - "value": "[ { $addFields: {\n reunionYear: {\n $add: [ { $toInt: \"$graduationYear\" }, 5 ] }\n} } ]" - }, - { - "lang": "kotlin", - "value": "val graduationYear = current().getString(\"graduationYear\")\n\nlistOf(\n Aggregates.addFields(\n Field(\"reunionYear\",\n graduationYear\n .parseInteger()\n .add(5))\n))\n" - }, - { - "lang": "javascript", - "value": "[ { $match: {\n $expr: {\n $eq: [ {\n $dayOfWeek: {\n date: { $dateFromString: { dateString: \"$deliveryDate\" } },\n timezone: \"America/New_York\" }},\n 2\n ] }\n} } ]" - }, - { - "lang": "kotlin", - "value": "val deliveryDate = current().getString(\"deliveryDate\")\n\nlistOf(\n Aggregates.match(\n Filters.expr(deliveryDate\n .parseDate()\n .dayOfWeek(of(\"America/New_York\"))\n .eq(of(2))\n)))\n" - }, - { - "lang": "json", - "value": "{\n \"_id\": ...,\n \"customer.name\": \"Mary Kenneth Keller\",\n \"mailing.address\":\n {\n \"street\": \"601 Mongo Drive\",\n \"city\": \"Vasqueztown\",\n \"state\": \"CO\",\n \"zip\": 27017\n }\n}" - }, - { - "lang": "javascript", - "value": "[\n { $match: {\n $expr: {\n $eq: [{\n $getField: {\n input: { $getField: { input: \"$$CURRENT\", field: \"mailing.address\"}},\n field: \"state\" }},\n \"WA\" ]\n}}}]" - }, - { - "lang": "kotlin", - "value": "val address = current().getDocument(\"mailing.address\")\n\nlistOf(\n Aggregates.match(\n Filters.expr(address\n .getString(\"state\")\n .eq(of(\"WA\"))\n)))\n" - }, - { - "lang": "json", - "value": "{\n \"_id\": ...,\n \"item\": \"notebook\",\n \"warehouses\": {\n \"Atlanta\": 50,\n \"Chicago\": 0,\n \"Portland\": 120,\n \"Dallas\": 6\n }\n}" - }, - { - "lang": "javascript", - "value": "[ { $project: {\n totalInventory: {\n $sum: {\n $getField: { $objectToArray: \"$warehouses\" },\n } }\n} } ]" - }, - { - "lang": "kotlin", - "value": "val warehouses = current().getMap<MqlNumber>(\"warehouses\")\n\nlistOf(\n Aggregates.project(\n Projections.fields(\n Projections.computed(\"totalInventory\", warehouses\n .entries()\n .sum { v -> v.getValue() })\n)))\n" - }, - { - "lang": "javascript", - "value": "[ { $project: {\n username: {\n $toLower: { $concat: [\"$lastName\", \"$employeeID\"] } }\n} } ]" - }, - { - "lang": "kotlin", - "value": "val lastName = current().getString(\"lastName\")\nval employeeID = current().getString(\"employeeID\")\n\nlistOf(\n Aggregates.project(\n Projections.fields(\n Projections.computed(\"username\", lastName\n .append(employeeID)\n .toLower())\n)))\n" - }, - { - "lang": "javascript", - "value": "[ { $project: {\n numericalRating: {\n $cond: { if: { $isNumber: \"$rating\" },\n then: \"$rating\",\n else: 1\n } }\n} } ]" - }, - { - "lang": "kotlin", - "value": "val rating = current().getField(\"rating\")\n\nlistOf(\n Aggregates.project(\n Projections.fields(\n Projections.computed(\"numericalRating\", rating\n .isNumberOr(of(1)))\n)))\n" - } - ], - "preview": "In this guide, you can learn how to use the MongoDB Kotlin Driver to construct\nexpressions for use in aggregation pipelines. You can perform\nexpression operations with discoverable, typesafe Java methods rather\nthan BSON documents. Because these methods follow the fluent interface\npattern, you can chain aggregation operations together to create code\nthat is both more compact and more naturally readable.", - "tags": null, - "facets": { - "target_product": ["drivers"], - "programming_language": ["kotlin"] - } - }, - { - "slug": "fundamentals/aggregation", - "title": "Aggregation", - "headings": [ - "Overview", - "Aggregation and Find Operations Compared", - "Useful References", - "Example Data", - "Basic Aggregation", - "Explain Aggregation", - "Aggregation Expressions" - ], - "paragraphs": "In this guide, you can learn how to use aggregation operations in the MongoDB Kotlin driver. Aggregation operations process data in your MongoDB collections and return computed results. MongoDB's Aggregation\npipeline, part of the Query API, is modeled on the concept of data processing pipelines. Documents enter a multi-staged pipeline that\ntransforms the documents into an aggregated result. Another way to think of aggregation is like a car factory. Within the car factory is an assembly line, along which\nare assembly stations with specialized tools to do a specific job, like drills and welders. Raw parts enter the factory,\nwhere they are then transformed and assembled into a finished product. The aggregation pipeline is the assembly line, aggregation stages are the assembly stations, and\n operator expressions are the specialized tools. Using find operations, you can: Using aggregation operations, you can: Aggregation operations have some limitations you must keep in mind: select what documents to return select what fields to return sort the results perform all find operations rename fields calculate fields summarize data group values Returned documents must not violate the BSON document size limit \nof 16 megabytes. Pipeline stages have a memory limit of 100 megabytes by default. If required,\nyou may exceed this limit by using the\n allowDiskUse \nmethod. 
The $graphLookup stage has a strict memory limit of 100 megabytes\nand will ignore allowDiskUse . Aggregation pipeline Aggregation stages Operator expressions Aggregation Builders The examples use a collection of the following data in MongoDB: The data in the collection is modeled by the following Restaurant data class: To perform an aggregation, pass a list of aggregation stages to the\n MongoCollection.aggregate() method. The Kotlin driver provides the\n Aggregates \nhelper class that contains builders for aggregation stages. In the following example, the aggregation pipeline: For more information about the methods and classes mentioned in this section,\nsee the following API Documentation: Uses a $match stage to filter for documents whose\n categories array field contains the element Bakery . The example uses\n Aggregates.match to build the $match stage. Uses a $group stage to group the matching documents by the stars \nfield, accumulating a count of documents for each distinct value of stars . You can build the expressions used in this example using the aggregation builders . MongoCollection.aggregate() Aggregates.match To view information about how MongoDB executes your operation, use the\n explain() method of the AggregateFlow class. The explain() \nmethod returns execution plans and performance statistics. An execution\nplan is a potential way MongoDB can complete an operation.\nThe explain() method provides both the winning plan (the plan MongoDB\nexecuted) and rejected plans. In the following example, we print the JSON representation of the\nwinning plans for aggregation stages that produce execution plans: For more information about the topics mentioned in this section, see the\nfollowing resources: You can specify the level of detail of your explanation by passing a\nverbosity level to the explain() method. The following table shows all verbosity levels for explanations and\ntheir intended use cases: Verbosity Level Use Case ALL_PLANS_EXECUTIONS You want to know which plan MongoDB will choose to run your query. EXECUTION_STATS You want to know if your query is performing well. QUERY_PLANNER You have a problem with your query and you want as much information\nas possible to diagnose the issue. Explain Output Server Manual Entry Query Plans Server Manual Entry ExplainVerbosity API Documentation explain() API Documentation AggregateFlow API Documentation The Kotlin driver provides builders for accumulator expressions for use with\n $group . You must declare all other expressions in JSON format or\ncompatible document format. In the following example, the aggregation pipeline uses a\n $project stage and various Projections to return the name \nfield and the calculated field firstCategory whose value is the\nfirst element in the categories field. For more information about the methods and classes mentioned in this section,\nsee the following API Documentation: The syntax in either of the following examples will define an $arrayElemAt \nexpression. The $ in front of \"categories\" tells MongoDB that this is a field path ,\nusing the \"categories\" field from the input document. 
Accumulators $group $project Projections", - "code": [ - { - "lang": "json", - "value": "[\n {\"name\": \"Sun Bakery Trattoria\", \"contact\": {\"phone\": \"386-555-0189\", \"email\": \"SunBakeryTrattoria@example.org\", \"location\": [-74.0056649, 40.7452371]}, \"stars\": 4, \"categories\": [\"Pizza\", \"Pasta\", \"Italian\", \"Coffee\", \"Sandwiches\"]},\n {\"name\": \"Blue Bagels Grill\", \"contact\": {\"phone\": \"786-555-0102\", \"email\": \"BlueBagelsGrill@example.com\", \"location\": [-73.92506, 40.8275556]}, \"stars\": 3, \"categories\": [\"Bagels\", \"Cookies\", \"Sandwiches\"]},\n {\"name\": \"XYZ Bagels Restaurant\", \"contact\": {\"phone\": \"435-555-0190\", \"email\": \"XYZBagelsRestaurant@example.net\", \"location\": [-74.0707363, 40.59321569999999]}, \"stars\": 4, \"categories\": [\"Bagels\", \"Sandwiches\", \"Coffee\"]},\n {\"name\": \"Hot Bakery Cafe\", \"contact\": {\"phone\": \"264-555-0171\", \"email\": \"HotBakeryCafe@example.net\", \"location\": [-73.96485799999999, 40.761899]}, \"stars\": 4, \"categories\": [\"Bakery\", \"Cafe\", \"Coffee\", \"Dessert\"]},\n {\"name\": \"Green Feast Pizzeria\", \"contact\": {\"phone\": \"840-555-0102\", \"email\": \"GreenFeastPizzeria@example.com\", \"location\": [-74.1220973, 40.6129407]}, \"stars\": 2, \"categories\": [\"Pizza\", \"Italian\"]},\n {\"name\": \"ZZZ Pasta Buffet\", \"contact\": {\"phone\": \"769-555-0152\", \"email\": \"ZZZPastaBuffet@example.com\", \"location\": [-73.9446421, 40.7253944]}, \"stars\": 0, \"categories\": [\"Pasta\", \"Italian\", \"Buffet\", \"Cafeteria\"]},\n {\"name\": \"XYZ Coffee Bar\", \"contact\": {\"phone\": \"644-555-0193\", \"email\": \"XYZCoffeeBar@example.net\", \"location\": [-74.0166091, 40.6284767]}, \"stars\": 5, \"categories\": [\"Coffee\", \"Cafe\", \"Bakery\", \"Chocolates\"]},\n {\"name\": \"456 Steak Restaurant\", \"contact\": {\"phone\": \"990-555-0165\", \"email\": \"456SteakRestaurant@example.com\", \"location\": [-73.9365108, 40.8497077]}, \"stars\": 0, \"categories\": [\"Steak\", \"Seafood\"]},\n {\"name\": \"456 Cookies Shop\", \"contact\": {\"phone\": \"604-555-0149\", \"email\": \"456CookiesShop@example.org\", \"location\": [-73.8850023, 40.7494272]}, \"stars\": 4, \"categories\": [\"Bakery\", \"Cookies\", \"Cake\", \"Coffee\"]},\n {\"name\": \"XYZ Steak Buffet\", \"contact\": {\"phone\": \"229-555-0197\", \"email\": \"XYZSteakBuffet@example.org\", \"location\": [-73.9799932, 40.7660886]}, \"stars\": 3, \"categories\": [\"Steak\", \"Salad\", \"Chinese\"]}\n]" - }, - { - "lang": "kotlin", - "value": "data class Restaurant(\n val name: String,\n val contact: Contact,\n val stars: Int,\n val categories: List<String>\n) {\n data class Contact(\n val phone: String,\n val email: String,\n val location: List<Double>\n )\n}\n" - }, - { - "lang": "kotlin", - "value": "data class Results(@BsonId val id: Int, val count: Int)\n\nval resultsFlow = collection.aggregate<Results>(\n listOf(\n Aggregates.match(Filters.eq(Restaurant::categories.name, \"Bakery\")),\n Aggregates.group(\"\\$${Restaurant::stars.name}\",\n Accumulators.sum(\"count\", 1))\n )\n)\n\nresultsFlow.collect { println(it) }\n" - }, - { - "lang": null, - "value": "Results(id=4, count=2)\nResults(id=5, count=1)" - }, - { - "lang": "kotlin", - "value": "data class Results (val name: String, val count: Int)\n\nval explanation = collection.aggregate<Results>(\n listOf(\n Aggregates.match(Filters.eq(Restaurant::categories.name, \"bakery\")),\n Aggregates.group(\"\\$${Restaurant::stars.name}\", Accumulators.sum(\"count\", 1))\n 
)\n).explain(ExplainVerbosity.EXECUTION_STATS)\n\n// Prettyprint the output\nprintln(explanation.toJson(JsonWriterSettings.builder().indent(true).build()))\n" - }, - { - "lang": "javascript", - "value": "{\n \"explainVersion\": \"2\",\n \"queryPlanner\": {\n // ...\n },\n \"command\": {\n // ...\n },\n // ...\n}" - }, - { - "lang": "kotlin", - "value": "Document(\"\\$arrayElemAt\", listOf(\"\\$categories\", 0))\n// is equivalent to\nDocument.parse(\"{ \\$arrayElemAt: ['\\$categories', 0] }\")\n" - }, - { - "lang": "kotlin", - "value": "data class Results(val name: String, val firstCategory: String)\n\nval resultsFlow = collection.aggregate<Results>(\n listOf(\n Aggregates.project(\n Projections.fields(\n Projections.excludeId(),\n Projections.include(\"name\"),\n Projections.computed(\n \"firstCategory\",\n Document(\"\\$arrayElemAt\", listOf(\"\\$categories\", 0))\n )\n )\n )\n )\n)\n\nresultsFlow.collect { println(it) }\n" - }, - { - "lang": "console", - "value": "Results(name=Sun Bakery Trattoria, firstCategory=Pizza)\nResults(name=Blue Bagels Grill, firstCategory=Bagels)\nResults(name=XYZ Bagels Restaurant, firstCategory=Bagels)\nResults(name=Hot Bakery Cafe, firstCategory=Bakery)\nResults(name=Green Feast Pizzeria, firstCategory=Pizza)\nResults(name=ZZZ Pasta Buffet, firstCategory=Pasta)\nResults(name=XYZ Coffee Bar, firstCategory=Coffee)\nResults(name=456 Steak Restaurant, firstCategory=Steak)\nResults(name=456 Cookies Shop, firstCategory=Bakery)\nResults(name=XYZ Steak Buffet, firstCategory=Steak)" - } - ], - "preview": "In this guide, you can learn how to use aggregation operations in the MongoDB Kotlin driver.", - "tags": null, - "facets": { - "target_product": ["drivers"], - "programming_language": ["kotlin"] - } - }, - { - "slug": "fundamentals/auth", - "title": "Authentication Mechanisms", - "headings": [ - "Overview", - "Specify an Authentication Mechanism", - "Mechanisms", - "Default", - "SCRAM-SHA-256", - "SCRAM-SHA-1", - "MONGODB-CR", - "MONGODB-AWS", - "AWS SDK", - "Specify Your Credentials in the Environment", - "Specify Your Credentials in a MongoCredential", - "X.509" - ], - "paragraphs": "In this guide, you can learn how to authenticate with MongoDB using each\n authentication mechanism available in the MongoDB Community Edition.\nAuthentication mechanisms are processes by which the driver and server\nconfirm identity and establish trust to ensure security. The mechanisms that you can use with the latest version of MongoDB Community\nEdition are as follows: To authenticate using Kerberos or LDAP , see the\n Enterprise Authentication Mechanisms guide . For more information on establishing a connection to your MongoDB cluster,\nread our Connection Guide . Default SCRAM-SHA-256 SCRAM-SHA-1 MONGODB-CR MONGODB-AWS X.509 You can specify your authentication mechanism and credentials when connecting\nto MongoDB using either of the following: A connection string (also known as a connection URI ) specifies how to\nconnect and authenticate to your MongoDB cluster. To authenticate using a connection string, include your settings in your\nconnection string and pass it to the MongoClient.create() method to\ninstantiate your MongoClient . The Connection String \ntab in each section provides the syntax for authenticating using a\n connection string . Alternatively, you can use the MongoCredential class to specify your\nauthentication details. The MongoCredential class contains static factory\nmethods that construct instances containing your authentication mechanism and\ncredentials. 
When you use the MongoCredential helper class, you need\nto use the MongoClientSettings.Builder class to configure your\nconnection settings when constructing your MongoClient . The\n MongoCredential tab in each section provides the syntax for\nauthenticating using a MongoCredential . For more information on these classes and methods, refer to the following API\ndocumentation: A connection string A MongoCredential factory method MongoClient.create() MongoClient MongoClientSettings.Builder MongoCredential The default authentication mechanism setting uses one of the following\nauthentication mechanisms depending on what your MongoDB server supports: Server versions 3.6 and earlier use MONGODB-CR as the default\nmechanism. Newer versions of the server use one of the mechanisms for\nwhich they advertise support. The following code snippets show how to specify the authentication mechanism,\nusing the following placeholders: Select the Connection String or the MongoCredential \ntab below for instructions and sample code for specifying this authentication\nmechanism: For more information on the challenge-response (CR) and salted\nchallenge-response authentication mechanisms (SCRAM) that MongoDB supports,\nsee the SCRAM section of the Server manual. SCRAM-SHA-256 SCRAM-SHA-1 MONGODB-CR db_username - your MongoDB database username db_password - your MongoDB database user's password hostname - network address of your MongoDB server, accessible by your client port - port number of your MongoDB server authenticationDb - MongoDB database that contains your user's\nauthentication data. If you omit this parameter, the driver uses the\ndefault value admin . To specify the default authentication mechanism using a connection\nstring, omit the mechanism. Your code to instantiate a MongoClient \nshould resemble the following: To specify the default authentication mechanism using the\n MongoCredential class, use the createCredential() method. Your\ncode to instantiate a MongoClient should resemble the following: SCRAM-SHA-256 is a salted challenge-response authentication mechanism\n(SCRAM) that uses your username and password, encrypted with the SHA-256 \nalgorithm, to authenticate your user. The following code snippets show how to specify the authentication mechanism,\nusing the following placeholders: Select the Connection String or the MongoCredential \ntab below for instructions and sample code for specifying this authentication\nmechanism: SCRAM-SHA-256 is the default authentication method for MongoDB starting\nin MongoDB 4.0. db_username - your MongoDB database username. db_password - your MongoDB database user's password. hostname - network address of your MongoDB server, accessible by your client. port - port number of your MongoDB server. authenticationDb - MongoDB database that contains your user's\nauthentication data. If you omit this parameter, the driver uses the\ndefault value admin . To specify the SCRAM-SHA-256 authentication mechanism using a\nconnection string, assign the authMechanism parameter the value\n SCRAM-SHA-256 in your connection string. Your code to instantiate\na MongoClient should resemble the following: To specify the default authentication mechanism using the\n MongoCredential class, use the\n createScramSha256Credential() \nmethod. Your code to instantiate a MongoClient should resemble the following: SCRAM-SHA-1 is a salted challenge-response mechanism (SCRAM) that uses your\nusername and password, encrypted with the SHA-1 algorithm, to authenticate\nyour user. 
The following code snippets show how to specify the authentication mechanism,\nusing the following placeholders: Select the Connection String or the MongoCredential \ntab below for instructions and sample code for specifying this authentication\nmechanism: SCRAM-SHA-1 is the default authentication method for MongoDB versions\n3.0, 3.2, 3.4, and 3.6. db_username - your MongoDB database username. db_password - your MongoDB database user's password. hostname - network address of your MongoDB server, accessible by your client. port - port number of your MongoDB server. authenticationDb - MongoDB database that contains your user's\nauthentication data. If you omit this parameter, the driver uses the\ndefault value admin . To specify the SCRAM-SHA-1 authentication mechanism using a\nconnection string, assign the authMechanism parameter the value\n SCRAM-SHA-1 in your connection string. Your code to instantiate\na MongoClient should resemble the following: To specify the default authentication mechanism using the\n MongoCredential class, use the\n createScramSha1Credential() \nmethod. Your code to instantiate a MongoClient should resemble the following: MONGODB-CR is a challenge-response authentication mechanism that uses your\nusername and password to authenticate your user. This authentication\nmechanism was deprecated starting in MongoDB 3.6 and is no longer\nsupported as of MongoDB 4.0. You cannot specify this method explicitly; refer to the fallback provided\nby the default authentication mechanism to\nconnect using MONGODB-CR . The MONGODB-AWS authentication mechanism uses your Amazon Web Services\nIdentity and Access Management (AWS IAM) credentials to authenticate your\nuser. To learn more about configuring MongoDB Atlas, see the\n Set Up Passwordless Authentication with AWS IAM Roles \nguide. To instruct the driver to use this authentication mechanism, you can specify\n MONGODB-AWS either as a parameter in the connection string or by using\nthe MongoCredential.createAwsCredential() factory method. Learn how to specify this authentication mechanism and the various ways to\nprovide your AWS IAM credentials in the next sections. These sections contain code examples that use the following placeholders: The MONGODB-AWS authentication mechanism is available for MongoDB\ndeployments on MongoDB Atlas. 
awsKeyId - value of your AWS access key ID awsSecretKey - value of your AWS secret access key atlasUri - network address of your MongoDB Atlas deployment hostname - hostname of your MongoDB Atlas deployment port - port of your MongoDB Atlas deployment awsSessionToken - value of your AWS session token You can use one of the AWS SDK for Java v1 or v2 to specify your credentials.\nThis method offers the following features: To use the AWS SDK for Java for MONGODB-AWS authentication, you must\nperform the following: To specify the authentication mechanism by using a MongoCredential ,\nuse the MongoCredential.createAwsCredential() factory method\nand add the MongoCredential instance to your MongoClient as shown\nin the following example: To specify the authentication mechanism in the connection string, add\nit as a parameter as shown in the following example: To add the AWS SDK as a dependency to your project, see the following\nAWS documentation for the version you need: To supply your credentials, see the following AWS documentation for the\nversion you need: Multiple options for obtaining credentials Credential caching which helps your application avoid rate limiting Credential provider management for use with the Elastic Kubernetes Service . Specify the authentication mechanism Add the SDK as a dependency to your project Supply your credentials using one of the methods in the credential\nprovider chain For the AWS SDK for Java v2 , see the Setting Up \nguide. For the AWS SDK for Java v1 , see the Getting Started \nguide. For the AWS SDK for Java v2, the Java driver currently tests using the\n software.amazon.awssdk:auth:2.18.9 dependency. For the AWS SDK for Java v1, the Java driver currently tests using the\n com.amazonaws:aws-java-sdk-core:1.12.337 dependency. To learn more about the AWS SDK for Java v2 class the driver uses to\nget the credentials, see the DefaultCredentialsProvider \nAPI documentation. Learn how to supply your credentials to this class from the\n Use the default credential provider chain \nsection. To learn more about the AWS SDK for Java v1 class the driver uses to\nget the credentials, see the DefaultAWSCredentialsProviderChain \nAPI documentation. Learn how to supply your credentials to this class from the\n Using the Default Credential Provider Chain \nsection. If you include both v1 and v2 of the AWS SDK for Java in your project,\nyou must use the v2 methods to supply your credentials. You can provide your AWS IAM credentials by instructing the driver to\nuse the MONGODB-AWS authentication mechanism and by setting the\nappropriate environment variables. To use the environment variables to supply your credentials, you must perform\nthe following: You can specify the authentication mechanism by using a MongoCredential \nor on the connection string. To specify the authentication mechanism by using a MongoCredential ,\nuse the MongoCredential.createAwsCredential() factory method and add the\n MongoCredential instance to your MongoClient as shown in the following\nexample: To specify the authentication mechanism in the connection string, add it as a\nparameter as shown in the following example: The next examples show how to provide your credentials by setting environment\nvariables for the following types of authentication: The following example shows how you can set your programmatic access keys \nin environment variables by using bash or a similar shell: Omit the line containing AWS_SESSION_TOKEN if you don't need an AWS\nsession token for that role. 
To authenticate by using ECS container credentials , set the ECS\nendpoint relative URI in an environment variable by using bash or\na similar shell as shown in the following example: To authenticate using EC2 container credentials , make sure none of the\naforementioned environment variables are set. The driver obtains the\ncredentials from the default IPv4 EC2 instance metadata endpoint. Specify the authentication mechanism Add the appropriate environment variables Programmatic access keys ECS container credentials EC2 container credentials You can supply your AWS IAM credentials to a MongoClient by using a\n MongoCredential instance. To construct the MongoCredential instance\nfor MONGODB-AWS authentication, use the createAwsCredential() \nfactory method. You can supply only programmatic access keys to the\n MongoCredential.createAwsCredential() method. If you need to supply ECS\nor EC2 container credentials, use the instructions in\n Specify Your Credentials in the Environment or AWS SDK . To use the MongoCredential for MONGODB-AWS authentication, you\nmust perform the following: To specify the authentication mechanism by using a MongoCredential ,\nuse the MongoCredential.createAwsCredential() factory method\nand add the MongoCredential instance to your MongoClient as shown\nin the following example: If you need to specify an AWS session token, pass it to the\n withMechanismProperty() \nmethod as shown in the following example: To refresh your credentials, you can declare a Supplier lambda expression\nthat returns new credentials as shown in the following example: If you must provide AWS IAM credentials in a connection string, you can add\nit to your MongoClientSettings by calling the applyConnectionString() \nmethod: Specify the authentication mechanism Supply the credentials The X.509 authentication mechanism uses\n TLS with X.509 certificates to\nauthenticate your user, identified by the relative distinguished names\n(RDNs) of your client certificate. When you specify the X.509 \nauthentication mechanism, the server authenticates the connection using\nthe subject name of the client certificate. The following code snippets show how to specify the authentication mechanism,\nusing the following placeholders: Select the Connection String or the MongoCredential \ntab below for instructions and sample code for specifying this authentication\nmechanism: For additional information on configuring your application to use\ncertificates as well as TLS/SSL options, see our\n TLS/SSL guide . hostname - network address of your MongoDB server, accessible by your client. port - port number of your MongoDB server. authenticationDb - MongoDB database that contains your user's\nauthentication data. If you omit this parameter, the driver uses the\ndefault value admin . To specify the X.509 authentication mechanism using a connection\nstring, assign the authMechanism parameter the value MONGODB-X509 \nand enable TLS by assigning the tls \nparameter a true value. Your code to instantiate a MongoClient \nshould resemble the following: To specify the X.509 authentication mechanism using the\n MongoCredential class, use the\n createMongoX509Credential() \nmethod. Also, enable TLS by calling the\n applyToSslSettings() \nmethod and setting the enabled property to true in the\n SslSettings.Builder \nblock. 
Your code to instantiate a MongoClient should resemble the following:", - "code": [ - { - "lang": "kotlin", - "value": "val mongoClient =\n MongoClient.create(\"mongodb://<db_username>:<db_password>@<hostname>:<port>/?authSource=<authenticationDb>\")\n" - }, - { - "lang": "kotlin", - "value": "val credential = MongoCredential.createCredential(\n \"<db_username>\", \"<authenticationDb>\", \"<db_password>\".toCharArray()\n)\nval settings = MongoClientSettings.builder()\n .applyToClusterSettings { builder: ClusterSettings.Builder ->\n builder.hosts(\n listOf(ServerAddress(\"<hostname>\", <port>))\n )\n }\n .credential(credential)\n .build()\n\nval mongoClient = MongoClient.create(settings)\n" - }, - { - "lang": "kotlin", - "value": "val mongoClient =\n MongoClient.create(\"mongodb://<db_username>:<db_password>@<hostname>:<port>/?authSource=<authenticationDb>&authMechanism=SCRAM-SHA-256\")\n" - }, - { - "lang": "kotlin", - "value": "val credential = MongoCredential.createScramSha256Credential(\n \"<db_username>\", \"<authenticationDb>\", \"<db_password>\".toCharArray()\n)\nval settings = MongoClientSettings.builder()\n .applyToClusterSettings { builder: ClusterSettings.Builder ->\n builder.hosts(\n listOf(ServerAddress(\"<hostname>\", <port>))\n )\n }\n .credential(credential)\n .build()\n\nval mongoClient = MongoClient.create(settings)\n" - }, - { - "lang": "kotlin", - "value": "val mongoClient =\n MongoClient.create(\"mongodb://<db_username>:<db_password>@<hostname>:<port>/?authSource=<authenticationDb>&authMechanism=SCRAM-SHA-1\")\n" - }, - { - "lang": "kotlin", - "value": "val credential = MongoCredential.createScramSha1Credential(\n \"<db_username>\", \"<authenticationDb>\", \"<db_password>\".toCharArray()\n)\nval settings = MongoClientSettings.builder()\n .applyToClusterSettings { builder: ClusterSettings.Builder ->\n builder.hosts(\n listOf(ServerAddress(\"<hostname>\", <port>))\n )\n }\n .credential(credential)\n .build()\n\nval mongoClient = MongoClient.create(settings)\n" - }, - { - "lang": "kotlin", - "value": "val credential = MongoCredential.createAwsCredential(null, null)\n\nval settings = MongoClientSettings.builder()\n .applyToClusterSettings { builder: ClusterSettings.Builder ->\n builder.hosts(\n listOf(ServerAddress(\"<hostname>\"))\n )\n }\n .credential(credential)\n .build()\n\nval mongoClient = MongoClient.create(settings)\n" - }, - { - "lang": "kotlin", - "value": "val mongoClient =\n MongoClient.create(\"mongodb://<atlasUri>?authMechanism=MONGODB-AWS\")\n" - }, - { - "lang": "bash", - "value": "export AWS_ACCESS_KEY_ID=<awsKeyId>\nexport AWS_SECRET_ACCESS_KEY=<awsSecretKey>\nexport AWS_SESSION_TOKEN=<awsSessionToken>" - }, - { - "lang": "bash", - "value": "export AWS_CONTAINER_CREDENTIALS_RELATIVE_URI=" - }, - { - "lang": "kotlin", - "value": "val credential = MongoCredential.createAwsCredential(null, null)\n\nval settings = MongoClientSettings.builder()\n .applyToClusterSettings { builder: ClusterSettings.Builder ->\n builder.hosts(\n listOf(ServerAddress(\"<hostname>\"))\n )\n }\n .credential(credential)\n .build()\n\nval mongoClient = MongoClient.create(settings)\n" - }, - { - "lang": "kotlin", - "value": "val mongoClient =\n MongoClient.create(\"mongodb://<atlasUri>?authMechanism=MONGODB-AWS\")\n" - }, - { - "lang": "kotlin", - "value": "val credential = MongoCredential.createAwsCredential(\"<awsKeyId>\", \"<awsSecretKey>\".toCharArray())\n\nval settings = MongoClientSettings.builder()\n .applyToClusterSettings { builder: ClusterSettings.Builder ->\n builder.hosts(\n listOf(ServerAddress(\"<hostname>\"))\n )\n }\n .credential(credential)\n .build()\n\nval mongoClient = MongoClient.create(settings)\n" - }, - { - "lang": "kotlin", - "value": "val credential = MongoCredential.createAwsCredential(\"<awsKeyId>\", \"<awsSecretKey>\".toCharArray())\n .withMechanismProperty(\"AWS_SESSION_TOKEN\", \"<awsSessionToken>\")\n\nval settings = MongoClientSettings.builder()\n .applyToClusterSettings { builder: ClusterSettings.Builder ->\n builder.hosts(\n 
listOf(ServerAddress(\"\"))\n )\n }\n .credential(credential)\n .build()\n\nval mongoClient = MongoClient.create(settings)\n" - }, - { - "lang": "kotlin", - "value": "val awsFreshCredentialSupplier: Supplier = Supplier {\n // Add your code here to fetch new credentials\n\n // Return the new credentials\n AwsCredential(\"\", \"\", \"\")\n}\n\nval credential = MongoCredential.createAwsCredential(\"\", \"\".toCharArray())\n .withMechanismProperty(MongoCredential.AWS_CREDENTIAL_PROVIDER_KEY, awsFreshCredentialSupplier)\n\nval settings = MongoClientSettings.builder()\n .applyToClusterSettings { builder ->\n builder.hosts(listOf(ServerAddress(\"\", \"\")))\n }\n .credential(credential)\n .build()\n\nval mongoClient = MongoClient.create(settings)\n" - }, - { - "lang": "kotlin", - "value": "val credential = MongoCredential.createAwsCredential(\"\", \"\".toCharArray())\nval connectionString = ConnectionString(\"mongodb:///?authMechanism=MONGODB-AWS&authMechanismProperties=AWS_SESSION_TOKEN:\")\n\nval settings = MongoClientSettings.builder()\n .applyConnectionString(connectionString)\n .credential(credential)\n .build()\n\nval mongoClient = MongoClient.create(settings)\n" - }, - { - "lang": "kotlin", - "value": "val mongoClient =\n MongoClient.create(\"mongodb://:@:/?authSource=&authMechanism=MONGODB-X509&tls=true\")\n" - }, - { - "lang": "kotlin", - "value": "val credential = MongoCredential.createMongoX509Credential()\n\nval settings = MongoClientSettings.builder()\n .applyToClusterSettings { builder ->\n builder.hosts(listOf(\n ServerAddress(\"\", \"\"))\n )\n }\n .applyToSslSettings { builder ->\n builder.enabled(true)\n }\n .credential(credential)\n .build()\n\nval mongoClient = MongoClient.create(settings)\n" - } - ], - "preview": "In this guide, you can learn how to authenticate with MongoDB using each\nauthentication mechanism available in the MongoDB Community Edition.\nAuthentication mechanisms are processes by which the driver and server\nconfirm identity and establish trust to ensure security.", - "tags": null, - "facets": { - "target_product": ["drivers"], - "programming_language": ["kotlin"] - } - }, - { - "slug": "fundamentals/builders/aggregates", - "title": "Aggregates Builders", - "headings": [ - "Overview", - "Match", - "Project", - "Projecting Computed Fields", - "Documents", - "Sample", - "Sort", - "Skip", - "Limit", - "Lookup", - "Left Outer Join", - "Full Join and Uncorrelated Subqueries", - "Group", - "Pick-N Accumulators", - "MinN", - "MaxN", - "FirstN", - "LastN", - "Top", - "TopN", - "Bottom", - "BottomN", - "Unwind", - "Out", - "Merge", - "GraphLookup", - "SortByCount", - "ReplaceRoot", - "AddFields", - "Count", - "Bucket", - "BucketAuto", - "Facet", - "SetWindowFields", - "Densify", - "Fill", - "Atlas Full-Text Search", - "Atlas Search Metadata", - "Atlas Vector Search" - ], - "paragraphs": "In this guide, you can learn how to use the Aggregates \nclass which provides static factory methods that build aggregation pipeline\nstages in the MongoDB Kotlin driver. For a more thorough introduction to Aggregation, see our Aggregation guide . The examples on this page assume imports for methods of the following classes: Use these methods to construct pipeline stages and specify them in your\naggregation as a list: Many Aggregation examples in this guide use the Atlas sample_mflix.movies dataset . 
The documents in this collection are\nmodeled by the following Movie data class for use with the Kotlin driver: Aggregates Filters Projections Sorts Accumulators Use the match() method to create a $match \npipeline stage that matches incoming documents against the specified\nquery filter, filtering out documents that do not match. The following example creates a pipeline stage that matches all documents\nin the movies collection where the\n title field is equal to \"The Shawshank Redemption\": The filter can be an instance of any class that implements Bson , but it's\nconvenient to combine with use of the Filters class. Use the project() method to create a $project \npipeline stage that projects specified document fields. Field projection\nin aggregation follows the same rules as field projection in queries . The following example creates a pipeline stage that includes the title and\n plot fields but excludes the _id field: Though the projection can be an instance of any class that implements Bson ,\nit's convenient to combine with use of Projections . The $project stage can project computed fields as well. The following example creates a pipeline stage that projects the rated field\ninto a new field called rating , effectively renaming the field: Use the documents() method to create a\n $documents \npipeline stage that returns literal documents from input values. The following example creates a pipeline stage that creates\nsample documents in the movies collection with a title field: If you use a $documents stage in an aggregation pipeline, it must be the first\nstage in the pipeline. If you use the documents() method to provide the input to an aggregation pipeline,\nyou must call the aggregate() method on a database instead of on a\ncollection. Use the sample() method to create a $sample \npipeline stage to randomly select documents from input. The following example creates a pipeline stage that randomly selects 5 documents\nfrom the movies collection: Use the sort() method to create a $sort \npipeline stage to sort by the specified criteria. The following example creates a pipeline stage that sorts in descending order according\nto the value of the year field and then in ascending order according to the\nvalue of the title field: Though the sort criteria can be an instance of any class that\nimplements Bson , it's convenient to combine with use of\n Sorts . Use the skip() method to create a $skip \npipeline stage to skip over the specified number of documents before\npassing documents into the next stage. The following example creates a pipeline stage that skips the first 5 documents\nin the movies collection: Use the limit() method to create a $limit pipeline stage\nto limit the number of documents passed to the next stage. The following example creates a pipeline stage that limits the number of documents\nreturned from the movies collection to 4 : Use the lookup() method to create a $lookup \npipeline stage to perform joins and uncorrelated subqueries between two collections.
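Before moving on to joins, the following sketch chains several of the single-collection stages above into one pipeline (it assumes a MongoCollection<Movie> named collection, the builder imports listed earlier, and org.bson.Document; the stage values are illustrative):

    val pipeline = listOf(
        Aggregates.match(Filters.eq("title", "The Shawshank Redemption")), // $match
        Aggregates.project(
            Projections.fields(
                Projections.include("title", "plot"), // keep two fields
                Projections.excludeId())),            // drop _id
        Aggregates.sort(Sorts.orderBy(
            Sorts.descending("year"),
            Sorts.ascending("title"))),               // $sort
        Aggregates.skip(5),                           // $skip
        Aggregates.limit(4))                          // $limit

    val resultsFlow = collection.aggregate<Document>(pipeline)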
The following example creates a pipeline stage that performs a left outer\njoin between the movies and comments collections in the sample mflix \ndatabase: It joins the _id field from movies to the movie_id field in comments It outputs the results in the joined_comments field The following example uses the fictional orders and warehouses collections.\nThe data is modeled using the following Kotlin data classes: The example creates a pipeline stage that joins the two collections by the item field\nand whether the available quantity in the inStock field is enough to fulfill\nthe ordered quantity: Use the group() method to create a $group \npipeline stage to group documents by a specified expression and output a document\nfor each distinct grouping. The following example creates a pipeline stage that groups documents\nin the orders collection by the value of the customerId field.\nEach group accumulates the sum and average\nof the values of the ordered field into the totalQuantity and\n averageQuantity fields: Learn more about accumulator operators from the Server manual section\non Accumulators . The driver includes the Accumulators \nclass with static factory methods for each of the supported accumulators. The pick-n accumulators are aggregation accumulation operators that return\nthe top and bottom elements given a specific ordering. Use one of the\nfollowing builders to create an aggregation accumulation operator: Learn which aggregation pipeline stages you can use accumulator operators with\nfrom the Server manual section on\n Accumulators . The pick-n accumulator examples use documents from the movies collection\nin the sample-mflix database. minN() maxN() firstN() lastN() top() topN() bottom() bottomN() You can only perform aggregation operations with these pick-n accumulators\nwhen running MongoDB v5.2 or later. The minN() builder creates the $minN \naccumulator which returns data from documents that contain the n lowest\nvalues of a grouping. The following example demonstrates how to use the minN() method to return\nthe lowest three imdb.rating values for movies, grouped by year : See the minN() API documentation \nfor more information. The $minN and $bottomN accumulators can perform similar tasks.\nSee\n Comparison of $minN and $bottomN Accumulators \nfor recommended usage of each. The maxN() accumulator returns data from documents that contain the n \nhighest values of a grouping. The following example demonstrates how to use the maxN() method to\nreturn the highest two imdb.rating values for movies, grouped by year : See the maxN() API documentation \nfor more information. The firstN() accumulator returns data from the first n documents in\neach grouping for the specified sort order. The following example demonstrates how to use the firstN() method to\nreturn the first two movie title values, based on the order they came\ninto the stage, grouped by year : See the firstN() API documentation \nfor more information. The $firstN and $topN accumulators can perform similar tasks.\nSee\n Comparison of $firstN and $topN Accumulators \nfor recommended usage of each. The lastN() accumulator returns data from the last n documents in\neach grouping for the specified sort order. The following example demonstrates how to use the lastN() method to show\nthe last three movie title values, based on the order they came into\nthe stage, grouped by year : See the lastN() API documentation \nfor more information.
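A minimal sketch of the pick-n pattern shared by these builders, shown here with minN() (the field names follow the movies examples above):

    // For each year, keep the three lowest imdb.rating values
    val groupStage = Aggregates.group(
        "\$year",
        Accumulators.minN("lowestThreeRatings", "\$imdb.rating", 3))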
The top() accumulator returns data from the first document in a group\nbased on the specified sort order. The following example demonstrates how to use the top() method to return\nthe title and imdb.rating values for the top rated movies based on the\n imdb.rating , grouped by year . See the top() API documentation \nfor more information. The topN() accumulator returns data from documents that contain the\nhighest n values for the specified field. The following example demonstrates how to use the topN() method to return\nthe title and runtime values of the three longest movies based on the\n runtime values, grouped by year . See the topN() API documentation \nfor more information. The $firstN and $topN accumulators can perform similar tasks.\nSee\n Comparison of $firstN and $topN Accumulators \nfor recommended usage of each. The bottom() accumulator returns data from the last document in a group\nbased on the specified sort order. The following example demonstrates how to use the bottom() method to\nreturn the title and runtime values of the shortest movie based on the\n runtime value, grouped by year . See the bottom() API documentation \nfor more information. The bottomN() accumulator returns data from documents that contain the\nlowest n values for the specified field. The following example demonstrates how to use the bottomN() method to\nreturn the title and imdb.rating values of the two lowest rated movies\nbased on the imdb.rating value, grouped by year : See the bottomN() API documentation \nfor more information. The $minN and $bottomN accumulators can perform similar tasks.\nSee Comparison of $minN and $bottomN Accumulators \nfor recommended usage of each. Use the unwind() method to create an $unwind \npipeline stage to deconstruct an array field from input documents, creating\nan output document for each array element. The following example creates a document for each element in the lowestRatedTwoMovies array: To preserve documents that have missing or null \nvalues for the array field, or where the array is empty: To include the array index (in this example, in a field called \"position\" ): Use the out() method to create an $out \npipeline stage that writes all documents to the specified collection in\nthe same database. The following example writes the results of the pipeline to the classic_movies \ncollection: The $out stage must be the last stage in any aggregation pipeline. Use the merge() method to create a $merge \npipeline stage that merges all documents into the specified collection. The following example merges the pipeline into the nineties_movies collection\nusing the default options: The following example merges the pipeline into the movie_ratings collection\nin the aggregation database using some non-default options that specify to\nreplace the document if both year and title match, otherwise insert the\ndocument: The $merge stage must be the last stage in any aggregation pipeline. Use the graphLookup() method to create a $graphLookup \npipeline stage that performs a recursive search on a specified collection to match\na specified field in one document to a specified field of another document. The following example uses the contacts collection. The data is modeled\nusing the following Kotlin data class: The example computes the reporting graph for users in the\n contact collection, recursively matching the value in the friends field\nto the name field: Using GraphLookupOptions , you can specify the depth to recurse as well as\nthe name of the depth field, if desired.
In this example, $graphLookup will\nrecurse up to two times, and create a field called degrees with the\nrecursion depth information for every document. Using GraphLookupOptions , you can specify a filter that documents must match\nin order for MongoDB to include them in your search. In this\nexample, only links with \"golf\" in their hobbies field will be included: Use the sortByCount() method to create a $sortByCount \npipeline stage that groups documents by a given expression and then sorts\nthese groups by count in descending order. The following example groups documents in the movies collection by the\n genres field and computes the count for each distinct value: The $sortByCount stage is identical to a $group stage with a\n $sum accumulator followed by a $sort stage. Use the replaceRoot() method to create a $replaceRoot \npipeline stage that replaces each input document with the specified document. The following example uses a fictional books collection that contains data\nmodeled using the following Kotlin data class: Each input document is replaced by the nested document in the\n spanishTranslation field: Use the addFields() method to create an $addFields \npipeline stage that adds new fields to documents. The following example adds two new fields, watched and type , to the\ninput documents in the movie collection: Use $addFields when you do not want to project field inclusion\nor exclusion. Use the count() method to create a $count \npipeline stage that counts the number of documents that enter the stage, and assigns\nthat value to a specified field name. If you do not specify a field,\n count() defaults the field name to \"count\". The following example creates a pipeline stage that outputs the count of incoming\ndocuments in a field called \"total\": The $count stage is syntactic sugar for: Use the bucket() method to create a $bucket \npipeline stage that automates the bucketing of data around predefined boundary\nvalues. The following examples use data modeled with the following Kotlin data class: This example creates a pipeline stage that groups incoming documents based\non the value of their screenSize field, inclusive of the lower boundary\nand exclusive of the upper boundary: Use the BucketOptions class to specify a default bucket for values\noutside of the specified boundaries, and to specify additional accumulators. The following example creates a pipeline stage that groups incoming documents based\non the value of their screenSize field, counting the number of documents\nthat fall within each bucket, pushing the value of screenSize into a\nfield called matches , and capturing any screen sizes greater than \"70\"\ninto a bucket called \"monster\" for monstrously large screen sizes: The driver includes the Accumulators \nclass with static factory methods for each of the supported accumulators. Use the bucketAuto() method to create a $bucketAuto \npipeline stage that automatically determines the boundaries of each bucket\nin its attempt to distribute the documents evenly into a specified number of buckets. The following examples use data modeled with the following Kotlin data class: This example creates a pipeline stage that will attempt to create and evenly\ndistribute documents into 5 buckets using the value of their price field: Use the BucketAutoOptions class to specify a preferred number-based\nscheme to set boundary values, and specify additional accumulators.
The following example creates a pipeline stage that will attempt to create and evenly\ndistribute documents into 5 buckets using the value of their price field,\nsetting the bucket boundaries at powers of 2 (2, 4, 8, 16, ...). It also counts\nthe number of documents in each bucket, and calculates their average price \nin a new field called avgPrice : The driver includes the Accumulators \nclass with static factory methods for each of the supported accumulators. Use the facet() method to create a $facet \npipeline stage that allows for the definition of parallel pipelines. The following examples use data modeled with the following Kotlin data class: This example creates a pipeline stage that executes two parallel aggregations: The first aggregation distributes incoming documents into 5 groups according to\ntheir screenSize field. The second aggregation counts all manufacturers and returns their count, limited\nto the top 5. Use the setWindowFields() method to create a $setWindowFields \npipeline stage that allows using window operators to perform operations\non a specified span of documents in a collection. The following example uses a fictional weather collection using data modeled\nwith the following Kotlin data class: The example creates a pipeline stage that computes the\naccumulated rainfall and the average temperature over the past month for\neach locality from more fine-grained measurements presented in the rainfall \nand temperature fields: The driver includes the Windows \nclass with static factory methods for building windowed computations. Use the densify() method to create a\n $densify \npipeline stage that generates a sequence of documents to span a specified\ninterval. Consider the following documents retrieved from the Atlas sample weather dataset \nthat contain measurements for a similar position field, spaced one hour\napart: These documents are modeled using the following Kotlin data class: Suppose you needed to create a pipeline stage that performs the following\nactions on these documents: The call to the densify() aggregation stage builder that accomplishes\nthese actions should resemble the following: The following output highlights the documents generated by the aggregate stage\nwhich contain ts values every 15 minutes between the existing documents: See the densify package API documentation \nfor more information. You can use the $densify() aggregation stage only when running\nMongoDB v5.1 or later. Add a document at every 15-minute interval for which a ts value does not\nalready exist. Group the documents by the position field. Use the fill() method to create a\n $fill \npipeline stage that populates null and missing field values. Consider the following documents that contain temperature and air pressure\nmeasurements at an hourly interval: These documents are modeled using the following Kotlin data class: Suppose you needed to populate missing temperature and air pressure\ndata points in the documents as follows: The call to the fill() aggregation stage builder that accomplishes\nthese actions resembles the following: See the fill package API documentation \nfor more information. You can use the $fill() aggregation stage only when running\nMongoDB v5.3 or later. Populate the air_pressure field for hour \"2\" using linear interpolation\nto calculate the value. Set the missing temperature value to \"23.6C\" for hour \"3\". Use the search() method to create a $search \npipeline stage that specifies a full-text search of one or more fields. 
The following example creates a pipeline stage that searches the title \nfield in the movies collection for text that contains the word \"Future\": Learn more about the builders from the\n search package API documentation . This aggregation pipeline operator is only available for collections hosted\non MongoDB Atlas clusters running v4.2 or later that are\ncovered by an Atlas search index .\nLearn more about the required setup and the functionality of this operator\nfrom the Atlas Search documentation. Use the searchMeta() method to create a\n $searchMeta \npipeline stage which returns only the metadata part of the results from\nAtlas full-text search queries. The following example shows the count metadata for an Atlas search\naggregation stage: Learn more about this helper from the\n searchMeta() API documentation . This aggregation pipeline operator is only available\non MongoDB Atlas clusters running v4.4.11 and later. For a\ndetailed list of version availability, see the MongoDB Atlas documentation\non $searchMeta . Use the vectorSearch() method to create a $vectorSearch \npipeline stage that specifies a semantic search . A semantic search is\na type of search that locates pieces of information that are similar in meaning. To use this feature when performing an aggregation on a collection, you\nmust create a vector search index and index your vector embeddings. To\nlearn how to set up search indexes in MongoDB Atlas, see How to\nIndex Vector Embeddings for Vector Search in the Atlas documentation. The example in this section uses data modeled with the following Kotlin data class: This example shows how to build an aggregation pipeline that uses the\n vectorSearch() method to perform a vector search with the following\nspecifications: To learn more about this helper, see the\n vectorSearch() API documentation . To learn about which versions of MongoDB Atlas support this feature, see\n Limitations \nin the Atlas documentation. 
Searches plotEmbedding field values by using vector embeddings of a\nstring value Uses the mflix_movies_embedding_index vector search index Considers up to 2 nearest neighbors Returns 1 document Filters for documents in which the year value is at least 2016", - "code": [ - { - "lang": "kotlin", - "value": "import com.mongodb.client.model.Aggregates\nimport com.mongodb.client.model.Filters\nimport com.mongodb.client.model.Projections\nimport com.mongodb.client.model.Sorts\nimport com.mongodb.client.model.Accumulators" - }, - { - "lang": "kotlin", - "value": "val matchStage = Aggregates.match(Filters.eq(\"someField\", \"someCriteria\"))\nval sortByCountStage = Aggregates.sortByCount(\"\\$someField\")\nval results = collection.aggregate(\n listOf(matchStage, sortByCountStage)).toList()\n" - }, - { - "lang": "kotlin", - "value": "data class Movie(\n val title: String,\n val year: Int,\n val genres: List<String>,\n val rated: String,\n val plot: String,\n val runtime: Int,\n val imdb: IMDB\n){\n data class IMDB(\n val rating: Double\n )\n}\n" - }, - { - "lang": "kotlin", - "value": "Aggregates.match(Filters.eq(Movie::title.name, \"The Shawshank Redemption\"))\n" - }, - { - "lang": "kotlin", - "value": "Aggregates.project(\n Projections.fields(\n Projections.include(Movie::title.name, Movie::plot.name),\n Projections.excludeId())\n)\n" - }, - { - "lang": "kotlin", - "value": "Aggregates.project(\n Projections.fields(\n Projections.computed(\"rating\", \"\\$${Movie::rated.name}\"),\n Projections.excludeId()\n )\n)\n" - }, - { - "lang": "kotlin", - "value": "Aggregates.documents(\n listOf(\n Document(Movie::title.name, \"Steel Magnolias\"),\n Document(Movie::title.name, \"Back to the Future\"),\n Document(Movie::title.name, \"Jurassic Park\")\n )\n)\n" - }, - { - "lang": "kotlin", - "value": "val docsStage = database.aggregate( // ...
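// hedged sketch of the elided arguments (an assumption, not the original page):
// pass a listOf(...) pipeline whose first stage is the documents() stage above,
// e.g. listOf(Aggregates.documents(...)); later stages may follow as usual.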
)\n" - }, - { - "lang": "kotlin", - "value": "Aggregates.sample(5)\n" - }, - { - "lang": "kotlin", - "value": "Aggregates.sort(\n Sorts.orderBy(\n Sorts.descending(Movie::year.name),\n Sorts.ascending(Movie::title.name)\n )\n)\n" - }, - { - "lang": "kotlin", - "value": "Aggregates.skip(5)\n" - }, - { - "lang": "kotlin", - "value": "Aggregates.limit(4)\n" - }, - { - "lang": "kotlin", - "value": "Aggregates.lookup(\n \"comments\",\n \"_id\",\n \"movie_id\",\n \"joined_comments\"\n)\n" - }, - { - "lang": "kotlin", - "value": "data class Order(\n @BsonId val id: Int,\n val customerId: Int,\n val item: String,\n val ordered: Int\n)\ndata class Inventory(\n @BsonId val id: Int,\n val stockItem: String,\n val inStock: Int\n)\n" - }, - { - "lang": "kotlin", - "value": "val variables = listOf(\n Variable(\"order_item\", \"\\$item\"),\n Variable(\"order_qty\", \"\\$ordered\")\n)\nval pipeline = listOf(\n Aggregates.match(\n Filters.expr(\n Document(\"\\$and\", listOf(\n Document(\"\\$eq\", listOf(\"$\\$order_item\", \"\\$${Inventory::stockItem.name}\")),\n Document(\"\\$gte\", listOf(\"\\$${Inventory::inStock.name}\", \"$\\$order_qty\"))\n ))\n )\n ),\n Aggregates.project(\n Projections.fields(\n Projections.exclude(Order::customerId.name, Inventory::stockItem.name),\n Projections.excludeId()\n )\n )\n)\nval innerJoinLookup =\n Aggregates.lookup(\"warehouses\", variables, pipeline, \"stockData\")\n" - }, - { - "lang": "kotlin", - "value": "Aggregates.group(\"\\$${Order::customerId.name}\",\n Accumulators.sum(\"totalQuantity\", \"\\$${Order::ordered.name}\"),\n Accumulators.avg(\"averageQuantity\", \"\\$${Order::ordered.name}\")\n)\n" - }, - { - "lang": "kotlin", - "value": "Aggregates.group(\n \"\\$${Movie::year.name}\",\n Accumulators.minN(\n \"lowestThreeRatings\",\n \"\\$${Movie::imdb.name}.${Movie.IMDB::rating.name}\",\n 3\n )\n)\n" - }, - { - "lang": "kotlin", - "value": "Aggregates.group(\n \"\\$${Movie::year.name}\",\n Accumulators.maxN(\n \"highestTwoRatings\",\n \"\\$${Movie::imdb.name}.${Movie.IMDB::rating.name}\",\n 2\n )\n)\n" - }, - { - "lang": "kotlin", - "value": "Aggregates.group(\n \"\\$${Movie::year.name}\",\n Accumulators.firstN(\n \"firstTwoMovies\",\n \"\\$${Movie::title.name}\",\n 2\n )\n)\n" - }, - { - "lang": "kotlin", - "value": "Aggregates.group(\n \"\\$${Movie::year.name}\",\n Accumulators.lastN(\n \"lastThreeMovies\",\n \"\\$${Movie::title.name}\",\n 3\n )\n)\n" - }, - { - "lang": "kotlin", - "value": "Aggregates.group(\n \"\\$${Movie::year.name}\",\n Accumulators.top(\n \"topRatedMovie\",\n Sorts.descending(\"${Movie::imdb.name}.${Movie.IMDB::rating.name}\"),\n listOf(\"\\$${Movie::title.name}\", \"\\$${Movie::imdb.name}.${Movie.IMDB::rating.name}\")\n )\n)\n" - }, - { - "lang": "kotlin", - "value": "Aggregates.group(\n \"\\$${Movie::year.name}\",\n Accumulators.topN(\n \"longestThreeMovies\",\n Sorts.descending(Movie::runtime.name),\n listOf(\"\\$${Movie::title.name}\", \"\\$${Movie::runtime.name}\"),\n 3\n )\n)\n" - }, - { - "lang": "kotlin", - "value": "Aggregates.group(\n \"\\$${Movie::year.name}\",\n Accumulators.bottom(\n \"shortestMovies\",\n Sorts.descending(Movie::runtime.name),\n listOf(\"\\$${Movie::title.name}\", \"\\$${Movie::runtime.name}\")\n )\n)\n" - }, - { - "lang": "kotlin", - "value": "Aggregates.group(\n \"\\$${Movie::year.name}\",\n Accumulators.bottom(\n \"lowestRatedTwoMovies\",\n Sorts.descending(\"${Movie::imdb.name}.${Movie.IMDB::rating.name}\"),\n listOf(\"\\$${Movie::title.name}\", \"\\$${Movie::imdb.name}.${Movie.IMDB::rating.name}\"),\n 
)\n)\n" - }, - { - "lang": "kotlin", - "value": "Aggregates.unwind(\"\\$${\"lowestRatedTwoMovies\"}\")\n" - }, - { - "lang": "kotlin", - "value": "Aggregates.unwind(\n \"\\$${\"lowestRatedTwoMovies\"}\",\n UnwindOptions().preserveNullAndEmptyArrays(true)\n)\n" - }, - { - "lang": "kotlin", - "value": "Aggregates.unwind(\n \"\\$${\"lowestRatedTwoMovies\"}\",\n UnwindOptions().includeArrayIndex(\"position\")\n)\n" - }, - { - "lang": "kotlin", - "value": "Aggregates.out(\"classic_movies\")\n" - }, - { - "lang": "kotlin", - "value": "Aggregates.merge(\"nineties_movies\")\n" - }, - { - "lang": "kotlin", - "value": "Aggregates.merge(\n MongoNamespace(\"aggregation\", \"movie_ratings\"),\n MergeOptions().uniqueIdentifier(listOf(\"year\", \"title\"))\n .whenMatched(MergeOptions.WhenMatched.REPLACE)\n .whenNotMatched(MergeOptions.WhenNotMatched.INSERT)\n)\n" - }, - { - "lang": "kotlin", - "value": "data class Users(\n val name: String,\n val friends: List?,\n val hobbies: List?\n)\n" - }, - { - "lang": "kotlin", - "value": "Aggregates.graphLookup(\n \"contacts\",\n \"\\$${Users::friends.name}\", Users::friends.name, Users::name.name,\n \"socialNetwork\"\n)\n" - }, - { - "lang": "kotlin", - "value": "Aggregates.graphLookup(\n \"contacts\",\n \"\\$${Users::friends.name}\", Users::friends.name, Users::name.name,\n \"socialNetwork\",\n GraphLookupOptions().maxDepth(2).depthField(\"degrees\")\n)\n" - }, - { - "lang": "kotlin", - "value": "Aggregates.graphLookup(\n \"contacts\",\n \"\\$${Users::friends.name}\", Users::friends.name, Users::name.name, \"socialNetwork\",\n GraphLookupOptions().maxDepth(1).restrictSearchWithMatch(\n Filters.eq(Users::hobbies.name, \"golf\")\n )\n)\n" - }, - { - "lang": "json", - "value": "[\n { \"$group\": { \"_id\": , \"count\": { \"$sum\": 1 } } },\n { \"$sort\": { \"count\": -1 } }\n]" - }, - { - "lang": "kotlin", - "value": "Aggregates.sortByCount(\"\\$${Movie::genres.name}\"),\n" - }, - { - "lang": "kotlin", - "value": "data class Libro(val titulo: String)\ndata class Book(val title: String, val spanishTranslation: Libro)\n" - }, - { - "lang": "kotlin", - "value": "Aggregates.replaceRoot(\"\\$${Book::spanishTranslation.name}\")\n" - }, - { - "lang": "kotlin", - "value": "Aggregates.addFields(\n Field(\"watched\", false),\n Field(\"type\", \"movie\")\n)\n" - }, - { - "lang": "json", - "value": "{ \"$group\":{ \"_id\": 0, \"count\": { \"$sum\" : 1 } } }" - }, - { - "lang": "kotlin", - "value": "Aggregates.count(\"total\")\n" - }, - { - "lang": "kotlin", - "value": "data class Screen(\n val id: String,\n val screenSize: Int,\n val manufacturer: String,\n val price: Double\n)\n" - }, - { - "lang": "kotlin", - "value": "Aggregates.bucket(\"\\$${Screen::screenSize.name}\", listOf(0, 24, 32, 50, 70, 1000))\n" - }, - { - "lang": "kotlin", - "value": "Aggregates.bucket(\"\\$${Screen::screenSize.name}\", listOf(0, 24, 32, 50, 70),\n BucketOptions()\n .defaultBucket(\"monster\")\n .output(\n Accumulators.sum(\"count\", 1),\n Accumulators.push(\"matches\", \"\\$${Screen::screenSize.name}\")\n )\n)\n" - }, - { - "lang": "kotlin", - "value": "data class Screen(\n val id: String,\n val screenSize: Int,\n val manufacturer: String,\n val price: Double\n)\n" - }, - { - "lang": "kotlin", - "value": "Aggregates.bucketAuto(\"\\$${Screen::screenSize.name}\", 5)\n" - }, - { - "lang": "kotlin", - "value": "Aggregates.bucketAuto(\n \"\\$${Screen::price.name}\", 5,\n BucketAutoOptions()\n .granularity(BucketGranularity.POWERSOF2)\n .output(Accumulators.sum(\"count\", 1), 
Accumulators.avg(\"avgPrice\", \"\\$${Screen::price.name}\"))\n )\n" - }, - { - "lang": "kotlin", - "value": "data class Screen(\n val id: String,\n val screenSize: Int,\n val manufacturer: String,\n val price: Double\n)\n" - }, - { - "lang": "kotlin", - "value": "Aggregates.facet(\n Facet(\n \"Screen Sizes\",\n Aggregates.bucketAuto(\n \"\\$${Screen::screenSize.name}\",\n 5,\n BucketAutoOptions().output(Accumulators.sum(\"count\", 1))\n )\n ),\n Facet(\n \"Manufacturer\",\n Aggregates.sortByCount(\"\\$${Screen::manufacturer.name}\"),\n Aggregates.limit(5)\n )\n)\n" - }, - { - "lang": "kotlin", - "value": "data class Weather(\n val localityId: String,\n val measurementDateTime: LocalDateTime,\n val rainfall: Double,\n val temperature: Double\n)\n" - }, - { - "lang": "kotlin", - "value": "val pastMonth = Windows.timeRange(-1, MongoTimeUnit.MONTH, Windows.Bound.CURRENT)\n\nval resultsFlow = weatherCollection.aggregate(\n listOf(\n Aggregates.setWindowFields(\"\\$${Weather::localityId.name}\",\n Sorts.ascending(Weather::measurementDateTime.name),\n WindowOutputFields.sum(\n \"monthlyRainfall\",\n \"\\$${Weather::rainfall.name}\",\n pastMonth\n ),\n WindowOutputFields.avg(\n \"monthlyAvgTemp\",\n \"\\$${Weather::temperature.name}\",\n pastMonth\n )\n )\n )\n" - }, - { - "lang": "none", - "value": "Document{{ _id=5553a..., position=Document{{type=Point, coordinates=[-47.9, 47.6]}}, ts=Mon Mar 05 08:00:00 EST 1984, ... }}\nDocument{{ _id=5553b..., position=Document{{type=Point, coordinates=[-47.9, 47.6]}}, ts=Mon Mar 05 09:00:00 EST 1984, ... }}" - }, - { - "lang": "none", - "value": "Document{{ _id=5553a..., position=Document{{type=Point, coordinates=[-47.9, 47.6]}}, ts=Mon Mar 05 08:00:00 EST 1984, ... }}\nDocument{{ position=Document{{coordinates=[-47.9, 47.6]}}, ts=Mon Mar 05 08:15:00 EST 1984 }}\nDocument{{ position=Document{{coordinates=[-47.9, 47.6]}}, ts=Mon Mar 05 08:30:00 EST 1984 }}\nDocument{{ position=Document{{coordinates=[-47.9, 47.6]}}, ts=Mon Mar 05 08:45:00 EST 1984 }}\nDocument{{ _id=5553b..., position=Document{{type=Point, coordinates=[-47.9, 47.6]}}, ts=Mon Mar 05 09:00:00 EST 1984, ... 
}}" - }, - { - "lang": "kotlin", - "value": "data class Weather(\n @BsonId val id: ObjectId = ObjectId(),\n val position: Point,\n val ts: LocalDateTime\n)\n" - }, - { - "lang": "kotlin", - "value": "Aggregates.densify(\n \"ts\",\n DensifyRange.partitionRangeWithStep(15, MongoTimeUnit.MINUTE),\n DensifyOptions.densifyOptions().partitionByFields(\"Position.coordinates\")\n)\n" - }, - { - "lang": "none", - "value": "Document{{_id=6308a..., hour=1, temperature=23C, air_pressure=29.74}}\nDocument{{_id=6308b..., hour=2, temperature=23.5C}}\nDocument{{_id=6308c..., hour=3, temperature=null, air_pressure=29.76}}" - }, - { - "lang": "kotlin", - "value": "data class Weather(\n @BsonId val id: ObjectId = ObjectId(),\n val hour: Int,\n val temperature: String?,\n val air_pressure: Double?\n)\n" - }, - { - "lang": "kotlin", - "value": "val resultsFlow = weatherCollection.aggregate(\n listOf(\n Aggregates.fill(\n FillOptions.fillOptions().sortBy(Sorts.ascending(Weather::hour.name)),\n FillOutputField.value(Weather::temperature.name, \"23.6C\"),\n FillOutputField.linear(Weather::air_pressure.name)\n )\n )\n)\nresultsFlow.collect { println(it) }\n" - }, - { - "lang": "console", - "value": "Weather(id=6308a..., hour=1, temperature=23C, air_pressure=29.74)\nWeather(id=6308b..., hour=2, temperature=23.5C, air_pressure=29.75)\nWeather(id=6308b..., hour=3, temperature=23.6C, air_pressure=29.76)" - }, - { - "lang": "kotlin", - "value": "Aggregates.search(\n SearchOperator.text(\n SearchPath.fieldPath(Movie::title.name), \"Future\"\n ),\n SearchOptions.searchOptions().index(\"title\")\n)\n" - }, - { - "lang": "kotlin", - "value": "Aggregates.searchMeta(\n SearchOperator.near(1985, 2, SearchPath.fieldPath(Movie::year.name)),\n SearchOptions.searchOptions().index(\"year\")\n)\n" - }, - { - "lang": "kotlin", - "value": "data class MovieAlt(\n val title: String,\n val year: Int,\n val plot: String,\n val plotEmbedding: List\n)\n" - }, - { - "lang": "kotlin", - "value": "Aggregates.vectorSearch(\n SearchPath.fieldPath(MovieAlt::plotEmbedding.name),\n listOf(-0.0072121937, -0.030757688, -0.012945653),\n \"mflix_movies_embedding_index\",\n 2.toLong(),\n 1.toLong(),\n vectorSearchOptions().filter(Filters.gte(MovieAlt::year.name, 2016))\n)\n" - } - ], - "preview": "In this guide, you can learn how to use the Aggregates\nclass which provides static factory methods that build aggregation pipeline\nstages in the MongoDB Kotlin driver.", - "tags": "code example, data insights, compute, atlas", - "facets": { - "genre": ["reference"], - "target_product": ["drivers"], - "programming_language": ["kotlin"] - } - }, - { - "slug": "fundamentals/builders/filters", - "title": "Filters Builders", - "headings": [ - "Overview", - "Comparison", - "Logical", - "Arrays", - "Elements", - "Evaluation", - "Bitwise", - "Geospatial" - ], - "paragraphs": "In this guide, you can learn how to use builders to specify\n filters for your queries in the MongoDB Kotlin driver. Builders are classes provided by the MongoDB Kotlin driver that help you\nconstruct BSON objects. To learn more, see our guide\non builders . Filters are operations used to limit the results of a query based on\nspecified conditions. Filters are a helpful tool to locate\ninformation that matches search conditions in a collection. 
You can use filters in the following places: Some examples of results from queries with filters are: This guide shows you how to use builders with examples of the following\ntypes of operators: The Filters class provides static factory methods for all the MongoDB query\noperators. Each method returns an instance of the BSON \ntype, which you can pass to any method that expects a query filter. Most of the Filter examples in this guide use the following sample collection paints : These documents in the paints collection are modeled by the following data class for use\nwith the Kotlin driver: As a parameter to the find() method In a match stage of an aggregation pipeline As a parameter to the deleteOne() or deleteMany() method As a parameter to the updateOne() or updateMany() method Items that cost more than $0 but less than $25. Foods that are both gluten-free and less than 500 calories. A food critic review that mentions \"spicy\". Comparison Logical Arrays Elements Evaluation Bitwise Geospatial For brevity, you may choose to import all methods of the\n Filters \nclass statically: The comparison filters include all operators that compare the value in a\ndocument to a specified value. The Filters comparison operator methods include: The following example creates a filter that matches all documents where\nthe value of the qty field equals \"5\" in the paints collection: The following example creates a filter that matches all documents where\nthe value of the qty field is greater than or equal to \"10\" in the\n paints collection: The following example creates a filter that matches all documents in\nthe paints collection because the predicate is empty: Comparison Method Matches eq() values equal to a specified value. gt() values greater than a specified value. gte() values greater than or equal to a specified value. lt() values less than a specified value. lte() values less than or equal to a specified value. ne() values not equal to a specified value. in() any of the values specified in an array. nin() none of the values specified in an array. empty() all the documents. The logical operators perform logical operations based on the conditions of the specified method. The Filters logical operator methods include: The following example creates a filter that matches documents where\nthe value of the qty field is greater than \"8\" or the value\nof the color field equals \"pink\" in the paints collection: Logical Method Matches and() documents with the conditions of all the filters. This operator joins filters with a logical AND . or() documents with the conditions of either filter. This operator joins filters with a logical OR . not() documents that do not match the filter. nor() documents that fail to match both filters. This operator joins filters with a logical NOR . The array operators evaluate the array field in a document. The Filters array operator methods include: The following example matches documents with a vendors array\ncontaining both \"A\" and \"D\" in the paints collection: Array Method Matches all() documents if the array field contains every element specified in the query. elemMatch() documents if an element in the array field matches all the specified conditions. size() documents if the array field is a specified number of elements. The elements operators evaluate the nature of a specified field. 
The Filters elements operator methods include: The following example matches documents that have a qty field and\nwhose value does not equal \"5\" or \"8\" in the paints collection: Elements Method Matches exists() documents that have the specified field. type() documents if a field is of the specified type. The evaluation operators evaluate the value of any field in a document. The Filters evaluation operator methods include: The following example matches documents that have a color field\nstarting with the letter \"p\" in the paints collection: Evaluation Method Matches mod() documents where a modulo operation on the value of a field contains a specified result. regex() documents where values contain a specified regular expression. text() documents which contain a specified full-text search expression. where() documents which contain a specified JavaScript expression. The bitwise operators convert a number into its binary value to\nevaluate its bits. The Filters bitwise operator methods include: The following example matches documents that have a decimalValue field\nwith bits set at positions of the corresponding bitmask \"34\" (i.e.\n\"00100010\") in this binary_numbers collection: Bitwise Method Matches bitsAllSet() documents where the specified bits of a field are set (i.e. \"1\"). bitsAllClear() documents where the specified bits of a field are clear (i.e. \"0\"). bitsAnySet() documents where at least one of the specified bits of a field are set (i.e. \"1\"). bitsAnyClear() documents where at least one of the specified bits of a field are clear (i.e. \"0\"). The geospatial operators evaluate a specified coordinate and its\nrelation to a shape or location. The Filters geospatial operator methods include: The following example creates a filter that matches documents in which\nthe point field contains a GeoJSON geometry that falls within\nthe given Polygon \nin this stores collection: Geospatial Method Matches geoWithin() documents containing a GeoJSON geometry value that falls within a bounding GeoJSON geometry. geoWithinBox() documents containing a coordinates value that exist within the specified box. geoWithinPolygon() documents containing a coordinates value that exist within the specified polygon. geoWithinCenter() documents containing a coordinates value that exist within the specified circle. geoWithinCenterSphere() geometries containing a geospatial data value (GeoJSON or legacy coordinate pairs) that exist within the specified circle, using spherical geometry. geoIntersects() geometries that intersect with a GeoJSON geometry. The 2dsphere index supports $geoIntersects . near() geospatial objects in proximity to a point. Requires a geospatial index. The 2dsphere and 2d indexes support $near . nearSphere() geospatial objects in proximity to a point on a sphere. Requires a geospatial index.
The 2dsphere and 2d indexes support $nearSphere .", - "code": [ - { - "lang": "json", - "value": "{ \"_id\": 1, \"color\": \"red\", \"qty\": 5, \"vendor\": [\"A\"] }\n{ \"_id\": 2, \"color\": \"purple\", \"qty\": 10, \"vendor\": [\"C\", \"D\"] }\n{ \"_id\": 3, \"color\": \"blue\", \"qty\": 8, \"vendor\": [\"B\", \"A\"] }\n{ \"_id\": 4, \"color\": \"white\", \"qty\": 6, \"vendor\": [\"D\"] }\n{ \"_id\": 5, \"color\": \"yellow\", \"qty\": 11, \"vendor\": [\"A\", \"B\"] }\n{ \"_id\": 6, \"color\": \"pink\", \"qty\": 5, \"vendor\": [\"C\"] }\n{ \"_id\": 7, \"color\": \"green\", \"qty\": 8,\"vendor\": [\"B\", \"C\"] }\n{ \"_id\": 8, \"color\": \"orange\", \"qty\": 7, \"vendor\": [\"A\", \"D\"] }" - }, - { - "lang": "kotlin", - "value": "import com.mongodb.client.model.Filters.*" - }, - { - "lang": "kotlin", - "value": "data class PaintOrder(\n @BsonId val id: Int,\n val qty: Int,\n val color: String,\n val vendors: List<String> = mutableListOf()\n)\n" - }, - { - "lang": "kotlin", - "value": "val equalComparison = Filters.eq(PaintOrder::qty.name, 5)\nval resultsFlow = collection.find(equalComparison)\nresultsFlow.collect { println(it) }\n" - }, - { - "lang": "console", - "value": "PaintOrder(id=1, qty=5, color=red, vendors=[A])\nPaintOrder(id=6, qty=5, color=pink, vendors=[C])" - }, - { - "lang": "kotlin", - "value": "val gteComparison = Filters.gte(PaintOrder::qty.name, 10)\nval resultsFlow = collection.find(gteComparison)\nresultsFlow.collect { println(it) }\n" - }, - { - "lang": "console", - "value": "PaintOrder(id=2, qty=10, color=purple, vendors=[C, D])\nPaintOrder(id=5, qty=11, color=yellow, vendors=[A, B])" - }, - { - "lang": "kotlin", - "value": "val emptyComparison = Filters.empty()\nval resultsFlow = collection.find(emptyComparison)\nresultsFlow.collect { println(it) }\n" - }, - { - "lang": "console", - "value": "PaintOrder(id=1, qty=5, color=red, vendors=[A])\nPaintOrder(id=2, qty=10, color=purple, vendors=[C, D])\nPaintOrder(id=3, qty=8, color=blue, vendors=[B, A])\nPaintOrder(id=4, qty=6, color=white, vendors=[D])\nPaintOrder(id=5, qty=11, color=yellow, vendors=[A, B])\nPaintOrder(id=6, qty=5, color=pink, vendors=[C])\nPaintOrder(id=7, qty=8, color=green, vendors=[B, C])\nPaintOrder(id=8, qty=7, color=orange, vendors=[A, D])" - }, - { - "lang": "kotlin", - "value": "val orComparison = Filters.or(\n Filters.gt(PaintOrder::qty.name, 8),\n Filters.eq(PaintOrder::color.name, \"pink\")\n)\nval resultsFlow = collection.find(orComparison)\nresultsFlow.collect { println(it) }\n" - }, - { - "lang": "console", - "value": "PaintOrder(id=2, qty=10, color=purple, vendors=[C, D])\nPaintOrder(id=5, qty=11, color=yellow, vendors=[A, B])\nPaintOrder(id=6, qty=5, color=pink, vendors=[C])" - }, - { - "lang": "kotlin", - "value": "val search = listOf(\"A\", \"D\")\nval allComparison = Filters.all(PaintOrder::vendors.name, search)\nval resultsFlow = collection.find(allComparison)\nresultsFlow.collect { println(it) }\n" - }, - { - "lang": "console", - "value": "PaintOrder(id=8, qty=7, color=orange, vendors=[A, D])" - }, - { - "lang": "kotlin", - "value": "val existsComparison = Filters.and(Filters.exists(PaintOrder::qty.name), Filters.nin(\"qty\", 5, 8))\nval resultsFlow = collection.find(existsComparison)\nresultsFlow.collect { println(it) }\n" - }, - { - "lang": "console", - "value": "PaintOrder(id=2, qty=10, color=purple, vendors=[C, D])\nPaintOrder(id=4, qty=6, color=white, vendors=[D])\nPaintOrder(id=5, qty=11, color=yellow, vendors=[A, B])\nPaintOrder(id=8, qty=7, color=orange, vendors=[A, D])" - }, - {
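- "lang": "kotlin", - "value": "// Hedged sketch, not from the original page: comparison, logical, and\n// array operators compose into a single Bson filter for the paints collection.\nval combinedFilter = Filters.and(\n Filters.gte(PaintOrder::qty.name, 5),\n Filters.or(\n Filters.eq(PaintOrder::color.name, \"red\"),\n Filters.all(PaintOrder::vendors.name, listOf(\"A\"))\n )\n)\nval resultsFlow = collection.find(combinedFilter)\nresultsFlow.collect { println(it) }\n" - }, - {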
"lang": "kotlin", - "value": "val regexComparison = Filters.regex(PaintOrder::color.name, \"^p\")\nval resultsFlow = collection.find(regexComparison)\nresultsFlow.collect { println(it) }\n" - }, - { - "lang": "console", - "value": "PaintOrder(id=2, qty=10, color=purple, vendors=[C, D])\nPaintOrder(id=6, qty=5, color=pink, vendors=[C])" - }, - { - "lang": "json", - "value": "{ \"_id\": 9, \"decimalValue\": 54, \"binaryValue\": \"00110110\" }\n{ \"_id\": 10, \"decimalValue\": 20, \"binaryValue\": \"00010100\" }\n{ \"_id\": 11, \"decimalValue\": 68, \"binaryValue\": \"1000100\" }\n{ \"_id\": 12, \"decimalValue\": 102, \"binaryValue\": \"01100110\" }" - }, - { - "lang": "kotlin", - "value": "data class BinaryNumber(\n @BsonId val id: Int,\n val decimalValue: Int,\n val binaryValue: String\n)\nval binaryCollection = database.getCollection(\"binary_numbers\")\n\nval bitmask = 34.toLong() // 00100010 in binary\nval bitsComparison = Filters.bitsAllSet(BinaryNumber::decimalValue.name, bitmask)\nval resultsFlow = binaryCollection.find(bitsComparison)\nresultsFlow.collect { println(it) }\n" - }, - { - "lang": "console", - "value": "BinaryNumber(id=1, decimalValue=54, binaryValue=00110110)\nBinaryNumber(id=4, decimalValue=102, binaryValue=01100110)" - }, - { - "lang": "json", - "value": "{ \"_id\": 13, \"coordinates\": { \"type\": \"Point\", \"coordinates\": [2.0, 2.0] } }\n{ \"_id\": 14, \"coordinates\": { \"type\": \"Point\", \"coordinates\": [5.0, 6.0] } }\n{ \"_id\": 15, \"coordinates\": { \"type\": \"Point\", \"coordinates\": [1.0, 3.0] } }\n{ \"_id\": 16, \"coordinates\": { \"type\": \"Point\", \"coordinates\": [4.0, 7.0] } }" - }, - { - "lang": "kotlin", - "value": "data class Store(\n @BsonId val id: Int,\n val name: String,\n val coordinates: Point\n)\nval collection = database.getCollection(\"stores\")\n\nval square = Polygon(listOf(\n Position(0.0, 0.0),\n Position(4.0, 0.0),\n Position(4.0, 4.0),\n Position(0.0, 4.0),\n Position(0.0, 0.0)))\nval geoWithinComparison = Filters.geoWithin(Store::coordinates.name, square)\n\nval resultsFlow = collection.find(geoWithinComparison)\nresultsFlow.collect { println(it) }\n" - }, - { - "lang": "console", - "value": "Store(id=13, name=Store 13, coordinates=Point{coordinate=Position{values=[2.0, 2.0]}})\nStore(id=15, name=Store 15, coordinates=Point{coordinate=Position{values=[1.0, 3.0]}})" - } - ], - "preview": "In this guide, you can learn how to use builders to specify\nfilters for your queries in the MongoDB Kotlin driver.", - "tags": null, - "facets": { - "target_product": ["drivers"], - "programming_language": ["kotlin"] - } - }, - { - "slug": "fundamentals/builders/indexes", - "title": "Indexes Builders", - "headings": [ - "Overview", - "Ascending Indexes", - "Descending Indexes", - "Compound Indexes", - "Text Indexes", - "Hashed Indexes", - "Geospatial Indexes" - ], - "paragraphs": "In this guide, you can learn how to specify indexes using\n builders in the MongoDB Kotlin Driver.\nThe Indexes builder provides helper methods for constructing the\nfollowing types of indexes: Indexes store a subset of the collection's data set. The index stores\nthe value of a specific field or set of fields, ordered by the value of\nthe field. See our guide on Indexes for\nexamples of queries covered by indexes. The Indexes class provides static factory methods for all the MongoDB index types.\nEach method returns a BSON \ninstance, which you can pass to\n createIndex() . 
Ascending Indexes Descending Indexes Compound Indexes Text Indexes Hashed Indexes Geospatial Indexes For brevity, you may choose to import all methods of the\n Indexes \nclass: An ascending index enables you to sort query results by the value of the\nindexed fields from smallest to largest. In order to create an ascending index, first call the\n ascending() \nbuilder method to create a Bson instance that represents the index\ndocument, passing the name or names of the fields you want to index.\nThen, call the createIndex() method on the collection, passing the Bson \ninstance that contains the index document. The following example specifies an ascending index on the name field: If you have an ascending or a descending index on a single field, MongoDB\ncan sort using the index in either direction. A descending index enables you to sort query results by the value of the\nindexed fields from largest to smallest. In order to create a descending index, first call the\n descending() \nbuilder method to create a Bson instance that represents the index\ndocument, passing the name or names of the fields you want to index.\nThen, call the createIndex() method on the collection, passing the Bson \ninstance that contains the index document. The following example specifies a descending index on the capacity field: In order to create a compound index, first call the\n compoundIndex() \nbuilder method to create a Bson instance that represents the index\ndocument, passing the names of the fields you want to index. Then, call\nthe createIndex() method on the collection, passing the Bson \ninstance that contains the index document. The following example specifies a compound index composed of a\ndescending index on the capacity and year fields, followed\nby an ascending index on the name field: A text index groups documents by the text in the indexed field. In order to create a text index, first call the\n text() \nbuilder method to create a Bson instance that represents the index\ndocument, passing the name of the fields you want to index. Then, call\nthe createIndex() method on the collection, passing the Bson \ninstance that contains the index document. The following example specifies a text index key on the theaters field: A hashed index groups documents by the hash value in the indexed field. In order to create a hashed index, first call the\n hashed() \nbuilder method to create a Bson instance that represents the index\ndocument, passing the name of the fields you want to index. Then, call\nthe createIndex() method on the collection, passing the Bson \ninstance that contains the index document. The following example specifies a hashed index on the capacity \nfield: A 2dsphere index groups documents by the coordinates in the indexed field. In order to create a 2dsphere index, first call the\n geo2dsphere() \nbuilder method to create a Bson instance that represents the index\ndocument, passing the name or names of the fields you want to index.\nThen, call the createIndex() method on the collection, passing the Bson \ninstance that contains the index document.
The following example specifies a 2dsphere index on the location field:", - "code": [ - { - "lang": "kotlin", - "value": "import com.mongodb.client.model.Indexes.*" - }, - { - "lang": "kotlin", - "value": "val ascendingIndex = Indexes.ascending(\"name\")\nval indexName = collection.createIndex(ascendingIndex)\nprintln(indexName)\n" - }, - { - "lang": "console", - "value": "name_1" - }, - { - "lang": "kotlin", - "value": "val descendingIndex = Indexes.descending(\"capacity\")\nval indexName = collection.createIndex(descendingIndex)\nprintln(indexName)\n" - }, - { - "lang": "console", - "value": "capacity_-1" - }, - { - "lang": "kotlin", - "value": "val compoundIndexExample = Indexes.compoundIndex(\n Indexes.descending(\"capacity\", \"year\"),\n Indexes.ascending(\"name\")\n)\nval indexName = collection.createIndex(compoundIndexExample)\nprintln(indexName)\n" - }, - { - "lang": "console", - "value": "capacity_-1_year_-1_name_1" - }, - { - "lang": "kotlin", - "value": "val textIndex = Indexes.text(\"theaters\")\nval indexName = collection.createIndex(textIndex)\nprintln(indexName)\n" - }, - { - "lang": "console", - "value": "theaters_text" - }, - { - "lang": "kotlin", - "value": "val hashedIndex = Indexes.hashed(\"capacity\")\nval indexName = collection.createIndex(hashedIndex)\nprintln(indexName)\n" - }, - { - "lang": "console", - "value": "capacity_hashed" - }, - { - "lang": "kotlin", - "value": "val geo2dsphereIndex = Indexes.geo2dsphere(\"location\")\nval indexName = collection.createIndex(geo2dsphereIndex)\nprintln(indexName)\n" - }, - { - "lang": "console", - "value": "location_2dsphere" - } - ], - "preview": "In this guide, you can learn how to specify indexes using\nbuilders in the MongoDB Kotlin Driver.\nThe Indexes builder provides helper methods for constructing the\nfollowing types of indexes:", - "tags": null, - "facets": { - "target_product": ["drivers"], - "programming_language": ["kotlin"] - } - }, - { - "slug": "fundamentals/builders/projections", - "title": "Projections Builders", - "headings": [ - "Overview", - "Sample Documents and Examples", - "Projection Operations", - "Inclusion", - "Exclusion", - "Combining Projections", - "Exclusion of _id", - "Project an Array Element Match", - "Project an Array Slice", - "Project a Text Score" - ], - "paragraphs": "In this guide, you can learn how to specify projections using\n builders in the MongoDB Kotlin driver. MongoDB supports field projection , specifying which fields to include and exclude when returning results from a\nquery. Projection in MongoDB follows some basic rules: Find more information about projection mechanics in the Project Fields to Return from Query guide in the MongoDB Server documentation. The Projections class provides static factory methods for\nall the MongoDB projection operators. Each method returns an instance of the BSON type which you can pass\nto any method that expects a projection. The _id field is always included unless explicitly excluded Specifying a field for inclusion implicitly excludes all other fields except the _id field Specifying a field for exclusion removes only that field in a query result For brevity, you may choose to import the methods of the\n Projections \nclass: The following sections feature examples that run query and projection operations\non a sample collection called projection_builders . Each section uses\na variable named collection to refer to the MongoCollection instance\nof the projection_builders collection. 
The collection contains the following documents, representing the monthly average\ntemperatures in Celsius for the years 2018 and 2019: The following data class is used to represent the documents in the collection: The following sections contain information on the available projection\noperations and how to construct them using the Projections class. Use the include() method to specify the inclusion of one or more fields. The following example includes the year field and implicitly the _id field: The following example includes the year and type fields and implicitly the _id field: Use the exclude() method to specify the exclusion of one or more fields. The following example excludes the temperatures field: The following example excludes the temperatures and type fields: Use the fields() method to combine multiple projections. The following example includes the year and type fields and excludes the\n _id field: Use the excludeId() convenience method to specify the exclusion of the _id field: Use the elemMatch(String, Bson) method variant to specify an array projection that will include the first\nelement of an array that matches a supplied query filter. This filtering occurs after all documents matching the\nquery filter (if supplied) are retrieved. The following example projects the first element of the temperatures array where the avg field is\ngreater than 10.1 : When you've specified matching criteria in the query portion of your operation, use the elemMatch(String) method\nvariant to specify a positional projection to include\nthe first element of an array. Only documents that match the query filter will be retrieved. The following example projects the first element of the temperatures array: Only the first element that matches the specified query filter will be included,\nregardless of how many matches there may be. In MongoDB version 4.4 and earlier, the specified array field must appear in the query filter. Beginning in MongoDB 4.4,\nyou can use a positional project on an array field that does not appear in the query filter. Use the slice() method to project a slice of an array. The following example projects the first 6 elements of the temperatures array: The following example skips the first 6 elements of the temperatures array and projects the next 6 : Use the metaTextScore() method to specify a projection of the\n score of a text query .
The following example projects the text score as the value of the score field:", - "code": [ - { - "lang": "kotlin", - "value": "import com.mongodb.client.model.Projections.*" - }, - { - "lang": "json", - "value": "{\n \"year\" : 2018,\n \"type\" : \"even number but not a leap year\",\n \"temperatures\" : [\n { \"month\" : \"January\", \"avg\" : 9.765 },\n { \"month\" : \"February\", \"avg\" : 9.675 },\n { \"month\" : \"March\", \"avg\" : 10.004 },\n { \"month\" : \"April\", \"avg\" : 9.983 },\n { \"month\" : \"May\", \"avg\" : 9.747 },\n { \"month\" : \"June\", \"avg\" : 9.65 },\n { \"month\" : \"July\", \"avg\" : 9.786 },\n { \"month\" : \"August\", \"avg\" : 9.617 },\n { \"month\" : \"September\", \"avg\" : 9.51 },\n { \"month\" : \"October\", \"avg\" : 10.042 },\n { \"month\" : \"November\", \"avg\" : 9.452 },\n { \"month\" : \"December\", \"avg\" : 9.86 }\n ]\n},\n{\n \"year\" : 2019,\n \"type\" : \"odd number, can't be a leap year\",\n \"temperatures\" : [\n { \"month\" : \"January\", \"avg\" : 10.023 },\n { \"month\" : \"February\", \"avg\" : 9.808 },\n { \"month\" : \"March\", \"avg\" : 10.43 },\n { \"month\" : \"April\", \"avg\" : 10.175 },\n { \"month\" : \"May\", \"avg\" : 9.648 },\n { \"month\" : \"June\", \"avg\" : 9.686 },\n { \"month\" : \"July\", \"avg\" : 9.794 },\n { \"month\" : \"August\", \"avg\" : 9.741 },\n { \"month\" : \"September\", \"avg\" : 9.84 },\n { \"month\" : \"October\", \"avg\" : 10.15 },\n { \"month\" : \"November\", \"avg\" : 9.84 },\n { \"month\" : \"December\", \"avg\" : 10.366 }\n ]\n}" - }, - { - "lang": "kotlin", - "value": "data class YearlyTemperature(\n @BsonId val id: ObjectId,\n val year: Int,\n val type: String,\n val temperatures: List<MonthlyTemperature>\n) {\n data class MonthlyTemperature(\n val month: String,\n val avg: Double\n )\n}\n" - }, - { - "lang": "kotlin", - "value": "data class Results(@BsonId val id: ObjectId, val year: Int)\n\nval filter = Filters.empty()\nval projection = Projections.include(YearlyTemperature::year.name)\nval resultsFlow = collection.find(filter).projection(projection)\nresultsFlow.collect { println(it) }\n" - }, - { - "lang": "console", - "value": "Results(id=6467808db5003e6354a1ee22, year=2018)\nResults(id=6467808db5003e6354a1ee23, year=2019)" - }, - { - "lang": "kotlin", - "value": "data class Results(@BsonId val id: ObjectId, val year: Int, val type: String)\n\nval filter = Filters.empty()\nval projection = Projections.include(YearlyTemperature::year.name, YearlyTemperature::type.name)\nval resultsFlow = collection.find(filter).projection(projection)\nresultsFlow.collect { println(it) }\n" - }, - { - "lang": "console", - "value": "Results(id=646780e3311323724f69a907, year=2018, type=even number but not a leap year)\nResults(id=646780e3311323724f69a908, year=2019, type=odd number, can't be a leap year)" - }, - { - "lang": "kotlin", - "value": "data class Results(@BsonId val id: ObjectId, val year: Int, val type: String)\nval filter = Filters.empty()\nval projection = Projections.exclude(YearlyTemperature::temperatures.name)\nval resultsFlow = collection.find(filter).projection(projection)\nresultsFlow.collect { println(it) }\n" - }, - { - "lang": "console", - "value": "Results(id=6462976102c85b29a7bfc9d5, year=2018, type=even number but not a leap year)\nResults(id=6462976102c85b29a7bfc9d6, year=2019, type=odd number, can't be a leap year)" - }, - { - "lang": "kotlin", - "value": "data class Results(@BsonId val id: ObjectId, val year: Int)\n\nval filter = Filters.empty()\nval projection = 
Projections.exclude(YearlyTemperature::temperatures.name, YearlyTemperature::type.name)\nval resultsFlow = collection.find(filter).projection(projection)\nresultsFlow.collect { println(it) }\n" - }, - { - "lang": "console", - "value": "Results(id=64629783d7760d2365215147, year=2018)\nResults(id=64629783d7760d2365215148, year=2019)" - }, - { - "lang": "kotlin", - "value": "data class Results(val year: Int, val type: String)\n\nval filter = Filters.empty()\nval projection = Projections.fields(\n Projections.include(YearlyTemperature::year.name, YearlyTemperature::type.name),\n Projections.excludeId()\n)\nval resultsFlow = collection.find(filter).projection(projection)\nresultsFlow.collect { println(it) }\n" - }, - { - "lang": "console", - "value": "Results(year=2018, type=even number but not a leap year)\nResults(year=2019, type=odd number, can't be a leap year)" - }, - { - "lang": "kotlin", - "value": "data class Results(val year: Int, val type: String, val temperatures: List<YearlyTemperature.MonthlyTemperature>)\nval filter = Filters.empty()\nval projection = Projections.excludeId()\nval resultsFlow = collection.find(filter).projection(projection)\nresultsFlow.collect { println(it) }\n" - }, - { - "lang": "console", - "value": "Results(year=2018, type=even number but not a leap year, temperatures=[MonthlyTemperature(month=January, avg=9.765), MonthlyTemperature(month=February, avg=9.675), MonthlyTemperature(month=March, avg=10.004), MonthlyTemperature(month=April, avg=9.983), MonthlyTemperature(month=May, avg=9.747), MonthlyTemperature(month=June, avg=9.65), MonthlyTemperature(month=July, avg=9.786), MonthlyTemperature(month=August, avg=9.617), MonthlyTemperature(month=September, avg=9.51), MonthlyTemperature(month=October, avg=10.042), MonthlyTemperature(month=November, avg=9.452), MonthlyTemperature(month=December, avg=9.86)])\nResults(year=2019, type=odd number, can't be a leap year, temperatures=[MonthlyTemperature(month=January, avg=10.023), MonthlyTemperature(month=February, avg=9.808), MonthlyTemperature(month=March, avg=10.43), MonthlyTemperature(month=April, avg=10.175), MonthlyTemperature(month=May, avg=9.648), MonthlyTemperature(month=June, avg=9.686), MonthlyTemperature(month=July, avg=9.794), MonthlyTemperature(month=August, avg=9.741), MonthlyTemperature(month=September, avg=9.84), MonthlyTemperature(month=October, avg=10.15), MonthlyTemperature(month=November, avg=9.84), MonthlyTemperature(month=December, avg=10.366)])" - }, - { - "lang": "kotlin", - "value": "data class Results(\n val year: Int,\n val temperatures: List<YearlyTemperature.MonthlyTemperature>?\n)\n\nval filter = Filters.empty()\nval projection = Projections.fields(\n Projections.include(YearlyTemperature::year.name),\n Projections.elemMatch(\n YearlyTemperature::temperatures.name,\n Filters.gt(YearlyTemperature.MonthlyTemperature::avg.name, 10.1)\n )\n)\nval resultsFlow = collection.find(filter).projection(projection)\nresultsFlow.collect { println(it) }\n" - }, - { - "lang": "console", - "value": "Results(year=2018, temperatures=null)\nResults(year=2019, temperatures=[MonthlyTemperature(month=March, avg=10.43)])" - }, - { - "lang": "kotlin", - "value": "data class Results(\n val year: Int,\n val temperatures: List<YearlyTemperature.MonthlyTemperature>\n)\n\nval filter = Filters.gt(\n \"${YearlyTemperature::temperatures.name}.${YearlyTemperature.MonthlyTemperature::avg.name}\",\n 10.1\n)\nval projection = Projections.fields(\n Projections.include(YearlyTemperature::year.name),\n Projections.elemMatch(YearlyTemperature::temperatures.name)\n)\nval resultsFlow = 
collection.find(filter).projection(projection)\nresultsFlow.collect { println(it) }\n" - }, - { - "lang": "console", - "value": "Results(year=2019, temperatures=[MonthlyTemperature(month=March, avg=10.43)])" - }, - { - "lang": "kotlin", - "value": "data class Results(val temperatures: List<YearlyTemperature.MonthlyTemperature>)\n\nval filter = Filters.empty()\n// First half of the year\nval projection = Projections.fields(\n Projections.slice(YearlyTemperature::temperatures.name, 6),\n Projections.excludeId()\n)\nval resultsFlow = collection.find(filter)\n .projection(projection)\nresultsFlow.collect { println(it) }\n" - }, - { - "lang": "console", - "value": "Results(temperatures=[MonthlyTemperature(month=January, avg=9.765), MonthlyTemperature(month=February, avg=9.675), MonthlyTemperature(month=March, avg=10.004), MonthlyTemperature(month=April, avg=9.983), MonthlyTemperature(month=May, avg=9.747), MonthlyTemperature(month=June, avg=9.65)])\nResults(temperatures=[MonthlyTemperature(month=January, avg=10.023), MonthlyTemperature(month=February, avg=9.808), MonthlyTemperature(month=March, avg=10.43), MonthlyTemperature(month=April, avg=10.175), MonthlyTemperature(month=May, avg=9.648), MonthlyTemperature(month=June, avg=9.686)])" - }, - { - "lang": "kotlin", - "value": "data class Results(val temperatures: List<YearlyTemperature.MonthlyTemperature>)\n\nval filter = Filters.empty()\n// Second half of the year\nval projection = Projections.fields(\n Projections.slice(YearlyTemperature::temperatures.name, 6, 6),\n Projections.excludeId()\n)\nval resultsFlow = collection.find(filter)\n .projection(projection)\nresultsFlow.collect { println(it) }\n" - }, - { - "lang": "console", - "value": "Results(temperatures=[MonthlyTemperature(month=July, avg=9.786), MonthlyTemperature(month=August, avg=9.617), MonthlyTemperature(month=September, avg=9.51), MonthlyTemperature(month=October, avg=10.042), MonthlyTemperature(month=November, avg=9.452), MonthlyTemperature(month=December, avg=9.86)])\nResults(temperatures=[MonthlyTemperature(month=July, avg=9.794), MonthlyTemperature(month=August, avg=9.741), MonthlyTemperature(month=September, avg=9.84), MonthlyTemperature(month=October, avg=10.15), MonthlyTemperature(month=November, avg=9.84), MonthlyTemperature(month=December, avg=10.366)])" - }, - { - "lang": "kotlin", - "value": "data class Results(val year: Int, val score: Double)\n\nval filter = Filters.text(\"even number\")\nval projection = Projections.fields(\n Projections.include(YearlyTemperature::year.name),\n Projections.metaTextScore(\"score\")\n)\nval resultsFlow = collection.find(filter).projection(projection)\nresultsFlow.collect { println(it) }\n" - }, - { - "lang": "console", - "value": "Results(year=2018, score=1.25)\nResults(year=2019, score=0.625)" - } - ], - "preview": "In this guide, you can learn how to specify projections using\nbuilders in the MongoDB Kotlin driver.", - "tags": null, - "facets": { - "target_product": ["drivers"], - "programming_language": ["kotlin"] - } - }, - { - "slug": "fundamentals/builders/sort", - "title": "Sorts Builders", - "headings": [ - "Overview", - "The Sorts Class", - "Ascending", - "Descending", - "Combining Sort Criteria", - "Text Score" - ], - "paragraphs": "In this guide, you can learn how to specify sort criteria for your\nqueries using builders in the MongoDB Kotlin Driver. Sort criteria are the rules MongoDB uses to sort your data. Some\nexamples of sort criteria are: Builders are classes provided by the Kotlin driver that help you\nconstruct BSON objects. To learn more, see the\n builders guide.
You should read this guide if you want to learn how to use builders to\nspecify sort criteria for your queries. To learn the fundamentals of sorting in the Kotlin driver, see the\n sorting guide. The examples on this page use a sample collection that\ncontains the following documents: This data is modeled with the following Kotlin data class: Smallest number to largest number Earliest time of day to latest time of day Alphabetical order by first name The Sorts class is a builder that provides static factory methods for all sort\ncriteria operators supported by MongoDB. These methods return a Bson object\nthat you can pass to the sort() method of a FindFlow instance or to\n Aggregates.sort() . To learn more about the Aggregates \nclass, see the Aggregates builder guide. For more information about the classes and interfaces in this section, see the\nfollowing API Documentation: Sorts BSON FindFlow Aggregates To specify an ascending sort, use the Sorts.ascending() static\nfactory method. Pass the name of the field you want to sort on to\n Sorts.ascending() . The following example sorts the documents in the\n sample collection by ascending order\non the orderTotal field: To specify a descending sort, use the Sorts.descending() static factory\nmethod. Pass the name of the field you want to sort on to Sorts.descending() . The following example sorts the documents in the\n sample collection in descending order\non the orderTotal field: To combine sort criteria, use the Sorts.orderBy() static factory\nmethod. This method constructs an object containing an ordered list of sort\ncriteria. When performing the sort, if the previous sort criteria result in a\ntie, the sort uses the next sort criteria in the list to determine the order. The following example sorts the documents in the\n sample collection in descending order\non the date field, and in the event of a tie, ascending order on the\n orderTotal field: You can sort text search results by their text score, a value that indicates how\nclosely a search result matches your search string. To specify a sort by the\ntext score of a text search, use the Sorts.metaTextScore() static factory\nmethod. For a detailed example showing how to specify sort criteria using\nthe Sorts.metaTextScore() method, see\n the text search section of the sorting guide. 
For more information, see the Sorts class \nAPI Documentation.", - "code": [ - { - "lang": "json", - "value": "{ \"_id\": 1, \"date\": \"2022-01-03\", \"orderTotal\": 17.86, \"description\": \"1/2 lb cream cheese and 1 dozen bagels\" },\n{ \"_id\": 2, \"date\": \"2022-01-11\", \"orderTotal\": 83.87, \"description\": \"two medium vanilla birthday cakes\" },\n{ \"_id\": 3, \"date\": \"2022-01-11\", \"orderTotal\": 19.49, \"description\": \"1 dozen vanilla cupcakes\" },\n{ \"_id\": 4, \"date\": \"2022-01-15\", \"orderTotal\": 43.62, \"description\": \"2 chicken lunches and a diet coke\" },\n{ \"_id\": 5, \"date\": \"2022-01-23\", \"orderTotal\": 60.31, \"description\": \"one large vanilla and chocolate cake\" },\n{ \"_id\": 6, \"date\": \"2022-01-23\", \"orderTotal\": 10.99, \"description\": \"1 bagel, 1 orange juice, 1 muffin\" }" - }, - { - "lang": "kotlin", - "value": "data class Order(\n @BsonId val id: Int,\n val date: String,\n val orderTotal: Double,\n val description: String,\n)\n" - }, - { - "lang": "kotlin", - "value": "val resultsFlow = collection.find()\n .sort(Sorts.ascending(Order::orderTotal.name))\n\nresultsFlow.collect { println(it) }\n" - }, - { - "lang": "console", - "value": "Order(id=6, date=2022-01-23, orderTotal=10.99, description=1 bagel, 1 orange juice, 1 muffin)\nOrder(id=1, date=2022-01-03, orderTotal=17.86, description=1/2 lb cream cheese and 1 dozen bagels)\nOrder(id=3, date=2022-01-11, orderTotal=19.49, description=1 dozen vanilla cupcakes)\nOrder(id=4, date=2022-01-15, orderTotal=43.62, description=2 chicken lunches and a diet coke)\nOrder(id=5, date=2022-01-23, orderTotal=60.31, description=one large vanilla and chocolate cake)\nOrder(id=2, date=2022-01-11, orderTotal=83.87, description=two medium vanilla birthday cakes)" - }, - { - "lang": "kotlin", - "value": "val resultsFlow = collection.find()\n .sort(Sorts.descending(Order::orderTotal.name))\n\nresultsFlow.collect { println(it) }\n" - }, - { - "lang": "console", - "value": "Order(id=2, date=2022-01-11, orderTotal=83.87, description=two medium vanilla birthday cakes)\nOrder(id=5, date=2022-01-23, orderTotal=60.31, description=one large vanilla and chocolate cake)\nOrder(id=4, date=2022-01-15, orderTotal=43.62, description=2 chicken lunches and a diet coke)\nOrder(id=3, date=2022-01-11, orderTotal=19.49, description=1 dozen vanilla cupcakes)\nOrder(id=1, date=2022-01-03, orderTotal=17.86, description=1/2 lb cream cheese and 1 dozen bagels)\nOrder(id=6, date=2022-01-23, orderTotal=10.99, description=1 bagel, 1 orange juice, 1 muffin)" - }, - { - "lang": "kotlin", - "value": "val orderBySort = Sorts.orderBy(\n Sorts.descending(Order::date.name), Sorts.ascending(Order::orderTotal.name)\n)\nval results = collection.find().sort(orderBySort)\n\nresults.collect {println(it) }\n" - }, - { - "lang": "console", - "value": "Order(id=6, date=2022-01-23, orderTotal=10.99, description=1 bagel, 1 orange juice, 1 muffin)\nOrder(id=5, date=2022-01-23, orderTotal=60.31, description=one large vanilla and chocolate cake)\nOrder(id=4, date=2022-01-15, orderTotal=43.62, description=2 chicken lunches and a diet coke)\nOrder(id=3, date=2022-01-11, orderTotal=19.49, description=1 dozen vanilla cupcakes)\nOrder(id=2, date=2022-01-11, orderTotal=83.87, description=two medium vanilla birthday cakes)\nOrder(id=1, date=2022-01-03, orderTotal=17.86, description=1/2 lb cream cheese and 1 dozen bagels)" - } - ], - "preview": "In this guide, you can learn how to specify sort criteria for your\nqueries using builders in the MongoDB Kotlin 
Driver.", - "tags": null, - "facets": { - "target_product": ["drivers"], - "programming_language": ["kotlin"] - } - }, - { - "slug": "fundamentals/builders/updates", - "title": "Updates Builders", - "headings": [ - "Overview", - "Field Updates", - "Set", - "Unset", - "Set On Insert", - "Increment", - "Multiply", - "Rename", - "Min", - "Max", - "Current Date", - "Current Timestamp", - "Bit", - "Array Updates", - "Add to Set", - "Pop", - "Pull All", - "Pull", - "Push", - "Combining Multiple Update Operators" - ], - "paragraphs": "In this guide, you can learn how to specify updates by using\n builders in the MongoDB Kotlin Driver. The Updates builder provides helper methods for the following types of updates: Some methods that expect updates are: The Updates class provides static factory methods for all the MongoDB update\noperators. Each method returns an instance of the BSON \ntype, which you can pass to any method that expects an update argument. The examples in this guide use the following document: This example is modeled by the following data class unless otherwise noted: Field Updates Array Updates Combining Multiple Update Operators updateOne() updateMany() bulkWrite() For brevity, you may choose to import the methods of the Updates class: Use the set() \nmethod to assign the value of a field in an update operation. The following example sets the value of the qty field to 11 : The preceding example updates the original document to the following state: Use the unset() method\nto delete the value of a field in an update operation. The following example deletes the qty field: The preceding example updates the original document to the following state: Use the setOnInsert() \nmethod to assign the value of a field in an update operation on an\ninsert of a document. The following example sets the value of the color field to \"pink\" if\nthe operation resulted in the insert of a document: The preceding example updates the original document to the following state: If the document is not inserted, no change will occur. Use the inc() \nmethod to increment the value of a numeric field in an update operation. The following example increments the value of the qty field, which was 5 , by 3 : The preceding example updates the original document to the following state: Use the mul() \nmethod to multiply the value of a numeric field in an update operation. The following example multiplies the value of the qty field, which\nwas 5 , by 2 : The preceding example updates the original document to the following state: Use the rename() \nmethod to rename the value of a field in an update operation. The following example renames the qty field to quantity : The preceding example updates the original document to the following state: Use the min() \nmethod to set the value of the field to the given value if the given value is\nless than the current value of the field. The following example updates the qty field to 2 because 2 \nis less than the current value of the qty field ( 5 ): The preceding example updates the original document to the following state: Use the max() \nmethod to update the value of a field with the larger number of the two\nspecified in an update operation. The following example updates the qty field to 8 because 8 \nis greater than the current value of the qty field ( 5 ): The preceding example updates the original document to the following state: Use the currentDate() \nmethod to assign the value of a field in an update operation to the\ncurrent date as a BSON date . 
The following example sets the value of the lastModified field to\nthe current date as a BSON date: Since we wrote this page on June 16, 2023, the preceding example updates\nthe original document to the following state: Use the currentTimestamp() \nmethod to assign the value of a field in an update operation to the\ncurrent date as a timestamp . The following example sets the value of the lastModified field to\nthe current date as a BSON timestamp: Since we wrote this page on June 16, 2023, the preceding example updates\nthe original document to the following state: Use the bitwiseOr() ,\n bitwiseAnd() ,\nand bitwiseXor() \nmethods to perform a bitwise update of the integer value of a field in\nan update operation. The following example performs a bitwise OR between the number\n 10 and the integer value of the qty field ( 5 ): The bitwise operation results in 15 : The preceding example updates the original document to the following state: Use the addToSet() \nmethod to append a value to an array if the value is not already present\nin an update operation. The following example adds a Vendor instance that has a name \nvalue of \"C\" to the vendor array: The preceding example updates the original document to the following state: Use the popFirst() \nmethod to remove the first element of an array and the\n popLast() \nmethod to remove the last element of an array in an update operation. The following example removes the first entry of the vendor array: The preceding example updates the original document to the following state: Use the pullAll() \nmethod to remove all instances of specified values from an existing array in\nan update operation. The following example removes Vendor instances that have name values\nof \"A\" and \"M\" from the vendor array: The preceding example updates the original document to the following state: Use the pull() \nmethod to remove all instances of a specified value from an existing array in\nan update operation. The following example removes Vendor instances that have a name \nvalue of \"D\" from the vendor array: The preceding example updates the original document to the following state: Use the push() \nmethod to append a value to an array in an update operation. The following example adds a Vendor instance that has a name \nvalue of \"Q\" to the vendor array: The preceding example updates the original document to the following state: An application can update multiple fields of a single document by\ncombining two or more of the update operators described in the preceding\nsections. 
The following example increments the value of the qty field by 6 , sets\nthe value of the color field to \"purple\" , and adds a Vendor \ninstance that has a name value of \"R\" to the vendor field: The preceding example updates the original document to the following state:", - "code": [ - { - "lang": "json", - "value": "{\n \"_id\": 1,\n \"color\": \"red\",\n \"qty\": 5,\n \"vendor\": [\n { \"name\": \"A\" },\n { \"name\": \"D\" },\n { \"name\": \"M\" }\n ],\n \"lastModified\": { \"$date\": \"2000-01-01T07:00:00.000Z\" }\n}" - }, - { - "lang": "kotlin", - "value": "import com.mongodb.client.model.Updates.*" - }, - { - "lang": "kotlin", - "value": "data class PaintOrder (\n @BsonId val id: Int,\n val color: String,\n val qty: Int?,\n val vendor: List<Vendor>?,\n val lastModified: LocalDateTime?\n)\n\ndata class Vendor (\n val name: String,\n)\n" - }, - { - "lang": "json", - "value": "{\n \"_id\": 1,\n \"color\": \"red\",\n \"qty\": 11,\n \"vendor\": [\n { \"name\": \"A\" },\n { \"name\": \"D\" },\n { \"name\": \"M\" }\n ],\n \"lastModified\": { \"$date\": \"2000-01-01T07:00:00.000Z\" }\n}" - }, - { - "lang": "kotlin", - "value": "val filter = Filters.eq(\"_id\", 1)\nval update = Updates.set(PaintOrder::qty.name, 11)\ncollection.updateOne(filter, update)\n" - }, - { - "lang": "json", - "value": "{\n \"_id\": 1,\n \"color\": \"red\",\n \"vendor\": [\n { \"name\": \"A\" },\n { \"name\": \"D\" },\n { \"name\": \"M\" }\n ],\n \"lastModified\": { \"$date\": \"2000-01-01T07:00:00.000Z\" }\n}" - }, - { - "lang": "kotlin", - "value": "val filter = Filters.eq(\"_id\", 1)\nval update = Updates.unset(PaintOrder::qty.name)\ncollection.updateOne(filter, update)\n" - }, - { - "lang": "json", - "value": "{\n \"_id\": 1,\n \"color\": \"pink\"\n}" - }, - { - "lang": "kotlin", - "value": "val filter = Filters.eq(\"_id\", 1)\nval update = Updates.setOnInsert(PaintOrder::color.name, \"pink\")\ncollection.updateOne(filter, update, UpdateOptions().upsert(true))\n" - }, - { - "lang": "json", - "value": "{\n \"_id\": 1,\n \"color\": \"red\",\n \"qty\": 8,\n \"vendor\": [\n { \"name\": \"A\" },\n { \"name\": \"D\" },\n { \"name\": \"M\" }\n ],\n \"lastModified\": { \"$date\": \"2000-01-01T07:00:00.000Z\" }\n}" - }, - { - "lang": "kotlin", - "value": "val filter = Filters.eq(\"_id\", 1)\nval update = Updates.inc(PaintOrder::qty.name, 3)\ncollection.updateOne(filter, update)\n" - }, - { - "lang": "json", - "value": "{\n \"_id\": 1,\n \"color\": \"red\",\n \"qty\": 10,\n \"vendor\": [\n { \"name\": \"A\" },\n { \"name\": \"D\" },\n { \"name\": \"M\" }\n ],\n \"lastModified\": { \"$date\": \"2000-01-01T07:00:00.000Z\" }\n}" - }, - { - "lang": "kotlin", - "value": "val filter = Filters.eq(\"_id\", 1)\nval update = Updates.mul(PaintOrder::qty.name, 2)\ncollection.updateOne(filter, update)\n" - }, - { - "lang": "json", - "value": "{\n \"_id\": 1,\n \"color\": \"red\",\n \"vendor\": [\n { \"name\": \"A\" },\n { \"name\": \"D\" },\n { \"name\": \"M\" }\n ],\n \"lastModified\": { \"$date\": \"2000-01-01T07:00:00.000Z\" },\n \"quantity\": 5\n}" - }, - { - "lang": "kotlin", - "value": "val filter = Filters.eq(\"_id\", 1)\nval update = Updates.rename(PaintOrder::qty.name, \"quantity\")\ncollection.updateOne(filter, update)\n" - }, - { - "lang": "json", - "value": "{\n \"_id\": 1,\n \"color\": \"red\",\n \"qty\": 2,\n \"vendor\": [\n { \"name\": \"A\" },\n { \"name\": \"D\" },\n { \"name\": \"M\" }\n ],\n \"lastModified\": { \"$date\": \"2000-01-01T07:00:00.000Z\" }\n}" - }, - { - "lang": "kotlin", - "value": "val filter = 
Filters.eq(\"_id\", 1)\nval update = Updates.min(PaintOrder::qty.name, 2)\ncollection.updateOne(filter, update)\n" - }, - { - "lang": "json", - "value": "{\n \"_id\": 1,\n \"color\": \"red\",\n \"qty\": 8,\n \"vendor\": [\n { \"name\": \"A\" },\n { \"name\": \"D\" },\n { \"name\": \"M\" }\n ],\n \"lastModified\": { \"$date\": \"2000-01-01T07:00:00.000Z\" }\n}" - }, - { - "lang": "kotlin", - "value": "val filter = Filters.eq(\"_id\", 1)\nval update = Updates.max(PaintOrder::qty.name, 8)\ncollection.updateOne(filter, update)\n" - }, - { - "lang": "json", - "value": "{\n \"_id\": 1,\n \"color\": \"red\",\n \"qty\": 5,\n \"vendor\": [\n { \"name\": \"A\" },\n { \"name\": \"D\" },\n { \"name\": \"M\" }\n ],\n \"$date\": \"2023-06-16T17:13:06.373Z\"\n}" - }, - { - "lang": "kotlin", - "value": "val filter = Filters.eq(\"_id\", 1)\nval update = Updates.currentDate(PaintOrder::lastModified.name)\ncollection.updateOne(filter, update)\n" - }, - { - "lang": "json", - "value": "{\n \"_id\": 1,\n \"color\": \"red\",\n \"qty\": 5,\n \"vendor\": [\n { \"name\": \"A\" },\n { \"name\": \"D\" },\n { \"name\": \"M\" }\n ],\n \"$timestamp\": { \"t\": 1686935654, \"i\": 3 }\n}" - }, - { - "lang": "kotlin", - "value": "// Create a new instance of the collection with the flexible `Document` type\n// to allow for the changing of the `lastModified` field to a `BsonTimestamp`\n// from a `LocalDateTime`.\nval collection = database.getCollection(\"paint_orders\")\n\nval filter = Filters.eq(\"_id\", 1)\nval update = Updates.currentTimestamp(PaintOrder::lastModified.name)\ncollection.updateOne(filter, update)\n" - }, - { - "lang": "none", - "value": "0101 // bit representation of 5\n1010 // bit representation of 10\n----\n1111 // bit representation of 15" - }, - { - "lang": "json", - "value": "{\n \"_id\": 1,\n \"color\": \"red\",\n \"qty\": 15,\n \"vendor\": [\n { \"name\": \"A\" },\n { \"name\": \"D\" },\n { \"name\": \"M\" }\n ],\n \"lastModified\": { \"$date\": \"2000-01-01T07:00:00.000Z\" }\n}" - }, - { - "lang": "kotlin", - "value": "val filter = Filters.eq(\"_id\", 1)\nval update = Updates.bitwiseOr(PaintOrder::qty.name, 10)\ncollection.updateOne(filter, update)\n" - }, - { - "lang": "json", - "value": "{\n \"_id\": 1,\n \"color\": \"red\",\n \"qty\": 5,\n \"vendor\": [\n { \"name\": \"A\" },\n { \"name\": \"D\" },\n { \"name\": \"M\" },\n { \"name\": \"C\" }\n ],\n \"lastModified\": { \"$date\": \"2000-01-01T07:00:00.000Z\" }\n}" - }, - { - "lang": "kotlin", - "value": "val filter = Filters.eq(\"_id\", 1)\nval update = Updates.addToSet(PaintOrder::vendor.name, Vendor(\"C\"))\ncollection.updateOne(filter, update)\n" - }, - { - "lang": "json", - "value": "{\n \"_id\": 1,\n \"color\": \"red\",\n \"qty\": 5,\n \"vendor\": [\n { \"name\": \"D\" },\n { \"name\": \"M\" }\n ],\n \"lastModified\": { \"$date\": \"2000-01-01T07:00:00.000Z\" }\n}" - }, - { - "lang": "kotlin", - "value": "val filter = Filters.eq(\"_id\", 1)\nval update = Updates.popFirst(PaintOrder::vendor.name)\ncollection.updateOne(filter, update)\n" - }, - { - "lang": "json", - "value": "{\n \"_id\": 1,\n \"color\": \"red\",\n \"qty\": 5,\n \"vendor\": [\n { \"name\": \"D\" }\n ],\n \"lastModified\": { \"$date\": \"2000-01-01T07:00:00.000Z\" }\n}" - }, - { - "lang": "kotlin", - "value": "val filter = Filters.eq(\"_id\", 1)\nval update = Updates.pullAll(PaintOrder::vendor.name, listOf(Vendor(\"A\"), Vendor(\"M\")))\ncollection.updateOne(filter, update)\n" - }, - { - "lang": "json", - "value": "{\n \"_id\": 1,\n \"color\": \"red\",\n \"qty\": 5,\n 
\"vendor\": [\n { \"name\": \"A\" },\n { \"name\": \"M\" }\n ],\n \"lastModified\": { \"$date\": \"2000-01-01T07:00:00.000Z\" }\n}" - }, - { - "lang": "kotlin", - "value": "val filter = Filters.eq(\"_id\", 1)\nval update = Updates.pull(PaintOrder::vendor.name, Vendor(\"D\"))\ncollection.updateOne(filter, update)\n" - }, - { - "lang": "json", - "value": "{\n \"_id\": 1,\n \"color\": \"red\",\n \"qty\": 5,\n \"vendor\": [\n { \"name\": \"A\" },\n { \"name\": \"D\" },\n { \"name\": \"M\" },\n { \"name\": \"Q\" }\n ],\n \"lastModified\": { \"$date\": \"2000-01-01T07:00:00.000Z\" }\n}" - }, - { - "lang": "kotlin", - "value": "val filter = Filters.eq(\"_id\", 1)\nval update = Updates.push(PaintOrder::vendor.name, Vendor(\"Q\"))\ncollection.updateOne(filter, update)\n" - }, - { - "lang": "json", - "value": "{\n \"_id\": 1,\n \"color\": \"purple\",\n \"qty\": 11,\n \"vendor\": [\n { \"name\": \"A\" },\n { \"name\": \"D\" },\n { \"name\": \"M\" },\n { \"name\": \"R\" }\n ],\n \"lastModified\": { \"$date\": \"2000-01-01T07:00:00.000Z\" }\n}" - }, - { - "lang": "kotlin", - "value": "val filter = Filters.eq(\"_id\", 1)\nval update = Updates.combine(\n Updates.set(PaintOrder::color.name, \"purple\"),\n Updates.inc(PaintOrder::qty.name, 6),\n Updates.push(PaintOrder::vendor.name, Vendor(\"R\"))\n)\ncollection.updateOne(filter, update)\n" - } - ], - "preview": "In this guide, you can learn how to specify updates by using\nbuilders in the MongoDB Kotlin Driver.", - "tags": "code example, change data, nested class", - "facets": { - "genre": ["reference"], - "target_product": ["drivers"], - "programming_language": ["kotlin"] - } - }, - { - "slug": "fundamentals/builders", - "title": "Builders", - "headings": [ - "Overview", - "Why Use Builders?", - "Scenario", - "Using the MongoDB Shell", - "Without Using Builders", - "Using Builders", - "Available Builders" - ], - "paragraphs": "This section includes guides on how to use each of the available\nbuilders, and demonstrates the utility the MongoDB Kotlin driver builder classes\nprovide. The Kotlin driver provides classes to simplify the process for developers\nto use CRUD operations and the Aggregation API. The static utility methods allow you\nto build a query more efficiently. Using the builders class, you leverage the power of: When using builders, the Kotlin compiler and the IDE catch errors such as misspelled\noperators early on. When using the MongoDB shell or plain Kotlin, you\nwrite operators as strings and get no visual indication of a problem,\npushing these errors to runtime instead of compile time. With the builder classes, you write operators as methods. The IDE\ninstantly underlines and gives you a red bar on the right indicating\nsomething is wrong. While developing, the IDE also shows you the\nmethods you can use. It automatically completes your code with\nplaceholder parameters once you select which method you want to use. The Kotlin compiler and the IDE to find errors during development The IDE for discovery and code completion Imagine we want to send a marketing email to all users in our users \ncollection with the following criteria: We only want their email address, so we'll ensure our query doesn't\nreturn data we pay bandwidth costs for but don't need. The documents in the users collection are modeled with the following data class\nin our application: Users that identify as female gender Users that are older than 29 Aggregates for building aggregation pipelines. Filters for building query filters. Indexes for creating index keys. 
Projections for building projections. Sorts for building sort criteria. Updates for building updates.", - "code": [ - { - "lang": "kotlin", - "value": "data class User(\n @BsonId\n val id: BsonObjectId = BsonObjectId(),\n val gender: String,\n val age: Int,\n val email: String,\n)\n" - }, - { - "lang": "js", - "value": "collection.find({ \"gender\": \"female\", \"age\" : { \"$gt\": 29 }}, { \"_id\": 0, \"email\": 1 })" - }, - { - "lang": "kotlin", - "value": "data class Results(val email: String)\n\nval filter = Document().append(\"gender\", \"female\").append(\"age\", Document().append(\"\\$gt\", 29))\nval projection = Document().append(\"_id\", 0).append(\"email\", 1)\nval results = collection.find<Results>(filter).projection(projection)\n" - }, - { - "lang": "kotlin", - "value": "import com.mongodb.client.model.Filters\nimport com.mongodb.client.model.Projections" - }, - { - "lang": "kotlin", - "value": "data class Results(val email: String)\n\nval filter = Filters.and(Filters.eq(User::gender.name, \"female\"), Filters.gt(User::age.name, 29))\nval projection = Projections.fields(Projections.excludeId(), Projections.include(\"email\"))\nval results = collection.find<Results>(filter).projection(projection)\n" - } - ], - "preview": "This section includes guides on how to use each of the available\nbuilders, and demonstrates the utility the MongoDB Kotlin driver builder classes\nprovide.", - "tags": null, - "facets": { - "target_product": ["drivers"], - "programming_language": ["kotlin"] - } - }, - { - "slug": "fundamentals/collations", - "title": "Collations", - "headings": [ - "Overview", - "Sample Data for Examples", - "Collations in MongoDB", - "How to Specify Collations", - "Collection", - "Index", - "Operation", - "Index Types That Do Not Support Collations", - "Collation Options", - "Collation Examples", - "find() and sort() Example", - "findOneAndUpdate() Example", - "findOneAndDelete() Example", - "Aggregation Example" - ], - "paragraphs": "In this guide, you can learn how to use collations with MongoDB to order your\nquery or aggregation operation results by string values. A collation is a set\nof character ordering and matching rules that apply to a specific language and\nlocale. You can learn more about collations in the following sections in this guide: Collations in MongoDB How to Specify Collations Collation Options Collation Code Examples The examples on this page use a MongoDB collection with the following documents: These documents are represented by the following data class: MongoDB sorts strings using binary collation by default. The binary\ncollation uses the ASCII standard character values to\ncompare and order strings. Certain languages and locales have specific\ncharacter ordering conventions that differ from the ASCII character values. For example, in Canadian French, the right-most accented character\n(diacritic) determines the ordering for strings when all preceding characters\nare the same. Consider the following Canadian French words: When using binary collation, MongoDB sorts them in the following order: When using the Canadian French collation, MongoDB sorts them in a different\norder as shown below: cote cot\u00e9 c\u00f4te c\u00f4t\u00e9 MongoDB supports collations on most CRUD operations \nand aggregations. For a complete list of supported operations, see the\n Operations that Support Collations \nserver manual page. 
You can specify the locale code and optional variant in the following string\nformat: The following example specifies the \"de\" locale code and \"phonebook\" variant\ncode: If you do not need to specify a variant, omit everything after the locale\ncode as follows: For a complete list of supported locales, see our server manual page on\n Supported Languages and Locales . The following sections show you different ways to apply collations in\nMongoDB: Collection Index Operation You can set a default collation when you create a collection. When you\ncreate a collection with a specified collation, all supported operations\nthat scan that collection apply the rules of the collation. You can only assign a default collation to a collection when you create that\ncollection. However, you can specify a collation in a new index on an existing\ncollection. See the Index section of this guide\nfor more information. The following snippet shows how to specify the \"en_US\" locale collation\nwhen creating a new collection called names : To check whether you created the collation successfully, retrieve a list\nof the indexes on that collection as follows: You can specify a collation when you create a new index on a collection.\nThe index stores an ordered representation of the documents in the\ncollection so your operation does not need to perform the ordering\nin-memory. To use the index, your operation must meet the following\ncriteria: The following code snippet shows how you can create an index on the \"firstName\"\nfield with the \"en_US\" locale collation in ascending order: To check whether you created the collation successfully, retrieve a list\nof the indexes on that collection as follows: The following code snippet shows an example operation that specifies the\nsame collation and is covered by the index we created in the preceding code snippet: The operation uses the same collation as the one specified in the index. The operation is covered by the index that contains the collation. You can override the default collation on a collection by passing the\nnew collation as a parameter to one of the supported operations. However,\nsince the operation does not use an index, the operation may not perform\nas well as one that is covered by an index. For more information on the\ndisadvantages of sorting operations not covered by an index, see the server\nmanual page on Use Indexes to Sort Query Results . The following code snippet shows an example query operation with the\nfollowing characteristics: The referenced collection contains the default collation \"en_US\" similar to\nthe one specified in the Collection section. The query specifies the Icelandic (\"is\") collation which is not covered\nby the collection's default collation index. Since the specified collation is not covered by an index, the sort\noperation is performed in-memory. While most MongoDB index types support collation, the following types support\nonly binary comparison: text 2d geoHaystack This section covers various collation options and how to specify them to\nfurther refine the ordering and matching behavior. You can use the Collation.Builder class to specify values for the\npreceding collation options. You can call the build() method to construct a\n Collation object as shown in the following code snippet: For more information on the corresponding methods and parameters they\ntake, see the API Documentation for Collation.Builder . 
Collation Option Description Locale Backwards Case-sensitivity Alternate Case First Max Variable Strength Normalization Numeric Ordering This section contains examples that demonstrate how to use a selection of\nMongoDB operations that support collations. In the following examples, we specify the \"de@collation=phonebook\" locale and\nvariant collation. The \"de\" part of the collation specifies the German\nlocale and the \"collation=phonebook\" part specifies a variant. The\n\"de\" locale collation contains rules for prioritizing proper nouns,\nidentified by capitalization of the first letter. In the\n\"collation=phonebook\" variant, characters with umlauts are ordered before\nthe same characters without them in an ascending sort. The following example demonstrates how you can apply a collation when\nretrieving sorted results from a collection. To perform this\noperation, call find() on the example collection and chain the\n collation() and sort() methods to specify the order in which you want\nto receive the results. When we perform this operation on our example collection ,\nthe output should resemble the following: For more information about the methods and classes mentioned in this section,\nsee the following API Documentation: find() sort() Sorts This section demonstrates how you can specify a collation in an\noperation that updates the first match from your query. To specify the\ncollation for this operation, instantiate a FindOneAndUpdateOptions \nobject, set a collation on it, and pass it as a parameter to your call to\nthe findOneAndUpdate() method. In this example, we demonstrate the following: Since \"G\u00fcnter\" is lexically before \"Gunter\" using the\n de@collation=phonebook collation in ascending order, the following operation\nreturns \"G\u00fcnter\" before \"Gunter\" in the results: For more information about the methods and classes mentioned in this section,\nsee the following API Documentation: Retrieve the first document in our example collection \nthat precedes \"Gunter\" in an ascending order. Set options for operation including the \"de@collation=phonebook\" \ncollation. Add a new field \"verified\" with the value \"true\". Retrieve and print the updated document. The following code example uses imports from the\n import com.mongodb.client.model package for convenience. findOneAndUpdate findOneAndUpdateOptions Filters Updates Sorts This section demonstrates how you can specify a numerical ordering of\nstrings in a collation in an operation that deletes the first match from your\nquery. To specify the collation for this operation, instantiate\na FindOneAndDeleteOptions object, set a numeric ordering collation on\nit, and pass it as a parameter to your call to the findOneAndDelete() \nmethod. This example calls the findOneAndDelete() operation on a collection that\ncontains the following documents: These documents are represented by the following data class: In the collation, we set the locale option to \"en\" and the\n numericOrdering option to \"true\" in order to sort strings based on their\nnumerical order. The numeric value of the string \"179\" is greater than the number 100, so\nthe preceding document is the only match. If we perform the same operation without the numerical ordering collation\non the original collection of three documents, the filter matches all of\nour documents since \"100\" comes before \"16\", \"84\", and \"179\" when ordering\nby binary collation. 
For more information about the methods and classes mentioned in this section,\nsee the following API Documentation: The following code example uses imports from the\n import com.mongodb.client.model package for convenience. findOneAndDelete FindOneAndDeleteOptions Filters Sorts This section demonstrates how you can specify a collation in an aggregation\noperation. In an aggregation operation, you can specify a series of\naggregation stages which is collectively called the aggregation pipeline. To\nperform an aggregation, call the aggregate() method on a\n MongoCollection object. To specify a collation for an aggregation operation, call the collation() \nmethod on the AggregateFlow returned by the aggregation operation.\nMake sure to specify a sort aggregation stage on which to apply the\ncollation in your aggregation pipeline. The following example shows how we can construct an aggregation pipeline on\nthe example collection and apply\na collation by specifying the following: For more information about the methods and classes mentioned in this section,\nsee the following API Documentation: A group aggregation stage using the Aggregates.group() helper to\nidentify each document by the firstName field and use that value as\nthe _id of the result. An accumulator in the group aggregation stage to sum the number of\ninstances of matching values in the firstName field. Apply an ascending sort to the _id field of the output documents of\nthe prior aggregation stage. Construct a collation object, specifying the German locale and\na collation strength that ignores accents and umlauts. aggregate() AggregateFlow CollationStrength Accumulators Aggregates Sorts", - "code": [ - { - "lang": "json", - "value": "{ \"_id\" : 1, \"firstName\" : \"Klara\" }\n{ \"_id\" : 2, \"firstName\" : \"Gunter\" }\n{ \"_id\" : 3, \"firstName\" : \"G\u00fcnter\" }\n{ \"_id\" : 4, \"firstName\" : \"J\u00fcrgen\" }\n{ \"_id\" : 5, \"firstName\" : \"Hannah\" }" - }, - { - "lang": "kotlin", - "value": "data class FirstName(\n @BsonId val id: Int, \n val firstName: String, \n val verified: Boolean = false\n)\n" - }, - { - "lang": "none", - "value": "cote\ncot\u00e9\nc\u00f4te\nc\u00f4t\u00e9" - }, - { - "lang": "none", - "value": "cote\nc\u00f4te\ncot\u00e9\nc\u00f4t\u00e9" - }, - { - "lang": "none", - "value": "\"@collation=\"" - }, - { - "lang": "none", - "value": "\"de@collation=phonebook\"" - }, - { - "lang": "none", - "value": "\"de\"" - }, - { - "lang": "kotlin", - "value": "database.createCollection(\n \"names\",\n CreateCollectionOptions().collation(\n Collation.builder().locale(\"en_US\").build()\n )\n)\n" - }, - { - "lang": "kotlin", - "value": "val collection = database.getCollection(\"names\")\nval indexInformation = collection.listIndexes().first()\nprintln(indexInformation.toJson())\n" - }, - { - "lang": "javascript", - "value": "{\n // ...\n \"collation\": {\n \"locale\": \"en_US\",\n // ...\n },\n // ...\n}" - }, - { - "lang": "kotlin", - "value": "val collection = database.getCollection(\"names\")\nval idxOptions = IndexOptions().collation(Collation.builder().locale(\"en_US\").build())\ncollection.createIndex(Indexes.ascending(FirstName::firstName.name), idxOptions)\n" - }, - { - "lang": "kotlin", - "value": "val collection = database.getCollection(\"names\")\nval indexInformation = collection.listIndexes().first()\nprintln(indexInformation.toJson())\n" - }, - { - "lang": "javascript", - "value": "{\n // ...\n \"collation\": {\n \"locale\": \"en_US\",\n // ...\n },\n // ...\n}" - }, - { - "lang": "kotlin", - 
"value": "val resultsFlow = collection.find()\n .collation(Collation.builder().locale(\"en_US\").build())\n .sort(Sorts.ascending(FirstName::firstName.name));\n" - }, - { - "lang": "kotlin", - "value": "val findFlow = collection.find()\n .collation(Collation.builder().locale(\"is\").build())\n .sort(Sorts.ascending(FirstName::firstName.name))\n" - }, - { - "lang": "kotlin", - "value": "Collation.builder()\n .caseLevel(true)\n .collationAlternate(CollationAlternate.SHIFTED)\n .collationCaseFirst(CollationCaseFirst.UPPER)\n .collationMaxVariable(CollationMaxVariable.SPACE)\n .collationStrength(CollationStrength.SECONDARY)\n .locale(\"en_US\")\n .normalization(false)\n .numericOrdering(true)\n .build()\n" - }, - { - "lang": "kotlin", - "value": "val resultsFlow = collection.find()\n .collation(Collation.builder().locale(\"de@collation=phonebook\").build())\n .sort(Sorts.ascending(FirstName::firstName.name))\n\nresultsFlow.collect { println(it) }\n" - }, - { - "lang": "console", - "value": "FirstName(id=3, firstName=G\u00fcnter, verified=false)\nFirstName(id=2, firstName=Gunter, verified=false)\nFirstName(id=5, firstName=Hannah, verified=false)\nFirstName(id=4, firstName=J\u00fcrgen, verified=false)\nFirstName(id=1, firstName=Klara, verified=false)" - }, - { - "lang": "kotlin", - "value": "val result = collection.findOneAndUpdate(\n Filters.lt(FirstName::firstName.name, \"Gunter\"),\n Updates.set(\"verified\", true),\n FindOneAndUpdateOptions()\n .collation(Collation.builder().locale(\"de@collation=phonebook\").build())\n .sort(Sorts.ascending(FirstName::firstName.name))\n .returnDocument(ReturnDocument.AFTER)\n)\nprintln(result)\n" - }, - { - "lang": "console", - "value": "FirstName(id=3, firstName=G\u00fcnter, verified=true)" - }, - { - "lang": "json", - "value": "{ \"_id\" : 1, \"a\" : \"16 apples\" }\n{ \"_id\" : 2, \"a\" : \"84 oranges\" }\n{ \"_id\" : 3, \"a\" : \"179 bananas\" }" - }, - { - "lang": "kotlin", - "value": "data class CollationExample(@BsonId val id: Int, val a: String)\n" - }, - { - "lang": "kotlin", - "value": "val result = collection.findOneAndDelete(\n Filters.gt(CollationExample::a.name, \"100\"),\n FindOneAndDeleteOptions()\n .collation(Collation.builder().locale(\"en\").numericOrdering(true).build())\n .sort(Sorts.ascending(CollationExample::a.name))\n)\nprintln(result)\n" - }, - { - "lang": "console", - "value": "CollationExample(id=3, a=179 bananas)" - }, - { - "lang": "kotlin", - "value": "data class Result(@BsonId val id: String, val nameCount: Int)\nval groupStage = Aggregates.group(\n \"\\$${FirstName::firstName.name}\",\n Accumulators.sum(\"nameCount\", 1)\n)\nval sortStage = Aggregates.sort(Sorts.ascending(\"_id\"))\nval resultsFlow = collection.aggregate(listOf(groupStage, sortStage))\n .collation(\n Collation.builder().locale(\"de\")\n .collationStrength(CollationStrength.PRIMARY)\n .build()\n )\nresultsFlow.collect { println(it) }\n" - }, - { - "lang": "console", - "value": "Result(id=Gunter, nameCount=2)\nResult(id=Hannah, nameCount=1)\nResult(id=J\u00fcrgen, nameCount=1)\nResult(id=Klara, nameCount=1)" - } - ], - "preview": "In this guide, you can learn how to use collations with MongoDB to order your\nquery or aggregation operation results by string values. 
A collation is a set\nof character ordering and matching rules that apply to a specific language and\nlocale.", - "tags": null, - "facets": { - "target_product": ["drivers"], - "programming_language": ["kotlin"] - } - }, - { - "slug": "fundamentals/connection/connect", - "title": "Connect to MongoDB", - "headings": [ - "MongoClient", - "Connection URI", - "Atlas Connection Example", - "Other Ways to Connect to MongoDB", - "Connect to a MongoDB Server on Your Local Machine", - "Connect to a Replica Set" - ], - "paragraphs": "In this guide, you can learn how to connect to a MongoDB instance or\nreplica set using the Kotlin driver. You can view sample code to connect to an Atlas cluster \nor continue reading to learn more about the MongoClient class and\nconnection URIs. You can connect to and communicate with MongoDB using the MongoClient \nclass. Use the MongoClient.create() method to construct a MongoClient . To learn more about how connection pools work in the driver, see the FAQ page . All resource usage limits, such as max connections, apply to individual\n MongoClient instances. To learn about the different settings you can use to control the\nbehavior of your MongoClient , see the guide on\n MongoClient Settings . As each MongoClient represents a thread-safe pool of connections to the\ndatabase, most applications only require a single instance of a\n MongoClient , even across multiple threads. Always call MongoClient.close() to clean up resources when an\ninstance is no longer needed. The connection URI provides a set of instructions that the driver uses to\nconnect to a MongoDB deployment. It instructs the driver on how it should\nconnect to MongoDB and how it should behave while connected. The following\nfigure explains each part of a sample connection URI: This figure uses the Standard Connection String Format ,\n mongodb for the protocol. You can also use the DNS Seed List Connection Format ,\n mongodb+srv , if you want more flexibility of deployment and the ability\nto change the servers in rotation without reconfiguring clients. The next part of the connection URI contains your credentials if you are\nusing a password-based authentication mechanism. Replace the value of user \nwith your database username and pass with your database user's password. If your\nauthentication mechanism does not require credentials, omit this part of\nthe connection URI. The next part of the connection URI specifies the hostname or IP\naddress, followed by the port of your MongoDB instance. In the example,\n sample.host represents the hostname and 27017 is the port number.\nReplace these values to refer to your MongoDB instance. The last part of the connection URI contains connection options as parameters.\nIn the example, we set two connection options: maxPoolSize=20 and\n w=majority . For more information on connection options, skip to the\n Connection Options section of this guide. If your deployment is on MongoDB Atlas, see the\n Atlas driver connection guide \nand select Kotlin from the language dropdown to retrieve your connection\nstring. To connect to a MongoDB deployment on Atlas, create a client. You can\ncreate a client that uses your connection string and other\nclient options by passing a MongoClientSettings object to the\n MongoClient.create() method. To instantiate a MongoClientSettings object, use the builder method to specify\nyour connection string and any other client options, and then call the build() \nmethod. 
Chain the applyConnectionString() method to the builder to specify your\nconnection URI. You can set the Stable API version client option to avoid\nbreaking changes when you upgrade to a new server version. To\nlearn more about the Stable API feature, see the Stable API page . The following code shows how you can specify the connection string and\nthe Stable API client option when connecting to a MongoDB\ndeployment on Atlas and verify that the connection is successful: If you are connecting to a single MongoDB server instance or replica set\nthat is not hosted on Atlas, see the following sections to find out how to\nconnect. To test whether you can connect to your server, replace the connection\nstring in the Connect to MongoDB Atlas code\nexample and run it. If you need to run a MongoDB server on your local machine for development\npurposes instead of using an Atlas cluster, you need to complete the following: After you successfully start your MongoDB server, specify your connection\nstring in your driver connection code. If your MongoDB Server is running locally, you can use the connection string\n \"mongodb://localhost:<port>\" where <port> is the port number you\nconfigured your server to listen for incoming connections. If you need to specify a different hostname or IP address, see our Server\nManual entry on Connection Strings . Download the Community \nor Enterprise version\nof MongoDB Server. Install and configure \nMongoDB Server. Start the server. Always secure your MongoDB server from malicious attacks. See our\n Security Checklist for a\nlist of security recommendations. A MongoDB replica set deployment is a group of connected instances that\nstore the same set of data. This configuration of instances provides data\nredundancy and high data availability. To connect to a replica set deployment, specify the hostnames (or IP\naddresses) and port numbers of the members of the replica set. If you are not able to provide a full list of hosts in the replica set,\nyou can specify a single or subset of the hosts in the replica set and\ninstruct the driver to perform automatic discovery in one of the following\nways: The following examples show how to specify multiple hosts to a MongoClient \ninstance using either the ConnectionString or MongoClientSettings \nclass. Select the tab that corresponds to your preferred class. Specify the name of the replica set as the value of the replicaSet \nparameter Specify false as the value of the directConnection parameter Specify more than one host in the replica set Although you can specify a subset of the hosts in a replica set,\ninclude all the hosts in the replica set to ensure the driver is able to\nestablish the connection if one of the hosts is unreachable.", - "code": [ - { - "lang": "kotlin", - "value": "// Replace the placeholder with your Atlas connection string\nval uri = \"<connection string>\"\n\n// Construct a ServerApi instance using the ServerApi.builder() method\nval serverApi = ServerApi.builder()\n .version(ServerApiVersion.V1)\n .build()\nval settings = MongoClientSettings.builder()\n .applyConnectionString(ConnectionString(uri))\n .serverApi(serverApi)\n .build()\n// Create a new client and connect to the server\nval mongoClient = MongoClient.create(settings)\nval database = mongoClient.getDatabase(\"admin\")\ntry {\n // Send a ping to confirm a successful connection\n val command = Document(\"ping\", BsonInt64(1))\n val commandResult = database.runCommand(command)\n println(\"Pinged your deployment. 
You successfully connected to MongoDB!\")\n} catch (me: MongoException) {\n System.err.println(me)\n}\n" - }, - { - "lang": "kotlin", - "value": "val connectionString = ConnectionString(\"mongodb://host1:27017,host2:27017,host3:27017/\")\nval mongoClient = MongoClient.create(connectionString)\n" - }, - { - "lang": "kotlin", - "value": "val seed1 = ServerAddress(\"host1\", 27017)\nval seed2 = ServerAddress(\"host2\", 27017)\nval seed3 = ServerAddress(\"host3\", 27017)\nval settings = MongoClientSettings.builder()\n .applyToClusterSettings { builder ->\n builder.hosts(\n listOf(seed1, seed2, seed3)\n )\n }\n .build()\nval mongoClient = MongoClient.create(settings)\n" - } - ], - "preview": "In this guide, you can learn how to connect to a MongoDB instance or\nreplica set using the Kotlin driver.", - "tags": null, - "facets": { - "target_product": ["drivers"], - "programming_language": ["kotlin"] - } - }, - { - "slug": "fundamentals/connection/connection-options", - "title": "Connection Options", - "headings": [], - "paragraphs": "This section explains MongoDB connection and authentication options\nsupported by the driver. You can pass the connection options as\nparameters of the connection URI to specify the behavior of the client. For a complete list of options, see the\n ConnectionString \nAPI reference page. Option Name Type Description minPoolSize integer Specifies the minimum number of connections that must exist at\nany moment in a single connection pool. maxPoolSize integer Specifies the maximum number of connections that a connection\npool may have at a given time. waitQueueTimeoutMS integer Specifies the maximum amount of time, in milliseconds that a\nthread may wait for a connection to become available. serverSelectionTimeoutMS integer Specifies the maximum amount of time, in milliseconds, the driver\nwill wait for server selection to succeed before throwing an\nexception. localThresholdMS integer When communicating with multiple instances of MongoDB in a replica\nset, the driver will only send requests to a server whose\nresponse time is less than or equal to the server with the fastest\nresponse time plus the local threshold, in milliseconds. heartbeatFrequencyMS integer Specifies the frequency, in milliseconds that the driver will\nwait between attempts to determine the current state of each\nserver in the cluster. replicaSet string Specifies that the connection string \nprovided includes multiple hosts. When specified, the driver\nattempts to find all members of that set. ssl boolean Specifies that all communication with MongoDB instances should\nuse TLS/SSL. Superseded by the tls option. tls boolean Specifies that all communication with MongoDB instances should\nuse TLS. Supersedes the ssl option. tlsInsecure boolean Specifies that the driver should allow invalid hostnames for TLS\nconnections. Has the same effect as setting\n tlsAllowInvalidHostnames to true . To configure TLS security\nconstraints in other ways, use a\n custom SSLContext . tlsAllowInvalidHostnames boolean Specifies that the driver should allow invalid hostnames in the\ncertificate for TLS connections. Supersedes\n sslInvalidHostNameAllowed . connectTimeoutMS integer Specifies the maximum amount of time, in milliseconds, the Kotlin\ndriver waits for a connection to open before timing out. A value of\n 0 instructs the driver to never time out while waiting for a connection\nto open. 
socketTimeoutMS integer Specifies the maximum amount of time, in milliseconds, the Kotlin\ndriver will wait to send or receive a request before timing out.\nA value of 0 instructs the driver to never time out while waiting\nto send or receive a request. maxIdleTimeMS integer Specifies the maximum amount of time, in milliseconds, that the driver\nallows a pooled connection to idle before closing the\nconnection. A value of 0 indicates that there is no upper bound\non how long the driver allows a pooled connection to be idle. maxLifeTimeMS integer Specifies the maximum amount of time, in milliseconds, the Kotlin\ndriver will continue to use a pooled connection before closing the\nconnection. A value of 0 indicates that there is no upper bound\non how long the driver can keep a pooled connection open. journal boolean Specifies that the driver must wait for the connected MongoDB\ninstance to group commit to the journal file on disk for all writes. w string or integer Specifies the write concern. For more information on values, see\nthe server documentation for the w option . wtimeoutMS integer Specifies a time limit, in milliseconds, for the write concern. For\nmore information, see the server documentation for the\n wtimeoutMS option .\nA value of 0 instructs the driver to never time out write operations. readPreference string Specifies the read preference. For more information on values, see\nthe server documentation for the\n readPreference option . readPreferenceTags string Specifies the read preference tags. For more information on values, see\nthe server documentation for the\n readPreferenceTags option . maxStalenessSeconds integer Specifies, in seconds, how stale a secondary can be before the\ndriver stops communicating with that secondary. The minimum value is\neither 90 seconds or the heartbeat frequency plus 10 seconds, whichever\nis greater. For more information, see the server documentation for the\n maxStalenessSeconds option .\nNot providing a parameter or explicitly specifying -1 indicates\nthat there should be no staleness check for secondaries. authMechanism string Specifies the authentication mechanism \nthat the driver should use if a credential\nwas supplied. authSource string Specifies the database that the supplied credentials should be\nvalidated against. authMechanismProperties string Specifies authentication properties for the specified authentication\nmechanism as a list of colon-separated properties and values.\nFor more information, see the server documentation for\nthe authMechanismProperties option . appName string Specifies the name of the application provided to MongoDB instances\nduring the connection handshake. Can be used for server logs and\nprofiling. compressors string Specifies one or more compression algorithms that the driver\nwill attempt to use to compress requests sent to the connected\nMongoDB instance. Possible values include: zlib , snappy ,\nand zstd . zlibCompressionLevel integer Specifies the degree of compression that Zlib \nshould use to decrease the size of requests to the connected MongoDB\ninstance. The level can range from -1 to 9 , with lower values\ncompressing faster (but resulting in larger requests) and larger values\ncompressing slower (but resulting in smaller requests). retryWrites boolean Specifies that the driver must retry supported write operations\nif they fail due to a network error. retryReads boolean Specifies that the driver must retry supported read operations\nif they fail due to a network error. 
serverMonitoringMode string Specifies which server monitoring protocol the driver uses. When set to\n auto , the monitoring mode is determined by the environment in which\nthe driver is running. The driver uses poll mode in function-as-a-service\n(FaaS) environments and stream mode in other environments. uuidRepresentation string Specifies the UUID representation to use for read and write\noperations. For more information, see the driver documentation\nfor the\n MongoClientSettings.getUuidRepresentation() method . directConnection boolean Specifies that the driver must connect to the host directly. maxConnecting integer Specifies the maximum number of connections a pool may be establishing\nconcurrently. srvServiceName string Specifies the service name of the\n SRV resource records \nthe driver retrieves to construct your\n seed list .\nYou must use the\n DNS Seed List Connection Format \nin your\n connection URI \nto use this option.", - "code": [], - "preview": "This section explains MongoDB connection and authentication options\nsupported by the driver. You can pass the connection options as\nparameters of the connection URI to specify the behavior of the client.", - "tags": null, - "facets": { - "target_product": ["drivers"], - "programming_language": ["kotlin"] - } - }, - { - "slug": "fundamentals/connection/mongoclientsettings", - "title": "Specify MongoClient Settings", - "headings": [ - "Overview", - "MongoClient Settings", - "Example", - "Cluster Settings", - "Example", - "Socket Settings", - "Example", - "Connection Pool Settings", - "Example", - "Server Settings", - "Example", - "TLS/SSL Settings", - "Example" - ], - "paragraphs": "In this guide, you can learn about the different settings to control\nthe behavior of your MongoClient . The following sections describe commonly used settings: MongoClient Settings Cluster Settings Socket Settings Connection Pool Settings Server Settings TLS/SSL Settings You can control the behavior of your MongoClient by creating and passing\nin a MongoClientSettings \nobject to the MongoClient.create() \nmethod. To create a MongoClientSettings object, use the\n MongoClientSettings.builder() method and chain methods to specify your\nsettings. After chaining them, use the build() method to create the\n MongoClientSettings object. The following table describes all the methods you can chain to modify your\nconnection behavior: Method Description addCommandListener() Adds a listener for command events . applicationName() Sets the logical name of the application using the MongoClient . applyConnectionString() Applies the settings from the given ConnectionString to the\nbuilder. If you omit this method, the driver attempts to connect to\n localhost . applyToClusterSettings() Applies the ClusterSettings.Builder block and then sets the\n cluster settings . applyToConnectionPoolSettings() Applies the ConnectionPoolSettings.Builder block and then sets the\n connection pool settings . applyToServerSettings() Applies the ServerSettings.Builder block and then sets the\n server settings . applyToSocketSettings() Applies the SocketSettings.Builder block and then sets the\n socket settings . applyToSslSettings() Applies the SslSettings.Builder block and then sets the\n TLS/SSL settings . autoEncryptionSettings() Sets the auto-encryption settings . codecRegistry() Sets the codec registry . commandListenerList() Sets the command listeners . compressorList() Sets the compressors to use for compressing\nmessages to the server. 
credential() Sets the credential . readConcern() Sets the read concern . readPreference() Sets the read preference . retryReads() Whether the driver should retry reads \nif a network error occurs. retryWrites() Whether the driver should retry writes \nif a network error occurs. serverApi() Sets the server API to use when sending\ncommands to the server. streamFactoryFactory() Sets the factory to use to create a StreamFactory . uuidRepresentation() Sets the UUID representation to use when encoding instances of UUID\nand decoding BSON binary values with subtype of 3. writeConcern() Sets the write concern . This example demonstrates specifying a ConnectionString : Each settings class also has an applyConnectionString() method. These methods are\nrarely needed within the settings, so you should apply the connection string at the top level, as shown\nin the preceding example . Some options in the settings map to a connection string option.\nIf you specify the same options in your settings and connection\nstring, the order you chain them determines which option the driver\nuses. The driver uses the last setting it reads. For example, this snippet contains settings with the following times\nfor the driver to connect to an available socket: Since the driver reads the socket settings options last, the driver\nexpects to connect to an available socket within 5 SECONDS before\ntiming out. The connection string specifies within 2 SECONDS The socket settings specify within\n 5 SECONDS To log the MongoClient instance settings,\nset the org.mongodb.driver.client named\nlogger to the INFO level. To learn more about logging with the MongoDB Kotlin Driver, see the\n Logging guide. Chain the applyToClusterSettings() \nmethod to modify the driver's behavior when interacting with your\nMongoDB cluster. The following table describes all the methods you can chain to your\nsettings to modify the driver's behavior: Method Description addClusterListener() Adds a listener for cluster-related events. applyConnectionString() Uses the settings from a ConnectionString object. applySettings() Uses the cluster settings specified in a ClusterSettings object. hosts() Sets all the specified locations of a Mongo server. localThreshold() Sets the amount of time that a server\u2019s round trip can take and still\nbe eligible for server selection. mode() Sets how to connect to a MongoDB server. requiredClusterType() Sets the type of cluster required for the cluster. requiredReplicaSetName() Sets the replica set name required for the cluster. serverSelectionTimeout() Sets the maximum time to select a primary node before throwing a\ntimeout exception. serverSelector() Adds a server selector to apply before server selection. srvHost() Sets the host name to use to look up an SRV DNS record to find the\nMongoDB hosts. When setting srvHost , the driver does not process any\nTXT records associated with the host. If you want to enable the processing of TXT records, you must\nspecify the SRV host in the connection string using the\n applyConnectionString() method. srvMaxHosts() Sets the maximum number of hosts the driver can connect to when using\nthe DNS seedlist (SRV) connection protocol. This example specifies for the driver to connect directly to a server,\nregardless of the type of MongoDB cluster it's a part of: This is analogous to the directConnection parameter you can specify\nin your connection URI. See Connection Options for more\ninformation. Chain the applyToSocketSettings() \nmethod to modify the driver's behavior when connecting and communicating\nwith your MongoDB server.
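Before the socket-settings table below, a minimal sketch of the cluster-settings pattern just described, assuming a placeholder connection string; serverSelectionTimeout() is taken from the table above:

    import com.mongodb.ConnectionString
    import com.mongodb.MongoClientSettings
    import com.mongodb.kotlin.client.coroutine.MongoClient
    import java.util.concurrent.TimeUnit

    val mongoClient = MongoClient.create(
        MongoClientSettings.builder()
            .applyConnectionString(ConnectionString("<connection string>"))
            .applyToClusterSettings { builder ->
                // Give up on server selection after 10 seconds instead of the default 30.
                builder.serverSelectionTimeout(10, TimeUnit.SECONDS)
            }
            .build()
    )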
The following table describes all the methods you can chain to your settings\nto modify the driver's behavior: Method Description applyConnectionString() Uses the settings from a ConnectionString object. applySettings() Uses the socket settings specified in a SocketSettings object. applyToProxySettings() Applies the ProxySettings.Builder block and then sets the\n proxySettings field. connectTimeout() Sets the maximum time to connect to an available socket before throwing\na timeout exception. readTimeout() Sets the maximum time to read from an available socket before throwing a\ntimeout exception. receiveBufferSize() Sets the socket's buffer size when receiving. sendBufferSize() Sets the socket's buffer size when sending. This example specifies the following driver behavior in a MongoDB socket: To connect to an available socket within 10 SECONDS To read from an available socket within 15 SECONDS Chain the applyToConnectionPoolSettings() \nmethod to modify the way the driver manages its connection pool. The following table describes all the methods you can chain to your\nsettings to modify the driver's behavior: Method Description addConnectionPoolListener() Adds a listener for connection pool-related events. applyConnectionString() Uses the settings from a ConnectionString object. applySettings() Uses the connection pool settings specified in a\n ConnectionPoolSettings object. maintenanceFrequency() Sets the frequency for running a maintenance job. maintenanceInitialDelay() Sets the time to wait before running the first maintenance job. maxConnectionIdleTime() Sets the maximum time a connection can be idle before it's closed. maxConnectionLifeTime() Sets the maximum time a pooled connection can be alive before it's\nclosed. maxWaitTime() Sets the maximum time to wait for an available connection. maxSize() Sets the maximum amount of connections associated with a connection\npool. minSize() Sets the minimum amount of connections associated with a connection\npool. These maxSize and minSize settings apply to each server\nin the cluster you connect the driver to. For example, assume you connect the driver to a cluster with three\n mongos servers. This means that there can be at most maxSize \nconnections and at least minSize connections to each mongos server. This example specifies the following driver behavior in a pool of\n Connection types: The thread to wait at most 10 SECONDS for an available connection To have at most 200 connections associated with the pool Chain the applyToServerSettings() \nmethod to modify the driver's behavior when monitoring each MongoDB\nserver. The following table describes all the methods you can chain to your\nsettings to modify the driver's behavior: Method Description addServerListener() Adds a listener for server-related events. addServerMonitorListener() Adds a listener for server monitor-related events. applyConnectionString() Uses the settings from a ConnectionString object. applySettings() Uses the server settings specified in a ServerSettings object. heartbeatFrequency() Sets the interval for a cluster monitor to attempt reaching a server. minHeartbeatFrequency() Sets the minimum interval for server monitoring checks. serverMonitoringMode() Specifies which server monitoring protocol the driver uses.
This example specifies the following driver behavior in a MongoDB server: The minimum interval for server monitoring checks to be at least\n 700 MILLISECONDS The cluster monitor to attempt reaching a server every 15 SECONDS Chain the applyToSslSettings() \nmethod to modify the driver's behavior when using TLS/SSL to secure a\nconnection between your application and MongoDB. The following table describes all the methods you can chain to your\nsettings to modify the driver's behavior: Method Description applyConnectionString() Uses the settings from a ConnectionString object. applySettings() Uses the TLS/SSL settings specified in a SslSettings object. context() Sets the SSLContext for use when you enable TLS/SSL. enabled() Whether to enable TLS/SSL. (You must enable this for Atlas clusters.) invalidHostNameAllowed() Whether to allow a mismatch between the server\u2019s hostname and the\nhostname specified by the TLS certificate. This example specifies for the driver to enable TLS/SSL when connecting\nto MongoDB:", - "code": [ - { - "lang": "kotlin", - "value": "val mongoClient = MongoClient.create(\n MongoClientSettings.builder()\n .applyConnectionString(ConnectionString(\"\"))\n .build()\n)\n" - }, - { - "lang": "kotlin", - "value": "val mongoClient = MongoClient.create(\n MongoClientSettings.builder()\n .applyConnectionString(ConnectionString(\"mongodb+srv://<username>:<password>@<hostname>/?connectTimeoutMS=2000\"))\n .applyToSocketSettings{ builder ->\n builder.connectTimeout(5, TimeUnit.SECONDS)\n }\n .build()\n)\n" - }, - { - "lang": "kotlin", - "value": "val mongoClient = MongoClient.create(\n MongoClientSettings.builder()\n .applyConnectionString(ConnectionString(\"mongodb+srv://host1.acme.com\"))\n .build()\n)\n" - }, - { - "lang": "kotlin", - "value": "val mongoClient = MongoClient.create(\n MongoClientSettings.builder()\n .applyToClusterSettings{ builder ->\n builder.mode(ClusterConnectionMode.SINGLE)\n }\n .build()\n)\n" - }, - { - "lang": "kotlin", - "value": "val mongoClient = MongoClient.create(\n MongoClientSettings.builder()\n .applyConnectionString(ConnectionString(\"\"))\n .applyToSocketSettings{ builder ->\n builder\n .connectTimeout(10, TimeUnit.SECONDS)\n .readTimeout(15, TimeUnit.SECONDS)\n }\n .build()\n)\n" - }, - { - "lang": "kotlin", - "value": "val mongoClient = MongoClient.create(\n MongoClientSettings.builder()\n .applyConnectionString(ConnectionString(\"\"))\n .applyToConnectionPoolSettings{ builder ->\n builder\n .maxWaitTime(10, TimeUnit.SECONDS)\n .maxSize(200)\n }\n .build()\n)\n" - }, - { - "lang": "kotlin", - "value": "val mongoClient = MongoClient.create(\n MongoClientSettings.builder()\n .applyConnectionString(ConnectionString(\"\"))\n .applyToServerSettings{ builder ->\n builder\n .minHeartbeatFrequency(700, TimeUnit.MILLISECONDS)\n .heartbeatFrequency(15, TimeUnit.SECONDS)\n }\n .build()\n)\n" - }, - { - "lang": "kotlin", - "value": "val mongoClient = MongoClient.create(\n MongoClientSettings.builder()\n .applyConnectionString(ConnectionString(\"\"))\n .applyToSslSettings{ builder ->\n builder.enabled(true)\n }\n .build()\n)\n" - } - ], - "preview": "In this guide, you can learn about the different settings to control\nthe behavior of your MongoClient.", - "tags": null, - "facets": { - "target_product": ["drivers"], - "programming_language": ["kotlin"] - } - }, - { - "slug": "fundamentals/connection/network-compression", - "title": "Network Compression", - "headings": [ - "Specify Compression Algorithms", - "Compression Algorithm Dependencies" - ], - "paragraphs": "The MongoDB
Kotlin Driver provides a connection option to compress messages.\nThis reduces the amount of data passed over the network between MongoDB\nand your application. The driver supports the following algorithms: The driver tests against the following versions of these libraries: If you specify multiple compression algorithms, the driver selects the\nfirst one that is supported by the MongoDB instance that the driver is\nconnected to. Snappy : available in MongoDB 3.4 and later. Zlib : available in MongoDB 3.6 and later. Zstandard : available in MongoDB 4.2 and later. org.xerial.snappy:snappy-java:1.1.8.4 com.github.luben:zstd-jni:1.5.5-2 If your application requires Snappy or Zstandard compression, you must add\n explicit dependencies for those algorithms. You can enable compression on your connection by specifying the\nalgorithms in the following ways: Adding the compressors parameter to your ConnectionString instance Calling the compressorList() method from the MongoClientSettings builder To enable compression on your connection in a ConnectionString \ninstance, specify the compressors parameter. You can specify\none or more of the following values for the compressors parameter: The following example shows how to specify Snappy, Zlib, and\nZstandard as the compression algorithms for a connection: \"snappy\" for Snappy compression \"zlib\" for Zlib compression \"zstd\" for Zstandard compression To enable compression within your MongoClientSettings ,\ncall the compressorList() \nbuilder method and pass one or more MongoCompressor \ninstances as a parameter. You can specify compression algorithms by calling the following\nmethods from MongoCompressor : The following example shows how to specify Snappy, Zlib, and\nZstandard as the compression algorithms for a connection: createSnappyCompressor() for Snappy compression createZlibCompressor() for Zlib compression createZstdCompressor() for Zstandard compression The JDK supports Zlib compression natively, but\n Snappy and\n Zstandard depend on open source\nimplementations.
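For readers wiring this up, a hedged build-script sketch (Gradle Kotlin DSL assumed) showing the explicit dependencies mentioned above, using the artifact coordinates and versions the driver tests against:

    // build.gradle.kts
    dependencies {
        // Required only if you enable Snappy or Zstandard compression;
        // Zlib needs no extra dependency because the JDK supports it natively.
        implementation("org.xerial.snappy:snappy-java:1.1.8.4")
        implementation("com.github.luben:zstd-jni:1.5.5-2")
    }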
See\n snappy-java and\n zstd-java for details.", - "code": [ - { - "lang": "kotlin", - "value": "// Replace the placeholders with values from your MongoDB deployment's connection string\nval connectionString = ConnectionString(\"mongodb+srv://<username>:<password>@<hostname>/?compressors=snappy,zlib,zstd\")\n\n// Create a new client with your settings\nval mongoClient = MongoClient.create(connectionString)\n" - }, - { - "lang": "kotlin", - "value": "// Replace the placeholder with your MongoDB deployment's connection string\nval uri = \"<connection string>\"\n\nval settings = MongoClientSettings.builder()\n .applyConnectionString(ConnectionString(uri))\n .compressorList(\n listOf(\n MongoCompressor.createSnappyCompressor(),\n MongoCompressor.createZlibCompressor(),\n MongoCompressor.createZstdCompressor())\n )\n .build()\n\n// Create a new client with your settings\nval mongoClient = MongoClient.create(settings)\n" - } - ], - "preview": "The MongoDB Kotlin Driver provides a connection option to compress messages.\nThis reduces the amount of data passed over the network between MongoDB\nand your application.", - "tags": null, - "facets": { - "target_product": ["drivers"], - "programming_language": ["kotlin"] - } - }, - { - "slug": "fundamentals/connection/socks5", - "title": "Connect to MongoDB by Using a SOCKS5 Proxy", - "headings": [ - "Overview", - "SOCKS5 Proxy Settings", - "Examples", - "Specify Proxy Settings in the MongoClientSettings", - "Specify Proxy Settings in the Connection String", - "API Documentation" - ], - "paragraphs": "In this guide, you can learn how to use the MongoDB Kotlin Driver to connect\nto MongoDB by using a SOCKS5 proxy . SOCKS5 is a standardized\nprotocol for communicating with network services through a proxy server. To learn more about the SOCKS5 protocol, see the Wikipedia entry on\n SOCKS . The proxy settings specify the SOCKS5 proxy server address and your\nauthentication credentials. You can specify your settings in an instance of\n MongoClientSettings or in your connection string. The following table describes the SOCKS5 client options: Name Accepted Values Description proxyHost String Specifies the SOCKS5 proxy IPv4 address, IPv6 address, or hostname.\nYou must provide this value to connect to a SOCKS5 proxy. proxyPort Non-negative integer Specifies the TCP port number of the SOCKS5 proxy server. If you\nset a value for proxyHost , this option defaults to 1080 ,\nbut you can specify a different port number. proxyUsername String Specifies the username for authentication to the SOCKS5 proxy server.\nThe driver ignores null and empty string values for this setting.\nThe driver requires that you pass values for both proxyUsername \nand proxyPassword or that you omit both values. proxyPassword String Specifies the password for authentication to the SOCKS5 proxy server.\nThe driver ignores null and empty string values for this setting.\nThe driver requires that you pass values for both proxyUsername \nand proxyPassword or that you omit both values. The following examples show how to instantiate a MongoClient that connects\nto MongoDB by using a SOCKS5 proxy. The proxy settings can be specified in a\n MongoClientSettings instance or a connection string. These examples use\nthe placeholder values described in the SOCKS5 Proxy Settings section.\nReplace the placeholders with your proxy specifications and credentials.
The following code example shows how to specify SOCKS5 proxy settings by\nusing the applyToSocketSettings() builder method when creating a\n MongoClientSettings instance: The following code example shows how to specify SOCKS5 proxy settings in\nyour connection string: To learn more about the methods and types discussed in this guide, see the\nfollowing API documentation: MongoClientSettings.Builder SocketSettings.Builder MongoClient.create() ProxySettings.Builder", - "code": [ - { - "lang": "kotlin", - "value": "val uri = \"<connection string>\"\n\nval mongoClient = MongoClient.create(\n MongoClientSettings.builder()\n .applyConnectionString(ConnectionString(uri))\n .applyToSocketSettings{ builder ->\n builder\n .applyToProxySettings{ proxyBuilder ->\n proxyBuilder\n .host(\"<proxyHost>\")\n .port(\"<proxyPort>\".toInt())\n .username(\"<proxyUsername>\")\n .password(\"<proxyPassword>\")\n .build()\n }\n }\n .build()\n)\n" - }, - { - "lang": "kotlin", - "value": "val connectionString = ConnectionString(\n \"mongodb+srv://<username>:<password>@<hostname>/?\" +\n \"proxyHost=<proxyHost>\" +\n \"&proxyPort=<proxyPort>\" +\n \"&proxyUsername=<proxyUsername>\" +\n \"&proxyPassword=<proxyPassword>\"\n)\n\nval mongoClient = MongoClient.create(connectionString)\n" - } - ], - "preview": "In this guide, you can learn how to use the MongoDB Kotlin Driver to connect\nto MongoDB by using a SOCKS5 proxy. SOCKS5 is a standardized\nprotocol for communicating with network services through a proxy server.", - "tags": "code example, security, connection string", - "facets": { - "genre": ["reference"], - "target_product": ["drivers"], - "programming_language": ["kotlin"] - } - }, - { - "slug": "fundamentals/connection/tls", - "title": "Enable TLS/SSL on a Connection", - "headings": [ - "Overview", - "Enable TLS/SSL", - "Configure Certificates", - "Configure the JVM Trust Store", - "Configure the JVM Key Store", - "Configure a Client-Specific Trust Store and Key Store", - "Disable Hostname Verification", - "Restrict Connections to TLS 1.2 Only", - "Customize TLS/SSL Configuration through the Java SE SSLContext", - "Online Certificate Status Protocol (OCSP)", - "Client-Driven OCSP", - "OCSP Stapling" - ], - "paragraphs": "In this guide, you can learn how to connect to MongoDB instances with the\n TLS/SSL \nsecurity protocol using the underlying TLS/SSL support in the JDK. To\nconfigure your connection to use TLS/SSL, enable the TLS/SSL settings in\neither the ConnectionString \nor MongoClientSettings . If you experience trouble setting up your TLS/SSL connection, you can\nuse the -Djavax.net.debug=all system property to view more\nlog statements. See the Oracle guide to debugging TLS/SSL connections \nfor more information. You can enable TLS/SSL for the connection to your MongoDB instance\nin two different ways: through a parameter in your connection string, or\nusing a method in the MongoClientSettings.Builder class. If you connect by using the DNS seedlist protocol, indicated by the\n mongodb+srv prefix in your connection string, the driver\nautomatically enables TLS/SSL. To disable it, set the tls \nparameter value to false in your connection string, or set the\n enabled property to false in the SslSettings.Builder \nblock when creating a MongoClientSettings instance. To learn more about connection behavior when you use a DNS seedlist,\nsee the SRV Connection Format \nsection in the Server manual.
To enable TLS/SSL on a connection with a ConnectionString , assign the connection string\nparameter tls a value of true in the connection string passed to\n MongoClient.create() : To configure your MongoClient 's TLS/SSL connection options using the\n MongoClientSettings.Builder class, call the\n applyToSslSettings() \nmethod. Set the enabled property to true in the SslSettings.Builder \nblock to enable TLS/SSL: Kotlin applications that initiate TLS/SSL requests require access to\ncryptographic certificates that prove identity for the application\nitself and other applications with which the application\ninteracts. You can configure access to these certificates in your application with\nthe following mechanisms: The JVM Trust Store and JVM Key Store A Client-Specific Trust Store and Key Store The following sections are based on the documentation for Oracle JDK,\nso some parts may be inapplicable to your JDK or to the custom TLS/SSL\nimplementation you use. The JVM trust store saves certificates that securely identify other\napplications with which your Kotlin application interacts. Using these\ncertificates, your application can prove that the connection to another\napplication is genuine and secure from tampering by third parties. If your MongoDB instance uses a certificate that is signed by an\nauthority that is not present in the JRE's default certificate store,\nyour application must configure two system properties to initiate\nSSL/TLS requests. These properties ensure that your application can\nvalidate the TLS/SSL certificate presented by a connected MongoDB instance. You can create a trust store with the keytool \ncommand line tool provided as part of the JDK: By default, the JRE includes many commonly used public certificates\nfrom signing authorities like Let's Encrypt . As a result, you can connect to\ninstances of MongoDB Atlas (or any other\nserver whose certificate is signed by an authority in the JRE's default\ncertificate store) with TLS/SSL without configuring the trust store. javax.net.ssl.trustStore : the path to a trust store containing the\ncertificate of the signing authority javax.net.ssl.trustStorePassword : the password to access the trust\nstore defined in javax.net.ssl.trustStore The JVM key store saves certificates that securely identify your Kotlin\napplication to other applications. Using these certificates, other\napplications can prove that the connection to your application is\ngenuine and secure from tampering by third parties. An application that initiates TLS/SSL requests needs to set two JVM system\nproperties to ensure that the client presents a TLS/SSL certificate to\nthe MongoDB server: You can create a key store with the keytool \nor openssl \ncommand line tool. For more information on configuring a Kotlin application to use TLS/SSL,\nplease see the JSSE Reference Guide . By default, MongoDB instances do not perform client certificate\nvalidation. You must configure the key store if you configured your MongoDB\ninstance to validate client certificates. javax.net.ssl.keyStore : the path to a key store containing the client's\nTLS/SSL certificates javax.net.ssl.keyStorePassword : the password to access the key store\ndefined in javax.net.ssl.keyStore You can configure a client-specific trust store and key store using the\n init() method of the SSLContext class. You can find an example showing how to configure a client with an SSLContext \ninstance in the\n Customize TLS/SSL Configuration with an SSLContext section of this guide . 
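As a minimal sketch of the trust store configuration described above (the path and password are hypothetical), the two system properties can be set in code before the first MongoClient is created, or equivalently passed as -D JVM flags:

    fun main() {
        // Hypothetical values; point these at your actual trust store.
        // Equivalent JVM flags: -Djavax.net.ssl.trustStore=... -Djavax.net.ssl.trustStorePassword=...
        System.setProperty("javax.net.ssl.trustStore", "/path/to/truststore.jks")
        System.setProperty("javax.net.ssl.trustStorePassword", "<trustStorePassword>")
        // Construct your MongoClient after the properties are in place.
    }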
For more information on the SSLContext class, see the API\ndocumentation for SSL Context . By default, the driver ensures that the hostname included in the server's\nTLS/SSL certificates matches the hostnames provided when constructing\na MongoClient . To disable hostname verification for your\napplication, set the\n invalidHostNameAllowed property of the builder to true in the\n applyToSslSettings() builder lambda: Disabling hostname verification can make your configuration\n insecure .\nDisable hostname verification only for testing purposes or\nwhen there is no other alternative. To restrict your application to use only the TLS 1.2 protocol, set the\n jdk.tls.client.protocols system property to \"TLSv1.2\". Java Runtime Environments (JREs) before Java 8 only enabled\nthe TLS 1.2 protocol in update releases. If your JRE has not enabled\nthe TLS 1.2 protocol, upgrade to a later release to connect by using\nTLS 1.2. If your TLS/SSL configuration requires customization, you can\nset the sslContext property of your MongoClient by\npassing an SSLContext \nobject to the builder in the applyToSslSettings() lambda: OCSP is a standard used to check whether X.509 certificates have been\nrevoked. A certificate authority can add an X.509 certificate to the\nCertificate Revocation List (CRL) before the expiry time to invalidate\nthe certificate. When a client sends an X.509 certificate during the TLS\nhandshake, the CA's revocation server checks the CRL and returns a status\nof \"good\", \"revoked\", or \"unknown\". The driver supports the following variations of OCSP: The following sections describe the differences between them and how to enable\nthem for your application. Client-Driven OCSP OCSP Stapling The Kotlin driver uses the JVM arguments configured for the application\nand cannot be overridden for a specific MongoClient instance. In client-driven OCSP, the client sends the certificate in an OCSP request to\nan OCSP responder after receiving the certificate from the server. The OCSP\nresponder checks the status of the certificate with a certificate\nauthority (CA) and reports whether it's valid in a response sent to the\nclient. To enable client-driven OCSP for your application, set the following JVM\nsystem properties: Property Value com.sun.net.ssl.checkRevocation Set this property to true to enable revocation checking. ocsp.enable Set this property to true to enable client-driven OCSP. If the OCSP responder is unavailable, the TLS support provided by the\nJDK reports a \"hard fail\". This differs from the \"soft fail\" behavior of\nthe MongoDB Shell and some other drivers. OCSP stapling is a mechanism in which the server must obtain the signed\ncertificate from the certificate authority (CA) and include it in a\ntime-stamped OCSP response to the client. To enable OCSP stapling for your application, set the following JVM system\nproperties: For more information about OCSP, check out the following resources: Property Description com.sun.net.ssl.checkRevocation Set this property to true to enable revocation checking.
jdk.tls.client.enableStatusRequestExtension Set this property to true to enable OCSP stapling. Oracle JDK 8 Documentation on how to enable OCSP for an application Official IETF specification for OCSP (RFC 6960)", - "code": [ - { - "lang": "kotlin", - "value": "val mongoClient = MongoClient.create(\"mongodb+srv://<username>:<password>@<hostname>/?tls=true\")\n" - }, - { - "lang": "kotlin", - "value": "val settings = MongoClientSettings.builder()\n .applyConnectionString(ConnectionString(\"\"))\n .applyToSslSettings { builder ->\n builder.enabled(true)\n }\n .build()\nval mongoClient = MongoClient.create(settings)\n" - }, - { - "lang": "console", - "value": "keytool -importcert -trustcacerts -file <path to certificate file> \n -keystore <path to trust store> -storepass <password>" - }, - { - "lang": "kotlin", - "value": "val settings = MongoClientSettings.builder()\n .applyConnectionString(ConnectionString(\"\"))\n .applyToSslSettings { builder ->\n builder.enabled(true)\n builder.invalidHostNameAllowed(true)\n }\n .build()\nval mongoClient = MongoClient.create(settings)\n" - }, - { - "lang": "kotlin", - "value": "// You can customize SSL settings using the SSLContext\nval sslContext = SSLContext.getDefault()\n\nval settings = MongoClientSettings.builder()\n .applyToSslSettings { builder ->\n builder.enabled(true)\n builder.context(sslContext)\n }\n .build()\nval mongoClient = MongoClient.create(settings)\n" - } - ], - "preview": "In this guide, you can learn how to connect to MongoDB instances with the\nTLS/SSL\nsecurity protocol using the underlying TLS/SSL support in the JDK. To\nconfigure your connection to use TLS/SSL, enable the TLS/SSL settings in\neither the ConnectionString\nor MongoClientSettings.", - "tags": "code example, security, authentication", - "facets": { - "genre": ["reference"], - "target_product": ["drivers"], - "programming_language": ["kotlin"] - } - }, - { - "slug": "fundamentals/connection", - "title": "Connection Guide", - "headings": ["Overview"], - "paragraphs": "Learn how to set up a connection and specify connection behavior from your\napplication to a MongoDB deployment using the driver in the following\nsections: For information about authenticating with a MongoDB instance,\nsee Authentication Mechanisms and Enterprise Authentication Mechanisms . Connect to MongoDB View a List of Connection Options Specify Connection Behavior with the MongoClient Class Enable Network Compression Enable TLS/SSL on a Connection Connect to MongoDB by Using a SOCKS5 Proxy", - "code": [], - "preview": "Learn how to set up a connection and specify connection behavior from your\napplication to a MongoDB deployment using the driver in the following\nsections:", - "tags": null, - "facets": { - "target_product": ["drivers"], - "programming_language": ["kotlin"] - } - }, - { - "slug": "fundamentals/crud/compound-operations", - "title": "Compound Operations", - "headings": [ - "Overview", - "How to Use Compound Operations", - "Find and Update", - "Example", - "Find and Replace", - "Example", - "Find and Delete", - "Example", - "Avoiding a Race Condition", - "Example With Race Condition", - "Example Without Race Condition" - ], - "paragraphs": "In this guide, you can learn how to perform compound operations with\nthe MongoDB Kotlin driver. Compound operations consist of a read and write operation performed as one\n atomic operation . An atomic operation is an operation which either completes\nentirely, or does not complete at all. Atomic operations cannot partially complete. Atomic operations can help you avoid race conditions in your code.
A\nrace condition occurs when your code's behavior is dependent on the order of\nuncontrollable events. MongoDB supports the following compound operations: If you need to perform more complex tasks atomically, such as reading and\nwriting to more than one document, use transactions . Transactions are a\nfeature of MongoDB and other databases that lets you define an arbitrary\nsequence of database commands as an atomic operation. For more information on atomic operations and atomicity, see\n the MongoDB manual entry for atomicity and transactions . For more information on transactions, see\n the MongoDB manual entry for transactions . Find and update one document Find and replace one document Find and delete one document This section shows how to use each compound operation with the MongoDB Kotlin Driver. The following examples use a collection containing these two sample documents. This data is modeled with the following Kotlin data class: By default, each compound operation returns your found document in the state\nbefore your write operation. You can retrieve your found document in the\nstate after your write operation by using the options class corresponding to\nyour compound operation. You can see an example of this configuration in the\n Find and Replace example below . To find and update one document, use the findOneAndUpdate() method of the\n MongoCollection class. The findOneAndUpdate() method returns your found\ndocument or null if no documents match your query. The following example uses the findOneAndUpdate() method to find a\ndocument with the color field set to \"green\" and update the\n food field in that document to \"pizza\" . The example also uses a FindOneAndUpdateOptions instance to specify the\nfollowing options: For more information on the Projections class, see our\n guide on the Projections builder . For more information on the upsert operation, see our\n guide on upserts . For more information about the methods and classes mentioned in this section,\nsee the following API Documentation: Specify an upsert, which inserts the document specified by the query filter if no documents match the query. Set a maximum execution time of 5 seconds for this operation on the MongoDB\ninstance. If the operation takes longer, the findOneAndUpdate() method\nwill throw a MongoExecutionTimeoutException . findOneAndUpdate() FindOneAndUpdateOptions MongoExecutionTimeoutException To find and replace one document, use the findOneAndReplace() method of the\n MongoCollection class. The findOneAndReplace() method returns your found\ndocument or null if no documents match your query. The following example uses the findOneAndReplace() method to find a\ndocument with the color field set to \"green\" and replace it\nwith the following document: The example also uses a FindOneAndReplaceOptions instance to specify that\nthe returned document should be in the state after our replace operation. For more information about the methods and classes mentioned in this section,\nsee the following API Documentation: findOneAndReplace() FindOneAndReplaceOptions To find and delete one document, use the findOneAndDelete() method of the\n MongoCollection class. The findOneAndDelete() method returns your found\ndocument or null if no documents match your query. The following example uses the findOneAndDelete() method to find and\ndelete the document with the largest value in the _id field. The example uses a FindOneAndDeleteOptions instance to specify a\ndescending sort on the _id field. 
For more information on the Sorts class, see our\n guide on the Sorts builder . For more information about the methods and classes mentioned in this section,\nsee the following API Documentation: findOneAndDelete() FindOneAndDeleteOptions In this section we explore two examples. The first example contains a\nrace condition, and the second example uses a compound operation to\navoid the race condition present in the first example. For both examples, let's imagine that we run a hotel with one room and that we\nhave a small Kotlin program to help us check out this room to a guest. The following document in MongoDB represents the room: This data is modeled with the following Kotlin data class: Let's say our app uses this bookARoomUnsafe method to check out our room to\na guest: Imagine two separate guests, Jan and Pat, try to book the room with this method\nat the same time. Jan sees this output: And Pat sees this output: When we look at our database, we see the following: Pat will be unhappy. When Pat shows up to our hotel, Jan will be\noccupying her room. What went wrong? Here is the sequence of events that happened from the perspective of our MongoDB\ninstance: Notice that for a brief moment Pat had reserved the room, but as Jan's update\noperation was the last to execute, our document has \"Jan\" as the guest. Find and return an empty room for Jan. Find and return an empty room for Pat. Update the room to booked for Pat. Update the room to booked for Jan. Let's use a compound operation to avoid the race condition and\nalways give our users the correct message. Imagine two separate guests, Jan and Pat, try to book the room with this method\nat the same time. Jan sees this output: And Pat sees this output: When we look at our database, we see the following: Pat got the correct message. While she might be sad she didn't get the\nreservation, at least she knows not to travel to our hotel. Here is the sequence of events that happened from the perspective of our MongoDB\ninstance: For information on the Updates class, see our\n guide on the Updates builder . For more information on the Filters class, see our\n guide on the Filters builder . For more information on the findOneAndUpdate() method, see\nthe API Documentation for the MongoCollection class . Find an empty room for Jan and reserve it. Try to find an empty room for Pat and reserve it. When there are no rooms left, return null .
Your MongoDB instance places a write lock on the document you are modifying\nfor the duration of your compound operation.", - "code": [ - { - "lang": "json", - "value": " {\"_id\": 1, \"food\": \"donut\", \"color\": \"green\"}\n {\"_id\": 2, \"food\": \"pear\", \"color\": \"yellow\"}" - }, - { - "lang": "kotlin", - "value": "data class FoodOrder(\n @BsonId val id: Int,\n val food: String,\n val color: String\n)\n" - }, - { - "lang": "kotlin", - "value": "\nval filter = Filters.eq(FoodOrder::color.name, \"green\")\nval update = Updates.set(FoodOrder::food.name, \"pizza\")\nval options = FindOneAndUpdateOptions()\n .upsert(true)\n .maxTime(5, TimeUnit.SECONDS)\n/* The result variable contains your document in the\n state before your update operation is performed\n or null if the document was inserted due to upsert\n being true */\nval result = collection.findOneAndUpdate(filter, update, options)\n\nprintln(result)\n" - }, - { - "lang": "console", - "value": "FoodOrder(id=1, food=donut, color=green)" - }, - { - "lang": "json", - "value": "{\"music\": \"classical\", \"color\": \"green\"}" - }, - { - "lang": "kotlin", - "value": "data class Music(\n @BsonId val id: Int,\n val music: String,\n val color: String\n)\n\nval filter = Filters.eq(FoodOrder::color.name, \"green\")\nval replace = Music(1, \"classical\", \"green\")\nval options = FindOneAndReplaceOptions()\n .returnDocument(ReturnDocument.AFTER)\nval result = collection.withDocumentClass().findOneAndReplace(filter, replace, options)\n\nprintln(result)\n" - }, - { - "lang": "console", - "value": "Music(id=1, music=classical, color=green)" - }, - { - "lang": "kotlin", - "value": "val sort = Sorts.descending(\"_id\")\nval filter = Filters.empty()\nval options = FindOneAndDeleteOptions().sort(sort)\nval result = collection.findOneAndDelete(filter, options)\n\nprintln(result)\n" - }, - { - "lang": "console", - "value": "FoodOrder(id=2, food=pear, color=yellow)" - }, - { - "lang": "json", - "value": " {\"_id\": 1, \"guest\": null, \"room\": \"Blue Room\", \"reserved\": false}" - }, - { - "lang": "kotlin", - "value": "data class HotelRoom(\n @BsonId val id: Int,\n val guest: String? 
= null,\n val room: String,\n val reserved: Boolean = false\n)\n" - }, - { - "lang": "none", - "value": "You got the Blue Room, Jan" - }, - { - "lang": "none", - "value": "You got the Blue Room, Pat" - }, - { - "lang": "json", - "value": " {\"_id\": 1, \"guest\": \"Jan\", \"room\": \"Blue Room\", \"reserved\": true}" - }, - { - "lang": "kotlin", - "value": "suspend fun bookARoomUnsafe(guestName: String) {\n val filter = Filters.eq(\"reserved\", false)\n val myRoom = hotelCollection.find(filter).firstOrNull()\n if (myRoom == null) {\n println(\"Sorry, we are booked, $guestName\")\n return\n }\n\n val myRoomName = myRoom.room\n\n println(\"You got the $myRoomName, $guestName\")\n\n val update = Updates.combine(Updates.set(\"reserved\", true), Updates.set(\"guest\", guestName))\n val roomFilter = Filters.eq(\"_id\", myRoom.id)\n hotelCollection.updateOne(roomFilter, update)\n}\n" - }, - { - "lang": "console", - "value": "You got the Blue Room, Jan" - }, - { - "lang": "console", - "value": "Sorry, we are booked, Pat" - }, - { - "lang": "json", - "value": " {\"_id\": 1, \"guest\": \"Jan\", \"room\": \"Blue Room\", \"reserved\": true}" - }, - { - "lang": "kotlin", - "value": "suspend fun bookARoomSafe(guestName: String) {\n val update = Updates.combine(\n Updates.set(HotelRoom::reserved.name, true),\n Updates.set(HotelRoom::guest.name, guestName)\n )\n val filter = Filters.eq(\"reserved\", false)\n val myRoom = hotelCollection.findOneAndUpdate(filter, update)\n if (myRoom == null) {\n println(\"Sorry, we are booked, $guestName\")\n return\n }\n\n val myRoomName = myRoom.room\n println(\"You got the $myRoomName, $guestName\")\n}\n" - } - ], - "preview": "In this guide, you can learn how to perform compound operations with\nthe MongoDB Kotlin driver.", - "tags": null, - "facets": { - "target_product": ["drivers"], - "programming_language": ["kotlin"] - } - }, - { - "slug": "fundamentals/crud/query-document", - "title": "Specify a Query", - "headings": [ - "Overview", - "Comparison Operators", - "Logical Operators", - "Array Operators", - "Element Operators", - "Evaluation Operators" - ], - "paragraphs": "In this guide, you can learn how to specify a query in the MongoDB Kotlin\ndriver. Most CRUD operations allow you to narrow the set of matched documents by\nspecifying matching criteria in a query filter . Query filters\ncontain one or more query operators that apply to specific fields which\ndetermine which documents to include in the result set. In this page, we cover the following query operators with\nexamples on how to use them: The examples in this guide use the following documents in the\n paint_purchases collection: This data is modeled with the following Kotlin data class: Comparison Operators Logical Operators Array Operators Element Operators Evaluation Operators Comparison operators query data based on comparisons with values in a\ncollection. Common comparison operators include gt() for \"greater\nthan\" comparisons, lte() for \"less than or equal to\" comparisons,\nand ne() for \"not equal to\" comparisons. The following example uses the Filters.gt() method to match all\ndocuments where the value of qty is greater than 7 in the\n paint_purchases collection: Logical operators query data using logic applied to the results of\nfield-level operators. Common logical operators include and() where\nall operators must be true, and or() where at least one of the\noperators must be true.
The following example uses the Filters.and() method to match\ndocuments where the value of qty is less than or equal to 5 and\nthe value of color is not \"pink\" in the paint_purchases \ncollection: Array operators query data based on the value or quantity of elements in\nan array field. The following example uses the Filters.size() method to match\ndocuments where the size of the vendor list is 3 in the\n paint_purchases collection: Element operators query data based on the presence or type of a field. The following example uses the Filters.exists() method to match\ndocuments that have a rating in the paint_purchases collection: Evaluation operators query data on higher level logic, like regex\nand text searches. The following example uses the Filters.regex() method to match\ndocuments that have a color ending with the letter \"k\" in the\n paint_purchases collection: For more information about the operators mentioned in this guide,\nsee the following Server Manual Entries: Query Operators Comparison Operators Logical Operators Array Operators Element Operators Evaluation Operators", - "code": [ - { - "lang": "json", - "value": "{ \"_id\": 1, \"color\": \"red\", \"qty\": 9, \"vendor\": [\"A\", \"E\"] }\n{ \"_id\": 2, \"color\": \"purple\", \"qty\": 8, \"vendor\": [\"B\", \"D\", \"F\"], \"rating\": 5 }\n{ \"_id\": 3, \"color\": \"blue\", \"qty\": 5, \"vendor\": [\"A\", \"E\"] }\n{ \"_id\": 4, \"color\": \"white\", \"qty\": 6, \"vendor\": [\"D\"], \"rating\": 9 }\n{ \"_id\": 5, \"color\": \"yellow\", \"qty\": 4, \"vendor\": [\"A\", \"B\"] }\n{ \"_id\": 6, \"color\": \"pink\", \"qty\": 3, \"vendor\": [\"C\"] }\n{ \"_id\": 7, \"color\": \"green\", \"qty\": 8, \"vendor\": [\"C\", \"E\"], \"rating\": 7 }\n{ \"_id\": 8, \"color\": \"black\", \"qty\": 7, \"vendor\": [\"A\", \"C\", \"D\"] }" - }, - { - "lang": "kotlin", - "value": "data class PaintOrder(\n @BsonId val id: Int,\n val qty: Int,\n val color: String,\n val vendor: List<String>,\n val rating: Int?
= null\n)\n" - }, - { - "lang": "kotlin", - "value": "val filter = Filters.gt(\"qty\", 7)\nval findFlow = collection.find(filter)\nfindFlow.collect { println(it) }\n" - }, - { - "lang": "console", - "value": "PaintOrder(id=1, qty=9, color=red, vendor=[A, E], rating=null)\nPaintOrder(id=2, qty=8, color=purple, vendor=[B, D, F], rating=5)\nPaintOrder(id=7, qty=8, color=green, vendor=[C, E], rating=7)" - }, - { - "lang": "kotlin", - "value": "val filter = Filters.and(Filters.lte(\"qty\", 5), Filters.ne(\"color\", \"pink\"))\nval findFlow = collection.find(filter)\nfindFlow.collect { println(it) }\n" - }, - { - "lang": "console", - "value": "PaintOrder(id=3, qty=5, color=blue, vendor=[A, E], rating=null)\nPaintOrder(id=5, qty=4, color=yellow, vendor=[A, B], rating=null)" - }, - { - "lang": "kotlin", - "value": "val filter = Filters.size(\"vendor\", 3)\nval findFlow = collection.find(filter)\nfindFlow.collect { println(it) }\n" - }, - { - "lang": "console", - "value": "PaintOrder(id=2, qty=8, color=purple, vendor=[B, D, F], rating=5)\nPaintOrder(id=8, qty=7, color=black, vendor=[A, C, D], rating=null)" - }, - { - "lang": "kotlin", - "value": "val filter = Filters.exists(\"rating\")\nval findFlow = collection.find(filter)\nfindFlow.collect { println(it) }\n" - }, - { - "lang": "console", - "value": "PaintOrder(id=2, qty=8, color=purple, vendor=[B, D, F], rating=5)\nPaintOrder(id=4, qty=6, color=white, vendor=[D], rating=9)\nPaintOrder(id=7, qty=8, color=green, vendor=[C, E], rating=7)" - }, - { - "lang": "kotlin", - "value": "val filter = Filters.regex(\"color\", \"k$\")\nval findFlow = collection.find(filter)\nfindFlow.collect { println(it) }\n" - }, - { - "lang": "console", - "value": "PaintOrder(id=6, qty=3, color=pink, vendor=[C], rating=null)\nPaintOrder(id=8, qty=7, color=black, vendor=[A, C, D], rating=null)" - } - ], - "preview": "In this guide, you can learn how to specify a query in the MongoDB Kotlin\ndriver.", - "tags": null, - "facets": { - "target_product": ["drivers"], - "programming_language": ["kotlin"] - } - }, - { - "slug": "fundamentals/crud/read-operations/change-streams", - "title": "Open Change Streams", - "headings": [ - "Overview", - "Open a Change Stream", - "Example", - "Apply Aggregation Operators to your Change Stream", - "Example", - "Split Large Change Stream Events", - "Include Pre-images and Post-images", - "Create a Collection with Pre-Image and Post-Images Enabled", - "Pre-image Configuration Example", - "Post-image Configuration Example" - ], - "paragraphs": "In this guide, you can learn how to use a change stream to monitor\nreal-time changes to your database. A change stream is a MongoDB server\nfeature that allows your application to subscribe to data changes on a single\ncollection, database, or deployment. You can specify a set of aggregation\noperators to filter and transform the data your application receives.\nWhen connecting to MongoDB v6.0 or later, you can configure the events\nto include the document data before and after the change. Learn how to open and configure your change streams in the following\nsections: Open a Change Stream Apply Aggregation Operators to your Change Stream Split Large Change Stream Events Include Pre-images and Post-images You can open a change stream to subscribe to specific types of data changes\nand produce change events in your application. To open a change stream, call the watch() method on an instance of a\n MongoCollection , MongoDatabase , or MongoClient . 
The object on which you call the watch() method determines the scope of\nevents that the change stream listens for. If you call watch() on a MongoCollection , the change stream monitors\na collection. If you call watch() on a MongoDatabase , the change stream monitors all\ncollections in that database. If you call watch() on a MongoClient , the change stream monitors all\nchanges in the connected MongoDB deployment. Standalone MongoDB deployments don't support change streams because\nthe feature requires a replica set oplog. To learn more about the oplog,\nsee the Replica Set Oplog server manual page. The following code example shows how to open a change stream and print\nchange stream events whenever the data in the collection changes: An insert operation on the collection should produce output similar to the\nfollowing text: For a runnable example, see the Watch for Changes usage example page. To learn more about the watch() method, see the following API\ndocumentation: MongoCollection.watch() MongoDatabase.watch() MongoClient.watch() You can pass an aggregation pipeline as a parameter to the watch() method\nto specify which change events the change stream receives. To learn which aggregation operators your MongoDB server version supports, see\n Modify Change Stream Output . The following code example shows how you can apply an aggregation pipeline to\nconfigure your change stream to receive change events for only insert and\nupdate operations: When the change stream receives an update change event, the preceding code\nexample outputs the following text: When connecting to MongoDB v7.0 or later,\nyou can use the $changeStreamSplitLargeEvent aggregation operator to\nsplit event documents that exceed 16 MB into smaller fragments. Use the $changeStreamSplitLargeEvent operator only when you expect\nthe change stream events to exceed the document size limit. For\nexample, you might use this feature if your application requires full\ndocument pre-images or post-images. A $changeStreamSplitLargeEvent aggregation stage returns\nfragments sequentially. You can access the fragments by using a change\nstream cursor. Each fragment document includes a splitEvent object that\ncontains the following fields: The following example opens a change stream that includes an aggregation\npipeline with a $changeStreamSplitLargeEvent aggregation stage to\nsplit large events: To learn more about the $changeStreamSplitLargeEvent aggregation operator,\nsee $changeStreamSplitLargeEvent (aggregation) in the\nServer manual. Field Description fragment The index of the fragment, starting at 1 of The total number of fragments that compose the split event You can have only one $changeStreamSplitLargeEvent stage in your\naggregation pipeline, and it must be the last stage in the pipeline. You can configure the change event to contain or omit the following data: To receive change stream events that include a pre-image or post-image, you\nmust connect to a MongoDB v6.0 or later deployment and set up the following: The pre-image which is a document that represents the version of the\ndocument before the operation if it exists The post-image which is a document that represents the version of the\ndocument after the operation if it exists Enable pre-images and post-images for the collection on your MongoDB\ndeployment. To learn how to enable these on your deployment, see the\n Change Streams with Document Pre- and Post-Images \nMongoDB server manual page.
To learn how to instruct the driver to create a collection with pre-images\nand post-images enabled, see the Create a Collection with Pre-Image and Post-Images Enabled \nsection. Configure your change stream to retrieve either or both the pre-images and\npost-images. To configure your change stream to include the pre-image, see\nthe Pre-image Configuration Example . To configure your change stream to include the post-image, see the\n Post-image Configuration Example . To create a collection with the pre-image and post-image option using the\ndriver, specify an instance of ChangeStreamPreAndPostImagesOptions \nand call the createCollection() method as shown in the following example: You can change the pre-image and post-image option in an existing collection\nby running the collMod command from the MongoDB Shell. To learn how to\nperform this operation, see the collMod \nserver manual documentation. When you modify this option on a collection, any change streams open on\nthat collection in your application may fail if configured to require\nreceiving the pre-image or post-image. The following code example shows how you can configure a change stream to\ninclude the pre-image and output the results: The preceding example configures the change stream to use the\n FullDocumentBeforeChange.REQUIRED option. This configures the change\nstream to return pre-images for replace, update, and delete change events and\nfor the server to raise an error if the pre-image is unavailable. Suppose an application updated the latestVersion field of a document in a\ncollection of software library dependencies from the value of 2.0.0 to\n 2.1.0 . The corresponding change event output by the preceding code example\nshould resemble the following text: For a list of options, see the FullDocumentBeforeChange \nAPI documentation. The following code example shows how you can configure a change stream to\ninclude the post-image and output the results: The preceding example configures the change stream to use the\n FullDocument.UPDATE_LOOKUP option. This configures the change\nstream to return both the deltas between the original and changed document\nand a copy of the document at some point in time after the change occurred. Suppose an application updated the population field of a document from\nthe value of 800 to 950 in a collection of city census data. The\ncorresponding change event output by the preceding code example should\nresemble the following text: For a list of options, see the FullDocument \nAPI documentation.", - "code": [ - { - "lang": null, - "value": "Received a change event: ChangeStreamDocument{\n operationType='insert',\n resumeToken={\"_data\": \"825EC...\"},\n namespace=myDb.myChangeStreamCollection,\n ...\n}" - }, - { - "lang": "kotlin", - "value": "\n// Launch the change stream in a separate coroutine,\n// so you can cancel it later.\nval job = launch {\n val changeStream = collection.watch()\n changeStream.collect {\n println(\"Received a change event: $it\")\n }\n}\n\n// Perform MongoDB operations that trigger change events...\n\n// Cancel the change stream when you're done listening for events.\njob.cancel()\n" - }, - { - "lang": "text", - "value": "Received a change event: ChangeStreamDocument{\noperationType=update,\nresumeToken={...},\n..." 
- }, - { - "lang": "kotlin", - "value": "val pipeline = listOf(\n Aggregates.match(Filters.`in`(\"operationType\",\n listOf(\"insert\", \"update\")))\n)\n\n// Launch the change stream in a separate coroutine,\n// so you can cancel it later.\nval job = launch {\n val changeStream = collection.watch(pipeline)\n changeStream.collect {\n println(\"Received a change event: $it\")\n }\n}\n\n// Perform MongoDB operations that trigger change events...\n\n// Cancel the change stream when you're done listening for events.\njob.cancel()\n" - }, - { - "lang": "kotlin", - "value": "val pipeline = listOf(BsonDocument().append(\"\\$changeStreamSplitLargeEvent\", BsonDocument()))\n\nval job = launch {\n val changeStream = collection.watch(pipeline)\n changeStream.collect {\n println(\"Received a change event: $it\")\n }\n}\n" - }, - { - "lang": "kotlin", - "value": "val collectionOptions = CreateCollectionOptions()\ncollectionOptions.changeStreamPreAndPostImagesOptions(ChangeStreamPreAndPostImagesOptions(true))\ndatabase.createCollection(\"myChangeStreamCollection\", collectionOptions)\n" - }, - { - "lang": "text", - "value": "Received a change event: ChangeStreamDocument{\n operationType=update,\n resumeToken={...}\n namespace=software.libraries,\n destinationNamespace=null,\n fullDocument=null,\n fullDocumentBeforeChange=Document{{_id=6388..., latestVersion=2.0.0, ...}},\n ..." - }, - { - "lang": "kotlin", - "value": "val job = launch {\n val changeStream = collection.watch()\n .fullDocumentBeforeChange(FullDocumentBeforeChange.REQUIRED)\n changeStream.collect {\n println(it)\n }\n}\n// Perform MongoDB operations that trigger change events...\n\n// Cancel the change stream when you're done listening for events.\njob.cancel()\n" - }, - { - "lang": "text", - "value": "Received a change event: ChangeStreamDocument{\n operationType=update,\n resumeToken={...},\n namespace=censusData.cities,\n destinationNamespace=null,\n fullDocument=Document{{_id=6388..., city=Springfield, population=950, ...}},\n updatedFields={\"population\": 950}, ...\n ..." - }, - { - "lang": "kotlin", - "value": "val job = launch {\n val changeStream = collection.watch()\n .fullDocument(FullDocument.UPDATE_LOOKUP)\n changeStream.collect {\n println(it)\n }\n}\n\n// Perform MongoDB operations that trigger change events...\n\n// Cancel the change stream when you're done listening for events.\njob.cancel()\n" - } - ], - "preview": "In this guide, you can learn how to use a change stream to monitor\nreal-time changes to your database. A change stream is a MongoDB server\nfeature that allows your application to subscribe to data changes on a single\ncollection, database, or deployment. You can specify a set of aggregation\noperators to filter and transform the data your application receives.\nWhen connecting to MongoDB v6.0 or later, you can configure the events\nto include the document data before and after the change.", - "tags": "code example, monitoring, aggregation", - "facets": { - "genre": ["reference"], - "target_product": ["drivers"], - "programming_language": ["kotlin"] - } - }, - { - "slug": "fundamentals/crud/read-operations/flow", - "title": "Access Data From a Flow", - "headings": [ - "Overview", - "Terminal Methods", - "Find the First Document", - "Count Number of Results", - "Convert Results to a List", - "Iterate through Results", - "Explain the Query" - ], - "paragraphs": "In this guide, you can learn how to access data using a Flow with the\nMongoDB Kotlin driver. 
A Flow is a data type built into Kotlin coroutines that represents a stream\nof values that are being computed asynchronously. The Kotlin coroutine driver\nuses flows to represent the results of database read operations. This page uses an initiating method, find() , to show how to access\ndata from a FindFlow . The find() method creates and returns an instance of a\n FindFlow . A FindFlow allows you to browse the documents\nmatched by your search criteria and to further specify which documents\nto see by setting parameters through methods. The following ways to access and store data apply to\nother iterables such as an AggregateFlow . Terminal methods execute an operation on the MongoDB server after\nconfiguring all parameters of a Flow instance controlling the\noperation. Use the firstOrNull() method to retrieve the first document in your query\nresults or null if there are no results: Alternatively, you can use the first() method to retrieve the first document\nin your query or throw a NoSuchElementException if there are no results: These methods are often used when your query filter will match one\ndocument, such as when filtering by a unique index. Use the count() method to retrieve the number of results in the query: Use the toList() method to store your query results in a List : This method is often used when your query filter returns a small number\nof documents that can fit into available memory. Use the collect() method to iterate through fetched documents and\nensure that the flow closes if there is an early termination: Use the explain() method to view information about how MongoDB\nexecutes your operation. The explain() method returns execution plans and performance\nstatistics. An execution plan is a potential way MongoDB\ncan complete an operation. The explain() method provides both the\nwinning plan (the plan MongoDB executed) and rejected plans. The following example prints the JSON representation of the\nwinning plan for aggregation stages that produce execution plans: For more information on the explain operation, see the following\nServer Manual Entries: For more information about the methods and classes mentioned in this section,\nsee the following API Documentation: You can specify the level of detail of your explanation by passing a\nverbosity level to the explain() method. The following table shows all verbosity levels for explanations and\ntheir intended use cases: Verbosity Level Use Case ALL_PLANS_EXECUTIONS You want to know which plan MongoDB will choose to run your query. EXECUTION_STATS You want to know if your query is performing well. QUERY_PLANNER You have a problem with your query and you want as much information\nas possible to diagnose the issue.
Explain Output Query Plans collect() explain() ExplainVerbosity", - "code": [ - { - "lang": "kotlin", - "value": "val resultsFlow = collection.find()\nval firstResultOrNull = resultsFlow.firstOrNull()\n" - }, - { - "lang": "kotlin", - "value": "try {\n val resultsFlow = collection.find()\n val firstResult = resultsFlow.first()\n} catch (e: NoSuchElementException) {\n println(\"No results found\")\n}\n" - }, - { - "lang": "kotlin", - "value": "val resultsFlow = collection.find()\nval count = resultsFlow.count()\n" - }, - { - "lang": "kotlin", - "value": "val resultsFlow = collection.find()\nval results = resultsFlow.toList()\n" - }, - { - "lang": "kotlin", - "value": "val resultsFlow = collection.find()\nresultsFlow.collect { println(it) }\n" - }, - { - "lang": "kotlin", - "value": "val explanation = collection.find().explain(ExplainVerbosity.EXECUTION_STATS)\nval jsonSummary = explanation.getEmbedded(\n listOf(\"queryPlanner\", \"winningPlan\"),\n Document::class.java\n).toJson()\nprintln(jsonSummary)\n" - }, - { - "lang": "json", - "value": "{ \"stage\": \"COLLSCAN\", \"direction\": \"forward\" }" - } - ], - "preview": "In this guide, you can learn how to access data using a Flow with the\nMongoDB Kotlin driver.", - "tags": null, - "facets": { - "target_product": ["drivers"], - "programming_language": ["kotlin"] - } - }, - { - "slug": "fundamentals/crud/read-operations/geo", - "title": "Search Geospatially", - "headings": [ - "Overview", - "Coordinates on Earth", - "GeoJSON Positions", - "GeoJSON Types", - "Index", - "Coordinates on a 2D Plane", - "Index", - "Geospatial Queries", - "Query Operators", - "Query Parameters", - "Examples", - "Query by Proximity", - "Query Within a Range" - ], - "paragraphs": "In this guide, you can learn how to search geospatial data with the\nMongoDB Kotlin Driver, and the different geospatial data formats supported by MongoDB. Geospatial data is data that represents a geographical location on\nthe surface of the Earth. Examples of geospatial data include: Locations of movie theaters Borders of countries Routes of bicycle rides Dog exercise areas in New York City To store and query your geospatial data in MongoDB, use GeoJSON . GeoJSON is\na data format created by the Internet Engineering Task Force (IETF). Here is the location of MongoDB headquarters in GeoJSON: For definitive information on GeoJSON, see the\n official IETF specification . A position represents a single place on Earth, and exists in code as an array\ncontaining two or three number values: Longitude in the first position (required) Latitude in the second position (required) Elevation in the third position (optional) GeoJSON orders coordinates as longitude first and latitude second. This may\nbe surprising as geographic coordinate system conventions generally list\nlatitude first and longitude second. Make sure to check what format any other\ntools you are working with use. Popular tools such as OpenStreetMap and Google\nMaps list coordinates as latitude first and longitude second. Your GeoJSON object's type determines its geometric shape. Geometric shapes are\nmade up of positions. Here are some common GeoJSON types and how you can specify them with positions: To learn more about the shapes you can use in MongoDB, see the\n GeoJSON manual entry . Point : a single position. This could represent the location of a\n sculpture . LineString : an array of two or more positions, thus forming a series of line\nsegments. This could represent\n the route of the Great Wall of China . 
Polygon : an array of positions in which the first and last\nposition are the same, thus enclosing some space. This could represent\n the land within Vatican City . To query data stored in the GeoJSON format, add the field containing\nGeoJSON data to a 2dsphere index. The following snippet creates a\n 2dsphere index on the location.geo field using the Indexes builder: For more information on the Indexes builder, see our\n guide on the Indexes builder . You can store geospatial data using x and y coordinates on\na two-dimensional Euclidean plane. We refer to coordinates on a two-dimensional\nplane as \"legacy coordinate pairs\". Legacy coordinate pairs have the following structure: Your field should contain an array of two values in which the first represents\nthe x axis value and the second represents the y axis value. To query data stored as legacy coordinate pairs, you must add the field containing\nlegacy coordinate pairs to a 2d index. The following snippet creates a\n 2d index on the coordinates field using the Indexes builder: For more information on the Indexes builder, see our\n guide on the Indexes builder . For more information on legacy coordinate pairs, see the\n MongoDB server manual page on legacy coordinate pairs . Spherical ( 2dsphere ) and flat ( 2d ) indexes support some, but\nnot all, of the same query operators. For a full list of operators\nand their index compatibility, see the\n manual entry for geospatial queries . Geospatial queries consist of a query operator and GeoJSON shapes as query\nparameters. To query your geospatial data, use one of the following query operators: You can specify these query operators in the MongoDB Kotlin driver with the\n near() , geoWithin() , nearSphere() , and geoIntersects() utility\nmethods of the Filters builder class. For more information on geospatial query operators, see the\n manual entry for geospatial queries . For more information on Filters , see our\n guide on the Filters builder . $near $geoWithin $nearSphere $geoIntersects requires a 2dsphere index To specify a shape to use in a geospatial query, use the\n Position , Point , LineString , and Polygon classes of the MongoDB\nKotlin driver. For a full list of the GeoJSON shapes available in the MongoDB Kotlin driver, see the\n GeoJSON package \nAPI Documentation. The following examples use the MongoDB Atlas sample dataset. You can learn how\nto set up your own free-tier Atlas cluster and how to load the sample dataset\nin our quick start guide . The examples use the theaters collection in the sample_mflix database\nfrom the sample dataset. The examples require the following imports: The data is modeled using the following Kotlin data class: The results are modeled using the following Kotlin data class: The theaters collection already contains a 2dsphere index on the\n \"${Theater::location.name}.${Theater.Location::geo.name}\" field. To search for and return documents from nearest to farthest from a point, use\nthe near() static utility method of the Filters builder class. The\n near() method constructs a query with the $near query operator. The following example queries for theaters between 5,000 and 10,000\nmeters from the Great Lawn of Central Park: For more information on the $near operator, see the\n reference documentation for $near . For more information on Filters , see\n our guide on the Filters builder . MongoDB uses the\n same reference system \nas GPS satellites to calculate geometries over the Earth. 
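The guide names nearSphere() and geoIntersects() alongside near() and geoWithin() but does not show them in action; the following is a minimal sketch of geoIntersects(), assuming the theaters collection and its 2dsphere-indexed location.geo field used in this guide's examples (the route coordinates are hypothetical):

    import com.mongodb.client.model.Filters
    import com.mongodb.client.model.geojson.LineString
    import com.mongodb.client.model.geojson.Position

    // Hypothetical route across the city; the query matches theaters whose
    // indexed geometry intersects the line. Coordinates list longitude first.
    val route = LineString(
        listOf(
            Position(-74.0, 40.7),
            Position(-73.9, 40.8)
        )
    )
    val intersectsQuery = Filters.geoIntersects("location.geo", route)
    val resultsFlow = collection.find(intersectsQuery)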
To search for geospatial data within a specified shape, use the geoWithin() \nstatic utility method of the Filters builder class. The geoWithin() \nmethod constructs a query with the $geoWithin query operator. The following example searches for movie theaters in a section of Long Island. The following figure shows the polygon defined by the\n longIslandTriangle variable and dots representing the locations of\nthe movie theaters returned by our query. For more information on the $geoWithin operator, see the\n reference documentation for $geoWithin For more information on the operators you can use in your query, see the\n MongoDB server manual page on geospatial query operators", - "code": [ - { - "lang": "json", - "value": "\"MongoDB Headquarters\" : {\n \"type\": \"Point\",\n \"coordinates\": [-73.986805, 40.7620853]\n}" - }, - { - "lang": "kotlin", - "value": "collection.createIndex((Indexes.geo2dsphere(\"location.geo\")))\n" - }, - { - "lang": "json", - "value": "\"<field name>\" : [ x, y ]" - }, - { - "lang": "kotlin", - "value": "collection.createIndex((Indexes.geo2d(\"coordinates\")))\n" - }, - { - "lang": null, - "value": "import com.mongodb.client.model.geojson.Point\nimport com.mongodb.client.model.geojson.Polygon\nimport com.mongodb.client.model.geojson.Position\nimport com.mongodb.client.model.Filters.near\nimport com.mongodb.client.model.Filters.geoWithin\nimport com.mongodb.client.model.Projections.fields\nimport com.mongodb.client.model.Projections.include\nimport com.mongodb.client.model.Projections.excludeId" - }, - { - "lang": "kotlin", - "value": "data class Theater(\n val theaterId: Int,\n val location: Location\n) {\n data class Location(\n val address: Address,\n val geo: Point\n ) {\n data class Address(\n val street1: String,\n val street2: String? 
= null,\n val city: String,\n val state: String,\n val zipcode: String\n )\n }\n}\n" - }, - { - "lang": "kotlin", - "value": "data class TheaterResults(\n val location: Location\n) {\n data class Location(\n val address: Address\n ) {\n data class Address(\n val city: String\n )\n }\n}\n\n" - }, - { - "lang": "kotlin", - "value": "val database = client.getDatabase(\"sample_mflix\")\nval collection = database.getCollection(\"theaters\")\nval centralPark = Point(Position(-73.9667, 40.78))\nval query = Filters.near(\n \"${Theater::location.name}.${Theater.Location::geo.name}\", centralPark, 10000.0, 5000.0\n)\nval projection = Projections.fields(\n Projections.include(\n \"${Theater::location.name}.${Theater.Location::address.name}.${Theater.Location.Address::city.name}\"),\n Projections.excludeId()\n)\nval resultsFlow = collection.find(query).projection(projection)\n\nresultsFlow.collect { println(it) }\n" - }, - { - "lang": "console", - "value": "TheaterResults(location=Location(address=Address(city=Bronx)))\nTheaterResults(location=Location(address=Address(city=New York)))\nTheaterResults(location=Location(address=Address(city=New York)))\nTheaterResults(location=Location(address=Address(city=Long Island City)))\nTheaterResults(location=Location(address=Address(city=New York)))\nTheaterResults(location=Location(address=Address(city=Secaucus)))\nTheaterResults(location=Location(address=Address(city=Jersey City)))\nTheaterResults(location=Location(address=Address(city=Elmhurst)))\nTheaterResults(location=Location(address=Address(city=Flushing)))\nTheaterResults(location=Location(address=Address(city=Flushing)))\nTheaterResults(location=Location(address=Address(city=Flushing)))\nTheaterResults(location=Location(address=Address(city=Elmhurst)))" - }, - { - "lang": "kotlin", - "value": "val longIslandTriangle = Polygon(\n listOf(\n Position(-72.0, 40.0),\n Position(-74.0, 41.0),\n Position(-72.0, 39.0),\n Position(-72.0, 40.0)\n )\n)\nval projection = Projections.fields(\n Projections.include(\n \"${Theater::location.name}.${Theater.Location::address.name}.${Theater.Location.Address::city.name}\"),\n Projections.excludeId()\n)\nval geoWithinComparison = Filters.geoWithin(\n \"${Theater::location.name}.${Theater.Location::geo.name}\", longIslandTriangle\n)\nval resultsFlow = collection.find(geoWithinComparison)\n .projection(projection)\n\nresultsFlow.collect { println(it) }\n" - }, - { - "lang": "console", - "value": "TheaterResults(location=Location(address=Address(city=Baldwin))))\nTheaterResults(location=Location(address=Address(city=Levittown)))\nTheaterResults(location=Location(address=Address(city=Westbury)))\nTheaterResults(location=Location(address=Address(city=Mount Vernon)))\nTheaterResults(location=Location(address=Address(city=Massapequa)))" - } - ], - "preview": "In this guide, you can learn how to search geospatial data with the\nMongoDB Kotlin Driver, and the different geospatial data formats supported by MongoDB.", - "tags": null, - "facets": { - "target_product": ["drivers"], - "programming_language": ["kotlin"] - } - }, - { - "slug": "fundamentals/crud/read-operations/limit", - "title": "Limit the Number of Returned Results", - "headings": [ - "Overview", - "Sample Documents", - "Specify a Limit", - "Combining Skip and Limit" - ], - "paragraphs": "In this guide, you can learn how to limit the number of results returned\nfrom read operations with the MongoDB Kotlin driver. 
Use limit() to cap the number of documents that a read operation returns.\nThis instance method designates the maximum number of\ndocuments that a read operation can return. If there are not enough documents\nto reach the specified limit, it can return a smaller number.\nIf you use limit() with the skip() instance method, the skip applies\nfirst and the limit only applies to the documents left over after\nthe skip. For more information on the skip() method, see our\n guide on Skipping Returned Documents . The following examples demonstrate, respectively, how to insert data into\na collection, how to use limit() to restrict the number of returned documents,\nand how to combine limit() with skip() to further narrow the results returned from a query. The following sections feature examples that query these sample documents: This data is modeled with the following Kotlin data class: The next example queries the collection to return the top three\nlongest books. It first matches all the documents with the query, then sorts on the\n length field to return books with longer lengths before\nbooks with shorter lengths. Lastly, it limits the return value to 3 documents,\nand returns the following three documents, sorted by length: The order in which you call limit() and sort() does not matter\nbecause the find command always applies the sort first and the\nlimit after it. The following two calls are equivalent: To see the next three longest books, append the skip() method to your\n find() call. The integer argument passed to skip() determines\nhow many documents the find operation omits from the beginning of the result set. This operation returns the\ndocuments that describe the fourth through sixth longest books: You can combine skip() and limit() in this way to implement paging for your\ncollection, returning only small subsets of the collection at one time. For more information about the methods and classes mentioned in this guide,\nsee the following API Documentation: In order to ensure stable sorts across multiple queries, you must sort\nusing a unique key (such as _id ). Otherwise, a call to skip() \nand limit() may produce unpredictable results when combined with\n sort() . For example, consider the following data: If you sorted by type alone, sort() does not guarantee the same order\nupon return. Appending skip() and limit() to the sort() \ncould return different documents for different queries. In this case, sorting\nby data or serial_no would guarantee a stable sort, as both are unique keys. 
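As a sketch of the paging pattern just described, combining a stable sort with skip() and limit(); the pageOfBooks() helper is hypothetical and assumes the Book collection from the sample documents, with collection bound as in the other examples:

    import com.mongodb.client.model.Sorts

    // One page of books, longest first. The ascending _id tiebreaker
    // keeps the order stable when two books share the same length.
    fun pageOfBooks(pageNumber: Int, pageSize: Int) =
        collection.find()
            .sort(Sorts.orderBy(Sorts.descending("length"), Sorts.ascending("_id")))
            .skip(pageNumber * pageSize)
            .limit(pageSize)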
FindFlow.collect() MongoCollection.find()", - "code": [ - { - "lang": "json", - "value": "{ \"_id\": 1, \"title\": \"The Brothers Karamazov\", \"author\": \"Dostoyevsky\", \"length\": 824 }\n{ \"_id\": 2, \"title\": \"Les Mis\u00e9rables\", \"author\": \"Hugo\", \"length\": 1462 }\n{ \"_id\": 3, \"title\": \"Atlas Shrugged\", \"author\": \"Rand\", \"length\": 1088 }\n{ \"_id\": 4, \"title\": \"Infinite Jest\", \"author\": \"Wallace\", \"length\": 1104 }\n{ \"_id\": 5, \"title\": \"Cryptonomicon\", \"author\": \"Stephenson\", \"length\": 918 }\n{ \"_id\": 6, \"title\": \"A Dance with Dragons\", \"author\": \"Martin\", \"length\": 1104 }" - }, - { - "lang": "kotlin", - "value": "data class Book(\n @BsonId val id: Int,\n val title: String,\n val author: String,\n val length: Int\n)\n" - }, - { - "lang": "kotlin", - "value": "val results = collection.find()\n .sort(descending(\"length\"))\n .limit(3)\n\nresults.collect { println(it) }\n" - }, - { - "lang": "console", - "value": " Book(id=2, title=Les Mis\u00e9rables, author=Hugo, length=1462)\n Book(id=6, title=A Dance with Dragons, author=Martin, length=1104)\n Book(id=4, title=Infinite Jest, author=Wallace, length=1104)" - }, - { - "lang": "kotlin", - "value": " collection.find().sort(descending(\"length\")).limit(3)\n collection.find().limit(3).sort(descending(\"length\"))\n" - }, - { - "lang": "kotlin", - "value": "val results = collection.find()\n .sort(descending(\"length\"))\n .skip(3)\n .limit(3)\n\nresults.collect { println(it) }\n" - }, - { - "lang": "console", - "value": " Book(id=3, title=Atlas Shrugged, author=Rand, length=1088)\n Book(id=5, title=Cryptonomicon, author=Stephenson, length=918)\n Book(id=1, title=The Brothers Karamazov, author=Dostoyevsky, length=824)" - }, - { - "lang": "json", - "value": "{ type: \"computer\", data: \"1\", serial_no: 235235 }\n{ type: \"computer\", data: \"2\", serial_no: 235237 }\n{ type: \"computer\", data: \"3\", serial_no: 235239 }\n{ type: \"computer\", data: \"4\", serial_no: 235241 }" - } - ], - "preview": "In this guide, you can learn how to limit the number of results returned\nfrom read operations with the MongoDB Kotlin driver.", - "tags": null, - "facets": { - "target_product": ["drivers"], - "programming_language": ["kotlin"] - } - }, - { - "slug": "fundamentals/crud/read-operations/project", - "title": "Specify Which Fields to Return", - "headings": ["Overview", "Behavior", "Explanation"], - "paragraphs": "In this guide, you can learn how to control which fields appear in\ndocuments returned from read operations with the MongoDB Kotlin driver. Many read requests require only a subset of fields in a document.\nFor example, when logging a user in, you may only need their username, and\nnot all of their profile information. By default, queries in MongoDB return\nall fields in matching documents. You can use a projection to return\nonly the data you need. A projection is a document that instructs MongoDB which fields of a\ndocument to return. Use the Projections class\nto construct a projection document. Projections work in two ways: These two methods of projection are mutually exclusive: if you\nexplicitly include fields, you cannot explicitly exclude fields, and\nvice versa. Explicitly including fields. This has the side-effect of implicitly\nexcluding all unspecified fields. Explicitly excluding fields. This has the side-effect of implicitly\nincluding all unspecified fields. The _id field is not subject to these mechanics. 
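Because the page's examples only demonstrate inclusion, here is a minimal sketch of an exclusion projection; the qty and rating field names are taken from the fruit collection introduced next, and collection is assumed to be bound as in the other examples:

    import com.mongodb.client.model.Filters
    import com.mongodb.client.model.Projections

    // Explicitly exclude qty and rating; every unspecified field,
    // including name and _id, is then implicitly included.
    val exclusionProjection = Projections.exclude("qty", "rating")
    val flowResults = collection.find(Filters.empty())
        .projection(exclusionProjection)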
You must\nexplicitly exclude the _id field if you do not want it returned.\nYou can exclude the _id field even if you have specified certain\nfields to include. Consider the following collection containing documents that describe\nvarieties of fruit: This data is modeled using the following Kotlin data class: In the following query, pass the projection to return the name \nfield of each document. The results are modeled using the FruitName Kotlin data class: The projection document specifies that the read operation result should\n include the name field of each returned document. As a result, this\nprojection implicitly excludes the qty and rating fields. Chaining\nthis projection to find() with an empty query filter yields the\nabove results. Despite the fact that this projection only explicitly included the\n name field, the query also returned the _id field, represented by id in the data class. The _id field is a special case: it is always included in every query\nresult unless explicitly excluded. That's because the _id field is a\nunique identifier for each document, a property that can be useful when\nconstructing queries. The _id is the only exception to the mutually exclusive include-exclude\nbehavior in projections: you can explicitly exclude the _id field\neven when explicitly including other fields if you do not want _id \nto be present in returned documents. The projection document specifies that the read operation result should\n include the name field of each returned document, and specifies to\n exclude the _id field. As a result, this projection implicitly\nexcludes the qty and rating fields. Chaining this projection to\n find() with an empty query filter yields the above results. You can also specify multiple fields to include in your projection. This example that identifies two fields to include in the projection yields\nthe following results using the FruitRating Kotlin data class: For additional projection examples, see the\n MongoDB Manual page on Project Fields to Return from Query . The order in which you specify the fields in the projection does not\nalter the order in which they are returned.", - "code": [ - { - "lang": "json", - "value": "{ \"_id\": 1, \"name\": \"apples\", \"qty\": 5, \"rating\": 3 },\n{ \"_id\": 2, \"name\": \"bananas\", \"qty\": 7, \"rating\": 1 },\n{ \"_id\": 3, \"name\": \"oranges\", \"qty\": 6, \"rating\": 2 },\n{ \"_id\": 4, \"name\": \"avocados\", \"qty\": 3, \"rating\": 5 }," - }, - { - "lang": "kotlin", - "value": "data class Fruit(\n @BsonId val id: Int,\n val name: String,\n val qty: Int,\n val rating: Int\n)\n" - }, - { - "lang": "kotlin", - "value": "data class FruitName(\n @BsonId val id: Int? = null,\n val name: String\n)\n\n// Return all documents with only the name field\nval filter = Filters.empty()\nval projection = Projections.fields(\n Projections.include(FruitName::name.name)\n)\nval flowResults = collection.find(filter).projection(projection)\n\nflowResults.collect { println(it)}\n" - }, - { - "lang": "console", - "value": "FruitName(id=1, name=apples),\nFruitName(id=2, name=bananas),\nFruitName(id=3, name=oranges),\nFruitName(id=4, name=avocados)" - }, - { - "lang": "kotlin", - "value": "data class FruitName(\n @BsonId val id: Int? 
= null,\n val name: String\n)\n\n// Return all documents with *only* the name field\n// excludes the id\nval filter = Filters.empty()\nval projection = Projections.fields(\n Projections.include(FruitName::name.name),\n Projections.excludeId()\n)\nval flowResults = collection.find(filter).projection(projection)\n\nflowResults.collect { println(it)}\n" - }, - { - "lang": "console", - "value": "FruitName(name=apples),\nFruitName(name=bananas),\nFruitName(name=oranges),\nFruitName(name=avocados)" - }, - { - "lang": "kotlin", - "value": "data class FruitRating(\n val name: String,\n val rating: Int\n)\n\nval filter = Filters.empty()\nval projection = Projections.fields(\n Projections.include(FruitRating::name.name, FruitRating::rating.name),\n Projections.excludeId()\n)\nval flowResults = collection.find(filter).projection(projection)\n\nflowResults.collect { println(it)}\n" - }, - { - "lang": "console", - "value": "FruitRating(name=apples, rating=3),\nFruitRating(name=bananas, rating=1),\nFruitRating(name=oranges, rating=2),\nFruitRating(name=avocados, rating=5)" - } - ], - "preview": "In this guide, you can learn how to control which fields appear in\ndocuments returned from read operations with the MongoDB Kotlin driver.", - "tags": null, - "facets": { - "target_product": ["drivers"], - "programming_language": ["kotlin"] - } - }, - { - "slug": "fundamentals/crud/read-operations/retrieve", - "title": "Retrieve Data", - "headings": [ - "Overview", - "Sample Data for Examples", - "Find Operation", - "Example", - "Aggregate Operation", - "Example" - ], - "paragraphs": "In this guide, you can learn how to retrieve data from your MongoDB\ndatabase. To retrieve data, use read operations. Read operations allow you to do the following: Retrieve a subset of documents from your collection using a find operation Perform transformations on retrieved documents from your collection using an aggregate operation Monitor real-time changes to your database using change streams The following sections feature examples of how the owner of a paint\nstore manages their customers' orders. For each order, the owner keeps\ntrack of the color and quantity, which corresponds to the color and\n qty fields in their paint_order collection: This data is modeled with the following Kotlin data class: Use the find operation to retrieve a subset of your existing data in\nMongoDB. You can specify what data to return including which documents\nto retrieve, in what order to retrieve them, and how many to retrieve. To perform a find operation, call the find() method on an instance\nof a MongoCollection . This method searches a collection for documents that\nmatch the query filter you provide. For more information on how to\nspecify a query, see our Specify a Query guide. The owner would like to know which orders contain greater than three, but\nless than nine cans of paint from their paint_order collection . To address this scenario, the owner finds orders to match the criteria: After the owner runs this query, they find two orders that matched the\ncriteria. For more information on how to build filters, see our Filters Builders guide. For a runnable find() example, see our Find Multiple\nDocuments page. Use the aggregate operation to perform the stages in an aggregation\npipeline. An aggregation pipeline is a multi-staged transformation that\nproduces an aggregated result. To perform an aggregate operation, call the aggregate() method on an\ninstance of a MongoCollection . 
This method accepts aggregation\nexpressions to run in sequence. To perform aggregations, you can\ndefine aggregation stages that specify how to match documents, rename\nfields, and group values. For more information, see our\n Aggregation guide. The owner would like to know which paint color is the most purchased\n(highest quantity sold) from their paint_order collection . To address the scenario, the owner creates an aggregation pipeline that: After the owner runs the aggregation, they find that \"green\" is the most\npurchased color. For more information on how to construct an aggregation pipeline, see\nthe MongoDB server manual page on Aggregation . For additional information on the methods mentioned on this page, see\nthe following API Documentation: Matches all the documents in the paint_order collection Groups orders by colors Sums up the quantity field by color Orders the results by highest-to-lowest quantity MongoCollection.find() MongoCollection.aggregate()", - "code": [ - { - "lang": "json", - "value": "{ \"_id\": 1, \"color\": \"purple\", \"qty\": 10 }\n{ \"_id\": 2, \"color\": \"green\", \"qty\": 8 }\n{ \"_id\": 3, \"color\": \"purple\", \"qty\": 4 }\n{ \"_id\": 4, \"color\": \"green\", \"qty\": 11 }" - }, - { - "lang": "kotlin", - "value": "data class PaintOrder(\n @BsonId val id: Int,\n val qty: Int,\n val color: String\n)\n" - }, - { - "lang": "kotlin", - "value": "val filter = Filters.and(Filters.gt(\"qty\", 3), Filters.lt(\"qty\", 9))\nval resultsFlow = collection.find(filter)\n\nresultsFlow.collect { println(it) }\n" - }, - { - "lang": "console", - "value": "PaintOrder(id=2, qty=8, color=green)\nPaintOrder(id=3, qty=4, color=purple)" - }, - { - "lang": "kotlin", - "value": "data class AggregationResult(@BsonId val id: String, val qty: Int)\n\nval filter = Filters.empty()\nval pipeline = listOf(\n Aggregates.match(filter),\n Aggregates.group(\n \"\\$color\",\n Accumulators.sum(\"qty\", \"\\$qty\")\n ),\n Aggregates.sort(Sorts.descending(\"qty\"))\n)\nval resultsFlow = collection.aggregate<AggregationResult>(pipeline)\n\nresultsFlow.collect { println(it) }\n" - }, - { - "lang": "console", - "value": "AggregationResult(id=green, qty=19)\nAggregationResult(id=purple, qty=14)" - } - ], - "preview": "In this guide, you can learn how to retrieve data from your MongoDB\ndatabase. To retrieve data, use read operations.", - "tags": null, - "facets": { - "target_product": ["drivers"], - "programming_language": ["kotlin"] - } - }, - { - "slug": "fundamentals/crud/read-operations/skip", - "title": "Skip Returned Results", - "headings": [ - "Overview", - "Examples", - "Using a FindFlow", - "Using Aggregation" - ], - "paragraphs": "In this guide, you can learn how to skip a specified number of returned\nresults from read operations with the MongoDB Kotlin driver. You can skip results returned by a query by using the\n skip() method. You can also skip documents at a specific stage in an\naggregation pipeline by specifying a $skip aggregation stage. The skip() method takes an integer that specifies the number of documents\nto omit from the beginning of the list of documents returned by the\n FindFlow . You can use the skip() method to skip the first two documents as follows: Aggregates.skip() \nis an optional stage in the aggregation pipeline that specifies how many\ndocuments to omit from the beginning of the results of the prior stage. 
You can use the Aggregates.skip() method to skip the first two documents as follows: The following example is about a paint store that sells eight different\ncolors of paint. The best colors sell quicker than the other colors.\nOne day, a customer asks what the three best-selling (lowest inventory)\ncolors are. The paint store keeps track of inventory in the qty \nfield in their paint_inventory collection: This data is modeled with the following Kotlin data class: To address the scenario, the paint store needs to query the\n paint_inventory collection with an empty filter, sort the documents\nby qty field and omit the first five results. The find() method returns all documents. The sort() method specifies documents to display from highest to lowest based on the qty field. The skip() method specifies to omit the first five documents. After the paint store runs the query, they find the three best-selling colors are pink,\nred, and white. The match() stage returns all documents. The sort() stage specifies documents to display from highest to lowest based on the qty field. The skip() stage specifies to omit the first five documents. If the value of skip is greater than or equal to the number of matched\ndocuments for a query, that query returns no documents. If the skip() method from the preceding example skips the first nine\ndocuments, no results would return since the specified quantity\nexceeds the number of matched documents.", - "code": [ - { - "lang": "kotlin", - "value": "collection.find().skip(2)\n" - }, - { - "lang": "kotlin", - "value": "val filter = Filters.empty()\nval results = collection.aggregate(listOf(\n Aggregates.match(filter),\n Aggregates.skip(2))\n)\n" - }, - { - "lang": "json", - "value": "{ \"_id\": 1, \"color\": \"red\", \"qty\": 5 }\n{ \"_id\": 2, \"color\": \"purple\", \"qty\": 10 }\n{ \"_id\": 3, \"color\": \"blue\", \"qty\": 9 }\n{ \"_id\": 4, \"color\": \"white\", \"qty\": 6 }\n{ \"_id\": 5, \"color\": \"yellow\", \"qty\": 11 }\n{ \"_id\": 6, \"color\": \"pink\", \"qty\": 3 }\n{ \"_id\": 7, \"color\": \"green\", \"qty\": 8 }\n{ \"_id\": 8, \"color\": \"orange\", \"qty\": 7 }" - }, - { - "lang": "kotlin", - "value": "data class PaintOrder(\n @BsonId val id: Int,\n val qty: Int,\n val color: String\n)\n" - }, - { - "lang": "kotlin", - "value": "val filter = Filters.empty()\nval results = collection.find(filter)\n .sort(descending(PaintOrder::qty.name))\n .skip(5)\nresults.collect { println(it) }\n" - }, - { - "lang": "console", - "value": "PaintOrder(id=4, qty=6, color=white)\nPaintOrder(id=1, qty=5, color=red)\nPaintOrder(id=6, qty=3, color=pink)" - }, - { - "lang": "kotlin", - "value": "val filter = Filters.empty()\nval aggregate = listOf(\n Aggregates.match(filter),\n Aggregates.sort(descending(PaintOrder::qty.name)),\n Aggregates.skip(5)\n)\nval findFlow = collection.aggregate(aggregate)\nfindFlow.collect { println(it) }\n" - }, - { - "lang": "console", - "value": "PaintOrder(id=4, qty=6, color=white)\nPaintOrder(id=1, qty=5, color=red)\nPaintOrder(id=6, qty=3, color=pink)" - }, - { - "lang": "kotlin", - "value": "val filter = Filters.empty()\nval emptyQuery = listOf(\n Aggregates.match(filter),\n Aggregates.sort(descending(PaintOrder::qty.name)),\n Aggregates.skip(9)\n)\nval findFlow = collection.aggregate(emptyQuery)\nfindFlow.collect { println(it) }\n" - } - ], - "preview": "In this guide, you can learn how to skip a specified number of returned\nresults from read operations with the MongoDB Kotlin driver.", - "tags": null, - "facets": { - "target_product": 
["drivers"], - "programming_language": ["kotlin"] - } - }, - { - "slug": "fundamentals/crud/read-operations/sort", - "title": "Sort Results", - "headings": [ - "Overview", - "Methods For Sorting", - "Sorting Direction", - "Ascending", - "Descending", - "Handling Ties", - "Combining Sort Criteria", - "Text Search" - ], - "paragraphs": "In this guide, you can learn how to use sort operations to order your\nresults from read operations with the MongoDB Kotlin driver. The sort operation orders the documents returned from your query by your specified\n sort criteria . Sort criteria are the rules you pass to MongoDB that describe\nhow you would like your data to be ordered. Some examples of sort criteria are: You should read this guide to learn how to perform the following\nactions: The examples in this guide use a sample collection that contains the following\ndocuments: This data is modeled with the following Kotlin data class: Smallest number to largest number Earliest time of day to latest time of day Alphabetical order by first name Perform ascending sorts and descending sorts Combine sort criteria Sort on the text score of a text search You can sort results retrieved by a query, or you can sort results\nwithin an aggregation pipeline. To sort your query results, use the\n sort() method of a FindFlow instance. To sort your results within an\naggregation pipeline, use the Aggregates.sort() static factory method. Both\nof these methods receive objects that implement the Bson interface as\narguments. For more information, see the API Documentation for the\n BSON interface . You can use the sort() method of a FindFlow instance as follows: You can use the Aggregates.sort() method within an aggregation pipeline to\nsort the documents in the\n sample collection from smallest to\nlargest value of the orderTotal field as follows: In the preceding code snippets, we specify the sort criteria using the Sorts \nbuilder class. While it is possible to specify sort criteria using any class\nthat implements the Bson interface, we recommend that you specify sort\ncriteria through the Sorts builder. For more information on the Sorts \nbuilder class, see the Sorts builder guide. For more information about the classes and interfaces in this section, see the\nfollowing API Documentation: FindFlow Aggregates Sorts BSON Document The direction of your sort can either be ascending or descending .\nAn ascending sort orders your results from smallest to largest. A\ndescending sort orders your results from largest to smallest. Here are some examples of data sorted in ascending order: Here are some examples of data sorted in descending order: The following subsections show how to specify these sort criteria. Numbers: 1, 2, 3, 43, 43, 55, 120 Dates: 1990-03-10, 1995-01-01, 2005-10-30, 2005-12-21 Words (ASCII): Banana, Dill, carrot, cucumber, hummus Numbers: 100, 30, 12, 12, 9, 3, 1 Dates: 2020-01-01, 1998-12-11, 1998-12-10, 1975-07-22 Words (reverse ASCII): pear, grapes, apple, Cheese To specify an ascending sort, use the Sorts.ascending() static\nfactory method. Pass the Sorts.ascending() method\nthe name of the field you need to sort in ascending order. You can pass the sort() method the output of the Sorts.ascending() \nmethod to specify an ascending sort on a field as follows: The preceding sort() method returns a FindIterable object that can iterate\nover the documents in your collection, sorted from smallest to largest on the\nspecified field name. 
In the following code example, we use the ascending() method to sort the\n sample collection \nby the orderTotal field: To specify a descending sort, use the Sorts.descending() static factory\nmethod. Pass the Sorts.descending() method the name of the field you need to sort in descending order. The following code snippet shows how to specify a descending sort on the\n orderTotal field and return the documents in the\n sample collection \nin descending order: A tie occurs when two or more documents have identical values in the field\nyou are using to order your results. MongoDB does not guarantee sort order in\nthe event of ties. For example, suppose we encounter a tie when applying a sort\nto the sample collection using the following\ncode: Since multiple documents that matched the query contain the same value\nin the date field, the documents may not be returned in a consistent order. If you need to guarantee a specific order for documents that have fields\nwith identical values, you can specify additional fields to sort on in the event\nof a tie. We can specify an ascending sort on the date field followed by the\n orderTotal field to return the documents in the\n sample collection \nin the following order: To combine sort criteria, use the Sorts.orderBy() static factory\nmethod. This method constructs an object containing an ordered list of sort\ncriteria. When performing the sort, if the previous sort criteria result in a\ntie, the sort uses the next sort criteria in the list to determine the order. In the following code snippet, we use the orderBy() method to order the data\nby performing a descending sort on the date field, and in the event of a\ntie, by performing an ascending sort on the orderTotal field. With\nthese sort criteria, the code returns the documents in the sample\ncollection in the following order: You can specify the order of the results of a\n text search by how closely the string values of\neach result's fields specified by the collection's text index match your search\nstring. The text search assigns a numerical\n text score to\nindicate how closely each result matches the search string. Use the\n Sorts.metaTextScore() static factory method to build your sort criteria to\nsort by the text score. In the following code example, we show how you can use the\n Sorts.metaTextScore() method to sort the results of a text\nsearch on the sample collection .\nThe code example uses the Filters ,\n Indexes , and\n Projections builders. The code example performs the following actions: The data is modeled with the following Kotlin data class: For more information about the classes in this section, see the\nfollowing API Documentation: For more information, see the\n Sorts class API Documentation.\nSee the server manual documentation for more information on the $text \nquery operator and the\n $meta \naggregation pipeline operator. You need a text index on your collection to\nperform a text search. See the server manual documentation for more\ninformation on how to\n create a text index . Creates a text index for your\n sample collection \non the description field. If you call createIndex() specifying an index that\nalready exists on the collection, the operation does not create a new index. Runs your text search for the phrase \"vanilla\" . Projects text scores into your query results as the\n score field. Sorts your results by text score (best match first). The structure of text search has changed for MongoDB 4.4 or later. 
You no\nlonger need to project Projections.metaTextScore() into your\n FindFlow instance in order to sort on the text score. In addition,\nthe field name you specify in a $meta text score aggregation operation\nused in a sort is ignored. This means that the field name argument you pass\nto Sorts.metaTextScore() is disregarded. Filters Indexes Projections MongoCollection", - "code": [ - { - "lang": "json", - "value": "{ \"_id\": 1, \"date\": \"2022-01-03\", \"orderTotal\": 17.86, \"description\": \"1/2 lb cream cheese and 1 dozen bagels\" },\n{ \"_id\": 2, \"date\": \"2022-01-11\", \"orderTotal\": 83.87, \"description\": \"two medium vanilla birthday cakes\" },\n{ \"_id\": 3, \"date\": \"2022-01-11\", \"orderTotal\": 19.49, \"description\": \"1 dozen vanilla cupcakes\" },\n{ \"_id\": 4, \"date\": \"2022-01-15\", \"orderTotal\": 43.62, \"description\": \"2 chicken lunches and a diet coke\" },\n{ \"_id\": 5, \"date\": \"2022-01-23\", \"orderTotal\": 60.31, \"description\": \"one large vanilla and chocolate cake\" },\n{ \"_id\": 6, \"date\": \"2022-01-23\", \"orderTotal\": 10.99, \"description\": \"1 bagel, 1 orange juice, 1 muffin\" }" - }, - { - "lang": "kotlin", - "value": "data class Order(\n @BsonId val id: Int,\n val date: String,\n val orderTotal: Double,\n val description: String,\n)\n" - }, - { - "lang": "kotlin", - "value": "val resultsFlow = collection.find().sort(Sorts.ascending(Order::orderTotal.name))\n" - }, - { - "lang": "kotlin", - "value": "val resultsFlow = collection.aggregate(listOf(\n Aggregates.sort(Sorts.ascending(Order::orderTotal.name))\n))\n\nresultsFlow.collect { println(it) }\n" - }, - { - "lang": "console", - "value": "Order(id=6, date=2022-01-23, orderTotal=10.99, description=1 bagel, 1 orange juice, 1 muffin)\nOrder(id=1, date=2022-01-03, orderTotal=17.86, description=1/2 lb cream cheese and 1 dozen bagels)\nOrder(id=3, date=2022-01-11, orderTotal=19.49, description=1 dozen vanilla cupcakes)\nOrder(id=4, date=2022-01-15, orderTotal=43.62, description=2 chicken lunches and a diet coke)\nOrder(id=5, date=2022-01-23, orderTotal=60.31, description=one large vanilla and chocolate cake)\nOrder(id=2, date=2022-01-11, orderTotal=83.87, description=two medium vanilla birthday cakes)" - }, - { - "lang": "kotlin", - "value": "collection.find().sort(Sorts.ascending(\"\"))" - }, - { - "lang": "kotlin", - "value": "val resultsFlow = collection.find()\n .sort(Sorts.ascending(Order::orderTotal.name))\n\nresultsFlow.collect { println(it) }\n" - }, - { - "lang": "console", - "value": "Order(id=6, date=2022-01-23, orderTotal=10.99, description=1 bagel, 1 orange juice, 1 muffin)\nOrder(id=1, date=2022-01-03, orderTotal=17.86, description=1/2 lb cream cheese and 1 dozen bagels)\nOrder(id=3, date=2022-01-11, orderTotal=19.49, description=1 dozen vanilla cupcakes)\nOrder(id=4, date=2022-01-15, orderTotal=43.62, description=2 chicken lunches and a diet coke)\nOrder(id=5, date=2022-01-23, orderTotal=60.31, description=one large vanilla and chocolate cake)\nOrder(id=2, date=2022-01-11, orderTotal=83.87, description=two medium vanilla birthday cakes)" - }, - { - "lang": "kotlin", - "value": "val resultsFlow = collection.find()\n .sort(Sorts.descending(Order::orderTotal.name))\n\nresultsFlow.collect { println(it) }\n" - }, - { - "lang": "console", - "value": "Order(id=2, date=2022-01-11, orderTotal=83.87, description=two medium vanilla birthday cakes)\nOrder(id=5, date=2022-01-23, orderTotal=60.31, description=one large vanilla and chocolate cake)\nOrder(id=4, date=2022-01-15, 
orderTotal=43.62, description=2 chicken lunches and a diet coke)\nOrder(id=3, date=2022-01-11, orderTotal=19.49, description=1 dozen vanilla cupcakes)\nOrder(id=1, date=2022-01-03, orderTotal=17.86, description=1/2 lb cream cheese and 1 dozen bagels)\nOrder(id=6, date=2022-01-23, orderTotal=10.99, description=1 bagel, 1 orange juice, 1 muffin)" - }, - { - "lang": "kotlin", - "value": "collection.find().sort(Sorts.ascending(Order::date.name))\n" - }, - { - "lang": "kotlin", - "value": "collection.find().sort(Sorts.ascending(Order::date.name, Order::orderTotal.name))\n" - }, - { - "lang": "console", - "value": "Order(id=1, date=2022-01-03, orderTotal=17.86, description=1/2 lb cream cheese and 1 dozen bagels)\nOrder(id=3, date=2022-01-11, orderTotal=19.49, description=1 dozen vanilla cupcakes)\nOrder(id=2, date=2022-01-11, orderTotal=83.87, description=two medium vanilla birthday cakes)\nOrder(id=4, date=2022-01-15, orderTotal=43.62, description=2 chicken lunches and a diet coke)\nOrder(id=6, date=2022-01-23, orderTotal=10.99, description=1 bagel, 1 orange juice, 1 muffin)\nOrder(id=5, date=2022-01-23, orderTotal=60.31, description=one large vanilla and chocolate cake)" - }, - { - "lang": "kotlin", - "value": "val orderBySort = Sorts.orderBy(\n Sorts.descending(Order::date.name), Sorts.ascending(Order::orderTotal.name)\n)\nval results = collection.find().sort(orderBySort)\n\nresults.collect {println(it) }\n" - }, - { - "lang": "console", - "value": "Order(id=6, date=2022-01-23, orderTotal=10.99, description=1 bagel, 1 orange juice, 1 muffin)\nOrder(id=5, date=2022-01-23, orderTotal=60.31, description=one large vanilla and chocolate cake)\nOrder(id=4, date=2022-01-15, orderTotal=43.62, description=2 chicken lunches and a diet coke)\nOrder(id=3, date=2022-01-11, orderTotal=19.49, description=1 dozen vanilla cupcakes)\nOrder(id=2, date=2022-01-11, orderTotal=83.87, description=two medium vanilla birthday cakes)\nOrder(id=1, date=2022-01-03, orderTotal=17.86, description=1/2 lb cream cheese and 1 dozen bagels)" - }, - { - "lang": "kotlin", - "value": "import com.mongodb.client.model.Sorts\nimport com.mongodb.client.model.Projections\nimport com.mongodb.client.model.Filters\nimport com.mongodb.client.model.Indexes" - }, - { - "lang": "kotlin", - "value": "data class OrderScore(\n @BsonId val id: Int,\n val description: String,\n val score: Double\n)\n" - }, - { - "lang": "kotlin", - "value": "collection.createIndex(Indexes.text(Order::description.name))\nval metaTextScoreSort = Sorts.orderBy(\n Sorts.metaTextScore(OrderScore::score.name),\n Sorts.descending(\"_id\")\n)\nval metaTextScoreProj = Projections.metaTextScore(OrderScore::score.name)\nval searchTerm = \"vanilla\"\nval searchQuery = Filters.text(searchTerm)\n\nval results = collection.find(searchQuery)\n .projection(metaTextScoreProj)\n .sort(metaTextScoreSort)\n\nresults.collect { println(it) }\n" - }, - { - "lang": "console", - "value": "OrderScore(id=3, description=1 dozen vanilla cupcakes, score=0.625)\nOrderScore(id=5, description=one large vanilla and chocolate cake, score=0.6)\nOrderScore(id=2, description=two medium vanilla birthday cakes, score=0.6)" - } - ], - "preview": "In this guide, you can learn how to use sort operations to order your\nresults from read operations with the MongoDB Kotlin driver.", - "tags": null, - "facets": { - "target_product": ["drivers"], - "programming_language": ["kotlin"] - } - }, - { - "slug": "fundamentals/crud/read-operations/text", - "title": "Search Text", - "headings": [ - "Overview", - "Sample 
Documents", - "Text Index", - "Text Search", - "Specify Options", - "Search Text by a Term", - "Example", - "Example", - "Search Text by a Phrase", - "Example", - "Search Text with Terms Excluded", - "Example" - ], - "paragraphs": "In this guide, you can learn how to run a text search in the MongoDB\nKotlin driver. You can use a text search to retrieve documents that contain a term \nor a phrase in a specified field. A term is a sequence of characters\nthat excludes whitespace characters. A phrase is a sequence of terms\nwith any number of whitespace characters. The following sections show you how to perform the following types of\ntext searches: If you want to sort your text search results, see the Text Search section of our Sort Results guide. Search Text by a Term Search Text by a Phrase Search Text with Terms Excluded The following sections feature examples of text searches on the\n fast_and_furious_movies collection. Each section uses a variable\nnamed collection to refer to the MongoCollection instance of the\n fast_and_furious_movies collection. The fast_and_furious_movies collection contains documents that\ndescribe one of the several movies that are part of the Fast and Furious\nmovie franchise. Each document contains a title field and a tags field. This data is modeled with the following Kotlin data class: You must create a text index before running a text search. A text\nindex specifies the string or string array field on which to run a text\nsearch. In the following examples, you run text searches on the title \nfield in the fast_and_furious_movies collection. To enable text\nsearches on the title field, create a text index using the\n Indexes builder with the following snippet: For more information, see the following resources: Text Indexes section of our Indexes guide Text Indexes Server Manual Entry Use the Filters.text() method to specify a text search. The Filters.text() method uses the Filters builder to define a query filter specifying\nwhat to search for during the text search. The query filter is\nrepresented by a BSON instance. Pass the query filter to the\n find() method to run a text search. When you execute the find() method, MongoDB runs a text search on\nall the fields indexed with the text index on the collection. MongoDB\nreturns documents that contain one or more of the search terms and a\nrelevance score for each result. For more information on relevance\nscores, see the Text Search section in\nour Sort Results guide. You can include TextSearchOptions as the second parameter of the\n Filters.text() method to specify text search options such as case\nsensitivity. By default, text searches run without case sensitivity\nwhich means the search matches lowercase and uppercase values. To specify a case sensitive search, use the following snippet: For more information about the methods and classes mentioned in this section,\nsee the following API Documentation: Filters.text() TextSearchOptions Pass a term as a string to the Filters.text() method to specify the\nterm in your text search. The following example runs a text search on the documents in the\n fast_and_furious_movies collection for titles that contain the\nterm \"fast\": To match multiple terms in your text search, separate each term\nwith spaces in the Filters.text() builder method. The builder method\nreturns the text search query as a Bson instance. When you pass\nthis to the find() method, it returns documents that match any of\nthe terms. 
The following example runs a text search on the documents in the\n fast_and_furious_movies collection for titles that contain the\nterms \"fate\" or \"7\": Pass a phrase with escaped quotes to the Filters.text() method to\nspecify the phrase in your text search. Escaped quotes are double quote\ncharacters preceded by a backslash character. If you don't add escaped\nquotes around the phrase, the find() method runs a term search . The following example runs a text search on the documents in the\n fast_and_furious_movies collection for titles that contain the\nphrase \"fate of the furious\": For each term you want to exclude from your text search, prefix the term\nwith a minus sign in the string that you pass to the Filters.text() \nbuilder method. None of the documents returned from the search contain the excluded term\nin your text index field. You must have at least one text search term if you want to\nexclude terms from your search. The following example runs a text search on the documents in the\n fast_and_furious_movies collection for titles that contain the\nterm \"furious\", but do not contain the term \"fast\":", - "code": [ - { - "lang": "json", - "value": "{ \"_id\": 1, \"title\": \"2 Fast 2 Furious \", \"tags\": [\"undercover\", \"drug dealer\"] }\n{ \"_id\": 2, \"title\": \"Fast 5\", \"tags\": [\"bank robbery\", \"full team\"] }\n{ \"_id\": 3, \"title\": \"Furious 7\", \"tags\": [\"emotional\"] }\n{ \"_id\": 4, \"title\": \"The Fate of the Furious\", \"tags\": [\"betrayal\"] }" - }, - { - "lang": "kotlin", - "value": "data class Movies(\n @BsonId val id: Int,\n val title: String,\n val tags: List<String>\n)\n" - }, - { - "lang": "kotlin", - "value": "collection.createIndex(Indexes.text(\"title\"))\n" - }, - { - "lang": "kotlin", - "value": "val options: TextSearchOptions = TextSearchOptions().caseSensitive(true)\nval filter = Filters.text(\"SomeText\", options)\n" - }, - { - "lang": "kotlin", - "value": "val filter = Filters.text(\"fast\")\nval findFlow = collection.find(filter)\nfindFlow.collect { println(it) }\n" - }, - { - "lang": "console", - "value": "Movies(id=1, title=2 Fast 2 Furious, tags=[undercover, drug dealer])\nMovies(id=2, title=Fast 5, tags=[bank robbery, full team])" - }, - { - "lang": "kotlin", - "value": "val filter = Filters.text(\"fate 7\")\nval findFlow = collection.find(filter)\nfindFlow.collect { println(it) }\n" - }, - { - "lang": "console", - "value": "Movies(id=3, title=Furious 7, tags=[emotional])\nMovies(id=4, title=The Fate of the Furious, tags=[betrayal])" - }, - { - "lang": "kotlin", - "value": "val filter = Filters.text(\"\\\"fate of the furious\\\"\")\nval findFlow = collection.find(filter)\nfindFlow.collect { println(it) }\n" - }, - { - "lang": "console", - "value": "Movies(id=4, title=The Fate of the Furious, tags=[betrayal])" - }, - { - "lang": "kotlin", - "value": "val filter = Filters.text(\"furious -fast\")\nval findFlow = collection.find(filter)\nfindFlow.collect { println(it) }\n" - }, - { - "lang": "console", - "value": "Movies(id=3, title=Furious 7, tags=[emotional])\nMovies(id=4, title=The Fate of the Furious, tags=[betrayal])" - } - ], - "preview": "In this guide, you can learn how to run a text search in the MongoDB\nKotlin driver.", - "tags": null, - "facets": { - "target_product": ["drivers"], - "programming_language": ["kotlin"] - } - }, - { - "slug": "fundamentals/crud/read-operations", - "title": "Read Operations", - "headings": [], - "paragraphs": "Retrieve Data Access Data From a Flow Open Change Streams Sort Results Skip Returned 
Results Limit the Number of Returned Results Specify Which Fields to Return Search Geospatially Search Text", - "code": [], - "preview": null, - "tags": null, - "facets": { - "target_product": ["drivers"], - "programming_language": ["kotlin"] - } - }, - { - "slug": "fundamentals/crud/write-operations/bulk", - "title": "Bulk Operations", - "headings": [ - "Overview", - "Performing Bulk Operations", - "Insert Operation", - "Example", - "Replace Operation", - "Example", - "Update Operation", - "Example", - "Delete Operation", - "Example", - "Order of Execution", - "Ordered Execution", - "Example", - "Unordered Execution", - "Summary" - ], - "paragraphs": "In this guide, you can learn how to use bulk operations in the\nMongoDB Kotlin Driver. For individual CRUD operations, you can use the relevant method. For\nexample, to insert one document and then update multiple documents, you\ncan use the insertOne() method and the updateMany() method. The MongoClient performs these operations by making a request to the\ndatabase corresponding to each operation. You can reduce the number of\ncalls to the database by using bulk operations. Bulk operations consist of a large number of write operations. To perform\na bulk operation, pass a List containing WriteModel documents to the\n bulkWrite() method. A WriteModel is a model that represents a single\nwrite operation. The following sections show how to create and use each variation of the WriteModel \ntype. The examples in each section use the following documents in the people collection: This data is modeled with the following Kotlin data class: For more information about the methods and classes mentioned in this section,\nsee the following API Documentation: bulkWrite() WriteModel BulkWriteOptions To perform an insert operation, create an InsertOneModel specifying\nthe document you want to insert. To insert multiple documents, you must\ncreate an InsertOneModel for each document you want to insert. The following example creates an InsertOneModel for two documents\ndescribing people: For more information about the methods and classes mentioned in this section,\nsee the InsertOneModel API Documentation. When performing a bulkWrite() operation, the InsertOneModel cannot\ninsert a document with an _id that already exists in the\ncollection. In this case, the driver throws a MongoBulkWriteException . The following example tries to insert two documents where the _id \nvalues are 1 and 3 . Since there is already a document with an _id \nof 1 in the collection, the operation results in an error: To learn about why the driver didn't insert the document with the\n _id of 3 , see the Order of Execution section. To perform a replace operation, create a ReplaceOneModel specifying\na query filter for the document you want to replace and the replacement\ndocument. When performing a bulkWrite() , the ReplaceOneModel cannot\nmake changes that violate unique index constraints on\nthe collection. Additionally, the model does not perform the replace\noperation if there are no matches to the query filter. The following example creates a ReplaceOneModel to\nreplace a document where the _id is 1 with a document that\ncontains the additional location field: For more information about the methods and classes mentioned in this section,\nsee the following resources: ReplaceOneModel API Documentation Unique indexes Server Manual Explanation To perform an update operation, create an UpdateOneModel or an\n UpdateManyModel that specifies a query filter and an update document. 
The UpdateOneModel updates the first document that matches your query\nfilter and the UpdateManyModel updates all the documents that\nmatch your query filter. When performing a bulkWrite() , the UpdateOneModel and\n UpdateManyModel types cannot make changes that violate unique\nindex constraints on the collection. Additionally, the models do not\nperform update operations if there are no matches to the query\nfilter. The following example creates an UpdateOneModel to increment the age \nfield by 1 in a document where the _id is 2 : For more information about the methods and classes mentioned in this section,\nsee the following resources: UpdateOneModel API Documentation UpdateManyModel API Documentation unique indexes Server Manual Explanation To perform a delete operation, create a DeleteOneModel or a\n DeleteManyModel that specifies a query filter for documents you want\nto delete. The DeleteOneModel deletes the first document that matches your query\nfilter and the DeleteManyModel deletes all the documents that\nmatch your query filter. When performing a bulkWrite() , the DeleteOneModel and\n DeleteManyModel types do not delete any documents if there are no\nmatches to the query filter. The following example creates a DeleteOneModel to delete\na document where the _id is 1 and a DeleteManyModel to delete\ndocuments where the age value is less than 30 : For more information about the methods and classes mentioned in this section,\nsee the following API Documentation: DeleteOneModel DeleteManyModel The bulkWrite() method accepts an optional BulkWriteOptions as\na second parameter to specify whether you want to execute the bulk operations\nas ordered or unordered. By default, the bulkWrite() method executes bulk operations in\norder. This means that the operations execute in the order you\nadded them to the list until any error occurs. The following example performs these bulk operations: After running this example, your collection contains the following\ndocuments: An insert operation for a document where the name is\n \"Zaynab Omar\" and the age is 37 A replace operation for a document where the _id is 1 with a new\ndocument that contains the location field An update operation for a document where the _id is 6 to\nchange the name field A delete operation for all documents that have an age value\ngreater than 50 You can also execute bulk operations in any order by passing false \nto the ordered() method on a BulkWriteOptions object. This means that\nall the write operations execute regardless of errors. If any errors occur,\nthe driver reports them at the end. The following code shows how to execute a bulk operation with no order\nof execution: For more information about the methods and classes mentioned in this section,\nsee the following API Documentation: Unordered bulk operations do not guarantee the order of execution. The\norder may differ from the way you list them to optimize the runtime. In the preceding example, if the bulkWrite() method performed the\ninsert operation after the update operation, the update operation\nwould not produce changes because the document did not exist\nat that point in time. The collection would then contain the following\ndocuments: BulkWriteOptions ordered() To perform a bulk operation, create and pass a list of\n WriteModel documents to the bulkWrite() method. 
There are six variations of WriteModel : There are two ways to execute the bulkWrite() method: InsertOneModel ReplaceOneModel UpdateOneModel UpdateManyModel DeleteOneModel DeleteManyModel Ordered, where the driver performs the write operations in order until any error occurs Unordered, where the driver performs all the write operations in any order and\nreports any errors after the operations complete", - "code": [ - { - "lang": "json", - "value": "{ \"_id\": 1, \"name\": \"Karen Sandoval\", \"age\": 31 }\n{ \"_id\": 2, \"name\": \"William Chin\", \"age\": 54 }\n{ \"_id\": 8, \"name\": \"Shayla Ray\", \"age\": 20 }" - }, - { - "lang": "kotlin", - "value": "data class Person(\n @BsonId val id: Int,\n val name: String,\n val age: Int? = null,\n val location: String? = null\n)\n" - }, - { - "lang": "kotlin", - "value": "val juneDoc = InsertOneModel(Person(3, \"June Carrie\", 17))\nval kevinDoc = InsertOneModel(Person(4, \"Kevin Moss\", 22))\n" - }, - { - "lang": "kotlin", - "value": "try {\n val bulkOperations = listOf(\n (InsertOneModel(Person(1, \"James Smith\", 13))),\n (InsertOneModel(Person(3, \"Colin Samuels\")))\n )\n val bulkWrite = collection.bulkWrite(bulkOperations)\n} catch (e: MongoBulkWriteException) {\n println(\"A MongoBulkWriteException occurred with the following message: \" + e.message)\n}\n" - }, - { - "lang": "console", - "value": "A MongoBulkWriteException occurred with the following message:\nBulk write operation error on server sample-shard-00-02.pw0q4.mongodb.net:27017.\nWrite errors: [BulkWriteError{index=0, code=11000, message='E11000 duplicate key\nerror collection: crudOps.bulkWrite index: _id_ dup key: { _id: 1 }', details={}}]." - }, - { - "lang": "kotlin", - "value": "val filter = Filters.eq(\"_id\", 1)\nval insert = Person(1, \"Celine Stork\", location = \"San Diego, CA\")\nval doc = ReplaceOneModel(filter, insert)\n" - }, - { - "lang": "kotlin", - "value": "val filter = Filters.eq(\"_id\", 2)\nval update = Updates.inc(Person::age.name, 1)\nval doc = UpdateOneModel(filter, update)\n" - }, - { - "lang": "kotlin", - "value": "val deleteId1 = DeleteOneModel(Filters.eq(\"_id\", 1))\nval deleteAgeLt30 = DeleteManyModel(Filters.lt(Person::age.name, 30))\n" - }, - { - "lang": "json", - "value": "{ \"_id\": 1, \"name\": \"Sandy Kane\", \"location\": \"Helena, MT\" }\n{ \"_id\": 8, \"name\": \"Shayla Ray\", \"age\": 20 }\n{ \"_id\": 6, \"name\": \"Zaynab Hassan\", \"age\": 37 }" - }, - { - "lang": "kotlin", - "value": "val insertMdl = InsertOneModel(Person(6, \"Zaynab Omar\", 37))\nval replaceMdl = ReplaceOneModel(\n Filters.eq(\"_id\", 1),\n Person(1, \"Sandy Kane\", location = \"Helena, MT\")\n)\nval updateMdl = UpdateOneModel(\n Filters.eq(\"_id\", 6),\n Updates.set(Person::name.name, \"Zaynab Hassan\")\n )\nval deleteMdl = DeleteManyModel(Filters.gt(Person::age.name, 50))\n\nval bulkOperations = listOf(\n insertMdl,\n replaceMdl,\n updateMdl,\n deleteMdl\n)\n\nval result = collection.bulkWrite(bulkOperations)\n" - }, - { - "lang": "kotlin", - "value": "val options = BulkWriteOptions().ordered(false)\nval unorderedResult = collection.bulkWrite(bulkOperations, options)\n" - }, - { - "lang": "json", - "value": "{ \"_id\": 1, \"name\": \"Sandy Kane\", \"location\": \"Helena, MT\" }\n{ \"_id\": 8, \"name\": \"Shayla Ray\", \"age\": 20 }\n{ \"_id\": 6, \"name\": \"Zaynab Omar\", \"age\": 37 }" - } - ], - "preview": "In this guide, you can learn how to use bulk operations in the\nMongoDB Kotlin Driver.", - "tags": null, - "facets": { - "target_product": ["drivers"], -
"programming_language": ["kotlin"] - } - }, - { - "slug": "fundamentals/crud/write-operations/delete", - "title": "Delete Documents", - "headings": [ - "Overview", - "Sample Documents", - "Delete Many Documents", - "Delete a Document", - "Find and Delete a Document" - ], - "paragraphs": "In this guide, you can learn how to remove documents with the MongoDB Kotlin\ndriver. You can remove documents by passing a query filter to the\n deleteOne() , deleteMany() or findOneAndDelete() methods. The deleteOne() method deletes a single document. If the query\nfilter matches more than one document, the method will remove the first\noccurrence of a match in the collection. The deleteMany() method deletes all documents that match the query\nfilter. The findOneAndDelete() method atomically finds and deletes the first\noccurrence of a match in the collection. To specify a collation or hint an index, use DeleteOptions \nas a second parameter to the deleteOne() and deleteMany() methods. To specify a collation, hint an index, specify sort order, or specify a\nprojection on the returned document, use FindOneAndDeleteOptions \nas the second parameter to the findOneAndDelete() method. When deleting a single document, filter your query by a unique index,\nsuch as an _id , to ensure your query matches the document you want to\ndelete. The following examples are about a paint store that sells eight different\ncolors of paint. The store had their annual online sale resulting in the\nfollowing documents in their paint_inventory collection: This data is modeled with the following Kotlin data class: The paint store website displays all documents in the\n paint_inventory collection. To reduce customer confusion, the store\nwants to remove the colors that are out of stock. To remove the out of stock colors, query the paint_inventory \ncollection where the qty is 0 and pass the query to the\n deleteMany() method: The following shows the documents remaining in the paint_inventory \ncollection: The store is donating the remaining quantity of their yellow paint. This\nmeans that the qty for yellow is now 0 and we need to remove yellow\nfrom the collection. To remove yellow, query the paint_inventory collection where the\n color is \"yellow\" and pass the query to the deleteOne() \nmethod: The following shows the documents remaining in the paint_inventory \ncollection: The store would like to raffle the remaining quantity of purple paint\nand remove purple from the paint_inventory collection. To pick a color, query the paint_inventory collection where the\n color is \"purple\" and pass the query to the findOneAndDelete() \nmethod. Unlike the other delete methods, findOneAndDelete() returns the\ndeleted document: The following shows the documents remaining in the paint_inventory \ncollection: For more information about the methods and classes mentioned in this guide,\nsee the following resources: If there are no matches to your query filter, no document gets\ndeleted and the method returns null . 
deleteOne() API Documentation deleteMany() API Documentation findOneAndDelete() API Documentation DeleteOptions API Documentation FindOneAndDeleteOptions API Documentation db.collection.deleteOne() Server Manual Entry db.collection.deleteMany() Server Manual Entry db.collection.findOneAndDelete() Server Manual Entry", - "code": [ - { - "lang": "json", - "value": "{ \"_id\": 1, \"color\": \"red\", \"qty\": 5 }\n{ \"_id\": 2, \"color\": \"purple\", \"qty\": 8 }\n{ \"_id\": 3, \"color\": \"blue\", \"qty\": 0 }\n{ \"_id\": 4, \"color\": \"white\", \"qty\": 0 }\n{ \"_id\": 5, \"color\": \"yellow\", \"qty\": 6 }\n{ \"_id\": 6, \"color\": \"pink\", \"qty\": 0 }\n{ \"_id\": 7, \"color\": \"green\", \"qty\": 0 }\n{ \"_id\": 8, \"color\": \"black\", \"qty\": 8 }" - }, - { - "lang": "kotlin", - "value": "data class PaintOrder(\n @BsonId val id: Int,\n val qty: Int,\n val color: String\n)\n" - }, - { - "lang": "json", - "value": "{ \"_id\": 1, \"color\": \"red\", \"qty\": 5 }\n{ \"_id\": 2, \"color\": \"purple\", \"qty\": 8 }\n{ \"_id\": 5, \"color\": \"yellow\", \"qty\": 6 }\n{ \"_id\": 8, \"color\": \"black\", \"qty\": 8 }" - }, - { - "lang": "kotlin", - "value": "val filter = Filters.eq(\"qty\", 0)\ncollection.deleteMany(filter)\n" - }, - { - "lang": "json", - "value": "{ \"_id\": 1, \"color\": \"red\", \"qty\": 5 }\n{ \"_id\": 2, \"color\": \"purple\", \"qty\": 8 }\n{ \"_id\": 8, \"color\": \"black\", \"qty\": 8 }" - }, - { - "lang": "kotlin", - "value": "val filter = Filters.eq(\"color\", \"yellow\")\ncollection.deleteOne(filter)\n" - }, - { - "lang": "json", - "value": " { \"_id\": 1, \"color\": \"red\", \"qty\": 5 }\n { \"_id\": 8, \"color\": \"black\", \"qty\": 8 }" - }, - { - "lang": "kotlin", - "value": "val filter = Filters.eq(\"color\", \"purple\")\nval result = collection.findOneAndDelete(filter)\n\nprintln(\"The following was deleted: $result\")\n" - }, - { - "lang": "console", - "value": "The following was deleted: PaintOrder(id=2, qty=8, color=purple)" - } - ], - "preview": "In this guide, you can learn how to remove documents with the MongoDB Kotlin\ndriver.", - "tags": null, - "facets": { - "target_product": ["drivers"], - "programming_language": ["kotlin"] - } - }, - { - "slug": "fundamentals/crud/write-operations/embedded-arrays", - "title": "Update Arrays in a Document", - "headings": [ - "Overview", - "Sample Document", - "Specifying an Update", - "Specifying Array Elements", - "The First Matching Array Element", - "Example", - "Matching All Array Elements", - "Example", - "Matching Multiple Array Elements", - "Example" - ], - "paragraphs": "In this guide, you can learn how to update arrays in a document with the\nMongoDB Kotlin driver. To update an array, you must do the following: Specify the update you want to perform Specify what array elements to apply your update to Perform an update operation using these specifications The following sections feature examples that update this sample\ndocument: This data is modeled with the following Kotlin data class: The examples on this page use the findOneAndUpdate() method of the\n MongoCollection class to retrieve and update the document. Each\nexample uses an instance of the FindOneAndUpdateOptions class to\nhave MongoDB retrieve the document after the update occurs. For\nmore information on the findOneAndUpdate() method, see our\n Compound Operations guide . To specify an update, use the Updates builder. The Updates \nbuilder provides static utility methods to construct update\nspecifications. 
For more information on using the Updates builder with\narrays, see our guide on the Updates builder . The following example performs these actions: Query for the sample document Append \"17\" to the qty array in the document that matches the query filter You can specify which array elements to update using a positional\noperator. Positional operators can specify the first, all, or certain\narray elements to update. To specify elements in an array with positional operators, use dot\nnotation . Dot notation is a property access syntax for navigating BSON\nobjects. For additional information, see the Server Manual Entry on\n dot notation . To update the first array element that matches your query filter, use the\npositional $ operator. The array field must appear as part of your\nquery filter to use the positional $ operator. The following example performs these actions: For more information about the methods and operators mentioned in this section,\nsee the following resources: Query for a document with a qty field containing the value \"18\" Decrement the first array value in the document that matches the query filter by \"3\" Positional $ Operator Server Manual Entry inc() API Documentation To update all elements in an array, use the all positional $[] operator. The following example performs these actions: For more information about the methods and operators mentioned in this section,\nsee the following resources: Query for the sample document Multiply array elements matching the query filter by \"2\" All Positional $[] Operator Server Manual Entry mul() API Documentation To update array elements that match a filter, use the\nfiltered positional $[] operator. You must include an\narray filter in your update operation to specify which array elements to\nupdate. The is the name you give your array filter. This value\nmust begin with a lowercase letter and contain only alphanumeric\ncharacters. 
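Editorial aside, not part of the recorded manifest content: a sketch of the filtered positional operator described above. It uses UpdateOptions.arrayFilters() rather than the page's FindOneAndUpdateOptions variant; the collection, database, and field names are placeholders.

import com.mongodb.client.model.Filters
import com.mongodb.client.model.UpdateOptions
import com.mongodb.client.model.Updates
import com.mongodb.kotlin.client.coroutine.MongoClient
import kotlinx.coroutines.runBlocking
import org.bson.Document

fun main() = runBlocking {
    val mongoClient = MongoClient.create("<connection string uri>") // placeholder
    val collection = mongoClient.getDatabase("paint_store").getCollection<Document>("paint_inventory")

    // "smaller" is the array filter identifier referenced by $[smaller] below;
    // it must start with a lowercase letter and be alphanumeric.
    val options = UpdateOptions().arrayFilters(listOf(Filters.lt("smaller", 15)))

    // Increment only the qty elements matched by the array filter.
    collection.updateOne(Filters.eq("_id", 1), Updates.inc("qty.$[smaller]", 5), options)

    mongoClient.close()
}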
The following example performs these actions: For more information about the methods and operators mentioned in this section,\nsee the following resources: Query for the sample document Set an array filter to search for values less than \"15\" Increment array elements matching the query filter by \"5\" Filtered Positional $[] Operator Server Manual Entry inc() API Documentation", - "code": [ - { - "lang": "json", - "value": "{ \"_id\": 1, \"color\": \"green\", \"qty\": [8, 12, 18] }" - }, - { - "lang": "kotlin", - "value": "data class PaintOrder(\n @BsonId val id: Int,\n val qty: List,\n val color: String\n)\n" - }, - { - "lang": "kotlin", - "value": "val filter = Filters.eq(\"_id\", 1)\nval update = Updates.push(PaintOrder::qty.name, 17)\nval options = FindOneAndUpdateOptions()\n .returnDocument(ReturnDocument.AFTER)\nval result = collection.findOneAndUpdate(filter, update, options)\n\nprint(result)\n" - }, - { - "lang": "console", - "value": "PaintOrder(id=1, qty=[8, 12, 18, 17], color=green)" - }, - { - "lang": "kotlin", - "value": "val filter = Filters.eq(PaintOrder::qty.name, 18)\nval update = Updates.inc(\"${PaintOrder::qty.name}.$\", -3)\nval options = FindOneAndUpdateOptions()\n .returnDocument(ReturnDocument.AFTER)\nval result = collection.findOneAndUpdate(filter, update, options)\n\nprint(result)\n" - }, - { - "lang": "console", - "value": "PaintOrder(id=1, qty=[8, 12, 15], color=green)" - }, - { - "lang": "kotlin", - "value": "val filter = Filters.eq(\"_id\", 1)\nval update = Updates.mul(\"${PaintOrder::qty.name}.$[]\", 2)\nval options = FindOneAndUpdateOptions()\n .returnDocument(ReturnDocument.AFTER)\nval result = collection.findOneAndUpdate(filter, update, options)\n\nprintln(result)\n" - }, - { - "lang": "console", - "value": "PaintOrder(id=1, qty=[16, 24, 36], color=green)" - }, - { - "lang": "kotlin", - "value": "val filter = Filters.eq(\"_id\", 1)\nval smallerFilter = Filters.lt(\"smaller\", 15)\nval options = FindOneAndUpdateOptions()\n .returnDocument(ReturnDocument.AFTER)\n .arrayFilters(listOf(smallerFilter))\nval update = Updates.inc(\"${PaintOrder::qty.name}.$[smaller]\", 5)\nval result = collection.findOneAndUpdate(filter, update, options)\n\nprintln(result)\n" - }, - { - "lang": "console", - "value": "PaintOrder(id=1, qty=[13, 17, 18], color=green)" - } - ], - "preview": "In this guide, you can learn how to update arrays in a document with the\nMongoDB Kotlin driver.", - "tags": null, - "facets": { - "target_product": ["drivers"], - "programming_language": ["kotlin"] - } - }, - { - "slug": "fundamentals/crud/write-operations/insert", - "title": "Insert Operations", - "headings": [ - "Overview", - "A Note About _id", - "Insert a Single Document", - "Example", - "Insert Multiple Documents", - "Example", - "Summary" - ], - "paragraphs": "In this guide, you can learn how to insert documents with the MongoDB Kotlin\ndriver. You can use MongoDB to retrieve, update, and delete information. To\nperform any of those operations, that information, such as user profiles\nand orders, needs to exist in MongoDB. For that information to exist,\nyou need to first perform an insert operation. An insert operation inserts a single or multiple documents into MongoDB\nusing the insertOne() , insertMany() , and bulkWrite() \nmethods. The following sections focus on insertOne() and\n insertMany() . For information on how to use the bulkWrite() \nmethod, see our\n guide on Bulk Operations . In the following examples, a paint store has an inventory of different colors\nof paint. 
This data is modeled with the following Kotlin data class: When inserting a document, MongoDB enforces one constraint on your\ndocuments by default: each document must contain a unique _id \nfield. There are two ways to manage this field: Unless you have provided strong guarantees for uniqueness, we recommend\nyou let the driver automatically generate _id values. For additional information on unique indexes, see the manual entry on\n Unique Indexes . You can manage this field yourself, ensuring each value you use is unique. You can let the driver automatically generate unique ObjectId values. Duplicate _id values violate unique index constraints, resulting\nin a WriteError . Use the insertOne() method when you want to insert a single\ndocument. On successful insertion, the method returns an InsertOneResult \ninstance representing the _id of the new document. The following example creates and inserts a document using the\n insertOne() method: For more information about the methods and classes mentioned in this section,\nsee the following resources: insertOne() API Documentation InsertOneResult API Documentation Manual Explanation on insertOne() Runnable Insert a Document example Use the insertMany() method when you want to insert multiple\ndocuments. This method inserts documents in the order specified until an\nexception occurs, if any. For example, assume you want to insert the following documents: If you attempt to insert these documents, a WriteError occurs at the\nthird document and the documents prior to the error get inserted into\nyour collection. On successful insertion, the method returns an InsertManyResult \ninstance representing the _id of each new document. Use a try-catch block to get an acknowledgment for successfully\nprocessed documents before the error occurs. The output consists of\ndocuments MongoDB can process: If you look inside your collection, you should see the following documents: The following example creates and adds two documents to a List , and\ninserts the List using the insertMany() method: For more information about the methods and classes mentioned in this section,\nsee the following resources: insertMany() API Documentation InsertManyResult API Documentation Manual Explanation on insertMany() Runnable Insert Multiple Documents example There are three ways to perform an insert operation, but we focused on two: Both methods automatically generate an _id if you omit the field in\nyour document. If the insertion is successful, both methods return an instance\nrepresenting the _id of each new document. The insertOne() method inserts a single document. The insertMany() method inserts multiple documents.", - "code": [ - { - "lang": "kotlin", - "value": "data class PaintOrder(\n @BsonId val id: ObjectId? 
= null,\n val qty: Int,\n val color: String\n)\n" - }, - { - "lang": "kotlin", - "value": "val paintOrder = PaintOrder(ObjectId(), 5, \"red\")\nval result = collection.insertOne(paintOrder)\n\nval insertedId = result.insertedId?.asObjectId()?.value\n\nprintln(\"Inserted a document with the following id: $insertedId\")\n" - }, - { - "lang": "console", - "value": "Inserted a document with the following id: 60930c39a982931c20ef6cd6" - }, - { - "lang": "json", - "value": "{ \"color\": \"red\", \"qty\": 5 }\n{ \"color\": \"purple\", \"qty\": 10 }\n{ \"color\": \"yellow\", \"qty\": 3 }\n{ \"color\": \"blue\", \"qty\": 8 }" - }, - { - "lang": "json", - "value": "{ \"color\": \"red\", \"qty\": 5 }\n{ \"color\": \"purple\", \"qty\": 10 }" - }, - { - "lang": "kotlin", - "value": "val result = collection.insertMany(paintOrders)\ntry {\n println(\"Inserted documents with the following ids: ${result.insertedIds}\")\n} catch(e: MongoBulkWriteException){\n val insertedIds = e.writeResult.inserts.map { it.id.asInt32().value }\n println(\n \"A MongoBulkWriteException occurred, but there are \" +\n \"successfully processed documents with the following ids: $insertedIds\"\n )\n collection.find().collect { println(it) }\n}\n" - }, - { - "lang": "console", - "value": "A MongoBulkWriteException occurred, but there are successfully processed\ndocuments with the following ids: [60930c3aa982931c20ef6cd7, 644ad1378ea29443837a14e9, 60930c3aa982931c20ef6cd8]" - }, - { - "lang": "kotlin", - "value": "val paintOrders = listOf(\n PaintOrder(ObjectId(), 5, \"red\"),\n PaintOrder(ObjectId(), 10, \"purple\")\n)\nval result = collection.insertMany(paintOrders)\n\nprintln(\"Inserted a document with the following ids: ${result.insertedIds.toList()}\")\n" - }, - { - "lang": "console", - "value": "Inserted documents with the following ids: [60930c3aa982931c20ef6cd7, 60930c3aa982931c20ef6cd8]" - } - ], - "preview": "In this guide, you can learn how to insert documents with the MongoDB Kotlin\ndriver.", - "tags": null, - "facets": { - "target_product": ["drivers"], - "programming_language": ["kotlin"] - } - }, - { - "slug": "fundamentals/crud/write-operations/modify", - "title": "Modify Documents", - "headings": [ - "Overview", - "Update", - "Update Operation Parameters", - "Example", - "Replace", - "Replace Operation Parameters", - "Example" - ], - "paragraphs": "In this guide, you can learn how to modify documents in a MongoDB\ncollection using two distinct operation types: Update operations specify the fields and values to change in one or more\ndocuments. A replace operation specifies the fields and values to replace\na single document from your collection. In the following examples, a paint store sells five different\ncolors of paint. The paint_inventory collection represents their\ncurrent inventory: This data is modeled with the following Kotlin data class: Update Replace Update operations can modify fields and values. They apply changes\nspecified in an update document to one or more documents that match your\nquery filter. The updateOne() \nmethod changes the first document your query filter matches and the\n updateMany() \nmethod changes all the documents your query filter matches. You can call the updateOne() and updateMany() methods on a\n MongoCollection instance as follows: The updateOne() and updateMany() methods both have the following\nparameters: You can create the updateDocument using an Updates builder as\nfollows: See the MongoDB API documentation for a complete list of\nUpdates builders and their usage . 
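Editorial aside, not part of the recorded manifest content: a minimal sketch of the updateOne()/updateMany() calls described above, built with the Updates builder. Updates.combine() is standard in the builders API but is an addition beyond this page's own snippets; names and values are placeholders.

import com.mongodb.client.model.Filters
import com.mongodb.client.model.Updates
import com.mongodb.kotlin.client.coroutine.MongoClient
import kotlinx.coroutines.runBlocking
import org.bson.Document

fun main() = runBlocking {
    val mongoClient = MongoClient.create("<connection string uri>") // placeholder
    val collection = mongoClient.getDatabase("paint_store").getCollection<Document>("paint_inventory")

    // updateOne(): applies the update document to the first match only.
    // Updates.combine() merges several builder operations into one update document.
    collection.updateOne(
        Filters.eq("color", "yellow"),
        Updates.combine(Updates.inc("qty", 1), Updates.set("onSale", false)),
    )

    // updateMany(): applies the update to every match; Filters.empty() matches all.
    collection.updateMany(Filters.empty(), Updates.inc("qty", 20))

    mongoClient.close()
}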
query specifies a query filter with the criteria to match documents to update in your collection updateDocument specifies the fields and values to modify in the matching document or documents. For this example, we use the Updates builder to create the update document. The paint store needs to update their inventory after a customer returns a\ncan of yellow paint. To update the single can of paint, call the updateOne() method specifying\nthe following: The paint store then receives a fresh shipment and needs to update their\ninventory again. The shipment contains 20 cans of each paint color. To update the inventory, call the updateMany() method specifying the\nfollowing: The following shows the updated documents in the paint_inventory collection: If zero documents match the query filter in the update operation,\n updateMany() makes no changes to documents in the collection. See\nour upsert guide to\nlearn how to insert a new document instead of updating one if no\ndocuments match. A query filter that matches the yellow color An update document that contains instructions to increment the qty field by \"1\" A query filter that matches all the colors An update document that contains instructions to increment the qty field by \"20\" The updateOne() and updateMany() methods cannot make changes\nto a document that violate unique index constraints on the\ncollection. See the MongoDB server manual for more information on\n unique indexes . A replace operation substitutes one document from your collection. The\nsubstitution occurs between a document your query filter matches and a\nreplacement document. The replaceOne() \nmethod removes all the existing fields and values in the\nmatching document (except the _id field) and substitutes it with the\nreplacement document. You can call the replaceOne() method on a MongoCollection \ninstance as follows: The replaceOne() method has the following parameters: query specifies a query filter with the criteria to match a document to replace in your collection replacementDocument specifies fields and values of a new Document object to replace in the matched document The paint store realizes they need to update their inventory again. What they\nthought was 20 cans of pink paint is actually 25 cans of orange paint. To update the inventory, call the replaceOne() method specifying the\nfollowing: The following shows the updated document: If zero documents match the query filter in the replace operation,\n replaceOne() makes no changes to documents in the collection. See\nour upsert guide to\nlearn how to insert a new document instead of replacing one if no\ndocuments match. If multiple documents match the query filter specified in\nthe replaceOne() method, it replaces the first result. A query filter that matches documents where the color is \"pink\" A replacement document where the color is \"orange\" and the qty is \"25\" The replaceOne() method cannot make changes to a document that\nviolate unique index constraints on the collection. 
See the MongoDB\nserver manual for more information on unique indexes .", - "code": [ - { - "lang": "json", - "value": "{ \"_id\": 1, \"color\": \"red\", \"qty\": 5 }\n{ \"_id\": 2, \"color\": \"purple\", \"qty\": 8 }\n{ \"_id\": 3, \"color\": \"yellow\", \"qty\": 0 }\n{ \"_id\": 4, \"color\": \"green\", \"qty\": 6 }\n{ \"_id\": 5, \"color\": \"pink\", \"qty\": 0 }" - }, - { - "lang": "kotlin", - "value": "data class PaintOrder(\n @BsonId val id: Int,\n val color: String,\n val qty: Int\n)\n" - }, - { - "lang": "kotlin", - "value": "collection.updateOne(query, updateDocument)\n\ncollection.updateMany(query, updateDocument)" - }, - { - "lang": "kotlin", - "value": "val updateDocument = Updates.operator(field, value)" - }, - { - "lang": "json", - "value": " { \"_id\": 1, \"color\": \"red\", \"qty\": 25 }\n { \"_id\": 2, \"color\": \"purple\", \"qty\": 28 }\n { \"_id\": 3, \"color\": \"yellow\", \"qty\": 20 }\n { \"_id\": 4, \"color\": \"green\", \"qty\": 26 }\n { \"_id\": 5, \"color\": \"pink\", \"qty\": 20 }" - }, - { - "lang": "kotlin", - "value": "val filter = Filters.eq(PaintOrder::color.name, \"yellow\")\nval update = Updates.inc(PaintOrder::qty.name, 1)\nval result = collection.updateOne(filter, update)\n\nprintln(\"Matched document count: ${result.matchedCount}\")\nprintln(\"Modified document count: ${result.modifiedCount}\")\n" - }, - { - "lang": "console", - "value": " Matched document count: 1\n Modified document count: 1" - }, - { - "lang": "kotlin", - "value": "val filter = Filters.empty()\nval update = Updates.inc(PaintOrder::qty.name, 20)\nval result = collection.updateMany(filter, update)\n\nprintln(\"Matched document count: ${result.matchedCount}\")\nprintln(\"Modified document count: ${result.modifiedCount}\")\n" - }, - { - "lang": "console", - "value": " Matched document count: 5\n Modified document count: 5" - }, - { - "lang": "kotlin", - "value": "collection.replaceOne(query, replacementDocument)" - }, - { - "lang": "json", - "value": " { \"_id\": 5, \"color\": \"orange\", \"qty\": 25 }" - }, - { - "lang": "kotlin", - "value": "val filter = Filters.eq(PaintOrder::color.name, \"pink\")\nval update = PaintOrder(5, \"orange\", 25)\nval result = collection.replaceOne(filter, update)\n\nprintln(\"Matched document count: ${result.matchedCount}\")\nprintln(\"Modified document count: ${result.modifiedCount}\")\n" - }, - { - "lang": "console", - "value": " Matched document count: 1\n Modified document count: 1" - } - ], - "preview": "In this guide, you can learn how to modify documents in a MongoDB\ncollection using two distinct operation types:", - "tags": null, - "facets": { - "target_product": ["drivers"], - "programming_language": ["kotlin"] - } - }, - { - "slug": "fundamentals/crud/write-operations/upsert", - "title": "Insert or Update in a Single Operation", - "headings": ["Overview", "Specify an Upsert"], - "paragraphs": "In this guide, you can learn how to perform an upsert with the\nMongoDB Kotlin driver. Applications use insert and update operations to store and modify data.\nSometimes, you need to choose between an insert and update depending on\nwhether the document exists. MongoDB simplifies this decision for us\nwith an upsert option. An upsert : Updates documents that match your query filter Inserts a document if there are no matches to your query filter To specify an upsert with the updateOne() or updateMany() \nmethods, pass true to UpdateOptions.upsert() . To specify an upsert with the replaceOne() method, pass true to\n ReplaceOptions.upsert() .
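Editorial aside, not part of the recorded manifest content: a minimal sketch of both upsert entry points named above. Database, collection, and values are placeholders.

import com.mongodb.client.model.Filters
import com.mongodb.client.model.ReplaceOptions
import com.mongodb.client.model.UpdateOptions
import com.mongodb.client.model.Updates
import com.mongodb.kotlin.client.coroutine.MongoClient
import kotlinx.coroutines.runBlocking
import org.bson.Document

fun main() = runBlocking {
    val mongoClient = MongoClient.create("<connection string uri>") // placeholder
    val collection = mongoClient.getDatabase("paint_store").getCollection<Document>("paint_inventory")

    // Update upsert: inserts { color: "orange", qty: 10 } when nothing matches.
    collection.updateOne(
        Filters.eq("color", "orange"),
        Updates.inc("qty", 10),
        UpdateOptions().upsert(true),
    )

    // Replace upsert: the same opt-in flag lives on ReplaceOptions.
    collection.replaceOne(
        Filters.eq("color", "magenta"),
        Document("color", "magenta").append("qty", 25),
        ReplaceOptions().upsert(true),
    )

    mongoClient.close()
}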
In the following example, a paint store sells eight different\ncolors of paint. The store had their annual online sale. Their\n paint_inventory collection now shows the following documents: This data is modeled with the following Kotlin data class: The store received a fresh shipment and needs to update their inventory.\nThe first item in the shipment is ten cans of orange paint. To update the inventory, query the paint_inventory collection\nwhere the color is \"orange\" , specify an update to increment the\n qty field by 10 , and specify true to\n UpdateOptions.upsert() : This AcknowledgedUpdateResult tells us: The following shows the documents in the paint_inventory collection: For more information about the methods and classes mentioned in this section,\nsee the following API Documentation: Zero documents matched our query filter Zero documents in our collection got modified A document with an _id of 606b4cfc1601f9443b5d6978 got upserted Not including UpdateOptions results in no change to the collection. UpdateOptions.upsert() ReplaceOptions.upsert()", - "code": [ - { - "lang": "json", - "value": "{ \"_id\": { \"$oid\": \"606b4cfbcd83be7518b958da\" }, \"color\": \"red\", \"qty\": 5 }\n{ \"_id\": { \"$oid\": \"606b4cfbcd83be7518b958db\" }, \"color\": \"purple\", \"qty\": 8 }\n{ \"_id\": { \"$oid\": \"606b4cfbcd83be7518b958dc\" }, \"color\": \"blue\", \"qty\": 0 }\n{ \"_id\": { \"$oid\": \"606b4cfbcd83be7518b958dd\" }, \"color\": \"white\", \"qty\": 0 }\n{ \"_id\": { \"$oid\": \"606b4cfbcd83be7518b958de\" }, \"color\": \"yellow\", \"qty\": 6 }\n{ \"_id\": { \"$oid\": \"606b4cfbcd83be7518b958df\" }, \"color\": \"pink\", \"qty\": 0 }\n{ \"_id\": { \"$oid\": \"606b4cfbcd83be7518b958e0\" }, \"color\": \"green\", \"qty\": 0 }\n{ \"_id\": { \"$oid\": \"606b4cfbcd83be7518b958e1\" }, \"color\": \"black\", \"qty\": 8 }" - }, - { - "lang": "json", - "value": " { \"_id\": { \"$oid\": \"606b4cfbcd83be7518b958da\" }, \"color\": \"red\", \"qty\": 5 }\n { \"_id\": { \"$oid\": \"606b4cfbcd83be7518b958db\" }, \"color\": \"purple\", \"qty\": 8 }\n { \"_id\": { \"$oid\": \"606b4cfbcd83be7518b958dc\" }, \"color\": \"blue\", \"qty\": 0 }\n { \"_id\": { \"$oid\": \"606b4cfbcd83be7518b958dd\" }, \"color\": \"white\", \"qty\": 0 }\n { \"_id\": { \"$oid\": \"606b4cfbcd83be7518b958de\" }, \"color\": \"yellow\", \"qty\": 6 }\n { \"_id\": { \"$oid\": \"606b4cfbcd83be7518b958df\" }, \"color\": \"pink\", \"qty\": 0 }\n { \"_id\": { \"$oid\": \"606b4cfbcd83be7518b958e0\" }, \"color\": \"green\", \"qty\": 0 }\n { \"_id\": { \"$oid\": \"606b4cfbcd83be7518b958e1\" }, \"color\": \"black\", \"qty\": 8 }\n { \"_id\": { \"$oid\": \"606b4cfc1601f9443b5d6978\" }, \"color\": \"orange\", \"qty\": 10 }]" - }, - { - "lang": "kotlin", - "value": "data class PaintOrder(\n @BsonId val id: ObjectId = ObjectId(),\n val qty: Int,\n val color: String\n)\n" - }, - { - "lang": "kotlin", - "value": "val filter = Filters.eq(PaintOrder::color.name, \"orange\")\nval update = Updates.inc(PaintOrder::qty.name, 10)\nval options = UpdateOptions().upsert(true)\n\nval results = collection.updateOne(filter, update, options)\n\nprintln(results)\n" - }, - { - "lang": "console", - "value": " AcknowledgedUpdateResult{ matchedCount=0, modifiedCount=0, upsertedId=BsonObjectId{ value=606b4cfc1601f9443b5d6978 }}" - }, - { - "lang": "kotlin", - "value": "val filter = Filters.eq(PaintOrder::color.name, \"orange\")\nval update = Updates.inc(PaintOrder::qty.name, 10)\n\nval results = collection.updateOne(filter, update)\n\nprintln(results)\n" - }, - { - "lang": 
"console", - "value": "AcknowledgedUpdateResult{ matchedCount=0, modifiedCount=0, upsertedId=null }" - } - ], - "preview": "In this guide, you can learn how to perform an upsert with the\nMongoDB Kotlin driver.", - "tags": null, - "facets": { - "target_product": ["drivers"], - "programming_language": ["kotlin"] - } - }, - { - "slug": "fundamentals/crud/write-operations", - "title": "Write Operations", - "headings": [], - "paragraphs": "Insert Operations Delete Documents Modify Documents Update Arrays in a Document Insert or Update in a Single Operation Bulk Operations", - "code": [], - "preview": null, - "tags": null, - "facets": { - "target_product": ["drivers"], - "programming_language": ["kotlin"] - } - }, - { - "slug": "fundamentals/crud", - "title": "CRUD Operations", - "headings": [], - "paragraphs": "CRUD (Create, Read, Update, Delete) operations enable you to work with\ndata stored in MongoDB. Some operations combine aspects of read and write operations. See our\nguide on compound operations \nto learn more about these hybrid methods. Read Operations find and return\ndocuments stored in your database. Write Operations insert, modify,\nor delete documents in your database.", - "code": [], - "preview": "CRUD (Create, Read, Update, Delete) operations enable you to work with\ndata stored in MongoDB.", - "tags": null, - "facets": { - "target_product": ["drivers"], - "programming_language": ["kotlin"] - } - }, - { - "slug": "fundamentals/data-formats/codecs", - "title": "Codecs", - "headings": [ - "Overview", - "Codec", - "CodecRegistry", - "CodecProvider", - "Default Codec Registry", - "BsonTypeClassMap", - "Custom Codec Example" - ], - "paragraphs": "In this guide, you can learn about Codecs and the supporting classes that\nhandle the encoding and decoding of Kotlin objects to and from BSON data\nin the MongoDB Kotlin driver. The Codec abstraction allows you to map any Kotlin type to\na corresponding BSON type. You can use this to map your domain objects\ndirectly to and from BSON instead of using data classes or an intermediate\nmap-based object such as Document or BsonDocument . You can learn how to specify custom encoding and decoding logic using\nthe Codec abstraction and view example implementations in the following\nsections: Codec CodecRegistry CodecProvider Custom Codec Example The Codec interface contains abstract methods for serializing and\ndeserializing Kotlin objects to BSON data. You can define your conversion logic\nbetween BSON and your Kotlin object in your implementation of this interface. To implement the Codec interface, override the encode() , decode() ,\nand getEncoderClass() abstract methods. The encode() method requires the following parameters: This method uses the BsonWriter instance to send the encoded value to\nMongoDB and does not return a value. The decode() method returns your Kotlin object instance populated with the\nvalue from the BSON data. This method requires the following parameters: The getEncoderClass() method returns a class instance of the Kotlin class\nsince Kotlin cannot infer the type due to type erasure. See the following code examples that show how you can implement a custom\n Codec . The PowerStatus enum contains the values \"ON\" and \"OFF\" to represent\nthe states of an electrical switch. The PowerStatusCodec class implements Codec in order to convert\nthe Kotlin enum values to corresponding BSON boolean values. 
The\n encode() method converts a PowerStatus to a BSON boolean and the\n decode() method performs the conversion in the opposite direction. You can add an instance of the PowerStatusCodec to your CodecRegistry \nwhich contains a mapping between your Codec and the Kotlin object type to\nwhich it applies. Continue to the CodecRegistry \nsection of this page to see how you can include your Codec . For more information about the classes and interfaces in this section, see the\nfollowing API Documentation: Parameter Type Description writer An instance of a class that implements BsonWriter , an interface type\nthat exposes methods for writing a BSON document. For example, the\n BsonBinaryWriter implementation writes to a binary stream of data.\nUse this instance to write your BSON value using the appropriate\nwrite method. value The data that your implementation encodes. The type must match the type\nvariable assigned to your implementation. encoderContext Contains meta information about the Kotlin object data that it encodes\nto BSON including whether to store the current value in a\nMongoDB collection. Parameter Type Description bsonReader An instance of a class that implements BsonReader , an interface type\nthat exposes methods for reading a BSON document. For example, the\n BsonBinaryReader implementation reads from a binary stream of data. decoderContext Contains information about the BSON data that it decodes to a Kotlin\nobject. Codec BsonWriter BsonBinaryWriter EncoderContext BsonReader DecoderContext BsonBinaryReader A CodecRegistry is an immutable collection of Codec instances that\nencode and decode the Kotlin classes they specify. You can use any of the\nfollowing CodecRegistries class static factory methods to construct a\n CodecRegistry from the Codec instances contained in the associated\ntypes: The following code snippet shows how to construct a CodecRegistry using\nthe fromCodecs() method: In the preceding example, we assign the CodecRegistry the following Codec \nimplementations: You can retrieve the Codec instances from the CodecRegistry instance\nfrom the prior example using the following code: If you attempt to retrieve a Codec instance for a class that is not\nregistered, the get() method throws a CodecConfigurationException \nexception. For more information about the classes and interfaces in this section, see the\nfollowing API Documentation: fromCodecs() fromProviders() fromRegistries() IntegerCodec , a Codec that converts Integers and is part of the BSON package. PowerStatusCodec , our sample Codec \nthat converts Kotlin enum values to BSON booleans. CodecRegistries IntegerCodec A CodecProvider is an interface that contains abstract methods that create\n Codec instances and assign them to a CodecRegistry instance. Similar\nto the CodecRegistry , the BSON library uses the Codec instances\nretrieved by the get() method to convert between Kotlin and BSON data types. However, in cases in which you add a class that contains fields that require\ncorresponding Codec objects, you need to ensure that you instantiate the\n Codec objects for the class' fields before you instantiate the\n Codec for the class. You can use the CodecRegistry parameter in\nthe get() method to pass any of the Codec instances that the\n Codec relies on. 
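Editorial aside, not part of the recorded manifest content: a small sketch of how composed registries resolve codecs, complementing the provider discussion above. It uses only the BSON library's IntegerCodec and the driver's default registry.

import com.mongodb.MongoClientSettings
import org.bson.codecs.IntegerCodec
import org.bson.codecs.configuration.CodecRegistries

fun main() {
    // Registries are consulted in order, so custom codecs placed first
    // shadow the defaults for the types they declare.
    val registry = CodecRegistries.fromRegistries(
        CodecRegistries.fromCodecs(IntegerCodec()),
        MongoClientSettings.getDefaultCodecRegistry(),
    )

    // Lookup walks the composed registries until one supplies a codec;
    // an unregistered class raises CodecConfigurationException instead.
    val integerCodec = registry.get(Integer::class.java)
    println(integerCodec.encoderClass)
}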
The following code example shows how you can implement CodecProvider to\npass the MonolightCodec any Codec instances it needs in a\n CodecRegistry instance such as the PowerStatusCodec from our prior\nexample: To see a runnable example that demonstrates read and write operations using\nthese Codec classes, see the Custom Codec Example \nsection of this guide. The default codec registry is a set of CodecProvider classes that\nspecify conversion between commonly-used Kotlin and MongoDB types. The\ndriver automatically uses the default codec registry unless you specify\na different one. If you need to override the behavior of one or more Codec classes, but\nkeep the behavior from the default codec registry for the other classes,\nyou can specify all of the registries in order of precedence. For example,\nsuppose you wanted to override the default provider behavior of a Codec for\nenum types with your custom MyEnumCodec , you must add it to the registry\nlist prior to the default codec registry as shown in the example below: For more information about the classes and interfaces in this section, see\nthe following API documentation sections: CodecProvider Default codec registry The BsonTypeClassMap class contains a recommended mapping between BSON\nand Kotlin types. You can use this class in your custom Codec or\n CodecProvider to help you manage which Kotlin types to decode your BSON\ntypes to container classes that implement Iterable or Map such as\nthe Document class. You can add or modify the BsonTypeClassMap default mapping by passing a\n Map containing new or replacement entries. The following code snippet shows how you can retrieve the Kotlin class type\nthat corresponds to the BSON type in the default BsonTypeClassMap \ninstance: You can modify these mappings in your instance by specifying replacements in the\n BsonTypeClassMap constructor. The following code snippet shows how\nyou can replace the mapping for ARRAY in your BsonTypeClassMap \ninstance with the Set class: For a complete list of the default mappings, refer to the\n BsonTypeClassMap API Documentation. For an example of how the Document class uses BsonTypeClassMap , see\nthe driver source code for the following classes: DocumentCodecProvider DocumentCodec In this section, we show how you can implement Codec and CodecProvider \nto define the encoding and decoding logic for a custom Kotlin class. We also show\nhow you can specify and use your custom implementations to perform insert\nand retrieve operations. The following code snippet shows our example custom class called Monolight \nand its fields that we want to store and retrieve from a MongoDB collection: This class contains the following fields, each of which we need to assign a\n Codec : The following code example shows how we can implement a Codec for the\n Monolight class. Note that the constructor expects an instance of\n CodecRegistry from which it retrieves the Codec instances it needs\nto encode and decode its fields: To ensure we make the Codec instances for the fields available for\n Monolight , we implement a custom CodecProvider shown in the following\ncode example: After defining the conversion logic, we can perform the following: The following example class contains code that assigns the\n MonolightCodecProvider to the MongoCollection instance by passing it\nto the withCodecRegistry() method. 
The example class also inserts and\nretrieves data using the Monolight class and associated codecs: For more information about the methods and classes mentioned in this section,\nsee the following API Documentation: As an alternative to implementing custom codecs, you can use\nKotlin serialization to handle your data encoding and decoding with\n @Serializable classes. You might choose Kotlin serialization if you are\nalready familiar with the framework or prefer to use an idiomatic Kotlin approach.\nSee the Kotlin Serialization \ndocumentation for more information. powerStatus describes whether the light is switched \"on\" or \"off\" for\nwhich we use the PowerStatusCodec that\nconverts specific enum values to BSON booleans. colorTemperature describes the color of the light and contains an\n Int value for which we use the IntegerCodec included in the\nBSON library. Store data from instances of Monolight into MongoDB Retrieve data from MongoDB into instances of Monolight withCodecRegistry() MongoClientSettings.getDefaultCodecRegistry() Codec CodecProvider", - "code": [ - { - "lang": "kotlin", - "value": "enum class PowerStatus {\n ON,\n OFF\n}\n" - }, - { - "lang": "kotlin", - "value": "class PowerStatusCodec : Codec<PowerStatus> {\n override fun encode(writer: BsonWriter, value: PowerStatus, encoderContext: EncoderContext) = writer.writeBoolean(value == PowerStatus.ON)\n\n override fun decode(reader: BsonReader, decoderContext: DecoderContext): PowerStatus {\n return when (reader.readBoolean()) {\n true -> PowerStatus.ON\n false -> PowerStatus.OFF\n }\n }\n\n override fun getEncoderClass(): Class<PowerStatus> = PowerStatus::class.java\n}\n" - }, - { - "lang": "kotlin", - "value": "val codecRegistry = CodecRegistries.fromCodecs(IntegerCodec(), PowerStatusCodec())\n" - }, - { - "lang": "kotlin", - "value": "val powerStatusCodec = codecRegistry.get(PowerStatus::class.java)\nval integerCodec = codecRegistry.get(Integer::class.java)\n" - }, - { - "lang": "kotlin", - "value": "class MonolightCodec(registry: CodecRegistry) : Codec<Monolight> {\n private val powerStatusCodec: Codec<PowerStatus>\n private val integerCodec: Codec<Int>\n\n init {\n powerStatusCodec = registry[PowerStatus::class.java]\n integerCodec = IntegerCodec()\n }\n\n override fun encode(writer: BsonWriter, value: Monolight, encoderContext: EncoderContext) {\n writer.writeStartDocument()\n writer.writeName(\"powerStatus\")\n powerStatusCodec.encode(writer, value.powerStatus, encoderContext)\n writer.writeName(\"colorTemperature\")\n integerCodec.encode(writer, value.colorTemperature, encoderContext)\n writer.writeEndDocument()\n }\n\n override fun decode(reader: BsonReader, decoderContext: DecoderContext): Monolight {\n val monolight = Monolight()\n reader.readStartDocument()\n while (reader.readBsonType() != BsonType.END_OF_DOCUMENT) {\n when (reader.readName()) {\n \"powerStatus\" -> monolight.powerStatus = powerStatusCodec.decode(reader, decoderContext)\n \"colorTemperature\" -> monolight.colorTemperature = integerCodec.decode(reader, decoderContext)\n \"_id\" -> reader.readObjectId()\n }\n }\n reader.readEndDocument()\n return monolight\n }\n\n override fun getEncoderClass(): Class<Monolight> = Monolight::class.java\n}\n" - }, - { - "lang": "kotlin", - "value": "val newRegistry = CodecRegistries.fromRegistries(\n CodecRegistries.fromCodecs(MyEnumCodec()),\n MongoClientSettings.getDefaultCodecRegistry()\n)\n" - }, - { - "lang": "kotlin", - "value": "val bsonTypeClassMap = BsonTypeClassMap()\nval clazz = bsonTypeClassMap[BsonType.ARRAY]\nprintln(\"Class name: \" + clazz.name)\n" - }, - { -
"lang": "console", - "value": "Java type: java.util.List" - }, - { - "lang": "kotlin", - "value": "val replacements = mutableMapOf>(BsonType.ARRAY to MutableSet::class.java)\nval bsonTypeClassMap = BsonTypeClassMap(replacements)\nval clazz = bsonTypeClassMap[BsonType.ARRAY]\nprintln(\"Class name: \" + clazz.name)\n" - }, - { - "lang": "console", - "value": "Java type: java.util.Set" - }, - { - "lang": "kotlin", - "value": "data class Monolight(\n var powerStatus: PowerStatus = PowerStatus.OFF,\n var colorTemperature: Int? = null\n) {\n override fun toString(): String = \"Monolight [powerStatus=$powerStatus, colorTemperature=$colorTemperature]\"\n}\n" - }, - { - "lang": "kotlin", - "value": "class MonolightCodec(registry: CodecRegistry) : Codec {\n private val powerStatusCodec: Codec\n private val integerCodec: Codec\n\n init {\n powerStatusCodec = registry[PowerStatus::class.java]\n integerCodec = IntegerCodec()\n }\n\n override fun encode(writer: BsonWriter, value: Monolight, encoderContext: EncoderContext) {\n writer.writeStartDocument()\n writer.writeName(\"powerStatus\")\n powerStatusCodec.encode(writer, value.powerStatus, encoderContext)\n writer.writeName(\"colorTemperature\")\n integerCodec.encode(writer, value.colorTemperature, encoderContext)\n writer.writeEndDocument()\n }\n\n override fun decode(reader: BsonReader, decoderContext: DecoderContext): Monolight {\n val monolight = Monolight()\n reader.readStartDocument()\n while (reader.readBsonType() != BsonType.END_OF_DOCUMENT) {\n when (reader.readName()) {\n \"powerStatus\" -> monolight.powerStatus = powerStatusCodec.decode(reader, decoderContext)\n \"colorTemperature\" -> monolight.colorTemperature = integerCodec.decode(reader, decoderContext)\n \"_id\" -> reader.readObjectId()\n }\n }\n reader.readEndDocument()\n return monolight\n }\n\n override fun getEncoderClass(): Class = Monolight::class.java\n}\n" - }, - { - "lang": "kotlin", - "value": "class MonolightCodecProvider : CodecProvider {\n @Suppress(\"UNCHECKED_CAST\")\n override fun get(clazz: Class, registry: CodecRegistry): Codec? {\n return if (clazz == Monolight::class.java) {\n MonolightCodec(registry) as Codec\n } else null // Return null when not a provider for the requested class\n }\n}\n" - }, - { - "lang": "kotlin", - "value": "fun main() = runBlocking {\n val mongoClient = MongoClient.create(\"\")\n val codecRegistry = CodecRegistries.fromRegistries(\n CodecRegistries.fromCodecs(IntegerCodec(), PowerStatusCodec()),\n CodecRegistries.fromProviders(MonolightCodecProvider()),\n MongoClientSettings.getDefaultCodecRegistry()\n )\n val database = mongoClient.getDatabase(\"codecs_example_products\")\n val collection = database.getCollection(\"monolights\")\n .withCodecRegistry(codecRegistry)\n\n // Construct and insert an instance of Monolight\n val myMonolight = Monolight(PowerStatus.ON, 5200)\n collection.insertOne(myMonolight)\n\n // Retrieve one or more instances of Monolight\n val lights = collection.find().toList()\n println(lights)\n}\n" - }, - { - "lang": "none", - "value": "[Monolight [powerStatus=ON, colorTemperature=5200]]" - } - ], - "preview": "In this guide, you can learn about Codecs and the supporting classes that\nhandle the encoding and decoding of Kotlin objects to and from BSON data\nin the MongoDB Kotlin driver. The Codec abstraction allows you to map any Kotlin type to\na corresponding BSON type. 
You can use this to map your domain objects\ndirectly to and from BSON instead of using data classes or an intermediate\nmap-based object such as Document or BsonDocument.", - "tags": null, - "facets": { - "target_product": ["drivers"], - "programming_language": ["kotlin"] - } - }, - { - "slug": "fundamentals/data-formats/document-data-format-bson", - "title": "Document Data Format: BSON", - "headings": [ - "Overview", - "BSON Data Format", - "MongoDB and BSON", - "Install the BSON Library" - ], - "paragraphs": "In this guide, you can learn about the BSON data format, how MongoDB\nuses it, and how to install the BSON library independently of the\nMongoDB Kotlin driver. BSON , or Binary JSON, is the data format that MongoDB uses to organize\nand store data. This data format includes all JSON data structure types and\nadds support for types including dates, different size integers, ObjectIds, and\nbinary data. For a complete list of supported types, see the\n BSON Types server manual page. The binary format is not human-readable, but you can use the\n BSON library to convert it to a JSON\nrepresentation. You can read more about the relationship between these\nformats in our article on JSON and BSON . The MongoDB Kotlin driver, which uses the BSON library, allows you to work\nwith BSON data by using one of the object types that implements the\n BSON interface ,\nincluding: For more information on using these object types, see our\n Documents guide . Document (BSON library package) BsonDocument (BSON library package) RawBsonDocument (BSON library package) JsonObject (BSON library package) These instructions show you how to add the BSON library as a dependency to\nyour project. If you added the MongoDB Kotlin driver as a dependency to your\nproject, you can skip this step since the BSON library is already included\nas a required dependency of the driver. For instructions on how to add the\nMongoDB Kotlin driver as a dependency to your project, see the\n driver installation section of our Quick Start\nguide. We recommend that you use the Maven or\n Gradle build automation tool to manage your project's\ndependencies. Select from the following tabs to see the dependency declaration\nfor that tool: If you are not using one of the preceding tools, you can include it in\nyour project by downloading the JAR file directly from the\n sonatype repository . The following snippet shows the dependency declaration in the\n dependencies section of your pom.xml file. 
The following snippet shows the\n dependencies object in your build.gradle file.", - "code": [ - { - "lang": "xml", - "value": "<dependencies>\n <dependency>\n <groupId>org.mongodb</groupId>\n <artifactId>bson</artifactId>\n <version>5.1.2</version>\n </dependency>\n</dependencies>" - }, - { - "lang": "kotlin", - "value": "dependencies {\n implementation(\"org.mongodb:bson:5.1.2\")\n}" - } - ], - "preview": "In this guide, you can learn about the BSON data format, how MongoDB\nuses it, and how to install the BSON library independently of the\nMongoDB Kotlin driver.", - "tags": null, - "facets": { - "target_product": ["drivers"], - "programming_language": ["kotlin"] - } - }, - { - "slug": "fundamentals/data-formats/document-data-format-data-class", - "title": "Document Data Format: Data Classes", - "headings": [ - "Overview", - "Serialize and Deserialize a Data Class", - "Example Data Class", - "Insert a Data Class", - "Retrieve a Data Class", - "Specify Component Conversion Using Annotations", - "Example Annotated Data Class", - "Insert an Annotated Data Class", - "Retrieve an Annotated Data Class", - "Operations with Recursive Types" - ], - "paragraphs": "In this guide, you can learn how to store and retrieve data in the\nMongoDB Kotlin Driver using Kotlin data classes . The driver natively supports encoding and decoding Kotlin data classes for\nMongoDB read and write operations using the default codec registry . The\ndefault codec registry is a collection of classes called codecs that\ndefine how to encode and decode Kotlin and Java types. The code examples in this section reference the following sample data class, which\ndescribes a data storage device: You can insert a DataStorage instance as shown in the following code: You can retrieve documents as DataStorage instances and print them\nas shown in the following code: You specify a class for documents returned from a collection, even if it\nis different than the class you specified when retrieving the\ncollection. The following example performs an update to the document\nrepresented by the DataStorage data class in the previous example\nand returns the updated document as a NewDataStorage type. The\noperation adds the releaseDate field to the document with a\n name value of tape : For more information about this feature, see Specify Return Type in the Databases and Collections guide. This section describes the annotations you can use to configure the\nserialization behavior of data classes and provides an example to\ndemonstrate the annotation behavior. You can use the following annotations on data classes: For reference information on these property annotations,\nrefer to the org.bson.codecs.pojo.annotations \npackage. Annotation Name Description BsonId Marks a property to serialize as the _id property. BsonProperty Specifies a custom document field name when converting the data class\nfield to BSON. BsonRepresentation Specifies the BSON type MongoDB uses to store the value. Use this\nannotation only when you need to store a value as a different\nBSON type than the data class property. Your code might throw an exception if you include the\n BsonRepresentation annotation on a property that you store\nas the same type as the data class property. 
The code examples in this section reference the following sample data class, which\ndescribes a network device: You can insert a NetworkDevice instance as shown in the following code: The inserted document in MongoDB should resemble the following: You can retrieve documents as NetworkDevice instances and print them\nas shown in the following code: The driver natively supports encoding and decoding of recursively\ndefined data classes without causing runtime recursion. This support extends\nto cycles of multiple data class types in type definitions. The following\ncode provides an example of a recursive data class design: You can perform read and write operations on recursively defined data classes the same\nway you would for other data classes. The following code shows how you can\nexecute a find operation on a collection of DataClassTree types:", - "code": [ - { - "lang": "kotlin", - "value": "data class DataStorage(val productName: String, val capacity: Double)\n" - }, - { - "lang": "kotlin", - "value": "val collection = database.getCollection<DataStorage>(\"data_storage\")\nval record = DataStorage(\"tape\", 5.0)\ncollection.insertOne(record)\n" - }, - { - "lang": "kotlin", - "value": "val collection = database.getCollection<DataStorage>(\"data_storage_devices\")\n\n// Retrieve and print the documents as data classes\nval resultsFlow = collection.find()\nresultsFlow.collect { println(it) }\n" - }, - { - "lang": "console", - "value": "DataStorage(productName=tape, capacity=5.0)" - }, - { - "lang": "kotlin", - "value": "// Define a data class for returned documents\ndata class NewDataStorage(\n val productName: String,\n val capacity: Double,\n val releaseDate: LocalDate\n)\n\nval filter = Filters.eq(DataStorage::productName.name, \"tape\")\nval update = Updates.currentDate(\"releaseDate\")\nval options = FindOneAndUpdateOptions().returnDocument(ReturnDocument.AFTER)\n\n// Specify the class for returned documents as the type parameter in withDocumentClass()\nval result = collection\n .withDocumentClass<NewDataStorage>()\n .findOneAndUpdate(filter, update, options)\n\nprintln(\"Updated document: ${result}\")\n" - }, - { - "lang": "console", - "value": "Updated document: NewDataStorage(productName=tape, capacity=5.0, releaseDate=2023-06-15)" - }, - { - "lang": "kotlin", - "value": "data class NetworkDevice(\n @BsonId\n @BsonRepresentation(BsonType.OBJECT_ID)\n val deviceId: String,\n val name: String,\n @BsonProperty(\"type\")\n val deviceType: String\n)\n" - }, - { - "lang": "json", - "value": "{\n _id: ObjectId(\"fedc...\"),\n name: 'Enterprise Wi-fi',\n type: 'router'\n}" - }, - { - "lang": "kotlin", - "value": "val collection = database.getCollection<NetworkDevice>(\"network_devices\")\n\n// Insert the record\nval deviceId = ObjectId().toHexString()\nval device = NetworkDevice(deviceId, \"Enterprise Wi-fi\", \"router\")\ncollection.insertOne(device)\n" - }, - { - "lang": "kotlin", - "value": "val collection = database.getCollection<NetworkDevice>(\"network_devices\")\n\n// Return all documents in the collection as data classes\nval resultsFlow = collection.find()\nresultsFlow.collect { println(it) }\n" - }, - { - "lang": "console", - "value": "NetworkDevice(deviceId=645cf..., name=Enterprise Wi-fi, deviceType=router)" - }, - { - "lang": "kotlin", - "value": "data class DataClassTree(\n val content: String,\n val left: DataClassTree?,\n val right: DataClassTree?\n)\n" - }, - { - "lang": "kotlin", - "value": "val collection = database.getCollection<DataClassTree>(\"myCollection\")\n\nval filter = Filters.eq(\"left.left.right.content\", \"high german\")\nval resultsFlow = 
collection.find(filter)\nresultsFlow.collect { println(it) }\n" - }, - { - "lang": "console", - "value": "DataClassTree(content=indo-european, left=DataClassTree(content=germanic, left=DataClassTree(content=german, left=null, right=DataClassTree(content=high german, ...)), right=...)" - } - ], - "preview": "In this guide, you can learn how to store and retrieve data in the\nMongoDB Kotlin Driver using Kotlin data classes.", - "tags": null, - "facets": { - "target_product": ["drivers"], - "programming_language": ["kotlin"] - } - }, - { - "slug": "fundamentals/data-formats/document-data-format-extended-json", - "title": "Document Data Format: Extended JSON", - "headings": [ - "Overview", - "Extended JSON Formats", - "Extended JSON Examples", - "Read Extended JSON", - "Using the Document Classes", - "Using the BSON Library", - "Write Extended JSON", - "Using the Document Classes", - "Using the BSON Library", - "Custom BSON Type Conversion" - ], - "paragraphs": "In this guide, you can learn how to use the Extended JSON format in the\nMongoDB Kotlin driver. JSON is a data format that represents the values of objects, arrays, numbers,\nstrings, booleans, and nulls. The Extended JSON format defines a reserved\nset of keys prefixed with \" $ \" to represent field type information that\ndirectly corresponds to each type in BSON, the format that MongoDB uses to\nstore data. This guide explains the following topics: For more information on the difference between these formats, see our\n article on JSON and BSON . The different MongoDB Extended JSON formats How to use the BSON library to convert between Extended JSON and Kotlin objects How to create a custom conversion of BSON types MongoDB Extended JSON features different string formats to represent BSON data.\nEach of the different formats conform to the JSON RFC\nand meet specific use cases. The extended format, also known as the\n canonical format, features specific representations for every BSON type\nfor bidirectional conversion without loss of information. The Relaxed mode \nformat is more concise and closer to ordinary JSON, but does not represent\nall the type information such as the specific byte size of number fields. See the following table to see a description of each format: For more detailed information on these formats, see the following\nresources: Name Description Extended Relaxed Mode Shell Strict The driver parses the $uuid Extended JSON type from a string to a\n BsonBinary object of binary subtype 4. For more information about $uuid field\nparsing, see the\n special rules for parsing $uuid fields \nsection in the extended JSON specification. JSON RFC Official Documentation MongoDB Extended JSON Server Manual Entry BsonBinary API Documentation Extended JSON specification GitHub Documentation The following examples show a document containing an ObjectId, date, and long\nnumber field represented in each Extended JSON format. Click the tab that\ncorresponds to the format of the example you want to see: You can read an Extended JSON string into a Kotlin document object by calling\nthe parse() static method from either the Document or BsonDocument \nclass, depending on which object type you need. This method parses the Extended\nJSON string in any of the formats and returns an instance of that class\ncontaining the data. The following example shows how you can use the Document class to read\nan example Extended JSON string into a Document object using the\n parse() method: For more information, see our Fundamentals page\non Documents . 
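For comparison, here is a short sketch, with illustrative values not taken from this page, of the BsonDocument counterpart to Document.parse(), which returns BSON library types instead of loosely typed values:

import org.bson.BsonDocument

val ejsonStr = """{ "myNumber": { "${"$"}numberLong": "4794261" } }"""

// parse() accepts any Extended JSON format, as with Document.parse()
val bsonDoc = BsonDocument.parse(ejsonStr)

// The typed accessor returns a BsonInt64, so no cast from Any is needed
println(bsonDoc.getInt64("myNumber").value) // 4794261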
You can also read an Extended JSON string into Kotlin objects without using\nthe MongoDB Kotlin driver's document classes by using the JsonReader class.\nThis class contains methods to sequentially parse the fields and values\nin any format of the Extended JSON string, and returns them as Kotlin objects.\nThe driver's document classes also use this class to parse Extended JSON. The following code example shows how you can use the JsonReader class to convert\nan Extended JSON string into Kotlin objects: For more information, see the JsonReader API Documentation. You can write an Extended JSON string from an instance of Document or\n BsonDocument by calling the toJson() method, optionally passing it an\ninstance of JsonWriterSettings to specify the Extended JSON format. In this example, we output the Extended JSON in the Relaxed mode format. You can also output an Extended JSON string from data in Kotlin objects using\nthe BSON library with the JsonWriter class. To construct an instance\nof JsonWriter , pass a subclass of a Java Writer to specify how\nyou want to output the Extended JSON. You can optionally pass a JsonWriterSettings \ninstance to specify options such as the Extended JSON format. By default, the\n JsonWriter uses the Relaxed mode format. The MongoDB Kotlin driver's\ndocument classes also use this class to convert BSON to Extended JSON. The following code example shows how you can use JsonWriter to create an\nExtended JSON string and output it to System.out . We specify the format\nby passing the outputMode() builder method the JsonMode.EXTENDED constant: For more information about the methods and classes mentioned in this section,\nsee the following API Documentation: JsonWriter JsonWriterSettings outputMode() In addition to specifying the outputMode() to format the JSON output, you\ncan further customize the output by adding converters to your\n JsonWriterSettings.Builder . These converter methods detect the Kotlin types\nand execute the logic defined by the Converter passed to them. The following sample code shows how to append converters, defined as lambda\nexpressions, to simplify the Relaxed mode JSON output. 
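As an aside before the converter sample that this page provides, the following hedged sketch shows how the output mode alone changes toJson() output for the same document; the document contents are illustrative:

import org.bson.Document
import org.bson.json.JsonMode
import org.bson.json.JsonWriterSettings

val doc = Document("myNumber", 11223344L)

// EXTENDED keeps full type information; RELAXED favors plain JSON
for (mode in listOf(JsonMode.EXTENDED, JsonMode.RELAXED)) {
    val settings = JsonWriterSettings.builder().outputMode(mode).build()
    println("$mode: ${doc.toJson(settings)}")
}
// EXTENDED: {"myNumber": {"$numberLong": "11223344"}}
// RELAXED: {"myNumber": 11223344}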
For more information about the methods and classes mentioned in this section,\nsee the following API Documentation: Converter JsonWriterSettings.Builder", - "code": [ - { - "lang": "json", - "value": "{\n \"_id\": { \"$oid\": \"573a1391f29313caabcd9637\" },\n \"createdAt\": { \"$date\": { \"$numberLong\": \"1601499609\" }},\n \"numViews\": { \"$numberLong\": \"36520312\" }\n}" - }, - { - "lang": "json", - "value": "{\n \"_id\": { \"$oid\": \"573a1391f29313caabcd9637\" },\n \"createdAt\": { \"$date\": \"2020-09-30T18:22:51.648Z\" },\n \"numViews\": 36520312\n}" - }, - { - "lang": "json", - "value": "{\n \"_id:\": ObjectId(\"573a1391f29313caabcd9637\"),\n \"createdAt\": ISODate(\"2020-09-30T18:22:51.648Z\"),\n \"numViews\": NumberLong(\"36520312\")\n}" - }, - { - "lang": "json", - "value": "{\n \"_id:\": { \"$oid\": \"573a1391f29313caabcd9637\" },\n \"createdAt\": { \"$date\": 1601499609 },\n \"numViews\": { \"$numberLong\": \"36520312\" }\n}" - }, - { - "lang": "kotlin", - "value": "val ejsonStr = \"\"\"\n { \"_id\": { \"${\"$\"}oid\": \"507f1f77bcf86cd799439011\"},\n \"myNumber\": {\"${\"$\"}numberLong\": \"4794261\" }}\n \"\"\".trimIndent()\n\nval doc = Document.parse(ejsonStr)\n\nprintln(doc)\n" - }, - { - "lang": "console", - "value": "Document{{_id=507f1f77bcf86cd799439011, myNumber=4794261}}" - }, - { - "lang": "kotlin", - "value": "val ejsonStr = \"\"\"\n { \"_id\": { \"${\"$\"}oid\": \"507f1f77bcf86cd799439011\"},\n \"myNumber\": {\"${\"$\"}numberLong\": \"4794261\" }}\n \"\"\".trimIndent()\n\nval jsonReader = JsonReader(ejsonStr)\n\njsonReader.readStartDocument()\n\njsonReader.readName(\"_id\")\nval id = jsonReader.readObjectId()\njsonReader.readName(\"myNumber\")\nval myNumber = jsonReader.readInt64()\n\njsonReader.readEndDocument()\n\nprintln(id.toString() + \" is type: \" + id.javaClass.name)\nprintln(myNumber.toString() + \" is type: \" + myNumber.javaClass.name)\n\njsonReader.close()\n" - }, - { - "lang": "console", - "value": "507f1f77bcf86cd799439011 is type: org.bson.types.ObjectId\n4794261 is type: java.lang.Long" - }, - { - "lang": "kotlin", - "value": "val myDoc = Document().append(\"_id\", ObjectId(\"507f1f77bcf86cd799439012\"))\n .append(\"myNumber\", 11223344)\n\nval settings = JsonWriterSettings.builder().outputMode(JsonMode.RELAXED).build()\nmyDoc.toJson(settings)\n" - }, - { - "lang": "javascript", - "value": "{\"_id\": {\"$oid\": \"507f1f77bcf86cd799439012\"}, \"myNumber\": 11223344}" - }, - { - "lang": "kotlin", - "value": "val settings = JsonWriterSettings.builder().outputMode(JsonMode.EXTENDED).build()\n\nJsonWriter(BufferedWriter(OutputStreamWriter(System.out)), settings).use { jsonWriter ->\n jsonWriter.writeStartDocument()\n jsonWriter.writeObjectId(\"_id\", ObjectId(\"507f1f77bcf86cd799439012\"))\n jsonWriter.writeInt64(\"myNumber\", 11223344)\n jsonWriter.writeEndDocument()\n jsonWriter.flush()\n}\n" - }, - { - "lang": "javascript", - "value": "{\"_id\": {\"$oid\": \"507f1f77bcf86cd799439012\"}, \"myNumber\": {\"$numberLong\": \"11223344\"}}" - }, - { - "lang": "kotlin", - "value": "val settings = JsonWriterSettings.builder()\n .outputMode(JsonMode.RELAXED)\n .objectIdConverter { value, writer -> writer.writeString(value.toHexString()) }\n .timestampConverter { value, writer ->\n val ldt = LocalDateTime.ofInstant(Instant.ofEpochSecond(value.time.toLong()), ZoneOffset.UTC)\n writer.writeString(ldt.format(DateTimeFormatter.ISO_DATE_TIME))\n }\n .build()\n\nval doc = Document()\n .append(\"_id\", ObjectId(\"507f1f77bcf86cd799439012\"))\n 
.append(\"createdAt\", BsonTimestamp(1601516589,1))\n .append(\"myNumber\", 4794261)\n\nprintln(doc.toJson(settings))\n" - }, - { - "lang": "javascript", - "value": "{\"_id\": \"507f1f77bcf86cd799439012\", \"createdAt\": \"2020-10-01T01:43:09\", \"myNumber\": 4794261}\n\n// Without specifying the converters, the Relaxed mode JSON output\n// should look something like this:\n{\"_id\": {\"$oid\": \"507f1f77bcf86cd799439012\"}, \"createdAt\": {\"$timestamp\": {\"t\": 1601516589, \"i\": 1}}, \"myNumber\": 4794261}" - } - ], - "preview": "In this guide, you can learn how to use the Extended JSON format in the\nMongoDB Kotlin driver.", - "tags": null, - "facets": { - "target_product": ["drivers"], - "programming_language": ["kotlin"] - } - }, - { - "slug": "fundamentals/data-formats/documents", - "title": "Documents", - "headings": [ - "Overview", - "Document", - "BsonDocument", - "JsonObject", - "Summary" - ], - "paragraphs": "In this guide, you can learn how to use documents in the\nMongoDB Kotlin driver. A MongoDB document is a data structure that contains key/value fields in\nbinary JSON (BSON) format. You can use documents and the data they contain\nin their fields to store data as well as issue commands or queries in\nMongoDB. For more information on the terminology, structure, and limitations of documents,\nread our page on Documents in the MongoDB manual. The MongoDB Kotlin driver and BSON library include the following classes that help you\naccess and manipulate the BSON data in documents: While you can use any of these classes in your application, we recommend\nthat you use the Document class since it can concisely represent\ndynamically structured documents of any complexity. It implements the\n Map interface which enables it to use loosely-typed\nvalues. Name Package Implements Map Recommended Usage Document org.bson Yes, implements Map When you want a flexible and concise data representation. BsonDocument org.bson Yes, implements Map When you need a type-safe API. JsonObject org.bson.json No When you only want to work with JSON strings. The Document class offers a flexible representation of a BSON document.\nYou can access and manipulate fields using Kotlin types from the standard\nlibrary with this class. See the following table for mappings between\nfrequently-used BSON and Kotlin types: In the following code snippet, we show how to instantiate and build a sample\n Document instance representing a document containing several\ndifferent field types: To insert this document into a collection, instantiate a collection\nusing the getCollection() method and call the insertOne operation as follows: Once you perform a successful insert, you can retrieve the sample document\ndata from the collection using the following code: For more information on retrieving and manipulating MongoDB data, see our\n CRUD guide . For more information about the methods and classes mentioned in this section,\nsee the following API Documentation: BSON type Kotlin type Array kotlin.collections.List Binary org.bson.types.Binary Boolean kotlin.Boolean Date java.time.LocalDateTime Document org.bson.Document Double kotlin.Double Int32 kotlin.Int Int64 kotlin.Long Null null ObjectId org.bson.types.ObjectId String kotlin.String The preceding code sample uses helper methods that check the returned type\nand throw an exception if it is unable to cast the field value.\nYou can call the get() method to retrieve values as type\n Object and to skip type checking. 
Document getCollection() get() The BsonDocument class provides a type-safe API to access and manipulate\na BSON document. You need to specify the BSON type from the BSON\nlibrary for each field. See the following table for mappings between\nfrequently-used BSON and BSON library types: In the following code snippet, we show how to instantiate and build a sample\n BsonDocument instance representing a document containing several\ndifferent field types: To insert this document into a collection, instantiate a collection\nusing the getCollection() method specifying the BsonDocument \nclass as the documentClass parameter. Then, call the\n insertOne operation as follows: Once you perform a successful insert, you can retrieve the sample document\ndata from the collection using the following code: For more information about the methods and classes mentioned in this section,\nsee the following API Documentation: BSON type BSON library type Array org.bson.BsonArray Binary org.bson.BsonBinary Boolean org.bson.Boolean Date (long value) org.bson.BsonDateTime Document org.bson.BsonDocument Double org.bson.BsonDouble Int32 org.bson.BsonInt32 Int64 org.bson.BsonInt64 Null org.bson.BsonNull ObjectId org.bson.BsonObjectId String org.bson.BsonString The preceding code sample uses helper methods that check the returned type\nand throw a BsonInvalidOperationException if it is unable to cast\nthe field value. You can call the get() method to retrieve values as type\n BsonValue and to skip type checking. BsonDocument getCollection() BsonInvalidOperationException get() BsonValue The JsonObject class acts as a wrapper for JSON strings.\nIf you only want to work with JSON data, you can use JsonObject \nto avoid unnecessary data conversion to a Map object. By default, JsonObject stores Extended JSON .\nYou can customize the format of JSON in JsonObject by specifying a\n JsonObjectCodec and passing it a JsonWriterSettings \nobject. For more information on JSON formats, see\nour Extended JSON guide . In the following code snippet, we show how to instantiate a sample JsonObject \ninstance wrapping an Extended JSON string containing different types of key value pairs: To insert this document into a collection, instantiate a collection\nusing the getCollection() method specifying the JsonObject class\nas the documentClass parameter. Then, call the\n insertOne operation as follows: Once you perform a successful insert, you can retrieve the sample JSON data from the\ncollection. While you can use any class that extends Bson to specify your query,\nhere is how to query your data using a JsonObject : For more information about the methods and classes mentioned in this section,\nsee the following API Documentation: JsonObject JsonObjectCodec JsonWriterSettings getCollection() In this guide, we covered the following topics on classes you can use to\nwork with BSON data: Described Kotlin classes you can use to work with MongoDB documents and\nwhy you might prefer one over the other. 
Provided usage examples for each class on building documents containing\nmultiple types, inserting them into a collection, and\nretrieving/accessing their typed fields.", - "code": [ - { - "lang": "kotlin", - "value": "val author = Document(\"_id\", ObjectId())\n .append(\"name\", \"Gabriel Garc\u00eda M\u00e1rquez\")\n .append(\n \"dateOfDeath\",\n LocalDateTime.of(2014, 4, 17, 4, 0)\n )\n .append(\n \"novels\", listOf(\n Document(\"title\", \"One Hundred Years of Solitude\").append(\"yearPublished\", 1967),\n Document(\"title\", \"Chronicle of a Death Foretold\").append(\"yearPublished\", 1981),\n Document(\"title\", \"Love in the Time of Cholera\").append(\"yearPublished\", 1985)\n )\n )\n" - }, - { - "lang": "kotlin", - "value": "// val mongoClient = \n\nval database = mongoClient.getDatabase(\"fundamentals_data\")\nval collection = database.getCollection(\"authors\")\nval result = collection.insertOne(author)\n" - }, - { - "lang": "kotlin", - "value": "val doc = collection.find(Filters.eq(\"name\", \"Gabriel Garc\u00eda M\u00e1rquez\")).firstOrNull()\ndoc?.let {\n println(\"_id: ${it.getObjectId(\"_id\")}, name: ${it.getString(\"name\")}, dateOfDeath: ${it.getDate(\"dateOfDeath\")}\")\n\n it.getList(\"novels\", Document::class.java).forEach { novel ->\n println(\"title: ${novel.getString(\"title\")}, yearPublished: ${novel.getInteger(\"yearPublished\")}\")\n }\n}\n" - }, - { - "lang": "none", - "value": "_id: 5fb5fad05f734e3794741a35, name: Gabriel Garc\u00eda M\u00e1rquez, dateOfDeath: Thu Apr 17 00:00:00 EDT 2014\ntitle: One Hundred Years of Solitude, yearPublished: 1967\ntitle: Chronicle of a Death Foretold, yearPublished: 1981\ntitle: Love in the Time of Cholera, yearPublished: 1985" - }, - { - "lang": "kotlin", - "value": "val author = BsonDocument()\n .append(\"_id\", BsonObjectId())\n .append(\"name\", BsonString(\"Gabriel Garc\u00eda M\u00e1rquez\"))\n .append(\n \"dateOfDeath\",\n BsonDateTime(\n LocalDateTime.of(2014, 4, 17, 0, 0).atZone(ZoneId.of(\"America/New_York\")).toInstant().toEpochMilli()\n )\n )\n .append(\n \"novels\", BsonArray(\n listOf(\n BsonDocument().append(\"title\", BsonString(\"One Hundred Years of Solitude\"))\n .append(\"yearPublished\", BsonInt32(1967)),\n BsonDocument().append(\"title\", BsonString(\"Chronicle of a Death Foretold\"))\n .append(\"yearPublished\", BsonInt32(1981)),\n BsonDocument().append(\"title\", BsonString(\"Love in the Time of Cholera\"))\n .append(\"yearPublished\", BsonInt32(1985))\n )\n )\n )\n" - }, - { - "lang": "kotlin", - "value": "// val mongoClient = \n\nval database = mongoClient.getDatabase(\"fundamentals_data\")\nval collection = database.getCollection(\"authors\")\n\nval result: InsertOneResult = collection.insertOne(author)\n" - }, - { - "lang": "kotlin", - "value": "// \n\nval doc = collection.find(Filters.eq(\"name\", \"Gabriel Garc\u00eda M\u00e1rquez\")).firstOrNull()\ndoc?.let {\n println(\"_id: ${it.getObjectId(\"_id\").value}, name: ${it.getString(\"name\").value}, dateOfDeath: ${Instant.ofEpochMilli(it.getDateTime(\"dateOfDeath\").value).atZone(ZoneId.of(\"America/New_York\")).toLocalDateTime()}\")\n\n it.getArray(\"novels\").forEach { novel ->\n val novelDocument = novel.asDocument()\n println(\"title: ${novelDocument.getString(\"title\").value}, yearPublished: ${novelDocument.getInt32(\"yearPublished\").value}\")\n }\n}\n" - }, - { - "lang": "none", - "value": "_id: 5fb5fad05f734e3794741a35, name: Gabriel Garc\u00eda M\u00e1rquez, dateOfDeath: 2014-04-17T00:00\ntitle: One Hundred Years of Solitude, 
yearPublished: 1967\ntitle: Chronicle of a Death Foretold, yearPublished: 1981\ntitle: Love in the Time of Cholera, yearPublished: 1985" - }, - { - "lang": "kotlin", - "value": "val ejsonStr = \"\"\"\n {\"_id\": {\"${\"$\"}oid\": \"6035210f35bd203721c3eab8\"},\n \"name\": \"Gabriel Garc\u00eda M\u00e1rquez\",\n \"dateOfDeath\": {\"${\"$\"}date\": \"2014-04-17T04:00:00Z\"},\n \"novels\": [\n {\"title\": \"One Hundred Years of Solitude\",\"yearPublished\": 1967},\n {\"title\": \"Chronicle of a Death Foretold\",\"yearPublished\": 1981},\n {\"title\": \"Love in the Time of Cholera\",\"yearPublished\": 1985}]}\n \"\"\".trimIndent()\n\nval author = JsonObject(ejsonStr)\n" - }, - { - "lang": "kotlin", - "value": "// val mongoClient = ;\n\nval database = mongoClient.getDatabase(\"fundamentals_data\")\nval collection= database.getCollection(\"authors\")\n\nval result = collection.insertOne(author)\n" - }, - { - "lang": "kotlin", - "value": "// val mongoClient = ;\n\nval query = JsonObject(\"{\\\"name\\\": \\\"Gabriel Garc\\\\u00eda M\\\\u00e1rquez\\\"}\")\nval jsonResult = collection.find(query).firstOrNull()\njsonResult?.let {\n println(\"query result in extended json format: \" + jsonResult.json)\n}\n" - }, - { - "lang": "none", - "value": "query result in extended json format: {\"_id\": {\"$oid\": \"6035210f35bd203721c3eab8\"}, \"name\": \"Gabriel Garc\u00eda M\u00e1rquez\", \"dateOfDeath\": {\"$date\": \"2014-04-17T04:00:00Z\"}, \"novels\": [{\"title\": \"One Hundred Years of Solitude\", \"yearPublished\": 1967}, {\"title\": \"Chronicle of a Death Foretold\", \"yearPublished\": 1981}, {\"title\": \"Love in the Time of Cholera\", \"yearPublished\": 1985}]}" - } - ], - "preview": "In this guide, you can learn how to use documents in the\nMongoDB Kotlin driver.", - "tags": null, - "facets": { - "target_product": ["drivers"], - "programming_language": ["kotlin"] - } - }, - { - "slug": "fundamentals/data-formats/serialization", - "title": "Kotlin Serialization", - "headings": [ - "Overview", - "Supported Types", - "Add Kotlin Serialization to Your Project", - "Annotate Data Classes", - "Custom Serializer Example", - "Customize the Serializer Configuration", - "Custom Codec Example", - "Polymorphic Serialization", - "Polymorphic Data Classes Example" - ], - "paragraphs": "The Kotlin driver supports the kotlinx.serialization library for\nserializing and deserializing Kotlin objects. The driver provides an efficient Bson serializer that you can use with\nclasses marked as @Serializable to handle the serialization of Kotlin objects\nto BSON data. You can also install the bson-kotlinx library to support\n custom codecs with configurations to encode\ndefaults, encode nulls, and define class discriminators. Although you can use the Kotlin driver with the Kotlin serialization Json \nlibrary, the Json serializer does not directly support BSON value types such\nas ObjectId . You must provide a custom serializer that can handle the\nconversion between BSON and JSON. To learn how to use the Codec interface instead of the\nKotlin serialization library to specify custom encoding and decoding\nof Kotlin objects to BSON data, see the Codecs guide. You might choose Kotlin serialization if you are already familiar\nwith the framework or if you prefer to use an idiomatic Kotlin approach. The Kotlin driver supports: All Kotlin types that are supported by the Kotlin serialization library All available BSON types Support for serialization in the Kotlin driver depends on the official Kotlin\nserialization library . 
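One setup detail worth noting, as an assumption based on the standard kotlinx.serialization toolchain rather than a statement from this page: marking classes with @Serializable also requires the Kotlin serialization compiler plugin, for example in build.gradle.kts:

// build.gradle.kts -- versions shown are illustrative
plugins {
    kotlin("jvm") version "1.9.0"
    kotlin("plugin.serialization") version "1.9.0"
}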
Select from the following tabs to see how to add the serialization\ndependencies to your project by using the Gradle and\n Maven package managers: If you are using Gradle to manage your\ndependencies, add the following to your build.gradle.kts dependencies list: If you are using Maven to manage your\ndependencies, add the following to your pom.xml dependencies list: To declare a class as serializable, annotate your Kotlin data classes with the\n @Serializable annotation from the Kotlin serialization framework. You can use your data classes in your code as normal after you mark them as serializable.\nThe Kotlin driver and the Kotlin serialization framework handle the\nBSON serialization and deserialization. This example shows a simple data class annotated with the following: For more information on serializable classes and available annotation classes,\nsee the official Kotlin Serialization \ndocumentation. @Serializable to mark the class as serializable. @SerialName to specify the name of the id and manufacturer properties\nin the BSON document. This can be used in place of the @BsonId and\n @BsonProperty annotations, which are unsupported in serializable classes. @Contextual to mark the BSON id property to use the built-in ObjectIdSerializer .\nThis annotation is required for BSON types to be serialized correctly. You cannot use annotations \nfrom the org.bson.codecs.pojo.annotations package on @Serializable data classes. You can create a custom serializer to handle how your data is\nrepresented in BSON. The Kotlin driver uses the KSerializer \ninterface from the kotlinx.serialization package to implement custom\nserializers. You can specify the custom serializer as the parameter to\nthe @Serializable annotation for a specific field. The following example shows how to create a custom\n KSerializer instance to convert a kotlinx.datetime.Instant to a\n BsonDateTime : The following code shows the PaintOrder data class in which the\n orderDate field has an annotation that specifies the custom\nserializer class defined in the preceding code: For more information about the methods and classes mentioned in this section,\nsee the following API documentation: KSerializer Instant BsonEncoder BsonDecoder You can use the KotlinSerializerCodec class from the org.bson.codecs.kotlinx \npackage to create a codec for your @Serializable data classes and\ncustomize what is stored. Use the BsonConfiguration class to define the configuration,\nincluding whether to encode defaults, encode nulls, or define class discriminators. To create a custom codec, install the bson-kotlinx \ndependency to your project. Select from the following tabs to see how to\nadd the dependency to your project by using the Gradle and\n Maven package managers: Then, you can define your codec using the\n KotlinSerializerCodec.create() \nmethod and add it to the registry. If you are using Gradle to manage your\ndependencies, add the following to your build.gradle.kts dependencies list: If you are using Maven to manage your\ndependencies, add the following to your pom.xml dependencies list: You can also optionally install the bson-kotlin dependency\nthrough the default codec registry. This dependency uses reflection\nand the codec registry to support Kotlin data classes, but it does\nnot support certain POJO annotations such as BsonDiscriminator ,\n BsonExtraElements , and BsonConstructor . To learn more, see\nthe bson-kotlin API documentation . 
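Once you define a codec as shown in the example that follows, you would typically attach the resulting registry to a collection. Here is a hedged sketch, assuming a @Serializable PaintOrder class like the one above and an existing collection instance:

import org.bson.codecs.configuration.CodecRegistries
import org.bson.codecs.kotlinx.BsonConfiguration
import org.bson.codecs.kotlinx.KotlinSerializerCodec

val myCustomCodec = KotlinSerializerCodec.create<PaintOrder>(
    bsonConfiguration = BsonConfiguration(encodeDefaults = false)
)

val registry = CodecRegistries.fromRegistries(
    CodecRegistries.fromCodecs(myCustomCodec),
    collection.codecRegistry,
)

// withCodecRegistry() returns a new collection instance that uses the
// custom registry; the original collection keeps its own settings.
val configuredCollection = collection.withCodecRegistry(registry)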
Generally, we recommend that you install and use the faster\n bson-kotlinx library for codec configuration. The following example shows how to create a codec using the\n KotlinSerializerCodec.create() method and configure it to not encode defaults: For more information about the methods and classes mentioned in this section,\nsee the following API documentation: KotlinSerializerCodec KotlinSerializerCodec.create() BsonConfiguration The Kotlin driver natively supports serialization and deserialization\nof polymorphic classes. When you mark a sealed interface and data\nclasses that inherit that interface with the @Serializable \nannotation, the driver uses a KSerializer implementation to handle\nconversion of your types to and from BSON. When you insert an instance of a polymorphic data class into MongoDB,\nthe driver adds the field _t , the\ndiscriminator field. The value of this field is the data class name. The following example creates an interface and two data classes that\ninherit that interface. In the data classes, the id field is marked\nwith the annotations described in the\n Annotate Data Classes section: Then, you can perform operations with data classes as usual. The\nfollowing example parametrizes the collection with the Person \ninterface, then performs operations with the polymorphic classes\n Teacher and Student . When you retrieve documents, the driver\nautomatically detects the type based on the discriminator value and\ndeserializes them accordingly.", - "code": [ - { - "lang": "kotlin", - "value": "implementation(\"org.jetbrains.kotlinx:kotlinx-serialization-core:1.5.1\")\nimplementation(\"org.mongodb:bson-kotlinx:5.1.2\")" - }, - { - "lang": "kotlin", - "value": "\n org.jetbrains.kotlinx\n kotlinx-serialization-core\n 1.5.1\n\n\n org.mongodb\n bson-kotlinx\n 5.1.2\n" - }, - { - "lang": "kotlin", - "value": "@Serializable\ndata class PaintOrder(\n @SerialName(\"_id\") // Use instead of @BsonId\n @Contextual val id: ObjectId?,\n val color: String,\n val qty: Int,\n @SerialName(\"brand\")\n val manufacturer: String = \"Acme\" // Use instead of @BsonProperty\n)\n" - }, - { - "lang": "kotlin", - "value": "object InstantAsBsonDateTime : KSerializer {\n override val descriptor: SerialDescriptor = PrimitiveSerialDescriptor(\"InstantAsBsonDateTime\", PrimitiveKind.LONG)\n\n override fun serialize(encoder: Encoder, value: Instant) {\n when (encoder) {\n is BsonEncoder -> encoder.encodeBsonValue(BsonDateTime(value.toEpochMilliseconds()))\n else -> throw SerializationException(\"Instant is not supported by ${encoder::class}\")\n }\n }\n\n override fun deserialize(decoder: Decoder): Instant {\n return when (decoder) {\n is BsonDecoder -> Instant.fromEpochMilliseconds(decoder.decodeBsonValue().asDateTime().value)\n else -> throw SerializationException(\"Instant is not supported by ${decoder::class}\")\n }\n }\n}\n" - }, - { - "lang": "kotlin", - "value": "@Serializable\ndata class PaintOrder(\n val color: String,\n val qty: Int,\n @Serializable(with = InstantAsBsonDateTime::class)\n val orderDate: Instant,\n)\n" - }, - { - "lang": "kotlin", - "value": "implementation(\"org.mongodb:bson-kotlinx:5.1.2\")" - }, - { - "lang": "kotlin", - "value": "\n org.jetbrains.kotlinx\n bson-kotlinx\n 5.1.2\n" - }, - { - "lang": "kotlin\n :copyable: true", - "value": "import org.bson.codecs.configuration.CodecRegistries\nimport org.bson.codecs.kotlinx.BsonConfiguration\nimport org.bson.codecs.kotlinx.KotlinSerializerCodec" - }, - { - "lang": "kotlin", - "value": "val myCustomCodec = 
KotlinSerializerCodec.create(\n bsonConfiguration = BsonConfiguration(encodeDefaults = false)\n)\n\nval registry = CodecRegistries.fromRegistries(\n CodecRegistries.fromCodecs(myCustomCodec), collection.codecRegistry\n)\n" - }, - { - "lang": "kotlin", - "value": "@Serializable\nsealed interface Person {\n val name: String\n}\n\n@Serializable\ndata class Student(\n @Contextual\n @SerialName(\"_id\")\n val id: ObjectId,\n override val name: String,\n val grade: Int,\n) : Person\n\n@Serializable\ndata class Teacher(\n @Contextual\n @SerialName(\"_id\")\n val id: ObjectId,\n override val name: String,\n val department: String,\n) : Person\n" - }, - { - "lang": "kotlin", - "value": "val collection = database.getCollection(\"school\")\n\nval teacherDoc = Teacher(ObjectId(), \"Vivian Lee\", \"History\")\nval studentDoc = Student(ObjectId(), \"Kate Parker\", 10)\n\ncollection.insertOne(teacherDoc)\ncollection.insertOne(studentDoc)\n\nprintln(\"Retrieving by using data classes\")\ncollection.withDocumentClass()\n .find(Filters.exists(\"department\"))\n .first().also { println(it) }\n\ncollection.withDocumentClass()\n .find(Filters.exists(\"grade\"))\n .first().also { println(it) }\n\nprintln(\"\\nRetrieving by using Person interface\")\nval resultsFlow = collection.withDocumentClass().find()\nresultsFlow.collect { println(it) }\n\nprintln(\"\\nRetrieving as Document type\")\nval resultsDocFlow = collection.withDocumentClass().find()\nresultsDocFlow.collect { println(it) }\n" - }, - { - "lang": "console", - "value": "Retrieving by using data classes\nTeacher(id=..., name=Vivian Lee, department=History)\nStudent(id=..., name=Kate Parker, grade=10)\n\nRetrieving by using Person interface\nTeacher(id=..., name=Vivian Lee, department=History)\nStudent(id=..., name=Kate Parker, grade=10)\n\nRetrieving as Document type\nDocument{{_id=..., _t=Teacher, name=Vivian Lee, department=History}}\nDocument{{_id=..., _t=Student, name=Kate Parker, grade=10}}" - } - ], - "preview": "The Kotlin driver supports the kotlinx.serialization library for\nserializing and deserializing Kotlin objects.", - "tags": "code example, data model, conversion, polymorphism", - "facets": { - "genre": ["reference"], - "target_product": ["drivers"], - "programming_language": ["kotlin"] - } - }, - { - "slug": "fundamentals/data-formats", - "title": "Data Formats", - "headings": [], - "paragraphs": "Document Data Format: Data Classes Document Data Format: BSON Document Data Format: Extended JSON Documents Kotlin Serialization Codecs", - "code": [], - "preview": null, - "tags": null, - "facets": { - "target_product": ["drivers"], - "programming_language": ["kotlin"] - } - }, - { - "slug": "fundamentals/databases-collections", - "title": "Databases and Collections", - "headings": [ - "Overview", - "Access a Database", - "Access a Collection", - "Specify Return Type", - "Create a Collection", - "Document Validation", - "Get a List of Collections", - "Drop a Collection", - "Specify Read Preferences, Read Concerns, and Write Concerns" - ], - "paragraphs": "In this guide, you can learn how to use MongoDB databases and\ncollections with the MongoDB Kotlin driver. MongoDB organizes data into a hierarchy of the following levels: With the MongoDB Kotlin driver, you can model data by using Kotlin data\nclasses or by using the Document class to store and\nretrieve data from MongoDB. To learn more about using data classes, see\nthe guide on the Data Class Data Format . 
To learn more about using the Document \nclass, see the guide on the Document Data Format . Databases : Databases are the top level of data organization in a MongoDB instance. Collections : Databases are organized into collections which contain documents . Documents : Documents contain literal data such as strings, numbers, and dates, as well as other embedded documents. For more information on document field types and structure, see the Server documentation on documents . Use the getDatabase() method of\na MongoClient instance to access a MongoDatabase in a MongoDB\ninstance. The following example accesses a database named testDatabase : Use the getCollection() \nmethod of a MongoDatabase instance to access a\n MongoCollection in a database of your connected MongoDB instance. The following example accesses a collection named testCollection from a\n MongoDatabase that contains documents of type ExampleDataClass : If the provided collection name does not already exist in the database,\nMongoDB implicitly creates the collection when you first insert data\ninto that collection. The driver provides a way for you to specify a class for documents\nreturned from a collection, even if it is different than the class you\nspecified when retrieving the collection. You can specify a return class\nby using the MongoCollection.withDocumentClass() \nmethod. Specifying a different return class could be useful in the following\nsituations: The following example retrieves a collection that\ncontains data represented by the Fruit data class but returns the result\nof a findOneAndUpdate() operation as an instance of the NewFruit \nclass. The operation changes the name of the qty field to\n quantity and adds an item to the seasons array field in the\ndocument with a name value of \"strawberry\" : Your collection contains multiple data types. You specify a projection that changes your data fields. You cannot directly specify a return type on a method that changes the data,\nsuch as findOneAndUpdate() or findOneAndReplace() . Use the createCollection() \nmethod of a MongoDatabase instance to create a collection\nin a database of your connected MongoDB instance. The following example creates a collection called exampleCollection : You can specify collection options like maximum size and document\nvalidation rules using the CreateCollectionOptions \nclass. The createCollection() method accepts an instance of\n CreateCollectionOptions as an optional second parameter. Document validation provides the ability to validate documents\nagainst a series of filters during writes to a collection. You can\nspecify these filters using the ValidationOptions \nclass, which accepts a series of Filters instances\nthat specify the validation rules and expressions: For more information, see the server documentation for document\nvalidation . You can query for a list of collections in a database using the\n MongoDatabase.listCollectionNames() method: You can remove a collection from the database using the\n MongoCollection.drop() method: Dropping a collection from your database also permanently deletes all\ndocuments within that collection and all indexes on that collection.\nOnly drop collections that contain data that is no longer needed. 
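Returning to the collection options mentioned under Create a Collection, here is a hedged sketch of the "maximum size" case using CreateCollectionOptions; the collection name and limits are illustrative, and `database` is assumed to be an existing MongoDatabase:

import com.mongodb.client.model.CreateCollectionOptions

// A capped collection: fixed size, oldest documents overwritten first
val options = CreateCollectionOptions()
    .capped(true)
    .sizeInBytes(262_144L)   // maximum collection size in bytes
    .maxDocuments(1_000L)    // optional cap on the document count

database.createCollection("recentEvents", options)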
Read preferences , read concerns , and write concerns control\nhow the driver routes read operations and waits for acknowledgment for\nread and write operations when connected to a MongoDB replica set.\nRead preferences and read concerns apply to all read operations;\nwrite concerns apply to all write operations. MongoDatabase instances inherit their write concern, read concern,\nand write preference settings from the MongoClient used to create\nthem. MongoCollection instances inherit their write concern, read concern,\nand write preference settings from the MongoDatabase used to create\nthem. However, you can use the following methods to obtain an instance\nof a MongoDatabase or MongoCollection with a read preference,\nread concern, or write concern that differs from the setting they would\nnormally inherit: For more information on these topics, see the following pages in the\nServer manual: MongoDatabase.withReadConcern() MongoDatabase.withReadPreference() MongoDatabase.withWriteConcern() MongoCollection.withReadConcern() MongoCollection.withReadPreference() MongoCollection.withWriteConcern() The withReadConcern() , withReadPreference() , and\n withWriteConcern methods create a new instance of a\n MongoDatabase or MongoCollection with the desired preference\nor concern. The MongoDatabase or MongoCollection upon which\nthe method is called retains its original preference and concern\nsettings. Read Preference Read Concern Write Concern", - "code": [ - { - "lang": "kotlin", - "value": "val database = client.getDatabase(\"testDatabase\")\n" - }, - { - "lang": "kotlin", - "value": "data class ExampleDataClass(\n @BsonId val id: ObjectId = ObjectId(),\n val exampleProperty: String,\n)\n" - }, - { - "lang": "kotlin", - "value": "val collection = database.getCollection(\"testCollection\")\n" - }, - { - "lang": "kotlin", - "value": "data class Fruit(\n @BsonId val id: Int,\n val name: String,\n val qty: Int,\n val seasons: List\n)\n" - }, - { - "lang": "kotlin", - "value": "val collection =\n database.getCollection(\"fruits\")\n\n// Define a data class for returned documents\ndata class NewFruit(\n @BsonId val id: Int,\n val name: String,\n val quantity: Int,\n val seasons: List\n)\n\nval filter = Filters.eq(Fruit::name.name, \"strawberry\")\nval update = Updates.combine(\n Updates.rename(Fruit::qty.name, \"quantity\"),\n Updates.push(Fruit::seasons.name, \"fall\"),\n)\nval options = FindOneAndUpdateOptions()\n .returnDocument(ReturnDocument.AFTER)\n\n// Specify the class for returned documents as the type parameter in withDocumentClass()\nval result = collection\n .withDocumentClass()\n .findOneAndUpdate(filter, update, options)\nprintln(result)\n" - }, - { - "lang": "console", - "value": "NewFruit(id=1, name=strawberry, quantity=205, seasons=[summer, fall])" - }, - { - "lang": "kotlin", - "value": "database.createCollection(\"exampleCollection\")\n" - }, - { - "lang": "kotlin", - "value": "val collOptions: ValidationOptions = ValidationOptions().validator(\n Filters.or(\n Filters.exists(\"title\"),\n Filters.exists(\"name\")\n )\n)\ndatabase.createCollection(\n \"movies\",\n CreateCollectionOptions().validationOptions(collOptions)\n)\n" - }, - { - "lang": "kotlin", - "value": "val collectionList = database.listCollectionNames().toList()\n\nprintln(collectionList)\n" - }, - { - "lang": "console", - "value": "[movies, exampleCollection]" - }, - { - "lang": "kotlin", - "value": "val collection =\n database.getCollection(\"movies\")\ncollection.drop()\n" - } - ], - "preview": "In this guide, you 
can learn how to use MongoDB databases and\ncollections with the MongoDB Kotlin driver.", - "tags": null, - "facets": { - "target_product": ["drivers"], - "programming_language": ["kotlin"] - } - }, - { - "slug": "fundamentals/encrypt-fields", - "title": "In-Use Encryption", - "headings": [ - "Overview", - "Queryable Encryption", - "Client-side Field Level Encryption" - ], - "paragraphs": "You can use the Kotlin driver to encrypt specific document fields by using a\nset of features called in-use encryption . In-use encryption allows\nyour application to encrypt data before sending it to MongoDB\nand query documents with encrypted fields. In-use encryption prevents unauthorized users from viewing plaintext\ndata as it is sent to MongoDB or while it is in an encrypted database. To\nenable in-use encryption in an application and authorize it to decrypt\ndata, you must create encryption keys that only your application can\naccess. Only applications that have access to your encryption\nkeys can access the decrypted, plaintext data. If an attacker gains\naccess to the database, they can only see the encrypted ciphertext data\nbecause they lack access to the encryption keys. You might use in-use encryption to encrypt fields in your MongoDB\ndocuments that contain the following types of sensitive data: MongoDB offers the following features to enable in-use encryption: Credit card numbers Addresses Health information Financial information Any other sensitive or personally identifiable information (PII) Queryable Encryption Client-side Field Level Encryption Queryable Encryption is the next-generation in-use encryption feature,\nfirst introduced as a preview feature in MongoDB Server version 6.0 and\nas a generally available (GA) feature in MongoDB 7.0. Queryable\nEncryption supports searching encrypted fields for equality and encrypts\neach value uniquely. To learn more about Queryable Encryption, see Queryable\nEncryption in the Server manual. The implementation of Queryable Encryption in MongoDB 6.0 is incompatible with the GA version introduced in MongoDB 7.0. The Queryable Encryption preview feature is no longer supported. Client-side Field Level Encryption (CSFLE) was introduced in MongoDB\nServer version 4.2 and supports searching encrypted fields for equality.\nCSFLE differs from Queryable Encryption in that you can select either a\ndeterministic or random encryption algorithm to encrypt fields. You can only\nquery encrypted fields that use a deterministic encryption algorithm when\nusing CSFLE. When you use a random encryption algorithm to encrypt\nfields in CSFLE, they can be decrypted, but you cannot perform equality\nqueries on those fields. When you use Queryable Encryption, you cannot\nspecify the encryption algorithm, but you can query all encrypted\nfields. When you deterministically encrypt a value, the same input value\nproduces the same output value. While deterministic encryption allows\nyou to perform queries on those encrypted fields, encrypted data with\nlow cardinality is susceptible to code breaking by frequency analysis. To learn more about CSFLE, see CSFLE in the\nServer manual. To learn more about these concepts, see the following Wikipedia\nentries: Cardinality Frequency Analysis", - "code": [], - "preview": "You can use the Kotlin driver to encrypt specific document fields by using a\nset of features called in-use encryption. 
In-use encryption allows\nyour application to encrypt data before sending it to MongoDB\nand query documents with encrypted fields.", - "tags": null, - "facets": { - "target_product": ["drivers"], - "programming_language": ["kotlin"] - } - }, - { - "slug": "fundamentals/enterprise-auth", - "title": "Enterprise Authentication Mechanisms", - "headings": [ - "Overview", - "Specify an Authentication Mechanism", - "Mechanisms", - "Kerberos (GSSAPI)", - "LDAP (PLAIN)", - "MONGODB-OIDC", - "Azure IMDS", - "GCP IMDS", - "Custom Callback" - ], - "paragraphs": "In this guide, you can learn how to authenticate with MongoDB using each\n authentication mechanism available exclusively in the MongoDB Enterprise\nEdition. You can use the following mechanisms with the latest version of MongoDB\nEnterprise Edition: Authentication Mechanisms guide . For more\ninformation on establishing a connection to your MongoDB cluster, read our\n Connection Guide . Kerberos (GSSAPI) LDAP (PLAIN) MONGODB-OIDC You can specify your authentication mechanism and credentials when connecting\nto MongoDB using either of the following: A connection string (also known as a connection URI ) specifies how to\nconnect and authenticate to your MongoDB cluster. To authenticate using a connection string, include your settings in your\nconnection string and pass it to the MongoClient.create() method to\ninstantiate your MongoClient . The Connection String \ntab in each section provides the syntax for authenticating using a\n connection string . Alternatively, you can use the MongoCredential class to specify your\nauthentication details. The MongoCredential class contains static factory\nmethods that construct instances containing your authentication mechanism and\ncredentials. When you use the MongoCredential helper class, you need\nto use the MongoClientSettings.Builder class to configure your\nconnection settings when constructing your MongoClient . The\n MongoCredential tab in each section provides the syntax for\nauthenticating using a MongoCredential . For more information on these classes and methods, refer to the following API\ndocumentation: A connection string A MongoCredential factory method MongoClient.create() MongoClient MongoClientSettings.Builder MongoCredential The Generic Security Services API ( GSSAPI ) authentication mechanism\nallows the user to authenticate to a Kerberos service using the user's\nprincipal name. The following code snippets show how to specify the authentication mechanism,\nusing the following placeholders: Select the Connection String or the MongoCredential \ntab below for instructions and sample code for specifying this authentication\nmechanism: In order to acquire a\n Kerberos ticket ,\nthe GSSAPI Java libraries require you to specify the realm and Key Distribution\nCenter (KDC) system properties. See the sample settings in the following example: You may need to specify one or more of the following additional\n MongoCredential mechanism properties depending on your Kerberos setup: By default, the Kotlin driver caches Kerberos tickets by MongoClient instance.\nIf your deployment needs to frequently create and destroy MongoClient instances,\nyou can change the default Kerberos ticket caching behavior to cache by process\nto improve performance. The method refers to the GSSAPI authentication mechanism instead\nof Kerberos because the driver authenticates using the\n GSSAPI RFC-4652 SASL\nmechanism. Kerberos principal - your URL-encoded principal name, e.g. 
\"username%40REALM.ME\" hostname - network address of your MongoDB server, accessible by your client port - port number of your MongoDB server To specify the GSSAPI authentication mechanism using a connection\nstring: Your code to instantiate a MongoClient should resemble the following: Assign the authMechanism URL parameter to the value GSSAPI (optional) Assign the authSource URL parameter to the value $external If you specify the GSSAPI mechanism, you cannot assign\n authSource to any value other than $external . To specify the GSSAPI authentication mechanism using the\n MongoCredential class, use the createGSSAPICredential() \nmethod. Your code to instantiate a MongoClient should resemble the following: SERVICE_NAME CANONICALIZE_HOST_NAME JAVA_SUBJECT JAVA_SASL_CLIENT_PROPERTIES JAVA_SUBJECT_PROVIDER To specify one of the GSSAPI additional properties, include it in the\nconnection string as a URL parameter using the format:\n : . Your code to instantiate a MongoClient using GSSAPI and additional\nproperties might resemble the following: You can only specify the following GSSAPI properties using the\n MongoCredential : Select the MongoCredential tab to see how to specify\nthem. JAVA_SUBJECT JAVA_SASL_CLIENT_PROPERTIES JAVA_SUBJECT_PROVIDER To specify one of the GSSAPI additional properties, call the\n withMechanismProperty() method on your MongoCredential \ninstance and pass the property name and value as parameters. Use the\nproperty name constants defined in the MongoCredential class: Select the SERVICE_NAME_KEY or JAVA_SUBJECT_KEY tab to\nsee sample code to instantiate a MongoCredential that uses GSSAPI and\nthe selected property: SERVICE_NAME_KEY CANONICALIZE_HOST_NAME_KEY JAVA_SUBJECT_KEY JAVA_SASL_CLIENT_PROPERTIES_KEY JAVA_SUBJECT_PROVIDER_KEY To cache Kerberos tickets by process, you must use the MongoCredential authentication\nmechanism, as the connection string authentication mechanism does not support the JAVA_SUBJECT_PROVIDER \nmechanism property. If you would like to cache Kerberos tickets by process, select the MongoCredential \ntab to learn how to accomplish this. To cache Kerberos tickets by process, you must specify the JAVA_SUBJECT_PROVIDER \nmechanism property and provide a\n KerberosSubjectProvider \nin your MongoCredential instance. The code to configure the Kotlin driver to cache Kerberos tickets\nby process should resemble the following: On Windows, Oracle\u2019s JRE uses LSA \nrather than SSPI \nin its implementation of GSSAPI which limits interoperability with\nWindows Active Directory and implementations of single sign-on. See the\nfollowing articles for more information: JDK-8054026 JDK-6722928 SO 23427343 Available in MongoDB Enterprise Edition 3.4 and later. You can authenticate to a Lightweight Directory Access Protocol (LDAP)\nserver using your directory server username and password. You can specify this authentication mechanism by setting the authMechanism \nparameter to PLAIN and including your LDAP username and password in the\n connection string . The following code snippets show how to specify the authentication mechanism,\nusing the following placeholders: Select the Connection String or the MongoCredential \ntab below for instructions and sample code for specifying this authentication\nmechanism: The authentication mechanism is named PLAIN instead of LDAP since it\nauthenticates using the PLAIN Simple Authentication and Security Layer\n(SASL) defined in RFC-4616 . 
LDAP username - your LDAP username password - your LDAP user's password hostname - network address of your MongoDB server, accessible by your client port - port number of your MongoDB server To specify the LDAP (PLAIN) authentication mechanism using a connection\nstring: Your code to instantiate a MongoClient should resemble the following: Assign the authMechanism URL parameter to the value PLAIN (optional) Assign the authSource URL parameter to the value $external If you specify the PLAIN mechanism, you cannot assign\n authSource to any value other than $external . To specify the LDAP (PLAIN) authentication mechanism using the\n MongoCredential class, use the createPlainCredential() \nmethod. Your code to instantiate a MongoClient should resemble the following: The following sections describe how to use the MONGODB-OIDC\nauthentication mechanism to authenticate to various platforms. For more information about the MONGODB-OIDC authentication mechanism, see\n OpenID Connect Authentication and\n MongoDB Server Parameters \nin the MongoDB Server manual. The MONGODB-OIDC authentication mechanism requires MongoDB server v7.0 or later running\non a Linux platform. If your application runs on an Azure VM, or otherwise uses the\n Azure Instance Metadata Service \n(IMDS), you can authenticate to MongoDB by using the Kotlin driver's built-in Azure\nsupport. You can specify Azure IMDS OIDC authentication either by\nusing a MongoCredential instance or by specifying your credentials\nin the connection string. Select from the Connection String or MongoCredential tabs to\nsee the corresponding syntax. Replace the placeholder in the\nfollowing code with the percent-encoded value of the audience server\nparameter configured on your MongoDB deployment. The comma ( , ) character and its encoding ( %2C ) are\nreserved, and using these characters in a value causes the\ndriver to interpret commas as delimiters of key-value pairs.\nYou must specify values that contain commas in a MongoCredential instance, as\ndemonstrated in the MongoCredential tab. Replace the placeholder with the client ID or application ID of the\nAzure managed identity or enterprise application. Replace the \nplaceholder with the value of the\n audience server parameter configured on your MongoDB deployment. If your application runs on a Google Compute Engine VM, or otherwise uses the\n GCP Instance Metadata Service ,\nyou can authenticate to MongoDB by using the Kotlin driver's built-in GCP\nsupport. You can specify GCP IMDS OIDC authentication either by\nusing a MongoCredential instance or by specifying your credentials\nin the connection string. Select from the Connection String or MongoCredential tabs to\nsee the corresponding syntax. Replace the placeholder in the\nfollowing code with the percent-encoded value of the audience server\nparameter configured on your MongoDB deployment. The comma ( , ) character and its encoding ( %2C ) are\nreserved, and using these characters in a value causes the\ndriver to interpret commas as delimiters of key-value pairs.\nYou must specify values that contain commas in a MongoCredential instance, as\ndemonstrated in the MongoCredential tab. Replace the placeholder with the value of the\n audience server parameter configured on your MongoDB deployment. The Kotlin driver doesn't offer built-in support for all platforms, including\nAzure Functions and Azure Kubernetes Service (AKS). 
Instead, you\nmust define a custom callback to use OIDC to authenticate from these platforms.\nTo do so, use the \"OIDC_CALLBACK\" authentication property, as shown in the following\ncode example: The value of the \"OIDC_CALLBACK\" property must be a lambda or other implementation\nof the OidcCallback functional interface that accepts an OidcCallbackContext \nas a parameter and returns an OidcCallbackResult . The following example uses an example callback to retrieve an OIDC token from a file\nnamed \"access-token.dat\" in the local file system:", - "code": [ - { - "lang": "none", - "value": "java.security.krb5.realm=MYREALM.ME\njava.security.krb5.kdc=mykdc.myrealm.me" - }, - { - "lang": "kotlin", - "value": "val connectionString = ConnectionString(\"@:/?authSource=$external&authMechanism=GSSAPI\")\nval mongoClient = MongoClient.create(connectionString)\n" - }, - { - "lang": "kotlin", - "value": "val credential = MongoCredential.createGSSAPICredential(\"\")\n\nval settings = MongoClientSettings.builder()\n .applyToClusterSettings { builder ->\n builder.hosts(listOf(ServerAddress(\"\", )))\n }\n .credential(credential)\n .build()\n\nval mongoClient = MongoClient.create(settings)\n" - }, - { - "lang": "kotlin", - "value": "val connectionString = ConnectionString(\"@:/?authSource=$external&authMechanism=GSSAPI&authMechanismProperties=SERVICE_NAME:myService\")\nval mongoClient = MongoClient.create(connectionString)\n" - }, - { - "lang": "kotlin", - "value": "val credential = MongoCredential.createGSSAPICredential(\"\")\n .withMechanismProperty(MongoCredential.SERVICE_NAME_KEY, \"myService\")\n" - }, - { - "lang": "kotlin", - "value": "val loginContext = LoginContext(\"\")\nloginContext.login()\nval subject: Subject = loginContext.subject\n\nval credential = MongoCredential.createGSSAPICredential(\"\")\n .withMechanismProperty(MongoCredential.JAVA_SUBJECT_KEY, subject)\n" - }, - { - "lang": "kotlin", - "value": "/* All MongoClient instances sharing this instance of KerberosSubjectProvider\nwill share a Kerberos ticket cache */\nval myLoginContext = \"myContext\"\n/* Login context defaults to \"com.sun.security.jgss.krb5.initiate\"\nif unspecified in KerberosSubjectProvider */\nval credential = MongoCredential.createGSSAPICredential(\"\")\n .withMechanismProperty(\n MongoCredential.JAVA_SUBJECT_PROVIDER_KEY,\n KerberosSubjectProvider(myLoginContext)\n )\n" - }, - { - "lang": "kotlin", - "value": "val connectionString = ConnectionString(\":@:/?authSource=$external&authMechanism=PLAIN\")\nval mongoClient = MongoClient.create(connectionString)\n" - }, - { - "lang": "kotlin", - "value": "val credential = MongoCredential.createPlainCredential(\"\", \"$external\", \"\".toCharArray())\n\nval settings = MongoClientSettings.builder()\n .applyToClusterSettings { builder ->\n builder.hosts(listOf(ServerAddress(\"\", )))\n }\n .credential(credential)\n .build()\n\nval mongoClient = MongoClient.create(settings)\n" - }, - { - "lang": "kotlin", - "value": "val connectionString = ConnectionString(\n \"mongodb://@:/?\" +\n \"?authMechanism=MONGODB-OIDC\" +\n \"&authMechanismProperties=ENVIRONMENT:azure,TOKEN_RESOURCE:\")\nval mongoClient = MongoClient.create(connectionString)\n" - }, - { - "lang": "kotlin", - "value": "val credential = MongoCredential.createOidcCredential(\"\")\n .withMechanismProperty(\"ENVIRONMENT\", \"azure\")\n .withMechanismProperty(\"TOKEN_RESOURCE\", \"\")\n\nval mongoClient = MongoClient.create(\n MongoClientSettings.builder()\n .applyToClusterSettings { builder ->\n 
builder.hosts(listOf(ServerAddress(\"\", )))\n }\n .credential(credential)\n .build())\n" - }, - { - "lang": "kotlin", - "value": "val connectionString = ConnectionString(\n \"mongodb://@:/?\" +\n \"authMechanism=MONGODB-OIDC\" +\n \"&authMechanismProperties=ENVIRONMENT:gcp,TOKEN_RESOURCE:\")\nval mongoClient = MongoClient.create(connectionString)\n" - }, - { - "lang": "kotlin", - "value": "val credential = MongoCredential.createOidcCredential(\"\")\n .withMechanismProperty(\"ENVIRONMENT\", \"gcp\")\n .withMechanismProperty(\"TOKEN_RESOURCE\", \"\")\n\nval mongoClient = MongoClient.create(\n MongoClientSettings.builder()\n .applyToClusterSettings { builder ->\n builder.hosts(listOf(ServerAddress(\"\", )))\n }\n .credential(credential)\n .build())\n" - }, - { - "lang": "kotlin", - "value": "val credential = MongoCredential.createOidcCredential(null)\n .withMechanismProperty(\"OIDC_CALLBACK\") { context: Context ->\n val accessToken = \"...\"\n OidcCallbackResult(accessToken)\n }\n" - }, - { - "lang": "kotlin", - "value": "val credential = MongoCredential.createOidcCredential(null)\n .withMechanismProperty(\"OIDC_CALLBACK\") { context: Context ->\n val accessToken = String(Files.readAllBytes(Paths.get(\"access-token.dat\")))\n OidcCallbackResult(accessToken)\n }\n\nval mongoClient = MongoClient.create(\n MongoClientSettings.builder()\n .applyToClusterSettings { builder ->\n builder.hosts(listOf(ServerAddress(\"\", )))\n }\n .credential(credential)\n .build()\n)\n" - } - ], - "preview": "In this guide, you can learn how to authenticate with MongoDB using each\nauthentication mechanism available exclusively in the MongoDB Enterprise\nEdition.", - "tags": "ldap, encryption, principal, tls", - "facets": { - "genre": ["reference"], - "target_product": ["drivers"], - "programming_language": ["kotlin"] - } - }, - { - "slug": "fundamentals/indexes", - "title": "Indexes", - "headings": [ - "Overview", - "Query Coverage and Performance", - "Operational Considerations", - "Index Types", - "Single Field and Compound Indexes", - "Single Field Indexes", - "Compound Indexes", - "Multikey Indexes (Indexes on Array Fields)", - "Atlas Search and Vector Search Indexes", - "Create a Search Index", - "List Search Indexes", - "Update a Search Index", - "Drop a Search Index", - "Text Indexes", - "Single Field", - "Multiple Fields", - "Geospatial Indexes", - "Unique Indexes", - "Clustered Indexes", - "Remove an Index", - "Remove an Index Using an Index Specification Document", - "Remove an Index Using a Name Field", - "Remove an Index Using a Wildcard Character" - ], - "paragraphs": "In this guide, you can learn how to create and manage indexes by\nusing the MongoDB Kotlin Driver. Indexes support the efficient execution of queries in MongoDB. Without\nindexes, MongoDB must scan every document in a collection (a\n collection scan ) to find the documents that match each query. These\ncollection scans are slow and can negatively affect the performance of\nyour application. If an appropriate index exists for a query, MongoDB\ncan use the index to limit the documents that the query must inspect. Indexes also have the following benefits: To learn more, see Indexes in the Server manual. Indexes allow efficient sorting. Indexes enable special capabilities such as geospatial queries . Indexes allow the creation of constraints to ensure a field value is unique . 
Update operations use indexes when finding documents to update, and\ndelete operations use indexes when finding documents to delete.\n Certain stages in\nthe aggregation pipeline also use indexes to improve performance. When you execute a query against MongoDB, your command can include various elements: When all the fields specified in the query, projection, and sort are in the same index, MongoDB returns results directly\nfrom the index, also called a covered query . For more information on how to ensure your index covers your query criteria and projection, see the Server manual\narticles on query coverage . Query criteria that specify fields and values you are looking for Options that affect the query's execution, such as the read concern Projection criteria to specify the fields MongoDB returns (optional) Sort criteria to specify the order of documents returned from MongoDB (optional) Sort criteria must match or invert the order of the index. Consider an index on the field name in ascending order (A-Z) and age in descending order (9-0): MongoDB uses this index when you sort your data in either of the\nfollowing ways: Specifying a sort order of name and age ascending or name and age \ndescending requires an in-memory sort. name ascending, age descending name descending, age ascending The following guidelines describe how you can optimize the way\nyour application uses indexes: Since MongoDB supports dynamic schemas, applications can query against fields whose names cannot be known in advance or\nare arbitrary. MongoDB 4.2 introduced wildcard indexes to help support these queries.\nWildcard indexes are not designed to replace workload-based index planning. For more information on designing your data model and choosing indexes appropriate for your application, see the MongoDB\nserver documentation on Indexing Strategies and\n Data Modeling and Indexes . To improve query performance, build indexes on fields that appear often in\nyour application's queries and operations that return sorted results. Track index memory and disk usage for capacity planning, because each\nindex that you add consumes disk space and memory when active. Avoid adding indexes that you infrequently use. Note that when a write\noperation updates an indexed field, MongoDB updates the related index. MongoDB supports several different index types to support querying your data. The following sections describe the\nmost common index types and provide sample code for creating each index type. For a full list of index types, see\n Indexes in the Server manual. The following examples use the\n createIndex() \nmethod to create various indexes, and the following data classes to model data\nin MongoDB: The Kotlin driver provides the Indexes \nclass to create and manage indexes. This class includes static\nfactory methods to create index specification documents for different\nMongoDB index key types. Single field indexes are indexes with a reference to a single field within a collection's\ndocuments. They improve single field query and sort performance, and support TTL Indexes that\nautomatically remove documents from a collection after a certain amount of time or at a specific clock time. The following example creates an index in ascending order on the title field: The following is an example of a query that is covered by the index\ncreated in the preceding code snippet: See the MongoDB server manual section on single field indexes for more information. The _id_ index is an example of a single field index. 
This index is automatically created on the _id field\nwhen a new collection is created. Compound indexes hold references to multiple fields within a collection's documents,\nimproving query and sort performance. The following example creates a compound index on the type and rated fields: The following is an example of a query that is covered by the index\ncreated in the preceding code snippet: See the MongoDB server manual section on Compound indexes for more information. Read more about compound indexes, index prefixes , and sort order here . Multikey indexes are indexes that improve performance for queries that specify a field with an index that contains\nan array value. You can define a multikey index using the same syntax as a single field or compound index. The following example creates a compound, multikey index on the rated , genres (an array of\nStrings), and title fields: The following is an example of a query that is covered by the index\ncreated in the preceding code snippet: Multikey indexes behave differently from other indexes in terms of query coverage, index-bound computation, and\nsort behavior. To learn more about multikey indexes, including a discussion of their behavior and limitations,\nsee Multikey Indexes in the Server manual. You can programmatically manage your Atlas Search and Atlas Vector\nSearch indexes by using the Kotlin driver. The Atlas Search feature enables you to perform full-text searches on\ncollections hosted on MongoDB Atlas. To learn more about MongoDB Atlas\nSearch, see the Atlas Search Indexes documentation. Atlas Vector Search enables you to perform semantic searches on vector\nembeddings stored in MongoDB Atlas. To learn more about Atlas Vector Search, see the\n Atlas Vector Search section in the Aggregates Builder guide. You can call the following methods on a collection to manage your Atlas\nSearch and Vector Search indexes: The following sections provide code examples that demonstrate how to use\neach of the preceding methods. createSearchIndex() createSearchIndexes() listSearchIndexes() updateSearchIndex() dropSearchIndex() The Atlas Search index-management methods run asynchronously. The\ndriver methods can return before confirming that they ran\nsuccessfully. To determine the current status of the indexes, call the\n listSearchIndexes() method. You can use the createSearchIndex() \nand createSearchIndexes() \nmethods to create Atlas Search and Vector Search indexes on a\ncollection. The following code example shows how to create an Atlas Search index: The following code example shows how to create Search and\nVector Search indexes in one call: You can use the\n listSearchIndexes() \nmethod to return a list of the Atlas Search indexes on a collection. The following code example shows how to print a list of the search indexes on\na collection: You can use the\n updateSearchIndex() \nmethod to update an Atlas Search index. The following code shows how to update a search index: You can use the\n dropSearchIndex() \nmethod to remove an Atlas Search index. The following code shows how to delete a search index from a collection: Text indexes support text search queries on string content. These indexes can include any field whose value is a\nstring or an array of string elements. MongoDB supports text search for various languages. You can specify the default\nlanguage as an option when creating the index. MongoDB offers an improved full-text search solution,\n Atlas Search . 
To learn more about Atlas Search\nindexes and how to use them, see the Atlas Search and Vector Search Indexes section of this\nguide. The following example creates a text index on the plot field: The following is an example of a query that is covered by the index\ncreated in the preceding code snippet. Note that the sort is\nomitted because text indexes do not contain sort order. A collection can only contain one text index. If you want to create a\ntext index for multiple text fields, you must create a compound\nindex. A text search runs on all the text fields within the compound\nindex. The following snippet creates a compound text index for the title and genre \nfields: For more information, see the following Server Manual Entries: Compound Text Index Restrictions Text Indexes MongoDB supports queries of geospatial coordinate data using 2dsphere indexes . With a 2dsphere index, you can query\nthe geospatial data for inclusion, intersection, and proximity. For more information on querying geospatial data, see\n Geospatial Queries in the Server manual. To create a 2dsphere index, you must specify a field that contains\nonly GeoJSON objects . To learn more about this type, see\n GeoJSON objects in the Server manual. The location.geo field in the following sample document from the theaters collection in the sample_mflix \ndatabase is a GeoJSON Point object that describes the coordinates of the theater: The following example creates a 2dsphere index on the location.geo field: The following is an example of a geospatial query that is covered by the index\ncreated in the preceding code snippet: MongoDB also supports 2d indexes for calculating distances on a\nEuclidean plane and for working with the \"legacy coordinate pairs\"\nsyntax used in MongoDB 2.2 and earlier. To learn more, see\n Geospatial Queries in the Server manual. Attempting to create a geospatial index on a field that is already\ncovered by a geospatial index results in an error. Unique indexes ensure that the indexed fields do not store duplicate values. By default, MongoDB creates a unique index\non the _id field during the creation of a collection. To create a unique index, specify the field or combination of\nfields that you want to prevent duplication on and set the unique option to true . The following example creates a unique, descending index on the theaterId field: Refer to the Unique Indexes page in the MongoDB server manual for more information. If you perform a write operation that stores a duplicate value that\nviolates the unique index, the driver raises a DuplicateKeyException ,\nand MongoDB throws an error resembling the following: Clustered indexes instruct a collection to store documents ordered\nby a key value. To create a clustered index, specify the clustered index\noption with the _id field as the key and the unique field as\n true when you create your collection. The following example creates a clustered index on the _id field in\nthe vendors collection: See the MongoDB server manual sections for more information: Clustered Index Clustered Collections You can remove any unused index except the default unique index on the\n _id field. The following sections show the ways to remove indexes: Using an index specification document Using an indexed name field Using a wildcard character to remove all indexes Pass an index specification document to the dropIndex() method to\nremove an index from a collection. An index specification document is\na Bson instance that specifies the type of index on a\nspecified field. 
The following snippet removes an ascending index on the title field\nin a collection: If you want to drop a text index, you must use the name of the index\ninstead. See the Remove an Index Using a Name Field section for details. Pass the name field of the index to the dropIndex() method to\nremove an index from a collection. If you must find the name of your index, use the listIndexes() \nmethod to see the value of the name fields in your indexes. The following snippet retrieves and prints all the indexes in a\ncollection: If you call listIndexes() on a collection that contains a text index,\nthe output might resemble the following: This output tells us the names of the existing indexes are \"_id_\" and\n\"title_text\". The following snippet removes the \"title_text\" index from the collection: You cannot remove a single field from a compound text index. You must\ndrop the entire index and create a new one to update the indexed\nfields. Starting with MongoDB 4.2, you can drop all indexes by calling the\n dropIndexes() method on your collection: For prior versions of MongoDB, pass \"*\" as a parameter to your call to\n dropIndex() on your collection: For more information on the methods in this section, see the following API Documentation: dropIndex() dropIndexes()", - "code": [ - { - "lang": "none", - "value": "name_1_age_-1" - }, - { - "lang": "kotlin", - "value": "// Data class for the movies collection\ndata class Movie(\n val title: String,\n val year: Int,\n val cast: List<String>,\n val genres: List<String>,\n val type: String,\n val rated: String,\n val plot: String,\n val fullplot: String,\n)\n\n// Data class for the theaters collection\ndata class Theater(\n val theaterId: Int,\n val location: Location\n) {\n data class Location(\n val address: Address,\n val geo: Point\n ) {\n data class Address(\n val street1: String,\n val city: String,\n val state: String,\n val zipcode: String\n )\n }\n}\n" - }, - { - "lang": "kotlin", - "value": "val resultCreateIndex = moviesCollection.createIndex(Indexes.ascending(Movie::title.name))\nprintln(\"Index created: $resultCreateIndex\")\n" - }, - { - "lang": "console", - "value": "Index created: title_1" - }, - { - "lang": "kotlin", - "value": "val filter = Filters.eq(Movie::title.name, \"The Dark Knight\")\nval sort = Sorts.ascending(Movie::title.name)\nval projection = Projections.fields(\n Projections.include(Movie::title.name),\n Projections.excludeId()\n)\n\ndata class Results(val title: String)\n\nval resultsFlow = moviesCollection.find(filter).sort(sort).projection(projection)\n\nresultsFlow.collect { println(it) }\n" - }, - { - "lang": "kotlin", - "value": "val resultCreateIndex = moviesCollection.createIndex(Indexes.ascending(Movie::type.name, Movie::rated.name))\n\nprintln(\"Index created: $resultCreateIndex\")\n" - }, - { - "lang": "console", - "value": "Index created: type_1_rated_1" - }, - { - "lang": "kotlin", - "value": "val filter = Filters.and(\n Filters.eq(Movie::type.name, \"movie\"),\n Filters.eq(Movie::rated.name, \"G\")\n)\nval sort = Sorts.ascending(Movie::type.name, Movie::rated.name)\nval projection = Projections.fields(\n Projections.include(Movie::type.name, Movie::rated.name),\n Projections.excludeId()\n)\nval resultsFlow = moviesCollection.find(filter).sort(sort).projection(projection)\n\nresultsFlow.collect { println(it) }\n" - }, - { - "lang": "kotlin", - "value": "val resultCreateIndex =\n moviesCollection.createIndex(Indexes.ascending(Movie::rated.name, Movie::genres.name, Movie::title.name))\n\nprintln(\"Index created: 
$resultCreateIndex\")\n" - }, - { - "lang": "console", - "value": "Index created: rated_1_genres_1_title_1" - }, - { - "lang": "kotlin", - "value": "val filter = Filters.and(\n Filters.eq(Movie::genres.name, \"Animation\"),\n Filters.eq(Movie::rated.name, \"G\")\n)\nval sort = Sorts.ascending(Movie::title.name)\nval projection = Projections.fields(\n Projections.include(Movie::title.name, Movie::rated.name),\n Projections.excludeId()\n)\nval resultsFlow = moviesCollection.find(filter).sort(sort).projection(projection)\n\nresultsFlow.collect { println(it) }\n" - }, - { - "lang": "kotlin", - "value": "val searchIdx = Document(\n \"mappings\",\n Document(\"dynamic\", true)\n)\nval resultCreateIndex = moviesCollection.createSearchIndex(\"myIndex\", searchIdx)\n" - }, - { - "lang": "kotlin", - "value": "val searchIdxMdl = SearchIndexModel(\n \"searchIdx\",\n Document(\"analyzer\", \"lucene.standard\").append(\n \"mappings\", Document(\"dynamic\", true)\n ),\n SearchIndexType.search()\n)\n\nval vectorSearchIdxMdl = SearchIndexModel(\n \"vsIdx\",\n Document(\n \"fields\",\n listOf(\n Document(\"type\", \"vector\")\n .append(\"path\", \"embeddings\")\n .append(\"numDimensions\", 1536)\n .append(\"similarity\", \"dotProduct\")\n )\n ),\n SearchIndexType.vectorSearch()\n)\n\nval resultCreateIndexes = moviesCollection.createSearchIndexes(\n listOf(searchIdxMdl, vectorSearchIdxMdl)\n)\n" - }, - { - "lang": "kotlin", - "value": "val searchIndexesList = moviesCollection.listSearchIndexes().toList()\n" - }, - { - "lang": "kotlin", - "value": "moviesCollection.updateSearchIndex(\n \"myIndex\",\n Document(\"analyzer\", \"lucene.simple\").append(\n \"mappings\",\n Document(\"dynamic\", false)\n .append(\n \"fields\",\n Document(\n \"title\",\n Document(\"type\", \"string\")\n )\n )\n )\n)\n" - }, - { - "lang": "kotlin", - "value": "moviesCollection.dropSearchIndex(\"myIndex\");\n" - }, - { - "lang": "kotlin", - "value": "try {\n val resultCreateIndex = moviesCollection.createIndex(Indexes.text(Movie::plot.name))\n println(\"Index created: $resultCreateIndex\")\n} catch (e: MongoCommandException) {\n if (e.errorCodeName == \"IndexOptionsConflict\") {\n println(\"there's an existing text index with different options\")\n }\n}\n" - }, - { - "lang": "console", - "value": "Index created: plot_text" - }, - { - "lang": "kotlin", - "value": "val filter = Filters.text(\"Batman\")\nval projection = Projections.fields(\n Projections.include(Movie::fullplot.name),\n Projections.excludeId()\n)\n\ndata class Results(val fullplot: String)\n\nval resultsFlow = moviesCollection.find(filter).projection(projection)\n\nresultsFlow.collect { println(it) }\n" - }, - { - "lang": "kotlin", - "value": "try {\n val resultCreateIndex = moviesCollection.createIndex(\n Indexes.compoundIndex(\n Indexes.text(Movie::title.name), Indexes.text(Movie::genres.name)\n )\n )\n println(\"Index created: $resultCreateIndex\")\n} catch (e: MongoCommandException) {\n if (e.errorCodeName == \"IndexOptionsConflict\") {\n println(\"there's an existing text index with different options\")\n }\n}\n" - }, - { - "lang": "console", - "value": "Index created: title_text_genre_text" - }, - { - "lang": "javascript", - "value": "{\n \"_id\" : ObjectId(\"59a47286cfa9a3a73e51e75c\"),\n \"theaterId\" : 104,\n \"location\" : {\n \"address\" : {\n \"street1\" : \"5000 W 147th St\",\n \"city\" : \"Hawthorne\",\n \"state\" : \"CA\",\n \"zipcode\" : \"90250\"\n },\n \"geo\" : {\n \"type\" : \"Point\",\n \"coordinates\" : [\n -118.36559,\n 33.897167\n ]\n }\n }\n}" - }, 
- { - "lang": "kotlin", - "value": "val resultCreateIndex = theatersCollection.createIndex(\n Indexes.geo2dsphere(\"${Theater::location.name}.${Theater.Location::geo.name}\")\n)\n\nprintln(\"Index created: $resultCreateIndex\")\n" - }, - { - "lang": "console", - "value": "Index created: location.geo_2dsphere" - }, - { - "lang": "kotlin", - "value": "// MongoDB Headquarters in New York, NY.\nval refPoint = Point(Position(-73.98456, 40.7612))\nval filter = Filters.near(\n \"${Theater::location.name}.${Theater.Location::geo.name}\",\n refPoint, 1000.0, 0.0\n)\nval resultsFlow = theatersCollection.find(filter)\n\nresultsFlow.collect { println(it) }\n" - }, - { - "lang": "kotlin", - "value": "try {\n val indexOptions = IndexOptions().unique(true)\n val resultCreateIndex = theatersCollection.createIndex(\n Indexes.descending(Theater::theaterId.name), indexOptions\n )\n println(\"Index created: $resultCreateIndex\")\n} catch (e: DuplicateKeyException) {\n println(\"duplicate field values encountered, couldn't create index: \\t${e.message}\")\n}\n" - }, - { - "lang": "console", - "value": "Index created: theaterId_-1" - }, - { - "lang": "none", - "value": "E11000 duplicate key error index" - }, - { - "lang": "kotlin", - "value": "val clusteredIndexOptions = ClusteredIndexOptions(Document(\"_id\", 1), true)\nval createCollectionOptions = CreateCollectionOptions().clusteredIndexOptions(clusteredIndexOptions)\n\ndatabase.createCollection(\"vendors\", createCollectionOptions)\n" - }, - { - "lang": "kotlin", - "value": "moviesCollection.dropIndex(Indexes.ascending(Movie::title.name));\n" - }, - { - "lang": "json", - "value": "{ \"v\": 2, \"key\": {\"_id\": 1}, \"name\": \"_id_\" }\n{ \"v\": 2, \"key\": {\"_fts\": \"text\", \"_ftsx\": 1}, \"name\": \"title_text\", \"weights\": {\"title\": 1},\n\"default_language\": \"english\", \"language_override\": \"language\", \"textIndexVersion\": 3 }" - }, - { - "lang": "kotlin", - "value": "val indexes = moviesCollection.listIndexes()\n\nindexes.collect { println(it.toJson()) }\n" - }, - { - "lang": "kotlin", - "value": "moviesCollection.dropIndex(\"title_text\")\n" - }, - { - "lang": "kotlin", - "value": "moviesCollection.dropIndexes()\n" - }, - { - "lang": "kotlin", - "value": "moviesCollection.dropIndex(\"*\")\n" - } - ], - "preview": "In this guide, you can learn how to create and manage indexes by\nusing the MongoDB Kotlin Driver.", - "tags": "code example, optimization, atlas search", - "facets": { - "genre": ["reference"], - "target_product": ["drivers"], - "programming_language": ["kotlin"] - } - }, - { - "slug": "fundamentals/logging", - "title": "Logging", - "headings": [ - "Overview", - "Set Up a Logger", - "Background", - "Example - Set Up", - "Configure Your Logger", - "Example - Configure", - "Logger Names", - "Example - Names" - ], - "paragraphs": "In this guide, you can learn how to set up and configure a logger in the\nMongoDB Kotlin driver. You will learn how to: This guide shows how to record events in the driver.\nIf you would like to learn how to use information about the activity of the\ndriver in code, consider reading our\n guide on monitoring . Set up a logger using the Simple Logging Facade For Java (SLF4J) Configure the log level of your logger This section gives background on the dependencies necessary to set up a\nlogger and provides an example logger setup. 
The MongoDB Kotlin driver uses the Simple Logging Facade For Java (SLF4J).\nSLF4J allows you to specify your logging framework of choice at deployment time.\nFor more information on SLF4J,\n see the SLF4J documentation . Setting up a logger is optional. When you start your application, the MongoDB\nKotlin driver looks for the slf4j-api artifact in your classpath. If the driver\ncan't find the slf4j-api artifact, the driver logs the following warning with\n java.util.logging and disables all further logging: To set up a logger, you must include the following in your project. A binding is a piece of code that connects the slf4j-api artifact with a\nlogging framework. The following example shows how to bind the slf4j-api artifact\nto the two most popular logging frameworks, Log4j2 and Logback. The slf4j-api artifact A logging framework A binding For the most popular logging frameworks, there is often a single binding\nartifact that lists the slf4j-api and the logging framework as\ndependencies. This means that you can set up a logger by adding one artifact\nto your project's dependency list. You will see this in the example below. This example shows how to set up your logger. Click the\ntab corresponding to the logging framework you would like to use in your project. The versions listed below are illustrative rather than a\nsource of truth. You should check the official documentation for SLF4J and\nyour logging framework of choice for guaranteed up-to-date version\ninformation. SLF4J documentation Logback documentation Log4j2 documentation Select the build tool you are using in your project. Once you have included the preceding dependency, connect to your\nMongoDB instance and retrieve a document with the following code: For more information on Logback, see the\n Logback manual . Add the following dependency to your pom.xml file. Add the following dependency to your build.gradle.kts file: The default log level of Logback is DEBUG. To learn how to change your\nLogback logger's log level, see the\n example in the Configure Your Logger section of this page . Select the build tool you are using in your project. Once you have included the preceding dependency, log an error using the\nfollowing code: For more information on Log4j2, see the\n Log4j2 manual . Add the following dependency to your pom.xml file. Add the following dependency to your build.gradle.kts file. The default log level of Log4j2 is ERROR. This means that running\nstandard operations in the MongoDB Kotlin driver will not produce output\nfrom Log4j2 without configuration. To learn how to change your Log4j2\nlogger's log level, see the\n example in the Configure Your Logger section of this page . To configure your logger, you must use the configuration system of the logging\nframework bound to SLF4J. In the following example, we show how you can use your logging framework's\nconfiguration system to set your logger's log level . A logger's log level specifies a lower bound for how urgent a message must be\nfor the logger to output that message. This example shows how to configure your logger's log level to INFO.\nSelect the tab corresponding to the logging framework you are using in your\nproject. Specify Logback configurations in a file named logback.xml . Your\n logback.xml file does not have to be in a specific location, but it must\nbe accessible from your classpath. The Logback framework defines the following log levels.
The\nfollowing lists the log levels, ordered from most urgent to least\nurgent: Set your logback.xml file to the following. To test that your logger configuration was successful, run the following\ncode. For more information on configuring Logback, see the\n Logback Manual . ERROR WARN INFO DEBUG TRACE Specify Log4j2 configurations in a file named log4j2.xml . Your\n log4j2.xml file does not have to be in a specific location, but it must\nbe accessible from your classpath. The Log4j2 framework defines the following log levels. The following lists the\nlog levels, ordered from most urgent to least urgent: Set your log4j2.xml file to the following. To test that your logger configuration was successful, run the following\ncode. For more information on configuring Log4j2, see the official\n Log4j2 configuration guide . FATAL ERROR WARN INFO DEBUG TRACE ALL Your logger uses logger names to help organize different logging events. Logger\nnames are strings that form a hierarchy. A logger is an ancestor of another logger if\nits name followed by a \".\" is a prefix of the other logger's name. For example,\n \"grandparent\" is an ancestor of \"grandparent.parent\" which is an\nancestor of \"grandparent.parent.child\" . For a concrete example, this is what a logger hierarchy looks like in code. A logger inherits the properties of its ancestor logger and can define\nits own. You can think of this as similar to class inheritance in Kotlin. The MongoDB Kotlin driver defines the following logger names to organize different\nlogging events in the driver. Here are the logger names defined in the driver\nand the logging events they correspond to: org.mongodb.driver.authenticator : authentication org.mongodb.driver.client : events related to MongoClient instances org.mongodb.driver.cluster : monitoring of MongoDB servers org.mongodb.driver.connection : connections and connection pools org.mongodb.driver.connection.tls : TLS/SSL org.mongodb.driver.operation : operations, including logging related to automatic retries org.mongodb.driver.protocol : commands sent to and replies received from MongoDB servers org.mongodb.driver.uri : connection string parsing org.mongodb.driver.management : JMX (Java Management Extensions) This example shows how to change the log level for a specific driver logger.\nWe set the root logger to OFF and the org.mongodb.driver.connection logger to\nINFO. This will cause the application to only log messages related to connecting\nto a MongoDB instance. Select the tab corresponding to the logging framework you are using in your\nproject. Set your logback.xml file to the following. To test that your logger configuration was successful, run the following\ncode: For more information on configuring Logback, see the\n official Logback configuration guide . Set your log4j2.xml file to the following. To test that your logger configuration was successful, run the following\ncode. For more information on configuring Log4j2, see the\n official Log4j2 configuration guide .", - "code": [ - { - "lang": "none", - "value": "WARNING: SLF4J not found on the classpath. 
Logging is disabled for the 'org.mongodb.driver' component" - }, - { - "lang": "xml", - "value": "\n \n ch.qos.logback\n logback-classic\n 1.2.11\n \n" - }, - { - "lang": "kotlin", - "value": "dependencies {\n implementation(\"ch.qos.logback:logback-classic:1.2.11\")\n}" - }, - { - "lang": "kotlin", - "value": "val mongoClient = MongoClient.create(\"\");\nval database = mongoClient.getDatabase(DB_NAME_PLACEHOLDER);\nval collection = database.getCollection(COLLECTION_NAME_PLACEHOLDER);\ncollection.find().firstOrNull()\n" - }, - { - "lang": "console", - "value": "...\n12:14:55.853 [main] DEBUG org.mongodb.driver.connection - Opened connection [connectionId{localValue:3, serverValue:3}] to \n12:14:55.861 [main] DEBUG org.mongodb.driver.protocol.command - Command \"find\" started on database using a connection with driver-generated ID 3 and server-generated ID 3 to . The request ID is 5. Command: {\"find\": \"\", \"filter\": {}, \"limit\": 1, \"singleBatch\": true, \"$db\": \"\", \"lsid\": {\"id\": {\"$binary\": {\"base64\": \"<_id>\", \"subType\": \"04\"}}}, \"$readPreference\": {\"mode\": \"primaryPreferred\"}}\n12:14:55.864 [main] DEBUG org.mongodb.driver.protocol.command - Command \"find\" succeeded in 4.34 ms using a connection with driver-generated ID 3 and server-generated ID 3 to .\", \"firstBatch\": []}, \"ok\": 1.0, \"$clusterTime\": {\"clusterTime\": {\"$timestamp\": {\"t\": 1673778535, \"i\": 1}}, \"signature\": {\"hash\": {\"$binary\": {\"base64\": \"<_id>\", \"subType\": \"00\"}}, \"keyId\": 0}}, \"operationTime\": {\"$timestamp\": {\"t\": 1673778535, \"i\": 1}}}" - }, - { - "lang": "xml", - "value": "\n \n org.apache.logging.log4j\n log4j-slf4j-impl\n 2.17.1\n \n" - }, - { - "lang": "groovy", - "value": "dependencies {\n implementation(\"org.apache.logging.log4j:log4j-slf4j-impl:2.17.1\")\n}" - }, - { - "lang": "kotlin", - "value": "val loggerParent = LoggerFactory.getLogger(\"parent\")\nval loggerChild = LoggerFactory.getLogger(\"parent.child\")\n" - }, - { - "lang": "kotlin", - "value": "val loggerParent = LoggerFactory.getLogger(\"parent\")\nval loggerChild = LoggerFactory.getLogger(\"parent.child\")\n" - }, - { - "lang": "console", - "value": "12:35:00.438 [main] ERROR - Logging an Error" - }, - { - "lang": "xml", - "value": "\n \n \n \n %-4relative [%thread] %-5level %logger{30} - %msg%n\n \n \n \n \n \n \n" - }, - { - "lang": "kotlin", - "value": "val mongoClient = MongoClient.create(\"\");\nval database = mongoClient.getDatabase(DB_NAME_PLACEHOLDER);\nval collection = database.getCollection(COLLECTION_NAME_PLACEHOLDER);\ncollection.find().firstOrNull()\n" - }, - { - "lang": "console", - "value": "...\n1317 [cluster-ClusterId{value='', description='null'}-] INFO org.mongodb.driver.cluster - Discovered replica set primary \n1568 [main] INFO org.mongodb.driver.connection - Opened connection [connectionId{localValue:7, serverValue:}] to " - }, - { - "lang": "xml", - "value": "\n\n \n \n \n \n \n \n \n \n \n \n" - }, - { - "lang": "kotlin", - "value": "val mongoClient = MongoClient.create(\"\");\nval database = mongoClient.getDatabase(DB_NAME_PLACEHOLDER);\nval collection = database.getCollection(COLLECTION_NAME_PLACEHOLDER);\ncollection.find().firstOrNull()\n" - }, - { - "lang": "console", - "value": "...\n10:14:57.633 [cluster-ClusterId{value=, description='null'}-] INFO org.mongodb.driver.cluster - Discovered replica set primary \n10:14:57.790 [main] INFO org.mongodb.driver.connection - Opened connection [connectionId{localValue:7, serverValue:}] to " - }, - { - "lang": 
"kotlin", - "value": "import org.slf4j.LoggerFactory\n" - }, - { - "lang": "kotlin", - "value": "val loggerParent = LoggerFactory.getLogger(\"parent\")\nval loggerChild = LoggerFactory.getLogger(\"parent.child\")\n" - }, - { - "lang": "xml", - "value": "\n \n \n \n %-4relative [%thread] %-5level %logger{30} - %msg%n\n \n \n \n \n \n \n \n" - }, - { - "lang": "kotlin", - "value": "val mongoClient = MongoClient.create(\"\");\nval database = mongoClient.getDatabase(DB_NAME_PLACEHOLDER);\nval collection = database.getCollection(COLLECTION_NAME_PLACEHOLDER);\ncollection.find().firstOrNull()\n" - }, - { - "lang": "console", - "value": "...\n829 [cluster-rtt-ClusterId{value='', description='null'}-] INFO org.mongodb.driver.connection - Opened connection [connectionId{localValue:2, serverValue:}] to \n977 [main] INFO org.mongodb.driver.connection - Opened connection [connectionId{localValue:7, serverValue:}] to " - }, - { - "lang": "xml", - "value": "\n\n \n \n \n \n \n \n \n \n \n \n \n" - }, - { - "lang": "kotlin", - "value": "val mongoClient = MongoClient.create(\"\");\nval database = mongoClient.getDatabase(DB_NAME_PLACEHOLDER);\nval collection = database.getCollection(COLLECTION_NAME_PLACEHOLDER);\ncollection.find().firstOrNull()\n" - }, - { - "lang": "console", - "value": "...\n15:40:23.005 [cluster-ClusterId{value='', description='null'}-] INFO org.mongodb.driver.connection - Opened connection [connectionId{localValue:3, serverValue:}] to \n15:40:23.159 [main] INFO org.mongodb.driver.connection - Opened connection [connectionId{localValue:7, serverValue:}] to " - } - ], - "preview": "In this guide, you can learn how to set up and configure a logger in the\nMongoDB Kotlin driver.", - "tags": null, - "facets": { - "target_product": ["drivers"], - "programming_language": ["kotlin"] - } - }, - { - "slug": "fundamentals/monitoring", - "title": "Monitoring", - "headings": [ - "Overview", - "Monitor Events", - "Command Events", - "Example", - "Server Discovery and Monitoring Events", - "Example", - "Connection Pool Events", - "Example", - "Monitor Connection Pool Events with JMX", - "JMX Support", - "JMX and JConsole Example", - "Include the Driver in Your Distributed Tracing System" - ], - "paragraphs": "In this guide, you can learn how to set up and configure monitoring in the\nMongoDB Kotlin driver. Monitoring is the process of getting information about the activities a running\nprogram performs for use in an application or an application performance\nmanagement library. Monitoring the MongoDB Kotlin driver lets you understand the\ndriver's resource usage and performance, and can help you make informed\ndecisions when designing and debugging your application. In this guide you will learn how to perform these tasks: This guide shows how to use information about the activity of the driver in code.\nIf you would like to learn how to record events in the driver,\nconsider reading our guide on logging . Monitor different types of events in the MongoDB Kotlin driver Monitor connection pool events with Java Management Extensions (JMX) and JConsole To monitor an event , you must register a listener on your MongoClient \ninstance. An event is any action that happens in a running program. The driver includes functionality\nfor listening to a subset of the events that occur when the driver is running. A listener is a class that performs some action when certain events occur.\nA listener's API defines the events it can respond to. Each method of a listener class represents a response to a certain event. 
Each\nmethod receives one argument: an object representing the event the method\nresponds to. The MongoDB Kotlin driver organizes the events it defines into three categories: The following sections show how to monitor each event category. For a full list of the events you can monitor,\n see the event package of the MongoDB Kotlin driver . Command Events Server Discovery and Monitoring Events Connection Pool Events A command event is an event related to a MongoDB database command. Some\nexamples of database commands that produce command events are find ,\n insert , delete , and count . To monitor command events, write a class that implements the\n CommandListener interface and register an instance of that class with your\n MongoClient instance. For more information on MongoDB database commands, see the\n MongoDB manual entry on database commands . The driver does not publish events for commands it calls internally. This\nincludes database commands the driver uses to monitor your cluster and\ncommands related to connection establishment (such as the initial hello \ncommand). As a security measure, the driver redacts the contents of some command events. This\nprotects the sensitive information contained in these command events. For a\nfull list of redacted command events, see the\n MongoDB command logging and monitoring specification . This example shows how to make a counter for database commands. The counter\nkeeps track of the number of times the driver successfully executes each database\ncommand, and prints this information every time a database command finishes. To make a counter, do the following: The following code defines the CommandCounter class which implements the\n CommandListener interface: The following code adds an instance of the CommandCounter class to a\n MongoClientSettings object, and configures a MongoClient instance with the\n MongoClientSettings object. The code then runs some database commands to test the\ncounter. For more information on the classes and methods mentioned in this section, see\nthe following API Documentation: Make a class with counter functionality that implements the CommandListener interface. Add an instance of the new class that implements CommandListener to a MongoClientSettings object. Configure a MongoClient instance with the MongoClientSettings object. CommandListener MongoClientSettings MongoClient CommandStartedEvent CommandSucceededEvent CommandFailedEvent A server discovery and monitoring (SDAM) event is an event related to a change\nin the state of the MongoDB instance or cluster you have connected the driver to. The driver defines nine SDAM events. The driver divides these nine events\nbetween three separate listener interfaces which each listen for three of the\nnine events. Here are the three interfaces and the events they listen for: To monitor a type of SDAM event, write a class that\nimplements one of the three preceding interfaces and register an instance of that\nclass with your MongoClient instance. For a detailed description of each SDAM event, see the MongoDB SDAM monitoring events specification . ClusterListener : topology-related events ServerListener : events related to mongod or mongos processes ServerMonitorListener : heartbeat related events This example shows how to make a listener class that prints a message that lets\nyou know if the driver can write to your MongoDB instance. The following code defines the IsWritable class which implements the\n ClusterListener interface. 
The following code adds an instance of the IsWritable class to a\n MongoClient object. The code then runs a find operation to test the\n IsWritable class. For more information on the classes and methods mentioned in this section, see\nthe following API Documentation: ClusterListener ServerListener ServerMonitorListener MongoClientSettings MongoClient ClusterDescriptionChangedEvent A connection pool event is an event related to a connection pool held by the driver.\nA connection pool is a set of open TCP connections your driver maintains with\na MongoDB instance. Connection pools help reduce the number of network handshakes\nyour application needs to perform with a MongoDB instance, and can help your\napplication run faster. To monitor connection pool events, write a class that implements the\n ConnectionPoolListener interface and register an instance of that class with your\n MongoClient instance. This example shows how to make a listener class that prints a message each time\nyou check out a connection from your connection pool. The following code defines the ConnectionPoolLibrarian class which implements the\n ConnectionPoolListener interface. The following code adds an instance of the ConnectionPoolLibrarian class to a\n MongoClient object. The code then runs a database command to test the\nlibrarian. For more information on the classes and methods mentioned in this section, see\nthe following API Documentation: ConnectionPoolListener MongoClientSettings MongoClient ConnectionCheckedOutEvent ConnectionCheckOutFailedEvent You can monitor connection pool events using Java Management Extensions (JMX) .\nJMX provides tools to monitor applications and devices. For more information on JMX, see\n the official Oracle JMX documentation . To enable JMX connection pool monitoring, add an instance of the\n JMXConnectionPoolListener class to your MongoClient object. The JMXConnectionPoolListener class performs the following actions: MXBeans registered on the platform MBean server have the following properties: All MXBean instances created by the driver are under the domain\n \"org.mongodb.driver\" . For more information on the topics discussed in this subsection, see the\nfollowing resources from Oracle: Creates MXBean instances for each mongod or mongos process the driver\nmaintains a connection pool with. Registers these MXBean instances with the platform MBean server. Property Description clusterId A client-generated unique identifier. This identifier ensures that\neach MXBean the driver makes has a unique name when an application has\nmultiple MongoClient instances connected to the same MongoDB deployment. host The hostname of the machine running the mongod or mongos process. port The port on which the mongod or mongos process is listening. minSize The minimum size of the connection pool, including idle and in-use connections. maxSize The maximum size of the connection pool, including idle and in-use connections. size The current size of the connection pool, including idle and in-use connections. checkedOutCount The current count of connections that are in use. Platform MBean Server Reference Documentation MXBean Documentation MBean Documentation This example shows how you can monitor the driver's connection pools using JMX\nand JConsole . JConsole is a JMX compliant GUI monitoring tool that comes with\nthe Java Platform. The following code snippet adds a JMXConnectionPoolListener to a\n MongoClient instance. 
The code then pauses execution so you can\nnavigate to JConsole and inspect your connection pools. Once you have started your server, open JConsole in your terminal using the\nfollowing command: Once JConsole is open, perform the following actions in the GUI: When you no longer want to inspect your connection pools in JConsole, do the\nfollowing: For more information on JMX and JConsole, see the following resources from\nOracle: For more information on the JMXConnectionPoolListener class, see\nthe API Documentation for\n JMXConnectionPoolListener . The descriptions of JMX and JConsole in this example are illustrative\nrather than a source of truth. For guaranteed up to date information, consult\nthe following official Oracle resources: JConsole documentation . JMX documentation Select the process running the preceding example code. Press Insecure Connection in the warning dialog box. Click on the MBeans tab. Inspect your connection pool events under the \"org.mongodb.driver\" domain. Exit JConsole by closing the JConsole window Stop the program running the preceding code snippet JConsole Documentation . Monitoring and Management Guide If you use a distributed tracing system , you can include event data from the\ndriver. A distributed tracing system is an application that\ntracks requests as they propagate throughout different services in a\nservice-oriented architecture. If you use the driver in a Spring Cloud \napplication, use\n Spring Cloud Sleuth to\ninclude MongoDB event data in the\n Zipkin distributed tracing system. If you do not use Spring Cloud or need to include driver event data in a distributed\ntracing system other than Zipkin, you must write a command event listener that\nmanages spans \nfor your desired distributed tracing system. To see an implementation of such a\nlistener, see the Java source code for the\n TraceMongoCommandListener \nclass in the Spring Cloud Sleuth source code. To learn more about Spring Cloud Sleuth, see\n Getting Started \nin the Spring Cloud Sleuth documentation. 
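The documentation describes such a span-managing listener but does not sketch one. The following is a minimal, hypothetical sketch: the Tracer and Span interfaces are stand-ins for whatever API your tracing client exposes and are not part of any real library, while the CommandListener interface and event types are the driver's real command-monitoring API (the same one the CommandCounter example below implements):

import com.mongodb.event.CommandFailedEvent
import com.mongodb.event.CommandListener
import com.mongodb.event.CommandStartedEvent
import com.mongodb.event.CommandSucceededEvent
import java.util.concurrent.ConcurrentHashMap

// Hypothetical stand-ins for your tracing client's API
interface Span { fun finish(error: Throwable? = null) }
interface Tracer { fun startSpan(name: String): Span }

// Opens a span when a command starts and closes it when the command
// succeeds or fails, keyed by request ID so concurrent commands don't collide
class TracingCommandListener(private val tracer: Tracer) : CommandListener {
    private val spans = ConcurrentHashMap<Int, Span>()

    override fun commandStarted(event: CommandStartedEvent) {
        spans[event.requestId] = tracer.startSpan("mongodb.${event.commandName}")
    }

    override fun commandSucceeded(event: CommandSucceededEvent) {
        spans.remove(event.requestId)?.finish()
    }

    override fun commandFailed(event: CommandFailedEvent) {
        spans.remove(event.requestId)?.finish(event.throwable)
    }
}

Register the listener with MongoClientSettings.Builder.addCommandListener(), exactly as in the CommandCounter example that follows.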
To view a detailed description of a distributed tracing system, see\n Dapper from Google Research.", - "code": [ - { - "lang": "kotlin", - "value": "class CommandCounter : CommandListener {\n private val commands = mutableMapOf<String, Int>()\n\n\n @Synchronized\n override fun commandSucceeded(event: CommandSucceededEvent) {\n val commandName = event.commandName\n val count = commands[commandName] ?: 0\n commands[commandName] = count + 1\n println(commands.toString())\n }\n\n override fun commandFailed(event: CommandFailedEvent) {\n println(\"Failed execution of command '${event.commandName}' with id ${event.requestId}\")\n }\n}\n" - }, - { - "lang": "kotlin", - "value": "val commandCounter = CommandCounter()\n\nval settings = MongoClientSettings.builder()\n .applyConnectionString(URI)\n .addCommandListener(commandCounter)\n .build()\nval mongoClient = MongoClient.create(settings)\nval database = mongoClient.getDatabase(DATABASE)\nval collection = database.getCollection(COLLECTION)\n\n// Run some commands to test the counter\ncollection.find().firstOrNull()\ncollection.find().firstOrNull()\n" - }, - { - "lang": "console", - "value": "{find=1}\n{find=2}\n{find=2, endSessions=1}" - }, - { - "lang": "kotlin", - "value": "class IsWritable : ClusterListener {\n private var isWritable = false\n\n\n @Synchronized\n override fun clusterDescriptionChanged(event: ClusterDescriptionChangedEvent) {\n if (!isWritable) {\n if (event.newDescription.hasWritableServer()) {\n isWritable = true\n println(\"Able to write to cluster\")\n }\n } else {\n if (!event.newDescription.hasWritableServer()) {\n isWritable = false\n println(\"Unable to write to cluster\")\n }\n }\n }\n}\n" - }, - { - "lang": "kotlin", - "value": "val clusterListener = IsWritable()\nval settings = MongoClientSettings.builder()\n .applyConnectionString(URI)\n .applyToClusterSettings { builder ->\n builder.addClusterListener(clusterListener)\n }\n .build()\nval mongoClient = MongoClient.create(settings)\nval database = mongoClient.getDatabase(DATABASE)\nval collection = database.getCollection(COLLECTION)\n// Run a command to trigger a ClusterDescriptionChangedEvent event\ncollection.find().firstOrNull()\n" - }, - { - "lang": "console", - "value": "Able to write to cluster" - }, - { - "lang": "kotlin", - "value": "class ConnectionPoolLibrarian : ConnectionPoolListener {\n\n override fun connectionCheckedOut(event: ConnectionCheckedOutEvent) {\n println(\"Let me get you the connection with id ${event.connectionId.localValue}...\")\n }\n\n override fun connectionCheckOutFailed(event: ConnectionCheckOutFailedEvent) {\n println(\"Something went wrong! Failed to checkout connection.\")\n }\n}\n" - }, - { - "lang": "kotlin", - "value": "val cpListener = ConnectionPoolLibrarian()\nval settings = MongoClientSettings.builder()\n .applyConnectionString(URI)\n .applyToConnectionPoolSettings { builder ->\n builder.addConnectionPoolListener(cpListener)\n }\n .build()\nval mongoClient = MongoClient.create(settings)\nval database = mongoClient.getDatabase(DATABASE)\nval collection = database.getCollection(COLLECTION)\n// Run a command to trigger connection pool events\ncollection.find().firstOrNull()\n" - }, - { - "lang": "console", - "value": "Let me get you the connection with id 21..." 
- }, - { - "lang": "shell", - "value": "jconsole" - }, - { - "lang": "kotlin", - "value": "val connectionPoolListener = JMXConnectionPoolListener()\nval settings = MongoClientSettings.builder()\n .applyConnectionString(uri)\n .applyToConnectionPoolSettings {\n it.addConnectionPoolListener(connectionPoolListener)\n }\n .build()\nval mongoClient: MongoClient = MongoClient.create(settings)\n\ntry {\n println(\"Navigate to JConsole to see your connection pools...\")\n Thread.sleep(Long.MAX_VALUE)\n} catch (e: Exception) {\n e.printStackTrace()\n}\n" - }, - { - "lang": "console", - "value": "Navigate to JConsole to see your connection pools..." - } - ], - "preview": "In this guide, you can learn how to set up and configure monitoring in the\nMongoDB Kotlin driver.", - "tags": null, - "facets": { - "target_product": ["drivers"], - "programming_language": ["kotlin"] - } - }, - { - "slug": "fundamentals/stable-api", - "title": "Stable API", - "headings": [ - "Overview", - "Enable the Stable API on a MongoDB Client", - "Stable API Options" - ], - "paragraphs": "The Stable API feature requires MongoDB Server 5.0 or later. You should only use the Stable API feature if all the MongoDB\nservers you are connecting to support this feature. In this guide, you can learn how to specify the Stable API when connecting to\na MongoDB instance or replica set. You can use the Stable API feature to\nforce the server to run operations with behavior compatible with the\nspecified API version . An API version defines the expected behavior of the\noperations it covers and the format of server responses. If you change to\na different API version, the operations are not guaranteed to be\ncompatible and the server responses are not guaranteed to be similar. When you use the Stable API feature with an official MongoDB driver, you\ncan update your driver or server without worrying about backward compatibility\nissues of the commands covered by the Stable API. See the MongoDB reference page on the Stable API \nfor more information including a list of commands it covers. The following sections describe how you can enable the Stable API for\nyour MongoDB client and the options that you can specify. To enable the Stable API, you must specify an API version in the settings\nof your MongoDB client. Once you instantiate a MongoClient instance with\na specified API version, all commands you run with that client use that\nversion of the Stable API. The following example shows how you can instantiate a MongoClient that\nsets the Stable API version and connects to a server by performing the\nfollowing operations: For more information on the methods and classes referenced in this\nsection, see the following API Documentation: If you need to run commands using more than one version of the\nStable API, instantiate a separate client with that version. If you need to run commands not covered by the Stable API, make sure the\n\"strict\" option is disabled. See the section on\n Stable API Options for more information. Construct a ServerApi instance using the ServerApi.Builder \nhelper class. Specify a Stable API version using a constant from the\n ServerApiVersion class. Construct a MongoClientSettings instance using the\n MongoClientSettings.Builder class. Specify a server to connect to using a ServerAddress instance. Instantiate a MongoClient using the MongoClient.create() method\nand pass your MongoClientSettings instance as a parameter. 
If you specify an API version and connect to a MongoDB server that does\nnot support the Stable API, your application may raise an exception when\nexecuting a command on your MongoDB server. If you use a MongoClient \nthat specifies the API version to query a server that does not support it,\nyour query could fail with an exception message that includes the\nfollowing text: ServerApi ServerApi.Builder ServerApiVersion ServerAddress MongoClientSettings MongoClientSettings.Builder MongoClient.create() MongoClient You can enable or disable optional behavior related to the Stable API as\ndescribed in the following table. The following example shows how you can set the two options on an instance\nof ServerApi by chaining methods on the ServerApi.Builder : For more information on the options in this section, see the following\nAPI Documentation: Option Name Description Strict DeprecationErrors strict() deprecationErrors()", - "code": [ - { - "lang": "kotlin", - "value": "val serverApi = ServerApi.builder()\n .version(ServerApiVersion.V1)\n .build()\n\n// Replace the uri string placeholder with your MongoDB deployment's connection string\nval uri = \"\"\n\nval settings = MongoClientSettings.builder()\n .applyConnectionString(ConnectionString(uri))\n .serverApi(serverApi)\n .build()\n\nval client = MongoClient.create(settings)\n" - }, - { - "lang": "none", - "value": "'Unrecognized field 'apiVersion' on server..." - }, - { - "lang": "kotlin", - "value": "val serverApi = ServerApi.builder()\n .version(ServerApiVersion.V1)\n .strict(true)\n .deprecationErrors(true)\n .build()\n" - } - ], - "preview": "In this guide, you can learn how to specify the Stable API when connecting to\na MongoDB instance or replica set. You can use the Stable API feature to\nforce the server to run operations with behavior compatible with the\nspecified API version. An API version defines the expected behavior of the\noperations it covers and the format of server responses. If you change to\na different API version, the operations are not guaranteed to be\ncompatible and the server responses are not guaranteed to be similar.", - "tags": null, - "facets": { - "target_product": ["drivers"], - "programming_language": ["kotlin"] - } - }, - { - "slug": "fundamentals/time-series", - "title": "Time Series Collections", - "headings": [ - "Overview", - "Create a Time Series Collection", - "Query a Time Series Collection" - ], - "paragraphs": "In this guide, you can learn about time series collections in\nMongoDB, and how to interact with them in the MongoDB Kotlin driver. Time series collections efficiently store sequences of measurements over\na period of time. Time series data consists of any data collected over\ntime, metadata that describes the measurement, and the time of the\nmeasurement. Example Measurement Metadata Sales Data Revenue Company Infection Rates Amount of People Infected Location To create a time series collection, pass the following parameters to the\n createCollection() \nmethod: To check if you successfully created the collection, send the\n \"listCollections\" command to the runCommand() method. The name of the new collection to create The TimeSeriesOptions \nfor creating the collection in a CreateCollectionOptions object Versions prior to MongoDB 5.0 cannot create a time series collection. To query in a time series collection, use the same conventions as you\nwould for retrieving \nand aggregating data . For more information, see our\n Aggregates Builders guide . 
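As a minimal sketch of that point, assuming the september2021 collection created earlier and org.bson.Document as the result type, a read on a time series collection is an ordinary find():

import com.mongodb.client.model.Filters
import org.bson.Document
import java.time.Instant
import java.util.Date

// An ordinary range filter on the collection's time field; the field name
// matches the TimeSeriesOptions used when the collection was created
val collection = database.getCollection<Document>("september2021")
val filter = Filters.gte("temperature", Date.from(Instant.parse("2021-09-01T00:00:00Z")))

collection.find(filter).collect { println(it.toJson()) }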
MongoDB version 5.0 introduces window functions into the aggregation\npipeline. You can use window functions to perform operations on a\ncontiguous span of time series data.", - "code": [ - { - "lang": "kotlin", - "value": "val database = mongoClient.getDatabase(\"fall_weather\")\nval tsOptions = TimeSeriesOptions(\"temperature\")\nval collOptions = CreateCollectionOptions().timeSeriesOptions(tsOptions)\n\ndatabase.createCollection(\"september2021\", collOptions)\n" - }, - { - "lang": "kotlin", - "value": "val commandResult = database.listCollections().toList()\n .find { it[\"name\"] == \"september2021\" }\n\nprintln(commandResult?.toJson(JsonWriterSettings.builder().indent(true).build()))\n" - }, - { - "lang": "json", - "value": "{\n \"name\": \"september2021\",\n \"type\": \"timeseries\",\n \"options\": {\n \"timeseries\": {\n \"timeField\": \"temperature\",\n \"granularity\": \"seconds\",\n \"bucketMaxSpanSeconds\": 3600\n }\n },\n \"info\": {\n \"readOnly\": false\n }\n}" - } - ], - "preview": "In this guide, you can learn about time series collections in\nMongoDB, and how to interact with them in the MongoDB Kotlin driver.", - "tags": null, - "facets": { - "target_product": ["drivers"], - "programming_language": ["kotlin"] - } - }, - { - "slug": "fundamentals", - "title": "Fundamentals", - "headings": [], - "paragraphs": "Learn how to perform the following tasks using the Kotlin driver in the\nFundamentals section: Connect to MongoDB Use the Stable API Authenticate with MongoDB Convert between MongoDB Data Formats and Kotlin Objects Read from and Write to MongoDB Simplify your Code with Builders Transform your Data Create Aggregation Expressions Create Indexes to Speed Up Queries Sort Using Collations Log Events in the Driver Monitor Driver Events Use a Time Series Collection Encrypt Fields in a Document", - "code": [], - "preview": null, - "tags": null, - "facets": { - "target_product": ["drivers"], - "programming_language": ["kotlin"] - } - }, - { - "slug": "", - "title": "MongoDB Kotlin Driver", - "headings": [ - "Introduction", - "Quick Start", - "Quick Reference", - "What's New", - "Usage Examples", - "Fundamentals", - "API Documentation", - "FAQ", - "Connection Troubleshooting", - "Issues & Help", - "Compatibility", - "Migrate from KMongo", - "Validate Driver Artifact Signatures", - "Learn", - "Developer Hub" - ], - "paragraphs": "Welcome to the documentation site for the Kotlin Driver, the official\nMongoDB driver for server-side Kotlin applications that use coroutines.\nDownload the driver by using Maven or Gradle , or set up a runnable project by following our\nQuick Start guide. If your Kotlin application requires synchronous processing, use the\n Sync Driver , which uses synchronous operations\nto make blocking calls to MongoDB. If you are developing an Android or Kotlin Multiplatform (KMP)\napplication, you can use the MongoDB Atlas Device Kotlin SDK \nto access Atlas App Services and to manage your Realm data. Learn how to establish a connection to MongoDB Atlas and begin\nworking with data in the Quick Start section. See driver syntax examples for common MongoDB commands in the\n Quick Reference section. For a list of new features and changes in each version, see the\n What's New section. For fully runnable code snippets and explanations for common\nmethods, see the Usage Examples section. 
Learn how to perform the following tasks using the Kotlin driver in the\nFundamentals section: Connect to MongoDB Use the Stable API Authenticate with MongoDB Convert between MongoDB Data Formats and Kotlin Objects Read from and Write to MongoDB Simplify your Code with Builders Transform your Data Create Aggregation Expressions Create Indexes to Speed Up Queries Sort Using Collations Log Events in the Driver Monitor Driver Events Use a Time Series Collection Encrypt Fields in a Document The MongoDB Kotlin driver API documentation contains several libraries\norganized by functionality. For detailed information about classes and\nmethods in each library, see the following table for their descriptions\nand links to the API documentation. Library Description BSON Base BSON classes BSON Record Codec Classes that support records Core Shared core classes Kotlin Driver API For answers to commonly asked questions about the MongoDB\nKotlin Driver, see the Frequently Asked Questions (FAQ) \nsection. For solutions to some issues you might experience when connecting to a MongoDB\ndeployment while using the MongoDB Kotlin Driver, see the\n Connection Troubleshooting section. Learn how to report bugs, contribute to the driver, and find\nadditional resources for asking questions and receiving help in the\n Issues & Help section. For the compatibility charts that show the recommended Kotlin\nDriver version for each MongoDB Server version, see the\n Compatibility section. Learn about the changes needed to migrate from the\ncommunity-developed KMongo driver to the MongoDB Kotlin Driver in the\n Migrate from KMongo section. Learn about how to validate signatures of Kotlin driver artifacts\npublished on Maven in the Validate Driver Artifact Signatures section. Visit the Developer Hub to learn more about the MongoDB Kotlin driver. The Developer Hub provides tutorials and social engagement for\ndevelopers. To learn how to use MongoDB features with the Kotlin driver, see the\n Kotlin Tutorials and Articles page, which\nfeatures our Getting Started with the MongoDB Kotlin Driver \ndeveloper tutorial. To ask questions and engage in discussions with fellow developers using\nthe Kotlin Driver, visit the MongoDB Developer Community .", - "code": [], - "preview": "Welcome to the documentation site for the Kotlin Driver, the official\nMongoDB driver for server-side Kotlin applications that use coroutines.\nDownload the driver by using Maven or Gradle, or set up a runnable project by following our\nQuick Start guide.", - "tags": null, - "facets": { - "target_product": ["drivers"], - "programming_language": ["kotlin"] - } - }, - { - "slug": "issues-and-help", - "title": "Issues & Help", - "headings": ["Bugs / Feature Requests", "Pull Requests"], - "paragraphs": "We are lucky to have a vibrant MongoDB Kotlin community that includes users\nwith varying levels of experience using the Kotlin driver. We find the quickest\nway to get support for general questions is through the MongoDB Community Forums . Refer to our support channels \ndocumentation for more information. If you think you've found a bug or want to see a new feature in the Kotlin\ndriver, please open a case in our issue management tool, JIRA: If you've identified a security vulnerability in a driver or any other\nMongoDB project, please report it according to the instructions found in the\n Create a Vulnerability Report page . Create an account and login . Navigate to the JAVA project . Click Create . 
Please provide as much information as possible\nabout the issue and the steps to reproduce it. Bug reports in JIRA for the Kotlin driver and the Core Server (i.e. SERVER)\nproject are public. We are happy to accept contributions to help improve the driver. We will guide\nuser contributions to ensure they meet the standards of the codebase. Please\nensure that any pull requests include documentation, tests, and pass the\n gradle checks. To get started check out the source and work on a branch: Finally, ensure that the code passes gradle checks.", - "code": [ - { - "lang": "bash", - "value": "$ git clone https://github.com/mongodb/mongo-java-driver.git\n$ cd mongo-java-driver\n$ git checkout -b myNewFeature" - }, - { - "lang": "bash", - "value": "$ ./gradlew check" - } - ], - "preview": "We are lucky to have a vibrant MongoDB Kotlin community that includes users\nwith varying levels of experience using the Kotlin driver. We find the quickest\nway to get support for general questions is through the MongoDB Community Forums.", - "tags": null, - "facets": { - "target_product": ["drivers"], - "programming_language": ["kotlin"] - } - }, - { - "slug": "migrate-kmongo", - "title": "Migrate from KMongo", - "headings": [ - "Overview", - "Connect to MongoDB Cluster", - "CRUD and Aggregation", - "Construct Queries", - "Data Typing", - "Data Serialization", - "Synchronous and Asynchronous Support", - "What Next?" - ], - "paragraphs": "This page contains a high-level comparison of most of the ways the official\nMongoDB Kotlin and the community-developed KMongo driver differ.\nYou can use this page to identify the changes you need to make to migrate from\nthe deprecated KMongo driver to the official MongoDB Kotlin driver. The MongoDB Kotlin driver is the officially supported and maintained MongoDB driver for\nKotlin. It is developed by the MongoDB team. Although both drivers support synchronous and asynchronous operations ,\nthe examples on this page will use asynchronous coroutine-based operations. KMongo is a popular community-developed library\nfor working with MongoDB from Kotlin applications.\nIt is a wrapper around the Java driver that was created prior to the creation of\nthe official Kotlin driver to serve the needs of the Kotlin community. As of July 2023, KMongo has been marked as deprecated. Both drivers let you connect to and communicate with MongoDB clusters from a\nKotlin application. To connect to a MongoDB cluster using the MongoDB Kotlin driver: See the Connect to MongoDB documentation for more\ninformation. To connect to a MongoDB cluster using KMongo with coroutines: Unlike the MongoDB Kotlin driver, KMongo allows the collection name to be\ninferred from the data class name. Both drivers provide support for all MongoDB CRUD APIs and aggregation\noperations. The MongoDB Kotlin driver also provides functions for all basic CRUD operations: Aggregation pipelines can be built using the aggregate method and the\n pipeline function: See the CRUD Operations and\n Aggregation documentation for more information. KMongo provides functions for all basic CRUD operations: Aggregation pipelines can be built using the aggregate method and the\n pipeline function: For more information on available methods, see the\n Extensions Overview KMongo\ndocumentation. Both drivers provide support for type-safe queries using property references. The MongoDB Kotlin driver uses the Builders API to construct queries.\nAlternatively, you can use the Document class. 
To map a KMongo string query to the Kotlin driver, you can use the JsonObject class. For more information, see the following Kotlin driver documentation: Builders Documents guide JsonObject API Documentation With KMongo, you can create queries using property references on the data class\nthat represents objects in a collection and infix operators that the library\nprovides. KMongo also supports string queries that let you construct queries with\nMongoDB Query Language: For more information, see the following KMongo documentation: Typed Queries Mongo Shell Queries Both drivers support the use of Kotlin data classes as well as the Document class to\nmodel the data stored in a MongoDB collection. The Document \nclass lets you model data represented in a MongoDB collection in a flexible format. You can use data classes and Document classes to model data with the\nMongoDB Kotlin driver: You can use data classes and Document classes to model data in KMongo: Both drivers provide support for serializing and deserializing data objects\nin Kotlin to and from BSON. You can serialize data classes in the Kotlin driver using both automatic\ndata class codecs as well as the kotlinx.serialization library. The\ndriver provides an efficient Bson serializer that handles the\nserialization of Kotlin objects to BSON data. To learn more, see the Kotlin Serialization \ndocumentation. If you use the Document class to represent your collection, you can\nserialize it to JSON and EJSON using the .toJson() method: To learn more about serializing data with the Document class, refer to\n Document Data Format - Extended JSON documentation. You can serialize data in KMongo using the following serialization libraries: To learn more about the KMongo serialization methods, refer to the\n Object Mapping \nKMongo documentation. Jackson (default) POJO Codec engine kotlinx.serialization Both drivers support synchronous and asynchronous operations. The MongoDB Kotlin driver also has separate libraries for synchronous and\nasynchronous operations. However, the Kotlin driver only has built-in support\nfor coroutines as an asynchronous paradigm. The MongoDB Kotlin driver does not\ncurrently provide support for other asynchronous paradigms such as Reactive\nStreams, Reactor, or RxJava2. Unlike KMongo, if you want to write asynchronous code, you only need to import\nthe relevant package. To write synchronous code: To write asynchronous coroutine code: Driver Package Sync Coroutines KMongo has a core library org.litote.kmongo:kmongo with main functionality and\nseparate companion libraries that provide asynchronous support to the core library. KMongo supports the following asynchronous paradigms: To write synchronous code with KMongo: To write async coroutine code with KMongo: To learn more, refer to the Quick Start \nin the KMongo documentation. 
Async Style Package Reactive Streams Coroutines Reactor RxJava2 Now that you have learned about the differences between KMongo and the MongoDB\nKotlin driver, see the Quick Start to get\nstarted using the MongoDB Kotlin driver.", - "code": [ - { - "lang": "kotlin", - "value": "import com.mongodb.kotlin.client.coroutine.MongoClient\n\ndata class Jedi(val name: String, val age: Int)\n\n// Replace the placeholder with your MongoDB deployment's connection string\nval uri = CONNECTION_STRING_URI_PLACEHOLDER\n\nval mongoClient = MongoClient.create(uri)\n\nval database = mongoClient.getDatabase(\"test\")\n// Get a collection of documents of type Jedi\nval collection = database.getCollection<Jedi>(\"jedi\")" - }, - { - "lang": "kotlin", - "value": "import org.litote.kmongo.reactivestreams.*\nimport org.litote.kmongo.coroutine.*\n\ndata class Jedi(val name: String, val age: Int)\n\n// Get new MongoClient instance using coroutine extension\nval client = KMongo.createClient().coroutine\n\nval database = client.getDatabase(\"test\")\n// Get a collection of documents of type Jedi\nval col = database.getCollection<Jedi>()" - }, - { - "lang": "kotlin", - "value": "// Insert a document\n val jedi = Jedi(\"Luke Skywalker\", 19)\n collection.insertOne(jedi)\n\n // Find a document\n val luke = collection.find(eq(Jedi::name.name, \"Luke Skywalker\"))\n val jedis = collection.find(lt(Jedi::age.name, 30)).toList()\n\n // Update a document\n val filter = Filters.eq(Jedi::name.name, \"Luke Skywalker\")\n val update = Updates.set(Jedi::age.name, 20)\n collection.updateOne(filter, update)\n\n // Delete a document\n val deleteFilter = Filters.eq(Jedi::name.name, \"Luke Skywalker\")\n collection.deleteOne(deleteFilter)" - }, - { - "lang": "kotlin", - "value": "data class Results(val avgAge: Double)\n\nval resultsFlow = collection.aggregate<Results>(\n listOf(\n Aggregates.match(Filters.ne(Jedi::name.name, \"Luke Skywalker\")),\n Aggregates.group(\"\\$${Jedi::name.name}\",\n Accumulators.avg(\"avgAge\", \"\\$${Jedi::age.name}\"))\n )\n)\nresultsFlow.collect { println(it) }" - }, - { - "lang": "kotlin", - "value": "// Insert a document\nval jedi = Jedi(\"Luke Skywalker\", 19)\ncol.insertOne(jedi)\n\n// Find a document\nval luke = col.findOne(Jedi::name eq \"Luke Skywalker\")\nval jedis = col.find(Jedi::age lt 30).toList()\n\n// Update a document\ncol.updateOne(Jedi::name eq \"Luke Skywalker\", setValue(Jedi::age, 20))\n\n// Delete a document\ncol.deleteOne(Jedi::name eq \"Luke Skywalker\")" - }, - { - "lang": "kotlin", - "value": "val avgAge = collection.aggregate(\n pipeline(\n match(Jedi::name ne \"Luke Skywalker\"),\n group(Jedi::name, avg(Jedi::age))\n )\n).toList()" - }, - { - "lang": "kotlin", - "value": "data class Person(val name: String, val email: String, val gender: String, val age: Int)\ndata class Results(val email: String)\n\nval collection = database.getCollection<Person>(\"people\")\n\n// Using Builders\nval filter = and(eq(\"gender\", \"female\"), gt(\"age\", 29))\nval projection = fields(excludeId(), include(\"email\"))\nval results = collection.find<Results>(filter).projection(projection)\n\n// Using Document class\nval filter = Document().append(\"gender\", \"female\").append(\"age\", Document().append(\"\\$gt\", 29))\nval projection = Document().append(\"_id\", 0).append(\"email\", 1)\nval results = collection.find<Results>(filter).projection(projection)" - }, - { - "lang": "kotlin", - "value": "val query = JsonObject(\"{\\\"name\\\": \\\"Gabriel Garc\\\\u00eda M\\\\u00e1rquez\\\"}\")\nval jsonResult = collection.find(query).firstOrNull()" - }, - { - "lang": 
"kotlin", - "value": "data class Jedi(val name: String)\n\nval yoda = col.findOne(Jedi::name eq \"Yoda\")\n\n// Compile error (2 is not a String)\nval error = col.findOne(Jedi::name eq 2)\n\n// Use property reference with instances\nval yoda2 = col.findOne(yoda::name regex \"Yo.*\")" - }, - { - "lang": "kotlin", - "value": "import org.litote.kmongo.MongoOperator.lt\nimport org.litote.kmongo.MongoOperator.match\nimport org.litote.kmongo.MongoOperator.regex\nimport org.litote.kmongo.MongoOperator.sample\n\nval yoda = col.findOne(\"{name: {$regex: 'Yo.*'}}\")!!\nval luke = col.aggregate(\"\"\"[ {$match:{age:{$lt : ${yoda.age}}}},\n {$sample:{size:1}}\n ]\"\"\").first()" - }, - { - "lang": "kotlin", - "value": "// With data class\ndata class Movie(val title: String, val year: Int, val rating: Float)\n\nval dataClassCollection = database.getCollection(\"movies\")\nval movieDataClass = dataClassCollection.findOneOrNull()\nval movieNameDataClass = movieDataClass.title\n\n// With Document class\nval documentCollection = database.getCollection(\"movies\")\nval movieDocument = documentCollection.findOneOrNull()\nval movieTitleDocument = movieDocument.getString(\"title\")" - }, - { - "lang": "kotlin", - "value": "// With data class\ndata class Movie(val title: String, val year: Int, val rating: Float)\n\nval collection = database.getCollection(\"movies\")\nval movieDataClass = dataClassCollection.findOne()\nval movieNameDataClass = movieDataClass.title\n\n// With Document class\nval documentCollection = database.getCollection(\"movies\")\nval movieDocument = documentCollection.findOne()\nval movieTitleDocument = movieDocument.getString(\"title\")" - }, - { - "lang": "kotlin", - "value": "@Serializable\ndata class LightSaber(\n @SerialName(\"_id\") // Use instead of @BsonId\n @Contextual val id: ObjectId?,\n val color: String,\n val qty: Int,\n @SerialName(\"brand\")\n val manufacturer: String = \"Acme\" // Use instead of @BsonProperty\n)" - }, - { - "lang": "kotlin", - "value": "val document = Document(\"_id\", 1).append(\"color\", \"blue\")\n\n// Serialize to JSON\ndocument.toJson()\n\n// Serialize to EJSON\nval settings = JsonWriterSettings.builder().outputMode(JsonMode.STRICT).build()\nval json = doc.toJson(settings)" - }, - { - "lang": "kotlin", - "value": "// Using KotlinX Serialization\n@Serializable\ndata class Data(@Contextual val _id: Id = newId())\n\nval json = Json { serializersModule = IdKotlinXSerializationModule }\nval data = Data()\nval json = json.encodeToString(data)" - }, - { - "lang": "kotlin", - "value": "import com.mongodb.kotlin.client.MongoClient\n\n// Instantiate your collection\ndata class Jedi(val name: String, val age: Int)\nval uri = \"\nval mongoClient = MongoClient.create(uri)\nval database = mongoClient.getDatabase(\"test\")\nval collection = database.getCollection(\"jedi\")\n\n// Synchronous operations\nval jedi =a Jedi(\"Luke Skywalker\", 19)\ncollection.insertOne(jedi)" - }, - { - "lang": "kotlin", - "value": "import com.mongodb.kotlin.client.coroutine.MongoClient\n\n// Instantiate your collection\ndata class Jedi(val name: String, val age: Int)\nval uri = \"\nval mongoClient = MongoClient.create(uri)\nval database = mongoClient.getDatabase(\"test\")\nval collection = database.getCollection(\"jedi\")\n\nrunBlocking {\n\n // Async operations\n val jedi =a Jedi(\"Luke Skywalker\", 19)\n collection.insertOne(jedi)\n}" - }, - { - "lang": "kotlin", - "value": "import org.litote.kmongo.*\n\n// Instantiate your collection\ndata class Jedi(val name: String, val age: 
Int)\n\nval client = KMongo.createClient()\nval database = client.getDatabase(\"test\")\nval col = database.getCollection<Jedi>()\n\n// Synchronous operations\ncol.insertOne(Jedi(\"Luke Skywalker\", 19))\nval yoda : Jedi? = col.findOne(Jedi::name eq \"Yoda\")" - }, - { - "lang": "kotlin", - "value": "import org.litote.kmongo.reactivestreams.*\nimport org.litote.kmongo.coroutine.*\n\n// Instantiate your collection\ndata class Jedi(val name: String, val age: Int)\n\nval client = KMongo.createClient()\nval database = client.getDatabase(\"test\")\nval col = database.getCollection<Jedi>()\n\nrunBlocking {\n\n // Async operations\n col.insertOne(Jedi(\"Luke Skywalker\", 19))\n val yoda : Jedi? = col.findOne(Jedi::name eq \"Yoda\")\n}" - } - ], - "preview": "This page contains a high-level comparison of most of the ways the official\nMongoDB Kotlin and the community-developed KMongo driver differ.\nYou can use this page to identify the changes you need to make to migrate from\nthe deprecated KMongo driver to the official MongoDB Kotlin driver.", - "tags": null, - "facets": { - "target_product": ["drivers"], - "programming_language": ["kotlin"] - } - }, - { - "slug": "quick-reference", - "title": "Quick Reference", - "headings": [], - "paragraphs": "This page shows the driver syntax for several MongoDB commands and links to\ntheir related reference and API documentation. The examples on the page use the following data class to represent MongoDB documents: Command Syntax", - "code": [ - { - "lang": "kotlin", - "value": "data class Movie(\n val title: String,\n val year: Int,\n val rated: String? = \"Not Rated\",\n val genres: List<String>? = listOf()\n)\n" - }, - { - "lang": "kotlin", - "value": "collection.find(\n Filters.eq(Movie::title.name, \"Shrek\")\n).firstOrNull()\n" - }, - { - "lang": "console", - "value": "Movie(title=Shrek, year=2001, ...)" - }, - { - "lang": "kotlin", - "value": "collection.find(\n Filters.eq(Movie::year.name, 2004)\n)\n" - }, - { - "lang": "console", - "value": "[\n Movie(title=Shrek 2, year=2004, ...),\n Movie(title=Spider-Man 2, year=2004, ...),\n Movie(title=National Treasure, year=2004, ...),\n ...\n]" - }, - { - "lang": "kotlin", - "value": "collection.insertOne(Movie(\"Shrek\", 2001))\n" - }, - { - "lang": "kotlin", - "value": "collection.insertMany(\n listOf(\n Movie(\"Shrek\", 2001),\n Movie(\"Shrek 2\", 2004),\n Movie(\"Shrek the Third\", 2007),\n Movie(\"Shrek Forever After\", 2010),\n )\n)\n" - }, - { - "lang": "kotlin", - "value": "collection.updateOne(\n Filters.eq(Movie::title.name, \"Shrek\"),\n Updates.set(Movie::rated.name, \"PG\")\n)\n" - }, - { - "lang": "console", - "value": "Movie(title=Shrek, year=2001, rated=PG, genres=[])" - }, - { - "lang": "kotlin", - "value": "collection.updateMany(\n Filters.regex(Movie::title.name, \"Shrek\"),\n Updates.set(Movie::rated.name, \"PG\")\n)\n" - }, - { - "lang": "console", - "value": "[\n Movie(title=Shrek, year=2001, rated=PG, genres=[]),\n Movie(title=Shrek 2, year=2004, rated=PG, genres=[]),\n Movie(title=Shrek the Third, year=2007, rated=PG, genres=[]),\n Movie(title=Shrek Forever After, year=2010, rated=PG, genres=[])\n]" - }, - { - "lang": "kotlin", - "value": "collection.updateOne(\n Filters.eq(Movie::title.name, \"Shrek\"),\n Updates.addEachToSet(Movie::genres.name, listOf(\"Family\", \"Fantasy\"))\n)\n" - }, - { - "lang": "console", - "value": "Movie(title=Shrek, year=2001, rated=Not Rated, genres=[Family, Fantasy])" - }, - { - "lang": "kotlin", - "value": "collection.replaceOne(\n Filters.eq(Movie::title.name, 
\"Shrek\"),\n Movie(\"Kersh\", 1002, \"GP\")\n)\n" - }, - { - "lang": "console", - "value": "Movie(title=Kersh, year=1002, rated=GP, genres=[])" - }, - { - "lang": "kotlin", - "value": "collection.deleteOne(\n Filters.eq(Movie::title.name, \"Shrek\")\n)\n" - }, - { - "lang": "kotlin", - "value": "collection.deleteMany(\n Filters.regex(Movie::title.name, \"Shrek\")\n)\n" - }, - { - "lang": "kotlin", - "value": "collection.bulkWrite(\n listOf(\n InsertOneModel(Movie(\"Shrek\", 2001)),\n DeleteManyModel(Filters.lt(Movie::year.name, 2004)),\n )\n)\n" - }, - { - "lang": "kotlin", - "value": "val changeStream = collection.watch()\nchangeStream.collect {\n println(\"Change to ${it.fullDocument?.title}\")\n}\n" - }, - { - "lang": "kotlin", - "value": "collection.find().toList()\n" - }, - { - "lang": "console", - "value": "[\n Movie(title=Shrek, year=2001, rated=Not Rated, genres=[]),\n Movie(title=Shrek 2, year=2004, rated=Not Rated, genres=[]),\n Movie(title=Shrek the Third, year=2007, rated=Not Rated, genres=[]),\n Movie(title=Shrek Forever After, year=2010, rated=Not Rated, genres=[])\n]" - }, - { - "lang": "kotlin", - "value": "collection.countDocuments(Filters.eq(\"year\", 2001))\n" - }, - { - "lang": "console", - "value": "42" - }, - { - "lang": "kotlin", - "value": "collection.distinct(Movie::rated.name)\n" - }, - { - "lang": "console", - "value": "[Not Rated, PG, PG-13]" - }, - { - "lang": "kotlin", - "value": "collection.find()\n .limit(2)\n" - }, - { - "lang": "console", - "value": "[\n Movie(title=Shrek, year=2001, rated=Not Rated, genres=[]),\n Movie(title=Shrek 2, year=2004, rated=Not Rated, genres=[])\n]" - }, - { - "lang": "kotlin", - "value": "collection.find()\n .skip(2)\n" - }, - { - "lang": "console", - "value": "[\n Movie(title=Shrek the Third, year=2007, rated=Not Rated, genres=[]),\n Movie(title=Shrek Forever After, year=2010, rated=Not Rated, genres=[])\n]" - }, - { - "lang": "kotlin", - "value": "collection.find().sort(Sorts.descending(Movie::year.name))\n" - }, - { - "lang": "console", - "value": "[\n Movie(title=Shrek Forever After, year=2010, rated=Not Rated, genres=[]),\n Movie(title=Shrek the Third, year=2007, rated=Not Rated, genres=[]),\n Movie(title=Shrek 2, year=2004, rated=Not Rated, genres=[]),\n Movie(title=Shrek, year=2001, rated=Not Rated, genres=[])\n]" - }, - { - "lang": "kotlin", - "value": "data class Result(val title: String)\n collection.find()\n .projection(Projections.include(Movie::title.name))\n" - }, - { - "lang": "console", - "value": "Result(title=Shrek)" - }, - { - "lang": "kotlin", - "value": "collection.createIndex(Indexes.ascending(Movie::title.name))\n" - }, - { - "lang": "kotlin", - "value": "collection.find(Filters.text(\"Forever\"));\n" - }, - { - "lang": "console", - "value": "[Movie(title=Shrek Forever After, year=2010, rated=Not Rated, genres=[])]" - }, - { - "lang": "xml", - "value": "\n \n org.mongodb\n mongodb-driver-kotlin-coroutine\n 5.1.2\n \n" - }, - { - "lang": "kotlin", - "value": "dependencies {\n implementation(\"org.mongodb:mongodb-driver-kotlin-coroutine:5.1.2\")\n}" - }, - { - "lang": "kotlin", - "value": "val flow = collection.find(\n Filters.eq(Movie::year.name, 2004)\n)\nflow.collect { println(it) }\n" - }, - { - "lang": "console", - "value": "Movie(title=2001: A Space Odyssey, ...)\nMovie(title=The Sound of Music, ...)" - } - ], - "preview": "This page shows the driver syntax for several MongoDB commands and links to\ntheir related reference and API documentation.", - "tags": null, - "facets": { - "target_product": 
["drivers"], - "programming_language": ["kotlin"] - } - }, - { - "slug": "quick-start", - "title": "Kotlin Driver Quick Start", - "headings": [ - "Introduction", - "Set up Your Project", - "Install Kotlin", - "Create the Project", - "Add MongoDB as a Dependency", - "Add Serialization Library Dependencies", - "Create a MongoDB Cluster", - "Connect to your Cluster", - "Query Your MongoDB Cluster from Your Application", - "Working with the Document Class (Alternative)", - "Next Steps" - ], - "paragraphs": "This guide shows you how to create an application that uses the Kotlin driver \nto connect to a MongoDB Atlas cluster . If you prefer to connect to\nMongoDB by using a different driver or programming language, see the\n list of official MongoDB drivers . The Kotlin driver lets you connect to and communicate with MongoDB clusters\nfrom a Kotlin application. MongoDB Atlas is a fully managed cloud database service that hosts your data\non MongoDB clusters. In this guide, you can learn how to get started with your\nown free cluster. To view another example that demonstrates how to build an\napplication in Kotlin that connects to MongoDB Atlas, see the\n Getting Started with the MongoDB Kotlin Driver \ndeveloper tutorial. Make sure that your system has Kotlin installed and running on JDK 1.8 or later.\nFor more information on getting started with Kotlin/JVM development,\nrefer to Get started with Kotlin/JVM \nin the Kotlin language documentation. This guide shows you how to add the MongoDB Kotlin driver dependencies\nby using Gradle or Maven. We recommend that you use an integrated development\nenvironment (IDE) such as IntelliJ IDEA or Eclipse IDE to configure\nGradle or Maven to build and run your project. If you are not using an IDE, see the\n Creating New Gradle Builds guide\nor the Building Maven guide\nfor more information on how to set up your project. If you are using Gradle to manage your\npackages, add the following entry to your build.gradle.kts \ndependencies list: If you are using Maven to manage your\npackages, add the following entry to your pom.xml dependencies list: After you configure your dependencies, ensure that they are available to your\nproject by running the dependency manager and refreshing the\nproject in your IDE. To enable the driver to convert between Kotlin objects and BSON, the\ndata format for documents in MongoDB, you must also add one or both of the\nfollowing serialization packages to your application: If you are using Gradle to manage your packages, add one of the following\nentries to your build.gradle.kts dependencies list: If you are using Maven to manage your packages, add one of the following\nentries to your pom.xml dependencies list: After you configure your dependencies, ensure that they are available to your\nproject by running the dependency manager and refreshing the\nproject in your IDE. To learn more about these packages, see\n Kotlin Serialization . bson-kotlinx (Recommended) bson-kotlin After setting up your Kotlin project dependencies, create a MongoDB cluster\nin which you can store and manage your data. Complete the\n Get Started with Atlas tutorial\nto set up a new Atlas account, create and launch a free tier MongoDB cluster,\nand load sample datasets. After you complete the steps in the Get Started with Atlas tutorial, you\nhave a new MongoDB cluster deployed in Atlas, a new database user, and\nsample data loaded into your cluster. 
This step shows how to create and run an application that uses the\nKotlin driver to connect to your MongoDB cluster and run a query on\nthe sample data. First, you must specify how the driver connects to your MongoDB cluster\nby including a connection string in your code. This string includes\ninformation on the hostname or IP address and port of your cluster,\nauthentication mechanism, user credentials, and other connection\noptions. If you are connecting to an instance or cluster that is not hosted on Atlas,\nsee the Other Ways to Connect to MongoDB section of the Connection Guide for\ninstructions on how to format your connection string. To retrieve your connection string for the cluster and user you created in\nthe previous step, log into your Atlas account and navigate to the\n Database page under Deployment and click the\n Connect button for your cluster, which is shown in the following\nimage: Select the Drivers option for connection and select\n Kotlin from the list of drivers and 4.10 or\nlater from the version dropdown. Next, click the Copy icon, which is highlighted in the\nfollowing image, to copy your connection string to\nyour clipboard: Save your Atlas connection string in a safe location that you can access\nfor the next step. Next, create a file called QuickStartDataClassExample.kt in your\nproject. Copy the following sample code into the file and replace the value of\nthe uri variable with your MongoDB Atlas connection string that you\nsaved in the preceding step. Replace the \"\" placeholder of\nyour connection string with the password you set for your user that has\n atlasAdmin permissions: When you run the main function, the application prints the details\nof a movie document that matches the query, as shown in the following output: If you don't see any output or receive an error, check whether you\nincluded the proper connection string in your application. Also, confirm\nthat you successfully loaded the sample dataset into your MongoDB Atlas cluster. After completing this step, you have a working application that uses\nthe Kotlin driver to connect to your MongoDB cluster, run a query on the\nsample data, and print out the result. This example uses a Kotlin data class to model MongoDB data. To learn more about using data classes to store and retrieve data,\nsee the Document Data Format: Data Classes guide. If you encounter the following error while connecting to your MongoDB\ninstance, you must update your JDK to the latest patch release: This exception is a known issue when using the TLS 1.3 protocol with\nspecific versions of JDK, but this issue is fixed for the following\nJDK versions: To resolve this error, update your JDK to one of the preceding patch\nversions or a newer one. JDK 11.0.7 JDK 13.0.3 JDK 14.0.2 The preceding section demonstrates how to run a query on a sample\ncollection to retrieve data by using a Kotlin data class. This section\nshows how to use the Document class\nto store and retrieve data from MongoDB. In a new file called QuickStartDocumentExample.kt , paste the following sample\ncode to run a query on your sample dataset in MongoDB Atlas. Replace the\nvalue of the uri variable with your MongoDB Atlas connection string: When you run the main function, the application prints the details\nof a movie document that matches the query, as shown in the following output: If you don't see any output or receive an error, check whether you\nincluded the proper connection string in your application. 
Also, confirm\nthat you successfully loaded the sample dataset into your MongoDB Atlas cluster. To learn more about the Kotlin driver, see the\n Fundamentals guides, which describe relevant\nconcepts in detail and provide code examples for performing different tasks.", - "code": [ - { - "lang": "kotlin", - "value": "dependencies {\n implementation(\"org.mongodb:mongodb-driver-kotlin-coroutine:5.1.2\")\n}" - }, - { - "lang": "xml", - "value": "<dependencies>\n <dependency>\n <groupId>org.mongodb</groupId>\n <artifactId>mongodb-driver-kotlin-coroutine</artifactId>\n <version>5.1.2</version>\n </dependency>\n</dependencies>" - }, - { - "lang": "kotlin", - "value": "implementation(\"org.mongodb:bson-kotlinx:5.1.2\")\n// OR\nimplementation(\"org.mongodb:bson-kotlin:5.1.2\")" - }, - { - "lang": "xml", - "value": "<dependency>\n <groupId>org.mongodb</groupId>\n <artifactId>bson-kotlinx</artifactId>\n <version>5.1.2</version>\n</dependency>\n<!--OR-->\n<dependency>\n <groupId>org.mongodb</groupId>\n <artifactId>bson-kotlin</artifactId>\n <version>5.1.2</version>\n</dependency>" - }, - { - "lang": "none", - "value": "Movie(\n title=Back to the Future,\n year=1985,\n cast=[Michael J. Fox, Christopher Lloyd, Lea Thompson, Crispin Glover]\n)" - }, - { - "lang": "kotlin", - "value": "import com.mongodb.client.model.Filters.eq\nimport com.mongodb.kotlin.client.coroutine.MongoClient\nimport io.github.cdimascio.dotenv.dotenv\nimport kotlinx.coroutines.flow.firstOrNull\nimport kotlinx.coroutines.runBlocking\n\n// Create data class to represent a MongoDB document\ndata class Movie(val title: String, val year: Int, val cast: List<String>)\n\nfun main() {\n\n // Replace the placeholder with your MongoDB deployment's connection string\n val uri = CONNECTION_STRING_URI_PLACEHOLDER\n\n val mongoClient = MongoClient.create(uri)\n val database = mongoClient.getDatabase(\"sample_mflix\")\n // Get a collection of documents of type Movie\n val collection = database.getCollection<Movie>(\"movies\")\n\n runBlocking {\n val doc = collection.find(eq(\"title\", \"Back to the Future\")).firstOrNull()\n if (doc != null) {\n println(doc)\n } else {\n println(\"No matching documents found.\")\n }\n }\n\n mongoClient.close()\n}\n\n" - }, - { - "lang": "none", - "value": "javax.net.ssl.SSLHandshakeException: extension (5) should not be presented in certificate_request" - }, - { - "lang": "json", - "value": "{\n _id: ...,\n plot: 'A young man is accidentally sent 30 years into the past...',\n genres: [ 'Adventure', 'Comedy', 'Sci-Fi' ],\n ...\n title: 'Back to the Future',\n ...\n}" - }, - { - "lang": "kotlin", - "value": "import com.mongodb.client.model.Filters.eq\nimport com.mongodb.kotlin.client.coroutine.MongoClient\nimport io.github.cdimascio.dotenv.dotenv\nimport kotlinx.coroutines.flow.firstOrNull\nimport kotlinx.coroutines.runBlocking\nimport org.bson.Document\n\nfun main() {\n\n // Replace the placeholder with your MongoDB deployment's connection string\n val uri = CONNECTION_STRING_URI_PLACEHOLDER\n\n val mongoClient = MongoClient.create(uri)\n val database = mongoClient.getDatabase(\"sample_mflix\")\n val collection = database.getCollection<Document>(\"movies\")\n\n runBlocking {\n val doc = collection.find(eq(\"title\", \"Back to the Future\")).firstOrNull()\n if (doc != null) {\n println(doc.toJson())\n } else {\n println(\"No matching documents found.\")\n }\n }\n\n mongoClient.close()\n}\n\n" - } - ], - "preview": "This guide shows you how to create an application that uses the Kotlin driver\nto connect to a MongoDB Atlas cluster. 
If you prefer to connect to\nMongoDB by using a different driver or programming language, see the\nlist of official MongoDB drivers.", - "tags": "code example, get started, runnable app", - "facets": { - "genre": ["tutorial"], - "target_product": ["drivers"], - "programming_language": ["kotlin"] - } - }, - { - "slug": "usage-examples/bulkWrite", - "title": "Perform Bulk Operations", - "headings": ["Example"], - "paragraphs": "The bulkWrite() method performs batch write operations against a\n single collection. This method reduces the number of network round trips from\nyour application to your MongoDB instance which increases the performance of your\napplication. Since you only receive the success status after\nall the operations return, we recommend you use this if that meets the\nrequirements of your use case. You can specify one or more of the following write operations in\n bulkWrite() : The bulkWrite() method accepts the following parameters: The bulkWrite() method returns a BulkWriteResult object that\ncontains information about the write operation results including the number\nof documents inserted, modified, and deleted. If one or more of your operations attempts to set a value that violates a\nunique index on your collection, an exception is raised that should look\nsomething like this: Similarly, if you attempt to perform a bulk write against a collection\nthat uses schema validation and one or more of your write operations\nprovide an unexpected format, you may encounter exceptions. insertOne updateOne updateMany deleteOne deleteMany replaceOne A List of objects that implement WriteModel : the classes that\nimplement WriteModel correspond to the aforementioned write\noperations. For example, the InsertOneModel class wraps the insertOne \nwrite operation. See the links to the API documentation at the bottom of this\npage for more information on each class. BulkWriteOptions : optional object that specifies settings such as\nwhether to ensure your MongoDB instance orders your write operations. Retryable writes run on MongoDB server versions 3.6 or later in bulk\nwrite operations unless they include one or more instances of\n UpdateManyModel or DeleteManyModel . By default, MongoDB executes bulk write operations one-by-one in the\nspecified order (i.e. serially). During an ordered bulk write, if\nan error occurs during the processing of an operation, MongoDB returns\nwithout processing the remaining operations in the list. In contrast,\nwhen you set ordered to false , MongoDB continues to process remaining\nwrite operations in the list in the event of an error. Unordered operations\nare theoretically faster since MongoDB can execute them in parallel, but\nyou should only use them if your writes do not depend on order. The following code sample performs an ordered bulk write operation on the\n movies collection in the sample_mflix database. The example call\nto bulkWrite() includes examples of the InsertOneModel ,\n UpdateOneModel , and DeleteOneModel . For additional information on the classes and methods mentioned on this\npage, see the following resources: This example connects to an instance of MongoDB using a connection URI.\nTo learn more about connecting to your MongoDB instance, see the\n connection guide . 
Unique Index Server Manual Entry Schema Validation Server Manual Entry bulkWrite() API Documentation BulkWriteOptions API Documentation BulkWriteResult API Documentation InsertOneModel API Documentation UpdateOneModel API Documentation UpdateManyModel API Documentation DeleteOneModel API Documentation DeleteManyModel API Documentation ReplaceOneModel API Documentation", - "code": [ - { - "lang": "sh", - "value": "The bulk write operation failed due to an error: Bulk write operation error on server . Write errors: [BulkWriteError{index=0, code=11000, message='E11000 duplicate key error collection: ... }]." - }, - { - "lang": "kotlin", - "value": "import com.mongodb.MongoException\nimport com.mongodb.client.model.DeleteOneModel\nimport com.mongodb.client.model.Filters\nimport com.mongodb.client.model.InsertOneModel\nimport com.mongodb.client.model.ReplaceOneModel\nimport com.mongodb.client.model.UpdateOneModel\nimport com.mongodb.client.model.UpdateOptions\nimport com.mongodb.client.model.Updates\nimport com.mongodb.kotlin.client.coroutine.MongoClient\nimport kotlinx.coroutines.runBlocking\n\ndata class Movie(val title: String, val runtime: Int? = null)\n\nfun main() = runBlocking {\n // Replace the uri string with your MongoDB deployment's connection string\n val uri = \"\"\n val mongoClient = MongoClient.create(uri)\n val database = mongoClient.getDatabase(\"sample_mflix\")\n val collection = database.getCollection(\"movies\")\n\n try {\n val result = collection.bulkWrite(\n listOf(\n InsertOneModel(Movie(\"A Sample Movie\")),\n InsertOneModel(Movie(\"Another Sample Movie\")),\n InsertOneModel(Movie(\"Yet Another Sample Movie\")),\n UpdateOneModel(\n Filters.eq(Movie::title.name,\"A Sample Movie\"),\n Updates.set(Movie::title.name, \"An Old Sample Movie\"),\n UpdateOptions().upsert(true)\n ),\n DeleteOneModel(Filters.eq(\"title\", \"Another Sample Movie\")),\n ReplaceOneModel(\n Filters.eq(Movie::title.name, \"Yet Another Sample Movie\"),\n Movie(\"The Other Sample Movie\", 42)\n )\n )\n )\n println(\n \"\"\"\n Result statistics:\n inserted: ${result.insertedCount}\n updated: ${result.modifiedCount}\n deleted: ${result.deletedCount}\n \"\"\".trimIndent()\n )\n } catch (e: MongoException) {\n System.err.println(\"The bulk write operation failed due to an error: $e\")\n }\n mongoClient.close()\n}\n" - }, - { - "lang": "console", - "value": "Result statistics:\ninserted: 3\nupdated: 2\ndeleted: 1" - } - ], - "preview": "The bulkWrite() method performs batch write operations against a\nsingle collection. This method reduces the number of network round trips from\nyour application to your MongoDB instance which increases the performance of your\napplication. Since you only receive the success status after\nall the operations return, we recommend you use this if that meets the\nrequirements of your use case.", - "tags": null, - "facets": { - "target_product": ["drivers"], - "programming_language": ["kotlin"] - } - }, - { - "slug": "usage-examples/command", - "title": "Run a Command", - "headings": ["Example"], - "paragraphs": "You can run all raw database operations using the\n MongoDatabase.runCommand() method. A raw database operation is a\ncommand you can execute directly on the MongoDB server CLI. These\ncommands include administrative and diagnostic tasks, such as fetching\nserver stats or initializing a replica set. Call the runCommand() \nmethod with a Bson command object on an instance of a MongoDatabase \nto run your raw database operation. 
The runCommand() method accepts a command in the form of a Bson object.\nBy default, runCommand returns an object of type\n org.bson.Document containing the output of the database command. You\ncan specify a return type for runCommand() as an optional second\nparameter. Use the MongoDB Shell for\nadministrative tasks instead of the Kotlin driver whenever possible,\nsince these tasks are often quicker and easier to implement with the\nshell than in a Kotlin application. In the following sample code, we send the dbStats command to request\nstatistics from a specific MongoDB database. For additional information on the classes and methods mentioned on this\npage, see the following resources: This example connects to an instance of MongoDB using a connection URI.\nTo learn more about connecting to your MongoDB instance, see the\n connection guide . runCommand() API Documentation Database Commands Server Manual Entry dbStats Server Manual Entry", - "code": [ - { - "lang": "kotlin", - "value": "\nimport com.mongodb.MongoException\nimport com.mongodb.kotlin.client.coroutine.MongoClient\nimport kotlinx.coroutines.runBlocking\nimport org.bson.BsonDocument\nimport org.bson.BsonInt64\nimport org.bson.json.JsonWriterSettings\n\nfun main() = runBlocking {\n // Replace the uri string with your MongoDB deployment's connection string\n val uri = \"\"\n val mongoClient = MongoClient.create(uri)\n val database = mongoClient.getDatabase(\"sample_mflix\")\n try {\n val command = BsonDocument(\"dbStats\", BsonInt64(1))\n val commandResult = database.runCommand(command)\n println(commandResult.toJson(JsonWriterSettings.builder().indent(true).build()))\n } catch (me: MongoException) {\n System.err.println(\"An error occurred: $me\")\n }\n mongoClient.close()\n}\n" - }, - { - "lang": "json", - "value": "{\n \"db\": \"sample_mflix\",\n \"collections\": 5,\n \"views\": 0,\n \"objects\": 75595,\n \"avgObjSize\": 692.1003770090614,\n \"dataSize\": 52319328,\n \"storageSize\": 29831168,\n \"numExtents\": 0,\n \"indexes\": 9,\n \"indexSize\": 14430208,\n \"fileSize\": 0,\n \"nsSizeMB\": 0,\n \"ok\": 1\n}" - } - ], - "preview": "You can run all raw database operations using the\nMongoDatabase.runCommand() method. A raw database operation is a\ncommand you can execute directly on the MongoDB server CLI. These\ncommands include administrative and diagnostic tasks, such as fetching\nserver stats or initializing a replica set. Call the runCommand()\nmethod with a Bson command object on an instance of a MongoDatabase\nto run your raw database operation.", - "tags": null, - "facets": { - "target_product": ["drivers"], - "programming_language": ["kotlin"] - } - }, - { - "slug": "usage-examples/count", - "title": "Count Documents", - "headings": ["Example"], - "paragraphs": "There are two instance methods in the MongoCollection class that you can\ncall to count the number of documents in a collection: The estimatedDocumentCount() method returns more quickly than the\n countDocuments() method because it uses the collection's metadata rather\nthan scanning the entire collection. The countDocuments() method returns\nan accurate count of the number of documents and supports specifying\na filter. When you call the countDocuments() method, you can optionally pass a\n query filter parameter. You cannot pass any parameters when you call\n estimatedDocumentCount() . 
You can also pass an optional parameter to either of these methods to\nspecify the behavior of the call: Both methods return the number of matching documents as a Long primitive. countDocuments() returns an accurate count of the number of documents\nin the collection that match a specified query. If you specify an empty query\nfilter, the method returns the total number of documents in the collection. estimatedDocumentCount() returns an estimation of the number of\ndocuments in the collection based on the collection metadata. You cannot\nspecify a query when using this method. When using countDocuments() to return the total number of documents in a\ncollection, you can improve performance by avoiding a collection scan. To do\nthis, use a hint to take advantage\nof the built-in index on the _id field. Use this technique only when\ncalling countDocuments() with an empty query parameter: If you are using the Stable API V1 with the \"strict\" option and a\nMongoDB server version between 5.0.0 and 5.0.8 inclusive, method calls to\n estimatedDocumentCount() may error due to a server bug. Upgrade to MongoDB server 5.0.9 or set the Stable API \"strict\" option to\n false to avoid this issue. Method Optional Parameter Class Description countDocuments() CountOptions You can specify a maximum number of documents to count by using the\n limit() method or the maximum amount of execution time using the\n maxTime() method. estimatedDocumentCount() EstimatedDocumentCountOptions You can specify the maximum execution time using the maxTime() \nmethod. The following example estimates the number of documents in the\n movies collection in the sample_mflix database, and then returns\nan accurate count of the number of documents in the movies \ncollection with Spain in the countries field.\nIf you run the preceding sample code, you should see output that looks something\nlike this (exact numbers may vary depending on your data): For additional information on the classes and methods mentioned on this\npage, see the following API Documentation: This example connects to an instance of MongoDB using a connection URI.\nTo learn more about connecting to your MongoDB instance, see the\n connection guide . 
countDocuments() estimatedDocumentCount() CountOptions EstimatedDocumentCountOptions", - "code": [ - { - "lang": "kotlin", - "value": "val options = CountOptions().hintString(\"_id_\")\nval numDocuments = collection.countDocuments(BsonDocument(), options)\n" - }, - { - "lang": "kotlin", - "value": "\nimport com.mongodb.MongoException\nimport com.mongodb.client.model.Filters\nimport com.mongodb.kotlin.client.coroutine.MongoClient\nimport kotlinx.coroutines.runBlocking\n\n\ndata class Movie(val countries: List)\n\nfun main() = runBlocking {\n // Replace the uri string with your MongoDB deployment's connection string\n val uri = \"\"\n val mongoClient = MongoClient.create(uri)\n val database = mongoClient.getDatabase(\"sample_mflix\")\n val collection = database.getCollection(\"movies\")\n\n\n val query = Filters.eq(Movie::countries.name, \"Spain\")\n try {\n val estimatedCount = collection.estimatedDocumentCount()\n println(\"Estimated number of documents in the movies collection: $estimatedCount\")\n val matchingCount = collection.countDocuments(query)\n println(\"Number of movies from Spain: $matchingCount\")\n } catch (e: MongoException) {\n System.err.println(\"An error occurred: $e\")\n }\n\n mongoClient.close()\n}\n" - }, - { - "lang": "console", - "value": "Estimated number of documents in the movies collection: 23541\nNumber of movies from Spain: 755" - } - ], - "preview": "There are two instance methods in the MongoCollection class that you can\ncall to count the number of documents in a collection:", - "tags": null, - "facets": { - "target_product": ["drivers"], - "programming_language": ["kotlin"] - } - }, - { - "slug": "usage-examples/delete-operations", - "title": "Delete Operations", - "headings": [], - "paragraphs": "Delete a Document Delete Multiple Documents", - "code": [], - "preview": null, - "tags": null, - "facets": { - "target_product": ["drivers"], - "programming_language": ["kotlin"] - } - }, - { - "slug": "usage-examples/deleteMany", - "title": "Delete Multiple Documents", - "headings": ["Example"], - "paragraphs": "You can delete multiple documents from a collection in a single operation\nby calling the deleteMany() method on a MongoCollection object. To specify which documents to delete, pass a query filter that matches\nthe documents you want to delete. If you provide an empty document,\nMongoDB matches all documents in the collection and deletes them. While\nyou can use deleteMany() to delete all documents in a collection,\nconsider using the drop() method instead for better performance. Upon successful deletion, this method returns an instance of\n DeleteResult . You can retrieve information such as the number of\ndocuments deleted by calling the getDeletedCount() method on the\n DeleteResult instance. If your delete operation fails, the driver raises an exception. For more\ninformation on the types of exceptions raised under specific conditions,\nsee the API documentation for deleteMany() , linked at the bottom of\nthis page. The following snippet deletes multiple documents from the movies \ncollection in the sample_mflix database. The query filter passed to the deleteMany() method matches all\nmovie documents that contain a rating of less than 2.9 in the imdb \nsub-document. When you run the example, you should see output that reports the number of\ndocuments deleted in your call to deleteMany() . 
For additional information on the classes and methods mentioned on this\npage, see the following API Documentation: This example connects to an instance of MongoDB using a connection URI.\nTo learn more about connecting to your MongoDB instance, see the\n connection guide . deleteMany() DeleteResult drop()", - "code": [ - { - "lang": "kotlin", - "value": "\nimport com.mongodb.MongoException\nimport com.mongodb.client.model.Filters\nimport com.mongodb.kotlin.client.coroutine.MongoClient\nimport kotlinx.coroutines.runBlocking\n\ndata class Movie(val imdb: IMDB){\n data class IMDB(val rating: Double)\n}\n\nfun main() = runBlocking {\n // Replace the uri string with your MongoDB deployment's connection string\n val uri = \"\"\n val mongoClient = MongoClient.create(uri)\n val database = mongoClient.getDatabase(\"sample_mflix\")\n val collection = database.getCollection(\"movies\")\n\n val query = Filters.lt(\"${Movie::imdb.name}.${Movie.IMDB::rating.name}\", 2.9)\n try {\n val result = collection.deleteMany(query)\n println(\"Deleted document count: \" + result.deletedCount)\n } catch (e: MongoException) {\n System.err.println(\"Unable to delete due to an error: $e\")\n }\n mongoClient.close()\n}\n" - }, - { - "lang": "console", - "value": "Deleted document count: 4" - } - ], - "preview": "You can delete multiple documents from a collection in a single operation\nby calling the deleteMany() method on a MongoCollection object.", - "tags": null, - "facets": { - "target_product": ["drivers"], - "programming_language": ["kotlin"] - } - }, - { - "slug": "usage-examples/deleteOne", - "title": "Delete a Document", - "headings": ["Example"], - "paragraphs": "You can delete a single document from a collection using the deleteOne() \nmethod on a MongoCollection object. The method accepts a query filter\nthat matches the document you want to delete. If you do not specify\na filter, MongoDB matches the first document in the collection. The\n deleteOne() method only deletes the first document matched. This method returns an instance of DeleteResult which contains information\nincluding how many documents were deleted as a result of the operation. If your delete operation fails, the driver raises an exception. For more\ninformation on the types of exceptions raised under specific conditions,\nsee the API documentation for deleteOne() , linked at the bottom of\nthis page. The following snippet deletes a single document from the movies \ncollection of the sample_mflix database. The example uses the eq() \nfilter to match movies with the title exactly matching the text\n 'The Garbage Pail Kids Movie' . When you run the example, if the query filter you passed in your call to\n deleteOne() matches a document and removes it, you should see output\nthat looks something like this: If your query filter does not match a document in your collection,\nyour call to deleteOne() removes no documents and returns the following: For additional information on the classes and methods mentioned on this\npage, see the following API Documentation: This example connects to an instance of MongoDB using a connection URI.\nTo learn more about connecting to your MongoDB instance, see the\n connection guide . 
deleteOne() DeleteResult eq()", - "code": [ - { - "lang": "none", - "value": "Deleted document count: 1" - }, - { - "lang": "none", - "value": "Deleted document count: 0" - }, - { - "lang": "kotlin", - "value": "\nimport com.mongodb.MongoException\nimport com.mongodb.client.model.Filters\nimport com.mongodb.kotlin.client.coroutine.MongoClient\nimport kotlinx.coroutines.runBlocking\n\n\ndata class Movie(val title: String)\n\nfun main() = runBlocking {\n // Replace the uri string with your MongoDB deployment's connection string\n val uri = \"\"\n val mongoClient = MongoClient.create(uri)\n val database = mongoClient.getDatabase(\"sample_mflix\")\n val collection = database.getCollection(\"movies\")\n\n val query = Filters.eq(Movie::title.name, \"The Garbage Pail Kids Movie\")\n\n try {\n val result = collection.deleteOne(query)\n println(\"Deleted document count: \" + result.deletedCount)\n } catch (e: MongoException) {\n System.err.println(\"Unable to delete due to an error: $e\")\n }\n mongoClient.close()\n}\n" - } - ], - "preview": "You can delete a single document from a collection using the deleteOne()\nmethod on a MongoCollection object. The method accepts a query filter\nthat matches the document you want to delete. If you do not specify\na filter, MongoDB matches the first document in the collection. The\ndeleteOne() method only deletes the first document matched.", - "tags": null, - "facets": { - "target_product": ["drivers"], - "programming_language": ["kotlin"] - } - }, - { - "slug": "usage-examples/distinct", - "title": "Retrieve Distinct Values of a Field", - "headings": ["Example"], - "paragraphs": "You can retrieve a list of distinct values for a field across a\ncollection by calling the distinct() method on a MongoCollection \nobject. Pass the document field name as the first parameter and the class\nyou want to cast the results to as the type parameter. The following snippets demonstrate the distinct() method using the movies \ncollection in the sample_mflix sample database. Documents are modeled\nwith the following Kotlin data class: The following method call returns each distinct value of the countries \nfield in the movies collection: You can specify a field on the document or one within an embedded document \nusing dot notation . The following method call returns each distinct\nvalue of the wins field in the awards embedded document: You can also limit the set of documents from which your MongoDB instance retrieves\ndistinct values with a query filter as a second parameter, as follows: The distinct() method returns an object that implements the\n DistinctFlow class, which contains methods to access, organize, and traverse\nthe results. DistinctFlow delegates to the Flow interface\nfrom the Kotlin Coroutines library, allowing access to methods such as first() and\n firstOrNull() . For more information, see our\n guide on Accessing Data From a Flow . The following example retrieves a list of distinct values for the year \ndocument field from the movies collection. It uses a query filter to\nmatch movies that include \"Carl Franklin\" as one of the values in the\n directors array. When you run the example, you should see output that reports each distinct\nyear for all the movies that Carl Franklin was included as a director. 
For additional information on the classes and methods mentioned on this\npage, see the following resources: This example connects to an instance of MongoDB using a connection URI.\nTo learn more about connecting to your MongoDB instance, see the\n connection guide . distinct() API Documentation distinctFlow API Documentation Dot Notation Server Manual Entry", - "code": [ - { - "lang": "kotlin", - "value": "data class Movie(\n val type: String,\n val languages: List,\n val countries: List,\n val awards: Awards){\n data class Awards(val wins: Int)\n }\n" - }, - { - "lang": "kotlin", - "value": "collection.distinct(Movie::countries.name)\n" - }, - { - "lang": "kotlin", - "value": "collection.distinct(\"${Movie::awards.name}.${Movie.Awards::wins.name}\")\n" - }, - { - "lang": "kotlin", - "value": "collection.distinct(Movie::type.name, Filters.eq(Movie::languages.name, \"French\"))\n" - }, - { - "lang": "kotlin", - "value": "import com.mongodb.MongoException\nimport com.mongodb.client.model.Filters\nimport com.mongodb.kotlin.client.coroutine.MongoClient\nimport kotlinx.coroutines.runBlocking\n\ndata class Movie(val year: Int, val directors: List)\n\nfun main() = runBlocking {\n // Replace the uri string with your MongoDB deployment's connection string\n val uri = \"\"\n val mongoClient = MongoClient.create(uri)\n val database = mongoClient.getDatabase(\"sample_mflix\")\n val collection = database.getCollection(\"movies\")\n\n try {\n val resultsFlow = collection.distinct(\n Movie::year.name, Filters.eq(Movie::directors.name, \"Carl Franklin\")\n )\n resultsFlow.collect { println(it) }\n } catch (e: MongoException) {\n System.err.println(\"An error occurred: $e\")\n }\n\n mongoClient.close()\n}\n" - }, - { - "lang": "console", - "value": "1992\n1995\n1998\n..." - } - ], - "preview": "You can retrieve a list of distinct values for a field across a\ncollection by calling the distinct() method on a MongoCollection\nobject. Pass the document field name as the first parameter and the class\nyou want to cast the results to as the type parameter.", - "tags": null, - "facets": { - "target_product": ["drivers"], - "programming_language": ["kotlin"] - } - }, - { - "slug": "usage-examples/find-operations", - "title": "Find Operations", - "headings": [], - "paragraphs": "Find a Document Find Multiple Documents", - "code": [], - "preview": null, - "tags": null, - "facets": { - "target_product": ["drivers"], - "programming_language": ["kotlin"] - } - }, - { - "slug": "usage-examples/find", - "title": "Find Multiple Documents", - "headings": ["Example"], - "paragraphs": "You can query for multiple documents in a collection by calling the find() \nmethod on a MongoCollection object. Pass a query filter to the\n find() method to query for and return documents that match the filter in\nthe collection. If you do not include a filter, MongoDB returns all the\ndocuments in the collection. For more information on querying MongoDB with the Kotlin driver, see our\n guide on Querying Documents . You can also chain methods to the find() method such as sort() which\norganizes the matched documents in a specified order and\n projection() which configures the included fields in the\nreturned documents. For more information on the sort() method, see our\n guide on Sorting .\nFor more information on the projection() method, see our\n guide on Projections The find() method returns an instance of FindFlow , a class\nthat offers several methods to access, organize, and traverse the results. 
FindFlow also obtains methods from its delegate interface Flow from the\nKotlin Coroutines library.\nYou can call the collect() method to iterate through the fetched results.\nYou can also call terminal methods, such as firstOrNull() to return either\nthe first document or null if there are no results, or first() to return\nthe first document in the collection. If no documents match the query,\ncalling first() throws a NoSuchElementException exception. For more information on accessing data from a flow with the Kotlin driver, see our\n guide on Accessing Data From a Flow . The following snippet finds and prints all documents that match a query on\nthe movies collection. It uses the following objects and methods: For additional information on the classes and methods mentioned on this\npage, see the following API Documentation: A query filter that is passed to the find() method. The lt() \nfilter matches only movies with a runtime of less than 15 minutes. A sort that organizes returned documents in descending order by\ntitle (\"Z\" before \"A\"). A projection that includes the objects in the title and imdb \nfields and excludes the _id field using the helper method\n excludeId() . This example connects to an instance of MongoDB using a connection URI.\nTo learn more about connecting to your MongoDB instance, see the\n connection guide . FindFlow find()", - "code": [ - { - "lang": "kotlin", - "value": "import com.mongodb.client.model.Filters.lt\nimport com.mongodb.client.model.Projections\nimport com.mongodb.client.model.Sorts\nimport com.mongodb.kotlin.client.coroutine.MongoClient\nimport kotlinx.coroutines.runBlocking\n\ndata class Movie(val title: String, val runtime: Int, val imdb: IMDB){\n data class IMDB(val rating: Double)\n}\n\ndata class Results(val title: String)\n\nfun main() = runBlocking {\n // Replace the uri string with your MongoDB deployment's connection string\n val uri = \"\"\n val mongoClient = MongoClient.create(uri)\n val database = mongoClient.getDatabase(\"sample_mflix\")\n val collection = database.getCollection(\"movies\")\n\n val projectionFields= Projections.fields(\n Projections.include(Movie::title.name, Movie::imdb.name),\n Projections.excludeId()\n )\n val resultsFlow = collection.withDocumentClass()\n .find(lt(Movie::runtime.name, 15))\n .projection(projectionFields)\n .sort(Sorts.descending(Movie::title.name))\n\n resultsFlow.collect { println(it) }\n\n mongoClient.close()\n}\n" - } - ], - "preview": "You can query for multiple documents in a collection by calling the find()\nmethod on a MongoCollection object. Pass a query filter to the\nfind() method to query for and return documents that match the filter in\nthe collection. If you do not include a filter, MongoDB returns all the\ndocuments in the collection.", - "tags": null, - "facets": { - "target_product": ["drivers"], - "programming_language": ["kotlin"] - } - }, - { - "slug": "usage-examples/findOne", - "title": "Find a Document", - "headings": ["Example"], - "paragraphs": "You can retrieve a single document in a collection by chaining together\nthe find() and first() methods on a MongoCollection object.\nYou can pass a query filter to the find() method to query for and\nreturn documents that match the filter in the collection. If you do not\ninclude a filter, MongoDB returns all the documents in the collection. For more information on querying MongoDB with the Kotlin driver, see our\n guide on Querying Documents . 
You can also chain other methods to the find() method\nsuch as sort() which organizes the matched documents in a specified order, and\n projection() which configures the fields included in the returned documents. For more information on the sort() method, see our\n guide on Sorting .\nFor more information on the projection() method, see our\n guide on Projections The find() method returns an instance of FindFlow , a class\nthat offers several methods to access, organize, and traverse the results. FindFlow also obtains methods from its delegate interface Flow from the\nKotlin Coroutines library, such as first() and firstOrNull() .\nThe firstOrNull() method returns the first document from the retrieved results\nor null if there are no results. The first() method returns\nthe first document or throws a NoSuchElementException exception if no\ndocuments match the query. For more information on accessing data from a flow with the Kotlin driver, see our\n guide on Accessing Data From a Flow . The following snippet finds a single document from the movies collection.\nIt uses the following objects and methods: For additional information on the classes and methods mentioned on this\npage, see the following API Documentation: A query filter that is passed to the find() method. The eq \nfilter matches only movies with the title exactly matching the text\n \"The Room\" . A sort that organizes matched documents in descending order by\nrating, so if our query matches multiple documents the returned\ndocument is the one with the highest rating. A projection that includes the objects in the title and imdb \nfields and excludes the _id field using the helper method\n excludeId() . This example connects to an instance of MongoDB using a connection URI.\nTo learn more about connecting to your MongoDB instance, see the\n connection guide . FindFlow find()", - "code": [ - { - "lang": "kotlin", - "value": "import com.mongodb.client.model.Filters.eq\nimport com.mongodb.client.model.Filters.lt\nimport com.mongodb.client.model.Projections\nimport com.mongodb.client.model.Sorts\nimport com.mongodb.kotlin.client.coroutine.MongoClient\nimport kotlinx.coroutines.flow.firstOrNull\nimport kotlinx.coroutines.runBlocking\nimport usageExamples.find.Results\n\ndata class Movie(val title: String, val runtime: Int, val imdb: IMDB) {\n data class IMDB(val rating: Double)\n}\n\ndata class Results(val title: String, val imdb: Movie.IMDB)\n\nfun main() = runBlocking {\n // Replace the uri string with your MongoDB deployment's connection string\n val uri = \"\"\n val mongoClient = MongoClient.create(uri)\n val database = mongoClient.getDatabase(\"sample_mflix\")\n val collection = database.getCollection(\"movies\")\n\n val projectionFields= Projections.fields(\n Projections.include(Movie::title.name, Movie::imdb.name),\n Projections.excludeId()\n )\n val resultsFlow = collection.withDocumentClass()\n .find(eq(Movie::title.name, \"The Room\"))\n .projection(projectionFields)\n .sort(Sorts.descending(\"${Movie::imdb.name}.${Movie.IMDB::rating.name}\"))\n .firstOrNull()\n\n if (resultsFlow == null) {\n println(\"No results found.\");\n } else {\n println(resultsFlow)\n }\n\n mongoClient.close()\n}\n" - } - ], - "preview": "You can retrieve a single document in a collection by chaining together\nthe find() and first() methods on a MongoCollection object.\nYou can pass a query filter to the find() method to query for and\nreturn documents that match the filter in the collection. 
If you do not\ninclude a filter, MongoDB returns all the documents in the collection.", - "tags": null, - "facets": { - "target_product": ["drivers"], - "programming_language": ["kotlin"] - } - }, - { - "slug": "usage-examples/insert-operations", - "title": "Insert Operations", - "headings": [], - "paragraphs": "Insert a Document Insert Multiple Documents", - "code": [], - "preview": null, - "tags": null, - "facets": { - "target_product": ["drivers"], - "programming_language": ["kotlin"] - } - }, - { - "slug": "usage-examples/insertMany", - "title": "Insert Multiple Documents", - "headings": ["Example"], - "paragraphs": "You can insert multiple documents into a collection in a single\noperation by calling the insertMany() method on a MongoCollection \nobject. To insert them, add your Document objects to a List and pass\nthat List as an argument to insertMany() . If you call the insertMany() method\non a collection that does not exist yet, the server creates it for you. Upon successful insertion, insertMany() returns an instance of\n InsertManyResult . You can retrieve information such as the _id \nfields of the documents you inserted by calling the getInsertedIds() \nmethod on the InsertManyResult instance. If your insert operation fails, the driver raises an exception. For more\ninformation on the types of exceptions raised under specific conditions,\nsee the API documentation for insertMany() , linked at the bottom of\nthis page. The following snippet inserts multiple documents into the movies \ncollection. When you run the example, you should see output with the inserted documents'\n ObjectId values in each of the value fields: For additional information on the classes and methods mentioned on this\npage, see the following API Documentation: This example connects to an instance of MongoDB using a connection URI.\nTo learn more about connecting to your MongoDB instance, see the\n connection guide . insertMany() Document InsertManyResult", - "code": [ - { - "lang": "kotlin", - "value": "import com.mongodb.MongoException\nimport com.mongodb.kotlin.client.coroutine.MongoClient\nimport kotlinx.coroutines.runBlocking\n\ndata class Movie(val title: String)\n\nfun main() = runBlocking {\n // Replace the uri string with your MongoDB deployment's connection string\n val uri = \"\"\n val mongoClient = MongoClient.create(uri)\n val database = mongoClient.getDatabase(\"sample_mflix\")\n val collection = database.getCollection(\"movies\")\n\n val movieList = listOf(\n Movie(\"Short Circuit 3\"),\n Movie(\"The Lego Frozen Movie\")\n )\n\n try {\n val result = collection.insertMany(movieList)\n println(\"Success! Inserted document ids: \" + result.insertedIds)\n } catch (e: MongoException) {\n System.err.println(\"Unable to insert due to an error: $e\")\n }\n mongoClient.close()\n}\n" - }, - { - "lang": "console", - "value": "Success! Inserted document ids: {0=BsonObjectId{value=...}, 1=BsonObjectId{value=...}}" - } - ], - "preview": "You can insert multiple documents into a collection in a single\noperation by calling the insertMany() method on a MongoCollection\nobject. To insert them, add your Document objects to a List and pass\nthat List as an argument to insertMany(). 
If you call the insertMany() method\non a collection that does not exist yet, the server creates it for you.", - "tags": null, - "facets": { - "target_product": ["drivers"], - "programming_language": ["kotlin"] - } - }, - { - "slug": "usage-examples/insertOne", - "title": "Insert a Document", - "headings": ["Example"], - "paragraphs": "You can insert a single document into a collection using the insertOne() \nmethod on a MongoCollection object. To insert a document, construct a\n Document object that contains the fields and values that you want to\nstore. If you call the insertOne() method on a collection that does\nnot exist yet, the server automatically creates it for you. Upon a successful insertion, insertOne() returns an instance of\n InsertOneResult . You can retrieve information such as the _id \nfield of the document you inserted by calling the getInsertedId() \nmethod on the InsertOneResult instance. If your insert operation fails, the driver raises an exception. For more\ninformation on the types of exceptions raised under specific conditions,\nsee the API documentation for insertOne() , linked at the bottom of\nthis page. The following snippet inserts a single document into the movies \ncollection. When you run the example, you should see output with the inserted document's\n ObjectId in the value field: For additional information on the classes and methods mentioned on this\npage, see the following API Documentation: This example connects to an instance of MongoDB using a connection URI.\nTo learn more about connecting to your MongoDB instance, see the\n connection guide . insertOne() Document InsertOneResult", - "code": [ - { - "lang": "kotlin", - "value": "import com.mongodb.MongoException\nimport com.mongodb.kotlin.client.coroutine.MongoClient\nimport kotlinx.coroutines.runBlocking\nimport org.bson.codecs.pojo.annotations.BsonId\nimport org.bson.types.ObjectId\n\ndata class Movie(@BsonId val id: ObjectId, val title: String, val genres: List)\n\nfun main() = runBlocking {\n // Replace the uri string with your MongoDB deployment's connection string\n val uri = \"\"\n val mongoClient = MongoClient.create(uri)\n val database = mongoClient.getDatabase(\"sample_mflix\")\n val collection = database.getCollection(\"movies\")\n\n try {\n val result = collection.insertOne(\n Movie(ObjectId(), \"Ski Bloopers\", listOf(\"Documentary\", \"Comedy\"))\n )\n println(\"Success! Inserted document id: \" + result.insertedId)\n } catch (e: MongoException) {\n System.err.println(\"Unable to insert due to an error: $e\")\n }\n mongoClient.close()\n}\n" - }, - { - "lang": "console", - "value": "Success! Inserted document id: BsonObjectId{value=...}" - } - ], - "preview": "You can insert a single document into a collection using the insertOne()\nmethod on a MongoCollection object. To insert a document, construct a\nDocument object that contains the fields and values that you want to\nstore. If you call the insertOne() method on a collection that does\nnot exist yet, the server automatically creates it for you.", - "tags": null, - "facets": { - "target_product": ["drivers"], - "programming_language": ["kotlin"] - } - }, - { - "slug": "usage-examples/replaceOne", - "title": "Replace a Document", - "headings": ["Example"], - "paragraphs": "You can replace a single document using the replaceOne() method on\na MongoCollection object. This method removes all the existing fields\nand values from a document (except the _id field) and substitutes it\nwith your replacement document. 
The replaceOne() method accepts a query filter that matches the\ndocument you want to replace and a replacement document that contains the\ndata you want to save in place of the matched document. The replaceOne() \nmethod only replaces the first document that matches the filter. You can optionally pass an instance of ReplaceOptions to the replaceOne() method in\norder to specify the method's behavior. For example, if you set the upsert \nfield of the ReplaceOptions object to true , the operation inserts\na new document from the fields in the replacement document if no documents\nmatch the query filter. See the link to the ReplaceOptions API\ndocumentation at the bottom of this page for more information. Upon successful execution, the replaceOne() method returns an instance\nof UpdateResult . You can retrieve information such as the number of\ndocuments modified by calling the getModifiedCount() method. You can also\nretrieve the value of the document's _id field by calling the\n getUpsertedId() method if you set upsert(true) in the\n ReplaceOptions instance and the operation resulted in the insertion of a new document. If your replacement operation fails, the driver raises an exception.\nFor example, if you try to specify a value for the immutable field\n _id in your replacement document that differs from the original\ndocument, the method throws a MongoWriteException with the message: If your replacement document contains a change that violates unique index\nrules, the method throws a MongoWriteException with an error\nmessage that should look something like this: For more information on the types of exceptions raised under specific\nconditions, see the API documentation for replaceOne() , linked at the\nbottom of this page. In this example, we replace the first match of our query filter in the\n movies collection of the sample_mflix database with a replacement\ndocument. All the fields except for the _id field are deleted from the\noriginal document and are substituted by the replacement document. Before the replaceOne() operation runs, the original document contains\nseveral fields describing the movie. After the operation runs, the resulting\ndocument contains only the fields specified by the replacement document\n( title and fullplot ) and the _id field. The following snippet uses the following objects and methods: After you run the example, you should see output that looks something like\nthis: Or if the example resulted in an upsert: If you query the replaced document, it should look something like this: For additional information on the classes and methods mentioned on this\npage, see the following API Documentation: A query filter that is passed to the replaceOne() method. The eq \nfilter matches only movies with the title exactly matching the text\n 'Music of the Heart' . A replacement document that contains the document that replaces the\nmatching document if it exists. A ReplaceOptions object with the upsert option set to true .\nThis option specifies that the method should insert the data contained in\nthe replacement document if the query filter does not match any documents. This example connects to an instance of MongoDB using a connection URI.\nTo learn more about connecting to your MongoDB instance, see the\n connection guide . 
ReplaceOne ReplaceOptions UpdateResult eq()", - "code": [ - { - "lang": "none", - "value": "After applying the update, the (immutable) field '_id' was found to have been altered to _id: ObjectId('...)" - }, - { - "lang": "none", - "value": "E11000 duplicate key error collection: ..." - }, - { - "lang": "none", - "value": "Modified document count: 1\nUpserted id: null" - }, - { - "lang": "none", - "value": "Modified document count: 0\nUpserted id: BsonObjectId{value=...}" - }, - { - "lang": "none", - "value": "Movie(title=50 Violins, fullplot= A dramatization of the true story of Roberta Guaspari who co-founded the Opus 118 Harlem School of Music)" - }, - { - "lang": "kotlin", - "value": "import com.mongodb.MongoException\nimport com.mongodb.client.model.Filters\nimport com.mongodb.client.model.ReplaceOptions\nimport com.mongodb.kotlin.client.coroutine.MongoClient\nimport kotlinx.coroutines.runBlocking\n\ndata class Movie(val title: String, val fullplot: String)\n\nfun main() = runBlocking {\n // Replace the uri string with your MongoDB deployment's connection string\n val uri = \"\"\n val mongoClient = MongoClient.create(uri)\n val database = mongoClient.getDatabase(\"sample_mflix\")\n val collection = database.getCollection(\"movies\")\n\n try {\n val query = Filters.eq(\"title\", \"Music of the Heart\")\n val replaceDocument = Movie( \"50 Violins\", \" A dramatization of the true story of Roberta Guaspari who co-founded the Opus 118 Harlem School of Music\")\n val options = ReplaceOptions().upsert(true)\n val result = collection.replaceOne(query, replaceDocument, options)\n println(\"Modified document count: \" + result.modifiedCount)\n println(\"Upserted id: \" + result.upsertedId) // only contains a non-null value when an upsert is performed\n } catch (e: MongoException) {\n System.err.println(\"Unable to replace due to an error: $e\")\n }\n mongoClient.close()\n}\n" - } - ], - "preview": "You can replace a single document using the replaceOne() method on\na MongoCollection object. This method removes all the existing fields\nand values from a document (except the _id field) and substitutes it\nwith your replacement document.", - "tags": null, - "facets": { - "target_product": ["drivers"], - "programming_language": ["kotlin"] - } - }, - { - "slug": "usage-examples/update-operations", - "title": "Update & Replace Operations", - "headings": [], - "paragraphs": "Update a Document Update Multiple Documents Replace a Document", - "code": [], - "preview": null, - "tags": null, - "facets": { - "target_product": ["drivers"], - "programming_language": ["kotlin"] - } - }, - { - "slug": "usage-examples/updateMany", - "title": "Update Multiple Documents", - "headings": ["Example"], - "paragraphs": "You can update multiple documents using the updateMany() method on\na MongoCollection object. The method accepts a filter that matches the\ndocument you want to update and an update statement that instructs the\ndriver how to change the matching document. The updateMany() method updates\nall the documents in the collection that match the filter. To perform an update with the updateMany() method, you must pass\na query filter and an update document. The query filter specifies which\ndocuments in the collection to match and the update document provides\ninstructions on what changes to make to them. You can optionally pass an instance of UpdateOptions to the updateMany() method in\norder to modify the behavior of the call. 
For example, if you set the\n upsert field of the UpdateOptions object to true and no documents\nmatch the specified query filter, the operation inserts a new document\ncomposed of the fields from both the query and update document. Upon successful execution, the updateMany() method returns an instance\nof UpdateResult . You can retrieve information such as the number of\ndocuments modified by calling the getModifiedCount() method. If you\nspecified upsert(true) in an UpdateOptions object and the\noperation results in an insert, you can retrieve the _id field of the\nnew document by calling the getUpsertedId() method on the\n UpdateResult instance. If your update operation fails, the driver raises an exception and does not update\nany of the documents matching the filter. For example, if you try to set\na value for the immutable field _id in your update document, the\n updateMany() method does not update any documents and throws a\n MongoWriteException with the message: If your update document contains a change that violates unique index\nrules, the method throws a MongoWriteException with an error\nmessage that should look something like this: For more information on the types of exceptions raised under specific\nconditions, see the API documentation for updateMany() , linked at the\nbottom of this page. In this example, we use a Filter builder to filter our query for\nmovies in the genre \"Frequently Discussed\". Next, we update documents that match our query in the movies collection of the\n sample_mflix database. We perform the following\nupdates to the matching documents: We use the Updates builder, a factory class that contains static\nhelper methods to construct the update document. While you can pass an update\ndocument instead of using the builder, the builder provides type checking and\nsimplified syntax. Read our\n guide on Updates in the Builders\nsection for more information. After you run the example, you should see a similar output. If you query the updated document or documents, they should look something like\nthis: For additional information on the classes and methods mentioned on this\npage, see the following API Documentation: Add Frequently Discussed to the array of genres only if it does not\nalready exist Set the value of lastUpdated to the current time. This example connects to an instance of MongoDB using a connection URI.\nTo learn more about connecting to your MongoDB instance, see the\n connection guide . UpdateMany UpdateOptions combine() addToSet() currentDate() UpdateResult", - "code": [ - { - "lang": "none", - "value": "Performing an update on the path '_id' would modify the immutable field '_id'" - }, - { - "lang": "none", - "value": "E11000 duplicate key error collection: ..." - }, - { - "lang": "none", - "value": "Movie(num_mflix_comments=100, genres=[ ... Frequently Discussed], lastUpdated= ... 
)" - }, - { - "lang": "kotlin", - "value": "import com.mongodb.MongoException\nimport com.mongodb.client.model.Filters\nimport com.mongodb.client.model.Updates\nimport com.mongodb.kotlin.client.coroutine.MongoClient\nimport kotlinx.coroutines.runBlocking\nimport java.time.LocalDateTime\n\ndata class Movie(\n val num_mflix_comments: Int,\n val genres: List,\n val lastUpdated: LocalDateTime\n)\n\nfun main() = runBlocking {\n // Replace the uri string with your MongoDB deployment's connection string\n val uri = \"\"\n val mongoClient = MongoClient.create(uri)\n val database = mongoClient.getDatabase(\"sample_mflix\")\n val collection = database.getCollection(\"movies\")\n\n val query = Filters.gt(Movie::num_mflix_comments.name, 50)\n val updates = Updates.combine(\n Updates.addToSet(Movie::genres.name, \"Frequently Discussed\"),\n Updates.currentDate(Movie::lastUpdated.name)\n )\n try {\n val result = collection.updateMany(query, updates)\n println(\"Modified document count: \" + result.modifiedCount)\n } catch (e: MongoException) {\n System.err.println(\"Unable to update due to an error: $e\")\n }\n mongoClient.close()\n}\n" - }, - { - "lang": "console", - "value": "Modified document count: 53" - } - ], - "preview": "You can update multiple documents using the updateMany() method on\na MongoCollection object. The method accepts a filter that matches the\ndocument you want to update and an update statement that instructs the\ndriver how to change the matching document. The updateMany() method updates\nall the documents in the collection that match the filter.", - "tags": null, - "facets": { - "target_product": ["drivers"], - "programming_language": ["kotlin"] - } - }, - { - "slug": "usage-examples/updateOne", - "title": "Update a Document", - "headings": ["Example"], - "paragraphs": "You can update a single document using the updateOne() method on\na MongoCollection object. The method accepts a filter that matches the\ndocument you want to update and an update statement that instructs the\ndriver how to change the matching document. The updateOne() method only\nupdates the first document that matches the filter. To perform an update with the updateOne() method, you must pass\na query filter and an update document. The query filter specifies the criteria\nfor which document to perform the update on and the update document provides\ninstructions on what changes to make to it. You can optionally pass an instance of UpdateOptions to the updateOne() method in\norder to specify the method's behavior. For example, if you set the upsert field of\nthe UpdateOptions object to true , the operation inserts a new\ndocument from the fields in both the query and update document if no documents\nmatch the query filter. See the link to the UpdateOptions API\ndocumentation at the bottom of this page for more information. Upon successful execution, the updateOne() method returns an instance\nof UpdateResult . You can retrieve information such as the number of\ndocuments modified by calling the getModifiedCount() method, or the\nvalue of the _id field by calling the getUpsertedId() method if you\nspecified upsert(true) in an UpdateOptions instance. 
If your update operation fails, the driver raises an exception.\nFor example, if you try to set a value for the immutable field _id in\nyour update document, the method throws a MongoWriteException with the\nmessage: If your update document contains a change that violates unique index\nrules, the method throws a MongoWriteException with an error\nmessage that should look something like this: For more information on the types of exceptions raised under specific\nconditions, see the updateOne() API documentation linked at the\nbottom of this page. In this example, we use a Filter builder to query the collection for\na movie with the title \"Cool Runnings 2\". Next, we perform the following updates to the first match for our query\nin the movies collection of the sample_mflix database: We use the Updates builder, a factory class that contains static\nhelper methods, to construct the update document. While you can pass an update\ndocument instead of using the builder, the builder provides type checking and\nsimplified syntax. See the guide on the Updates builder \nfor more information. After you run the example, you should see output that looks something like this: Or if the example resulted in an upsert: If you query the updated document, it should look something like this: For additional information on the classes and methods mentioned on this\npage, see the following API Documentation: Set the value of runtime to 99 Add Sports to the array of genres only if it does not already exist Set the value of lastUpdated to the current time This example connects to an instance of MongoDB using a connection URI.\nTo learn more about connecting to your MongoDB instance, see the\n connection guide . UpdateOne UpdateOptions combine() set() addToSet() currentDate() UpdateResult", - "code": [ - { - "lang": "none", - "value": "Performing an update on the path '_id' would modify the immutable field '_id'" - }, - { - "lang": "none", - "value": "E11000 duplicate key error collection: ..." - }, - { - "lang": "none", - "value": "Modified document count: 1\nUpserted id: null" - }, - { - "lang": "none", - "value": "Modified document count: 0\nUpserted id: BsonObjectId{value=...}" - }, - { - "lang": "none", - "value": "Movie(title=Cool Runnings 2, runtime=99, genres=[ ... Sports], lastUpdated= ... 
)" - }, - { - "lang": "kotlin", - "value": "import com.mongodb.MongoException\nimport com.mongodb.client.model.Filters\nimport com.mongodb.client.model.UpdateOptions\nimport com.mongodb.client.model.Updates\nimport com.mongodb.kotlin.client.coroutine.MongoClient\nimport kotlinx.coroutines.runBlocking\nimport java.time.LocalDateTime\n\ndata class Movie(\n val title: String,\n val runtime: Int,\n val genres: List,\n val lastUpdated: LocalDateTime\n)\n\nfun main() = runBlocking {\n // Replace the uri string with your MongoDB deployment's connection string\n val uri = \"\"\n val mongoClient = MongoClient.create(uri)\n val database = mongoClient.getDatabase(\"sample_mflix\")\n val collection = database.getCollection(\"movies\")\n\n val query = Filters.eq(Movie::title.name, \"Cool Runnings 2\")\n val updates = Updates.combine(\n Updates.set(Movie::runtime.name, 99),\n Updates.addToSet(Movie::genres.name, \"Sports\"),\n Updates.currentDate(Movie::lastUpdated.name)\n )\n val options = UpdateOptions().upsert(true)\n try {\n val result = collection.updateOne(query, updates, options)\n println(\"Modified document count: \" + result.modifiedCount)\n println(\"Upserted id: \" + result.upsertedId) // only contains a non-null value when an upsert is performed\n } catch (e: MongoException) {\n System.err.println(\"Unable to update due to an error: $e\")\n }\n mongoClient.close()\n}\n" - } - ], - "preview": "You can update a single document using the updateOne() method on\na MongoCollection object. The method accepts a filter that matches the\ndocument you want to update and an update statement that instructs the\ndriver how to change the matching document. The updateOne() method only\nupdates the first document that matches the filter.", - "tags": null, - "facets": { - "target_product": ["drivers"], - "programming_language": ["kotlin"] - } - }, - { - "slug": "usage-examples/watch", - "title": "Watch for Changes", - "headings": ["Process Change Stream Events with .collect()", "Example"], - "paragraphs": "You can keep track of changes to data in MongoDB, such as changes to a\ncollection, database, or deployment, by opening a change stream . A change\nstream allows applications to watch for changes to data and react to them. The change stream returns change event documents when changes occur. A\nchange event contains information about the updated data. Open a change stream by calling the watch() method on a\n MongoCollection , MongoDatabase , or MongoClient object as shown in\nthe following code example: The watch() method optionally takes an aggregation pipeline which\nconsists of an array of stages as the first parameter to filter and\ntransform the change event output as follows: The watch() method returns an instance of ChangeStreamFlow , a class\nthat offers several methods to access, organize, and traverse the results.\n ChangeStreamFlow also inherits methods from its parent class Flow \nfrom the Kotlin Coroutines library. You can call collect() on the ChangeStreamFlow to handle\nevents as they occur. Alternatively, you can use other methods built in to Flow \nto work with the results. To configure options for processing the documents returned from the change\nstream, use member methods of the ChangeStreamFlow object returned\nby watch() . See the link to the ChangeStreamFlow API\ndocumentation at the bottom of this example for more details on the\navailable methods. 
To capture events from a change stream, call the collect() method\nas shown below: The .collect() function triggers when a change event is emitted. You can\nspecify logic in the function to process the event document when it is\nreceived. For update operation change events, change streams only return the modified\nfields by default rather than the entire updated document. You can configure\nyour change stream to also return the most current version of the document\nby calling the fullDocument() member method of the ChangeStreamFlow \nobject with the value FullDocument.UPDATE_LOOKUP as follows: The following example application opens a change stream on the movies collection\nin the sample_mflix database. The application uses an aggregation pipeline\nto filter changes based on operationType so that it only receives insert and update\nevents. Deletes are excluded by omission. The application uses the .collect() method\nto receive and print the filtered change events that occur on the collection. The application launches the collect() operation in a separate coroutine job,\nwhich allows the application to continue running while the change stream is open.\nOnce the operations are complete, the application closes the change stream and exits. For additional information on the classes and methods mentioned on this\npage, see the following resources: This example connects to an instance of MongoDB using a connection URI.\nTo learn more about connecting to your MongoDB instance, see the\n connection guide . Change Streams Server Manual Entry Change Events Server Manual Entry Aggregation Pipeline Server Manual Entry Aggregation Stages Server Manual Entry ChangeStreamFlow API Documentation MongoCollection.watch() API Documentation MongoDatabase.watch() API Documentation MongoClient.watch() API Documentation", - "code": [ - { - "lang": "kotlin", - "value": "val changeStream = collection.watch()\n" - }, - { - "lang": "kotlin", - "value": "val pipeline = listOf(Aggregates.match(Filters.lt(\"fullDocument.runtime\", 15)))\nval changeStream = collection.watch(pipeline)\n" - }, - { - "lang": "kotlin", - "value": "val changeStream = collection.watch()\nchangeStream.collect {\n println(\"Change observed: $it\")\n}\n" - }, - { - "lang": "kotlin", - "value": "val changeStream = collection.watch()\n .fullDocument(FullDocument.UPDATE_LOOKUP)\n" - }, - { - "lang": "kotlin", - "value": "\nimport com.mongodb.client.model.Aggregates\nimport com.mongodb.client.model.Filters\nimport com.mongodb.client.model.Updates\nimport com.mongodb.client.model.changestream.FullDocument\nimport com.mongodb.kotlin.client.coroutine.MongoClient\nimport kotlinx.coroutines.launch\nimport kotlinx.coroutines.runBlocking\nimport java.lang.Thread.sleep\n\ndata class Movie(val title: String, val year: Int)\n\nfun main() = runBlocking {\n // Replace the uri string with your MongoDB deployment's connection string\n val uri = \"\"\n val mongoClient = MongoClient.create(uri)\n val database = mongoClient.getDatabase(\"sample_mflix\")\n val collection = database.getCollection(\"movies\")\n\n\n\n val job = launch {\n val pipeline = listOf(\n Aggregates.match(\n Filters.`in`(\"operationType\", mutableListOf(\"insert\", \"update\"))\n )\n )\n val changeStreamFlow = collection.watch(pipeline)\n .fullDocument(FullDocument.DEFAULT)\n changeStreamFlow.collect { event ->\n println(\"Received a change to the collection: $event\")\n }\n }\n\n // Insert events captured by the change stream watcher\n collection.insertOne(Movie(\"Back to the Future\", 1985))\n 
collection.insertOne(Movie(\"Freaky Friday\", 2003))\n\n // Update event captured by the change stream watcher\n collection.updateOne(\n Filters.eq(Movie::title.name, \"Back to the Future\"),\n Updates.set(Movie::year.name, 1986)\n )\n\n // Delete event not captured by the change stream watcher\n collection.deleteOne(Filters.eq(Movie::title.name, \"Freaky Friday\"))\n\n sleep(1000) // Give time for the change stream watcher to process all events\n\n // Cancel coroutine job to stop the change stream watcher\n job.cancel()\n mongoClient.close()\n}\n" - }, - { - "lang": "console", - "value": "Received a change to the collection: ChangeStreamDocument{ operationType=insert, resumeToken={\"_data\": \"82646518C0000000022B022C0100296E5A1004782683FAB5A741B0B0805C207A7FCCED46645F69640064646518C0E6873977DD9059EE0004\"}, namespace=sample_mflix.movies, destinationNamespace=null, fullDocument=Movie(title=Back to the Future, year=1985), fullDocumentBeforeChange=null, documentKey={\"_id\": {\"$oid\": \"646518c0e6873977dd9059ee\"}}, clusterTime=Timestamp{value=7234215589353357314, seconds=1684347072, inc=2}, updateDescription=null, txnNumber=null, lsid=null, wallTime=BsonDateTime{value=1684347072952}}\nReceived a change to the collection: ChangeStreamDocument{ operationType=insert, resumeToken={\"_data\": \"82646518C1000000012B022C0100296E5A1004782683FAB5A741B0B0805C207A7FCCED46645F69640064646518C1E6873977DD9059EF0004\"}, namespace=sample_mflix.movies, destinationNamespace=null, fullDocument=Movie(title=Freaky Friday, year=2003), fullDocumentBeforeChange=null, documentKey={\"_id\": {\"$oid\": \"646518c1e6873977dd9059ef\"}}, clusterTime=Timestamp{value=7234215593648324609, seconds=1684347073, inc=1}, updateDescription=null, txnNumber=null, lsid=null, wallTime=BsonDateTime{value=1684347073112}}\nReceived a change to the collection: ChangeStreamDocument{ operationType=update, resumeToken={\"_data\": \"8264651D4A000000042B022C0100296E5A1004CAEADF0D7376406A8197E3082CDB3D3446645F6964006464651D4A8C2D2556BA204FB40004\"}, namespace=sample_mflix.movies, destinationNamespace=null, fullDocument=null, fullDocumentBeforeChange=null, documentKey={\"_id\": {\"$oid\": \"64651d4a8c2d2556ba204fb4\"}}, clusterTime=Timestamp{value=7234220580105355268, seconds=1684348234, inc=4}, updateDescription=UpdateDescription{removedFields=[], updatedFields={\"year\": 1986}, truncatedArrays=[], disambiguatedPaths=null}, txnNumber=null, lsid=null, wallTime=BsonDateTime{value=1684348234958}}" - } - ], - "preview": "You can keep track of changes to data in MongoDB, such as changes to a\ncollection, database, or deployment, by opening a change stream. A change\nstream allows applications to watch for changes to data and react to them.", - "tags": null, - "facets": { - "target_product": ["drivers"], - "programming_language": ["kotlin"] - } - }, - { - "slug": "usage-examples", - "title": "Usage Examples", - "headings": ["Overview", "How to Use the Usage Examples"], - "paragraphs": "Usage examples provide convenient starting points for popular MongoDB\noperations. Each example provides the following information: An explanation of the operation in the example showing the\npurpose and a sample use case for the method An explanation of how to use the operation, including parameters,\nreturn values, and common exceptions you might encounter A full Kotlin file that you can copy and paste to run the example\nin your own environment These examples use the sample datasets \nprovided by Atlas. 
You can load them into your database on the free tier of\nMongoDB Atlas by following the\n Get Started with Atlas Guide \nor you can\n import the sample dataset into a local MongoDB instance . Once you have imported the dataset, you can copy and paste a usage\nexample into your development environment of choice. You can follow the\n quick start guide to learn more about getting\nstarted with the MongoDB Kotlin driver. Once you've copied a usage example,\nyou'll need to edit the connection URI to get the example connected to\nyour MongoDB instance: You can use the Atlas Connectivity Guide \nto learn how to allow connections to your instance of Atlas and to find the\n connection string you use to replace the\n uri variable in usage examples. If your instance uses\n SCRAM authentication , you can replace\n with your username, with your password, and\n with the IP address or URL of your instance. For more information about connecting to your MongoDB instance, see our\n Connection Guide .", - "code": [ - { - "lang": "kotlin", - "value": "// Replace the following with your MongoDB deployment's connection string.\nval uri = \"\"" - } - ], - "preview": "Usage examples provide convenient starting points for popular MongoDB\noperations. Each example provides the following information:", - "tags": null, - "facets": { - "target_product": ["drivers"], - "programming_language": ["kotlin"] - } - }, - { - "slug": "validate-signatures", - "title": "Validate Driver Artifact Signatures", - "headings": [ - "Overview", - "Procedure", - "Install Encryption Software", - "Download and Import the Public Key", - "Download the Signed File", - "Download the File Signature", - "Verify the Signature", - "Additional Information" - ], - "paragraphs": "You can validate the signature of a Kotlin driver artifact published\non Maven. This process can enhance the security of your system or\nnetwork by allowing you to confirm the authenticity of the driver. The following steps describe how you can validate driver artifact\nsignatures. You must first install the GnuPG encryption suite to use GPG\non the command line. You can install GnuPG by using Homebrew . As an alternative, you can install GPG Suite ,\nwhich provides a GUI to use GPG. There is a Homebrew installation \nfor GPG Suite. Navigate to the Releases page\nin the MongoDB JVM drivers GitHub repository. Each version release contains instructions on\nhow to download and import the public key for verifying signatures. In your terminal, run the curl command to download the signed\nfile corresponding to a version of the driver. For example,\nrunning the following command downloads the signed file for the\nv5.1.0 driver: In your terminal, run the curl command to download the file\nsignature corresponding to a version of the driver. 
For example,\nrunning the following command downloads the file signature for the\nv5.1.0 driver: Finally, you can verify the signature by using the encryption package.\nThe following terminal command uses gpg to verify the artifact signature of the v5.1.0\ndriver: If you successfully verify the signature, you see a message\nsimilar to the following: To learn more about verifying signatures, see Verify Integrity\nof MongoDB Packages in the Server\nmanual.", - "code": [ - { - "lang": "sh", - "value": "curl -LO https://repo.maven.apache.org/maven2/org/mongodb/mongodb-driver-core/5.1.0/mongodb-driver-core-5.1.0.jar" - }, - { - "lang": "sh", - "value": "curl -LO https://repo.maven.apache.org/maven2/org/mongodb/mongodb-driver-core/5.1.0/mongodb-driver-core-5.1.0.jar.asc" - }, - { - "lang": "sh", - "value": "gpg --verify mongodb-driver-core-5.1.0.jar.asc mongodb-driver-core-5.1.0.jar" - }, - { - "lang": "none", - "value": "gpg: Signature made Tue 30 Apr 12:05:34 2024 MDT\ngpg: using RSA key 76E0008D166740A8\ngpg: Good signature from \"MongoDB Java Driver Release Signing Key \" [unknown]\ngpg: WARNING: This key is not certified with a trusted signature!\ngpg: There is no indication that the signature belongs to the owner.\nPrimary key fingerprint: 1A75 005E 1421 9222 3D6A 7C3B 76E0 008D 1667 40A8" - } - ], - "preview": "You can validate the signature of a Kotlin driver artifact published\non Maven. This process can enhance the security of your system or\nnetwork by allowing you to confirm the authenticity of the driver.", - "tags": "java, kotlin, security, SSDLC, encryption", - "facets": { - "genre": ["tutorial"], - "target_product": ["drivers"], - "programming_language": ["kotlin"] - } - }, - { - "slug": "whats-new", - "title": "What's New", - "headings": [ - "What's New in 5.2", - "What's New in 5.1.3", - "What's New in 5.1.2", - "What's New in 5.1.1", - "What's New in 5.1", - "Deprecations in 5.1", - "Improvements in 5.1", - "New Features in 5.1", - "What's New in 5.0", - "What's New in 4.11", - "Deprecations in 4.11", - "New Features in 4.11", - "What's New in 4.10" - ], - "paragraphs": "Learn what's new in: Version 5.2 Version 5.1.3 Version 5.1.2 Version 5.1.1 Version 5.1 Version 5.0 Version 4.11 Version 4.10 New features of the 4.11 driver release include: Atlas Search and Vector Search Indexes in the Indexes guide Adds the SearchIndexType class, which you can pass\nwhen constructing a SearchIndexModel instance. This change\nallows you to specify the index type when creating an Atlas\nSearch or Vector Search index. To learn more, see Atlas Search and Vector Search Indexes in the Indexes guide . The 5.1.3 driver patch release includes the following changes: Fixes an issue that could cause assertion errors when using Cursor \ntypes. The 5.1.2 driver patch release includes the following changes: Support for encoding Kotlin data classes with nullable\ngeneric parameter types. For example, you can encode the Container class\nin the following code: The 5.1.1 driver patch release includes the following changes: When using the MONGODB-OIDC authentication mechanism, you must not\ninclude comma characters in the authMechanismProperties connection\nstring value. To learn more about this behavior, see the\n MONGODB-OIDC section of the Enterprise\nAuthentication guide. This section includes the following information: To avoid breaking changes in future major releases of the driver,\nreplace any application code that depends on deprecated program elements. 
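A compilable sketch of the nullable-generic Container example referenced in the 5.1.2 notes above; it assumes the kotlinx.serialization plugin is applied and picks Box<String?> as one possible concrete instantiation:

```kotlin
import kotlinx.serialization.Serializable

@Serializable
data class Box<T>(
    val boxed: T
)

@Serializable
data class Container(
    val box: Box<String?>
)
```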
Deprecations in 5.1 Improvements in 5.1 New Features in 5.1 Support for MongoDB server v3.6 is deprecated and will be removed in the\nnext driver version release. To learn how to upgrade your MongoDB server\ndeployment, see Release Notes in the MongoDB server\nmanual. Internal testing of GraalVM native image technology. These tests involve building\nnative applications by using the GraalVM native-image tool. Enhanced support for the MONGODB-OIDC authentication mechanism.\nTo learn more about OIDC, see the MONGODB-OIDC section of the\nEnterprise Authentication Mechanisms guide. Fixes an issue in which operations used the incorrect codec when using\na polymorphic MongoCollection instance. This ensures that\ndiscriminator information is not lost when using bson-kotlinx . Fixes an issue in which the class discriminator was the first field\nwhen decoding, resulting in field type errors when using a polymorphic\n MongoCollection instance. Support for polymorphic serialization. To learn more, see the\n Polymorphic Serialization section of the Kotlin Serialization guide. Introduces the serverMonitoringMode connection URI option. To\nlearn more, see the Connection Options guide. New features of the 5.0 driver release include: The KotlinSerializerCodecProvider constructor now accepts\n serializersModule and bsonConfiguration objects: This makes it easier to customize your configuration. Fixes a Kotlin reflection bug that resulted in container type erasure. This section includes the following information: Deprecations in 4.11 New Features in 4.11 The 4.11 driver release deprecates the following items: To avoid breaking changes in future major releases of the driver,\nreplace any application code that depends on deprecated methods and types. The following network address-related methods are deprecated and will be removed\nin v5.0: The ServerAddress \nmethods getSocketAddress() and getSocketAddresses() . Instead of getSocketAddress() , use the getByName() instance\nmethod of java.net.InetAddress . Instead of getSocketAddresses() , use the getAllByName() instance\nmethod of java.net.InetAddress . The UnixServerAddress \nmethod getUnixSocketAddress() . Instead of getUnixSocketAddress() , construct an instance of\n jnr.unixsocket.UnixSocketAddress . Pass the full path of the UNIX\nsocket file to the constructor. By default, MongoDB creates a UNIX\nsocket file located at \"/tmp/mongodb-27017.sock\" . To learn more\nabout the UnixSocketAddress , see the UnixSocketAddress API documentation. The following methods and types related to the\n StreamFactory \ninterface are deprecated and scheduled for removal in v5.0: If you configure Netty by using\n MongoClientSettings.Builder.streamFactoryFactory() , your code might resemble\nthe following: Replace this code with the TransportSettings.nettyBuilder() \nas shown in the following example: streamFactoryFactory() method from MongoClientSettings.Builder getStreamFactoryFactory() method from MongoClientSettings NettyStreamFactoryFactory class NettyStreamFactory class AsynchronousSocketChannelStreamFactory class AsynchronousSocketChannelStreamFactoryFactory class BufferProvider class SocketStreamFactory class Stream class StreamFactory class StreamFactoryFactory class TlsChannelStreamFactoryFactory class New features of the 4.11 driver release include: Support for connecting to MongoDB by using a SOCKS5 proxy. Added the getSplitEvent() method to the ChangeStreamDocument class\nto identify fragments of a change stream event that exceeds 16MB. 
You must\nuse the aggregation stage $changeStreamSplitLargeEvent in your change\nstream to handle events that exceed 16MB. Added an aggregation stage builder for $vectorSearch . Added Atlas Search index management helpers. Updated Snappy and Zstd compression library dependency versions. To learn\nmore about the current dependency versions, see Network Compression . Added getElapsedTime() methods to the following classes to monitor the\nduration of connection pool events: ConnectionCheckOutFailedEvent ConnectionCheckedOutEvent ConnectionReadyEvent Support for Java 21 virtual threads and structured concurrency. The driver\ninternals were updated to avoid unnecessary pinning of virtual threads\nand to preserve interrupted status of a thread, as the latter matters for\nstructured concurrency where it is used for cancellation. To learn more about virtual threads, see the Virtual Threads \nJDK enhancement proposal. To learn more about structured concurrency, see the\n Structured Concurrency \nJDK enhancement proposal. Updated API documentation for the following types: ClusterListener ServerListener ServerMonitorListener Starting in version 4.10.1 of the Kotlin driver, you must add\nthe bson-kotlinx library as an explicit dependency to use the\n kotlinx-serialization library. Support for Kotlin server-side usage, both for coroutines and for synchronous applications. Codec support for Kotlin data classes. Support for the kotlinx.serialization library", - "code": [ - { - "lang": "kotlin", - "value": "@Serializable\ndata class Box(\n val boxed: T\n)\n\n@Serializable\ndata class Container(\n val box: Box\n)" - }, - { - "lang": "kotlin", - "value": "KotlinSerializerCodec.create(clazz.kotlin, serializersModule=serializersModule, bsonConfiguration=bsonConfiguration)" - }, - { - "lang": "java", - "value": "import com.mongodb.connection.netty.NettyStreamFactoryFactory;\n\n// ...\n\nMongoClientSettings settings = MongoClientSettings.builder()\n .streamFactoryFactory(NettyStreamFactoryFactory.builder().build())\n .build();" - }, - { - "lang": "java", - "value": "import com.mongodb.connection.TransportSettings;\n\n// ...\n\nMongoClientSettings settings = MongoClientSettings.builder()\n .transportSettings(TransportSettings.nettyBuilder().build())\n .build();" - } - ], - "preview": "Learn what's new in:", - "tags": null, - "facets": { - "target_product": ["drivers"], - "programming_language": ["kotlin"] - } - } - ] + "url": "http://mongodb.com/docs/drivers/kotlin/coroutine/upcoming", + "includeInGlobalSearch": false, + "documents": [ + { + "slug": "api-documentation", + "title": "API Documentation", + "headings": [], + "paragraphs": "BSON kotlinx.serialization -\nclasses for encoding and decoding between Kotlin data classes and the BSON data\nformat using kotlinx.serialization . Core - classes that\ncontain essential driver functionality. Kotlin Coroutine Driver -\nclasses for the current driver API using coroutines. Kotlin Sync Driver -\nclasses for the current synchronous driver API.", + "code": [], + "preview": null, + "tags": null, + "facets": { + "target_product": ["drivers"], + "programming_language": ["kotlin"] + } + }, + { + "slug": "compatibility", + "title": "Compatibility", + "headings": [ + "MongoDB Compatibility", + "Compatibility Table Legend", + "Language Compatibility" + ], + "paragraphs": "The following compatibility table specifies the recommended version or versions\nof the MongoDB Kotlin Driver for use with a specific version of MongoDB. The first column lists the driver version. 
MongoDB ensures compatibility between the MongoDB Server and the drivers\nfor three years after the server version's end of life (EOL) date. To learn\nmore about the MongoDB release and EOL dates, see\n MongoDB Software Lifecycle Schedules . Icon Explanation \u2713 All features are supported. \u229b The Driver version will work with the MongoDB version, but not all\nnew MongoDB features are supported. No mark The Driver version is not tested with the MongoDB version. Kotlin Driver Version MongoDB 8.0 MongoDB 7.0 MongoDB 6.0 MongoDB 5.0 MongoDB 4.4 MongoDB 4.2 MongoDB 4.0 MongoDB 3.6 MongoDB 3.4 MongoDB 3.2 MongoDB 3.0 MongoDB 2.6 5.2 \u2713 \u2713 \u2713 \u2713 \u2713 \u2713 \u2713 \u2713 5.1 \u229b \u2713 \u2713 \u2713 \u2713 \u2713 \u2713 \u2713 5.0 \u229b \u2713 \u2713 \u2713 \u2713 \u2713 \u2713 \u2713 4.11 \u229b \u2713 \u2713 \u2713 \u2713 \u2713 \u2713 \u2713 4.10 \u229b \u2713 \u2713 \u2713 \u2713 \u2713 \u2713 \u2713 The following compatibility table specifies the recommended version or versions\nof the MongoDB Kotlin Driver for use with a specific version of Kotlin. The first column lists the driver version. For more information on how to read the compatibility tables, see our guide on\n MongoDB Compatibility Tables . Kotlin Driver Version Kotlin 1.8 5.1 \u2713 5.0 \u2713 4.11 \u2713 4.10 \u2713", + "code": [], + "preview": "The following compatibility table specifies the recommended version or versions\nof the MongoDB Kotlin Driver for use with a specific version of MongoDB.", + "tags": null, + "facets": { + "target_product": ["drivers"], + "programming_language": ["kotlin"] + } + }, + { + "slug": "connection-troubleshooting", + "title": "Connection Troubleshooting", + "headings": [ + "Connection Error", + "Check Connection String", + "Configure Firewall", + "Authentication Error", + "Check Connection String", + "Verify User Is in Authentication Database", + "Error Sending Message", + "Check Connection String", + "Verify User Is in Authentication Database", + "Configure Firewall", + "Check the Number of Connections", + "Timeout Error", + "Set maxConnectionTimeoutMS", + "Set maxConnectionLifeTime and maxConnectionIdleTime", + "Check the Number of Connections", + "Additional Tips", + "Get Log Information for TLS/SSL" + ], + "paragraphs": "This page offers potential solutions to issues you might see when\nconnecting to a MongoDB instance or replica set while using the\nMongoDB Kotlin Driver. This page lists only connection issues. If you are having any other issues\nwith MongoDB, consider the following resources: The Frequently Asked Questions (FAQ) for the Kotlin driver The Issues & Help topic for information about\nreporting bugs, contributing to the driver, and additional resources The MongoDB Community Forums for\nquestions, discussions, or general technical support The following error message is a general message indicating that the driver\ncannot connect to a server on the specified hostname or port: If you receive this error, try the following methods to resolve the issue. Verify that the hostname and port number in the connection string are both\naccurate. In the sample error message, the hostname is 127.0.0.1 and the\nport is 27017 . The default port value for a MongoDB instance is\n 27017 , but you can configure MongoDB to communicate on another port. Assuming that your MongoDB deployment uses the default port, verify that your\nfirewall has port 27017 open. If your deployment is using a different port,\nverify that port is open in your firewall. 
Do not open ports in your firewall unless you are sure that is the port used\nby your MongoDB instance. The Kotlin driver can fail to connect to a MongoDB instance if\nthe authorization is not configured correctly. This often results in an error\nmessage similar to the following: If you receive this error, try the following methods to resolve the issue. An invalid connection string is the most common cause of authentication\nissues when attempting to connect to MongoDB. If your connection string contains a username and password, ensure that they\nare in the correct format. If your MongoDB deployment is on MongoDB Atlas, you can check your connection\nstring by using the Atlas Connection Example .\nMake sure to replace the connection string in the example with yours. When connecting to a replica set, you should include all of the hosts\nin the replica set in your connection string. Separate each of the hosts\nin the connection string with a comma. This enables the driver to establish a\nconnection if one of the hosts is unreachable. For more information about using connection strings with the Kotlin driver,\nsee Connection URI in the Connection Guide. If the username or password includes any of the following characters, they\nmust be percent encoded : To successfully authenticate a connection by using a username and password,\nthe username must be defined in the authentication database. The default\nauthentication database is the admin database. To use a different database\nfor authentication, specify the authSource in the connection string. The\nfollowing example instructs the driver to use users as the authentication\ndatabase: When you send a request through the driver and it is unable to send the command,\nit often displays the following general error message: If you receive this error, try the following methods to resolve the issue. Verify that the connection string in\nyour app is accurate. This is described under Connection Error \nand Authentication Error . The user needs to be recognized in your\nauthentication database. This is described under Authentication\nError . The firewall needs to have an open port for communicating with the MongoDB\ninstance. This is described under Connection Error . Each MongoClient instance supports a maximum number of concurrent open\nconnections in its connection pool. The configuration parameter maxPoolSize \ndefines this value and is set to 100 by default. If there are already a\nnumber of open connections equal to maxPoolSize , the server waits until\na connection becomes available. If this wait time exceeds the maxIdleTimeMS \nvalue, the driver responds with an error. Sometimes when you send messages through the driver to the server, the messages\ntake a while to respond. When this happens, you might receive an error message\nsimilar to one of the following error messages: If you receive one of these errors, try the following methods to resolve the\nissue. The maxConnectionTimeoutMS option indicates the amount of time the\nKotlin driver waits for a connection before timing out. The default\nvalue is 10000 . You can increase this value or set it to 0 if\nyou want the driver to never timeout. Consider setting maxConnectionLifeTime and\n maxConnectionIdleTime . These parameters configure how long a connection\ncan be maintained with a MongoDB instance. For more information about these\nparameters, see Connection Pool Settings . You might have too many open connections. The solution to this is described\nunder Error Sending Message . 
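As a minimal sketch of the timeout-related settings discussed above (placeholder host and durations, assuming the Kotlin coroutine driver's `MongoClient`), connection lifetime and idle time can be tuned through `MongoClientSettings`:

```kotlin
import java.util.concurrent.TimeUnit
import com.mongodb.ConnectionString
import com.mongodb.MongoClientSettings
import com.mongodb.kotlin.client.coroutine.MongoClient

// Minimal sketch: cap connection lifetime and idle time through
// MongoClientSettings. The URI and durations are placeholders.
val settings = MongoClientSettings.builder()
    .applyConnectionString(ConnectionString("mongodb://<hostname>:<port>"))
    .applyToConnectionPoolSettings { builder ->
        builder
            .maxConnectionLifeTime(30, TimeUnit.MINUTES)
            .maxConnectionIdleTime(5, TimeUnit.MINUTES)
    }
    .build()

val mongoClient = MongoClient.create(settings)
```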
While not related to a specific error message, this section includes\nadditional information that can be useful when attempting to troubleshoot\nconnection issues. When using TLS/SSL, you can use the -Djavax.net.debug=all system property\nto view additional log statements. This can help when attempting to debug any\nconnection issues. See the Oracle guide to debugging TLS/SSL connections \nfor more information.", + "code": [ + { + "lang": "none", + "value": "Error: couldn't connect to server 127.0.0.1:27017" + }, + { + "lang": "none", + "value": "Command failed with error 18 (AuthenticationFailed): 'Authentication failed.' on server localhost:27017." + }, + { + "lang": "none", + "value": ": / ? # [ ] @" + }, + { + "lang": "kotlin", + "value": "val mongoClient =\nMongoClient.create(\"mongodb://:@:/?authSource=users\")" + }, + { + "lang": "none", + "value": "com.mongodb.MongoSocketWriteException: Exception sending message" + }, + { + "lang": "none", + "value": "Timed out after 30000 ms while waiting for a server that matches ReadPreferenceServerSelector{readPreference=primary}." + }, + { + "lang": "none", + "value": "No server chosen by ReadPreferenceServerSelector{readPreference=primary} from cluster description" + } + ], + "preview": "This page offers potential solutions to issues you might see when\nconnecting to a MongoDB instance or replica set while using the\nMongoDB Kotlin Driver.", + "tags": null, + "facets": { + "target_product": ["drivers"], + "programming_language": ["kotlin"] + } + }, + { + "slug": "faq", + "title": "FAQ", + "headings": [ + "Why Am I Having Problems Connecting to a MongoDB Instance?", + "How is the Kotlin Driver Different from KMongo?", + "What is the Difference Between the Kotlin Driver and the Kotlin SDK?", + "How Does Connection Pooling Work in the Kotlin Driver?" + ], + "paragraphs": "On this page, you can find frequently asked questions and their corresponding answers. If you can't find an answer to your question on this page, see the\n Issues & Help page for information on how to report issues. If you have trouble connecting to a MongoDB deployment, see\nthe Connection Troubleshooting Guide \nfor possible solutions. The Kotlin driver is the official MongoDB driver for Kotlin. It is\ndeveloped by the MongoDB team and provides a native API for Kotlin\napplications to connect to MongoDB and work with data. It is implemented\nby wrapping the MongoDB Java driver . The Kotlin driver was developed in collaboration with the creator of KMongo,\nJulien Buret, to give users an officially-supported driver. The official Kotlin driver and KMongo have generally similar APIs.\nNotable similarities between the Kotlin driver and KMongo include: Although the official Kotlin driver and KMongo are similar, there are some\nkey differences: For more detailed information, see Migrate from KMongo . KMongo is a popular community-developed library\nfor working with MongoDB from Kotlin applications.\nIt is a wrapper around the Java driver that was created prior to the creation of\nthe official Kotlin driver to serve the needs of the Kotlin community. As of July 2023, KMongo has been marked as deprecated. Support for synchronous and coroutine-based operations Support using data classes to represent MongoDB documents Support KotlinX serialization Support for MongoDB CRUD APIs and aggregation The official driver does not have built-in support for reactor ,\n rxjava2 , Jackson ,\nor GSON . The official driver does not support MongoDB shell commands. 
The official driver supports type-safe queries with the Builders API,\nwhereas KMongo uses infix functions and property references for\ntype-safe queries. MongoDB supports both mobile and server-side development in Kotlin. If\nyou are developing a mobile application for Android or Kotlin\nMultiplatform (KMP), you can use the MongoDB\nAtlas Device Kotlin SDK to access Atlas App Services and\nto manage your Realm data. The Kotlin driver supports server-side development by providing a\ncomplete library for building idiomatic Kotlin applications. You can\nlearn how to develop asynchronous applications in this documentation for\nthe Kotlin Coroutine Driver, or you can view the Kotlin Sync\nDriver documentation to learn more about synchronous\nprogramming. Every MongoClient instance has a built-in connection pool for each server\nin your MongoDB topology. Connection pools open sockets on demand to\nsupport concurrent MongoDB operations in your multi-threaded application. The maximum size of each connection pool is set by the maxPoolSize option, which\ndefaults to 100 . If the number of in-use connections to a server reaches\nthe value of maxPoolSize , the next request to that server will wait\nuntil a connection becomes available. Each MongoClient instance opens two additional sockets per server in your\nMongoDB topology for monitoring the server's state. For example, a client connected to a 3-node replica set opens 6\nmonitoring sockets. It also opens as many sockets as needed to support\nan application's threads on each server, up to\nthe value of maxPoolSize . If maxPoolSize is 100 and the\napplication only uses the primary (the default), then only the primary\nconnection pool grows and there can be at most 106 total connections. If the\napplication uses a read preference to query the\nsecondary nodes, their pools also grow and there can be 306 total connections. Additionally, connection pools are rate-limited such that each connection pool\ncan only create, at maximum, the value of maxConnecting connections\nin parallel at any time. Any additional thread stops waiting in the\nfollowing cases: You can set the minimum number of concurrent connections to\neach server with the minPoolSize option, which defaults to 0 .\nThe connection pool will be initialized with this number of sockets. If\nsockets are closed due to any network errors, causing the total number\nof sockets (both in use and idle) to drop below the minimum, more\nsockets are opened until the minimum is reached. You can set the maximum number of milliseconds that a connection can\nremain idle in the pool before being removed and replaced with\nthe maxIdleTimeMS option, which defaults to 0 (no limit). The following default configuration for a MongoClient works for most\napplications: Create a client once for each process, and reuse it for all\noperations. It is a common mistake to create a new client for each\nrequest, which is very inefficient. To support high numbers of concurrent MongoDB operations\nwithin one process, you can increase maxPoolSize . Once the pool\nreaches its maximum size, additional threads wait for sockets\nto become available. The driver does not limit the number of threads that\ncan wait for sockets to become available, and it is the application's\nresponsibility to limit the size of its pool to bound queuing\nduring a load spike. Threads wait for the amount of time specified in\nthe waitQueueTimeoutMS option, which defaults to 120000 (120 seconds). 
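As a minimal sketch of the pool options discussed here (placeholder host and values, not recommendations), these settings can also be supplied as connection string parameters:

```kotlin
import com.mongodb.kotlin.client.coroutine.MongoClient

// Minimal sketch: pool sizing and wait-queue options set directly on
// the connection string. Host and values are placeholders.
val client = MongoClient.create(
    "mongodb://<hostname>:<port>/?maxPoolSize=100&minPoolSize=5&maxIdleTimeMS=60000&waitQueueTimeoutMS=120000"
)
```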
A thread that waits more than the length of time defined by\n waitQueueTimeoutMS for a socket raises a connection error. Use this\noption if it is more important to bound the duration of operations\nduring a load spike than it is to complete every operation. When MongoClient.close() is called by any thread, the driver\ncloses all idle sockets and closes all sockets that are in\nuse as they are returned to the pool. To learn more about connecting to MongoDB, see the Connection\nGuide . One of the existing threads finishes creating a connection, or\nan existing connection is checked back into the pool. The driver's ability to reuse existing connections improves due to\nrate-limits on connection creation.", + "code": [ + { + "lang": "kotlin", + "value": "val client = MongoClient(\"\")" + } + ], + "preview": "On this page, you can find frequently asked questions and their corresponding answers.", + "tags": "troubleshooting, question, support", + "facets": { + "genre": ["reference"], + "target_product": ["drivers"], + "programming_language": ["kotlin"] + } + }, + { + "slug": "fundamentals/aggregation-expression-operations", + "title": "Aggregation Expression Operations", + "headings": [ + "Overview", + "How to Use Operations", + "Constructor Methods", + "Operations", + "Arithmetic Operations", + "Array Operations", + "Boolean Operations", + "Comparison Operations", + "Conditional Operations", + "Convenience Operations", + "Conversion Operations", + "Date Operations", + "Document Operations", + "Map Operations", + "String Operations", + "Type-Checking Operations" + ], + "paragraphs": "In this guide, you can learn how to use the MongoDB Kotlin Driver to construct\nexpressions for use in aggregation pipelines. You can perform\nexpression operations with discoverable, typesafe Java methods rather\nthan BSON documents. Because these methods follow the fluent interface\npattern, you can chain aggregation operations together to create code\nthat is both more compact and more naturally readable. The operations in this guide use methods from the\n com.mongodb.client.model.mql package.\nThese methods provide an idiomatic way to use the Query API,\nthe mechanism by which the driver interacts with a MongoDB deployment. To learn more\nabout the Query API, see the Server manual documentation . The examples in this guide assume that you include the following imports\nin your code: To access document fields in an expression, you need to reference the\ncurrent document being processed by the aggregation pipeline. Use the\n current() method to refer to this document. To access the value of a\nfield, you must use the appropriately typed method, such as\n getString() or getDate() . When you specify the type for a field,\nyou ensure that the driver provides only those methods which are\ncompatible with that type. The following code shows how to reference a\nstring field called name : To specify a value in an operation, pass it to the of() constructor method to\nconvert it to a valid type. The following code shows how to reference a\nvalue of 1.0 : To create an operation, chain a method to your field or value reference.\nYou can build more complex operations by chaining additional methods. The following example creates an operation to find patients in New\nMexico who have visited the doctor\u2019s office at least once. The operation\nperforms the following actions: The and() method links these operations so that the pipeline stage\nmatches only documents that meet both criteria. 
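As a minimal sketch (assuming the same visitDates and state fields used in this example), the combined expression can be embedded in a $match stage through expr():

```kotlin
import com.mongodb.client.model.Aggregates
import com.mongodb.client.model.Filters
import com.mongodb.client.model.mql.MqlValues.current
import com.mongodb.client.model.mql.MqlValues.of

// Minimal sketch: the New Mexico expression above, wrapped in a
// $match stage via Filters.expr(). Field names follow the example.
val matchNewMexicoPatients = Aggregates.match(
    Filters.expr(
        current()
            .getArray("visitDates").size().gt(of(0))
            .and(current().getString("state").eq(of("New Mexico")))
    )
)
```

The resulting stage can then be passed to the collection's aggregate() method in a list, like any other builder stage.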
While some aggregation stages, such as group() , accept operations\ndirectly, other stages expect that you first include your operation in a\nmethod such as computed() or expr() . These methods, which take\nvalues of type TExpression , allow you to use your expressions in\ncertain aggregations. To complete your aggregation pipeline stage, include your expression\nin an aggregates builder method. The following list provides examples of\nhow to include your expression in common aggregates builder methods: To learn more about these methods, see the\n Aggregation guide . The examples use the listOf() method to create a list of\naggregation stages. This list is passed to the aggregate() method of\n MongoCollection . Checks if the size of the visitDates array is greater than 0 \nby using the gt() method Checks if the state field value is \u201cNew Mexico\u201d by using the\n eq() method match(expr()) project(fields(computed(\"\", ))) group() You can use these constructor methods to define values for use in Kotlin aggregation\nexpressions. Refer to any of the sections in Operations for examples using these methods. Method Description current() References the current document being processed by the aggregation pipeline. currentAsMap() References the current document being processed by the aggregation pipeline as a map value. of() Returns an MqlValue type corresponding to the provided primitive. ofArray() Returns an array of MqlValue types corresponding to the provided array of primitives. ofEntry() Returns an entry value. ofMap() Returns an empty map value. ofNull() Returns the null value as exists in the Query API. When you provide a value to one of these methods, the driver treats\nit literally. For example, of(\"$x\") represents the string value\n \"$x\" , rather than a field named x . The following sections provide information and examples for\naggregation expression operations available in the driver.\nThe operations are categorized by purpose and functionality. Each section has a table that describes aggregation methods\navailable in the driver and corresponding expression operators in the\nQuery API. The method names link to API documentation and the\naggregation pipeline operator names link to descriptions and examples in\nthe Server manual documentation. While each method is effectively\nequivalent to the corresponding Query API expression, they may differ in\nexpected parameters and implementation. The driver generates a Query API expression that may be different\nfrom the Query API expression provided in each example. However,\nboth expressions will produce the same aggregation result. The driver does not provide methods for all aggregation pipeline operators in\nthe Query API. If you need to use an unsupported operation in an\naggregation, you must define the entire expression using the BSON Document \ntype. To learn more about the Document type, see Documents . You can perform an arithmetic operation on a value of type MqlInteger or\n MqlNumber using the methods described in this section. Suppose you have weather data for a specific year that includes the\nprecipitation measurement (in inches) for each day. You want to find the average\nprecipitation, in millimeters, for each month. The multiply() operator multiplies the precipitation field by\n 25.4 to convert the value to millimeters. The avg() accumulator method\nreturns the average as the avgPrecipMM field. The group() method\ngroups the values by month given in each document's date field.
The following code shows the pipeline for this aggregation: The following code provides an equivalent aggregation pipeline in the\nQuery API: Method Aggregation Pipeline Operator $abs $add divide() $divide $multiply round() $round $subtract You can perform an array operation on a value of type MqlArray \nusing the methods described in this section. Suppose you have a collection of movies, each of which contains an array\nof nested documents for upcoming showtimes. Each nested document\ncontains an array that represents the total number of seats in the\ntheater, where the first array entry is the number of premium seats and\nthe second entry is the number of regular seats. Each nested document\nalso contains the number of tickets that have already been bought for\nthe showtime. A document in this collection might resemble the\nfollowing: The filter() method displays only the results matching the provided\npredicate. In this case, the predicate uses sum() to calculate the\ntotal number of seats and compares that value to the number of ticketsBought \nwith lt() . The project() method stores these filtered results as a new\n availableShowtimes array. The following code shows the pipeline for this aggregation: The following code provides an equivalent aggregation pipeline in\nthe Query API: Method Aggregation Pipeline Operator all() $allElementsTrue any() $anyElementTrue concat() $concatArrays concatArrays() $concatArrays contains() $in distinct() $setUnion elementAt() $arrayElemAt filter() $filter first() $first joinStrings() $concat last() $last map() $map max() $max maxN() $maxN min() $min minN() $minN multiply() $multiply size() $size slice() $slice sum() $sum union() $setUnion unionArrays() $setUnion You must specify the type of the array that you retrieve with the\n getArray() method if you need to work with the values of the\narray as their specific type. In this example, we specify that the seats array contains values\nof type MqlDocument so that we can extract nested fields from\neach array entry. To improve readability, the previous example assigns intermediary values to\nthe totalSeats and isAvailable variables. If you don't pull\nout these intermediary values into variables, the code still produces\nequivalent results. You can perform a boolean operation on a value of type MqlBoolean \nusing the methods described in this section. Suppose you want to classify very low or high weather temperature\nreadings (in degrees Fahrenheit) as extreme. The or() operator checks to see if temperatures are extreme by comparing\nthe temperature field to predefined values with lt() and gt() .\nThe project() method records this result in the extremeTemp field. The following code shows the pipeline for this aggregation: The following code provides an equivalent aggregation pipeline in\nthe Query API: Method Aggregation Pipeline Operator and() $and not() $not or() $or You can perform a comparison operation on a value of type MqlValue \nusing the methods described in this section. The following example shows a pipeline that matches all the documents\nwhere the location field has the value \"California\" : The following code provides an equivalent aggregation pipeline in\nthe Query API: The cond() method is similar to the ternary operator in Java and you\nshould use it for simple branches based on a boolean value. You should use\nthe switchOn() methods for more complex comparisons such as performing\npattern matching on the value type or other arbitrary checks on the value. 
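As a minimal sketch of that distinction (using a hypothetical boolean isRush field), cond() expresses a simple two-way branch, analogous to a ternary expression:

```kotlin
import com.mongodb.client.model.mql.MqlValues.current
import com.mongodb.client.model.mql.MqlValues.of

// Minimal sketch: cond() as a two-way branch on a hypothetical
// boolean isRush field; switchOn() would handle richer branching.
val shippingLabel = current()
    .getBoolean("isRush")
    .cond(of("expedited"), of("standard"))
```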
Method Aggregation Pipeline Operator eq() $eq gt() $gt gte() $gte lt() $lt lte() $lte max() $max min() $min ne() $ne You can perform a conditional operation using the methods described in\nthis section. Suppose you have a collection of customers with their membership information.\nOriginally, customers were either members or not. Over time, membership levels\nwere introduced and used the same field. The information stored in this field\ncan be one of a few different types, and you want to create a standardized value\nindicating their membership level. The switchOn() method checks each clause in order. If the value matches the\ntype indicated by the clause, that clause determines the string value\ncorresponding to the membership level. If the original value is a string, it\nrepresents the membership level and that value is used. If the data type is a\nboolean, it returns either Gold or Guest for the membership level. If\nthe data type is an array, it returns the most recent string in the array, which\nmatches the most recent membership level. If the member field is an\nunknown type, the switchOn() method provides a default value of Guest . The following code shows the pipeline for this aggregation: The following code provides an equivalent aggregation pipeline in\nthe Query API: Method Aggregation Pipeline Operator cond() $cond switchOn() $switch You can apply custom functions to values of type\n MqlValue using the methods described in this section. To improve readability and allow for code reuse, you can move redundant\ncode into static methods. However, it is not possible to directly chain\nstatic methods in Kotlin. The passTo() method lets you chain values\ninto custom static methods. Suppose you need to determine how a class is performing against some\nbenchmarks. You want to find the average final grade for each class and\ncompare it against the benchmark values. The following custom method gradeAverage() takes an array of documents and\nthe name of an integer field shared across those documents. It calculates the\naverage of that field across all the documents in the provided array. The evaluate() method compares a provided value to\ntwo provided range limits and generates a response string based on\nhow the values compare: The passArrayTo() method takes all of the students and calculates the\naverage score by using the gradeAverage() method. Then, the\n passNumberTo() method uses the evaluate() method to determine how the\nclasses are performing. This example stores the result as the evaluation \nfield using the project() method. The following code shows the pipeline for this aggregation: The following code provides an equivalent aggregation pipeline in\nthe Query API: Method Aggregation Pipeline Operator passTo() No corresponding operator One advantage of using the passTo() method is that you can reuse\nyour custom methods for other aggregations. You could\nuse the gradeAverage() method to find the average of grades for\ngroups of students filtered by, for example, entry year or district, not just their\nclass. You could use the evaluate() method to evaluate, for\nexample, an individual student's performance, or an entire school's or\ndistrict's performance. You can perform a conversion operation to convert between certain MqlValue \ntypes using the methods described in this section. Suppose you have a collection of student data that includes\ntheir graduation years, which are stored as strings.
You want to\ncalculate the year of their five-year reunion and store this value in a\nnew field. The parseInteger() method converts the graduationYear to an integer\nso that add() can calculate the reunion year. The addFields() method\nstores this result as a new reunionYear field. The following code shows the pipeline for this aggregation: The following code provides an equivalent aggregation pipeline in\nthe Query API: Method Aggregation Pipeline Operator asDocument() No corresponding operator asMap() No corresponding operator asString() for MqlDate $dateToString asString() for MqlValue $toString millisecondsAsDate() $toDate parseDate() $dateFromString parseInteger() $toInt You can perform a date operation on a value of type MqlDate \nusing the methods described in this section. Suppose you have data about package deliveries and need to match\ndeliveries that occurred on any Monday in the \"America/New_York\" time\nzone. If the deliveryDate field contains any string values representing\nvalid dates, such as \"2018-01-15T16:00:00Z\" or \"Jan 15, 2018, 12:00\nPM EST\" , you can use the parseDate() method to convert the strings\ninto date types. The dayOfWeek() method determines which day of the week it is and converts\nit to a number based on which day is a Monday according to the\n \"America/New_York\" parameter. The eq() method compares this value to\n 2 , which corresponds to Monday based on the provided timezone parameter. The following code shows the pipeline for this aggregation: The following code provides an equivalent aggregation pipeline in\nthe Query API: Method Aggregation Pipeline Operator dayOfMonth() $dayOfMonth dayOfWeek() $dayOfWeek dayOfYear() $dayOfYear hour() $hour millisecond() $millisecond minute() $minute month() $month second() $second week() $week year() $year You can perform a document operation on a value of type MqlDocument \nusing the methods described in this section. Suppose you have a collection of legacy customer data which includes\naddresses as child documents under the mailing.address field. You want\nto find all the customers who currently live in Washington state. A\ndocument in this collection might resemble the following: The getDocument() method retrieves the mailing.address field as a\ndocument so the nested state field can be retrieved with the\n getString() method. The eq() method checks if the value of the\n state field is \"WA\" . The following code shows the pipeline for this aggregation: The following code provides an equivalent aggregation pipeline in\nthe Query API: Method Aggregation Pipeline Operator $getField hasField() No corresponding operator merge() $mergeObjects setField() $setField unsetField() $unsetField You can perform a map operation on a value of either type MqlMap or\n MqlEntry using the methods described in this section. Suppose you have a collection of inventory data where each document represents\nan individual item you're responsible for supplying. Each document contains a\nfield that is a map of all your warehouses and how many copies they currently\nhave in their inventory of the item. You want to determine the total number of\ncopies of items you have across all of your warehouses. A document in this\ncollection might resemble the following: The entries() method returns the map entries in the warehouses \nfield as an array. 
The sum() method calculates the total value of items\nbased on the values in the array retrieved with the getValue() method.\nThis example stores the result as the new totalInventory field using the\n project() method. The following code shows the pipeline for this aggregation: The following code provides an equivalent aggregation pipeline in\nthe Query API: You should represent data as a map if the data maps\narbitrary keys such as dates or item IDs to values. Method Aggregation Pipeline Operator entries() $objectToArray get() No corresponding operator getKey() No corresponding operator getValue() No corresponding operator has() No corresponding operator merge() No corresponding operator set() No corresponding operator setKey() No corresponding operator setValue() No corresponding operator unset() No corresponding operator You can perform a string operation on a value of type MqlString \nusing the methods described in this section. Suppose you need to generate lowercase usernames for employees of a\ncompany from the employees' last names and employee IDs. The append() method combines the lastName and employeeID fields into\na single username, while the toLower() method makes the entire username\nlowercase. This example stores the result as a new username field using\nthe project() method. The following code shows the pipeline for this aggregation: The following code provides an equivalent aggregation pipeline in\nthe Query API: Method Aggregation Pipeline Operator append() $concat length() $strLenCP lengthBytes() $strLenBytes substr() $substrCP substrBytes() $substrBytes toLower() $toLower toUpper() $toUpper You can perform a type-check operation on a value of type MqlValue \nusing the methods described in this section. These methods do not return boolean values. Instead, you provide a default value\nthat matches the type specified by the method. If the checked value\nmatches the method type, the checked value is returned. Otherwise, the supplied\ndefault value is returned. If you want to program branching logic based on the\ndata type, see switchOn() . Suppose you have a collection of rating data. An early version of the review\nschema allowed users to submit negative reviews without a star rating. You want to\nconvert any of these reviews to have the minimum\nvalue of 1 star. The isNumberOr() method returns either the value of rating , or\na value of 1 if rating is not a number or is null. The\n project() method returns this value as a new numericalRating field.
The following code shows the pipeline for this aggregation: The following code provides an equivalent aggregation pipeline in\nthe Query API: Method Aggregation Pipeline Operator isArrayOr() No corresponding operator isBooleanOr() No corresponding operator isDateOr() No corresponding operator isDocumentOr() No corresponding operator isIntegerOr() No corresponding operator isMapOr() No corresponding operator isNumberOr() No corresponding operator isStringOr() No corresponding operator", + "code": [ + { + "lang": "kotlin", + "value": "import com.mongodb.client.model.Aggregates\nimport com.mongodb.client.model.Accumulators\nimport com.mongodb.client.model.Projections\nimport com.mongodb.client.model.Filters\nimport com.mongodb.client.model.mql.MqlValues" + }, + { + "lang": "kotlin", + "value": "current().getString(\"name\")" + }, + { + "lang": "kotlin", + "value": "of(1.0)" + }, + { + "lang": "kotlin", + "value": "current()\n .getArray(\"visitDates\")\n .size()\n .gt(of(0))\n .and(current()\n .getString(\"state\")\n .eq(of(\"New Mexico\")))" + }, + { + "lang": "javascript", + "value": "[ { $group: {\n _id: { $month: \"$date\" },\n avgPrecipMM: {\n $avg: { $multiply: [\"$precipitation\", 25.4] } }\n} } ]" + }, + { + "lang": "kotlin", + "value": "val month = current().getDate(\"date\").month(of(\"UTC\"))\nval precip = current().getInteger(\"precipitation\")\n\nlistOf(\n Aggregates.group(\n month,\n Accumulators.avg(\"avgPrecipMM\", precip.multiply(25.4))\n))\n" + }, + { + "lang": "json", + "value": "{\n \"_id\": ...,\n \"movie\": \"Hamlet\",\n \"showtimes\": [\n {\n \"date\": \"May 14, 2023, 12:00 PM\",\n \"seats\": [ 20, 80 ],\n \"ticketsBought\": 100\n },\n {\n \"date\": \"May 20, 2023, 08:00 PM\",\n \"seats\": [ 10, 40 ],\n \"ticketsBought\": 34\n }]\n}" + }, + { + "lang": "javascript", + "value": "[ { $project: {\n availableShowtimes: {\n $filter: {\n input: \"$showtimes\",\n as: \"showtime\",\n cond: { $lt: [ \"$$showtime.ticketsBought\", { $sum: \"$$showtime.seats\" } ] }\n } }\n} } ]" + }, + { + "lang": "kotlin", + "value": "val showtimes = current().getArray(\"showtimes\")\n\nlistOf(\n Aggregates.project(\n Projections.fields(\n Projections.computed(\"availableShowtimes\", showtimes\n .filter { showtime ->\n val seats = showtime.getArray(\"seats\")\n val totalSeats = seats.sum { n -> n }\n val ticketsBought = showtime.getInteger(\"ticketsBought\")\n val isAvailable = ticketsBought.lt(totalSeats)\n isAvailable\n })\n)))\n" + }, + { + "lang": "javascript", + "value": "[ { $project: {\n extremeTemp: { $or: [ { $lt: [\"$temperature\", 10] },\n { $gt: [\"$temperature\", 95] } ] }\n} } ]" + }, + { + "lang": "kotlin", + "value": "val temperature = current().getInteger(\"temperature\")\n\nlistOf(\n Aggregates.project(\n Projections.fields(\n Projections.computed(\"extremeTemp\", temperature\n .lt(of(10))\n .or(temperature.gt(of(95))))\n)))\n" + }, + { + "lang": "javascript", + "value": "[ { $match: { location: { $eq: \"California\" } } } ]" + }, + { + "lang": "kotlin", + "value": "val location = current().getString(\"location\")\n\nlistOf(\n Aggregates.match(\n Filters.expr(location.eq(of(\"California\")))\n))\n" + }, + { + "lang": "javascript", + "value": "[ { $project: {\n membershipLevel: {\n $switch: {\n branches: [\n { case: { $eq: [ { $type: \"$member\" }, \"string\" ] }, then: \"$member\" },\n { case: { $eq: [ { $type: \"$member\" }, \"bool\" ] }, then: { $cond: {\n if: \"$member\",\n then: \"Gold\",\n else: \"Guest\" } } },\n { case: { $eq: [ { $type: \"$member\" }, \"array\" ] }, then: 
{ $last: \"$member\" } }\n ],\n default: \"Guest\" } }\n} } ]" + }, + { + "lang": "kotlin", + "value": "val member = current().getField(\"member\")\n\nlistOf(\n Aggregates.project(\n Projections.fields(\n Projections.computed(\"membershipLevel\",\n member.switchOn{field -> field\n .isString{s-> s}\n .isBoolean{b -> b.cond(of(\"Gold\"), of(\"Guest\"))}\n .isArray { a -> a.last()}\n .defaults{ d -> of(\"Guest\")}})\n)))\n" + }, + { + "lang": "javascript", + "value": "[ { $project: {\n evaluation: { $switch: {\n branches: [\n { case: { $lte: [ { $avg: \"$students.finalGrade\" }, 70 ] },\n then: \"Needs improvement\"\n },\n { case: { $lte: [ { $avg: \"$students.finalGrade\" }, 85 ] },\n then: \"Meets expectations\"\n }\n ],\n default: \"Exceeds expectations\" } }\n} } ]" + }, + { + "lang": "kotlin", + "value": "fun gradeAverage(students: MqlArray, fieldName: String): MqlNumber {\n val sum = students.sum{ student -> student.getInteger(fieldName) }\n val avg = sum.divide(students.size())\n return avg\n}\n\nfun evaluate(grade: MqlNumber, cutoff1: MqlNumber, cutoff2: MqlNumber): MqlString {\n val message = grade.switchOn{ on -> on\n .lte(cutoff1) { g -> of(\"Needs improvement\") }\n .lte(cutoff2) { g -> of(\"Meets expectations\") }\n .defaults{g -> of(\"Exceeds expectations\")}}\n return message\n}\n" + }, + { + "lang": "kotlin", + "value": "val students = current().getArray(\"students\")\n\nlistOf(\n Aggregates.project(\n Projections.fields(\n Projections.computed(\"evaluation\", students\n .passArrayTo { s -> gradeAverage(s, \"finalGrade\") }\n .passNumberTo { grade -> evaluate(grade, of(70), of(85)) })\n)))\n" + }, + { + "lang": "javascript", + "value": "[ { $addFields: {\n reunionYear: {\n $add: [ { $toInt: \"$graduationYear\" }, 5 ] }\n} } ]" + }, + { + "lang": "kotlin", + "value": "val graduationYear = current().getString(\"graduationYear\")\n\nlistOf(\n Aggregates.addFields(\n Field(\"reunionYear\",\n graduationYear\n .parseInteger()\n .add(5))\n))\n" + }, + { + "lang": "javascript", + "value": "[ { $match: {\n $expr: {\n $eq: [ {\n $dayOfWeek: {\n date: { $dateFromString: { dateString: \"$deliveryDate\" } },\n timezone: \"America/New_York\" }},\n 2\n ] }\n} } ]" + }, + { + "lang": "kotlin", + "value": "val deliveryDate = current().getString(\"deliveryDate\")\n\nlistOf(\n Aggregates.match(\n Filters.expr(deliveryDate\n .parseDate()\n .dayOfWeek(of(\"America/New_York\"))\n .eq(of(2))\n)))\n" + }, + { + "lang": "json", + "value": "{\n \"_id\": ...,\n \"customer.name\": \"Mary Kenneth Keller\",\n \"mailing.address\":\n {\n \"street\": \"601 Mongo Drive\",\n \"city\": \"Vasqueztown\",\n \"state\": \"CO\",\n \"zip\": 27017\n }\n}" + }, + { + "lang": "javascript", + "value": "[\n { $match: {\n $expr: {\n $eq: [{\n $getField: {\n input: { $getField: { input: \"$$CURRENT\", field: \"mailing.address\"}},\n field: \"state\" }},\n \"WA\" ]\n}}}]" + }, + { + "lang": "kotlin", + "value": "val address = current().getDocument(\"mailing.address\")\n\nlistOf(\n Aggregates.match(\n Filters.expr(address\n .getString(\"state\")\n .eq(of(\"WA\"))\n)))\n" + }, + { + "lang": "json", + "value": "{\n \"_id\": ...,\n \"item\": \"notebook\"\n \"warehouses\": [\n { \"Atlanta\", 50 },\n { \"Chicago\", 0 },\n { \"Portland\", 120 },\n { \"Dallas\", 6 }\n ]\n}" + }, + { + "lang": "javascript", + "value": "[ { $project: {\n totalInventory: {\n $sum: {\n $getField: { $objectToArray: \"$warehouses\" },\n } }\n} } ]" + }, + { + "lang": "kotlin", + "value": "val warehouses = current().getMap(\"warehouses\")\n\nlistOf(\n 
Aggregates.project(\n Projections.fields(\n Projections.computed(\"totalInventory\", warehouses\n .entries()\n .sum { v -> v.getValue() })\n)))\n" + }, + { + "lang": "javascript", + "value": "[ { $project: {\n username: {\n $toLower: { $concat: [\"$lastName\", \"$employeeID\"] } }\n} } ]" + }, + { + "lang": "kotlin", + "value": "val lastName = current().getString(\"lastName\")\nval employeeID = current().getString(\"employeeID\")\n\nlistOf(\n Aggregates.project(\n Projections.fields(\n Projections.computed(\"username\", lastName\n .append(employeeID)\n .toLower())\n)))\n" + }, + { + "lang": "javascript", + "value": "[ { $project: {\n numericalRating: {\n $cond: { if: { $isNumber: \"$rating\" },\n then: \"$rating\",\n else: 1\n } }\n} } ]" + }, + { + "lang": "kotlin", + "value": "val rating = current().getField(\"rating\")\n\nlistOf(\n Aggregates.project(\n Projections.fields(\n Projections.computed(\"numericalRating\", rating\n .isNumberOr(of(1)))\n)))\n" + } + ], + "preview": "In this guide, you can learn how to use the MongoDB Kotlin Driver to construct\nexpressions for use in aggregation pipelines. You can perform\nexpression operations with discoverable, typesafe Java methods rather\nthan BSON documents. Because these methods follow the fluent interface\npattern, you can chain aggregation operations together to create code\nthat is both more compact and more naturally readable.", + "tags": null, + "facets": { + "target_product": ["drivers"], + "programming_language": ["kotlin"] + } + }, + { + "slug": "fundamentals/aggregation", + "title": "Aggregation", + "headings": [ + "Overview", + "Aggregation and Find Operations Compared", + "Useful References", + "Example Data", + "Basic Aggregation", + "Explain Aggregation", + "Aggregation Expressions" + ], + "paragraphs": "In this guide, you can learn how to use aggregation operations in the MongoDB Kotlin driver. Aggregation operations process data in your MongoDB collections and return computed results. MongoDB's Aggregation\npipeline, part of the Query API, is modeled on the concept of data processing pipelines. Documents enter a multi-staged pipeline that\ntransforms the documents into an aggregated result. Another way to think of aggregation is like a car factory. Within the car factory is an assembly line, along which\nare assembly stations with specialized tools to do a specific job, like drills and welders. Raw parts enter the factory,\nwhich are then transformed and assembled into a finished product. The aggregation pipeline is the assembly line, aggregation stages are the assembly stations, and\n operator expressions are the specialized tools. Using find operations, you can: Using aggregation operations, you can: Aggregation operations have some limitations you must keep in mind: select what documents to return select what fields to return sort the results perform all find operations rename fields calculate fields summarize data group values Returned documents must not violate the BSON document size limit \nof 16 megabytes. Pipeline stages have a memory limit of 100 megabytes by default. If required,\nyou may exceed this limit by using the\n allowDiskUse \nmethod. The $graphLookup stage has a strict memory limit of 100 megabytes\nand will ignore allowDiskUse . 
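As a minimal sketch of the allowDiskUse() escape hatch mentioned above (the collection handle and pipeline are placeholders), the option chains onto the flow returned by aggregate():

```kotlin
import com.mongodb.kotlin.client.coroutine.MongoCollection
import org.bson.Document
import org.bson.conversions.Bson

// Minimal sketch: opt in to temporary disk use for a pipeline whose
// stages may exceed the 100 MB memory limit. Collection and pipeline
// are placeholders; $graphLookup ignores this option, as noted above.
suspend fun runLargeAggregation(collection: MongoCollection<Document>, pipeline: List<Bson>) {
    collection.aggregate(pipeline)
        .allowDiskUse(true)
        .collect { println(it.toJson()) }
}
```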
Aggregation pipeline Aggregation stages Operator expressions Aggregation Builders The examples use a collection of the following data in MongoDB: The data in the collection is modeled by the following Restaurant data class: To perform an aggregation, pass a list of aggregation stages to the\n MongoCollection.aggregate() method. The Kotlin driver provides the\n Aggregates \nhelper class that contains builders for aggregation stages. In the following example, the aggregation pipeline: For more information about the methods and classes mentioned in this section,\nsee the following API Documentation: Uses a $match stage to filter for documents whose\n categories array field contains the element Bakery . The example uses\n Aggregates.match to build the $match stage. Uses a $group stage to group the matching documents by the stars \nfield, accumulating a count of documents for each distinct value of stars . You can build the expressions used in this example using the aggregation builders . MongoCollection.aggregate() Aggregates.match To view information about how MongoDB executes your operation, use the\n explain() method of the AggregateFlow class. The explain() \nmethod returns execution plans and performance statistics. An execution\nplan is a potential way MongoDB can complete an operation.\nThe explain() method provides both the winning plan (the plan MongoDB\nexecuted) and rejected plans. In the following example, we print the JSON representation of the\nwinning plans for aggregation stages that produce execution plans: For more information about the topics mentioned in this section, see the\nfollowing resources: You can specify the level of detail of your explanation by passing a\nverbosity level to the explain() method. The following table shows all verbosity levels for explanations and\ntheir intended use cases: Verbosity Level Use Case ALL_PLANS_EXECUTIONS You want to know which plan MongoDB will choose to run your query. EXECUTION_STATS You want to know if your query is performing well. QUERY_PLANNER You have a problem with your query and you want as much information\nas possible to diagnose the issue. Explain Output Server Manual Entry Query Plans Server Manual Entry ExplainVerbosity API Documentation explain() API Documentation AggregateFlow API Documentation The Kotlin driver provides builders for accumulator expressions for use with\n $group . You must declare all other expressions in JSON format or\ncompatible document format. In the following example, the aggregation pipeline uses a\n $project stage and various Projections to return the name \nfield and the calculated field firstCategory whose value is the\nfirst element in the categories field. For more information about the methods and classes mentioned in this section,\nsee the following API Documentation: The syntax in either of the following examples will define an $arrayElemAt \nexpression. The $ in front of \"categories\" tells MongoDB that this is a field path ,\nusing the \"categories\" field from the input document. 
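As a minimal sketch of selecting a verbosity level (the collection handle and pipeline are placeholders), a different ExplainVerbosity value can be passed to explain():

```kotlin
import com.mongodb.ExplainVerbosity
import com.mongodb.kotlin.client.coroutine.MongoCollection
import org.bson.Document
import org.bson.conversions.Bson

// Minimal sketch: request explain output at a chosen verbosity level.
// Collection and pipeline are placeholders.
suspend fun printExplainOutput(collection: MongoCollection<Document>, pipeline: List<Bson>) {
    val explanation = collection.aggregate(pipeline)
        .explain(ExplainVerbosity.QUERY_PLANNER)
    println(explanation.toJson())
}
```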
Accumulators $group $project Projections", + "code": [ + { + "lang": "json", + "value": "[\n {\"name\": \"Sun Bakery Trattoria\", \"contact\": {\"phone\": \"386-555-0189\", \"email\": \"SunBakeryTrattoria@example.org\", \"location\": [-74.0056649, 40.7452371]}, \"stars\": 4, \"categories\": [\"Pizza\", \"Pasta\", \"Italian\", \"Coffee\", \"Sandwiches\"]},\n {\"name\": \"Blue Bagels Grill\", \"contact\": {\"phone\": \"786-555-0102\", \"email\": \"BlueBagelsGrill@example.com\", \"location\": [-73.92506, 40.8275556]}, \"stars\": 3, \"categories\": [\"Bagels\", \"Cookies\", \"Sandwiches\"]},\n {\"name\": \"XYZ Bagels Restaurant\", \"contact\": {\"phone\": \"435-555-0190\", \"email\": \"XYZBagelsRestaurant@example.net\", \"location\": [-74.0707363, 40.59321569999999]}, \"stars\": 4, \"categories\": [\"Bagels\", \"Sandwiches\", \"Coffee\"]},\n {\"name\": \"Hot Bakery Cafe\", \"contact\": {\"phone\": \"264-555-0171\", \"email\": \"HotBakeryCafe@example.net\", \"location\": [-73.96485799999999, 40.761899]}, \"stars\": 4, \"categories\": [\"Bakery\", \"Cafe\", \"Coffee\", \"Dessert\"]},\n {\"name\": \"Green Feast Pizzeria\", \"contact\": {\"phone\": \"840-555-0102\", \"email\": \"GreenFeastPizzeria@example.com\", \"location\": [-74.1220973, 40.6129407]}, \"stars\": 2, \"categories\": [\"Pizza\", \"Italian\"]},\n {\"name\": \"ZZZ Pasta Buffet\", \"contact\": {\"phone\": \"769-555-0152\", \"email\": \"ZZZPastaBuffet@example.com\", \"location\": [-73.9446421, 40.7253944]}, \"stars\": 0, \"categories\": [\"Pasta\", \"Italian\", \"Buffet\", \"Cafeteria\"]},\n {\"name\": \"XYZ Coffee Bar\", \"contact\": {\"phone\": \"644-555-0193\", \"email\": \"XYZCoffeeBar@example.net\", \"location\": [-74.0166091, 40.6284767]}, \"stars\": 5, \"categories\": [\"Coffee\", \"Cafe\", \"Bakery\", \"Chocolates\"]},\n {\"name\": \"456 Steak Restaurant\", \"contact\": {\"phone\": \"990-555-0165\", \"email\": \"456SteakRestaurant@example.com\", \"location\": [-73.9365108, 40.8497077]}, \"stars\": 0, \"categories\": [\"Steak\", \"Seafood\"]},\n {\"name\": \"456 Cookies Shop\", \"contact\": {\"phone\": \"604-555-0149\", \"email\": \"456CookiesShop@example.org\", \"location\": [-73.8850023, 40.7494272]}, \"stars\": 4, \"categories\": [\"Bakery\", \"Cookies\", \"Cake\", \"Coffee\"]},\n {\"name\": \"XYZ Steak Buffet\", \"contact\": {\"phone\": \"229-555-0197\", \"email\": \"XYZSteakBuffet@example.org\", \"location\": [-73.9799932, 40.7660886]}, \"stars\": 3, \"categories\": [\"Steak\", \"Salad\", \"Chinese\"]}\n]" + }, + { + "lang": "kotlin", + "value": "data class Restaurant(\n val name: String,\n val contact: Contact,\n val stars: Int,\n val categories: List\n) {\n data class Contact(\n val phone: String,\n val email: String,\n val location: List\n )\n}\n" + }, + { + "lang": "kotlin", + "value": "data class Results(@BsonId val id: Int, val count: Int)\n\nval resultsFlow = collection.aggregate(\n listOf(\n Aggregates.match(Filters.eq(Restaurant::categories.name, \"Bakery\")),\n Aggregates.group(\"\\$${Restaurant::stars.name}\",\n Accumulators.sum(\"count\", 1))\n )\n)\n\nresultsFlow.collect { println(it) }\n" + }, + { + "lang": null, + "value": "Results(id=4, count=2)\nResults(id=5, count=1)" + }, + { + "lang": "kotlin", + "value": "data class Results (val name: String, val count: Int)\n\nval explanation = collection.aggregate(\n listOf(\n Aggregates.match(Filters.eq(Restaurant::categories.name, \"bakery\")),\n Aggregates.group(\"\\$${Restaurant::stars.name}\", Accumulators.sum(\"count\", 1))\n 
)\n).explain(ExplainVerbosity.EXECUTION_STATS)\n\n// Prettyprint the output\nprintln(explanation.toJson(JsonWriterSettings.builder().indent(true).build()))\n" + }, + { + "lang": "javascript", + "value": "{\n \"explainVersion\": \"2\",\n \"queryPlanner\": {\n // ...\n },\n \"command\": {\n // ...\n },\n // ...\n}" + }, + { + "lang": "kotlin", + "value": "Document(\"\\$arrayElemAt\", listOf(\"\\$categories\", 0))\n// is equivalent to\nDocument.parse(\"{ \\$arrayElemAt: ['\\$categories', 0] }\")\n" + }, + { + "lang": "kotlin", + "value": "data class Results(val name: String, val firstCategory: String)\n\nval resultsFlow = collection.aggregate(\n listOf(\n Aggregates.project(\n Projections.fields(\n Projections.excludeId(),\n Projections.include(\"name\"),\n Projections.computed(\n \"firstCategory\",\n Document(\"\\$arrayElemAt\", listOf(\"\\$categories\", 0))\n )\n )\n )\n )\n)\n\nresultsFlow.collect { println(it) }\n" + }, + { + "lang": "console", + "value": "Results(name=Sun Bakery Trattoria, firstCategory=Pizza)\nResults(name=Blue Bagels Grill, firstCategory=Bagels)\nResults(name=XYZ Bagels Restaurant, firstCategory=Bagels)\nResults(name=Hot Bakery Cafe, firstCategory=Bakery)\nResults(name=Green Feast Pizzeria, firstCategory=Pizza)\nResults(name=ZZZ Pasta Buffet, firstCategory=Pasta)\nResults(name=XYZ Coffee Bar, firstCategory=Coffee)\nResults(name=456 Steak Restaurant, firstCategory=Steak)\nResults(name=456 Cookies Shop, firstCategory=Bakery)\nResults(name=XYZ Steak Buffet, firstCategory=Steak)" + } + ], + "preview": "In this guide, you can learn how to use aggregation operations in the MongoDB Kotlin driver.", + "tags": null, + "facets": { + "target_product": ["drivers"], + "programming_language": ["kotlin"] + } + }, + { + "slug": "fundamentals/auth", + "title": "Authentication Mechanisms", + "headings": [ + "Overview", + "Specify an Authentication Mechanism", + "Mechanisms", + "Default", + "SCRAM-SHA-256", + "SCRAM-SHA-1", + "MONGODB-CR", + "MONGODB-AWS", + "AWS SDK", + "Specify Your Credentials in the Environment", + "Specify Your Credentials in a MongoCredential", + "X.509" + ], + "paragraphs": "In this guide, you can learn how to authenticate with MongoDB using each\n authentication mechanism available in the MongoDB Community Edition.\nAuthentication mechanisms are processes by which the driver and server\nconfirm identity and establish trust to ensure security. The mechanisms that you can use with the latest version of MongoDB Community\nEdition are as follows: To authenticate using Kerberos or LDAP , see the\n Enterprise Authentication Mechanisms guide . For more information on establishing a connection to your MongoDB cluster,\nread our Connection Guide . Default SCRAM-SHA-256 SCRAM-SHA-1 MONGODB-CR MONGODB-AWS X.509 You can specify your authentication mechanism and credentials when connecting\nto MongoDB using either of the following: A connection string (also known as a connection URI ) specifies how to\nconnect and authenticate to your MongoDB cluster. To authenticate using a connection string, include your settings in your\nconnection string and pass it to the MongoClient.create() method to\ninstantiate your MongoClient . The Connection String \ntab in each section provides the syntax for authenticating using a\n connection string . Alternatively, you can use the MongoCredential class to specify your\nauthentication details. The MongoCredential class contains static factory\nmethods that construct instances containing your authentication mechanism and\ncredentials. 
When you use the MongoCredential helper class, you need\nto use the MongoClientSettings.Builder class to configure your\nconnection settings when constructing your MongoClient . The\n MongoCredential tab in each section provides the syntax for\nauthenticating using a MongoCredential . For more information on these classes and methods, refer to the following API\ndocumentation: A connection string A MongoCredential factory method MongoClient.create() MongoClient MongoClientSettings.Builder MongoCredential The default authentication mechanism setting uses one of the following\nauthentication mechanisms depending on what your MongoDB server supports: Server versions 3.6 and earlier use MONGODB-CR as the default\nmechanism. Newer versions of the server use one of the mechanisms for\nwhich they advertise support. The following code snippets show how to specify the authentication mechanism,\nusing the following placeholders: Select the Connection String or the MongoCredential \ntab below for instructions and sample code for specifying this authentication\nmechanism: For more information on the challenge-response (CR) and salted\nchallenge-response authentication mechanisms (SCRAM) that MongoDB supports,\nsee the SCRAM section of the Server manual. SCRAM-SHA-256 SCRAM-SHA-1 MONGODB-CR db_username - your MongoDB database username db_password - your MongoDB database user's password hostname - network address of your MongoDB server, accessible by your client port - port number of your MongoDB server authenticationDb - MongoDB database that contains your user's\nauthentication data. If you omit this parameter, the driver uses the\ndefault value admin . To specify the default authentication mechanism using a connection\nstring, omit the mechanism. Your code to instantiate a MongoClient \nshould resemble the following: To specify the default authentication mechanism using the\n MongoCredential class, use the createCredential() method. Your\ncode to instantiate a MongoClient should resemble the following: SCRAM-SHA-256 is a salted challenge-response authentication mechanism\n(SCRAM) that uses your username and password, encrypted with the SHA-256 \nalgorithm, to authenticate your user. The following code snippets show how to specify the authentication mechanism,\nusing the following placeholders: Select the Connection String or the MongoCredential \ntab below for instructions and sample code for specifying this authentication\nmechanism: SCRAM-SHA-256 is the default authentication method for MongoDB starting\nin MongoDB 4.0. db_username - your MongoDB database username. db_password - your MongoDB database user's password. hostname - network address of your MongoDB server, accessible by your client. port - port number of your MongoDB server. authenticationDb - MongoDB database that contains your user's\nauthentication data. If you omit this parameter, the driver uses the\ndefault value admin . To specify the SCRAM-SHA-256 authentication mechanism using a\nconnection string, assign the authMechanism parameter the value\n SCRAM-SHA-256 in your connection string. Your code to instantiate\na MongoClient should resemble the following: To specify the default authentication mechanism using the\n MongoCredential class, use the\n createScramSha256Credential() \nmethod. Your code to instantiate a MongoClient should resemble the following: SCRAM-SHA-1 is a salted challenge-response mechanism (SCRAM) that uses your\nusername and password, encrypted with the SHA-1 algorithm, to authenticate\nyour user. 
The following code snippets show how to specify the authentication mechanism,\nusing the following placeholders: Select the Connection String or the MongoCredential \ntab below for instructions and sample code for specifying this authentication\nmechanism: SCRAM-SHA-1 is the default authentication method for MongoDB versions\n3.0, 3.2, 3.4, and 3.6. db_username - your MongoDB database username. db_password - your MongoDB database user's password. hostname - network address of your MongoDB server, accessible by your client. port - port number of your MongoDB server. authenticationDb - MongoDB database that contains your user's\nauthentication data. If you omit this parameter, the driver uses the\ndefault value admin . To specify the SCRAM-SHA-1 authentication mechanism using a\nconnection string, assign the authMechanism parameter the value\n SCRAM-SHA-1 in your connection string. Your code to instantiate\na MongoClient should resemble the following: To specify the default authentication mechanism using the\n MongoCredential class, use the\n createScramSha1Credential() \nmethod. Your code to instantiate a MongoClient should resemble the following: MONGODB-CR is a challenge-response authentication mechanism that uses your\nusername and password to authenticate your user. This authentication\nmechanism was deprecated starting in MongoDB 3.6 and is no longer\nsupported as of MongoDB 4.0. You cannot specify this method explicitly; refer to the fallback provided\nby the default authentication mechanism to\nconnect using MONGODB-CR . The MONGODB-AWS authentication mechanism uses your Amazon Web Services\nIdentity and Access Management (AWS IAM) credentials to authenticate your\nuser. To learn more about configuring MongoDB Atlas, see the\n Set Up Passwordless Authentication with AWS IAM Roles \nguide. To instruct the driver to use this authentication mechanism, you can specify\n MONGODB-AWS either as a parameter in the connection string or by using\nthe MongoCredential.createAwsCredential() factory method. Learn how to specify this authentication mechanism and the various ways to\nprovide your AWS IAM credentials in the next sections. These sections contain code examples that use the following placeholders: The MONGODB-AWS authentication mechanism is available for MongoDB\ndeployments on MongoDB Atlas. 
awsKeyId - value of your AWS access key ID awsSecretKey - value of your AWS secret access key atlasUri - network address of your MongoDB Atlas deployment hostname - hostname of your MongoDB Atlas deployment port - port of your MongoDB Atlas deployment awsSessionToken - value of your AWS session token You can use one of the AWS SDK for Java v1 or v2 to specify your credentials.\nThis method offers the following features: To use the AWS SDK for Java for MONGODB-AWS authentication, you must\nperform the following: To specify the authentication mechanism by using a MongoCredential ,\nuse the MongoCredential.createAwsCredential() factory method\nand add the MongoCredential instance to your MongoClient as shown\nin the following example: To specify the authentication mechanism in the connection string, add\nit as a parameter as shown in the following example: To add the AWS SDK as a dependency to your project, see the following\nAWS documentation for the version you need: To supply your credentials, see the following AWS documentation for the\nversion you need: Multiple options for obtaining credentials Credential caching which helps your application avoid rate limiting Credential provider management for use with the Elastic Kubernetes Service . Specify the authentication mechanism Add the SDK as a dependency to your project Supply your credentials using one of the methods in the credential\nprovider chain For the AWS SDK for Java v2 , see the Setting Up \nguide. For the AWS SDK for Java v1 , see the Getting Started \nguide. For the AWS SDK for Java v2, the Java driver currently tests using the\n software.amazon.awssdk:auth:2.18.9 dependency. For the AWS SDK for Java v1, the Java driver currently tests using the\n com.amazonaws:aws-java-sdk-core:1.12.337 dependency. To learn more about the AWS SDK for Java v2 class the driver uses to\nget the credentials, see the DefaultCredentialsProvider \nAPI documentation. Learn how to supply your credentials to this class from the\n Use the default credential provider chain \nsection. To learn more about the AWS SDK for Java v1 class the driver uses to\nget the credentials, see the DefaultAWSCredentialsProviderChain \nAPI documentation. Learn how to supply your credentials to this class from the\n Using the Default Credential Provider Chain \nsection. If you include both v1 and v2 of the AWS SDK for Java in your project,\nyou must use the v2 methods to supply your credentials. You can provide your AWS IAM credentials by instructing the driver to\nuse the MONGODB-AWS authentication mechanism and by setting the\nappropriate environment variables. To use the environment variables to supply your credentials, you must perform\nthe following: You can specify the authentication mechanism by using a MongoCredential \nor on the connection string. To specify the authentication mechanism by using a MongoCredential ,\nuse the MongoCredential.createAwsCredential() factory method and add the\n MongoCredential instance to your MongoClient as shown in the following\nexample: To specify the authentication mechanism in the connection string, add it as a\nparameter as shown in the following example: The next examples show how to provide your credentials by setting environment\nvariables for the following types of authentication: The following example shows how you can set your programmatic access keys \nin environment variables by using bash or a similar shell: Omit the line containing AWS_SESSION_TOKEN if you don't need an AWS\nsession token for that role. 
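To make the environment-variable flow above concrete, here is a minimal Kotlin sketch, assuming the Kotlin coroutine driver, that the variables shown above are already exported in the driver's environment, and an illustrative Atlas hostname; substitute your own deployment address:

    import com.mongodb.MongoClientSettings
    import com.mongodb.MongoCredential
    import com.mongodb.ServerAddress
    import com.mongodb.kotlin.client.coroutine.MongoClient

    fun main() {
        // Passing null for both arguments instructs the driver to obtain the
        // AWS credentials from the environment (or the SDK provider chain).
        val credential = MongoCredential.createAwsCredential(null, null)

        val settings = MongoClientSettings.builder()
            .applyToClusterSettings { builder ->
                // Illustrative hostname; replace with your Atlas deployment.
                builder.hosts(listOf(ServerAddress("cluster0.example.mongodb.net")))
            }
            .credential(credential)
            .build()

        val mongoClient = MongoClient.create(settings)
    }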
To authenticate by using ECS container credentials , set the ECS\nendpoint relative URI in an environment variable by using bash or\na similar shell as shown in the following example: To authenticate using EC2 container credentials , make sure none of the\naforementioned environment variables are set. The driver obtains the\ncredentials from the default IPv4 EC2 instance metadata endpoint. Specify the authentication mechanism Add the appropriate environment variables Programmatic access keys ECS container credentials EC2 container credentials You can supply your AWS IAM credentials to a MongoClient by using a\n MongoCredential instance. To construct the MongoCredential instance\nfor MONGODB-AWS authentication, use the createAwsCredential() \nfactory method. You can supply only programmatic access keys to the\n MongoCredential.createAwsCredential() method. If you need to supply ECS\nor EC2 container credentials, use the instructions in\n Specify Your Credentials in the Environment or AWS SDK . To use the MongoCredential for MONGODB-AWS authentication, you\nmust perform the following: To specify the authentication mechanism by using a MongoCredential ,\nuse the MongoCredential.createAwsCredential() factory method\nand add the MongoCredential instance to your MongoClient as shown\nin the following example: If you need to specify an AWS session token, pass it to the\n withMechanismProperty() \nmethod as shown in the following example: To refresh your credentials, you can declare a Supplier lambda expression\nthat returns new credentials as shown in the following example: If you must provide AWS IAM credentials in a connection string, you can add\nit to your MongoClientSettings by calling the applyConnectionString() \nmethod: Specify the authentication mechanism Supply the credentials The X.509 authentication mechanism uses\n TLS with X.509 certificates to\nauthenticate your user, identified by the relative distinguished names\n(RDNs) of your client certificate. When you specify the X.509 \nauthentication mechanism, the server authenticates the connection using\nthe subject name of the client certificate. The following code snippets show how to specify the authentication mechanism,\nusing the following placeholders: Select the Connection String or the MongoCredential \ntab below for instructions and sample code for specifying this authentication\nmechanism: For additional information on configuring your application to use\ncertificates as well as TLS/SSL options, see our\n TLS/SSL guide . hostname - network address of your MongoDB server, accessible by your client. port - port number of your MongoDB server. authenticationDb - MongoDB database that contains your user's\nauthentication data. If you omit this parameter, the driver uses the\ndefault value admin . To specify the X.509 authentication mechanism using a connection\nstring, assign the authMechanism parameter the value MONGODB-X509 \nand enable TLS by assigning the tls \nparameter a true value. Your code to instantiate a MongoClient \nshould resemble the following: To specify the X.509 authentication mechanism using the\n MongoCredential class, use the\n createMongoX509Credential() \nmethod. Also, enable TLS by calling the\n applyToSslSettings() \nmethod and setting the enabled property to true in the\n SslSettings.Builder \nblock. 
Your code to instantiate a MongoClient should resemble the following:", + "code": [ + { + "lang": "kotlin", + "value": "val mongoClient =\n MongoClient.create(\"mongodb://<db_username>:<db_password>@<hostname>:<port>/?authSource=<authenticationDb>\")\n" + }, + { + "lang": "kotlin", + "value": "val credential = MongoCredential.createCredential(\n \"<db_username>\", \"<authenticationDb>\", \"<db_password>\".toCharArray()\n)\nval settings = MongoClientSettings.builder()\n .applyToClusterSettings { builder: ClusterSettings.Builder ->\n builder.hosts(\n listOf(ServerAddress(\"<hostname>\", \"<port>\"))\n )\n }\n .credential(credential)\n .build()\n\nval mongoClient = MongoClient.create(settings)\n" + }, + { + "lang": "kotlin", + "value": "val mongoClient =\n MongoClient.create(\"mongodb://<db_username>:<db_password>@<hostname>:<port>/?authSource=<authenticationDb>&authMechanism=SCRAM-SHA-256\")\n" + }, + { + "lang": "kotlin", + "value": "val credential = MongoCredential.createScramSha256Credential(\n \"<db_username>\", \"<authenticationDb>\", \"<db_password>\".toCharArray()\n)\nval settings = MongoClientSettings.builder()\n .applyToClusterSettings { builder: ClusterSettings.Builder ->\n builder.hosts(\n listOf(ServerAddress(\"<hostname>\", \"<port>\"))\n )\n }\n .credential(credential)\n .build()\n\nval mongoClient = MongoClient.create(settings)\n" + }, + { + "lang": "kotlin", + "value": "val mongoClient =\n MongoClient.create(\"mongodb://<db_username>:<db_password>@<hostname>:<port>/?authSource=<authenticationDb>&authMechanism=SCRAM-SHA-1\")\n" + }, + { + "lang": "kotlin", + "value": "val credential = MongoCredential.createScramSha1Credential(\n \"<db_username>\", \"<authenticationDb>\", \"<db_password>\".toCharArray()\n)\nval settings = MongoClientSettings.builder()\n .applyToClusterSettings { builder: ClusterSettings.Builder ->\n builder.hosts(\n listOf(ServerAddress(\"<hostname>\", \"<port>\"))\n )\n }\n .credential(credential)\n .build()\n\nval mongoClient = MongoClient.create(settings)\n" + }, + { + "lang": "kotlin", + "value": "val credential = MongoCredential.createAwsCredential(null, null)\n\nval settings = MongoClientSettings.builder()\n .applyToClusterSettings { builder: ClusterSettings.Builder ->\n builder.hosts(\n listOf(ServerAddress(\"<hostname>\"))\n )\n }\n .credential(credential)\n .build()\n\nval mongoClient = MongoClient.create(settings)\n" + }, + { + "lang": "kotlin", + "value": "val mongoClient =\n MongoClient.create(\"mongodb://<atlasUri>?authMechanism=MONGODB-AWS\")\n" + }, + { + "lang": "bash", + "value": "export AWS_ACCESS_KEY_ID=<awsKeyId>\nexport AWS_SECRET_ACCESS_KEY=<awsSecretKey>\nexport AWS_SESSION_TOKEN=<awsSessionToken>" + }, + { + "lang": "bash", + "value": "export AWS_CONTAINER_CREDENTIALS_RELATIVE_URI=" + }, + { + "lang": "kotlin", + "value": "val credential = MongoCredential.createAwsCredential(null, null)\n\nval settings = MongoClientSettings.builder()\n .applyToClusterSettings { builder: ClusterSettings.Builder ->\n builder.hosts(\n listOf(ServerAddress(\"<hostname>\"))\n )\n }\n .credential(credential)\n .build()\n\nval mongoClient = MongoClient.create(settings)\n" + }, + { + "lang": "kotlin", + "value": "val mongoClient =\n MongoClient.create(\"mongodb://<atlasUri>?authMechanism=MONGODB-AWS\")\n" + }, + { + "lang": "kotlin", + "value": "val credential = MongoCredential.createAwsCredential(\"<awsKeyId>\", \"<awsSecretKey>\".toCharArray())\n\nval settings = MongoClientSettings.builder()\n .applyToClusterSettings { builder: ClusterSettings.Builder ->\n builder.hosts(\n listOf(ServerAddress(\"<hostname>\"))\n )\n }\n .credential(credential)\n .build()\n\nval mongoClient = MongoClient.create(settings)\n" + }, + { + "lang": "kotlin", + "value": "val credential = MongoCredential.createAwsCredential(\"<awsKeyId>\", \"<awsSecretKey>\".toCharArray())\n .withMechanismProperty(\"AWS_SESSION_TOKEN\", \"<awsSessionToken>\")\n\nval settings = MongoClientSettings.builder()\n .applyToClusterSettings { builder: ClusterSettings.Builder ->\n builder.hosts(\n 
listOf(ServerAddress(\"\"))\n )\n }\n .credential(credential)\n .build()\n\nval mongoClient = MongoClient.create(settings)\n" + }, + { + "lang": "kotlin", + "value": "val awsFreshCredentialSupplier: Supplier = Supplier {\n // Add your code here to fetch new credentials\n\n // Return the new credentials\n AwsCredential(\"\", \"\", \"\")\n}\n\nval credential = MongoCredential.createAwsCredential(\"\", \"\".toCharArray())\n .withMechanismProperty(MongoCredential.AWS_CREDENTIAL_PROVIDER_KEY, awsFreshCredentialSupplier)\n\nval settings = MongoClientSettings.builder()\n .applyToClusterSettings { builder ->\n builder.hosts(listOf(ServerAddress(\"\", \"\")))\n }\n .credential(credential)\n .build()\n\nval mongoClient = MongoClient.create(settings)\n" + }, + { + "lang": "kotlin", + "value": "val credential = MongoCredential.createAwsCredential(\"\", \"\".toCharArray())\nval connectionString = ConnectionString(\"mongodb:///?authMechanism=MONGODB-AWS&authMechanismProperties=AWS_SESSION_TOKEN:\")\n\nval settings = MongoClientSettings.builder()\n .applyConnectionString(connectionString)\n .credential(credential)\n .build()\n\nval mongoClient = MongoClient.create(settings)\n" + }, + { + "lang": "kotlin", + "value": "val mongoClient =\n MongoClient.create(\"mongodb://:@:/?authSource=&authMechanism=MONGODB-X509&tls=true\")\n" + }, + { + "lang": "kotlin", + "value": "val credential = MongoCredential.createMongoX509Credential()\n\nval settings = MongoClientSettings.builder()\n .applyToClusterSettings { builder ->\n builder.hosts(listOf(\n ServerAddress(\"\", \"\"))\n )\n }\n .applyToSslSettings { builder ->\n builder.enabled(true)\n }\n .credential(credential)\n .build()\n\nval mongoClient = MongoClient.create(settings)\n" + } + ], + "preview": "In this guide, you can learn how to authenticate with MongoDB using each\nauthentication mechanism available in the MongoDB Community Edition.\nAuthentication mechanisms are processes by which the driver and server\nconfirm identity and establish trust to ensure security.", + "tags": null, + "facets": { + "target_product": ["drivers"], + "programming_language": ["kotlin"] + } + }, + { + "slug": "fundamentals/builders/aggregates", + "title": "Aggregates Builders", + "headings": [ + "Overview", + "Match", + "Project", + "Projecting Computed Fields", + "Documents", + "Sample", + "Sort", + "Skip", + "Limit", + "Lookup", + "Left Outer Join", + "Full Join and Uncorrelated Subqueries", + "Group", + "Pick-N Accumulators", + "MinN", + "MaxN", + "FirstN", + "LastN", + "Top", + "TopN", + "Bottom", + "BottomN", + "Unwind", + "Out", + "Merge", + "GraphLookup", + "SortByCount", + "ReplaceRoot", + "AddFields", + "Count", + "Bucket", + "BucketAuto", + "Facet", + "SetWindowFields", + "Densify", + "Fill", + "Atlas Full-Text Search", + "Atlas Search Metadata", + "Atlas Vector Search" + ], + "paragraphs": "In this guide, you can learn how to use the Aggregates \nclass which provides static factory methods that build aggregation pipeline\nstages in the MongoDB Kotlin driver. For a more thorough introduction to Aggregation, see our Aggregation guide . The examples on this page assume imports for methods of the following classes: Use these methods to construct pipeline stages and specify them in your\naggregation as a list: Many Aggregation examples in this guide use the Atlas sample_mflix.movies dataset . 
The documents in this collection are\nmodeled by the following Movie data class for use with the Kotlin driver: Aggregates Filters Projections Sorts Accumulators Use the match() method to create a $match \npipeline stage that matches incoming documents against the specified\nquery filter, filtering out documents that do not match. The following example creates a pipeline stage that matches all documents\nin the movies collection where the\n title field is equal to \"The Shawshank Redemption\": The filter can be an instance of any class that implements Bson , but it's\nconvenient to combine with use of the Filters class. Use the project() method to create a $project \npipeline stage that projects specified document fields. Field projection\nin aggregation follows the same rules as field projection in queries . The following example creates a pipeline stage that includes the title and\n plot fields but excludes the _id field: Though the projection can be an instance of any class that implements Bson ,\nit's convenient to combine with use of Projections . The $project stage can project computed fields as well. The following example creates a pipeline stage that projects the rated field\ninto a new field called rating , effectively renaming the field: Use the documents() method to create a\n $documents \npipeline stage that returns literal documents from input values. The following example creates a pipeline stage that creates\nsample documents in the movies collection with a title field: If you use a $documents stage in an aggregation pipeline, it must be the first\nstage in the pipeline. If you use the documents() method to provide the input to an aggregation pipeline,\nyou must call the aggregate() method on a database instead of on a\ncollection. Use the sample() method to create a $sample \npipeline stage to randomly select documents from input. The following example creates a pipeline stage that randomly selects 5 documents\nfrom the movies collection: Use the sort() method to create a $sort \npipeline stage to sort by the specified criteria. The following example creates a pipeline stage that sorts in descending order according\nto the value of the year field and then in ascending order according to the\nvalue of the title field: Though the sort criteria can be an instance of any class that\nimplements Bson , it's convenient to combine with use of\n Sorts . Use the skip() method to create a $skip \npipeline stage to skip over the specified number of documents before\npassing documents into the next stage. The following example creates a pipeline stage that skips the first 5 documents\nin the movies collection: Use the limit() method to create a $limit pipeline stage\nto limit the number of documents passed to the next stage. The following example creates a pipeline stage that limits the number of documents\nreturned from the movies collection to 4 : Use the lookup() method to create a $lookup \npipeline stage to perform joins and uncorrelated subqueries between two collections. 
The following example creates a pipeline stage that performs a left outer\njoin between the movies and comments collections in the sample mflix \ndatabase: It joins the _id field from movies to the movie_id field in comments It outputs the results in the joined_comments field The following example uses the fictional orders and warehouses collections.\nThe data is modeled using the following Kotlin data classes: The example creates a pipeline stage that joins the two collections by the item\nand whether the available quantity in the inStock field is enough to fulfill\nthe ordered quantity: Use the group() method to create a $group \npipeline stage to group documents by a specified expression and output a document\nfor each distinct grouping. The following example creates a pipeline stage that groups documents\nin the orders collection by the value of the customerId field.\nEach group accumulates the sum and average\nof the values of the ordered field into the totalQuantity and\n averageQuantity fields: Learn more about accumulator operators from the Server manual section\non Accumulators . The driver includes the Accumulators \nclass with static factory methods for each of the supported accumulators. The pick-n accumulators are aggregation accumulation operators that return\nthe top and bottom elements given a specific ordering. Use one of the\nfollowing builders to create an aggregation accumulation operator: Learn which aggregation pipeline stages you can use accumulator operators with\nfrom the Server manual section on\n Accumulators . The pick-n accumulator examples use documents from the movies collection\nin the sample-mflix database. minN() maxN() firstN() lastN() top() topN() bottom() bottomN() You can only perform aggregation operations with these pick-n accumulators\nwhen running MongoDB v5.2 or later. The minN() builder creates the $minN \naccumulator which returns data from documents that contain the n lowest\nvalues of a grouping. The following example demonstrates how to use the minN() method to return\nthe lowest three imdb.rating values for movies, grouped by year : See the minN() API documentation \nfor more information. The $minN and $bottomN accumulators can perform similar tasks.\nSee\n Comparison of $minN and $bottomN Accumulators \nfor recommended usage of each. The maxN() accumulator returns data from documents that contain the n \nhighest values of a grouping. The following example demonstrates how to use the maxN() method to\nreturn the highest two imdb.rating values for movies, grouped by year : See the maxN() API documentation \nfor more information. The firstN() accumulator returns data from the first n documents in\neach grouping for the specified sort order. The following example demonstrates how to use the firstN() method to\nreturn the first two movie title values, based on the order they came\ninto the stage, grouped by year : See the firstN() API documentation \nfor more information. The $firstN and $topN accumulators can perform similar tasks.\nSee\n Comparison of $firstN and $topN Accumulators \nfor recommended usage of each. The lastN() accumulator returns data from the last n documents in\neach grouping for the specified sort order. The following example demonstrates how to use the lastN() method to show\nthe last three movie title values, based on the order they came into\nthe stage, grouped by year : See the lastN() API documentation \nfor more information. 
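Because each of these accumulators is only ever used inside a group() stage, a short hypothetical sketch of the full call may help; the collection handle is assumed, and the literal field paths here ("\$year", "\$imdb.rating") are illustrative stand-ins for the reflection-based paths used elsewhere in this guide:

    import com.mongodb.client.model.Accumulators
    import com.mongodb.client.model.Aggregates

    // Groups movies by year and keeps the three lowest imdb.rating values in
    // each group, mirroring the minN() description above.
    val pickNStage = Aggregates.group(
        "\$year",
        Accumulators.minN("lowestThreeRatings", "\$imdb.rating", 3)
    )
    // The stage then runs like any other, for example:
    // collection.aggregate(listOf(pickNStage))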
The top() accumulator returns data from the first document in a group\nbased on the specified sort order. The following example demonstrates how to use the top() method to return\nthe title and imdb.rating values for the top rated movies based on the\n imdb.rating , grouped by year . See the top() API documentation \nfor more information. The topN() accumulator returns data from documents that contain the\nhighest n values for the specified field. The following example demonstrates how to use the topN() method to return\nthe title and runtime values of the three longest movies based on the\n runtime values, grouped by year . See the topN() API documentation \nfor more information. The $firstN and $topN accumulators can perform similar tasks.\nSee\n Comparison of $firstN and $topN Accumulators \nfor recommended usage of each. The bottom() accumulator returns data from the last document in a group\nbased on the specified sort order. The following example demonstrates how to use the bottom() method to\nreturn the title and runtime values of the shortest movie based on the\n runtime value, grouped by year . See the bottom() API documentation \nfor more information. The bottomN() accumulator returns data from documents that contain the\nlowest n values for the specified field. The following example demonstrates how to use the bottomN() method to\nreturn the title and imdb.rating values of the two lowest rated movies\nbased on the imdb.rating value, grouped by year : See the bottomN() API documentation \nfor more information. The $minN and $bottomN accumulators can perform similar tasks.\nSee Comparison of $minN and $bottomN Accumulators \nfor recommended usage of each. Use the unwind() method to create an $unwind \npipeline stage to deconstruct an array field from input documents, creating\nan output document for each array element. The following example creates a document for each element in the lowestRatedTwoMovies array: To preserve documents that have missing or null \nvalues for the array field, or where array is empty: To include the array index (in this example, in a field called \"position\" ): Use the out() method to create an $out \npipeline stage that writes all documents to the specified collection in\nthe same database. The following example writes the results of the pipeline to the classic_movies \ncollection: The $out stage must be the last stage in any aggregation pipeline. Use the merge() method to create a $merge \npipeline stage that merges all documents into the specified collection. The following example merges the pipeline into the nineties_movies collection\nusing the default options: The following example merges the pipeline into the movie_ratings collection\nin the aggregation database using some non-default options that specify to\nreplace the document if both year and title match, otherwise insert the\ndocument: The $merge stage must be the last stage in any aggregation pipeline. Use the graphLookup() method to create a $graphLookup \npipeline stage that performs a recursive search on a specified collection to match\na specified field in one document to a specified field of another document. The following example uses the contacts collection. The data is modeled\nusing the following Kotlin data class: The example computes the reporting graph for users in the\n contact collection, recursively matching the value in the friends field\nto the name field: Using GraphLookupOptions , you can specify the depth to recurse as well as\nthe name of the depth field, if desired. 
In this example, $graphLookup will\nrecurse up to two times, and create a field called degrees with the\nrecursion depth information for every document. Using GraphLookupOptions , you can specify a filter that documents must match\nin order for MongoDB to include them in your search. In this\nexample, only links with \"golf\" in their hobbies field will be included: Use the sortByCount() method to create a $sortByCount \npipeline stage that groups documents by a given expression and then sorts\nthese groups by count in descending order. The following example groups documents in the movies collection by the\n genres field and computes the count for each distinct value: The $sortByCount stage is identical to a $group stage with a\n $sum accumulator followed by a $sort stage. Use the replaceRoot() method to create a $replaceRoot \npipeline stage that replaces each input document with the specified document. The following example uses a fictional books collection that contains data\nmodeled using the following Kotlin data class: Each input document is replaced by the nested document in the\n spanishTranslation field: Use the addFields() method to create an $addFields \npipeline stage that adds new fields to documents. The following example adds two new fields, watched and type , to the\ninput documents in the movie collection: Use $addFields when you do not want to project field inclusion\nor exclusion. Use the count() method to create a $count \npipeline stage that counts the number of documents that enter the stage, and assigns\nthat value to a specified field name. If you do not specify a field,\n count() defaults the field name to \"count\". The following example creates a pipeline stage that outputs the count of incoming\ndocuments in a field called \"total\": The $count stage is syntactic sugar for: Use the bucket() method to create a $bucket \npipeline stage that automates the bucketing of data around predefined boundary\nvalues. The following examples use data modeled with the following Kotlin data class: This example creates a pipeline stage that groups incoming documents based\non the value of their screenSize field, inclusive of the lower boundary\nand exclusive of the upper boundary: Use the BucketOptions class to specify a default bucket for values\noutside of the specified boundaries, and to specify additional accumulators. The following example creates a pipeline stage that groups incoming documents based\non the value of their screenSize field, counting the number of documents\nthat fall within each bucket, pushing the value of screenSize into a\nfield called matches , and capturing any screen sizes greater than \"70\"\ninto a bucket called \"monster\" for monstrously large screen sizes: The driver includes the Accumulators \nclass with static factory methods for each of the supported accumulators. Use the bucketAuto() method to create a $bucketAuto \npipeline stage that automatically determines the boundaries of each bucket\nin its attempt to distribute the documents evenly into a specified number of buckets. The following examples use data modeled with the following Kotlin data class: This example creates a pipeline stage that will attempt to create and evenly\ndistribute documents into 5 buckets using the value of their price field: Use the BucketAutoOptions class to specify a preferred number \nbased scheme to set boundary values, and specify additional accumulators. 
The following example creates a pipeline stage that will attempt to create and evenly\ndistribute documents into 5 buckets using the value of their price field,\nsetting the bucket boundaries at powers of 2 (2, 4, 8, 16, ...). It also counts\nthe number of documents in each bucket, and calculates their average price \nin a new field called avgPrice : The driver includes the Accumulators \nclass with static factory methods for each of the supported accumulators. Use the facet() method to create a $facet \npipeline stage that allows for the definition of parallel pipelines. The following examples use data modeled with the following Kotlin data class: This example creates a pipeline stage that executes two parallel aggregations: The first aggregation distributes incoming documents into 5 groups according to\ntheir screenSize field. The second aggregation counts all manufacturers and returns their count, limited\nto the top 5. Use the setWindowFields() method to create a $setWindowFields \npipeline stage that allows using window operators to perform operations\non a specified span of documents in a collection. The following example uses a fictional weather collection using data modeled\nwith the following Kotlin data class: The example creates a pipeline stage that computes the\naccumulated rainfall and the average temperature over the past month for\neach locality from more fine-grained measurements presented in the rainfall \nand temperature fields: The driver includes the Windows \nclass with static factory methods for building windowed computations. Use the densify() method to create a\n $densify \npipeline stage that generates a sequence of documents to span a specified\ninterval. Consider the following documents retrieved from the Atlas sample weather dataset \nthat contain measurements for a similar position field, spaced one hour\napart: These documents are modeled using the following Kotlin data class: Suppose you needed to create a pipeline stage that performs the following\nactions on these documents: The call to the densify() aggregation stage builder that accomplishes\nthese actions should resemble the following: The following output highlights the documents generated by the aggregate stage\nwhich contain ts values every 15 minutes between the existing documents: See the densify package API documentation \nfor more information. You can use the $densify() aggregation stage only when running\nMongoDB v5.1 or later. Add a document at every 15-minute interval for which a ts value does not\nalready exist. Group the documents by the position field. Use the fill() method to create a\n $fill \npipeline stage that populates null and missing field values. Consider the following documents that contain temperature and air pressure\nmeasurements at an hourly interval: These documents are modeled using the following Kotlin data class: Suppose you needed to populate missing temperature and air pressure\ndata points in the documents as follows: The call to the fill() aggregation stage builder that accomplishes\nthese actions resembles the following: See the fill package API documentation \nfor more information. You can use the $fill() aggregation stage only when running\nMongoDB v5.3 or later. Populate the air_pressure field for hour \"2\" using linear interpolation\nto calculate the value. Set the missing temperature value to \"23.6C\" for hour \"3\". Use the search() method to create a $search \npipeline stage that specifies a full-text search of one or more fields. 
The following example creates a pipeline stage that searches the title \nfield in the movies collection for text that contains the word \"Future\": Learn more about the builders from the\n search package API documentation . This aggregation pipeline operator is only available for collections hosted\non MongoDB Atlas clusters running v4.2 or later that are\ncovered by an Atlas search index .\nLearn more about the required setup and the functionality of this operator\nfrom the Atlas Search documentation. Use the searchMeta() method to create a\n $searchMeta \npipeline stage which returns only the metadata part of the results from\nAtlas full-text search queries. The following example shows the count metadata for an Atlas search\naggregation stage: Learn more about this helper from the\n searchMeta() API documentation . This aggregation pipeline operator is only available\non MongoDB Atlas clusters running v4.4.11 and later. For a\ndetailed list of version availability, see the MongoDB Atlas documentation\non $searchMeta . Use the vectorSearch() method to create a $vectorSearch \npipeline stage that specifies a semantic search . A semantic search is\na type of search that locates pieces of information that are similar in meaning. To use this feature when performing an aggregation on a collection, you\nmust create a vector search index and index your vector embeddings. To\nlearn how to set up search indexes in MongoDB Atlas, see How to\nIndex Vector Embeddings for Vector Search in the Atlas documentation. The example in this section uses data modeled with the following Kotlin data class: This example shows how to build an aggregation pipeline that uses the\n vectorSearch() method to perform a vector search with the following\nspecifications: To learn more about this helper, see the\n vectorSearch() API documentation . To learn about which versions of MongoDB Atlas support this feature, see\n Limitations \nin the Atlas documentation. 
Searches plotEmbedding field values by using vector embeddings of a\nstring value Uses the mflix_movies_embedding_index vector search index Considers up to 2 nearest neighbors Returns 1 document Filters for documents in which the year value is at least 2016", + "code": [ + { + "lang": "kotlin", + "value": "import com.mongodb.client.model.Aggregates\nimport com.mongodb.client.model.Filters\nimport com.mongodb.client.model.Projections\nimport com.mongodb.client.model.Sorts\nimport com.mongodb.client.model.Accumulators" + }, + { + "lang": "kotlin", + "value": "val matchStage = Aggregates.match(Filters.eq(\"someField\", \"someCriteria\"))\nval sortByCountStage = Aggregates.sortByCount(\"\\$someField\")\nval results = collection.aggregate(\n listOf(matchStage, sortByCountStage)).toList()\n" + }, + { + "lang": "kotlin", + "value": "data class Movie(\n val title: String,\n val year: Int,\n val genres: List<String>,\n val rated: String,\n val plot: String,\n val runtime: Int,\n val imdb: IMDB\n){\n data class IMDB(\n val rating: Double\n )\n}\n" + }, + { + "lang": "kotlin", + "value": "Aggregates.match(Filters.eq(Movie::title.name, \"The Shawshank Redemption\"))\n" + }, + { + "lang": "kotlin", + "value": "Aggregates.project(\n Projections.fields(\n Projections.include(Movie::title.name, Movie::plot.name),\n Projections.excludeId())\n)\n" + }, + { + "lang": "kotlin", + "value": "Aggregates.project(\n Projections.fields(\n Projections.computed(\"rating\", \"\\$${Movie::rated.name}\"),\n Projections.excludeId()\n )\n)\n" + }, + { + "lang": "kotlin", + "value": "Aggregates.documents(\n listOf(\n Document(Movie::title.name, \"Steel Magnolias\"),\n Document(Movie::title.name, \"Back to the Future\"),\n Document(Movie::title.name, \"Jurassic Park\")\n )\n)\n" + }, + { + "lang": "kotlin", + "value": "val docsStage = database.aggregate( // ... 
)\n" + }, + { + "lang": "kotlin", + "value": "Aggregates.sample(5)\n" + }, + { + "lang": "kotlin", + "value": "Aggregates.sort(\n Sorts.orderBy(\n Sorts.descending(Movie::year.name),\n Sorts.ascending(Movie::title.name)\n )\n)\n" + }, + { + "lang": "kotlin", + "value": "Aggregates.skip(5)\n" + }, + { + "lang": "kotlin", + "value": "Aggregates.limit(4)\n" + }, + { + "lang": "kotlin", + "value": "Aggregates.lookup(\n \"comments\",\n \"_id\",\n \"movie_id\",\n \"joined_comments\"\n)\n" + }, + { + "lang": "kotlin", + "value": "data class Order(\n @BsonId val id: Int,\n val customerId: Int,\n val item: String,\n val ordered: Int\n)\ndata class Inventory(\n @BsonId val id: Int,\n val stockItem: String,\n val inStock: Int\n)\n" + }, + { + "lang": "kotlin", + "value": "val variables = listOf(\n Variable(\"order_item\", \"\\$item\"),\n Variable(\"order_qty\", \"\\$ordered\")\n)\nval pipeline = listOf(\n Aggregates.match(\n Filters.expr(\n Document(\"\\$and\", listOf(\n Document(\"\\$eq\", listOf(\"$\\$order_item\", \"\\$${Inventory::stockItem.name}\")),\n Document(\"\\$gte\", listOf(\"\\$${Inventory::inStock.name}\", \"$\\$order_qty\"))\n ))\n )\n ),\n Aggregates.project(\n Projections.fields(\n Projections.exclude(Order::customerId.name, Inventory::stockItem.name),\n Projections.excludeId()\n )\n )\n)\nval innerJoinLookup =\n Aggregates.lookup(\"warehouses\", variables, pipeline, \"stockData\")\n" + }, + { + "lang": "kotlin", + "value": "Aggregates.group(\"\\$${Order::customerId.name}\",\n Accumulators.sum(\"totalQuantity\", \"\\$${Order::ordered.name}\"),\n Accumulators.avg(\"averageQuantity\", \"\\$${Order::ordered.name}\")\n)\n" + }, + { + "lang": "kotlin", + "value": "Aggregates.group(\n \"\\$${Movie::year.name}\",\n Accumulators.minN(\n \"lowestThreeRatings\",\n \"\\$${Movie::imdb.name}.${Movie.IMDB::rating.name}\",\n 3\n )\n)\n" + }, + { + "lang": "kotlin", + "value": "Aggregates.group(\n \"\\$${Movie::year.name}\",\n Accumulators.maxN(\n \"highestTwoRatings\",\n \"\\$${Movie::imdb.name}.${Movie.IMDB::rating.name}\",\n 2\n )\n)\n" + }, + { + "lang": "kotlin", + "value": "Aggregates.group(\n \"\\$${Movie::year.name}\",\n Accumulators.firstN(\n \"firstTwoMovies\",\n \"\\$${Movie::title.name}\",\n 2\n )\n)\n" + }, + { + "lang": "kotlin", + "value": "Aggregates.group(\n \"\\$${Movie::year.name}\",\n Accumulators.lastN(\n \"lastThreeMovies\",\n \"\\$${Movie::title.name}\",\n 3\n )\n)\n" + }, + { + "lang": "kotlin", + "value": "Aggregates.group(\n \"\\$${Movie::year.name}\",\n Accumulators.top(\n \"topRatedMovie\",\n Sorts.descending(\"${Movie::imdb.name}.${Movie.IMDB::rating.name}\"),\n listOf(\"\\$${Movie::title.name}\", \"\\$${Movie::imdb.name}.${Movie.IMDB::rating.name}\")\n )\n)\n" + }, + { + "lang": "kotlin", + "value": "Aggregates.group(\n \"\\$${Movie::year.name}\",\n Accumulators.topN(\n \"longestThreeMovies\",\n Sorts.descending(Movie::runtime.name),\n listOf(\"\\$${Movie::title.name}\", \"\\$${Movie::runtime.name}\"),\n 3\n )\n)\n" + }, + { + "lang": "kotlin", + "value": "Aggregates.group(\n \"\\$${Movie::year.name}\",\n Accumulators.bottom(\n \"shortestMovies\",\n Sorts.descending(Movie::runtime.name),\n listOf(\"\\$${Movie::title.name}\", \"\\$${Movie::runtime.name}\")\n )\n)\n" + }, + { + "lang": "kotlin", + "value": "Aggregates.group(\n \"\\$${Movie::year.name}\",\n Accumulators.bottom(\n \"lowestRatedTwoMovies\",\n Sorts.descending(\"${Movie::imdb.name}.${Movie.IMDB::rating.name}\"),\n listOf(\"\\$${Movie::title.name}\", \"\\$${Movie::imdb.name}.${Movie.IMDB::rating.name}\"),\n 
)\n)\n" + }, + { + "lang": "kotlin", + "value": "Aggregates.unwind(\"\\$${\"lowestRatedTwoMovies\"}\")\n" + }, + { + "lang": "kotlin", + "value": "Aggregates.unwind(\n \"\\$${\"lowestRatedTwoMovies\"}\",\n UnwindOptions().preserveNullAndEmptyArrays(true)\n)\n" + }, + { + "lang": "kotlin", + "value": "Aggregates.unwind(\n \"\\$${\"lowestRatedTwoMovies\"}\",\n UnwindOptions().includeArrayIndex(\"position\")\n)\n" + }, + { + "lang": "kotlin", + "value": "Aggregates.out(\"classic_movies\")\n" + }, + { + "lang": "kotlin", + "value": "Aggregates.merge(\"nineties_movies\")\n" + }, + { + "lang": "kotlin", + "value": "Aggregates.merge(\n MongoNamespace(\"aggregation\", \"movie_ratings\"),\n MergeOptions().uniqueIdentifier(listOf(\"year\", \"title\"))\n .whenMatched(MergeOptions.WhenMatched.REPLACE)\n .whenNotMatched(MergeOptions.WhenNotMatched.INSERT)\n)\n" + }, + { + "lang": "kotlin", + "value": "data class Users(\n val name: String,\n val friends: List?,\n val hobbies: List?\n)\n" + }, + { + "lang": "kotlin", + "value": "Aggregates.graphLookup(\n \"contacts\",\n \"\\$${Users::friends.name}\", Users::friends.name, Users::name.name,\n \"socialNetwork\"\n)\n" + }, + { + "lang": "kotlin", + "value": "Aggregates.graphLookup(\n \"contacts\",\n \"\\$${Users::friends.name}\", Users::friends.name, Users::name.name,\n \"socialNetwork\",\n GraphLookupOptions().maxDepth(2).depthField(\"degrees\")\n)\n" + }, + { + "lang": "kotlin", + "value": "Aggregates.graphLookup(\n \"contacts\",\n \"\\$${Users::friends.name}\", Users::friends.name, Users::name.name, \"socialNetwork\",\n GraphLookupOptions().maxDepth(1).restrictSearchWithMatch(\n Filters.eq(Users::hobbies.name, \"golf\")\n )\n)\n" + }, + { + "lang": "json", + "value": "[\n { \"$group\": { \"_id\": , \"count\": { \"$sum\": 1 } } },\n { \"$sort\": { \"count\": -1 } }\n]" + }, + { + "lang": "kotlin", + "value": "Aggregates.sortByCount(\"\\$${Movie::genres.name}\"),\n" + }, + { + "lang": "kotlin", + "value": "data class Libro(val titulo: String)\ndata class Book(val title: String, val spanishTranslation: Libro)\n" + }, + { + "lang": "kotlin", + "value": "Aggregates.replaceRoot(\"\\$${Book::spanishTranslation.name}\")\n" + }, + { + "lang": "kotlin", + "value": "Aggregates.addFields(\n Field(\"watched\", false),\n Field(\"type\", \"movie\")\n)\n" + }, + { + "lang": "json", + "value": "{ \"$group\":{ \"_id\": 0, \"count\": { \"$sum\" : 1 } } }" + }, + { + "lang": "kotlin", + "value": "Aggregates.count(\"total\")\n" + }, + { + "lang": "kotlin", + "value": "data class Screen(\n val id: String,\n val screenSize: Int,\n val manufacturer: String,\n val price: Double\n)\n" + }, + { + "lang": "kotlin", + "value": "Aggregates.bucket(\"\\$${Screen::screenSize.name}\", listOf(0, 24, 32, 50, 70, 1000))\n" + }, + { + "lang": "kotlin", + "value": "Aggregates.bucket(\"\\$${Screen::screenSize.name}\", listOf(0, 24, 32, 50, 70),\n BucketOptions()\n .defaultBucket(\"monster\")\n .output(\n Accumulators.sum(\"count\", 1),\n Accumulators.push(\"matches\", \"\\$${Screen::screenSize.name}\")\n )\n)\n" + }, + { + "lang": "kotlin", + "value": "data class Screen(\n val id: String,\n val screenSize: Int,\n val manufacturer: String,\n val price: Double\n)\n" + }, + { + "lang": "kotlin", + "value": "Aggregates.bucketAuto(\"\\$${Screen::screenSize.name}\", 5)\n" + }, + { + "lang": "kotlin", + "value": "Aggregates.bucketAuto(\n \"\\$${Screen::price.name}\", 5,\n BucketAutoOptions()\n .granularity(BucketGranularity.POWERSOF2)\n .output(Accumulators.sum(\"count\", 1), 
Accumulators.avg(\"avgPrice\", \"\\$${Screen::price.name}\"))\n )\n" + }, + { + "lang": "kotlin", + "value": "data class Screen(\n val id: String,\n val screenSize: Int,\n val manufacturer: String,\n val price: Double\n)\n" + }, + { + "lang": "kotlin", + "value": "Aggregates.facet(\n Facet(\n \"Screen Sizes\",\n Aggregates.bucketAuto(\n \"\\$${Screen::screenSize.name}\",\n 5,\n BucketAutoOptions().output(Accumulators.sum(\"count\", 1))\n )\n ),\n Facet(\n \"Manufacturer\",\n Aggregates.sortByCount(\"\\$${Screen::manufacturer.name}\"),\n Aggregates.limit(5)\n )\n)\n" + }, + { + "lang": "kotlin", + "value": "data class Weather(\n val localityId: String,\n val measurementDateTime: LocalDateTime,\n val rainfall: Double,\n val temperature: Double\n)\n" + }, + { + "lang": "kotlin", + "value": "val pastMonth = Windows.timeRange(-1, MongoTimeUnit.MONTH, Windows.Bound.CURRENT)\n\nval resultsFlow = weatherCollection.aggregate(\n listOf(\n Aggregates.setWindowFields(\"\\$${Weather::localityId.name}\",\n Sorts.ascending(Weather::measurementDateTime.name),\n WindowOutputFields.sum(\n \"monthlyRainfall\",\n \"\\$${Weather::rainfall.name}\",\n pastMonth\n ),\n WindowOutputFields.avg(\n \"monthlyAvgTemp\",\n \"\\$${Weather::temperature.name}\",\n pastMonth\n )\n )\n )\n" + }, + { + "lang": "none", + "value": "Document{{ _id=5553a..., position=Document{{type=Point, coordinates=[-47.9, 47.6]}}, ts=Mon Mar 05 08:00:00 EST 1984, ... }}\nDocument{{ _id=5553b..., position=Document{{type=Point, coordinates=[-47.9, 47.6]}}, ts=Mon Mar 05 09:00:00 EST 1984, ... }}" + }, + { + "lang": "none", + "value": "Document{{ _id=5553a..., position=Document{{type=Point, coordinates=[-47.9, 47.6]}}, ts=Mon Mar 05 08:00:00 EST 1984, ... }}\nDocument{{ position=Document{{coordinates=[-47.9, 47.6]}}, ts=Mon Mar 05 08:15:00 EST 1984 }}\nDocument{{ position=Document{{coordinates=[-47.9, 47.6]}}, ts=Mon Mar 05 08:30:00 EST 1984 }}\nDocument{{ position=Document{{coordinates=[-47.9, 47.6]}}, ts=Mon Mar 05 08:45:00 EST 1984 }}\nDocument{{ _id=5553b..., position=Document{{type=Point, coordinates=[-47.9, 47.6]}}, ts=Mon Mar 05 09:00:00 EST 1984, ... 
}}" + }, + { + "lang": "kotlin", + "value": "data class Weather(\n @BsonId val id: ObjectId = ObjectId(),\n val position: Point,\n val ts: LocalDateTime\n)\n" + }, + { + "lang": "kotlin", + "value": "Aggregates.densify(\n \"ts\",\n DensifyRange.partitionRangeWithStep(15, MongoTimeUnit.MINUTE),\n DensifyOptions.densifyOptions().partitionByFields(\"Position.coordinates\")\n)\n" + }, + { + "lang": "none", + "value": "Document{{_id=6308a..., hour=1, temperature=23C, air_pressure=29.74}}\nDocument{{_id=6308b..., hour=2, temperature=23.5C}}\nDocument{{_id=6308c..., hour=3, temperature=null, air_pressure=29.76}}" + }, + { + "lang": "kotlin", + "value": "data class Weather(\n @BsonId val id: ObjectId = ObjectId(),\n val hour: Int,\n val temperature: String?,\n val air_pressure: Double?\n)\n" + }, + { + "lang": "kotlin", + "value": "val resultsFlow = weatherCollection.aggregate(\n listOf(\n Aggregates.fill(\n FillOptions.fillOptions().sortBy(Sorts.ascending(Weather::hour.name)),\n FillOutputField.value(Weather::temperature.name, \"23.6C\"),\n FillOutputField.linear(Weather::air_pressure.name)\n )\n )\n)\nresultsFlow.collect { println(it) }\n" + }, + { + "lang": "console", + "value": "Weather(id=6308a..., hour=1, temperature=23C, air_pressure=29.74)\nWeather(id=6308b..., hour=2, temperature=23.5C, air_pressure=29.75)\nWeather(id=6308b..., hour=3, temperature=23.6C, air_pressure=29.76)" + }, + { + "lang": "kotlin", + "value": "Aggregates.search(\n SearchOperator.text(\n SearchPath.fieldPath(Movie::title.name), \"Future\"\n ),\n SearchOptions.searchOptions().index(\"title\")\n)\n" + }, + { + "lang": "kotlin", + "value": "Aggregates.searchMeta(\n SearchOperator.near(1985, 2, SearchPath.fieldPath(Movie::year.name)),\n SearchOptions.searchOptions().index(\"year\")\n)\n" + }, + { + "lang": "kotlin", + "value": "data class MovieAlt(\n val title: String,\n val year: Int,\n val plot: String,\n val plotEmbedding: List\n)\n" + }, + { + "lang": "kotlin", + "value": "Aggregates.vectorSearch(\n SearchPath.fieldPath(MovieAlt::plotEmbedding.name),\n listOf(-0.0072121937, -0.030757688, -0.012945653),\n \"mflix_movies_embedding_index\",\n 2.toLong(),\n 1.toLong(),\n vectorSearchOptions().filter(Filters.gte(MovieAlt::year.name, 2016))\n)\n" + } + ], + "preview": "In this guide, you can learn how to use the Aggregates\nclass which provides static factory methods that build aggregation pipeline\nstages in the MongoDB Kotlin driver.", + "tags": "code example, data insights, compute, atlas", + "facets": { + "genre": ["reference"], + "target_product": ["drivers"], + "programming_language": ["kotlin"] + } + }, + { + "slug": "fundamentals/builders/filters", + "title": "Filters Builders", + "headings": [ + "Overview", + "Comparison", + "Logical", + "Arrays", + "Elements", + "Evaluation", + "Bitwise", + "Geospatial" + ], + "paragraphs": "In this guide, you can learn how to use builders to specify\n filters for your queries in the MongoDB Kotlin driver. Builders are classes provided by the MongoDB Kotlin driver that help you\nconstruct BSON objects. To learn more, see our guide\non builders . Filters are operations used to limit the results of a query based on\nspecified conditions. Filters are a helpful tool to locate\ninformation that matches search conditions in a collection. 
You can use filters in the following places: Some examples of results from queries with filters are: This guide shows you how to use builders with examples of the following\ntypes of operators: The Filters class provides static factory methods for all the MongoDB query\noperators. Each method returns an instance of the BSON \ntype, which you can pass to any method that expects a query filter. Most of the Filter examples in this guide use the following sample collection paints : These documents in the paints collection are modeled by the following data class for use\nwith the Kotlin driver: As a parameter to the find() method In a match stage of an aggregation pipeline As a parameter to the deleteOne() or deleteMany() method As a parameter to the updateOne() or updateMany() method Items that cost more than $0 but less than $25. Foods that are both gluten-free and less than 500 calories. A food critic review that mentions \"spicy\". Comparison Logical Arrays Elements Evaluation Bitwise Geospatial For brevity, you may choose to import all methods of the\n Filters \nclass statically: The comparison filters include all operators that compare the value in a\ndocument to a specified value. The Filters comparison operator methods include: The following example creates a filter that matches all documents where\nthe value of the qty field equals \"5\" in the paints collection: The following example creates a filter that matches all documents where\nthe value of the qty field is greater than or equal to \"10\" in the\n paints collection: The following example creates a filter that matches all documents in\nthe paints collection because the predicate is empty: Comparison Method Matches eq() values equal to a specified value. gt() values greater than a specified value. gte() values greater than or equal to a specified value. lt() values less than a specified value. lte() values less than or equal to a specified value. ne() values not equal to a specified value. in() any of the values specified in an array. nin() none of the values specified in an array. empty() all the documents. The logical operators perform logical operations based on the conditions of the specified method. The Filters logical operator methods include: The following example creates a filter that matches documents where\nthe value of the qty field is greater than \"8\" or the value\nof the color field equals \"pink\" in the paints collection: Logical Method Matches and() documents with the conditions of all the filters. This operator joins filters with a logical AND . or() documents with the conditions of either filter. This operator joins filters with a logical OR . not() documents that do not match the filter. nor() documents that fail to match both filters. This operator joins filters with a logical NOR . The array operators evaluate the array field in a document. The Filters array operator methods include: The following example matches documents with a vendors array\ncontaining both \"A\" and \"D\" in the paints collection: Array Method Matches all() documents if the array field contains every element specified in the query. elemMatch() documents if an element in the array field matches all the specified conditions. size() documents if the array field is a specified number of elements. The elements operators evaluate the nature of a specified field. 
The Filters elements operator methods include: The following example matches documents that have a qty field and\nits value does not equal \"5\" or \"8\" in the paints collection: Elements Method Matches exists() documents that have the specified field. type() documents if a field is of the specified type. The evaluation operators evaluate the value of any field in a document. The Filters evaluation operator methods include: The following example matches documents that have a color field\nstarting with the letter \"p\" in the paints collection: Evaluation Method Matches mod() documents where a modulo operation on the value of a field contain a specified result. regex() documents where values contain a specified regular expression. text() documents which contain a specified full-text search expression. where() documents which contain a specified JavaScript expression. The bitwise operators convert a number into its binary value to\nevaluate its bits. The Filters bitwise operator methods include: The following example matches documents that have a decimalValue field\nwith bits set at positions of the corresponding bitmask \"34\" (i.e.\n\"00100010\") in this binary_numbers collection: Bitwise Method Matches bitsAllSet() documents where the specified bits of a field are set (i.e. \"1\"). bitsAllClear() documents where the specified bits of a field are clear (i.e. \"0\"). bitsAnySet() documents where at least one of the specified bits of a field are set (i.e. \"1\"). bitsAnyClear() documents where at least one of the specified bits of a field are clear (i.e. \"0\"). The geospatial operators evaluate a specified coordinate and its\nrelation to a shape or location. The Filters geospatial operator methods include: The following example creates a filter that matches documents in which\nthe point field contains a GeoJSON geometry that falls within\nthe given Polygon \nin this stores collection: Geospatial Method Matches geoWithin() documents containing a GeoJSON geometry value that falls within a bounding GeoJSON geometry. geoWithinBox() documents containing a coordinates value that exist within the specified box. geoWithinPolygon() documents containing a coordinates value that exist within the specified polygon. geoWithinCenter() documents containing a coordinates value that exist within the specified circle. geoWithinCenterSphere() geometries containing a geospatial data value (GeoJSON or legacy coordinate pairs) that exist within the specified circle, using spherical geometry. geoIntersects() geometries that intersect with a GeoJSON geometry. The 2dsphere index supports $geoIntersects . near() geospatial objects in proximity to a point. Requires a geospatial index. The 2dsphere and 2d indexes support $near . nearSphere() geospatial objects in proximity to a point on a sphere. Requires a geospatial index. 
The 2dsphere and 2d indexes support $nearSphere .", + "code": [ + { + "lang": "json", + "value": "{ \"_id\": 1, \"color\": \"red\", \"qty\": 5, \"vendor\": [\"A\"] }\n{ \"_id\": 2, \"color\": \"purple\", \"qty\": 10, \"vendor\": [\"C\", \"D\"] }\n{ \"_id\": 3, \"color\": \"blue\", \"qty\": 8, \"vendor\": [\"B\", \"A\"] }\n{ \"_id\": 4, \"color\": \"white\", \"qty\": 6, \"vendor\": [\"D\"] }\n{ \"_id\": 5, \"color\": \"yellow\", \"qty\": 11, \"vendor\": [\"A\", \"B\"] }\n{ \"_id\": 6, \"color\": \"pink\", \"qty\": 5, \"vendor\": [\"C\"] }\n{ \"_id\": 7, \"color\": \"green\", \"qty\": 8,\"vendor\": [\"B\", \"C\"] }\n{ \"_id\": 8, \"color\": \"orange\", \"qty\": 7, \"vendor\": [\"A\", \"D\"] }" + }, + { + "lang": "kotlin", + "value": "import com.mongodb.client.model.Filters.*" + }, + { + "lang": "kotlin", + "value": "data class PaintOrder(\n @BsonId val id: Int,\n val qty: Int,\n val color: String,\n val vendors: List = mutableListOf()\n)\n" + }, + { + "lang": "kotlin", + "value": "val equalComparison = Filters.eq(PaintOrder::qty.name, 5)\nval resultsFlow = collection.find(equalComparison)\nresultsFlow.collect { println(it) }\n" + }, + { + "lang": "console", + "value": "PaintOrder(id=1, qty=5, color=red, vendors=[A])\nPaintOrder(id=6, qty=5, color=pink, vendors=[C])" + }, + { + "lang": "kotlin", + "value": "val gteComparison = Filters.gte(PaintOrder::qty.name, 10)\nval resultsFlow = collection.find(gteComparison)\nresultsFlow.collect { println(it) }\n" + }, + { + "lang": "console", + "value": "PaintOrder(id=2, qty=10, color=purple, vendors=[C, D])\nPaintOrder(id=5, qty=11, color=yellow, vendors=[A, B])" + }, + { + "lang": "kotlin", + "value": "val emptyComparison = Filters.empty()\nval resultsFlow = collection.find(emptyComparison)\nresultsFlow.collect { println(it) }\n" + }, + { + "lang": "console", + "value": "PaintOrder(id=1, qty=5, color=red, vendors=[A])\nPaintOrder(id=2, qty=10, color=purple, vendors=[C, D])\nPaintOrder(id=3, qty=8, color=blue, vendors=[B, A])\nPaintOrder(id=4, qty=6, color=white, vendors=[D])\nPaintOrder(id=5, qty=11, color=yellow, vendors=[A, B])\nPaintOrder(id=6, qty=5, color=pink, vendors=[C])\nPaintOrder(id=7, qty=8, color=green, vendors=[B, C])\nPaintOrder(id=8, qty=7, color=orange, vendors=[A, D])" + }, + { + "lang": "kotlin", + "value": "val orComparison = Filters.or(\n Filters.gt(PaintOrder::qty.name, 8),\n Filters.eq(PaintOrder::color.name, \"pink\")\n)\nval resultsFlow = collection.find(orComparison)\nresultsFlow.collect { println(it) }\n" + }, + { + "lang": "console", + "value": "PaintOrder(id=2, qty=10, color=purple, vendors=[C, D])\nPaintOrder(id=5, qty=11, color=yellow, vendors=[A, B])\nPaintOrder(id=6, qty=5, color=pink, vendors=[C])" + }, + { + "lang": "kotlin", + "value": "val search = listOf(\"A\", \"D\")\nval allComparison = Filters.all(PaintOrder::vendors.name, search)\nval resultsFlow = collection.find(allComparison)\nresultsFlow.collect { println(it) }\n" + }, + { + "lang": "console", + "value": "PaintOrder(id=8, qty=7, color=orange, vendors=[A, D])" + }, + { + "lang": "kotlin", + "value": "val existsComparison = Filters.and(Filters.exists(PaintOrder::qty.name), Filters.nin(\"qty\", 5, 8))\nval resultsFlow = collection.find(existsComparison)\nresultsFlow.collect { println(it) }\n" + }, + { + "lang": "console", + "value": "PaintOrder(id=2, qty=10, color=purple, vendors=[C, D])\nPaintOrder(id=4, qty=6, color=white, vendors=[D])\nPaintOrder(id=5, qty=11, color=yellow, vendors=[A, B])\nPaintOrder(id=8, qty=7, color=orange, vendors=[A, D])" + }, + { + 
"lang": "kotlin", + "value": "val regexComparison = Filters.regex(PaintOrder::color.name, \"^p\")\nval resultsFlow = collection.find(regexComparison)\nresultsFlow.collect { println(it) }\n" + }, + { + "lang": "console", + "value": "PaintOrder(id=2, qty=10, color=purple, vendors=[C, D])\nPaintOrder(id=6, qty=5, color=pink, vendors=[C])" + }, + { + "lang": "json", + "value": "{ \"_id\": 9, \"decimalValue\": 54, \"binaryValue\": \"00110110\" }\n{ \"_id\": 10, \"decimalValue\": 20, \"binaryValue\": \"00010100\" }\n{ \"_id\": 11, \"decimalValue\": 68, \"binaryValue\": \"1000100\" }\n{ \"_id\": 12, \"decimalValue\": 102, \"binaryValue\": \"01100110\" }" + }, + { + "lang": "kotlin", + "value": "data class BinaryNumber(\n @BsonId val id: Int,\n val decimalValue: Int,\n val binaryValue: String\n)\nval binaryCollection = database.getCollection(\"binary_numbers\")\n\nval bitmask = 34.toLong() // 00100010 in binary\nval bitsComparison = Filters.bitsAllSet(BinaryNumber::decimalValue.name, bitmask)\nval resultsFlow = binaryCollection.find(bitsComparison)\nresultsFlow.collect { println(it) }\n" + }, + { + "lang": "console", + "value": "BinaryNumber(id=1, decimalValue=54, binaryValue=00110110)\nBinaryNumber(id=4, decimalValue=102, binaryValue=01100110)" + }, + { + "lang": "json", + "value": "{ \"_id\": 13, \"coordinates\": { \"type\": \"Point\", \"coordinates\": [2.0, 2.0] } }\n{ \"_id\": 14, \"coordinates\": { \"type\": \"Point\", \"coordinates\": [5.0, 6.0] } }\n{ \"_id\": 15, \"coordinates\": { \"type\": \"Point\", \"coordinates\": [1.0, 3.0] } }\n{ \"_id\": 16, \"coordinates\": { \"type\": \"Point\", \"coordinates\": [4.0, 7.0] } }" + }, + { + "lang": "kotlin", + "value": "data class Store(\n @BsonId val id: Int,\n val name: String,\n val coordinates: Point\n)\nval collection = database.getCollection(\"stores\")\n\nval square = Polygon(listOf(\n Position(0.0, 0.0),\n Position(4.0, 0.0),\n Position(4.0, 4.0),\n Position(0.0, 4.0),\n Position(0.0, 0.0)))\nval geoWithinComparison = Filters.geoWithin(Store::coordinates.name, square)\n\nval resultsFlow = collection.find(geoWithinComparison)\nresultsFlow.collect { println(it) }\n" + }, + { + "lang": "console", + "value": "Store(id=13, name=Store 13, coordinates=Point{coordinate=Position{values=[2.0, 2.0]}})\nStore(id=15, name=Store 15, coordinates=Point{coordinate=Position{values=[1.0, 3.0]}})" + } + ], + "preview": "In this guide, you can learn how to use builders to specify\nfilters for your queries in the MongoDB Kotlin driver.", + "tags": null, + "facets": { + "target_product": ["drivers"], + "programming_language": ["kotlin"] + } + }, + { + "slug": "fundamentals/builders/indexes", + "title": "Indexes Builders", + "headings": [ + "Overview", + "Ascending Indexes", + "Descending Indexes", + "Compound Indexes", + "Text Indexes", + "Hashed Indexes", + "Geospatial Indexes" + ], + "paragraphs": "In this guide, you can learn how to specify indexes using\n builders in the MongoDB Kotlin Driver.\nThe Indexes builder provides helper methods for constructing the\nfollowing types of indexes: Indexes store a subset of the collection's data set. The index stores\nthe value of a specific field or set of fields, ordered by the value of\nthe field. See our guide on Indexes for\nexamples of queries covered by indexes. The Indexes class provides static factory methods for all the MongoDB index types.\nEach method returns a BSON \ninstance, which you can pass to\n createIndex() . 
Ascending Indexes Descending Indexes Compound Indexes Text Indexes Hashed Indexes Geospatial Indexes For brevity, you may choose to import all methods of the\n Indexes \nclass: An ascending index enables you to sort query results by the value of the\nindexed fields from smallest to largest. In order to create an ascending index, first call the\n ascending() \nbuilder method to create a Bson instance that represents the index\ndocument, passing the name or names of the fields you want to index.\nThen, call the createIndex() method on the collection, passing the Bson \ninstance that contains the index document. The following example specifies an ascending index on the name field: If you have an ascending or a descending index on a single field, MongoDB\ncan sort using the index in either direction. A descending index enables you to sort query results by the value of the\nindexed fields from largest to smallest. In order to create a descending index, first call the\n descending() \nbuilder method to create a Bson instance that represents the index\ndocument, passing the name or names of the fields you want to index.\nThen, call the createIndex() method on the collection, passing the Bson \ninstance that contains the index document. The following example specifies a descending index on the capacity field: In order to create a compound index, first call the\n compoundIndex() \nbuilder method to create a Bson instance that represents the index\ndocument, passing the names of the fields you want to index. Then, call\nthe createIndex() method on the collection, passing the Bson \ninstance that contains the index document. The following example specifies a compound index composed of a\ndescending index on the capacity and year fields, followed\nby an ascending index on the name field: A text index groups documents by the text in the indexed field. In order to create a text index, first call the\n text() \nbuilder method to create a Bson instance that represents the index\ndocument, passing the name of the fields you want to index. Then, call\nthe createIndex() method on the collection, passing the Bson \ninstance that contains the index document. The following example specifies a text index key on the theaters field: A hashed index groups documents by the hash value in the indexed field. In order to create a hashed index, first call the\n hashed() \nbuilder method to create a Bson instance that represents the index\ndocument, passing the name of the fields you want to index. Then, call\nthe createIndex() method on the collection, passing the Bson \ninstance that contains the index document. The following example specifies a hashed index on the capacity \nfield: A 2dsphere index groups documents by the coordinates in the indexed field. In order to create a 2dsphere index, first call the\n geo2dsphere() \nbuilder method to create a Bson instance that represents the index\ndocument, passing the name or names of the fields you want to index.\nThen, call the createIndex() method on the collection, passing the Bson \ninstance that contains the index document. 
The following example specifies a 2dsphere index on the location field:", + "code": [ + { + "lang": "kotlin", + "value": "import com.mongodb.client.model.Indexes.*" + }, + { + "lang": "kotlin", + "value": "val ascendingIndex = Indexes.ascending(\"name\")\nval indexName = collection.createIndex(ascendingIndex)\nprintln(indexName)\n" + }, + { + "lang": "console", + "value": "name_1" + }, + { + "lang": "kotlin", + "value": "val descendingIndex = Indexes.descending(\"capacity\")\nval indexName = collection.createIndex(descendingIndex)\nprintln(indexName)\n" + }, + { + "lang": "console", + "value": "capacity_-1" + }, + { + "lang": "kotlin", + "value": "val compoundIndexExample = Indexes.compoundIndex(\n Indexes.descending(\"capacity\", \"year\"),\n Indexes.ascending(\"name\")\n)\nval indexName = collection.createIndex(compoundIndexExample)\nprintln(indexName)\n" + }, + { + "lang": "console", + "value": "capacity_-1_year_-1_name_1" + }, + { + "lang": "kotlin", + "value": "val textIndex = Indexes.text(\"theaters\")\nval indexName = collection.createIndex(textIndex)\nprintln(indexName)\n" + }, + { + "lang": "console", + "value": "theaters_text" + }, + { + "lang": "kotlin", + "value": "val hashedIndex = Indexes.hashed(\"capacity\")\nval indexName = collection.createIndex(hashedIndex)\nprintln(indexName)\n" + }, + { + "lang": "console", + "value": "capacity_hashed" + }, + { + "lang": "kotlin", + "value": "val geo2dsphereIndex = Indexes.geo2dsphere(\"location\")\nval indexName = collection.createIndex(geo2dsphereIndex)\nprintln(indexName)\n" + }, + { + "lang": "console", + "value": "location_2dsphere" + } + ], + "preview": "In this guide, you can learn how to specify indexes using\nbuilders in the MongoDB Kotlin Driver.\nThe Indexes builder provides helper methods for constructing the\nfollowing types of indexes:", + "tags": null, + "facets": { + "target_product": ["drivers"], + "programming_language": ["kotlin"] + } + }, + { + "slug": "fundamentals/builders/projections", + "title": "Projections Builders", + "headings": [ + "Overview", + "Sample Documents and Examples", + "Projection Operations", + "Inclusion", + "Exclusion", + "Combining Projections", + "Exclusion of _id", + "Project an Array Element Match", + "Project an Array Slice", + "Project a Text Score" + ], + "paragraphs": "In this guide, you can learn how to specify projections using\n builders in the MongoDB Kotlin driver. MongoDB supports field projection , specifying which fields to include and exclude when returning results from a\nquery. Projection in MongoDB follows some basic rules: Find more information about projection mechanics in the Project Fields to Return from Query guide in the MongoDB Server documentation. The Projections class provides static factory methods for\nall the MongoDB projection operators. Each method returns an instance of the BSON type which you can pass\nto any method that expects a projection. The _id field is always included unless explicitly excluded Specifying a field for inclusion implicitly excludes all other fields except the _id field Specifying a field for exclusion removes only that field in a query result For brevity, you may choose to import the methods of the\n Projections \nclass: The following sections feature examples that run query and projection operations\non a sample collection called projection_builders . Each section uses\na variable named collection to refer to the MongoCollection instance\nof the projection_builders collection. 
The collection contains the following documents, representing the monthly average\ntemperatures in Celsius for the years 2018 and 2019: The following data class is used to represent the documents in the collection: The following sections contain information on the available projection\noperations and how to construct them using the Projections class. Use the include() method to specify the inclusion of one or more fields. The following example includes the year field and implicitly the _id field: The following example includes the year and type fields and implicitly the _id field: Use the exclude() method to specify the exclusion of one or more fields. The following example excludes the temperatures field: The following example excludes the temperatures and type fields: Use the fields() method to combine multiple projections. The following example includes the year and type fields and excludes the\n _id field: Use the excludeId() convenience method to specify the exclusion of the _id field: Use the elemMatch(String, Bson) method variant to specify an array projection that will include the first\nelement of an array that matches a supplied query filter. This filtering occurs after all documents matching the\nquery filter (if supplied) are retrieved. The following example projects the first element of the temperatures array where the avg field is\ngreater than 10.1 : When you've specified matching criteria in the query portion of your operation, use the elemMatch(String) method\nvariant to specify a positional projection to include\nthe first element of an array. Only documents that match the query filter will be retrieved. The following example projects the first element of the temperatures array: Only the first element that matches the specified query filter will be included,\nregardless of how many matches there may be. In MongoDB versions 4.2 and earlier, the specified array field must appear in the query filter. Beginning in MongoDB 4.4,\nyou can use a positional project on an array field that does not appear in the query filter. Use the slice() method to project a slice of an array. The following example projects the first 6 elements of the temperatures array: The following example skips the first 6 elements of the temperatures array and projects the next 6 : Use the metaTextScore() method to specify a projection of the\n score of a text query . 
The following example projects the text score as the value of the score field:", + "code": [ + { + "lang": "kotlin", + "value": "import com.mongodb.client.model.Projections.*" + }, + { + "lang": "json", + "value": "{\n \"year\" : 2018,\n \"type\" : \"even number but not a leap year\",\n \"temperatures\" : [\n { \"month\" : \"January\", \"avg\" : 9.765 },\n { \"month\" : \"February\", \"avg\" : 9.675 },\n { \"month\" : \"March\", \"avg\" : 10.004 },\n { \"month\" : \"April\", \"avg\" : 9.983 },\n { \"month\" : \"May\", \"avg\" : 9.747 },\n { \"month\" : \"June\", \"avg\" : 9.65 },\n { \"month\" : \"July\", \"avg\" : 9.786 },\n { \"month\" : \"August\", \"avg\" : 9.617 },\n { \"month\" : \"September\", \"avg\" : 9.51 },\n { \"month\" : \"October\", \"avg\" : 10.042 },\n { \"month\" : \"November\", \"avg\" : 9.452 },\n { \"month\" : \"December\", \"avg\" : 9.86 }\n ]\n},\n{\n \"year\" : 2019,\n \"type\" : \"odd number, can't be a leap year\",\n \"temperatures\" : [\n { \"month\" : \"January\", \"avg\" : 10.023 },\n { \"month\" : \"February\", \"avg\" : 9.808 },\n { \"month\" : \"March\", \"avg\" : 10.43 },\n { \"month\" : \"April\", \"avg\" : 10.175 },\n { \"month\" : \"May\", \"avg\" : 9.648 },\n { \"month\" : \"June\", \"avg\" : 9.686 },\n { \"month\" : \"July\", \"avg\" : 9.794 },\n { \"month\" : \"August\", \"avg\" : 9.741 },\n { \"month\" : \"September\", \"avg\" : 9.84 },\n { \"month\" : \"October\", \"avg\" : 10.15 },\n { \"month\" : \"November\", \"avg\" : 9.84 },\n { \"month\" : \"December\", \"avg\" : 10.366 }\n ]\n}" + }, + { + "lang": "kotlin", + "value": "data class YearlyTemperature(\n @BsonId val id: ObjectId,\n val year: Int,\n val type: String,\n val temperatures: List<MonthlyTemperature>\n) {\n data class MonthlyTemperature(\n val month: String,\n val avg: Double\n )\n}\n" + }, + { + "lang": "kotlin", + "value": "data class Results(@BsonId val id: ObjectId, val year: Int)\n\nval filter = Filters.empty()\nval projection = Projections.include(YearlyTemperature::year.name)\nval resultsFlow = collection.find<Results>(filter).projection(projection)\nresultsFlow.collect { println(it) }\n" + }, + { + "lang": "console", + "value": "Results(id=6467808db5003e6354a1ee22, year=2018)\nResults(id=6467808db5003e6354a1ee23, year=2019)" + }, + { + "lang": "kotlin", + "value": "data class Results(@BsonId val id: ObjectId, val year: Int, val type: String)\n\nval filter = Filters.empty()\nval projection = Projections.include(YearlyTemperature::year.name, YearlyTemperature::type.name)\nval resultsFlow = collection.find<Results>(filter).projection(projection)\nresultsFlow.collect { println(it) }\n" + }, + { + "lang": "console", + "value": "Results(id=646780e3311323724f69a907, year=2018, type=even number but not a leap year)\nResults(id=646780e3311323724f69a908, year=2019, type=odd number, can't be a leap year)" + }, + { + "lang": "kotlin", + "value": "data class Results(@BsonId val id: ObjectId, val year: Int, val type: String)\nval filter = Filters.empty()\nval projection = Projections.exclude(YearlyTemperature::temperatures.name)\nval resultsFlow = collection.find<Results>(filter).projection(projection)\nresultsFlow.collect { println(it) }\n" + }, + { + "lang": "console", + "value": "Results(id=6462976102c85b29a7bfc9d5, year=2018, type=even number but not a leap year)\nResults(id=6462976102c85b29a7bfc9d6, year=2019, type=odd number, can't be a leap year)" + }, + { + "lang": "kotlin", + "value": "data class Results(@BsonId val id: ObjectId, val year: Int)\n\nval filter = Filters.empty()\nval projection = 
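/* editorial note: excludes only these two fields; all other fields, including _id, are still returned */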
Projections.exclude(YearlyTemperature::temperatures.name, YearlyTemperature::type.name)\nval resultsFlow = collection.find<Results>(filter).projection(projection)\nresultsFlow.collect { println(it) }\n" + }, + { + "lang": "console", + "value": "Results(id=64629783d7760d2365215147, year=2018)\nResults(id=64629783d7760d2365215148, year=2019)" + }, + { + "lang": "kotlin", + "value": "data class Results(val year: Int, val type: String)\n\nval filter = Filters.empty()\nval projection = Projections.fields(\n Projections.include(YearlyTemperature::year.name, YearlyTemperature::type.name),\n Projections.excludeId()\n)\nval resultsFlow = collection.find<Results>(filter).projection(projection)\nresultsFlow.collect { println(it) }\n" + }, + { + "lang": "console", + "value": "Results(year=2018, type=even number but not a leap year)\nResults(year=2019, type=odd number, can't be a leap year)" + }, + { + "lang": "kotlin", + "value": "data class Results(val year: Int, val type: String, val temperatures: List<MonthlyTemperature>)\nval filter = Filters.empty()\nval projection = Projections.excludeId()\nval resultsFlow = collection.find<Results>(filter).projection(projection)\nresultsFlow.collect { println(it) }\n" + }, + { + "lang": "console", + "value": "Results(year=2018, type=even number but not a leap year, temperatures=[MonthlyTemperature(month=January, avg=9.765), MonthlyTemperature(month=February, avg=9.675), MonthlyTemperature(month=March, avg=10.004), MonthlyTemperature(month=April, avg=9.983), MonthlyTemperature(month=May, avg=9.747), MonthlyTemperature(month=June, avg=9.65), MonthlyTemperature(month=July, avg=9.786), MonthlyTemperature(month=August, avg=9.617), MonthlyTemperature(month=September, avg=9.51), MonthlyTemperature(month=October, avg=10.042), MonthlyTemperature(month=November, avg=9.452), MonthlyTemperature(month=December, avg=9.86)])\nResults(year=2019, type=odd number, can't be a leap year, temperatures=[MonthlyTemperature(month=January, avg=10.023), MonthlyTemperature(month=February, avg=9.808), MonthlyTemperature(month=March, avg=10.43), MonthlyTemperature(month=April, avg=10.175), MonthlyTemperature(month=May, avg=9.648), MonthlyTemperature(month=June, avg=9.686), MonthlyTemperature(month=July, avg=9.794), MonthlyTemperature(month=August, avg=9.741), MonthlyTemperature(month=September, avg=9.84), MonthlyTemperature(month=October, avg=10.15), MonthlyTemperature(month=November, avg=9.84), MonthlyTemperature(month=December, avg=10.366)])" + }, + { + "lang": "kotlin", + "value": "data class Results(\n val year: Int,\n val temperatures: List<MonthlyTemperature>?\n)\n\nval filter = Filters.empty()\nval projection = Projections.fields(\n Projections.include(YearlyTemperature::year.name),\n Projections.elemMatch(\n YearlyTemperature::temperatures.name,\n Filters.gt(YearlyTemperature.MonthlyTemperature::avg.name, 10.1)\n )\n)\nval resultsFlow = collection.find<Results>(filter).projection(projection)\nresultsFlow.collect { println(it) }\n" + }, + { + "lang": "console", + "value": "Results(year=2018, temperatures=null)\nResults(year=2019, temperatures=[MonthlyTemperature(month=March, avg=10.43)])" + }, + { + "lang": "kotlin", + "value": "data class Results(\n val year: Int,\n val temperatures: List<MonthlyTemperature>\n)\n\nval filter = Filters.gt(\n \"${YearlyTemperature::temperatures.name}.${YearlyTemperature.MonthlyTemperature::avg.name}\",\n 10.1\n)\nval projection = Projections.fields(\n Projections.include(YearlyTemperature::year.name),\n Projections.elemMatch(YearlyTemperature::temperatures.name)\n)\nval resultsFlow = 
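/* editorial note: positional projection; only the first array element matching the query filter is returned */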
collection.find<Results>(filter).projection(projection)\nresultsFlow.collect { println(it) }\n" + }, + { + "lang": "console", + "value": "Results(year=2019, temperatures=[MonthlyTemperature(month=March, avg=10.43)])" + }, + { + "lang": "kotlin", + "value": "data class Results(val temperatures: List<MonthlyTemperature>)\n\nval filter = Filters.empty()\n// First half of the year\nval projection = Projections.fields(\n Projections.slice(YearlyTemperature::temperatures.name, 6),\n Projections.excludeId()\n)\nval resultsFlow = collection.find<Results>(filter)\n .projection(projection)\nresultsFlow.collect { println(it) }\n" + }, + { + "lang": "console", + "value": "Results(temperatures=[MonthlyTemperature(month=January, avg=9.765), MonthlyTemperature(month=February, avg=9.675), MonthlyTemperature(month=March, avg=10.004), MonthlyTemperature(month=April, avg=9.983), MonthlyTemperature(month=May, avg=9.747), MonthlyTemperature(month=June, avg=9.65)])\nResults(temperatures=[MonthlyTemperature(month=January, avg=10.023), MonthlyTemperature(month=February, avg=9.808), MonthlyTemperature(month=March, avg=10.43), MonthlyTemperature(month=April, avg=10.175), MonthlyTemperature(month=May, avg=9.648), MonthlyTemperature(month=June, avg=9.686)])" + }, + { + "lang": "kotlin", + "value": "data class Results(val temperatures: List<MonthlyTemperature>)\n\nval filter = Filters.empty()\n// Second half of the year\nval projection = Projections.fields(\n Projections.slice(YearlyTemperature::temperatures.name, 6, 6),\n Projections.excludeId()\n)\nval resultsFlow = collection.find<Results>(filter)\n .projection(projection)\nresultsFlow.collect { println(it) }\n" + }, + { + "lang": "console", + "value": "Results(temperatures=[MonthlyTemperature(month=July, avg=9.786), MonthlyTemperature(month=August, avg=9.617), MonthlyTemperature(month=September, avg=9.51), MonthlyTemperature(month=October, avg=10.042), MonthlyTemperature(month=November, avg=9.452), MonthlyTemperature(month=December, avg=9.86)])\nResults(temperatures=[MonthlyTemperature(month=July, avg=9.794), MonthlyTemperature(month=August, avg=9.741), MonthlyTemperature(month=September, avg=9.84), MonthlyTemperature(month=October, avg=10.15), MonthlyTemperature(month=November, avg=9.84), MonthlyTemperature(month=December, avg=10.366)])" + }, + { + "lang": "kotlin", + "value": "data class Results(val year: Int, val score: Double)\n\nval filter = Filters.text(\"even number\")\nval projection = Projections.fields(\n Projections.include(YearlyTemperature::year.name),\n Projections.metaTextScore(\"score\")\n)\nval resultsFlow = collection.find<Results>(filter).projection(projection)\nresultsFlow.collect { println(it) }\n" + }, + { + "lang": "console", + "value": "Results(year=2018, score=1.25)\nResults(year=2019, score=0.625)" + } + ], + "preview": "In this guide, you can learn how to specify projections using\nbuilders in the MongoDB Kotlin driver.", + "tags": null, + "facets": { + "target_product": ["drivers"], + "programming_language": ["kotlin"] + } + }, + { + "slug": "fundamentals/builders/sort", + "title": "Sorts Builders", + "headings": [ + "Overview", + "The Sorts Class", + "Ascending", + "Descending", + "Combining Sort Criteria", + "Text Score" + ], + "paragraphs": "In this guide, you can learn how to specify sort criteria for your\nqueries using builders in the MongoDB Kotlin Driver. Sort criteria are the rules MongoDB uses to sort your data. Some\nexamples of sort criteria are: Builders are classes provided by the Kotlin driver that help you\nconstruct BSON objects. To learn more, see the\n builders guide. 
You should read this guide if you want to learn how to use builders to\nspecify sort criteria for your queries. To learn the fundamentals of sorting in the Kotlin driver, see the\n sorting guide. The examples on this page use a sample collection that\ncontains the following documents: This data is modeled with the following Kotlin data class: Smallest number to largest number Earliest time of day to latest time of day Alphabetical order by first name The Sorts class is a builder that provides static factory methods for all sort\ncriteria operators supported by MongoDB. These methods return a Bson object\nthat you can pass to the sort() method of a FindFlow instance or to\n Aggregates.sort() . To learn more about the Aggregates \nclass, see the Aggregates builder guide. For more information about the classes and interfaces in this section, see the\nfollowing API Documentation: Sorts BSON FindFlow Aggregates To specify an ascending sort, use the Sorts.ascending() static\nfactory method. Pass the name of the field you want to sort on to\n Sorts.ascending() . The following example sorts the documents in the\n sample collection by ascending order\non the orderTotal field: To specify a descending sort, use the Sorts.descending() static factory\nmethod. Pass the name of the field you want to sort on to Sorts.descending() . The following example sorts the documents in the\n sample collection in descending order\non the orderTotal field: To combine sort criteria, use the Sorts.orderBy() static factory\nmethod. This method constructs an object containing an ordered list of sort\ncriteria. When performing the sort, if the previous sort criteria result in a\ntie, the sort uses the next sort criteria in the list to determine the order. The following example sorts the documents in the\n sample collection in descending order\non the date field, and in the event of a tie, ascending order on the\n orderTotal field: You can sort text search results by their text score, a value that indicates how\nclosely a search result matches your search string. To specify a sort by the\ntext score of a text search, use the Sorts.metaTextScore() static factory\nmethod. For a detailed example showing how to specify sort criteria using\nthe Sorts.metaTextScore() method, see\n the text search section of the sorting guide. 
For more information, see the Sorts class \nAPI Documentation.", + "code": [ + { + "lang": "json", + "value": "{ \"_id\": 1, \"date\": \"2022-01-03\", \"orderTotal\": 17.86, \"description\": \"1/2 lb cream cheese and 1 dozen bagels\" },\n{ \"_id\": 2, \"date\": \"2022-01-11\", \"orderTotal\": 83.87, \"description\": \"two medium vanilla birthday cakes\" },\n{ \"_id\": 3, \"date\": \"2022-01-11\", \"orderTotal\": 19.49, \"description\": \"1 dozen vanilla cupcakes\" },\n{ \"_id\": 4, \"date\": \"2022-01-15\", \"orderTotal\": 43.62, \"description\": \"2 chicken lunches and a diet coke\" },\n{ \"_id\": 5, \"date\": \"2022-01-23\", \"orderTotal\": 60.31, \"description\": \"one large vanilla and chocolate cake\" },\n{ \"_id\": 6, \"date\": \"2022-01-23\", \"orderTotal\": 10.99, \"description\": \"1 bagel, 1 orange juice, 1 muffin\" }" + }, + { + "lang": "kotlin", + "value": "data class Order(\n @BsonId val id: Int,\n val date: String,\n val orderTotal: Double,\n val description: String,\n)\n" + }, + { + "lang": "kotlin", + "value": "val resultsFlow = collection.find()\n .sort(Sorts.ascending(Order::orderTotal.name))\n\nresultsFlow.collect { println(it) }\n" + }, + { + "lang": "console", + "value": "Order(id=6, date=2022-01-23, orderTotal=10.99, description=1 bagel, 1 orange juice, 1 muffin)\nOrder(id=1, date=2022-01-03, orderTotal=17.86, description=1/2 lb cream cheese and 1 dozen bagels)\nOrder(id=3, date=2022-01-11, orderTotal=19.49, description=1 dozen vanilla cupcakes)\nOrder(id=4, date=2022-01-15, orderTotal=43.62, description=2 chicken lunches and a diet coke)\nOrder(id=5, date=2022-01-23, orderTotal=60.31, description=one large vanilla and chocolate cake)\nOrder(id=2, date=2022-01-11, orderTotal=83.87, description=two medium vanilla birthday cakes)" + }, + { + "lang": "kotlin", + "value": "val resultsFlow = collection.find()\n .sort(Sorts.descending(Order::orderTotal.name))\n\nresultsFlow.collect { println(it) }\n" + }, + { + "lang": "console", + "value": "Order(id=2, date=2022-01-11, orderTotal=83.87, description=two medium vanilla birthday cakes)\nOrder(id=5, date=2022-01-23, orderTotal=60.31, description=one large vanilla and chocolate cake)\nOrder(id=4, date=2022-01-15, orderTotal=43.62, description=2 chicken lunches and a diet coke)\nOrder(id=3, date=2022-01-11, orderTotal=19.49, description=1 dozen vanilla cupcakes)\nOrder(id=1, date=2022-01-03, orderTotal=17.86, description=1/2 lb cream cheese and 1 dozen bagels)\nOrder(id=6, date=2022-01-23, orderTotal=10.99, description=1 bagel, 1 orange juice, 1 muffin)" + }, + { + "lang": "kotlin", + "value": "val orderBySort = Sorts.orderBy(\n Sorts.descending(Order::date.name), Sorts.ascending(Order::orderTotal.name)\n)\nval results = collection.find().sort(orderBySort)\n\nresults.collect {println(it) }\n" + }, + { + "lang": "console", + "value": "Order(id=6, date=2022-01-23, orderTotal=10.99, description=1 bagel, 1 orange juice, 1 muffin)\nOrder(id=5, date=2022-01-23, orderTotal=60.31, description=one large vanilla and chocolate cake)\nOrder(id=4, date=2022-01-15, orderTotal=43.62, description=2 chicken lunches and a diet coke)\nOrder(id=3, date=2022-01-11, orderTotal=19.49, description=1 dozen vanilla cupcakes)\nOrder(id=2, date=2022-01-11, orderTotal=83.87, description=two medium vanilla birthday cakes)\nOrder(id=1, date=2022-01-03, orderTotal=17.86, description=1/2 lb cream cheese and 1 dozen bagels)" + } + ], + "preview": "In this guide, you can learn how to specify sort criteria for your\nqueries using builders in the MongoDB Kotlin 
Driver.", + "tags": null, + "facets": { + "target_product": ["drivers"], + "programming_language": ["kotlin"] + } + }, + { + "slug": "fundamentals/builders/updates", + "title": "Updates Builders", + "headings": [ + "Overview", + "Field Updates", + "Set", + "Unset", + "Set On Insert", + "Increment", + "Multiply", + "Rename", + "Min", + "Max", + "Current Date", + "Current Timestamp", + "Bit", + "Array Updates", + "Add to Set", + "Pop", + "Pull All", + "Pull", + "Push", + "Combining Multiple Update Operators" + ], + "paragraphs": "In this guide, you can learn how to specify updates by using\n builders in the MongoDB Kotlin Driver. The Updates builder provides helper methods for the following types of updates: Some methods that expect updates are: The Updates class provides static factory methods for all the MongoDB update\noperators. Each method returns an instance of the BSON \ntype, which you can pass to any method that expects an update argument. The examples in this guide use the following document: This example is modeled by the following data class unless otherwise noted: Field Updates Array Updates Combining Multiple Update Operators updateOne() updateMany() bulkWrite() For brevity, you may choose to import the methods of the Updates class: Use the set() \nmethod to assign the value of a field in an update operation. The following example sets the value of the qty field to 11 : The preceding example updates the original document to the following state: Use the unset() method\nto delete the value of a field in an update operation. The following example deletes the qty field: The preceding example updates the original document to the following state: Use the setOnInsert() \nmethod to assign the value of a field in an update operation on an\ninsert of a document. The following example sets the value of the color field to \"pink\" if\nthe operation resulted in the insert of a document: The preceding example updates the original document to the following state: If the document is not inserted, no change will occur. Use the inc() \nmethod to increment the value of a numeric field in an update operation. The following example increments the value of the qty field, which was 5 , by 3 : The preceding example updates the original document to the following state: Use the mul() \nmethod to multiply the value of a numeric field in an update operation. The following example multiplies the value of the qty field, which\nwas 5 , by 2 : The preceding example updates the original document to the following state: Use the rename() \nmethod to rename the value of a field in an update operation. The following example renames the qty field to quantity : The preceding example updates the original document to the following state: Use the min() \nmethod to set the value of the field to the given value if the given value is\nless than the current value of the field. The following example updates the qty field to 2 because 2 \nis less than the current value of the qty field ( 5 ): The preceding example updates the original document to the following state: Use the max() \nmethod to update the value of a field with the larger number of the two\nspecified in an update operation. The following example updates the qty field to 8 because 8 \nis greater than the current value of the qty field ( 5 ): The preceding example updates the original document to the following state: Use the currentDate() \nmethod to assign the value of a field in an update operation to the\ncurrent date as a BSON date . 
The following example sets the value of the lastModified field to\nthe current date as a BSON date: Since we wrote this page on June 16, 2023, the preceding example updates\nthe original document to the following state: Use the currentTimestamp() \nmethod to assign the value of a field in an update operation to the\ncurrent date as a timestamp . The following example sets the value of the lastModified field to\nthe current date as a BSON timestamp: Since we wrote this page on June 16, 2023, the preceding example updates\nthe original document to the following state: Use the bitwiseOr() ,\n bitwiseAnd() ,\nand bitwiseXor() \nmethods to perform a bitwise update of the integer value of a field in\nan update operation. The following example performs a bitwise OR between the number\n 10 and the integer value of the qty field ( 5 ): The bitwise operation results in 15 : The preceding example updates the original document to the following state: Use the addToSet() \nmethod to append a value to an array if the value is not already present\nin an update operation. The following example adds a Vendor instance that has a name \nvalue of \"C\" to the vendor array: The preceding example updates the original document to the following state: Use the popFirst() \nmethod to remove the first element of an array and the\n popLast() \nmethod to remove the last element of an array in an update operation. The following example removes the first entry of the vendor array: The preceding example updates the original document to the following state: Use the pullAll() \nmethod to remove all instances of specified values from an existing array in\nan update operation. The following example removes Vendor instances that have name values\nof \"A\" and \"M\" from the vendor array: The preceding example updates the original document to the following state: Use the pull() \nmethod to remove all instances of a specified value from an existing array in\nan update operation. The following example removes Vendor instances that have a name \nvalue of \"D\" from the vendor array: The preceding example updates the original document to the following state: Use the push() \nmethod to append a value to an array in an update operation. The following example adds a Vendor instance that has a name \nvalue of \"Q\" to the vendor array: The preceding example updates the original document to the following state: An application can update multiple fields of a single document by\ncombining two or more of the update operators described in the preceding\nsections. 
The following example increments the value of the qty field by 6 , sets\nthe value of the color field to \"purple\" , and adds a Vendor \ninstance that has a name value of \"R\" to the vendor field: The preceding example updates the original document to the following state:", + "code": [ + { + "lang": "json", + "value": "{\n \"_id\": 1,\n \"color\": \"red\",\n \"qty\": 5,\n \"vendor\": [\n { \"name\": \"A\" },\n { \"name\": \"D\" },\n { \"name\": \"M\" }\n ],\n \"lastModified\": { \"$date\": \"2000-01-01T07:00:00.000Z\" }\n}" + }, + { + "lang": "kotlin", + "value": "import com.mongodb.client.model.Updates.*" + }, + { + "lang": "kotlin", + "value": "data class PaintOrder (\n @BsonId val id: Int,\n val color: String,\n val qty: Int?,\n val vendor: List<Vendor>?,\n val lastModified: LocalDateTime?\n)\n\ndata class Vendor (\n val name: String,\n)\n" + }, + { + "lang": "json", + "value": "{\n \"_id\": 1,\n \"color\": \"red\",\n \"qty\": 11,\n \"vendor\": [\n { \"name\": \"A\" },\n { \"name\": \"D\" },\n { \"name\": \"M\" }\n ],\n \"lastModified\": { \"$date\": \"2000-01-01T07:00:00.000Z\" }\n}" + }, + { + "lang": "kotlin", + "value": "val filter = Filters.eq(\"_id\", 1)\nval update = Updates.set(PaintOrder::qty.name, 11)\ncollection.updateOne(filter, update)\n" + }, + { + "lang": "json", + "value": "{\n \"_id\": 1,\n \"color\": \"red\",\n \"vendor\": [\n { \"name\": \"A\" },\n { \"name\": \"D\" },\n { \"name\": \"M\" }\n ],\n \"lastModified\": { \"$date\": \"2000-01-01T07:00:00.000Z\" }\n}" + }, + { + "lang": "kotlin", + "value": "val filter = Filters.eq(\"_id\", 1)\nval update = Updates.unset(PaintOrder::qty.name)\ncollection.updateOne(filter, update)\n" + }, + { + "lang": "json", + "value": "{\n \"_id\": 1,\n \"color\": \"pink\"\n}" + }, + { + "lang": "kotlin", + "value": "val filter = Filters.eq(\"_id\", 1)\nval update = Updates.setOnInsert(PaintOrder::color.name, \"pink\")\ncollection.updateOne(filter, update, UpdateOptions().upsert(true))\n" + }, + { + "lang": "json", + "value": "{\n \"_id\": 1,\n \"color\": \"red\",\n \"qty\": 8,\n \"vendor\": [\n { \"name\": \"A\" },\n { \"name\": \"D\" },\n { \"name\": \"M\" }\n ],\n \"lastModified\": { \"$date\": \"2000-01-01T07:00:00.000Z\" }\n}" + }, + { + "lang": "kotlin", + "value": "val filter = Filters.eq(\"_id\", 1)\nval update = Updates.inc(PaintOrder::qty.name, 3)\ncollection.updateOne(filter, update)\n" + }, + { + "lang": "json", + "value": "{\n \"_id\": 1,\n \"color\": \"red\",\n \"qty\": 10,\n \"vendor\": [\n { \"name\": \"A\" },\n { \"name\": \"D\" },\n { \"name\": \"M\" }\n ],\n \"lastModified\": { \"$date\": \"2000-01-01T07:00:00.000Z\" }\n}" + }, + { + "lang": "kotlin", + "value": "val filter = Filters.eq(\"_id\", 1)\nval update = Updates.mul(PaintOrder::qty.name, 2)\ncollection.updateOne(filter, update)\n" + }, + { + "lang": "json", + "value": "{\n \"_id\": 1,\n \"color\": \"red\",\n \"vendor\": [\n { \"name\": \"A\" },\n { \"name\": \"D\" },\n { \"name\": \"M\" }\n ],\n \"lastModified\": { \"$date\": \"2000-01-01T07:00:00.000Z\" },\n \"quantity\": 5\n}" + }, + { + "lang": "kotlin", + "value": "val filter = Filters.eq(\"_id\", 1)\nval update = Updates.rename(PaintOrder::qty.name, \"quantity\")\ncollection.updateOne(filter, update)\n" + }, + { + "lang": "json", + "value": "{\n \"_id\": 1,\n \"color\": \"red\",\n \"qty\": 2,\n \"vendor\": [\n { \"name\": \"A\" },\n { \"name\": \"D\" },\n { \"name\": \"M\" }\n ],\n \"lastModified\": { \"$date\": \"2000-01-01T07:00:00.000Z\" }\n}" + }, + { + "lang": "kotlin", + "value": "val filter = 
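/* editorial note: Filters.eq selects the sample document; Updates.min below keeps the smaller of 2 and the current qty (5) */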
Filters.eq(\"_id\", 1)\nval update = Updates.min(PaintOrder::qty.name, 2)\ncollection.updateOne(filter, update)\n" + }, + { + "lang": "json", + "value": "{\n \"_id\": 1,\n \"color\": \"red\",\n \"qty\": 8,\n \"vendor\": [\n { \"name\": \"A\" },\n { \"name\": \"D\" },\n { \"name\": \"M\" }\n ],\n \"lastModified\": { \"$date\": \"2000-01-01T07:00:00.000Z\" }\n}" + }, + { + "lang": "kotlin", + "value": "val filter = Filters.eq(\"_id\", 1)\nval update = Updates.max(PaintOrder::qty.name, 8)\ncollection.updateOne(filter, update)\n" + }, + { + "lang": "json", + "value": "{\n \"_id\": 1,\n \"color\": \"red\",\n \"qty\": 5,\n \"vendor\": [\n { \"name\": \"A\" },\n { \"name\": \"D\" },\n { \"name\": \"M\" }\n ],\n \"$date\": \"2023-06-16T17:13:06.373Z\"\n}" + }, + { + "lang": "kotlin", + "value": "val filter = Filters.eq(\"_id\", 1)\nval update = Updates.currentDate(PaintOrder::lastModified.name)\ncollection.updateOne(filter, update)\n" + }, + { + "lang": "json", + "value": "{\n \"_id\": 1,\n \"color\": \"red\",\n \"qty\": 5,\n \"vendor\": [\n { \"name\": \"A\" },\n { \"name\": \"D\" },\n { \"name\": \"M\" }\n ],\n \"$timestamp\": { \"t\": 1686935654, \"i\": 3 }\n}" + }, + { + "lang": "kotlin", + "value": "// Create a new instance of the collection with the flexible `Document` type\n// to allow for the changing of the `lastModified` field to a `BsonTimestamp`\n// from a `LocalDateTime`.\nval collection = database.getCollection(\"paint_orders\")\n\nval filter = Filters.eq(\"_id\", 1)\nval update = Updates.currentTimestamp(PaintOrder::lastModified.name)\ncollection.updateOne(filter, update)\n" + }, + { + "lang": "none", + "value": "0101 // bit representation of 5\n1010 // bit representation of 10\n----\n1111 // bit representation of 15" + }, + { + "lang": "json", + "value": "{\n \"_id\": 1,\n \"color\": \"red\",\n \"qty\": 15,\n \"vendor\": [\n { \"name\": \"A\" },\n { \"name\": \"D\" },\n { \"name\": \"M\" }\n ],\n \"lastModified\": { \"$date\": \"2000-01-01T07:00:00.000Z\" }\n}" + }, + { + "lang": "kotlin", + "value": "val filter = Filters.eq(\"_id\", 1)\nval update = Updates.bitwiseOr(PaintOrder::qty.name, 10)\ncollection.updateOne(filter, update)\n" + }, + { + "lang": "json", + "value": "{\n \"_id\": 1,\n \"color\": \"red\",\n \"qty\": 5,\n \"vendor\": [\n { \"name\": \"A\" },\n { \"name\": \"D\" },\n { \"name\": \"M\" },\n { \"name\": \"C\" }\n ],\n \"lastModified\": { \"$date\": \"2000-01-01T07:00:00.000Z\" }\n}" + }, + { + "lang": "kotlin", + "value": "val filter = Filters.eq(\"_id\", 1)\nval update = Updates.addToSet(PaintOrder::vendor.name, Vendor(\"C\"))\ncollection.updateOne(filter, update)\n" + }, + { + "lang": "json", + "value": "{\n \"_id\": 1,\n \"color\": \"red\",\n \"qty\": 5,\n \"vendor\": [\n { \"name\": \"D\" },\n { \"name\": \"M\" }\n ],\n \"lastModified\": { \"$date\": \"2000-01-01T07:00:00.000Z\" }\n}" + }, + { + "lang": "kotlin", + "value": "val filter = Filters.eq(\"_id\", 1)\nval update = Updates.popFirst(PaintOrder::vendor.name)\ncollection.updateOne(filter, update)\n" + }, + { + "lang": "json", + "value": "{\n \"_id\": 1,\n \"color\": \"red\",\n \"qty\": 5,\n \"vendor\": [\n { \"name\": \"D\" }\n ],\n \"lastModified\": { \"$date\": \"2000-01-01T07:00:00.000Z\" }\n}" + }, + { + "lang": "kotlin", + "value": "val filter = Filters.eq(\"_id\", 1)\nval update = Updates.pullAll(PaintOrder::vendor.name, listOf(Vendor(\"A\"), Vendor(\"M\")))\ncollection.updateOne(filter, update)\n" + }, + { + "lang": "json", + "value": "{\n \"_id\": 1,\n \"color\": \"red\",\n \"qty\": 5,\n 
\"vendor\": [\n { \"name\": \"A\" },\n { \"name\": \"M\" }\n ],\n \"lastModified\": { \"$date\": \"2000-01-01T07:00:00.000Z\" }\n}" + }, + { + "lang": "kotlin", + "value": "val filter = Filters.eq(\"_id\", 1)\nval update = Updates.pull(PaintOrder::vendor.name, Vendor(\"D\"))\ncollection.updateOne(filter, update)\n" + }, + { + "lang": "json", + "value": "{\n \"_id\": 1,\n \"color\": \"red\",\n \"qty\": 5,\n \"vendor\": [\n { \"name\": \"A\" },\n { \"name\": \"D\" },\n { \"name\": \"M\" },\n { \"name\": \"Q\" }\n ],\n \"lastModified\": { \"$date\": \"2000-01-01T07:00:00.000Z\" }\n}" + }, + { + "lang": "kotlin", + "value": "val filter = Filters.eq(\"_id\", 1)\nval update = Updates.push(PaintOrder::vendor.name, Vendor(\"Q\"))\ncollection.updateOne(filter, update)\n" + }, + { + "lang": "json", + "value": "{\n \"_id\": 1,\n \"color\": \"purple\",\n \"qty\": 11,\n \"vendor\": [\n { \"name\": \"A\" },\n { \"name\": \"D\" },\n { \"name\": \"M\" },\n { \"name\": \"R\" }\n ],\n \"lastModified\": { \"$date\": \"2000-01-01T07:00:00.000Z\" }\n}" + }, + { + "lang": "kotlin", + "value": "val filter = Filters.eq(\"_id\", 1)\nval update = Updates.combine(\n Updates.set(PaintOrder::color.name, \"purple\"),\n Updates.inc(PaintOrder::qty.name, 6),\n Updates.push(PaintOrder::vendor.name, Vendor(\"R\"))\n)\ncollection.updateOne(filter, update)\n" + } + ], + "preview": "In this guide, you can learn how to specify updates by using\nbuilders in the MongoDB Kotlin Driver.", + "tags": "code example, change data, nested class", + "facets": { + "genre": ["reference"], + "target_product": ["drivers"], + "programming_language": ["kotlin"] + } + }, + { + "slug": "fundamentals/builders", + "title": "Builders", + "headings": [ + "Overview", + "Why Use Builders?", + "Scenario", + "Using the MongoDB Shell", + "Without Using Builders", + "Using Builders", + "Available Builders" + ], + "paragraphs": "This section includes guides on how to use each of the available\nbuilders, and demonstrates the utility the MongoDB Kotlin driver builder classes\nprovide. The Kotlin driver provides classes to simplify the process for developers\nto use CRUD operations and the Aggregation API. The static utility methods allow you\nto build a query more efficiently. Using the builders class, you leverage the power of: When using builders, the Kotlin compiler and the IDE catch errors such as misspelled\noperators early on. When using the MongoDB shell or plain Kotlin, you\nwrite operators as strings and get no visual indication of a problem,\npushing these errors to runtime instead of compile time. With the builder classes, you write operators as methods. The IDE\ninstantly underlines and gives you a red bar on the right indicating\nsomething is wrong. While developing, the IDE also shows you the\nmethods you can use. It automatically completes your code with\nplaceholder parameters once you select which method you want to use. The Kotlin compiler and the IDE to find errors during development The IDE for discovery and code completion Imagine we want to send a marketing email to all users in our users \ncollection with the following criteria: We only want their email address, so we'll ensure our query doesn't\nreturn data we pay bandwidth costs for but don't need. The documents in the users collection are modeled with the following data class\nin our application: Users that identify as female gender Users that are older than 29 Aggregates for building aggregation pipelines. Filters for building query filters. Indexes for creating index keys. 
Projections for building projections. Sorts for building sort criteria. Updates for building updates.", + "code": [ + { + "lang": "kotlin", + "value": "data class User(\n @BsonId\n val id: BsonObjectId = BsonObjectId(),\n val gender: String,\n val age: Int,\n val email: String,\n)\n" + }, + { + "lang": "js", + "value": "collection.find({ \"gender\": \"female\", \"age\" : { \"$gt\": 29 }}, { \"_id\": 0, \"email\": 1 })" + }, + { + "lang": "kotlin", + "value": "data class Results(val email: String)\n\nval filter = Document().append(\"gender\", \"female\").append(\"age\", Document().append(\"\\$gt\", 29))\nval projection = Document().append(\"_id\", 0).append(\"email\", 1)\nval results = collection.find<Results>(filter).projection(projection)\n" + }, + { + "lang": "kotlin", + "value": "import com.mongodb.client.model.Filters\nimport com.mongodb.client.model.Projections" + }, + { + "lang": "kotlin", + "value": "data class Results(val email: String)\n\nval filter = Filters.and(Filters.eq(User::gender.name, \"female\"), Filters.gt(User::age.name, 29))\nval projection = Projections.fields(Projections.excludeId(), Projections.include(\"email\"))\nval results = collection.find<Results>(filter).projection(projection)\n" + } + ], + "preview": "This section includes guides on how to use each of the available\nbuilders, and demonstrates the utility the MongoDB Kotlin driver builder classes\nprovide.", + "tags": null, + "facets": { + "target_product": ["drivers"], + "programming_language": ["kotlin"] + } + }, + { + "slug": "fundamentals/collations", + "title": "Collations", + "headings": [ + "Overview", + "Sample Data for Examples", + "Collations in MongoDB", + "How to Specify Collations", + "Collection", + "Index", + "Operation", + "Index Types That Do Not Support Collations", + "Collation Options", + "Collation Examples", + "find() and sort() Example", + "findOneAndUpdate() Example", + "findOneAndDelete() Example", + "Aggregation Example" + ], + "paragraphs": "In this guide, you can learn how to use collations with MongoDB to order your\nquery or aggregation operation results by string values. A collation is a set\nof character ordering and matching rules that apply to a specific language and\nlocale. You can learn more about collations in the following sections in this guide: Collations in MongoDB How to Specify Collations Collation Options Collation Code Examples The examples on this page use a MongoDB collection with the following documents: These documents are represented by the following data class: MongoDB sorts strings using binary collation by default. The binary\ncollation uses the ASCII standard character values to\ncompare and order strings. Certain languages and locales have specific\ncharacter ordering conventions that differ from the ASCII character values. For example, in Canadian French, the right-most accented character\n(diacritic) determines the ordering for strings when all preceding characters\nare the same. Consider the following Canadian French words: When using binary collation, MongoDB sorts them in the following order: When using the Canadian French collation, MongoDB sorts them in a different\norder as shown below: cote cot\u00e9 c\u00f4te c\u00f4t\u00e9 MongoDB supports collations on most CRUD operations \nand aggregations. For a complete list of supported operations, see the\n Operations that Support Collations \nserver manual page. 
You can specify the locale code and optional variant in the following string\nformat: The following example specifies the \"de\" locale code and \"phonebook\" variant\ncode: If you do not need to specify a variant, omit everything after the locale\ncode as follows: For a complete list of supported locales, see our server manual page on\n Supported Languages and Locales . The following sections show you different ways to apply collations in\nMongoDB: Collection Index Operation You can set a default collation when you create a collection. When you\ncreate a collection with a specified collation, all supported operations\nthat scan that collection apply the rules of the collation. You can only assign a default collation to a collection when you create that\ncollection. However, you can specify a collation in a new index on an existing\ncollection. See the Index section of this guide\nfor more information. The following snippet shows how to specify the \"en_US\" locale collation\nwhen creating a new collection called items : To check whether you created the collation successfully, retrieve a list\nof the indexes on that collection as follows: You can specify a collation when you create a new index on a collection.\nThe index stores an ordered representation of the documents in the\ncollection so your operation does not need to perform the ordering\nin-memory. To use the index, your operation must meet the following\ncriteria: The following code snippet shows how you can create an index on the \"firstName\"\nfield with the \"en_US\" locale collation in ascending order: To check whether you created the collation successfully, retrieve a list\nof the indexes on that collection as follows: The following code snippet shows an example operation that specifies the\nsame collation and is covered by the index we created in the preceding code snippet: The operation uses the same collation as the one specified in the index. The operation is covered by the index that contains the collation. You can override the default collation on a collection by passing the\nnew collation as a parameter to one of the supported operations. However,\nsince the operation does not use an index, the operation may not perform\nas well as one that is covered by an index. For more information on the\ndisadvantages of sorting operations not covered by an index, see the server\nmanual page on Use Indexes to Sort Query Results . The following code snippet shows an example query operation with the\nfollowing characteristics: The referenced collection contains the default collation \"en_US\" similar to\nthe one specified in the Collection section. The query specifies the Icelandic (\"is\") collation which is not covered\nby the collection's default collation index. Since the specified collation is not covered by an index, the sort\noperation is performed in-memory. While most MongoDB index types support collation, the following types support\nonly binary comparison: text 2d geoHaystack This section covers various collation options and how to specify them to\nfurther refine the ordering and matching behavior. You can use the Collation.Builder class to specify values for the\npreceding collation options. You can call the build() method to construct a\n Collation object as shown in the following code snippet: For more information on the corresponding methods and parameters they\ntake, see the API Documentation for Collation.Builder . 
Collation Option Description Locale Backwards Case-sensitivity Alternate Case First Max Variable Strength Normalization Numeric Ordering This section contains examples that demonstrate how to use a selection of\nMongoDB operations that support collations. In the following examples, we specify the \"de@collation=phonebook\" locale and\nvariant collation. The \"de\" part of the collation specifies the German\nlocale and the \"collation=phonebook\" part specifies a variant. The\n\"de\" locale collation contains rules for prioritizing proper nouns,\nidentified by capitalization of the first letter. In the\n\"collation=phonebook\" variant, characters with umlauts are ordered before\nthe same characters without them in an ascending sort. The following example demonstrates how you can apply a collation when\nretrieving sorted results from a collection. To perform this\noperation, call find() on the example collection and chain the\n collation() and sort() methods to specify the order in which you want\nto receive the results. When we perform this operation on our example collection ,\nthe output should resemble the following: For more information about the methods and classes mentioned in this section,\nsee the following API Documentation: find() sort() Sorts This section demonstrates how you can specify a collation in an\noperation that updates the first match from your query. To specify the\ncollation for this operation, instantiate a FindOneAndUpdateOptions \nobject, set a collation on it, and pass it as a parameter to your call to\nthe findOneAndUpdate() method. In this example, we demonstrate the following: Since \"G\u00fcnter\" is lexically before \"Gunter\" using the\n de@collation=phonebook collation in ascending order, the following operation\nreturns \"G\u00fcnter\" before \"Gunter\" in the results: For more information about the methods and classes mentioned in this section,\nsee the following API Documentation: Retrieve the first document in our example collection \nthat precedes \"Gunter\" in an ascending order. Set options for operation including the \"de@collation=phonebook\" \ncollation. Add a new field \"verified\" with the value \"true\". Retrieve and print the updated document. The following code example uses imports from the\n import com.mongodb.client.model package for convenience. findOneAndUpdate findOneAndUpdateOptions Filters Updates Sorts This section demonstrates how you can specify a numerical ordering of\nstrings in a collation in an operation that deletes the first match from your\nquery. To specify the collation for this operation, instantiate\na FindOneAndDeleteOptions object, set a numeric ordering collation on\nit, and pass it as a parameter to your call to the findOneAndDelete() \nmethod. This example calls the findOneAndDelete() operation on a collection that\ncontains the following documents: These documents are represented by the following data class: In the collation, we set the locale option to \"en\" and the\n numericOrdering option to \"true\" in order to sort strings based on their\nnumerical order. The numeric value of the string \"179\" is greater than the number 100, so\nthe preceding document is the only match. If we perform the same operation without the numerical ordering collation\non the original collection of three documents, the filter matches all of\nour documents since \"100\" comes before \"16\", \"84\", and \"179\" when ordering\nby binary collation. 
For more information about the methods and classes mentioned in this section,\nsee the following API Documentation: The following code example uses imports from the\n import com.mongodb.client.model package for convenience. findOneAndDelete FindOneAndDeleteOptions Filters Sorts This section demonstrates how you can specify a collation in an aggregation\noperation. In an aggregation operation, you can specify a series of\naggregation stages which is collectively called the aggregation pipeline. To\nperform an aggregation, call the aggregate() method on a\n MongoCollection object. To specify a collation for an aggregation operation, call the collation() \nmethod on the AggregateFlow returned by the aggregation operation.\nMake sure to specify a sort aggregation stage on which to apply the\ncollation in your aggregation pipeline. The following example shows how we can construct an aggregation pipeline on\nthe example collection and apply\na collation by specifying the following: For more information about the methods and classes mentioned in this section,\nsee the following API Documentation: A group aggregation stage using the Aggregates.group() helper to\nidentify each document by the firstName field and use that value as\nthe _id of the result. An accumulator in the group aggregation stage to sum the number of\ninstances of matching values in the firstName field. Apply an ascending sort to the _id field of the output documents of\nthe prior aggregation stage. Construct a collation object, specifying the German locale and\na collation strength that ignores accents and umlauts. aggregate() AggregateFlow CollationStrength Accumulators Aggregates Sorts", + "code": [ + { + "lang": "json", + "value": "{ \"_id\" : 1, \"firstName\" : \"Klara\" }\n{ \"_id\" : 2, \"firstName\" : \"Gunter\" }\n{ \"_id\" : 3, \"firstName\" : \"G\u00fcnter\" }\n{ \"_id\" : 4, \"firstName\" : \"J\u00fcrgen\" }\n{ \"_id\" : 5, \"firstName\" : \"Hannah\" }" + }, + { + "lang": "kotlin", + "value": "data class FirstName(\n @BsonId val id: Int, \n val firstName: String, \n val verified: Boolean = false\n)\n" + }, + { + "lang": "none", + "value": "cote\ncot\u00e9\nc\u00f4te\nc\u00f4t\u00e9" + }, + { + "lang": "none", + "value": "cote\nc\u00f4te\ncot\u00e9\nc\u00f4t\u00e9" + }, + { + "lang": "none", + "value": "\"<locale code>@collation=<variant>\"" + }, + { + "lang": "none", + "value": "\"de@collation=phonebook\"" + }, + { + "lang": "none", + "value": "\"de\"" + }, + { + "lang": "kotlin", + "value": "database.createCollection(\n \"names\",\n CreateCollectionOptions().collation(\n Collation.builder().locale(\"en_US\").build()\n )\n)\n" + }, + { + "lang": "kotlin", + "value": "val collection = database.getCollection<FirstName>(\"names\")\nval indexInformation = collection.listIndexes().first()\nprintln(indexInformation.toJson())\n" + }, + { + "lang": "javascript", + "value": "{\n // ...\n \"collation\": {\n \"locale\": \"en_US\",\n // ...\n },\n // ...\n}" + }, + { + "lang": "kotlin", + "value": "val collection = database.getCollection<FirstName>(\"names\")\nval idxOptions = IndexOptions().collation(Collation.builder().locale(\"en_US\").build())\ncollection.createIndex(Indexes.ascending(FirstName::firstName.name), idxOptions)\n" + }, + { + "lang": "kotlin", + "value": "val collection = database.getCollection<FirstName>(\"names\")\nval indexInformation = collection.listIndexes().first()\nprintln(indexInformation.toJson())\n" + }, + { + "lang": "javascript", + "value": "{\n // ...\n \"collation\": {\n \"locale\": \"en_US\",\n // ...\n },\n // ...\n}" + }, + { + "lang": "kotlin", 
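"value": "// Hedged editorial example, not from the original page; assumes the names\n// collection and FirstName data class above. A collation can also be passed\n// to other supported operations through their options classes, such as\n// CountOptions, here counting case-insensitive matches of a first name.\nval caseInsensitive = Collation.builder()\n .locale(\"en_US\")\n .collationStrength(CollationStrength.SECONDARY)\n .build()\nval count = collection.countDocuments(\n Filters.eq(FirstName::firstName.name, \"gunter\"),\n CountOptions().collation(caseInsensitive)\n)\nprintln(count)\n" + }, + { + "lang": "kotlin", 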
"value": "val resultsFlow = collection.find()\n .collation(Collation.builder().locale(\"en_US\").build())\n .sort(Sorts.ascending(FirstName::firstName.name));\n" + }, + { + "lang": "kotlin", + "value": "val findFlow = collection.find()\n .collation(Collation.builder().locale(\"is\").build())\n .sort(Sorts.ascending(FirstName::firstName.name))\n" + }, + { + "lang": "kotlin", + "value": "Collation.builder()\n .caseLevel(true)\n .collationAlternate(CollationAlternate.SHIFTED)\n .collationCaseFirst(CollationCaseFirst.UPPER)\n .collationMaxVariable(CollationMaxVariable.SPACE)\n .collationStrength(CollationStrength.SECONDARY)\n .locale(\"en_US\")\n .normalization(false)\n .numericOrdering(true)\n .build()\n" + }, + { + "lang": "kotlin", + "value": "val resultsFlow = collection.find()\n .collation(Collation.builder().locale(\"de@collation=phonebook\").build())\n .sort(Sorts.ascending(FirstName::firstName.name))\n\nresultsFlow.collect { println(it) }\n" + }, + { + "lang": "console", + "value": "FirstName(id=3, firstName=G\u00fcnter, verified=false)\nFirstName(id=2, firstName=Gunter, verified=false)\nFirstName(id=5, firstName=Hannah, verified=false)\nFirstName(id=4, firstName=J\u00fcrgen, verified=false)\nFirstName(id=1, firstName=Klara, verified=false)" + }, + { + "lang": "kotlin", + "value": "val result = collection.findOneAndUpdate(\n Filters.lt(FirstName::firstName.name, \"Gunter\"),\n Updates.set(\"verified\", true),\n FindOneAndUpdateOptions()\n .collation(Collation.builder().locale(\"de@collation=phonebook\").build())\n .sort(Sorts.ascending(FirstName::firstName.name))\n .returnDocument(ReturnDocument.AFTER)\n)\nprintln(result)\n" + }, + { + "lang": "console", + "value": "FirstName(id=3, firstName=G\u00fcnter, verified=true)" + }, + { + "lang": "json", + "value": "{ \"_id\" : 1, \"a\" : \"16 apples\" }\n{ \"_id\" : 2, \"a\" : \"84 oranges\" }\n{ \"_id\" : 3, \"a\" : \"179 bananas\" }" + }, + { + "lang": "kotlin", + "value": "data class CollationExample(@BsonId val id: Int, val a: String)\n" + }, + { + "lang": "kotlin", + "value": "val result = collection.findOneAndDelete(\n Filters.gt(CollationExample::a.name, \"100\"),\n FindOneAndDeleteOptions()\n .collation(Collation.builder().locale(\"en\").numericOrdering(true).build())\n .sort(Sorts.ascending(CollationExample::a.name))\n)\nprintln(result)\n" + }, + { + "lang": "console", + "value": "CollationExample(id=3, a=179 bananas)" + }, + { + "lang": "kotlin", + "value": "data class Result(@BsonId val id: String, val nameCount: Int)\nval groupStage = Aggregates.group(\n \"\\$${FirstName::firstName.name}\",\n Accumulators.sum(\"nameCount\", 1)\n)\nval sortStage = Aggregates.sort(Sorts.ascending(\"_id\"))\nval resultsFlow = collection.aggregate(listOf(groupStage, sortStage))\n .collation(\n Collation.builder().locale(\"de\")\n .collationStrength(CollationStrength.PRIMARY)\n .build()\n )\nresultsFlow.collect { println(it) }\n" + }, + { + "lang": "console", + "value": "Result(id=Gunter, nameCount=2)\nResult(id=Hannah, nameCount=1)\nResult(id=J\u00fcrgen, nameCount=1)\nResult(id=Klara, nameCount=1)" + } + ], + "preview": "In this guide, you can learn how to use collations with MongoDB to order your\nquery or aggregation operation results by string values. 
A collation is a set\nof character ordering and matching rules that apply to a specific language and\nlocale.", + "tags": null, + "facets": { + "target_product": ["drivers"], + "programming_language": ["kotlin"] + } + }, + { + "slug": "fundamentals/connection/connect", + "title": "Connect to MongoDB", + "headings": [ + "MongoClient", + "Connection URI", + "Atlas Connection Example", + "Other Ways to Connect to MongoDB", + "Connect to a MongoDB Server on Your Local Machine", + "Connect to a Replica Set" + ], + "paragraphs": "In this guide, you can learn how to connect to a MongoDB instance or\nreplica set using the Kotlin driver. You can view sample code to connect to an Atlas cluster \nor continue reading to learn more about the MongoClient class and\nconnection URIs. You can connect to and communicate with MongoDB using the MongoClient \nclass. Use the MongoClient.create() method to construct a MongoClient . To learn more about how connection pools work in the driver, see the FAQ page . All resource usage limits, such as max connections, apply to individual\n MongoClient instances. To learn about the different settings you can use to control the\nbehavior of your MongoClient , see the guide on\n MongoClient Settings . As each MongoClient represents a thread-safe pool of connections to the\ndatabase, most applications only require a single instance of a\n MongoClient , even across multiple threads. Always call MongoClient.close() to clean up resources when an\ninstance is no longer needed. The connection URI provides a set of instructions that the driver uses to\nconnect to a MongoDB deployment. It instructs the driver on how it should\nconnect to MongoDB and how it should behave while connected. The following\nfigure explains each part of a sample connection URI: This figure uses the Standard Connection String Format ,\n mongodb for the protocol. You can also use the DNS Seed List Connection Format ,\n mongodb+srv , if you want more flexibility of deployment and the ability\nto change the servers in rotation without reconfiguring clients. The next part of the connection URI contains your credentials if you are\nusing a password-based authentication mechanism. Replace the value of user \nwith your database username and pass with your database user's password. If your\nauthentication mechanism does not require credentials, omit this part of\nthe connection URI. The next part of the connection URI specifies the hostname or IP\naddress, followed by the port of your MongoDB instance. In the example,\n sample.host represents the hostname and 27017 is the port number.\nReplace these values to refer to your MongoDB instance. The last part of the connection URI contains connection options as parameters.\nIn the example, we set two connection options: maxPoolSize=20 and\n w=majority . For more information on connection options, skip to the\n Connection Options section of this guide. If your deployment is on MongoDB Atlas, see the\n Atlas driver connection guide \nand select Kotlin from the language dropdown to retrieve your connection\nstring. To connect to a MongoDB deployment on Atlas, create a client. You can\ncreate a client that uses your connection string and other\nclient options by passing a MongoClientSettings object to the\n MongoClient.create() method. To instantiate a MongoClientSettings object, use the builder method to specify\nyour connection string and any other client options, and then call the build() \nmethod. 
Chain the applyConnectionString() method to the builder to specify your\nconnection URI. You can set the Stable API version client option to avoid\nbreaking changes when you upgrade to a new server version. To\nlearn more about the Stable API feature, see the Stable API page . The following code shows how you can specify the connection string and\nthe Stable API client option when connecting to a MongoDB\ndeployment on Atlas and verify that the connection is successful: If you are connecting to a single MongoDB server instance or replica set\nthat is not hosted on Atlas, see the following sections to find out how to\nconnect. To test whether you can connect to your server, replace the connection\nstring in the Connect to MongoDB Atlas code\nexample and run it. If you need to run a MongoDB server on your local machine for development\npurposes instead of using an Atlas cluster, you need to complete the following: After you successfully start your MongoDB server, specify your connection\nstring in your driver connection code. If your MongoDB Server is running locally, you can use the connection string\n \"mongodb://localhost:<port>\" where <port> is the port number you\nconfigured your server to listen for incoming connections. If you need to specify a different hostname or IP address, see our Server\nManual entry on Connection Strings . Download the Community \nor Enterprise version\nof MongoDB Server. Install and configure \nMongoDB Server. Start the server. Always secure your MongoDB server from malicious attacks. See our\n Security Checklist for a\nlist of security recommendations. A MongoDB replica set deployment is a group of connected instances that\nstore the same set of data. This configuration of instances provides data\nredundancy and high data availability. To connect to a replica set deployment, specify the hostnames (or IP\naddresses) and port numbers of the members of the replica set. If you are not able to provide a full list of hosts in the replica set,\nyou can specify a single host or a subset of the hosts in the replica set and\ninstruct the driver to perform automatic discovery in one of the following\nways: The following examples show how to specify multiple hosts to a MongoClient \ninstance using either the ConnectionString or MongoClientSettings \nclass. Select the tab that corresponds to your preferred class. Specify the name of the replica set as the value of the replicaSet \nparameter Specify false as the value of the directConnection parameter Specify more than one host in the replica set Although you can specify a subset of the hosts in a replica set,\ninclude all the hosts in the replica set to ensure the driver is able to\nestablish the connection if one of the hosts is unreachable.", + "code": [ + { + "lang": "kotlin", + "value": "// Replace the placeholder with your Atlas connection string\nval uri = \"\"\n\n// Construct a ServerApi instance using the ServerApi.builder() method\nval serverApi = ServerApi.builder()\n .version(ServerApiVersion.V1)\n .build()\nval settings = MongoClientSettings.builder()\n .applyConnectionString(ConnectionString(uri))\n .serverApi(serverApi)\n .build()\n// Create a new client and connect to the server\nval mongoClient = MongoClient.create(settings)\nval database = mongoClient.getDatabase(\"admin\")\ntry {\n // Send a ping to confirm a successful connection\n val command = Document(\"ping\", BsonInt64(1))\n val commandResult = database.runCommand(command)\n println(\"Pinged your deployment.
You successfully connected to MongoDB!\")\n} catch (me: MongoException) {\n System.err.println(me)\n}\n" + }, + { + "lang": "kotlin", + "value": "val connectionString = ConnectionString(\"mongodb://host1:27017,host2:27017,host3:27017/\")\nval mongoClient = MongoClient.create(connectionString)\n" + }, + { + "lang": "kotlin", + "value": "val seed1 = ServerAddress(\"host1\", 27017)\nval seed2 = ServerAddress(\"host2\", 27017)\nval seed3 = ServerAddress(\"host3\", 27017)\nval settings = MongoClientSettings.builder()\n .applyToClusterSettings { builder ->\n builder.hosts(\n listOf(seed1, seed2, seed3)\n )\n }\n .build()\nval mongoClient = MongoClient.create(settings)\n" + } + ], + "preview": "In this guide, you can learn how to connect to a MongoDB instance or\nreplica set using the Kotlin driver.", + "tags": null, + "facets": { + "target_product": ["drivers"], + "programming_language": ["kotlin"] + } + }, + { + "slug": "fundamentals/connection/connection-options", + "title": "Connection Options", + "headings": [], + "paragraphs": "This section explains MongoDB connection and authentication options\nsupported by the driver. You can pass the connection options as\nparameters of the connection URI to specify the behavior of the client. For a complete list of options, see the\n ConnectionString \nAPI reference page. Option Name Type Description minPoolSize integer Specifies the minimum number of connections that must exist at\nany moment in a single connection pool. maxPoolSize integer Specifies the maximum number of connections that a connection\npool may have at a given time. waitQueueTimeoutMS integer Specifies the maximum amount of time, in milliseconds that a\nthread may wait for a connection to become available. serverSelectionTimeoutMS integer Specifies the maximum amount of time, in milliseconds, the driver\nwill wait for server selection to succeed before throwing an\nexception. localThresholdMS integer When communicating with multiple instances of MongoDB in a replica\nset, the driver will only send requests to a server whose\nresponse time is less than or equal to the server with the fastest\nresponse time plus the local threshold, in milliseconds. heartbeatFrequencyMS integer Specifies the frequency, in milliseconds that the driver will\nwait between attempts to determine the current state of each\nserver in the cluster. replicaSet string Specifies that the connection string \nprovided includes multiple hosts. When specified, the driver\nattempts to find all members of that set. ssl boolean Specifies that all communication with MongoDB instances should\nuse TLS/SSL. Superseded by the tls option. tls boolean Specifies that all communication with MongoDB instances should\nuse TLS. Supersedes the ssl option. tlsInsecure boolean Specifies that the driver should allow invalid hostnames for TLS\nconnections. Has the same effect as setting\n tlsAllowInvalidHostnames to true . To configure TLS security\nconstraints in other ways, use a\n custom SSLContext . tlsAllowInvalidHostnames boolean Specifies that the driver should allow invalid hostnames in the\ncertificate for TLS connections. Supersedes\n sslInvalidHostNameAllowed . connectTimeoutMS integer Specifies the maximum amount of time, in milliseconds, the Kotlin\ndriver waits for a connection to open before timing out. A value of\n 0 instructs the driver to never time out while waiting for a connection\nto open. 
socketTimeoutMS integer Specifies the maximum amount of time, in milliseconds, the Kotlin\ndriver will wait to send or receive a request before timing out.\nA value of 0 instructs the driver to never time out while waiting\nto send or receive a request. maxIdleTimeMS integer Specifies the maximum amount of time, in milliseconds, that the driver\nallows a pooled connection to idle before closing the\nconnection. A value of 0 indicates that there is no upper bound\non how long the driver allows a pooled connection to be idle. maxLifeTimeMS integer Specifies the maximum amount of time, in milliseconds, the Kotlin\ndriver will continue to use a pooled connection before closing the\nconnection. A value of 0 indicates that there is no upper bound\non how long the driver can keep a pooled connection open. journal boolean Specifies that the driver must wait for the connected MongoDB\ninstance to group commit to the journal file on disk for all writes. w string or integer Specifies the write concern. For more information on values, see\nthe server documentation for the w option . wtimeoutMS integer Specifies a time limit, in milliseconds, for the write concern. For\nmore information, see the server documentation for the\n wtimeoutMS option .\nA value of 0 instructs the driver to never time out write operations. readPreference string Specifies the read preference. For more information on values, see\nthe server documentation for the\n readPreference option . readPreferenceTags string Specifies the read preference tags. For more information on values, see\nthe server documentation for the\n readPreferenceTags option . maxStalenessSeconds integer Specifies, in seconds, how stale a secondary can be before the\ndriver stops communicating with that secondary. The minimum value is\neither 90 seconds or the heartbeat frequency plus 10 seconds, whichever\nis greater. For more information, see the server documentation for the\n maxStalenessSeconds option .\nNot providing a parameter or explicitly specifying -1 indicates\nthat there should be no staleness check for secondaries. authMechanism string Specifies the authentication mechanism \nthat the driver should use if a credential\nwas supplied. authSource string Specifies the database that the supplied credentials should be\nvalidated against. authMechanismProperties string Specifies authentication properties for the specified authentication\nmechanism as a list of colon-separated properties and values.\nFor more information, see the server documentation for\nthe authMechanismProperties option . appName string Specifies the name of the application provided to MongoDB instances\nduring the connection handshake. Can be used for server logs and\nprofiling. compressors string Specifies one or more compression algorithms that the driver\nwill attempt to use to compress requests sent to the connected\nMongoDB instance. Possible values include: zlib , snappy ,\nand zstd . zlibCompressionLevel integer Specifies the degree of compression that Zlib \nshould use to decrease the size of requests to the connected MongoDB\ninstance. The level can range from -1 to 9 , with lower values\ncompressing faster (but resulting in larger requests) and larger values\ncompressing slower (but resulting in smaller requests). retryWrites boolean Specifies that the driver must retry supported write operations\nif they fail due to a network error. retryReads boolean Specifies that the driver must retry supported read operations\nif they fail due to a network error. 
serverMonitoringMode string Specifies which server monitoring protocol the driver uses. When set to\n auto , the monitoring mode is determined by the environment in which\nthe driver is running. The driver uses poll mode in function-as-a-service\n(FaaS) environments and stream mode in other environments. uuidRepresentation string Specifies the UUID representation to use for read and write\noperations. For more information, see the driver documentation\nfor the\n MongoClientSettings.getUuidRepresentation() method . directConnection boolean Specifies that the driver must connect to the host directly. maxConnecting integer Specifies the maximum number of connections a pool may be establishing\nconcurrently. srvServiceName string Specifies the service name of the\n SRV resource records \nthe driver retrieves to construct your\n seed list .\nYou must use the\n DNS Seed List Connection Format \nin your\n connection URI \nto use this option.", + "code": [], + "preview": "This section explains MongoDB connection and authentication options\nsupported by the driver. You can pass the connection options as\nparameters of the connection URI to specify the behavior of the client.", + "tags": null, + "facets": { + "target_product": ["drivers"], + "programming_language": ["kotlin"] + } + }, + { + "slug": "fundamentals/connection/mongoclientsettings", + "title": "Specify MongoClient Settings", + "headings": [ + "Overview", + "MongoClient Settings", + "Example", + "Cluster Settings", + "Example", + "Socket Settings", + "Example", + "Connection Pool Settings", + "Example", + "Server Settings", + "Example", + "TLS/SSL Settings", + "Example" + ], + "paragraphs": "In this guide, you can learn about the different settings to control\nthe behavior of your MongoClient . The following sections describe commonly used settings: MongoClient Settings Cluster Settings Socket Settings Connection Pool Settings Server Settings TLS/SSL Settings You can control the behavior of your MongoClient by creating and passing\nin a MongoClientSettings \nobject to the MongoClient.create() \nmethod. To create a MongoClientSettings object, use the\n MongoClientSettings.builder() method and chain methods to specify your\nsettings. After chaining them, use the build() method to create the\n MongoClientSettings object. The following table describes all the methods you can chain to modify your\nconnection behavior: Method Description addCommandListener() Adds a listener for command events . applicationName() Sets the logical name of the application using the MongoClient . applyConnectionString() Applies the settings from the given ConnectionString to the\nbuilder. If you omit this method, the driver attempts to connect to\n localhost . applyToClusterSettings() Applies the ClusterSettings.Builder block and then sets the\n cluster settings . applyToConnectionPoolSettings() Applies the ConnectionPoolSettings.Builder block and then sets the\n connection pool settings . applyToServerSettings() Applies the ServerSettings.Builder block and then sets the\n server settings . applyToSocketSettings() Applies the SocketSettings.Builder block and then sets the\n socket settings . applyToSslSettings() Applies the SslSettings.Builder block and then sets the\n TLS/SSL settings . autoEncryptionSettings() Sets the auto-encryption settings . codecRegistry() Sets the codec registry . commandListenerList() Sets the command listeners . compressorList() Sets the compressors to use for compressing\nmessages to the server.
credential() Sets the credential . readConcern() Sets the read concern . readPreference() Sets the read preference . retryReads() Whether the driver should retry reads \nif a network error occurs. retryWrites() Whether the driver should retry writes \nif a network error occurs. serverApi() Sets the server API to use when sending\ncommands to the server. streamFactoryFactory() Sets the factory to use to create a StreamFactory . uuidRepresentation() Sets the UUID representation to use when encoding instances of UUID\nand decoding BSON binary values with subtype of 3. writeConcern() Sets the write concern . This example demonstrates specifying a ConnectionString : Each setting has an applyConnectionString() method. They are\nrarely needed within the settings, so you should use this method as shown\nin the preceding example . Some options in the settings map to a connection string option.\nIf you specify the same options in your settings and connection\nstring, the order you chain them determines which option the driver\nuses. The driver uses the last setting it reads. For example, this snippet contains settings with the following times\nfor the driver to connect to an available socket: Since the driver reads the socket settings options last, the driver\nexpects to connect to an available socket within 5 SECONDS before\ntiming out. The connection string specifies within 2 SECONDS The socket settings specify within\n 5 SECONDS To log the MongoClient instance settings,\nset the org.mongodb.driver.client named\nlogger to the INFO level. To learn more about logging with the MongoDB Kotlin Driver, see the\n Logging guide. Chain the applyToClusterSettings() \nmethod to modify the driver's behavior when interacting with your\nMongoDB cluster. The following table describes all the methods you can chain to your\nsettings to modify the driver's behavior: Method Description addClusterListener() Adds a listener for cluster-related events. applyConnectionString() Uses the settings from a ConnectionString object. applySettings() Uses the cluster settings specified in a ClusterSettings object. hosts() Sets all the specified locations of a Mongo server. localThreshold() Sets the amount of time that a server\u2019s round trip can take and still\nbe eligible for server selection. mode() Sets how to connect to a MongoDB server. requiredClusterType() Sets the type of cluster required for the cluster. requiredReplicaSetName() Sets the replica set name required for the cluster. serverSelectionTimeout() Sets the maximum time to select a primary node before throwing a\ntimeout exception. serverSelector() Adds a server selector to apply before server selection. srvHost() Sets the host name to use to look up an SRV DNS record to find the\nMongoDB hosts. When setting srvHost , the driver does not process any\nTXT records associated with the host. If you want to enable the processing of TXT records, you must\nspecify the SRV host in the connection string using the\n applyConnectionString() method. srvMaxHosts() Sets the maximum number of hosts the driver connects to when using\nthe DNS seed list connection format. This example specifies for the driver to connect directly to a server,\nregardless of the type of MongoDB cluster it's a part of: This is analogous to the directConnection parameter you can specify\nin your connection URI. See Connection Options for more\ninformation. Chain the applyToSocketSettings() \nmethod to modify the driver's behavior when connecting and communicating\nwith your MongoDB server.
The following table describes all the methods you can chain to your settings\nto modify the driver's behavior: Method Description applyConnectionString() Uses the settings from a ConnectionString object. applySettings() Uses the socket settings specified in a SocketSettings object. applyToProxySettings() Applies the ProxySettings.Builder block and then sets the\n proxySettings field. connectTimeout() Sets the maximum time to connect to an available socket before throwing\na timeout exception. readTimeout() Sets the maximum time to read from an available socket before throwing a\ntimeout exception. receiveBufferSize() Sets the socket's buffer size when receiving. sendBufferSize() Sets the socket's buffer size when sending. This example specifies the following driver behavior in a MongoDB socket: To connect to an available socket within 10 SECONDS To read from an available socket within 15 SECONDS Chain the applyToConnectionPoolSettings() \nmethod to modify the way the driver manages its connection pool. The following table describes all the methods you can chain to your\nsettings to modify the driver's behavior: Method Description addConnectionPoolListener() Adds a listener for connection pool-related events. applyConnectionString() Uses the settings from a ConnectionString object. applySettings() Uses the connection pool settings specified in a\n ConnectionPoolSettings object. maintenanceFrequency() Sets the frequency for running a maintenance job. maintenanceInitialDelay() Sets the time to wait before running the first maintenance job. maxConnectionIdleTime() Sets the maximum time a connection can be idle before it's closed. maxConnectionLifeTime() Sets the maximum time a pooled connection can be alive before it's\nclosed. maxWaitTime() Sets the maximum time to wait for an available connection. maxSize() Sets the maximum number of connections associated with a connection\npool. minSize() Sets the minimum number of connections associated with a connection\npool. The maxSize and minSize settings apply to each server\nin the cluster you connect the driver to. For example, assume you connect the driver to a cluster with three\n mongos servers. This means that there can be at most maxSize \nconnections and at least minSize connections to each mongos server. This example specifies the following driver behavior in a pool of\n Connection types: The thread to wait at most 10 SECONDS for an available connection To have at most 200 connections associated with the pool Chain the applyToServerSettings() \nmethod to modify the driver's behavior when monitoring each MongoDB\nserver. The following table describes all the methods you can chain to your\nsettings to modify the driver's behavior: Method Description addServerListener() Adds a listener for server-related events. addServerMonitorListener() Adds a listener for server monitor-related events. applyConnectionString() Uses the settings from a ConnectionString object. applySettings() Uses the server settings specified in a ServerSettings object. heartbeatFrequency() Sets the interval for a cluster monitor to attempt reaching a server. minHeartbeatFrequency() Sets the minimum interval for server monitoring checks. serverMonitoringMode() Specifies which server monitoring protocol the driver uses.
This example specifies the following driver behavior in a MongoDB server: The minimum interval for server monitoring checks to be at least\n 700 MILLISECONDS The cluster monitor to attempt reaching a server every 15 SECONDS Chain the applyToSslSettings() \nmethod to modify the driver's behavior when using TLS/SSL to secure a\nconnection between your application and MongoDB. The following table describes all the methods you can chain to your\nsettings to modify the driver's behavior: Method Description applyConnectionString() Uses the settings from a ConnectionString object. applySettings() Uses the TLS/SSL settings specified in a SslSettings object. context() Sets the SSLContext for use when you enable TLS/SSL. enabled() Whether to enable TLS/SSL. (You must enable this for Atlas clusters.) invalidHostNameAllowed() Whether to allow a mismatch between the server\u2019s hostname and the\nhostname specified by the TLS certificate. This example specifies for the driver to enable TLS/SSL when connecting\nto MongoDB:", + "code": [ + { + "lang": "kotlin", + "value": "val mongoClient = MongoClient.create(\n MongoClientSettings.builder()\n .applyConnectionString(ConnectionString(\"\"))\n .build()\n)\n" + }, + { + "lang": "kotlin", + "value": "val mongoClient = MongoClient.create(\n MongoClientSettings.builder()\n .applyConnectionString(ConnectionString(\"mongodb+srv:/:@:?connectTimeoutMS(2000)\"))\n .applyToSocketSettings{ builder ->\n builder.connectTimeout(5, TimeUnit.SECONDS)\n }\n .build()\n)\n" + }, + { + "lang": "kotlin", + "value": "val mongoClient = MongoClient.create(\n MongoClientSettings.builder()\n .applyConnectionString(ConnectionString(\"mongodb+srv://host1.acme.com\"))\n .build()\n)\n" + }, + { + "lang": "kotlin", + "value": "val mongoClient = MongoClient.create(\n MongoClientSettings.builder()\n .applyToClusterSettings{ builder ->\n builder.mode(ClusterConnectionMode.SINGLE)\n }\n .build()\n)\n" + }, + { + "lang": "kotlin", + "value": "val mongoClient = MongoClient.create(\n MongoClientSettings.builder()\n .applyConnectionString(ConnectionString(\"\"))\n .applyToSocketSettings{ builder ->\n builder\n .connectTimeout(10, TimeUnit.SECONDS)\n .readTimeout(15, TimeUnit.SECONDS)\n }\n .build()\n)\n" + }, + { + "lang": "kotlin", + "value": "val mongoClient = MongoClient.create(\n MongoClientSettings.builder()\n .applyConnectionString(ConnectionString(\"\"))\n .applyToConnectionPoolSettings{ builder ->\n builder\n .maxWaitTime(10, TimeUnit.SECONDS)\n .maxSize(200)\n }\n .build()\n)\n" + }, + { + "lang": "kotlin", + "value": "val mongoClient = MongoClient.create(\n MongoClientSettings.builder()\n .applyConnectionString(ConnectionString(\"\"))\n .applyToServerSettings{ builder ->\n builder\n .minHeartbeatFrequency(700, TimeUnit.MILLISECONDS)\n .heartbeatFrequency(15, TimeUnit.SECONDS)\n }\n .build()\n)\n" + }, + { + "lang": "kotlin", + "value": "val mongoClient = MongoClient.create(\n MongoClientSettings.builder()\n .applyConnectionString(ConnectionString(\"\"))\n .applyToSslSettings{ builder ->\n builder.enabled(true)\n }\n .build()\n)\n" + } + ], + "preview": "In this guide, you can learn about the different settings to control\nthe behavior of your MongoClient.", + "tags": null, + "facets": { + "target_product": ["drivers"], + "programming_language": ["kotlin"] + } + }, + { + "slug": "fundamentals/connection/network-compression", + "title": "Network Compression", + "headings": [ + "Specify Compression Algorithms", + "Compression Algorithm Dependencies" + ], + "paragraphs": "The MongoDB 
Kotlin Driver provides a connection option to compress messages,\nwhich reduces the amount of data passed over the network between MongoDB\nand your application. The driver supports the following algorithms: The driver tests against the following versions of these libraries: If you specify multiple compression algorithms, the driver selects the\nfirst one that is supported by the MongoDB instance that the driver is\nconnected to. Snappy : available in MongoDB 3.4 and later. Zlib : available in MongoDB 3.6 and later. Zstandard : available in MongoDB 4.2 and later. org.xerial.snappy:snappy-java:1.1.8.4 com.github.luben:zstd-jni:1.5.5-2 If your application requires Snappy or Zstandard compression, you must add\n explicit dependencies for those algorithms. You can enable compression on your connection by specifying the\nalgorithms in the following ways: Adding the compressors parameter to your ConnectionString instance Calling the compressorList() method from the MongoClientSettings builder To enable compression on your connection in a ConnectionString \ninstance, specify the compressors parameter. You can specify\none or more of the following values for the compressors parameter: The following example shows how to specify Snappy, Zlib, and\nZstandard as the compression algorithms for a connection: \"snappy\" for Snappy compression \"zlib\" for Zlib compression \"zstd\" for Zstandard compression To enable compression within your MongoClientSettings ,\ncall the compressorList() \nbuilder method and pass one or more MongoCompressor \ninstances as a parameter. You can specify compression algorithms by calling the following\nmethods from MongoCompressor : The following example shows how to specify Snappy, Zlib, and\nZstandard as the compression algorithms for a connection: createSnappyCompressor() for Snappy compression createZlibCompressor() for Zlib compression createZstdCompressor() for Zstandard compression The JDK supports Zlib compression natively, but\n Snappy and\n Zstandard depend on open source\nimplementations.
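As a sketch of what those explicit dependency declarations might look like in a Gradle build (using the test versions listed above; your build tool and versions may differ), you could declare implementation(\"org.xerial.snappy:snappy-java:1.1.8.4\") and implementation(\"com.github.luben:zstd-jni:1.5.5-2\").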
See\n snappy-java and\n zstd-jni for details.", + "code": [ + { + "lang": "kotlin", + "value": "// Replace the placeholders with values from your MongoDB deployment's connection string\nval connectionString = ConnectionString(\"mongodb+srv://:@/?compressors=snappy,zlib,zstd\")\n\n// Create a new client with your settings\nval mongoClient = MongoClient.create(connectionString)\n" + }, + { + "lang": "kotlin", + "value": "// Replace the placeholder with your MongoDB deployment's connection string\nval uri = \"\"\n\nval settings = MongoClientSettings.builder()\n .applyConnectionString(ConnectionString(uri))\n .compressorList(\n listOf(\n MongoCompressor.createSnappyCompressor(),\n MongoCompressor.createZlibCompressor(),\n MongoCompressor.createZstdCompressor())\n )\n .build()\n\n// Create a new client with your settings\nval mongoClient = MongoClient.create(settings)\n" + } + ], + "preview": "The MongoDB Kotlin Driver provides a connection option to compress messages,\nwhich reduces the amount of data passed over the network between MongoDB\nand your application.", + "tags": null, + "facets": { + "target_product": ["drivers"], + "programming_language": ["kotlin"] + } + }, + { + "slug": "fundamentals/connection/socks5", + "title": "Connect to MongoDB by Using a SOCKS5 Proxy", + "headings": [ + "Overview", + "SOCKS5 Proxy Settings", + "Examples", + "Specify Proxy Settings in the MongoClientSettings", + "Specify Proxy Settings in the Connection String", + "API Documentation" + ], + "paragraphs": "In this guide, you can learn how to use the MongoDB Kotlin Driver to connect\nto MongoDB by using a SOCKS5 proxy . SOCKS5 is a standardized\nprotocol for communicating with network services through a proxy server. To learn more about the SOCKS5 protocol, see the Wikipedia entry on\n SOCKS . The proxy settings specify the SOCKS5 proxy server address and your\nauthentication credentials. You can specify your settings in an instance of\n MongoClientSettings or in your connection string. The following table describes the SOCKS5 client options: Name Accepted Values Description proxyHost String Specifies the SOCKS5 proxy IPv4 address, IPv6 address, or hostname.\nYou must provide this value to connect to a SOCKS5 proxy. proxyPort Non-negative integer Specifies the TCP port number of the SOCKS5 proxy server. If you\nset a value for proxyHost , this option defaults to 1080 ,\nbut you can specify a different port number. proxyUsername String Specifies the username for authentication to the SOCKS5 proxy server.\nThe driver ignores null and empty string values for this setting.\nThe driver requires that you pass values for both proxyUsername \nand proxyPassword or that you omit both values. proxyPassword String Specifies the password for authentication to the SOCKS5 proxy server.\nThe driver ignores null and empty string values for this setting.\nThe driver requires that you pass values for both proxyUsername \nand proxyPassword or that you omit both values. The following examples show how to instantiate a MongoClient that connects\nto MongoDB by using a SOCKS5 proxy. The proxy settings can be specified in a\n MongoClientSettings instance or a connection string. These examples use\nthe placeholder values described in the SOCKS5 Proxy Settings section.\nReplace the placeholders with your proxy specifications and credentials.
The following code example shows how to specify SOCKS5 proxy settings by\nusing the applyToSocketSettings() builder method when creating a\n MongoClientSettings instance: The following code example shows how to specify SOCKS5 proxy settings in\nyour connection string: To learn more about the methods and types discussed in this guide, see the\nfollowing API documentation: MongoClientSettings.Builder SocketSettings.Builder MongoClient.create() ProxySettings.Builder", + "code": [ + { + "lang": "kotlin", + "value": "val uri = \"\"\n\nval mongoClient = MongoClient.create(\n MongoClientSettings.builder()\n .applyConnectionString(ConnectionString(uri))\n .applyToSocketSettings{ builder ->\n builder\n .applyToProxySettings{ proxyBuilder ->\n proxyBuilder\n .host(\"\")\n .port(\"\".toInt())\n .username(\"\")\n .password(\"\")\n .build()\n }\n }\n .build()\n)\n" + }, + { + "lang": "kotlin", + "value": "val connectionString = ConnectionString(\n \"mongodb+srv://:@/?\" +\n \"proxyHost=\" +\n \"&proxyPort=\" +\n \"&proxyUsername=\" +\n \"&proxyPassword=\"\n)\n\nval mongoClient = MongoClient.create(connectionString)\n" + } + ], + "preview": "In this guide, you can learn how to use the MongoDB Kotlin Driver to connect\nto MongoDB by using a SOCKS5 proxy. SOCKS5 is a standardized\nprotocol for communicating with network services through a proxy server.", + "tags": "code example, security, connection string", + "facets": { + "genre": ["reference"], + "target_product": ["drivers"], + "programming_language": ["kotlin"] + } + }, + { + "slug": "fundamentals/connection/tls", + "title": "Enable TLS/SSL on a Connection", + "headings": [ + "Overview", + "Enable TLS/SSL", + "Configure Certificates", + "Configure the JVM Trust Store", + "Configure the JVM Key Store", + "Configure a Client-Specific Trust Store and Key Store", + "Disable Hostname Verification", + "Restrict Connections to TLS 1.2 Only", + "Customize TLS/SSL Configuration through the Java SE SSLContext", + "Online Certificate Status Protocol (OCSP)", + "Client-Driven OCSP", + "OCSP Stapling" + ], + "paragraphs": "In this guide, you can learn how to connect to MongoDB instances with the\n TLS/SSL \nsecurity protocol using the underlying TLS/SSL support in the JDK. To\nconfigure your connection to use TLS/SSL, enable the TLS/SSL settings in\neither the ConnectionString \nor MongoClientSettings . If you experience trouble setting up your TLS/SSL connection, you can\nuse the -Djavax.net.debug=all system property to view more\nlog statements. See the Oracle guide to debugging TLS/SSL connections \nfor more information. You can enable TLS/SSL for the connection to your MongoDB instance\nin two different ways: through a parameter in your connection string, or\nusing a method in the MongoClientSettings.Builder class. If you connect by using the DNS seedlist protocol, indicated by the\n mongodb+srv prefix in your connection string, the driver\nautomatically enables TLS/SSL. To disable it, set the tls \nparameter value to false in your connection string, or set the\n enabled property to false in the SslSettings.Builder \nblock when creating a MongoClientSettings instance. To learn more about connection behavior when you use a DNS seedlist,\nsee the SRV Connection Format \nsection in the Server manual. 
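For example (a hypothetical URI reusing the user , pass , and sample.host placeholders from the Connection URI section), mongodb+srv://user:pass@sample.host/?tls=false explicitly disables TLS/SSL for a DNS seedlist connection.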
To enable TLS/SSL on a connection with a ConnectionString , assign the connection string\nparameter tls a value of true in the connection string passed to\n MongoClient.create() : To configure your MongoClient 's TLS/SSL connection options using the\n MongoClientSettings.Builder class, call the\n applyToSslSettings() \nmethod. Set the enabled property to true in the SslSettings.Builder \nblock to enable TLS/SSL: Kotlin applications that initiate TLS/SSL requests require access to\ncryptographic certificates that prove identity for the application\nitself and other applications with which the application\ninteracts. You can configure access to these certificates in your application with\nthe following mechanisms: The JVM Trust Store and JVM Key Store A Client-Specific Trust Store and Key Store The following sections are based on the documentation for Oracle JDK,\nso some parts may be inapplicable to your JDK or to the custom TLS/SSL\nimplementation you use. The JVM trust store saves certificates that securely identify other\napplications with which your Kotlin application interacts. Using these\ncertificates, your application can prove that the connection to another\napplication is genuine and secure from tampering by third parties. If your MongoDB instance uses a certificate that is signed by an\nauthority that is not present in the JRE's default certificate store,\nyour application must configure two system properties to initiate\nSSL/TLS requests. These properties ensure that your application can\nvalidate the TLS/SSL certificate presented by a connected MongoDB instance. You can create a trust store with the keytool \ncommand line tool provided as part of the JDK: By default, the JRE includes many commonly used public certificates\nfrom signing authorities like Let's Encrypt . As a result, you can connect to\ninstances of MongoDB Atlas (or any other\nserver whose certificate is signed by an authority in the JRE's default\ncertificate store) with TLS/SSL without configuring the trust store. javax.net.ssl.trustStore : the path to a trust store containing the\ncertificate of the signing authority javax.net.ssl.trustStorePassword : the password to access the trust\nstore defined in javax.net.ssl.trustStore The JVM key store saves certificates that securely identify your Kotlin\napplication to other applications. Using these certificates, other\napplications can prove that the connection to your application is\ngenuine and secure from tampering by third parties. An application that initiates TLS/SSL requests needs to set two JVM system\nproperties to ensure that the client presents a TLS/SSL certificate to\nthe MongoDB server: You can create a key store with the keytool \nor openssl \ncommand line tool. For more information on configuring a Kotlin application to use TLS/SSL,\nplease see the JSSE Reference Guide . By default, MongoDB instances do not perform client certificate\nvalidation. You must configure the key store if you configured your MongoDB\ninstance to validate client certificates. javax.net.ssl.keyStore : the path to a key store containing the client's\nTLS/SSL certificates javax.net.ssl.keyStorePassword : the password to access the key store\ndefined in javax.net.ssl.keyStore You can configure a client-specific trust store and key store using the\n init() method of the SSLContext class. You can find an example showing how to configure a client with an SSLContext \ninstance in the\n Customize TLS/SSL Configuration with an SSLContext section of this guide . 
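As a minimal sketch of that initialization (assuming hypothetical keyManagers and trustManagers arrays built from your own KeyManagerFactory and TrustManagerFactory instances), you would call sslContext.init(keyManagers, trustManagers, null) before passing the context to the builder, as in the customization example later on this page.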
For more information on the SSLContext class, see the API\ndocumentation for SSL Context . By default, the driver ensures that the hostname included in the server's\nTLS/SSL certificates matches the hostnames provided when constructing\na MongoClient . To disable hostname verification for your\napplication, set the\n invalidHostNameAllowed property of the builder to true in the\n applyToSslSettings() builder lambda: Disabling hostname verification can make your configuration\n insecure .\nDisable hostname verification only for testing purposes or\nwhen there is no other alternative. To restrict your application to use only the TLS 1.2 protocol, set the\n jdk.tls.client.protocols system property to \"TLSv1.2\". Java Runtime Environments (JREs) before Java 8 only enabled\nthe TLS 1.2 protocol in update releases. If your JRE has not enabled\nthe TLS 1.2 protocol, upgrade to a later release to connect by using\nTLS 1.2. If your TLS/SSL configuration requires customization, you can\nset the sslContext property of your MongoClient by\npassing an SSLContext \nobject to the builder in the applyToSslSettings() lambda: OCSP is a standard used to check whether X.509 certificates have been\nrevoked. A certificate authority can add an X.509 certificate to the\nCertificate Revocation List (CRL) before the expiry time to invalidate\nthe certificate. When a client sends an X.509 certificate during the TLS\nhandshake, the CA's revocation server checks the CRL and returns a status\nof \"good\", \"revoked\", or \"unknown\". The driver supports the following variations of OCSP: The following sections describe the differences between them and how to enable\nthem for your application. Client-Driven OCSP OCSP Stapling The Kotlin driver uses the JVM arguments configured for the application;\nthese cannot be overridden for a specific MongoClient instance. In client-driven OCSP, the client sends the certificate in an OCSP request to\nan OCSP responder after receiving the certificate from the server. The OCSP\nresponder checks the status of the certificate with a certificate\nauthority (CA) and reports whether it's valid in a response sent to the\nclient. To enable client-driven OCSP for your application, set the following JVM\nsystem properties: Property Value com.sun.net.ssl.checkRevocation Set this property to true to enable revocation checking. ocsp.enable Set this property to true to enable client-driven OCSP. If the OCSP responder is unavailable, the TLS support provided by the\nJDK reports a \"hard fail\". This differs from the \"soft fail\" behavior of\nthe MongoDB Shell and some other drivers. OCSP stapling is a mechanism in which the server must obtain the signed\ncertificate from the certificate authority (CA) and include it in a\ntime-stamped OCSP response to the client. To enable OCSP stapling for your application, set the following JVM system\nproperties: For more information about OCSP, check out the following resources: Property Description com.sun.net.ssl.checkRevocation Set this property to true to enable revocation checking.
jdk.tls.client.enableStatusRequestExtension Set this property to true to enable OCSP stapling. Oracle JDK 8 Documentation on how to enable OCSP for an application Official IETF specification for OCSP (RFC 6960)", + "code": [ + { + "lang": "kotlin", + "value": "val mongoClient = MongoClient.create(\"mongodb+srv://:@?tls=true\")\n" + }, + { + "lang": "kotlin", + "value": "val settings = MongoClientSettings.builder()\n .applyConnectionString(ConnectionString(\"\"))\n .applyToSslSettings { builder ->\n builder.enabled(true)\n }\n .build()\nval mongoClient = MongoClient.create(settings)\n" + }, + { + "lang": "console", + "value": "keytool -importcert -trustcacerts -file \n -keystore -storepass " + }, + { + "lang": "kotlin", + "value": "val settings = MongoClientSettings.builder()\n .applyConnectionString(ConnectionString(\"\"))\n .applyToSslSettings { builder ->\n builder.enabled(true)\n builder.invalidHostNameAllowed(true)\n }\n .build()\nval mongoClient = MongoClient.create(settings)\n" + }, + { + "lang": "kotlin", + "value": "// You can customize SSL settings using the SSLContext\nval sslContext = SSLContext.getDefault()\n\nval settings = MongoClientSettings.builder()\n .applyToSslSettings { builder ->\n builder.enabled(true)\n builder.context(sslContext)\n }\n .build()\nval mongoClient = MongoClient.create(settings)\n" + } + ], + "preview": "In this guide, you can learn how to connect to MongoDB instances with the\nTLS/SSL\nsecurity protocol using the underlying TLS/SSL support in the JDK. To\nconfigure your connection to use TLS/SSL, enable the TLS/SSL settings in\neither the ConnectionString\nor MongoClientSettings.", + "tags": "code example, security, authentication", + "facets": { + "genre": ["reference"], + "target_product": ["drivers"], + "programming_language": ["kotlin"] + } + }, + { + "slug": "fundamentals/connection", + "title": "Connection Guide", + "headings": ["Overview"], + "paragraphs": "Learn how to set up a connection and specify connection behavior from your\napplication to a MongoDB deployment using the driver in the following\nsections: For information about authenticating with a MongoDB instance,\nsee Authentication Mechanisms and Enterprise Authentication Mechanisms . Connect to MongoDB View a List of Connection Options Specify Connection Behavior with the MongoClient Class Enable Network Compression Enable TLS/SSL on a Connection Connect to MongoDB by Using a SOCKS5 Proxy", + "code": [], + "preview": "Learn how to set up a connection and specify connection behavior from your\napplication to a MongoDB deployment using the driver in the following\nsections:", + "tags": null, + "facets": { + "target_product": ["drivers"], + "programming_language": ["kotlin"] + } + }, + { + "slug": "fundamentals/crud/compound-operations", + "title": "Compound Operations", + "headings": [ + "Overview", + "How to Use Compound Operations", + "Find and Update", + "Example", + "Find and Replace", + "Example", + "Find and Delete", + "Example", + "Avoiding a Race Condition", + "Example With Race Condition", + "Example Without Race Condition" + ], + "paragraphs": "In this guide, you can learn how to perform compound operations with\nthe MongoDB Kotlin driver. Compound operations consist of a read and write operation performed as one\n atomic operation . An atomic operation is an operation which either completes\nentirely, or does not complete at all. Atomic operations cannot partially complete. Atomic operations can help you avoid race conditions in your code.
A\nrace condition occurs when your code's behavior is dependent on the order of\nuncontrollable events. MongoDB supports the following compound operations: If you need to perform more complex tasks atomically, such as reading and\nwriting to more than one document, use transactions . Transactions are a\nfeature of MongoDB and other databases that lets you define an arbitrary\nsequence of database commands as an atomic operation. For more information on atomic operations and atomicity, see\n the MongoDB manual entry for atomicity and transactions . For more information on transactions, see\n the MongoDB manual entry for transactions . Find and update one document Find and replace one document Find and delete one document This section shows how to use each compound operation with the MongoDB Kotlin Driver. The following examples use a collection containing these two sample documents. This data is modeled with the following Kotlin data class: By default, each compound operation returns your found document in the state\nbefore your write operation. You can retrieve your found document in the\nstate after your write operation by using the options class corresponding to\nyour compound operation. You can see an example of this configuration in the\n Find and Replace example below . To find and update one document, use the findOneAndUpdate() method of the\n MongoCollection class. The findOneAndUpdate() method returns your found\ndocument or null if no documents match your query. The following example uses the findOneAndUpdate() method to find a\ndocument with the color field set to \"green\" and update the\n food field in that document to \"pizza\" . The example also uses a FindOneAndUpdateOptions instance to specify the\nfollowing options: For more information on the Projections class, see our\n guide on the Projections builder . For more information on the upsert operation, see our\n guide on upserts . For more information about the methods and classes mentioned in this section,\nsee the following API Documentation: Specify an upsert, which inserts the document specified by the query filter if no documents match the query. Set a maximum execution time of 5 seconds for this operation on the MongoDB\ninstance. If the operation takes longer, the findOneAndUpdate() method\nwill throw a MongoExecutionTimeoutException . findOneAndUpdate() FindOneAndUpdateOptions MongoExecutionTimeoutException To find and replace one document, use the findOneAndReplace() method of the\n MongoCollection class. The findOneAndReplace() method returns your found\ndocument or null if no documents match your query. The following example uses the findOneAndReplace() method to find a\ndocument with the color field set to \"green\" and replace it\nwith the following document: The example also uses a FindOneAndReplaceOptions instance to specify that\nthe returned document should be in the state after our replace operation. For more information about the methods and classes mentioned in this section,\nsee the following API Documentation: findOneAndReplace() FindOneAndReplaceOptions To find and delete one document, use the findOneAndDelete() method of the\n MongoCollection class. The findOneAndDelete() method returns your found\ndocument or null if no documents match your query. The following example uses the findOneAndDelete() method to find and\ndelete the document with the largest value in the _id field. The example uses a FindOneAndDeleteOptions instance to specify a\ndescending sort on the _id field. 
For more information on the Sorts class, see our\n guide on the Sorts builder . For more information about the methods and classes mentioned in this section,\nsee the following API Documentation: findOneAndDelete() FindOneAndDeleteOptions In this section we explore two examples. The first example contains a\nrace condition; the second example uses a compound operation to\navoid the race condition present in the first example. For both examples, let's imagine that we run a hotel with one room and that we\nhave a small Kotlin program to help us check out this room to a guest. The following document in MongoDB represents the room: This data is modeled with the following Kotlin data class: Let's say our app uses this bookARoomUnsafe method to check out our room to\na guest: Imagine two separate guests, Jan and Pat, try to book the room with this method\nat the same time. Jan sees this output: And Pat sees this output: When we look at our database, we see the following: Pat will be unhappy. When Pat shows up to our hotel, Jan will be\noccupying her room. What went wrong? Here is the sequence of events that happened from the perspective of our MongoDB\ninstance: Notice that for a brief moment Pat had reserved the room, but as Jan's update\noperation was the last to execute, our document has \"Jan\" as the guest. Find and return an empty room for Jan. Find and return an empty room for Pat. Update the room to booked for Pat. Update the room to booked for Jan. Let's use a compound operation to avoid the race condition and\nalways give our users the correct message. Imagine two separate guests, Jan and Pat, try to book the room with this method\nat the same time. Jan sees this output: And Pat sees this output: When we look at our database, we see the following: Pat got the correct message. While she might be sad she didn't get the\nreservation, at least she knows not to travel to our hotel. Here is the sequence of events that happened from the perspective of our MongoDB\ninstance: For information on the Updates class, see our\n guide on the Updates builder . For more information on the Filters class, see our\n guide on the Filters builder . For more information on the findOneAndUpdate() method, see\nthe API Documentation for the MongoCollection class . Find an empty room for Jan and reserve it. Try to find an empty room for Pat and reserve it. When there are no rooms left, return null .
Your MongoDB instance places a write lock on the document you are modifying\nfor the duration of your compound operation.", + "code": [ + { + "lang": "json", + "value": " {\"_id\": 1, \"food\": \"donut\", \"color\": \"green\"}\n {\"_id\": 2, \"food\": \"pear\", \"color\": \"yellow\"}" + }, + { + "lang": "kotlin", + "value": "data class FoodOrder(\n @BsonId val id: Int,\n val food: String,\n val color: String\n)\n" + }, + { + "lang": "kotlin", + "value": "\nval filter = Filters.eq(FoodOrder::color.name, \"green\")\nval update = Updates.set(FoodOrder::food.name, \"pizza\")\nval options = FindOneAndUpdateOptions()\n .upsert(true)\n .maxTime(5, TimeUnit.SECONDS)\n/* The result variable contains your document in the\n state before your update operation is performed\n or null if the document was inserted due to upsert\n being true */\nval result = collection.findOneAndUpdate(filter, update, options)\n\nprintln(result)\n" + }, + { + "lang": "console", + "value": "FoodOrder(id=1, food=donut, color=green)" + }, + { + "lang": "json", + "value": "{\"music\": \"classical\", \"color\": \"green\"}" + }, + { + "lang": "kotlin", + "value": "data class Music(\n @BsonId val id: Int,\n val music: String,\n val color: String\n)\n\nval filter = Filters.eq(FoodOrder::color.name, \"green\")\nval replace = Music(1, \"classical\", \"green\")\nval options = FindOneAndReplaceOptions()\n .returnDocument(ReturnDocument.AFTER)\nval result = collection.withDocumentClass().findOneAndReplace(filter, replace, options)\n\nprintln(result)\n" + }, + { + "lang": "console", + "value": "Music(id=1, music=classical, color=green)" + }, + { + "lang": "kotlin", + "value": "val sort = Sorts.descending(\"_id\")\nval filter = Filters.empty()\nval options = FindOneAndDeleteOptions().sort(sort)\nval result = collection.findOneAndDelete(filter, options)\n\nprintln(result)\n" + }, + { + "lang": "console", + "value": "FoodOrder(id=2, food=pear, color=yellow)" + }, + { + "lang": "json", + "value": " {\"_id\": 1, \"guest\": null, \"room\": \"Blue Room\", \"reserved\": false}" + }, + { + "lang": "kotlin", + "value": "data class HotelRoom(\n @BsonId val id: Int,\n val guest: String? 
= null,\n val room: String,\n val reserved: Boolean = false\n)\n" + }, + { + "lang": "none", + "value": "You got the Blue Room, Jan" + }, + { + "lang": "none", + "value": "You got the Blue Room, Pat" + }, + { + "lang": "json", + "value": " {\"_id\": 1, \"guest\": \"Jan\", \"room\": \"Blue Room\", \"reserved\": false}" + }, + { + "lang": "kotlin", + "value": "suspend fun bookARoomUnsafe(guestName: String) {\n val filter = Filters.eq(\"reserved\", false)\n val myRoom = hotelCollection.find(filter).firstOrNull()\n if (myRoom == null) {\n println(\"Sorry, we are booked, $guestName\")\n return\n }\n\n val myRoomName = myRoom.room\n\n println(\"You got the $myRoomName, $guestName\")\n\n val update = Updates.combine(Updates.set(\"reserved\", true), Updates.set(\"guest\", guestName))\n val roomFilter = Filters.eq(\"_id\", myRoom.id)\n hotelCollection.updateOne(roomFilter, update)\n}\n" + }, + { + "lang": "console", + "value": "You got the Blue Room, Jan" + }, + { + "lang": "console", + "value": "Sorry, we are booked, Pat" + }, + { + "lang": "json", + "value": " {\"_id\": 1, \"guest\": \"Jan\", \"room\": \"Blue Room\", \"reserved\": false}" + }, + { + "lang": "kotlin", + "value": "suspend fun bookARoomSafe(guestName: String) {\n val update = Updates.combine(\n Updates.set(HotelRoom::reserved.name, true),\n Updates.set(HotelRoom::guest.name, guestName)\n )\n val filter = Filters.eq(\"reserved\", false)\n val myRoom = hotelCollection.findOneAndUpdate(filter, update)\n if (myRoom == null) {\n println(\"Sorry, we are booked, $guestName\")\n return\n }\n\n val myRoomName = myRoom.room\n println(\"You got the $myRoomName, $guestName\")\n}\n" + } + ], + "preview": "In this guide, you can learn how to perform compound operations with\nthe MongoDB Kotlin driver.", + "tags": null, + "facets": { + "target_product": ["drivers"], + "programming_language": ["kotlin"] + } + }, + { + "slug": "fundamentals/crud/query-document", + "title": "Specify a Query", + "headings": [ + "Overview", + "Comparison Operators", + "Logical Operators", + "Array Operators", + "Element Operators", + "Evaluation Operators" + ], + "paragraphs": "In this guide, you can learn how to specify a query in the MongoDB Kotlin\ndriver. Most CRUD operations allow you to narrow the set of matched documents by\nspecifying matching criteria in a query filter . Query filters\ncontain one or more query operators that apply to specific fields which\ndetermine which documents to include in the result set. In this page, we cover the following query operators with\nexamples on how to use them: The examples in this guide use the following documents in the\n paint_purchases collection: This data is modeled with the following Kotlin data class: Comparison Operators Logical Operators Array Operators Element Operators Evaluation Operators Comparison operators query data based on comparisons with values in a\ncollection. Common comparison operators include gt() for \"greater\nthan\" comparisons, lte() for \"less than or equal to\" comparisons,\nand ne() for \"not equal to \" comparisons. The following example uses the Filters.gt() method to match all\ndocuments where the value of qty is greater than 7 in the\n paint_purchases collection: Logical operators query data using logic applied to the results of\nfield-level operators. Common logical operators include and() where\nall operators must be true, and or() where at least one of the\noperators must be true. 
The following example uses the Filters.and() method to match\ndocuments where the value of qty is less than or equal to 5 and\nthe value of color is not \"pink\" in the paint_purchases \ncollection: Array operators query data based on the value or quantity of elements in\nan array field. The following example uses the Filters.size() method to match\ndocuments where the size of the vendor list is 3 in the\n paint_purchases collection: Element operators query data based on the presence or type of a field. The following example uses the Filters.exists() method to match\ndocuments that have a rating in the paint_purchases collection: Evaluation operators query data on higher-level logic, like regex\nand text searches. The following example uses the Filters.regex() method to match\ndocuments that have a color ending with the letter \"k\" in the\n paint_purchases collection: For more information about the operators mentioned in this guide,\nsee the following Server Manual Entries: Query Operators Comparison Operators Logical Operators Array Operators Element Operators Evaluation Operators", + "code": [ + { + "lang": "json", + "value": "{ \"_id\": 1, \"color\": \"red\", \"qty\": 9, \"vendor\": [\"A\", \"E\"] }\n{ \"_id\": 2, \"color\": \"purple\", \"qty\": 8, \"vendor\": [\"B\", \"D\", \"F\"], \"rating\": 5 }\n{ \"_id\": 3, \"color\": \"blue\", \"qty\": 5, \"vendor\": [\"A\", \"E\"] }\n{ \"_id\": 4, \"color\": \"white\", \"qty\": 6, \"vendor\": [\"D\"], \"rating\": 9 }\n{ \"_id\": 5, \"color\": \"yellow\", \"qty\": 4, \"vendor\": [\"A\", \"B\"] }\n{ \"_id\": 6, \"color\": \"pink\", \"qty\": 3, \"vendor\": [\"C\"] }\n{ \"_id\": 7, \"color\": \"green\", \"qty\": 8, \"vendor\": [\"C\", \"E\"], \"rating\": 7 }\n{ \"_id\": 8, \"color\": \"black\", \"qty\": 7, \"vendor\": [\"A\", \"C\", \"D\"] }" + }, + { + "lang": "kotlin", + "value": "data class PaintOrder(\n @BsonId val id: Int,\n val qty: Int,\n val color: String,\n val vendor: List<String>,\n val rating: Int?
= null\n)\n" + }, + { + "lang": "kotlin", + "value": "val filter = Filters.gt(\"qty\", 7)\nval findFlow = collection.find(filter)\nfindFlow.collect { println(it) }\n" + }, + { + "lang": "console", + "value": "PaintOrder(id=1, qty=9, color=red, vendor=[A, E], rating=null)\nPaintOrder(id=2, qty=8, color=purple, vendor=[B, D, F], rating=5)\nPaintOrder(id=7, qty=8, color=green, vendor=[C, E], rating=7)" + }, + { + "lang": "kotlin", + "value": "val filter = Filters.and(Filters.lte(\"qty\", 5), Filters.ne(\"color\", \"pink\"))\nval findFlow = collection.find(filter)\nfindFlow.collect { println(it) }\n" + }, + { + "lang": "console", + "value": "PaintOrder(id=3, qty=5, color=blue, vendor=[A, E], rating=null)\nPaintOrder(id=5, qty=4, color=yellow, vendor=[A, B], rating=null)" + }, + { + "lang": "kotlin", + "value": "val filter = Filters.size(\"vendor\", 3)\nval findFlow = collection.find(filter)\nfindFlow.collect { println(it) }\n" + }, + { + "lang": "console", + "value": "PaintOrder(id=2, qty=8, color=purple, vendor=[B, D, F], rating=5)\nPaintOrder(id=8, qty=7, color=black, vendor=[A, C, D], rating=null)" + }, + { + "lang": "kotlin", + "value": "val filter = Filters.exists(\"rating\")\nval findFlow = collection.find(filter)\nfindFlow.collect { println(it) }\n" + }, + { + "lang": "console", + "value": "PaintOrder(id=2, qty=8, color=purple, vendor=[B, D, F], rating=5)\nPaintOrder(id=4, qty=6, color=white, vendor=[D], rating=9)\nPaintOrder(id=7, qty=8, color=green, vendor=[C, E], rating=7)" + }, + { + "lang": "kotlin", + "value": "val filter = Filters.regex(\"color\", \"k$\")\nval findFlow = collection.find(filter)\nfindFlow.collect { println(it) }\n" + }, + { + "lang": "console", + "value": "PaintOrder(id=6, qty=3, color=pink, vendor=[C], rating=null)\nPaintOrder(id=8, qty=7, color=black, vendor=[A, C, D], rating=null)" + } + ], + "preview": "In this guide, you can learn how to specify a query in the MongoDB Kotlin\ndriver.", + "tags": null, + "facets": { + "target_product": ["drivers"], + "programming_language": ["kotlin"] + } + }, + { + "slug": "fundamentals/crud/read-operations/change-streams", + "title": "Open Change Streams", + "headings": [ + "Overview", + "Open a Change Stream", + "Example", + "Apply Aggregation Operators to your Change Stream", + "Example", + "Split Large Change Stream Events", + "Include Pre-images and Post-images", + "Create a Collection with Pre-Image and Post-Images Enabled", + "Pre-image Configuration Example", + "Post-image Configuration Example" + ], + "paragraphs": "In this guide, you can learn how to use a change stream to monitor\nreal-time changes to your database. A change stream is a MongoDB server\nfeature that allows your application to subscribe to data changes on a single\ncollection, database, or deployment. You can specify a set of aggregation\noperators to filter and transform the data your application receives.\nWhen connecting to MongoDB v6.0 or later, you can configure the events\nto include the document data before and after the change. Learn how to open and configure your change streams in the following\nsections: Open a Change Stream Apply Aggregation Operators to your Change Stream Split Large Change Stream Events Include Pre-images and Post-images You can open a change stream to subscribe to specific types of data changes\nand produce change events in your application. To open a change stream, call the watch() method on an instance of a\n MongoCollection , MongoDatabase , or MongoClient . 
The object on which you call the watch() method determines the scope of\nevents that the change stream listens for. If you call watch() on a MongoCollection , the change stream monitors\na collection. If you call watch() on a MongoDatabase , the change stream monitors all\ncollections in that database. If you call watch() on a MongoClient , the change stream monitors all\nchanges in the connected MongoDB deployment. Standalone MongoDB deployments don't support change streams because\nthe feature requires a replica set oplog. To learn more about the oplog,\nsee the Replica Set Oplog server manual page. The following code example shows how to open a change stream and print\nchange stream events whenever the data in the collection changes: An insert operation on the collection should produce output similar to the\nfollowing text: For a runnable example, see the Watch for Changes usage example page. To learn more about the watch() method, see the following API\ndocumentation: MongoCollection.watch() MongoDatabase.watch() MongoClient.watch() You can pass an aggregation pipeline as a parameter to the watch() method\nto specify which change events the change stream receives. To learn which aggregation operators your MongoDB server version supports, see\n Modify Change Stream Output . The following code example shows how you can apply an aggregation pipeline to\nconfigure your change stream to receive change events for only insert and\nupdate operations: When the change stream receives an update change event, the preceding code\nexample outputs the following text: When connecting to MongoDB v7.0 or later,\nyou can use the $changeStreamSplitLargeEvent aggregation operator to\nsplit event documents that exceed 16 MB into smaller fragments. Use the $changeStreamSplitLargeEvent operator only when you expect\nthe change stream events to exceed the document size limit. For\nexample, you might use this feature if your application requires full\ndocument pre-images or post-images. A $changeStreamSplitLargeEvent aggregation stage returns\nfragments sequentially. You can access the fragments by using a change\nstream cursor. Each fragment document includes a splitEvent object that\ncontains the following fields: The following example opens a change stream that includes an aggregation\npipeline with a $changeStreamSplitLargeEvent aggregation stage to\nsplit large events: To learn more about the $changeStreamSplitLargeEvent aggregation operator,\nsee $changeStreamSplitLargeEvent (aggregation) in the\nServer manual. Field Description: fragment : The index of the fragment, starting at 1. of : The total number of fragments that compose the split event. You can have only one $changeStreamSplitLargeEvent stage in your\naggregation pipeline, and it must be the last stage in the pipeline. You can configure the change event to contain or omit the following data: To receive change stream events that include a pre-image or post-image, you\nmust connect to a MongoDB v6.0 or later deployment and set up the following: The pre-image, which is a document that represents the version of the\ndocument before the operation, if it exists. The post-image, which is a document that represents the version of the\ndocument after the operation, if it exists. Enable pre-images and post-images for the collection on your MongoDB\ndeployment. To learn how to enable these on your deployment, see the\n Change Streams with Document Pre- and Post-Images \nMongoDB server manual page.
To learn how to instruct the driver to create a collection with pre-images\nand post-images enabled, see the Create a Collection with Pre-Image and Post-Images Enabled \nsection. Configure your change stream to retrieve either or both the pre-images and\npost-images. To configure your change stream to include the pre-image, see\nthe Pre-image Configuration Example . To configure your change stream to include the post-image, see the\n Post-image Configuration Example . To create a collection with the pre-image and post-image option using the\ndriver, specify an instance of ChangeStreamPreAndPostImagesOptions \nand call the createCollection() method as shown in the following example: You can change the pre-image and post-image option in an existing collection\nby running the collMod command from the MongoDB Shell. To learn how to\nperform this operation, see the collMod \nserver manual documentation. When you modify this option on a collection, any change streams open on\nthat collection in your application may fail if configured to require\nreceiving the pre-image or post-image. The following code example shows how you can configure a change stream to\ninclude the pre-image and output the results: The preceding example configures the change stream to use the\n FullDocumentBeforeChange.REQUIRED option. This configures the change\nstream to return pre-images for replace, update, and delete change events and\nfor the server to raise an error if the pre-image is unavailable. Suppose an application updated the latestVersion field of a document in a\ncollection of software library dependencies from the value of 2.0.0 to\n 2.1.0 . The corresponding change event output by the preceding code example\nshould resemble the following text: For a list of options, see the FullDocumentBeforeChange \nAPI documentation. The following code example shows how you can configure a change stream to\ninclude the post-image and output the results: The preceding example configures the change stream to use the\n FullDocument.UPDATE_LOOKUP option. This configures the change\nstream to return both the deltas between the original and changed document\nand a copy of the document at some point in time after the change occurred. Suppose an application updated the population field of a document from\nthe value of 800 to 950 in a collection of city census data. The\ncorresponding change event output by the preceding code example should\nresemble the following text: For a list of options, see the FullDocument \nAPI documentation.", + "code": [ + { + "lang": null, + "value": "Received a change event: ChangeStreamDocument{\n operationType='insert',\n resumeToken={\"_data\": \"825EC...\"},\n namespace=myDb.myChangeStreamCollection,\n ...\n}" + }, + { + "lang": "kotlin", + "value": "\n// Launch the change stream in a separate coroutine,\n// so you can cancel it later.\nval job = launch {\n val changeStream = collection.watch()\n changeStream.collect {\n println(\"Received a change event: $it\")\n }\n}\n\n// Perform MongoDB operations that trigger change events...\n\n// Cancel the change stream when you're done listening for events.\njob.cancel()\n" + }, + { + "lang": "text", + "value": "Received a change event: ChangeStreamDocument{\noperationType=update,\nresumeToken={...},\n..." 
+ }, + { + "lang": "kotlin", + "value": "val pipeline = listOf(\n Aggregates.match(Filters.`in`(\"operationType\",\n listOf(\"insert\", \"update\")))\n)\n\n// Launch the change stream in a separate coroutine,\n// so you can cancel it later.\nval job = launch {\n val changeStream = collection.watch(pipeline)\n changeStream.collect {\n println(\"Received a change event: $it\")\n }\n}\n\n// Perform MongoDB operations that trigger change events...\n\n// Cancel the change stream when you're done listening for events.\njob.cancel()\n" + }, + { + "lang": "kotlin", + "value": "val pipeline = listOf(BsonDocument().append(\"\\$changeStreamSplitLargeEvent\", BsonDocument()))\n\nval job = launch {\n val changeStream = collection.watch(pipeline)\n changeStream.collect {\n println(\"Received a change event: $it\")\n }\n}\n" + }, + { + "lang": "kotlin", + "value": "val collectionOptions = CreateCollectionOptions()\ncollectionOptions.changeStreamPreAndPostImagesOptions(ChangeStreamPreAndPostImagesOptions(true))\ndatabase.createCollection(\"myChangeStreamCollection\", collectionOptions)\n" + }, + { + "lang": "text", + "value": "Received a change event: ChangeStreamDocument{\n operationType=update,\n resumeToken={...}\n namespace=software.libraries,\n destinationNamespace=null,\n fullDocument=null,\n fullDocumentBeforeChange=Document{{_id=6388..., latestVersion=2.0.0, ...}},\n ..." + }, + { + "lang": "kotlin", + "value": "val job = launch {\n val changeStream = collection.watch()\n .fullDocumentBeforeChange(FullDocumentBeforeChange.REQUIRED)\n changeStream.collect {\n println(it)\n }\n}\n// Perform MongoDB operations that trigger change events...\n\n// Cancel the change stream when you're done listening for events.\njob.cancel()\n" + }, + { + "lang": "text", + "value": "Received a change event: ChangeStreamDocument{\n operationType=update,\n resumeToken={...},\n namespace=censusData.cities,\n destinationNamespace=null,\n fullDocument=Document{{_id=6388..., city=Springfield, population=950, ...}},\n updatedFields={\"population\": 950}, ...\n ..." + }, + { + "lang": "kotlin", + "value": "val job = launch {\n val changeStream = collection.watch()\n .fullDocument(FullDocument.UPDATE_LOOKUP)\n changeStream.collect {\n println(it)\n }\n}\n\n// Perform MongoDB operations that trigger change events...\n\n// Cancel the change stream when you're done listening for events.\njob.cancel()\n" + } + ], + "preview": "In this guide, you can learn how to use a change stream to monitor\nreal-time changes to your database. A change stream is a MongoDB server\nfeature that allows your application to subscribe to data changes on a single\ncollection, database, or deployment. You can specify a set of aggregation\noperators to filter and transform the data your application receives.\nWhen connecting to MongoDB v6.0 or later, you can configure the events\nto include the document data before and after the change.", + "tags": "code example, monitoring, aggregation", + "facets": { + "genre": ["reference"], + "target_product": ["drivers"], + "programming_language": ["kotlin"] + } + }, + { + "slug": "fundamentals/crud/read-operations/flow", + "title": "Access Data From a Flow", + "headings": [ + "Overview", + "Terminal Methods", + "Find the First Document", + "Count Number of Results", + "Convert Results to a List", + "Iterate through Results", + "Explain the Query" + ], + "paragraphs": "In this guide, you can learn how to access data using a Flow with the\nMongoDB Kotlin driver. 
A Flow is a data type built into Kotlin coroutines that represents a stream\nof values that are being computed asynchronously. The Kotlin coroutine driver\nuses flows to represent the results of database read operations. This page uses an initiating method, find() , to show how to access\ndata from a FindFlow . The find() method creates and returns an instance of a\n FindFlow . A FindFlow allows you to browse the documents\nmatched by your search criteria and to further specify which documents\nto see by setting parameters through methods. The following ways to access and store data apply to\nother iterables such as an AggregateFlow . Terminal methods execute an operation on the MongoDB server after\nconfiguring all parameters of a Flow instance controlling the\noperation. Use the firstOrNull() method to retrieve the first document in your query\nresults or null if there are no results: Alternatively, you can use the first() method to retrieve the first document\nin your query or throw a NoSuchElementException if there are no results: These methods are often used when your query filter will match one\ndocument, such as when filtering by a unique index. Use the count() method to retrieve the number of results in the query: Use the toList() method to store your query results in a List : This method is often used when your query filter returns a small number\nof documents that can fit into available memory. Use the collect() method to iterate through fetched documents and\nensure that the flow closes if there is an early termination: Use the explain() method to view information about how MongoDB\nexecutes your operation. The explain() method returns execution plans and performance\nstatistics. An execution plan is a potential way MongoDB\ncan complete an operation. The explain() method provides both the\nwinning plan (the plan MongoDB executed) and rejected plans. The following example prints the JSON representation of the\nwinning plan for aggregation stages that produce execution plans: For more information on the explain operation, see the following\nServer Manual Entries: For more information about the methods and classes mentioned in this section,\nsee the following API Documentation: You can specify the level of detail of your explanation by passing a\nverbosity level to the explain() method. The following table shows all verbosity levels for explanations and\ntheir intended use cases: Verbosity Level Use Case ALL_PLANS_EXECUTIONS You want to know which plan MongoDB will choose to run your query. EXECUTION_STATS You want to know if your query is performing well. QUERY_PLANNER You have a problem with your query and you want as much information\nas possible to diagnose the issue.
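As a hedged sketch of how one of the verbosity levels above is passed, the fragment below reuses the collection variable from the surrounding examples; the executionStats field name follows the server's explain output format, which can vary across server versions:

```kotlin
import com.mongodb.ExplainVerbosity
import org.bson.Document

// Request execution statistics rather than the default query-planner view.
val explanation = collection.find().explain(ExplainVerbosity.EXECUTION_STATS)

// Pull out just the executionStats subdocument and print it as JSON.
val stats = explanation.getEmbedded(listOf("executionStats"), Document::class.java)
println(stats?.toJson())
```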
Explain Output Query Plans collect() explain() ExplainVerbosity", + "code": [ + { + "lang": "kotlin", + "value": "val resultsFlow = collection.find()\nval firstResultOrNull = resultsFlow.firstOrNull()\n" + }, + { + "lang": "kotlin", + "value": "try {\n val resultsFlow = collection.find()\n val firstResult = resultsFlow.first()\n} catch (e: NoSuchElementException) {\n println(\"No results found\")\n}\n" + }, + { + "lang": "kotlin", + "value": "val resultsFlow = collection.find()\nval count = resultsFlow.count()\n" + }, + { + "lang": "kotlin", + "value": "val resultsFlow = collection.find()\nval results = resultsFlow.toList()\n" + }, + { + "lang": "kotlin", + "value": "val resultsFlow = collection.find()\nresultsFlow.collect { println(it) }\n" + }, + { + "lang": "kotlin", + "value": "val explanation = collection.find().explain(ExplainVerbosity.EXECUTION_STATS)\nval jsonSummary = explanation.getEmbedded(\n listOf(\"queryPlanner\", \"winningPlan\"),\n Document::class.java\n).toJson()\nprintln(jsonSummary)\n" + }, + { + "lang": "json", + "value": "{ \"stage\": \"COLLSCAN\", \"direction\": \"forward\" }" + } + ], + "preview": "In this guide, you can learn how to access data using a Flow with the\nMongoDB Kotlin driver.", + "tags": null, + "facets": { + "target_product": ["drivers"], + "programming_language": ["kotlin"] + } + }, + { + "slug": "fundamentals/crud/read-operations/geo", + "title": "Search Geospatially", + "headings": [ + "Overview", + "Coordinates on Earth", + "GeoJSON Positions", + "GeoJSON Types", + "Index", + "Coordinates on a 2D Plane", + "Index", + "Geospatial Queries", + "Query Operators", + "Query Parameters", + "Examples", + "Query by Proximity", + "Query Within a Range" + ], + "paragraphs": "In this guide, you can learn how to search geospatial data with the\nMongoDB Kotlin Driver, and the different geospatial data formats supported by MongoDB. Geospatial data is data that represents a geographical location on\nthe surface of the Earth. Examples of geospatial data include: Locations of movie theaters Borders of countries Routes of bicycle rides Dog exercise areas in New York City To store and query your geospatial data in MongoDB, use GeoJSON . GeoJSON is\na data format created by the Internet Engineering Task Force (IETF). Here is the location of MongoDB headquarters in GeoJSON: For definitive information on GeoJSON, see the\n official IETF specification . A position represents a single place on Earth, and exists in code as an array\ncontaining two or three number values: Longitude in the first position (required) Latitude in the second position (required) Elevation in the third position (optional) GeoJSON orders coordinates as longitude first and latitude second. This may\nbe surprising as geographic coordinate system conventions generally list\nlatitude first and longitude second. Make sure to check what format any other\ntools you are working with use. Popular tools such as OpenStreetMap and Google\nMaps list coordinates as latitude first and longitude second. Your GeoJSON object's type determines its geometric shape. Geometric shapes are\nmade up of positions. Here are some common GeoJSON types and how you can specify them with positions: To learn more about the shapes you can use in MongoDB, see the\n GeoJSON manual entry . Point : a single position. This could represent the location of a\n sculpture . LineString : an array of two or more positions, thus forming a series of line\nsegments. This could represent\n the route of the Great Wall of China . 
Polygon : an array of positions in which the first and last\nposition are the same, thus enclosing some space. This could represent\n the land within Vatican City . To query data stored in the GeoJSON format, add the field containing\nGeoJSON data to a 2dsphere index. The following snippet creates a\n 2dsphere index on the location.geo field using the Indexes builder: For more information on the Indexes builder, see our\n guide on the Indexes builder . You can store geospatial data using x and y coordinates on\na two-dimensional Euclidean plane. We refer to coordinates on a two-dimensional\nplane as \"legacy coordinate pairs\". Legacy coordinate pairs have the following structure: Your field should contain an array of two values in which the first represents\nthe x axis value and the second represents the y axis value. To query data stored as legacy coordinate pairs, you must add the field containing\nlegacy coordinate pairs to a 2d index. The following snippet creates a\n 2d index on the coordinates field using the Indexes builder: For more information on the Indexes builder, see our\n guide on the Indexes builder . For more information on legacy coordinate pairs, see the\n MongoDB server manual page on legacy coordinate pairs . Spherical ( 2dsphere ) and flat ( 2d ) indexes support some, but\nnot all, of the same query operators. For a full list of operators\nand their index compatibility, see the\n manual entry for geospatial queries . Geospatial queries consist of a query operator and GeoJSON shapes as query\nparameters. To query your geospatial data, use one of the following query operators: You can specify these query operators in the MongoDB Kotlin driver with the\n near() , geoWithin() , nearSphere() , and geoIntersects() utility\nmethods of the Filters builder class. For more information on geospatial query operators, see the\n manual entry for geospatial queries . For more information on Filters , see our\n guide on the Filters builder . $near $geoWithin $nearSphere $geoIntersects requires a 2dsphere index To specify a shape to use in a geospatial query, use the\n Position , Point , LineString , and Polygon classes of the MongoDB\nKotlin driver. For a full list of the GeoJSON shapes available in the MongoDB Kotlin driver, see the\n GeoJSON package \nAPI Documentation. The following examples use the MongoDB Atlas sample dataset. You can learn how\nto set up your own free-tier Atlas cluster and how to load the sample dataset\nin our quick start guide . The examples use the theaters collection in the sample_mflix database\nfrom the sample dataset. The examples require the following imports: The data is modeled using the following Kotlin data class: The results are modeled using the following Kotlin data class: The theaters collection already contains a 2dsphere index on the\n \"${Theater::location.name}.${Theater.Location::geo.name}\" field. To search for and return documents from nearest to farthest from a point, use\nthe near() static utility method of the Filters builder class. The\n near() method constructs a query with the $near query operator. The following example queries for theaters between 10,000 and 5,000 \nmeters from the Great Lawn of Central Park: For more information on the $near operator, see the\n reference documentation for $near . For more information on Filters , see\n our guide on the Filters builder . MongoDB uses the\n same reference system \nas GPS satellites to calculate geometries over the Earth. 
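The examples below cover near() and geoWithin() ; for the other operators listed above, here is a hedged sketch of geoIntersects() using illustrative coordinates. It assumes the theaters collection and the Theater class shown in this section, and crossTownLine is an invented name:

```kotlin
import com.mongodb.client.model.Filters
import com.mongodb.client.model.geojson.LineString
import com.mongodb.client.model.geojson.Position

// A line cutting across Manhattan (longitude first, per GeoJSON).
val crossTownLine = LineString(
    listOf(
        Position(-74.01, 40.70),
        Position(-73.95, 40.82)
    )
)

// Match theaters whose location geometry intersects the line.
val intersectsQuery = Filters.geoIntersects(
    "${Theater::location.name}.${Theater.Location::geo.name}",
    crossTownLine
)
val intersectingTheaters = collection.find(intersectsQuery)
intersectingTheaters.collect { println(it) }
```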
To search for geospatial data within a specified shape use the geoWithin() \nstatic utility method of the Filters builder class. The geoWithin() \nmethod constructs a query with the $geoWithin query operator. The following example searches for movie theaters in a section of Long Island. The following figure shows the polygon defined by the\n longIslandTriangle variable and dots representing the locations of\nthe movie theaters returned by our query. For more information on the $geoWithin operator, see the\n reference documentation for $geoWithin For more information on the operators you can use in your query, see the\n MongoDB server manual page on geospatial query operators", + "code": [ + { + "lang": "json", + "value": "\"MongoDB Headquarters\" : {\n \"type\": \"point\",\n \"coordinates\": [-73.986805, 40.7620853]\n}" + }, + { + "lang": "kotlin", + "value": "collection.createIndex((Indexes.geo2dsphere(\"location.geo\")))\n" + }, + { + "lang": "json", + "value": "\"\" : [ x, y ]" + }, + { + "lang": "kotlin", + "value": "collection.createIndex((Indexes.geo2d(\"coordinates\")))\n" + }, + { + "lang": null, + "value": "import com.mongodb.client.model.geojson.Point\nimport com.mongodb.client.model.geojson.Polygon\nimport com.mongodb.client.model.geojson.Position\nimport com.mongodb.client.model.Filters.near\nimport com.mongodb.client.model.Filters.geoWithin\nimport com.mongodb.client.model.Projections.fields\nimport com.mongodb.client.model.Projections.include\nimport com.mongodb.client.model.Projections.excludeId" + }, + { + "lang": "kotlin", + "value": "data class Theater(\n val theaterId: Int,\n val location: Location\n) {\n data class Location(\n val address: Address,\n val geo: Point\n ) {\n data class Address(\n val street1: String,\n val street2: String? 
= null,\n val city: String,\n val state: String,\n val zipcode: String\n )\n }\n}\n" + }, + { + "lang": "kotlin", + "value": "data class TheaterResults(\n val location: Location\n) {\n data class Location(\n val address: Address\n ) {\n data class Address(\n val city: String\n )\n }\n}\n\n" + }, + { + "lang": "kotlin", + "value": "val database = client.getDatabase(\"sample_mflix\")\nval collection = database.getCollection<TheaterResults>(\"theaters\")\nval centralPark = Point(Position(-73.9667, 40.78))\nval query = Filters.near(\n \"${Theater::location.name}.${Theater.Location::geo.name}\", centralPark, 10000.0, 5000.0\n)\nval projection = Projections.fields(\n Projections.include(\n \"${Theater::location.name}.${Theater.Location::address.name}.${Theater.Location.Address::city.name}\"),\n Projections.excludeId()\n)\nval resultsFlow = collection.find(query).projection(projection)\n\nresultsFlow.collect { println(it) }\n" + }, + { + "lang": "console", + "value": "TheaterResults(location=Location(address=Address(city=Bronx)))\nTheaterResults(location=Location(address=Address(city=New York)))\nTheaterResults(location=Location(address=Address(city=New York)))\nTheaterResults(location=Location(address=Address(city=Long Island City)))\nTheaterResults(location=Location(address=Address(city=New York)))\nTheaterResults(location=Location(address=Address(city=Secaucus)))\nTheaterResults(location=Location(address=Address(city=Jersey City)))\nTheaterResults(location=Location(address=Address(city=Elmhurst)))\nTheaterResults(location=Location(address=Address(city=Flushing)))\nTheaterResults(location=Location(address=Address(city=Flushing)))\nTheaterResults(location=Location(address=Address(city=Flushing)))\nTheaterResults(location=Location(address=Address(city=Elmhurst)))" + }, + { + "lang": "kotlin", + "value": "val longIslandTriangle = Polygon(\n listOf(\n Position(-72.0, 40.0),\n Position(-74.0, 41.0),\n Position(-72.0, 39.0),\n Position(-72.0, 40.0)\n )\n)\nval projection = Projections.fields(\n Projections.include(\n \"${Theater::location.name}.${Theater.Location::address.name}.${Theater.Location.Address::city.name}\"),\n Projections.excludeId()\n)\nval geoWithinComparison = Filters.geoWithin(\n \"${Theater::location.name}.${Theater.Location::geo.name}\", longIslandTriangle\n)\nval resultsFlow = collection.find(geoWithinComparison)\n .projection(projection)\n\nresultsFlow.collect { println(it) }\n" + }, + { + "lang": "console", + "value": "TheaterResults(location=Location(address=Address(city=Baldwin)))\nTheaterResults(location=Location(address=Address(city=Levittown)))\nTheaterResults(location=Location(address=Address(city=Westbury)))\nTheaterResults(location=Location(address=Address(city=Mount Vernon)))\nTheaterResults(location=Location(address=Address(city=Massapequa)))" + } + ], + "preview": "In this guide, you can learn how to search geospatial data with the\nMongoDB Kotlin Driver, and the different geospatial data formats supported by MongoDB.", + "tags": null, + "facets": { + "target_product": ["drivers"], + "programming_language": ["kotlin"] + } + }, + { + "slug": "fundamentals/crud/read-operations/limit", + "title": "Limit the Number of Returned Results", + "headings": [ + "Overview", + "Sample Documents", + "Specify a Limit", + "Combining Skip and Limit" + ], + "paragraphs": "In this guide, you can learn how to limit the number of results returned\nfrom read operations with the MongoDB Kotlin driver.
Use limit() to cap the number of documents that a read operation returns.\nThis instance method designates the maximum number of\ndocuments that a read operation can return. If there are not enough documents\nto reach the specified limit, it can return a smaller number.\nIf you use limit() with the skip() instance method, the skip applies\nfirst and the limit only applies to the documents left over after\nthe skip. For more information on the skip() method, see our\n guide on Skipping Returned Documents . The following examples demonstrate, respectively, how to insert data into\na collection, how to use limit() to restrict the number of returned documents,\nand how to combine limit() with skip() to further narrow the results returned from a query. The following sections feature examples that use these sample documents: This data is modeled with the following Kotlin data class: The next example queries the collection to return the top three\nlongest books. It first matches all the documents with the query, then sorts on the\n length field to return books with longer lengths before\nbooks with shorter lengths. Lastly, it limits the return value to 3 documents,\nand returns the following three documents, sorted by length: The order in which you call limit() and sort() does not matter\nbecause the find command always applies the sort first and the\nlimit after it. The following two calls are equivalent: To see the next three longest books, append the skip() method to your\n find() call. The integer argument passed to skip() determines\nhow many documents the find operation skips. This operation returns the\ndocuments that describe the fourth through sixth longest books: You can combine skip() and limit() in this way to implement paging for your\ncollection, returning only small subsets of the collection at one time. For more information about the methods and classes mentioned in this guide,\nsee the following API Documentation: In order to ensure stable sorts across multiple queries, you must sort\nusing a unique key (such as _id ). Otherwise, a call to skip() \nand limit() may produce unpredictable results when combined with\n sort() . For example, consider the following data: If you sort by type alone, sort() does not guarantee the same order\nupon return. Appending skip() and limit() to the sort() \ncould return different documents for different queries. In this case, sorting\nby data or serial_no would guarantee a stable sort, as both are unique keys.
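The paging idea above can be packaged into a small helper. This is a hypothetical sketch, not part of the original guide: it sorts on the unique _id field so page boundaries stay stable, it assumes the Book class from this guide's examples, and fetchPage and its parameters are invented names:

```kotlin
import com.mongodb.client.model.Sorts
import com.mongodb.kotlin.client.coroutine.MongoCollection
import kotlinx.coroutines.flow.toList

// Fetch one page of books: skip the pages before it, then cap the result.
suspend fun fetchPage(
    collection: MongoCollection<Book>,
    pageNumber: Int, // 1-based page index
    pageSize: Int
): List<Book> =
    collection.find()
        .sort(Sorts.ascending("_id")) // unique key keeps page boundaries stable
        .skip((pageNumber - 1) * pageSize)
        .limit(pageSize)
        .toList()

// Usage: the second page of three books.
// val pageTwo = fetchPage(collection, pageNumber = 2, pageSize = 3)
```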
FindFlow.collect() MongoCollection.find()", + "code": [ + { + "lang": "json", + "value": "{ \"_id\": 1, \"title\": \"The Brothers Karamazov\", \"author\": \"Dostoyevsky\", \"length\": 824 }\n{ \"_id\": 2, \"title\": \"Les Mis\u00e9rables\", \"author\": \"Hugo\", \"length\": 1462 }\n{ \"_id\": 3, \"title\": \"Atlas Shrugged\", \"author\": \"Rand\", \"length\": 1088 }\n{ \"_id\": 4, \"title\": \"Infinite Jest\", \"author\": \"Wallace\", \"length\": 1104 }\n{ \"_id\": 5, \"title\": \"Cryptonomicon\", \"author\": \"Stephenson\", \"length\": 918 }\n{ \"_id\": 6, \"title\": \"A Dance with Dragons\", \"author\": \"Martin\", \"length\": 1104 }" + }, + { + "lang": "kotlin", + "value": "data class Book(\n @BsonId val id: Int,\n val title: String,\n val author: String,\n val length: Int\n)\n" + }, + { + "lang": "kotlin", + "value": "val results = collection.find()\n .sort(descending(\"length\"))\n .limit(3)\n\nresults.collect { println(it) }\n" + }, + { + "lang": "console", + "value": " Book(id=2, title=Les Mis\u00e9rables, author=Hugo, length=1462)\n Book(id=6, title=A Dance with Dragons, author=Martin, length=1104)\n Book(id=4, title=Infinite Jest, author=Wallace, length=1104)" + }, + { + "lang": "kotlin", + "value": " collection.find().sort(descending(\"length\")).limit(3)\n collection.find().limit(3).sort(descending(\"length\"))\n" + }, + { + "lang": "kotlin", + "value": "val results = collection.find()\n .sort(descending(\"length\"))\n .skip(3)\n .limit(3)\n\nresults.collect { println(it) }\n" + }, + { + "lang": "console", + "value": " Book(id=3, title=Atlas Shrugged, author=Rand, length=1088)\n Book(id=5, title=Cryptonomicon, author=Stephenson, length=918)\n Book(id=1, title=The Brothers Karamazov, author=Dostoyevsky, length=824)" + }, + { + "lang": "json", + "value": "{ type: \"computer\", data: \"1\", serial_no: 235235 }\n{ type: \"computer\", data: \"2\", serial_no: 235237 }\n{ type: \"computer\", data: \"3\", serial_no: 235239 }\n{ type: \"computer\", data: \"4\", serial_no: 235241 }" + } + ], + "preview": "In this guide, you can learn how to limit the number of results returned\nfrom read operations with the MongoDB Kotlin driver.", + "tags": null, + "facets": { + "target_product": ["drivers"], + "programming_language": ["kotlin"] + } + }, + { + "slug": "fundamentals/crud/read-operations/project", + "title": "Specify Which Fields to Return", + "headings": ["Overview", "Behavior", "Explanation"], + "paragraphs": "In this guide, you can learn how to control which fields appear in\ndocuments returned from read operations with the MongoDB Kotlin driver. Many read requests require only a subset of fields in a document.\nFor example, when logging a user in you may only need their username, and\nnot all of their profile information. By default, queries in MongoDB return\nall fields in matching documents. You can use a projection to return\nonly the data you need. A projection is a document that instructs MongoDB which fields of a\ndocument to return. Use the Projections class\nto construct a projection document. Projections work in two ways: These two methods of projection are mutually exclusive: if you\nexplicitly include fields, you cannot explicitly exclude fields, and\nvice versa. Explicitly including fields. This has the side-effect of implicitly\nexcluding all unspecified fields. Implicitly excluding fields. This has the side-effect of implicitly\nincluding all unspecified fields. The _id field is not subject to these mechanics. 
You must\nexplicitly exclude the _id field if you do not want it returned.\nYou can exclude the _id field even if you have specified certain\nfields to include. Consider the following collection containing documents that describe\nvarieties of fruit: This data is modeled using the following Kotlin data class: In the following query, pass the projection to return the name \nfield of each document. The results are modeled using the FruitName Kotlin data class: The projection document specifies that the read operation result should\n include the name field of each returned document. As a result, this\nprojection implicitly excludes the qty and rating fields. Chaining\nthis projection to find() with an empty query filter yields the\nabove results. Despite the fact that this projection only explicitly included the\n name field, the query also returned the _id field, represented by id in the data class. The _id field is a special case: it is always included in every query\nresult unless explicitly excluded. That's because the _id field is a\nunique identifier for each document, a property that can be useful when\nconstructing queries. The _id is the only exception to the mutually exclusive include-exclude\nbehavior in projections: you can explicitly exclude the _id field\neven when explicitly including other fields if you do not want _id \nto be present in returned documents. The projection document specifies that the read operation result should\n include the name field of each returned document, and specifies to\n exclude the _id field. As a result, this projection implicitly\nexcludes the qty and rating fields. Chaining this projection to\n find() with an empty query filter yields the above results. You can also specify multiple fields to include in your projection. This example that identifies two fields to include in the projection yields\nthe following results using the FruitRating Kotlin data class: For additional projection examples, see the\n MongoDB Manual page on Project Fields to Return from Query . The order in which you specify the fields in the projection does not\nalter the order in which they are returned.", + "code": [ + { + "lang": "json", + "value": "{ \"_id\": 1, \"name\": \"apples\", \"qty\": 5, \"rating\": 3 },\n{ \"_id\": 2, \"name\": \"bananas\", \"qty\": 7, \"rating\": 1 },\n{ \"_id\": 3, \"name\": \"oranges\", \"qty\": 6, \"rating\": 2 },\n{ \"_id\": 4, \"name\": \"avocados\", \"qty\": 3, \"rating\": 5 }," + }, + { + "lang": "kotlin", + "value": "data class Fruit(\n @BsonId val id: Int,\n val name: String,\n val qty: Int,\n val rating: Int\n)\n" + }, + { + "lang": "kotlin", + "value": "data class FruitName(\n @BsonId val id: Int? = null,\n val name: String\n)\n\n// Return all documents with only the name field\nval filter = Filters.empty()\nval projection = Projections.fields(\n Projections.include(FruitName::name.name)\n)\nval flowResults = collection.find(filter).projection(projection)\n\nflowResults.collect { println(it)}\n" + }, + { + "lang": "console", + "value": "FruitName(id=1, name=apples),\nFruitName(id=2, name=bananas),\nFruitName(id=3, name=oranges),\nFruitName(id=4, name=avocados)" + }, + { + "lang": "kotlin", + "value": "data class FruitName(\n @BsonId val id: Int? 
= null,\n val name: String\n)\n\n// Return all documents with *only* the name field\n// excludes the id\nval filter = Filters.empty()\nval projection = Projections.fields(\n Projections.include(FruitName::name.name),\n Projections.excludeId()\n)\nval flowResults = collection.find(filter).projection(projection)\n\nflowResults.collect { println(it)}\n" + }, + { + "lang": "console", + "value": "FruitName(name=apples),\nFruitName(name=bananas),\nFruitName(name=oranges),\nFruitName(name=avocados)" + }, + { + "lang": "kotlin", + "value": "data class FruitRating(\n val name: String,\n val rating: Int\n)\n\nval filter = Filters.empty()\nval projection = Projections.fields(\n Projections.include(FruitRating::name.name, FruitRating::rating.name),\n Projections.excludeId()\n)\nval flowResults = collection.find(filter).projection(projection)\n\nflowResults.collect { println(it)}\n" + }, + { + "lang": "console", + "value": "FruitRating(name=apples, rating=3),\nFruitRating(name=bananas, rating=1),\nFruitRating(name=oranges, rating=2),\nFruitRating(name=avocados, rating=5)" + } + ], + "preview": "In this guide, you can learn how to control which fields appear in\ndocuments returned from read operations with the MongoDB Kotlin driver.", + "tags": null, + "facets": { + "target_product": ["drivers"], + "programming_language": ["kotlin"] + } + }, + { + "slug": "fundamentals/crud/read-operations/retrieve", + "title": "Retrieve Data", + "headings": [ + "Overview", + "Sample Data for Examples", + "Find Operation", + "Example", + "Aggregate Operation", + "Example" + ], + "paragraphs": "In this guide, you can learn how to retrieve data from your MongoDB\ndatabase. To retrieve data, use read operations. Read operations allow you to do the following: Retrieve a subset of documents from your collection using a find operation Perform transformations on retrieved documents from your collection using an aggregate operation Monitor real-time changes to your database using change streams The following sections feature examples of how the owner of a paint\nstore manages their customers' orders. For each order, the owner keeps\ntrack of the color and quantity, which corresponds to the color and\n qty fields in their paint_order collection: This data is modeled with the following Kotlin data class: Use the find operation to retrieve a subset of your existing data in\nMongoDB. You can specify what data to return including which documents\nto retrieve, in what order to retrieve them, and how many to retrieve. To perform a find operation, call the find() method on an instance\nof a MongoCollection . This method searches a collection for documents that\nmatch the query filter you provide. For more information on how to\nspecify a query, see our Specify a Query guide. The owner would like to know which orders contain greater than three, but\nless than nine cans of paint from their paint_order collection . To address this scenario, the owner finds orders to match the criteria: After the owner runs this query, they find two orders that matched the\ncriteria. For more information on how to build filters, see our Filters Builders guide. For a runnable find() example, see our Find Multiple\nDocuments page. Use the aggregate operation to perform the stages in an aggregation\npipeline. An aggregation pipeline is a multi-staged transformation that\nproduces an aggregated result. To perform an aggregate operation, call the aggregate() method on an\ninstance of a MongoCollection . 
This method accepts aggregation\nexpressions to run in sequence. To perform aggregations, you can\ndefine aggregation stages that specify how to match documents, rename\nfields, and group values. For more information, see our\n Aggregation guide. The owner would like to know which paint color is the most purchased\n(highest quantity sold) from their paint_order collection . To address the scenario, the owner creates an aggregation pipeline that: After the owner runs the aggregation, they find that \"green\" is the most\npurchased color. For more information on how to construct an aggregation pipeline, see\nthe MongoDB server manual page on Aggregation . For additional information on the methods mentioned on this page, see\nthe following API Documentation: Matches all the documents in the paint_order collection Groups orders by colors Sums up the quantity field by color Orders the results by highest-to-lowest quantity MongoCollection.find() MongoCollection.aggregate()", + "code": [ + { + "lang": "json", + "value": "{ \"_id\": 1, \"color\": \"purple\", \"qty\": 10 }\n{ \"_id\": 2, \"color\": \"green\", \"qty\": 8 }\n{ \"_id\": 3, \"color\": \"purple\", \"qty\": 4 }\n{ \"_id\": 4, \"color\": \"green\", \"qty\": 11 }" + }, + { + "lang": "kotlin", + "value": "data class PaintOrder(\n @BsonId val id: Int,\n val qty: Int,\n val color: String\n)\n" + }, + { + "lang": "kotlin", + "value": "val filter = Filters.and(Filters.gt(\"qty\", 3), Filters.lt(\"qty\", 9))\nval resultsFlow = collection.find(filter)\n\nresultsFlow.collect { println(it) }\n" + }, + { + "lang": "console", + "value": "PaintOrder(id=2, qty=8, color=green)\nPaintOrder(id=3, qty=4, color=purple)" + }, + { + "lang": "kotlin", + "value": "data class AggregationResult(@BsonId val id: String, val qty: Int)\n\nval filter = Filters.empty()\nval pipeline = listOf(\n Aggregates.match(filter),\n Aggregates.group(\n \"\\$color\",\n Accumulators.sum(\"qty\", \"\\$qty\")\n ),\n Aggregates.sort(Sorts.descending(\"qty\"))\n)\nval resultsFlow = collection.aggregate<AggregationResult>(pipeline)\n\nresultsFlow.collect { println(it) }\n" + }, + { + "lang": "console", + "value": "AggregationResult(id=green, qty=19)\nAggregationResult(id=purple, qty=14)" + } + ], + "preview": "In this guide, you can learn how to retrieve data from your MongoDB\ndatabase. To retrieve data, use read operations.", + "tags": null, + "facets": { + "target_product": ["drivers"], + "programming_language": ["kotlin"] + } + }, + { + "slug": "fundamentals/crud/read-operations/skip", + "title": "Skip Returned Results", + "headings": [ + "Overview", + "Examples", + "Using a FindFlow", + "Using Aggregation" + ], + "paragraphs": "In this guide, you can learn how to skip a specified number of returned\nresults from read operations with the MongoDB Kotlin driver. You can skip results returned from a query by using the\n skip() method. You can also skip documents at a specific stage in an\naggregation pipeline by specifying a $skip aggregation stage. The skip() method takes an integer that specifies the number of documents\nto omit from the beginning of the list of documents returned by the\n FindFlow . You can use the skip() method to skip the first two documents as follows: Aggregates.skip() \nis an optional stage in the aggregation pipeline that specifies how many\ndocuments to omit from the beginning of the results of the prior stage.
You can use the Aggregates.skip() method to skip the first two documents as follows: The following example is about a paint store that sells eight different\ncolors of paint. The best colors sell quicker than the other colors.\nOne day, a customer asks what the three best-selling (lowest inventory)\ncolors are. The paint store keeps track of inventory in the qty \nfield in their paint_inventory collection: This data is modeled with the following Kotlin data class: To address the scenario, the paint store needs to query the\n paint_inventory collection with an empty filter, sort the documents\nby qty field and omit the first five results. The find() method returns all documents. The sort() method specifies documents to display from highest to lowest based on the qty field. The skip() method specifies to omit the first five documents. After the paint store runs the query, they find the three best-selling colors are pink,\nred, and white. The match() stage returns all documents. The sort() stage specifies documents to display from highest to lowest based on the qty field. The skip() stage specifies to omit the first five documents. If the value of skip is greater than or equal to the number of matched\ndocuments for a query, that query returns no documents. If the skip() method from the preceding example skips the first nine\ndocuments, no results would return since the specified quantity\nexceeds the number of matched documents.", + "code": [ + { + "lang": "kotlin", + "value": "collection.find().skip(2)\n" + }, + { + "lang": "kotlin", + "value": "val filter = Filters.empty()\nval results = collection.aggregate(listOf(\n Aggregates.match(filter),\n Aggregates.skip(2))\n)\n" + }, + { + "lang": "json", + "value": "{ \"_id\": 1, \"color\": \"red\", \"qty\": 5 }\n{ \"_id\": 2, \"color\": \"purple\", \"qty\": 10 }\n{ \"_id\": 3, \"color\": \"blue\", \"qty\": 9 }\n{ \"_id\": 4, \"color\": \"white\", \"qty\": 6 }\n{ \"_id\": 5, \"color\": \"yellow\", \"qty\": 11 }\n{ \"_id\": 6, \"color\": \"pink\", \"qty\": 3 }\n{ \"_id\": 7, \"color\": \"green\", \"qty\": 8 }\n{ \"_id\": 8, \"color\": \"orange\", \"qty\": 7 }" + }, + { + "lang": "kotlin", + "value": "data class PaintOrder(\n @BsonId val id: Int,\n val qty: Int,\n val color: String\n)\n" + }, + { + "lang": "kotlin", + "value": "val filter = Filters.empty()\nval results = collection.find(filter)\n .sort(descending(PaintOrder::qty.name))\n .skip(5)\nresults.collect { println(it) }\n" + }, + { + "lang": "console", + "value": "PaintOrder(id=4, qty=6, color=white)\nPaintOrder(id=1, qty=5, color=red)\nPaintOrder(id=6, qty=3, color=pink)" + }, + { + "lang": "kotlin", + "value": "val filter = Filters.empty()\nval aggregate = listOf(\n Aggregates.match(filter),\n Aggregates.sort(descending(PaintOrder::qty.name)),\n Aggregates.skip(5)\n)\nval findFlow = collection.aggregate(aggregate)\nfindFlow.collect { println(it) }\n" + }, + { + "lang": "console", + "value": "PaintOrder(id=4, qty=6, color=white)\nPaintOrder(id=1, qty=5, color=red)\nPaintOrder(id=6, qty=3, color=pink)" + }, + { + "lang": "kotlin", + "value": "val filter = Filters.empty()\nval emptyQuery = listOf(\n Aggregates.match(filter),\n Aggregates.sort(descending(PaintOrder::qty.name)),\n Aggregates.skip(9)\n)\nval findFlow = collection.aggregate(emptyQuery)\nfindFlow.collect { println(it) }\n" + } + ], + "preview": "In this guide, you can learn how to skip a specified number of returned\nresults from read operations with the MongoDB Kotlin driver.", + "tags": null, + "facets": { + "target_product": 
["drivers"], + "programming_language": ["kotlin"] + } + }, + { + "slug": "fundamentals/crud/read-operations/sort", + "title": "Sort Results", + "headings": [ + "Overview", + "Methods For Sorting", + "Sorting Direction", + "Ascending", + "Descending", + "Handling Ties", + "Combining Sort Criteria", + "Text Search" + ], + "paragraphs": "In this guide, you can learn how to use sort operations to order your\nresults from read operations with the MongoDB Kotlin driver. The sort operation orders the documents returned from your query by your specified\n sort criteria . Sort criteria are the rules you pass to MongoDB that describe\nhow you would like your data to be ordered. Some examples of sort criteria are: You should read this guide to learn how to perform the following\nactions: The examples in this guide use a sample collection that contains the following\ndocuments: This data is modeled with the following Kotlin data class: Smallest number to largest number Earliest time of day to latest time of day Alphabetical order by first name Perform ascending sorts and descending sorts Combine sort criteria Sort on the text score of a text search You can sort results retrieved by a query, or you can sort results\nwithin an aggregation pipeline. To sort your query results, use the\n sort() method of a FindFlow instance. To sort your results within an\naggregation pipeline, use the Aggregates.sort() static factory method. Both\nof these methods receive objects that implement the Bson interface as\narguments. For more information, see the API Documentation for the\n BSON interface . You can use the sort() method of a FindFlow instance as follows: You can use the Aggregates.sort() method within an aggregation pipeline to\nsort the documents in the\n sample collection from smallest to\nlargest value of the orderTotal field as follows: In the preceding code snippets, we specify the sort criteria using the Sorts \nbuilder class. While it is possible to specify sort criteria using any class\nthat implements the Bson interface, we recommend that you specify sort\ncriteria through the Sorts builder. For more information on the Sorts \nbuilder class, see the Sorts builder guide. For more information about the classes and interfaces in this section, see the\nfollowing API Documentation: FindFlow Aggregates Sorts BSON Document The direction of your sort can either be ascending or descending .\nAn ascending sort orders your results from smallest to largest. A\ndescending sort orders your results from largest to smallest. Here are some examples of data sorted in ascending order: Here are some examples of data sorted in descending order: The following subsections show how to specify these sort criteria. Numbers: 1, 2, 3, 43, 43, 55, 120 Dates: 1990-03-10, 1995-01-01, 2005-10-30, 2005-12-21 Words (ASCII): Banana, Dill, carrot, cucumber, hummus Numbers: 100, 30, 12, 12, 9, 3, 1 Dates: 2020-01-01, 1998-12-11, 1998-12-10, 1975-07-22 Words (reverse ASCII): pear, grapes, apple, Cheese To specify an ascending sort, use the Sorts.ascending() static\nfactory method. Pass the Sorts.ascending() method\nthe name of the field you need to sort in ascending order. You can pass the sort() method the output of the Sorts.ascending() \nmethod to specify an ascending sort on a field as follows: The preceding sort() method returns a FindIterable object that can iterate\nover the documents in your collection, sorted from smallest to largest on the\nspecified field name. 
In the following code example, we use the ascending() method to sort the\n sample collection \nby the orderTotal field: To specify a descending sort, use the Sorts.descending() static factory\nmethod. Pass the Sorts.descending() method the name of the field you need to sort in descending order. The following code snippet shows how to specify a descending sort on the\n orderTotal field and return the documents in the\n sample collection \nin descending order: A tie occurs when two or more documents have identical values in the field\nyou are using to order your results. MongoDB does not guarantee sort order in\nthe event of ties. For example, suppose we encounter a tie when applying a sort\nto the sample collection using the following\ncode: Since multiple documents that matched the query contain the same value\nin the date field, the documents may not be returned in a consistent order. If you need to guarantee a specific order for documents that have fields\nwith identical values, you can specify additional fields to sort on in the event\nof a tie. We can specify an ascending sort on the date field followed by the\n orderTotal field to return the documents in the\n sample collection \nin the following order: To combine sort criteria, use the Sorts.orderBy() static factory\nmethod. This method constructs an object containing an ordered list of sort\ncriteria. When performing the sort, if the previous sort criteria result in a\ntie, the sort uses the next sort criteria in the list to determine the order. In the following code snippet, we use the orderBy() method to order the data\nby performing a descending sort on the date field, and in the event of a\ntie, by performing an ascending sort on the orderTotal field. With\nthese sort criteria, the code returns the documents in the sample\ncollection in the following order: You can specify the order of the results of a\n text search by how closely the string values of\neach result's fields specified by the collection's text index match your search\nstring. The text search assigns a numerical\n text score to\nindicate how closely each result matches the search string. Use the\n Sorts.metaTextScore() static factory method to build your sort criteria to\nsort by the text score. In the following code example, we show how you can use the\n Sorts.metaTextScore() method to sort the results of a text\nsearch on the sample collection .\nThe code example uses the Filters ,\n Indexes , and\n Projections builders. The code example performs the following actions: The data is modeled with the following Kotlin data class: For more information about the classes in this section, see the\nfollowing API Documentation: For more information, see the\n Sorts class API Documentation.\nSee the server manual documentation for more information on the $text \nquery operator and the\n $meta \naggregation pipeline operator. You need a text index on your collection to\nperform a text search. See the server manual documentation for more\ninformation on how to\n create a text index . Creates a text index for your\n sample collection \non the description field. If you call createIndex() specifying an index that\nalready exists on the collection, the operation does not create a new index. Runs your text search for the phrase \"vanilla\" . Projects text scores into your query results as the\n score field. Sorts your results by text score (best match first). The structure of text search has changed for MongoDB 4.4 or later. 
You no\nlonger need to project Projections.metaTextScore() into your\n FindFlow instance in order to sort on the text score. In addition,\nthe field name you specify in a $meta text score aggregation operation\nused in a sort is ignored. This means that the field name argument you pass\nto Sorts.metaTextScore() is disregarded. Filters Indexes Projections MongoCollection", + "code": [ + { + "lang": "json", + "value": "{ \"_id\": 1, \"date\": \"2022-01-03\", \"orderTotal\": 17.86, \"description\": \"1/2 lb cream cheese and 1 dozen bagels\" },\n{ \"_id\": 2, \"date\": \"2022-01-11\", \"orderTotal\": 83.87, \"description\": \"two medium vanilla birthday cakes\" },\n{ \"_id\": 3, \"date\": \"2022-01-11\", \"orderTotal\": 19.49, \"description\": \"1 dozen vanilla cupcakes\" },\n{ \"_id\": 4, \"date\": \"2022-01-15\", \"orderTotal\": 43.62, \"description\": \"2 chicken lunches and a diet coke\" },\n{ \"_id\": 5, \"date\": \"2022-01-23\", \"orderTotal\": 60.31, \"description\": \"one large vanilla and chocolate cake\" },\n{ \"_id\": 6, \"date\": \"2022-01-23\", \"orderTotal\": 10.99, \"description\": \"1 bagel, 1 orange juice, 1 muffin\" }" + }, + { + "lang": "kotlin", + "value": "data class Order(\n @BsonId val id: Int,\n val date: String,\n val orderTotal: Double,\n val description: String,\n)\n" + }, + { + "lang": "kotlin", + "value": "val resultsFlow = collection.find().sort(Sorts.ascending(Order::orderTotal.name))\n" + }, + { + "lang": "kotlin", + "value": "val resultsFlow = collection.aggregate(listOf(\n Aggregates.sort(Sorts.ascending(Order::orderTotal.name))\n))\n\nresultsFlow.collect { println(it) }\n" + }, + { + "lang": "console", + "value": "Order(id=6, date=2022-01-23, orderTotal=10.99, description=1 bagel, 1 orange juice, 1 muffin)\nOrder(id=1, date=2022-01-03, orderTotal=17.86, description=1/2 lb cream cheese and 1 dozen bagels)\nOrder(id=3, date=2022-01-11, orderTotal=19.49, description=1 dozen vanilla cupcakes)\nOrder(id=4, date=2022-01-15, orderTotal=43.62, description=2 chicken lunches and a diet coke)\nOrder(id=5, date=2022-01-23, orderTotal=60.31, description=one large vanilla and chocolate cake)\nOrder(id=2, date=2022-01-11, orderTotal=83.87, description=two medium vanilla birthday cakes)" + }, + { + "lang": "kotlin", + "value": "collection.find().sort(Sorts.ascending(\"<field name>\"))" + }, + { + "lang": "kotlin", + "value": "val resultsFlow = collection.find()\n .sort(Sorts.ascending(Order::orderTotal.name))\n\nresultsFlow.collect { println(it) }\n" + }, + { + "lang": "console", + "value": "Order(id=6, date=2022-01-23, orderTotal=10.99, description=1 bagel, 1 orange juice, 1 muffin)\nOrder(id=1, date=2022-01-03, orderTotal=17.86, description=1/2 lb cream cheese and 1 dozen bagels)\nOrder(id=3, date=2022-01-11, orderTotal=19.49, description=1 dozen vanilla cupcakes)\nOrder(id=4, date=2022-01-15, orderTotal=43.62, description=2 chicken lunches and a diet coke)\nOrder(id=5, date=2022-01-23, orderTotal=60.31, description=one large vanilla and chocolate cake)\nOrder(id=2, date=2022-01-11, orderTotal=83.87, description=two medium vanilla birthday cakes)" + }, + { + "lang": "kotlin", + "value": "val resultsFlow = collection.find()\n .sort(Sorts.descending(Order::orderTotal.name))\n\nresultsFlow.collect { println(it) }\n" + }, + { + "lang": "console", + "value": "Order(id=2, date=2022-01-11, orderTotal=83.87, description=two medium vanilla birthday cakes)\nOrder(id=5, date=2022-01-23, orderTotal=60.31, description=one large vanilla and chocolate cake)\nOrder(id=4, date=2022-01-15, 
orderTotal=43.62, description=2 chicken lunches and a diet coke)\nOrder(id=3, date=2022-01-11, orderTotal=19.49, description=1 dozen vanilla cupcakes)\nOrder(id=1, date=2022-01-03, orderTotal=17.86, description=1/2 lb cream cheese and 1 dozen bagels)\nOrder(id=6, date=2022-01-23, orderTotal=10.99, description=1 bagel, 1 orange juice, 1 muffin)" + }, + { + "lang": "kotlin", + "value": "collection.find().sort(Sorts.ascending(Order::date.name))\n" + }, + { + "lang": "kotlin", + "value": "collection.find().sort(Sorts.ascending(Order::date.name, Order::orderTotal.name))\n" + }, + { + "lang": "console", + "value": "Order(id=1, date=2022-01-03, orderTotal=17.86, description=1/2 lb cream cheese and 1 dozen bagels)\nOrder(id=3, date=2022-01-11, orderTotal=19.49, description=1 dozen vanilla cupcakes)\nOrder(id=2, date=2022-01-11, orderTotal=83.87, description=two medium vanilla birthday cakes)\nOrder(id=4, date=2022-01-15, orderTotal=43.62, description=2 chicken lunches and a diet coke)\nOrder(id=6, date=2022-01-23, orderTotal=10.99, description=1 bagel, 1 orange juice, 1 muffin)\nOrder(id=5, date=2022-01-23, orderTotal=60.31, description=one large vanilla and chocolate cake)" + }, + { + "lang": "kotlin", + "value": "val orderBySort = Sorts.orderBy(\n Sorts.descending(Order::date.name), Sorts.ascending(Order::orderTotal.name)\n)\nval results = collection.find().sort(orderBySort)\n\nresults.collect {println(it) }\n" + }, + { + "lang": "console", + "value": "Order(id=6, date=2022-01-23, orderTotal=10.99, description=1 bagel, 1 orange juice, 1 muffin)\nOrder(id=5, date=2022-01-23, orderTotal=60.31, description=one large vanilla and chocolate cake)\nOrder(id=4, date=2022-01-15, orderTotal=43.62, description=2 chicken lunches and a diet coke)\nOrder(id=3, date=2022-01-11, orderTotal=19.49, description=1 dozen vanilla cupcakes)\nOrder(id=2, date=2022-01-11, orderTotal=83.87, description=two medium vanilla birthday cakes)\nOrder(id=1, date=2022-01-03, orderTotal=17.86, description=1/2 lb cream cheese and 1 dozen bagels)" + }, + { + "lang": "kotlin", + "value": "import com.mongodb.client.model.Sorts\nimport com.mongodb.client.model.Projections\nimport com.mongodb.client.model.Filters\nimport com.mongodb.client.model.Indexes" + }, + { + "lang": "kotlin", + "value": "data class OrderScore(\n @BsonId val id: Int,\n val description: String,\n val score: Double\n)\n" + }, + { + "lang": "kotlin", + "value": "collection.createIndex(Indexes.text(Order::description.name))\nval metaTextScoreSort = Sorts.orderBy(\n Sorts.metaTextScore(OrderScore::score.name),\n Sorts.descending(\"_id\")\n)\nval metaTextScoreProj = Projections.metaTextScore(OrderScore::score.name)\nval searchTerm = \"vanilla\"\nval searchQuery = Filters.text(searchTerm)\n\nval results = collection.find(searchQuery)\n .projection(metaTextScoreProj)\n .sort(metaTextScoreSort)\n\nresults.collect { println(it) }\n" + }, + { + "lang": "console", + "value": "OrderScore(id=3, description=1 dozen vanilla cupcakes, score=0.625)\nOrderScore(id=5, description=one large vanilla and chocolate cake, score=0.6)\nOrderScore(id=2, description=two medium vanilla birthday cakes, score=0.6)" + } + ], + "preview": "In this guide, you can learn how to use sort operations to order your\nresults from read operations with the MongoDB Kotlin driver.", + "tags": null, + "facets": { + "target_product": ["drivers"], + "programming_language": ["kotlin"] + } + }, + { + "slug": "fundamentals/crud/read-operations/text", + "title": "Search Text", + "headings": [ + "Overview", + "Sample 
Documents", + "Text Index", + "Text Search", + "Specify Options", + "Search Text by a Term", + "Example", + "Example", + "Search Text by a Phrase", + "Example", + "Search Text with Terms Excluded", + "Example" + ], + "paragraphs": "In this guide, you can learn how to run a text search in the MongoDB\nKotlin driver. You can use a text search to retrieve documents that contain a term \nor a phrase in a specified field. A term is a sequence of characters\nthat excludes whitespace characters. A phrase is a sequence of terms\nwith any number of whitespace characters. The following sections show you how to perform the following types of\ntext searches: If you want to sort your text search results, see the Text Search section of our Sort Results guide. Search Text by a Term Search Text by a Phrase Search Text with Terms Excluded The following sections feature examples of text searches on the\n fast_and_furious_movies collection. Each section uses a variable\nnamed collection to refer to the MongoCollection instance of the\n fast_and_furious_movies collection. The fast_and_furious_movies collection contains documents that\ndescribe one of the several movies that are part of the Fast and Furious\nmovie franchise. Each document contains a title field and a tags field. This data is modeled with the following Kotlin data class: You must create a text index before running a text search. A text\nindex specifies the string or string array field on which to run a text\nsearch. In the following examples, you run text searches on the title \nfield in the fast_and_furious_movies collection. To enable text\nsearches on the title field, create a text index using the\n Indexes builder with the following snippet: For more information, see the following resources: Text Indexes section of our Indexes guide Text Indexes Server Manual Entry Use the Filters.text() method to specify a text search. The Filters.text() method uses the Filters builder to define a query filter specifying\nwhat to search for during the text search. The query filter is\nrepresented by a BSON instance. Pass the query filter to the\n find() method to run a text search. When you execute the find() method, MongoDB runs a text search on\nall the fields indexed with the text index on the collection. MongoDB\nreturns documents that contain one or more of the search terms and a\nrelevance score for each result. For more information on relevance\nscores, see the Text Search section in\nour Sort Results guide. You can include TextSearchOptions as the second parameter of the\n Filters.text() method to specify text search options such as case\nsensitivity. By default, text searches run without case sensitivity\nwhich means the search matches lowercase and uppercase values. To specify a case sensitive search, use the following snippet: For more information about the methods and classes mentioned in this section,\nsee the following API Documentation: Filters.text() TextSearchOptions Pass a term as a string to the Filters.text() method to specify the\nterm in your text search. The following example runs a text search on the documents in the\n fast_and_furious_movies collection for titles that contain the\nterm \"fast\": To match multiple terms in your text search, separate each term\nwith spaces in the Filters.text() builder method. The builder method\nreturns the text search query as a Bson instance. When you pass\nthis to the find() method, it returns documents that match any of\nthe terms. 
The following example runs a text search on the documents in the\n fast_and_furious_movies collection for titles that contain the\nterms \"fate\" or \"7\": Pass a phrase with escaped quotes to the Filters.text() method to\nspecify the phrase in your text search. Escaped quotes are double quote\ncharacters preceded by a backslash character. If you don't add escaped\nquotes around the phrase, the find() method runs a term search . The following example runs a text search on the documents in the\n fast_and_furious_movies collection for titles that contain the\nphrase \"fate of the furious\": For each term you want to exclude from your text search, prefix the term\nwith a minus sign in the string that you pass to the Filters.text() \nbuilder method. None of the documents returned from the search contain the excluded term\nin your text index field. You must have at least one text search term if you want to\nexclude terms from your search. The following example runs a text search on the documents in the\n fast_and_furious_movies collection for titles that contain the\nterm \"furious\", but do not contain the term \"fast\":", + "code": [ + { + "lang": "json", + "value": "{ \"_id\": 1, \"title\": \"2 Fast 2 Furious \", \"tags\": [\"undercover\", \"drug dealer\"] }\n{ \"_id\": 2, \"title\": \"Fast 5\", \"tags\": [\"bank robbery\", \"full team\"] }\n{ \"_id\": 3, \"title\": \"Furious 7\", \"tags\": [\"emotional\"] }\n{ \"_id\": 4, \"title\": \"The Fate of the Furious\", \"tags\": [\"betrayal\"] }" + }, + { + "lang": "kotlin", + "value": "data class Movies(\n @BsonId val id: Int,\n val title: String,\n val tags: List<String>\n)\n" + }, + { + "lang": "kotlin", + "value": "collection.createIndex(Indexes.text(\"title\"))\n" + }, + { + "lang": "kotlin", + "value": "val options: TextSearchOptions = TextSearchOptions().caseSensitive(true)\nval filter = Filters.text(\"SomeText\", options)\n" + }, + { + "lang": "kotlin", + "value": "val filter = Filters.text(\"fast\")\nval findFlow = collection.find(filter)\nfindFlow.collect { println(it) }\n" + }, + { + "lang": "console", + "value": "Movies(id=1, title=2 Fast 2 Furious, tags=[undercover, drug dealer])\nMovies(id=2, title=Fast 5, tags=[bank robbery, full team])" + }, + { + "lang": "kotlin", + "value": "val filter = Filters.text(\"fate 7\")\nval findFlow = collection.find(filter)\nfindFlow.collect { println(it) }\n" + }, + { + "lang": "console", + "value": "Movies(id=3, title=Furious 7, tags=[emotional])\nMovies(id=4, title=The Fate of the Furious, tags=[betrayal])" + }, + { + "lang": "kotlin", + "value": "val filter = Filters.text(\"\\\"fate of the furious\\\"\")\nval findFlow = collection.find(filter)\nfindFlow.collect { println(it) }\n" + }, + { + "lang": "console", + "value": "Movies(id=4, title=The Fate of the Furious, tags=[betrayal])" + }, + { + "lang": "kotlin", + "value": "val filter = Filters.text(\"furious -fast\")\nval findFlow = collection.find(filter)\nfindFlow.collect { println(it) }\n" + }, + { + "lang": "console", + "value": "Movies(id=3, title=Furious 7, tags=[emotional])\nMovies(id=4, title=The Fate of the Furious, tags=[betrayal])" + } + ], + "preview": "In this guide, you can learn how to run a text search in the MongoDB\nKotlin driver.", + "tags": null, + "facets": { + "target_product": ["drivers"], + "programming_language": ["kotlin"] + } + }, + { + "slug": "fundamentals/crud/read-operations", + "title": "Read Operations", + "headings": [], + "paragraphs": "Retrieve Data Access Data From a Flow Open Change Streams Sort Results Skip Returned 
Results Limit the Number of Returned Results Specify Which Fields to Return Search Geospatially Search Text", + "code": [], + "preview": null, + "tags": null, + "facets": { + "target_product": ["drivers"], + "programming_language": ["kotlin"] + } + }, + { + "slug": "fundamentals/crud/write-operations/bulk", + "title": "Bulk Operations", + "headings": [ + "Overview", + "Performing Bulk Operations", + "Insert Operation", + "Example", + "Replace Operation", + "Example", + "Update Operation", + "Example", + "Delete Operation", + "Example", + "Order of Execution", + "Ordered Execution", + "Example", + "Unordered Execution", + "Summary" + ], + "paragraphs": "In this guide, you can learn how to use bulk operations in the\nMongoDB Kotlin Driver. For individual CRUD operations, you can use the relevant method. For\nexample, to insert one document and then update multiple documents, you\ncan use the insertOne() method and the updateMany() method. The MongoClient performs these operations by making a request to the\ndatabase corresponding to each operation. You can reduce the number of\ncalls to the database by using bulk operations. Bulk operations consist of a large number of write operations. To perform\na bulk operation, pass a List containing WriteModel documents to the\n bulkWrite() method. A WriteModel is a model that represents a single\nwrite operation. The following sections show how to create and use each variation of the WriteModel \ntype. The examples in each section use the following documents in the people collection: This data is modeled with the following Kotlin data class: For more information about the methods and classes mentioned in this section,\nsee the following API Documentation: bulkWrite() WriteModel BulkWriteOptions To perform an insert operation, create an InsertOneModel specifying\nthe document you want to insert. To insert multiple documents, you must\ncreate an InsertOneModel for each document you want to insert. The following example creates an InsertOneModel for two documents\ndescribing people: For more information about the methods and classes mentioned in this section,\nsee the InsertOneModel API Documentation. When performing a bulkWrite() operation, the InsertOneModel cannot\ninsert a document with an _id that already exists in the\ncollection. In this case, the driver throws a MongoBulkWriteException . The following example tries to insert two documents where the _id \nvalues are 1 and 3 . Since there is already a document with an _id \nof 1 in the collection, the operation results in an error: To learn about why the driver didn't insert the document with the\n _id of 3 , see the Order of Execution section. To perform a replace operation, create a ReplaceOneModel specifying\na query filter for the document you want to replace and the replacement\ndocument. When performing a bulkWrite() , the ReplaceOneModel cannot\nmake changes that violate unique index constraints on\nthe collection. Additionally, the model does not perform the replace\noperation if there are no matches to the query filter. The following example creates a ReplaceOneModel to\nreplace a document where the _id is 1 with a document that\ncontains the additional location field: For more information about the methods and classes mentioned in this section,\nsee the following resources: ReplaceOneModel API Documentation Unique indexes Server Manual Explanation To perform an update operation, create an UpdateOneModel or an\n UpdateManyModel that specifies a query filter and an update document. 
The UpdateOneModel updates the first document that matches your query\nfilter and the UpdateManyModel updates all the documents that\nmatch your query filter. When performing a bulkWrite() , the UpdateOneModel and\n UpdateManyModel types cannot make changes that violate unique\nindex constraints on the collection. Additionally, the models do not\nperform update operations if there are no matches to the query\nfilter. The following example creates an UpdateOneModel to increment the age \nfield by 1 in a document where the _id is 2 : For more information about the methods and classes mentioned in this section,\nsee the following resources: UpdateOneModel API Documentation UpdateManyModel API Documentation unique indexes Server Manual Explanation To perform a delete operation, create a DeleteOneModel or a\n DeleteManyModel that specifies a query filter for documents you want\nto delete. The DeleteOneModel deletes the first document that matches your query\nfilter and the DeleteManyModel deletes all the documents that\nmatch your query filter. When performing a bulkWrite() , the DeleteOneModel and\n DeleteManyModel types do not delete any documents if there are no\nmatches to the query filter. The following example creates a DeleteOneModel to delete\na document where the _id is 1 and a DeleteManyModel to delete\ndocuments where the age value is less than 30 : For more information about the methods and classes mentioned in this section,\nsee the following API Documentation: DeleteOneModel DeleteManyModel The bulkWrite() method accepts an optional BulkWriteOptions as\na second parameter to specify if you want to execute the bulk operations\nas ordered or unordered. By default, the bulkWrite() method executes bulk operations in\norder. This means that the operations execute in the order you\nadded them to the list until any error occurs. The following example performs these bulk operations: After running this example, your collection contains the following\ndocuments: An insert operation for a document where the name is\n \"Zaynab Omar\" and the age is 37 A replace operation for a document where the _id is 1 with a new\ndocument that contains the location field An update operation for a document where the _id is 6 to\nchange the name field A delete operation for all documents that have an age value\ngreater than 50 You can also execute bulk operations in any order by passing false \nto the ordered() method on a BulkWriteOptions object. This means that\nall the write operations execute regardless of errors. If any errors occur,\nthe driver reports them at the end. The following code shows how to execute a bulk operation with no order\nof execution: For more information about the methods and classes mentioned in this section,\nsee the following API Documentation: Unordered bulk operations do not guarantee the order of execution. The\norder may differ from the order in which you list them to optimize the runtime. In the preceding example, if the bulkWrite() method performed the\ninsert operation after the update operation, the update operation\nwould not produce changes because the document did not exist\nat that point in time. The collection would then contain the following\ndocuments: BulkWriteOptions ordered() To perform a bulk operation, create and pass a list of\n WriteModel documents to the bulkWrite() method. 
There are six variations of WriteModel : There are two ways to execute the bulkWrite() method: InsertOneModel ReplaceOneModel UpdateOneModel UpdateManyModel DeleteOneModel DeleteManyModel Ordered, where the driver performs the write operations in order until any error occurs Unordered, where the driver performs all the write operations in any order and\nreports any errors after the operations complete", + "code": [ + { + "lang": "json", + "value": "{ \"_id\": 1, \"name\": \"Karen Sandoval\", \"age\": 31 }\n{ \"_id\": 2, \"name\": \"William Chin\", \"age\": 54 }\n{ \"_id\": 8, \"name\": \"Shayla Ray\", \"age\": 20 }" + }, + { + "lang": "kotlin", + "value": "data class Person(\n @BsonId val id: Int,\n val name: String,\n val age: Int? = null,\n val location: String? = null\n)\n" + }, + { + "lang": "kotlin", + "value": "val juneDoc = InsertOneModel(Person(3, \"June Carrie\", 17))\nval kevinDoc = InsertOneModel(Person(4, \"Kevin Moss\", 22))\n" + }, + { + "lang": "kotlin", + "value": "try {\n val bulkOperations = listOf(\n (InsertOneModel(Person(1, \"James Smith\", 13))),\n (InsertOneModel(Person(3, \"Colin Samuels\")))\n )\n val bulkWrite = collection.bulkWrite(bulkOperations)\n} catch (e: MongoBulkWriteException) {\n println(\"A MongoBulkWriteException occurred with the following message: \" + e.message)\n}\n" + }, + { + "lang": "console", + "value": "A MongoBulkWriteException occurred with the following message:\nBulk write operation error on server sample-shard-00-02.pw0q4.mongodb.net:27017.\nWrite errors: [BulkWriteError{index=0, code=11000, message='E11000 duplicate key\nerror collection: crudOps.bulkWrite index: _id_ dup key: { _id: 1 }', details={}}]." + }, + { + "lang": "kotlin", + "value": "val filter = Filters.eq(\"_id\", 1)\nval insert = Person(1, \"Celine Stork\", location = \"San Diego, CA\")\nval doc = ReplaceOneModel(filter, insert)\n" + }, + { + "lang": "kotlin", + "value": "val filter = Filters.eq(\"_id\", 2)\nval update = Updates.inc(Person::age.name, 1)\nval doc = UpdateOneModel(filter, update)\n" + }, + { + "lang": "kotlin", + "value": "val deleteId1 = DeleteOneModel(Filters.eq(\"_id\", 1))\nval deleteAgeLt30 = DeleteManyModel(Filters.lt(Person::age.name, 30))\n" + }, + { + "lang": "json", + "value": "{ \"_id\": 1, \"name\": \"Sandy Kane\", \"location\": \"Helena, MT\" }\n{ \"_id\": 8, \"name\": \"Shayla Ray\", \"age\": 20 }\n{ \"_id\": 6, \"name\": \"Zaynab Hassan\", \"age\": 37 }" + }, + { + "lang": "kotlin", + "value": "val insertMdl = InsertOneModel(Person(6, \"Zaynab Omar\", 37))\nval replaceMdl = ReplaceOneModel(\n Filters.eq(\"_id\", 1),\n Person(1, \"Sandy Kane\", location = \"Helena, MT\")\n)\nval updateMdl = UpdateOneModel(\n Filters.eq(\"_id\", 6),\n Updates.set(Person::name.name, \"Zaynab Hassan\")\n )\nval deleteMdl = DeleteManyModel(Filters.gt(Person::age.name, 50))\n\nval bulkOperations = listOf(\n insertMdl,\n replaceMdl,\n updateMdl,\n deleteMdl\n)\n\nval result = collection.bulkWrite(bulkOperations)\n" + }, + { + "lang": "kotlin", + "value": "val options = BulkWriteOptions().ordered(false)\nval unorderedResult = collection.bulkWrite(bulkOperations, options)\n" + }, + { + "lang": "json", + "value": "{ \"_id\": 1, \"name\": \"Sandy Kane\", \"location\": \"Helena, MT\" }\n{ \"_id\": 8, \"name\": \"Shayla Ray\", \"age\": 20 }\n{ \"_id\": 6, \"name\": \"Zaynab Omar\", \"age\": 37 }" + } + ], + "preview": "In this guide, you can learn how to use bulk operations in the\nMongoDB Kotlin Driver.", + "tags": null, + "facets": { + "target_product": ["drivers"], + 
"programming_language": ["kotlin"] + } + }, + { + "slug": "fundamentals/crud/write-operations/delete", + "title": "Delete Documents", + "headings": [ + "Overview", + "Sample Documents", + "Delete Many Documents", + "Delete a Document", + "Find and Delete a Document" + ], + "paragraphs": "In this guide, you can learn how to remove documents with the MongoDB Kotlin\ndriver. You can remove documents by passing a query filter to the\n deleteOne() , deleteMany() or findOneAndDelete() methods. The deleteOne() method deletes a single document. If the query\nfilter matches more than one document, the method will remove the first\noccurrence of a match in the collection. The deleteMany() method deletes all documents that match the query\nfilter. The findOneAndDelete() method atomically finds and deletes the first\noccurrence of a match in the collection. To specify a collation or hint an index, use DeleteOptions \nas a second parameter to the deleteOne() and deleteMany() methods. To specify a collation, hint an index, specify sort order, or specify a\nprojection on the returned document, use FindOneAndDeleteOptions \nas the second parameter to the findOneAndDelete() method. When deleting a single document, filter your query by a unique index,\nsuch as an _id , to ensure your query matches the document you want to\ndelete. The following examples are about a paint store that sells eight different\ncolors of paint. The store had their annual online sale resulting in the\nfollowing documents in their paint_inventory collection: This data is modeled with the following Kotlin data class: The paint store website displays all documents in the\n paint_inventory collection. To reduce customer confusion, the store\nwants to remove the colors that are out of stock. To remove the out of stock colors, query the paint_inventory \ncollection where the qty is 0 and pass the query to the\n deleteMany() method: The following shows the documents remaining in the paint_inventory \ncollection: The store is donating the remaining quantity of their yellow paint. This\nmeans that the qty for yellow is now 0 and we need to remove yellow\nfrom the collection. To remove yellow, query the paint_inventory collection where the\n color is \"yellow\" and pass the query to the deleteOne() \nmethod: The following shows the documents remaining in the paint_inventory \ncollection: The store would like to raffle the remaining quantity of purple paint\nand remove purple from the paint_inventory collection. To pick a color, query the paint_inventory collection where the\n color is \"purple\" and pass the query to the findOneAndDelete() \nmethod. Unlike the other delete methods, findOneAndDelete() returns the\ndeleted document: The following shows the documents remaining in the paint_inventory \ncollection: For more information about the methods and classes mentioned in this guide,\nsee the following resources: If there are no matches to your query filter, no document gets\ndeleted and the method returns null . 
deleteOne() API Documentation deleteMany() API Documentation findOneAndDelete() API Documentation DeleteOptions API Documentation FindOneAndDeleteOptions API Documentation db.collection.deleteOne() Server Manual Entry db.collection.deleteMany() Server Manual Entry db.collection.findOneAndDelete() Server Manual Entry", + "code": [ + { + "lang": "json", + "value": "{ \"_id\": 1, \"color\": \"red\", \"qty\": 5 }\n{ \"_id\": 2, \"color\": \"purple\", \"qty\": 8 }\n{ \"_id\": 3, \"color\": \"blue\", \"qty\": 0 }\n{ \"_id\": 4, \"color\": \"white\", \"qty\": 0 }\n{ \"_id\": 5, \"color\": \"yellow\", \"qty\": 6 }\n{ \"_id\": 6, \"color\": \"pink\", \"qty\": 0 }\n{ \"_id\": 7, \"color\": \"green\", \"qty\": 0 }\n{ \"_id\": 8, \"color\": \"black\", \"qty\": 8 }" + }, + { + "lang": "kotlin", + "value": "data class PaintOrder(\n @BsonId val id: Int,\n val qty: Int,\n val color: String\n)\n" + }, + { + "lang": "json", + "value": "{ \"_id\": 1, \"color\": \"red\", \"qty\": 5 }\n{ \"_id\": 2, \"color\": \"purple\", \"qty\": 8 }\n{ \"_id\": 5, \"color\": \"yellow\", \"qty\": 6 }\n{ \"_id\": 8, \"color\": \"black\", \"qty\": 8 }" + }, + { + "lang": "kotlin", + "value": "val filter = Filters.eq(\"qty\", 0)\ncollection.deleteMany(filter)\n" + }, + { + "lang": "json", + "value": "{ \"_id\": 1, \"color\": \"red\", \"qty\": 5 }\n{ \"_id\": 2, \"color\": \"purple\", \"qty\": 8 }\n{ \"_id\": 8, \"color\": \"black\", \"qty\": 8 }" + }, + { + "lang": "kotlin", + "value": "val filter = Filters.eq(\"color\", \"yellow\")\ncollection.deleteOne(filter)\n" + }, + { + "lang": "json", + "value": " { \"_id\": 1, \"color\": \"red\", \"qty\": 5 }\n { \"_id\": 8, \"color\": \"black\", \"qty\": 8 }" + }, + { + "lang": "kotlin", + "value": "val filter = Filters.eq(\"color\", \"purple\")\nval result = collection.findOneAndDelete(filter)\n\nprintln(\"The following was deleted: $result\")\n" + }, + { + "lang": "console", + "value": "The following was deleted: PaintOrder(id=2, qty=8, color=purple)" + } + ], + "preview": "In this guide, you can learn how to remove documents with the MongoDB Kotlin\ndriver.", + "tags": null, + "facets": { + "target_product": ["drivers"], + "programming_language": ["kotlin"] + } + }, + { + "slug": "fundamentals/crud/write-operations/embedded-arrays", + "title": "Update Arrays in a Document", + "headings": [ + "Overview", + "Sample Document", + "Specifying an Update", + "Specifying Array Elements", + "The First Matching Array Element", + "Example", + "Matching All Array Elements", + "Example", + "Matching Multiple Array Elements", + "Example" + ], + "paragraphs": "In this guide, you can learn how to update arrays in a document with the\nMongoDB Kotlin driver. To update an array, you must do the following: Specify the update you want to perform Specify what array elements to apply your update to Perform an update operation using these specifications The following sections feature examples that update this sample\ndocument: This data is modeled with the following Kotlin data class: The examples on this page use the findOneAndUpdate() method of the\n MongoCollection class to retrieve and update the document. Each\nexample uses an instance of the FindOneAndUpdateOptions class to\nhave MongoDB retrieve the document after the update occurs. For\nmore information on the findOneAndUpdate() method, see our\n Compound Operations guide . To specify an update, use the Updates builder. The Updates \nbuilder provides static utility methods to construct update\nspecifications. 
For more information on using the Updates builder with\narrays, see our guide on the Updates builder . The following example performs these actions: Query for the sample document Append \"17\" to the qty array in the document that matches the query filter You can specify which array elements to update using a positional\noperator. Positional operators can specify the first, all, or certain\narray elements to update. To specify elements in an array with positional operators, use dot\nnotation . Dot notation is a property access syntax for navigating BSON\nobjects. For additional information, see the Server Manual Entry on\n dot notation . To update the first array element that matches your query filter, use the\npositional $ operator. The array field must appear as part of your\nquery filter to use the positional $ operator. The following example performs these actions: For more information about the methods and operators mentioned in this section,\nsee the following resources: Query for a document with a qty field containing the value \"18\" Decrement the first array value in the document that matches the query filter by \"3\" Positional $ Operator Server Manual Entry inc() API Documentation To update all elements in an array, use the all positional $[] operator. The following example performs these actions: For more information about the methods and operators mentioned in this section,\nsee the following resources: Query for the sample document Multiply array elements matching the query filter by \"2\" All Positional $[] Operator Server Manual Entry mul() API Documentation To update array elements that match a filter, use the\nfiltered positional $[<identifier>] operator. You must include an\narray filter in your update operation to specify which array elements to\nupdate. The <identifier> is the name you give your array filter. This value\nmust begin with a lowercase letter and contain only alphanumeric\ncharacters. 
The following example performs these actions: For more information about the methods and operators mentioned in this section,\nsee the following resources: Query for the sample document Set an array filter to search for values less than \"15\" Increment array elements matching the query filter by \"5\" Filtered Positional $[<identifier>] Operator Server Manual Entry inc() API Documentation", + "code": [ + { + "lang": "json", + "value": "{ \"_id\": 1, \"color\": \"green\", \"qty\": [8, 12, 18] }" + }, + { + "lang": "kotlin", + "value": "data class PaintOrder(\n @BsonId val id: Int,\n val qty: List<Int>,\n val color: String\n)\n" + }, + { + "lang": "kotlin", + "value": "val filter = Filters.eq(\"_id\", 1)\nval update = Updates.push(PaintOrder::qty.name, 17)\nval options = FindOneAndUpdateOptions()\n .returnDocument(ReturnDocument.AFTER)\nval result = collection.findOneAndUpdate(filter, update, options)\n\nprint(result)\n" + }, + { + "lang": "console", + "value": "PaintOrder(id=1, qty=[8, 12, 18, 17], color=green)" + }, + { + "lang": "kotlin", + "value": "val filter = Filters.eq(PaintOrder::qty.name, 18)\nval update = Updates.inc(\"${PaintOrder::qty.name}.$\", -3)\nval options = FindOneAndUpdateOptions()\n .returnDocument(ReturnDocument.AFTER)\nval result = collection.findOneAndUpdate(filter, update, options)\n\nprint(result)\n" + }, + { + "lang": "console", + "value": "PaintOrder(id=1, qty=[8, 12, 15], color=green)" + }, + { + "lang": "kotlin", + "value": "val filter = Filters.eq(\"_id\", 1)\nval update = Updates.mul(\"${PaintOrder::qty.name}.$[]\", 2)\nval options = FindOneAndUpdateOptions()\n .returnDocument(ReturnDocument.AFTER)\nval result = collection.findOneAndUpdate(filter, update, options)\n\nprintln(result)\n" + }, + { + "lang": "console", + "value": "PaintOrder(id=1, qty=[16, 24, 36], color=green)" + }, + { + "lang": "kotlin", + "value": "val filter = Filters.eq(\"_id\", 1)\nval smallerFilter = Filters.lt(\"smaller\", 15)\nval options = FindOneAndUpdateOptions()\n .returnDocument(ReturnDocument.AFTER)\n .arrayFilters(listOf(smallerFilter))\nval update = Updates.inc(\"${PaintOrder::qty.name}.$[smaller]\", 5)\nval result = collection.findOneAndUpdate(filter, update, options)\n\nprintln(result)\n" + }, + { + "lang": "console", + "value": "PaintOrder(id=1, qty=[13, 17, 18], color=green)" + } + ], + "preview": "In this guide, you can learn how to update arrays in a document with the\nMongoDB Kotlin driver.", + "tags": null, + "facets": { + "target_product": ["drivers"], + "programming_language": ["kotlin"] + } + }, + { + "slug": "fundamentals/crud/write-operations/insert", + "title": "Insert Operations", + "headings": [ + "Overview", + "A Note About _id", + "Insert a Single Document", + "Example", + "Insert Multiple Documents", + "Example", + "Summary" + ], + "paragraphs": "In this guide, you can learn how to insert documents with the MongoDB Kotlin\ndriver. You can use MongoDB to retrieve, update, and delete information. To\nperform any of those operations, that information, such as user profiles\nand orders, needs to exist in MongoDB. For that information to exist,\nyou need to first perform an insert operation. An insert operation inserts a single or multiple documents into MongoDB\nusing the insertOne() , insertMany() , and bulkWrite() \nmethods. The following sections focus on insertOne() and\n insertMany() . For information on how to use the bulkWrite() \nmethod, see our\n guide on Bulk Operations . In the following examples, a paint store has an inventory of different colors\nof paint. 
This data is modeled with the following Kotlin data class: When inserting a document, MongoDB enforces one constraint on your\ndocuments by default: each document must contain a unique _id \nfield. There are two ways to manage this field: Unless you have provided strong guarantees for uniqueness, we recommend\nyou let the driver automatically generate _id values. For additional information on unique indexes, see the manual entry on\n Unique Indexes . You can manage this field yourself, ensuring each value you use is unique. You can let the driver automatically generate unique ObjectId values. Duplicate _id values violate unique index constraints, resulting\nin a WriteError . Use the insertOne() method when you want to insert a single\ndocument. On successful insertion, the method returns an InsertOneResult \ninstance representing the _id of the new document. The following example creates and inserts a document using the\n insertOne() method: For more information about the methods and classes mentioned in this section,\nsee the following resources: insertOne() API Documentation InsertOneResult API Documentation Manual Explanation on insertOne() Runnable Insert a Document example Use the insertMany() method when you want to insert multiple\ndocuments. This method inserts documents in the order specified until an\nexception occurs, if any. For example, assume you want to insert the following documents: If you attempt to insert these documents, a WriteError occurs at the\nthird document and the documents prior to the error get inserted into\nyour collection. On successful insertion, the method returns an InsertManyResult \ninstance representing the _id of each new document. Use a try-catch block to get an acknowledgment for successfully\nprocessed documents before the error occurs. The output consists of\ndocuments MongoDB can process: If you look inside your collection, you should see the following documents: The following example creates and adds two documents to a List , and\ninserts the List using the insertMany() method: For more information about the methods and classes mentioned in this section,\nsee the following resources: insertMany() API Documentation InsertManyResult API Documentation Manual Explanation on insertMany() Runnable Insert Multiple Documents example There are three ways to perform an insert operation, but we focused on two: Both methods automatically generate an _id if you omit the field in\nyour document. If the insertion is successful, both methods return an instance\nrepresenting the _id of each new document. The insertOne() method inserts a single document. The insertMany() method inserts multiple documents.", + "code": [ + { + "lang": "kotlin", + "value": "data class PaintOrder(\n @BsonId val id: ObjectId? 
= null,\n val qty: Int,\n val color: String\n)\n" + }, + { + "lang": "kotlin", + "value": "val paintOrder = PaintOrder(ObjectId(), 5, \"red\")\nval result = collection.insertOne(paintOrder)\n\nval insertedId = result.insertedId?.asObjectId()?.value\n\nprintln(\"Inserted a document with the following id: $insertedId\")\n" + }, + { + "lang": "console", + "value": "Inserted a document with the following id: 60930c39a982931c20ef6cd6" + }, + { + "lang": "json", + "value": "{ \"color\": \"red\", \"qty\": 5 }\n{ \"color\": \"purple\", \"qty\": 10 }\n{ \"color\": \"yellow\", \"qty\": 3 }\n{ \"color\": \"blue\", \"qty\": 8 }" + }, + { + "lang": "json", + "value": "{ \"color\": \"red\", \"qty\": 5 }\n{ \"color\": \"purple\", \"qty\": 10 }" + }, + { + "lang": "kotlin", + "value": "val result = collection.insertMany(paintOrders)\ntry {\n println(\"Inserted documents with the following ids: ${result.insertedIds}\")\n} catch(e: MongoBulkWriteException){\n val insertedIds = e.writeResult.inserts.map { it.id.asInt32().value }\n println(\n \"A MongoBulkWriteException occurred, but there are \" +\n \"successfully processed documents with the following ids: $insertedIds\"\n )\n collection.find().collect { println(it) }\n}\n" + }, + { + "lang": "console", + "value": "A MongoBulkWriteException occurred, but there are successfully processed\ndocuments with the following ids: [60930c3aa982931c20ef6cd7, 644ad1378ea29443837a14e9, 60930c3aa982931c20ef6cd8]" + }, + { + "lang": "kotlin", + "value": "val paintOrders = listOf(\n PaintOrder(ObjectId(), 5, \"red\"),\n PaintOrder(ObjectId(), 10, \"purple\")\n)\nval result = collection.insertMany(paintOrders)\n\nprintln(\"Inserted a document with the following ids: ${result.insertedIds.toList()}\")\n" + }, + { + "lang": "console", + "value": "Inserted documents with the following ids: [60930c3aa982931c20ef6cd7, 60930c3aa982931c20ef6cd8]" + } + ], + "preview": "In this guide, you can learn how to insert documents with the MongoDB Kotlin\ndriver.", + "tags": null, + "facets": { + "target_product": ["drivers"], + "programming_language": ["kotlin"] + } + }, + { + "slug": "fundamentals/crud/write-operations/modify", + "title": "Modify Documents", + "headings": [ + "Overview", + "Update", + "Update Operation Parameters", + "Example", + "Replace", + "Replace Operation Parameters", + "Example" + ], + "paragraphs": "In this guide, you can learn how to modify documents in a MongoDB\ncollection using two distinct operation types: Update operations specify the fields and values to change in one or more\ndocuments. A replace operation specifies the fields and values to replace\na single document from your collection. In the following examples, a paint store sells five different\ncolors of paint. The paint_inventory collection represents their\ncurrent inventory: This data is modeled with the following Kotlin data class: Update Replace Update operations can modify fields and values. They apply changes\nspecified in an update document to one or more documents that match your\nquery filter. The updateOne() \nmethod changes the first document your query filter matches and the\n updateMany() \nmethod changes all the documents your query filter matches. You can call the updateOne() and updateMany() methods on a\n MongoCollection instance as follows: The updateOne() and updateMany() methods both have the following\nparameters: You can create the updateDocument using an Updates builder as\nfollows: See the MongoDB API documentation for a complete list of\nUpdates builders and their usage . 
query specifies a query filter with the criteria to match documents to update in your collection updateDocument specifies the fields and values to modify in the matching document or documents. For this example, we use the Updates builder to create the update document. The paint store needs to update their inventory after a customer returns a\ncan of yellow paint. To update the single can of paint, call the updateOne() method specifying\nthe following: The paint store then receives a fresh shipment and needs to update their\ninventory again. The shipment contains 20 cans of each paint color. To update the inventory, call the updateMany() method specifying the\nfollowing: The following shows the updated documents in the paint_inventory collection: If zero documents match the query filter in the update operation,\n updateMany() makes no changes to documents in the collection. See\nour upsert guide to\nlearn how to insert a new document instead of updating one if no\ndocuments match. A query filter that matches the yellow color An update document that contains instructions to increment the qty field by \"1\" A query filter that matches all the colors An update document that contains instructions to increment the qty field by \"20\" The updateOne() and updateMany() methods cannot make changes\nto a document that violate unique index constraints on the\ncollection. See the MongoDB server manual for more information on\n unique indexes . A replace operation substitutes one document in your collection. The\nsubstitution occurs between a document your query filter matches and a\nreplacement document. The replaceOne() \nmethod removes all the existing fields and values in the\nmatching document (except the _id field) and replaces it with the\nreplacement document. You can call the replaceOne() method on a MongoCollection \ninstance as follows: The replaceOne() method has the following parameters: query specifies a query filter with the criteria to match a document to replace in your collection replacementDocument specifies fields and values of a new Document object to replace in the matched document The paint store realizes they need to update their inventory again. What they\nthought was 20 cans of pink paint is actually 25 cans of orange paint. To update the inventory, call the replaceOne() method specifying the\nfollowing: The following shows the updated document: If zero documents match the query filter in the replace operation,\n replaceOne() makes no changes to documents in the collection. See\nour upsert guide to\nlearn how to insert a new document instead of replacing one if no\ndocuments match. If multiple documents match the query filter specified in\nthe replaceOne() method, it replaces the first result. A query filter that matches documents where the color is \"pink\" A replacement document where the color is \"orange\" and the qty is \"25\" The replaceOne() method cannot make changes to a document that\nviolate unique index constraints on the collection. 
See the MongoDB\nserver manual for more information on unique indexes .", + "code": [ + { + "lang": "json", + "value": "{ \"_id\": 1, \"color\": \"red\", \"qty\": 5 }\n{ \"_id\": 2, \"color\": \"purple\", \"qty\": 8 }\n{ \"_id\": 3, \"color\": \"yellow\", \"qty\": 0 }\n{ \"_id\": 4, \"color\": \"green\", \"qty\": 6 }\n{ \"_id\": 5, \"color\": \"pink\", \"qty\": 0 }" + }, + { + "lang": "kotlin", + "value": "data class PaintOrder(\n @BsonId val id: Int,\n val color: String,\n val qty: Int\n)\n" + }, + { + "lang": "kotlin", + "value": "collection.updateOne(query, updateDocument)\n\ncollection.updateMany(query, updateDocument)" + }, + { + "lang": "kotlin", + "value": "val updateDocument = Updates.operator(field, value)" + }, + { + "lang": "json", + "value": " { \"_id\": 1, \"color\": \"red\", \"qty\": 25 }\n { \"_id\": 2, \"color\": \"purple\", \"qty\": 28 }\n { \"_id\": 3, \"color\": \"yellow\", \"qty\": 20 }\n { \"_id\": 4, \"color\": \"green\", \"qty\": 26 }\n { \"_id\": 5, \"color\": \"pink\", \"qty\": 20 }" + }, + { + "lang": "kotlin", + "value": "val filter = Filters.eq(PaintOrder::color.name, \"yellow\")\nval update = Updates.inc(PaintOrder::qty.name, 1)\nval result = collection.updateOne(filter, update)\n\nprintln(\"Matched document count: ${result.matchedCount}\")\nprintln(\"Modified document count: ${result.modifiedCount}\")\n" + }, + { + "lang": "console", + "value": " Matched document count: 1\n Modified document count: 1" + }, + { + "lang": "kotlin", + "value": "val filter = Filters.empty()\nval update = Updates.inc(PaintOrder::qty.name, 20)\nval result = collection.updateMany(filter, update)\n\nprintln(\"Matched document count: ${result.matchedCount}\")\nprintln(\"Modified document count: ${result.modifiedCount}\")\n" + }, + { + "lang": "console", + "value": " Matched document count: 5\n Modified document count: 5" + }, + { + "lang": "kotlin", + "value": "collection.replaceOne(query, replacementDocument)" + }, + { + "lang": "json", + "value": " { \"_id\": 5, \"color\": \"orange\", \"qty\": 25 }" + }, + { + "lang": "kotlin", + "value": "val filter = Filters.eq(PaintOrder::color.name, \"pink\")\nval update = PaintOrder(5, \"orange\", 25)\nval result = collection.replaceOne(filter, update)\n\nprintln(\"Matched document count: ${result.matchedCount}\")\nprintln(\"Modified document count: ${result.modifiedCount}\")\n" + }, + { + "lang": "console", + "value": " Matched document count: 1\n Modified document count: 1" + } + ], + "preview": "In this guide, you can learn how to modify documents in a MongoDB\ncollection using two distinct operation types:", + "tags": null, + "facets": { + "target_product": ["drivers"], + "programming_language": ["kotlin"] + } + }, + { + "slug": "fundamentals/crud/write-operations/upsert", + "title": "Insert or Update in a Single Operation", + "headings": ["Overview", "Specify an Upsert"], + "paragraphs": "In this guide, you can learn how to perform an upsert with the\nMongoDB Kotlin driver. Applications use insert and update operations to store and modify data.\nSometimes, you need to choose between an insert and an update depending on\nwhether the document exists. MongoDB simplifies this decision for us\nwith an upsert option. An upsert : Updates documents that match your query filter Inserts a document if there are no matches to your query filter To specify an upsert with the updateOne() or updateMany() \nmethods, pass true to UpdateOptions.upsert() . To specify an upsert with the replaceOne() method, pass true to\n ReplaceOptions.upsert() . 
In the following example, a paint store sells eight different\ncolors of paint. The store had their annual online sale. Their\n paint_inventory collection now shows the following documents: This data is modeled with the following Kotlin data class: The store received a fresh shipment and needs to update their inventory.\nThe first item in the shipment is ten cans of orange paint. To update the inventory, query the paint_inventory collection\nwhere the color is \"orange\" , specify an update to increment the\n qty field by 10 , and specify true to\n UpdateOptions.upsert() : This AcknowledgedUpdateResult tells us: The following shows the documents in the paint_inventory collection: For more information about the methods and classes mentioned in this section,\nsee the following API Documentation: Zero documents matched our query filter Zero documents in our collection got modified A document with an _id of 606b4cfc1601f9443b5d6978 got upserted Not including UpdateOptions results in no change to the collection. UpdateOptions.upsert() ReplaceOptions.upsert()", + "code": [ + { + "lang": "json", + "value": "{ \"_id\": { \"$oid\": \"606b4cfbcd83be7518b958da\" }, \"color\": \"red\", \"qty\": 5 }\n{ \"_id\": { \"$oid\": \"606b4cfbcd83be7518b958db\" }, \"color\": \"purple\", \"qty\": 8 }\n{ \"_id\": { \"$oid\": \"606b4cfbcd83be7518b958dc\" }, \"color\": \"blue\", \"qty\": 0 }\n{ \"_id\": { \"$oid\": \"606b4cfbcd83be7518b958dd\" }, \"color\": \"white\", \"qty\": 0 }\n{ \"_id\": { \"$oid\": \"606b4cfbcd83be7518b958de\" }, \"color\": \"yellow\", \"qty\": 6 }\n{ \"_id\": { \"$oid\": \"606b4cfbcd83be7518b958df\" }, \"color\": \"pink\", \"qty\": 0 }\n{ \"_id\": { \"$oid\": \"606b4cfbcd83be7518b958e0\" }, \"color\": \"green\", \"qty\": 0 }\n{ \"_id\": { \"$oid\": \"606b4cfbcd83be7518b958e1\" }, \"color\": \"black\", \"qty\": 8 }" + }, + { + "lang": "json", + "value": " { \"_id\": { \"$oid\": \"606b4cfbcd83be7518b958da\" }, \"color\": \"red\", \"qty\": 5 }\n { \"_id\": { \"$oid\": \"606b4cfbcd83be7518b958db\" }, \"color\": \"purple\", \"qty\": 8 }\n { \"_id\": { \"$oid\": \"606b4cfbcd83be7518b958dc\" }, \"color\": \"blue\", \"qty\": 0 }\n { \"_id\": { \"$oid\": \"606b4cfbcd83be7518b958dd\" }, \"color\": \"white\", \"qty\": 0 }\n { \"_id\": { \"$oid\": \"606b4cfbcd83be7518b958de\" }, \"color\": \"yellow\", \"qty\": 6 }\n { \"_id\": { \"$oid\": \"606b4cfbcd83be7518b958df\" }, \"color\": \"pink\", \"qty\": 0 }\n { \"_id\": { \"$oid\": \"606b4cfbcd83be7518b958e0\" }, \"color\": \"green\", \"qty\": 0 }\n { \"_id\": { \"$oid\": \"606b4cfbcd83be7518b958e1\" }, \"color\": \"black\", \"qty\": 8 }\n { \"_id\": { \"$oid\": \"606b4cfc1601f9443b5d6978\" }, \"color\": \"orange\", \"qty\": 10 }]" + }, + { + "lang": "kotlin", + "value": "data class PaintOrder(\n @BsonId val id: ObjectId = ObjectId(),\n val qty: Int,\n val color: String\n)\n" + }, + { + "lang": "kotlin", + "value": "val filter = Filters.eq(PaintOrder::color.name, \"orange\")\nval update = Updates.inc(PaintOrder::qty.name, 10)\nval options = UpdateOptions().upsert(true)\n\nval results = collection.updateOne(filter, update, options)\n\nprintln(results)\n" + }, + { + "lang": "console", + "value": " AcknowledgedUpdateResult{ matchedCount=0, modifiedCount=0, upsertedId=BsonObjectId{ value=606b4cfc1601f9443b5d6978 }}" + }, + { + "lang": "kotlin", + "value": "val filter = Filters.eq(PaintOrder::color.name, \"orange\")\nval update = Updates.inc(PaintOrder::qty.name, 10)\n\nval results = collection.updateOne(filter, update)\n\nprintln(results)\n" + }, + { + "lang": 
"console", + "value": "AcknowledgedUpdateResult{ matchedCount=0, modifiedCount=0, upsertedId=null }" + } + ], + "preview": "In this guide, you can learn how to perform an upsert with the\nMongoDB Kotlin driver.", + "tags": null, + "facets": { + "target_product": ["drivers"], + "programming_language": ["kotlin"] + } + }, + { + "slug": "fundamentals/crud/write-operations", + "title": "Write Operations", + "headings": [], + "paragraphs": "Insert Operations Delete Documents Modify Documents Update Arrays in a Document Insert or Update in a Single Operation Bulk Operations", + "code": [], + "preview": null, + "tags": null, + "facets": { + "target_product": ["drivers"], + "programming_language": ["kotlin"] + } + }, + { + "slug": "fundamentals/crud", + "title": "CRUD Operations", + "headings": [], + "paragraphs": "CRUD (Create, Read, Update, Delete) operations enable you to work with\ndata stored in MongoDB. Some operations combine aspects of read and write operations. See our\nguide on compound operations \nto learn more about these hybrid methods. Read Operations find and return\ndocuments stored in your database. Write Operations insert, modify,\nor delete documents in your database.", + "code": [], + "preview": "CRUD (Create, Read, Update, Delete) operations enable you to work with\ndata stored in MongoDB.", + "tags": null, + "facets": { + "target_product": ["drivers"], + "programming_language": ["kotlin"] + } + }, + { + "slug": "fundamentals/data-formats/codecs", + "title": "Codecs", + "headings": [ + "Overview", + "Codec", + "CodecRegistry", + "CodecProvider", + "Default Codec Registry", + "BsonTypeClassMap", + "Custom Codec Example" + ], + "paragraphs": "In this guide, you can learn about Codecs and the supporting classes that\nhandle the encoding and decoding of Kotlin objects to and from BSON data\nin the MongoDB Kotlin driver. The Codec abstraction allows you to map any Kotlin type to\na corresponding BSON type. You can use this to map your domain objects\ndirectly to and from BSON instead of using data classes or an intermediate\nmap-based object such as Document or BsonDocument . You can learn how to specify custom encoding and decoding logic using\nthe Codec abstraction and view example implementations in the following\nsections: Codec CodecRegistry CodecProvider Custom Codec Example The Codec interface contains abstract methods for serializing and\ndeserializing Kotlin objects to BSON data. You can define your conversion logic\nbetween BSON and your Kotlin object in your implementation of this interface. To implement the Codec interface, override the encode() , decode() ,\nand getEncoderClass() abstract methods. The encode() method requires the following parameters: This method uses the BsonWriter instance to send the encoded value to\nMongoDB and does not return a value. The decode() method returns your Kotlin object instance populated with the\nvalue from the BSON data. This method requires the following parameters: The getEncoderClass() method returns a class instance of the Kotlin class\nsince Kotlin cannot infer the type due to type erasure. See the following code examples that show how you can implement a custom\n Codec . The PowerStatus enum contains the values \"ON\" and \"OFF\" to represent\nthe states of an electrical switch. The PowerStatusCodec class implements Codec in order to convert\nthe Kotlin enum values to corresponding BSON boolean values. 
The\n encode() method converts a PowerStatus to a BSON boolean and the\n decode() method performs the conversion in the opposite direction. You can add an instance of the PowerStatusCodec to your CodecRegistry \nwhich contains a mapping between your Codec and the Kotlin object type to\nwhich it applies. Continue to the CodecRegistry \nsection of this page to see how you can include your Codec . For more information about the classes and interfaces in this section, see the\nfollowing API Documentation: Parameter Type Description writer An instance of a class that implements BsonWriter , an interface type\nthat exposes methods for writing a BSON document. For example, the\n BsonBinaryWriter implementation writes to a binary stream of data.\nUse this instance to write your BSON value using the appropriate\nwrite method. value The data that your implementation encodes. The type must match the type\nvariable assigned to your implementation. encoderContext Contains meta information about the Kotlin object data that it encodes\nto BSON including whether to store the current value in a\nMongoDB collection. Parameter Type Description bsonReader An instance of a class that implements BsonReader , an interface type\nthat exposes methods for reading a BSON document. For example, the\n BsonBinaryReader implementation reads from a binary stream of data. decoderContext Contains information about the BSON data that it decodes to a Kotlin\nobject. Codec BsonWriter BsonBinaryWriter EncoderContext BsonReader DecoderContext BsonBinaryReader A CodecRegistry is an immutable collection of Codec instances that\nencode and decode the Kotlin classes they specify. You can use any of the\nfollowing CodecRegistries class static factory methods to construct a\n CodecRegistry from the Codec instances contained in the associated\ntypes: The following code snippet shows how to construct a CodecRegistry using\nthe fromCodecs() method: In the preceding example, we assign the CodecRegistry the following Codec \nimplementations: You can retrieve the Codec instances from the CodecRegistry instance\nfrom the prior example using the following code: If you attempt to retrieve a Codec instance for a class that is not\nregistered, the get() method throws a CodecConfigurationException \nexception. For more information about the classes and interfaces in this section, see the\nfollowing API Documentation: fromCodecs() fromProviders() fromRegistries() IntegerCodec , a Codec that converts Integers and is part of the BSON package. PowerStatusCodec , our sample Codec \nthat converts Kotlin enum values to BSON booleans. CodecRegistries IntegerCodec A CodecProvider is an interface that contains abstract methods that create\n Codec instances and assign them to a CodecRegistry instance. Similar\nto the CodecRegistry , the BSON library uses the Codec instances\nretrieved by the get() method to convert between Kotlin and BSON data types. However, in cases in which you add a class that contains fields that require\ncorresponding Codec objects, you need to ensure that you instantiate the\n Codec objects for the class' fields before you instantiate the\n Codec for the class. You can use the CodecRegistry parameter in\nthe get() method to pass any of the Codec instances that the\n Codec relies on. 
The following code example shows how you can implement CodecProvider to\npass the MonolightCodec any Codec instances it needs in a\n CodecRegistry instance such as the PowerStatusCodec from our prior\nexample: To see a runnable example that demonstrates read and write operations using\nthese Codec classes, see the Custom Codec Example \nsection of this guide. The default codec registry is a set of CodecProvider classes that\nspecify conversion between commonly-used Kotlin and MongoDB types. The\ndriver automatically uses the default codec registry unless you specify\na different one. If you need to override the behavior of one or more Codec classes, but\nkeep the behavior from the default codec registry for the other classes,\nyou can specify all of the registries in order of precedence. For example,\nsuppose you wanted to override the default provider behavior of a Codec for\nenum types with your custom MyEnumCodec , you must add it to the registry\nlist prior to the default codec registry as shown in the example below: For more information about the classes and interfaces in this section, see\nthe following API documentation sections: CodecProvider Default codec registry The BsonTypeClassMap class contains a recommended mapping between BSON\nand Kotlin types. You can use this class in your custom Codec or\n CodecProvider to help you manage which Kotlin types to decode your BSON\ntypes to container classes that implement Iterable or Map such as\nthe Document class. You can add or modify the BsonTypeClassMap default mapping by passing a\n Map containing new or replacement entries. The following code snippet shows how you can retrieve the Kotlin class type\nthat corresponds to the BSON type in the default BsonTypeClassMap \ninstance: You can modify these mappings in your instance by specifying replacements in the\n BsonTypeClassMap constructor. The following code snippet shows how\nyou can replace the mapping for ARRAY in your BsonTypeClassMap \ninstance with the Set class: For a complete list of the default mappings, refer to the\n BsonTypeClassMap API Documentation. For an example of how the Document class uses BsonTypeClassMap , see\nthe driver source code for the following classes: DocumentCodecProvider DocumentCodec In this section, we show how you can implement Codec and CodecProvider \nto define the encoding and decoding logic for a custom Kotlin class. We also show\nhow you can specify and use your custom implementations to perform insert\nand retrieve operations. The following code snippet shows our example custom class called Monolight \nand its fields that we want to store and retrieve from a MongoDB collection: This class contains the following fields, each of which we need to assign a\n Codec : The following code example shows how we can implement a Codec for the\n Monolight class. Note that the constructor expects an instance of\n CodecRegistry from which it retrieves the Codec instances it needs\nto encode and decode its fields: To ensure we make the Codec instances for the fields available for\n Monolight , we implement a custom CodecProvider shown in the following\ncode example: After defining the conversion logic, we can perform the following: The following example class contains code that assigns the\n MonolightCodecProvider to the MongoCollection instance by passing it\nto the withCodecRegistry() method. 
The example class also inserts and\nretrieves data using the Monolight class and associated codecs: For more information about the methods and classes mentioned in this section,\nsee the following API Documentation: As an alternative to implementing custom codecs, you can use\nKotlin serialization to handle your data encoding and decoding with\n @Serializable classes. You might choose Kotlin serialization if you are\nalready familiar with the framework or prefer to use an idiomatic Kotlin approach.\nSee the Kotlin Serialization \ndocumentation for more information. powerStatus describes whether the light is switched \"on\" or \"off\" for\nwhich we use the PowerStatusCodec that\nconverts specific enum values to BSON booleans. colorTemperature describes the color of the light and contains an\n Int value for which we use the IntegerCodec included in the\nBSON library. Store data from instances of Monolight into MongoDB Retrieve data from MongoDB into instances of Monolight withCodecRegistry() MongoClientSettings.getDefaultCodecRegistry() Codec CodecProvider", + "code": [ + { + "lang": "kotlin", + "value": "enum class PowerStatus {\n ON,\n OFF\n}\n" + }, + { + "lang": "kotlin", + "value": "class PowerStatusCodec : Codec {\n override fun encode(writer: BsonWriter, value: PowerStatus, encoderContext: EncoderContext) = writer.writeBoolean(value == PowerStatus.ON)\n\n override fun decode(reader: BsonReader, decoderContext: DecoderContext): PowerStatus {\n return when (reader.readBoolean()) {\n true -> PowerStatus.ON\n false -> PowerStatus.OFF\n }\n }\n\n override fun getEncoderClass(): Class = PowerStatus::class.java\n}\n" + }, + { + "lang": "kotlin", + "value": "val codecRegistry = CodecRegistries.fromCodecs(IntegerCodec(), PowerStatusCodec())\n" + }, + { + "lang": "kotlin", + "value": "val powerStatusCodec = codecRegistry.get(PowerStatus::class.java)\nval integerCodec = codecRegistry.get(Integer::class.java)\n" + }, + { + "lang": "kotlin", + "value": "class MonolightCodec(registry: CodecRegistry) : Codec {\n private val powerStatusCodec: Codec\n private val integerCodec: Codec\n\n init {\n powerStatusCodec = registry[PowerStatus::class.java]\n integerCodec = IntegerCodec()\n }\n\n override fun encode(writer: BsonWriter, value: Monolight, encoderContext: EncoderContext) {\n writer.writeStartDocument()\n writer.writeName(\"powerStatus\")\n powerStatusCodec.encode(writer, value.powerStatus, encoderContext)\n writer.writeName(\"colorTemperature\")\n integerCodec.encode(writer, value.colorTemperature, encoderContext)\n writer.writeEndDocument()\n }\n\n override fun decode(reader: BsonReader, decoderContext: DecoderContext): Monolight {\n val monolight = Monolight()\n reader.readStartDocument()\n while (reader.readBsonType() != BsonType.END_OF_DOCUMENT) {\n when (reader.readName()) {\n \"powerStatus\" -> monolight.powerStatus = powerStatusCodec.decode(reader, decoderContext)\n \"colorTemperature\" -> monolight.colorTemperature = integerCodec.decode(reader, decoderContext)\n \"_id\" -> reader.readObjectId()\n }\n }\n reader.readEndDocument()\n return monolight\n }\n\n override fun getEncoderClass(): Class = Monolight::class.java\n}\n" + }, + { + "lang": "kotlin", + "value": "val newRegistry = CodecRegistries.fromRegistries(\n CodecRegistries.fromCodecs(MyEnumCodec()),\n MongoClientSettings.getDefaultCodecRegistry()\n)\n" + }, + { + "lang": "kotlin", + "value": "val bsonTypeClassMap = BsonTypeClassMap()\nval clazz = bsonTypeClassMap[BsonType.ARRAY]\nprintln(\"Class name: \" + clazz.name)\n" + }, + { + 
"lang": "console", + "value": "Java type: java.util.List" + }, + { + "lang": "kotlin", + "value": "val replacements = mutableMapOf>(BsonType.ARRAY to MutableSet::class.java)\nval bsonTypeClassMap = BsonTypeClassMap(replacements)\nval clazz = bsonTypeClassMap[BsonType.ARRAY]\nprintln(\"Class name: \" + clazz.name)\n" + }, + { + "lang": "console", + "value": "Java type: java.util.Set" + }, + { + "lang": "kotlin", + "value": "data class Monolight(\n var powerStatus: PowerStatus = PowerStatus.OFF,\n var colorTemperature: Int? = null\n) {\n override fun toString(): String = \"Monolight [powerStatus=$powerStatus, colorTemperature=$colorTemperature]\"\n}\n" + }, + { + "lang": "kotlin", + "value": "class MonolightCodec(registry: CodecRegistry) : Codec {\n private val powerStatusCodec: Codec\n private val integerCodec: Codec\n\n init {\n powerStatusCodec = registry[PowerStatus::class.java]\n integerCodec = IntegerCodec()\n }\n\n override fun encode(writer: BsonWriter, value: Monolight, encoderContext: EncoderContext) {\n writer.writeStartDocument()\n writer.writeName(\"powerStatus\")\n powerStatusCodec.encode(writer, value.powerStatus, encoderContext)\n writer.writeName(\"colorTemperature\")\n integerCodec.encode(writer, value.colorTemperature, encoderContext)\n writer.writeEndDocument()\n }\n\n override fun decode(reader: BsonReader, decoderContext: DecoderContext): Monolight {\n val monolight = Monolight()\n reader.readStartDocument()\n while (reader.readBsonType() != BsonType.END_OF_DOCUMENT) {\n when (reader.readName()) {\n \"powerStatus\" -> monolight.powerStatus = powerStatusCodec.decode(reader, decoderContext)\n \"colorTemperature\" -> monolight.colorTemperature = integerCodec.decode(reader, decoderContext)\n \"_id\" -> reader.readObjectId()\n }\n }\n reader.readEndDocument()\n return monolight\n }\n\n override fun getEncoderClass(): Class = Monolight::class.java\n}\n" + }, + { + "lang": "kotlin", + "value": "class MonolightCodecProvider : CodecProvider {\n @Suppress(\"UNCHECKED_CAST\")\n override fun get(clazz: Class, registry: CodecRegistry): Codec? {\n return if (clazz == Monolight::class.java) {\n MonolightCodec(registry) as Codec\n } else null // Return null when not a provider for the requested class\n }\n}\n" + }, + { + "lang": "kotlin", + "value": "fun main() = runBlocking {\n val mongoClient = MongoClient.create(\"\")\n val codecRegistry = CodecRegistries.fromRegistries(\n CodecRegistries.fromCodecs(IntegerCodec(), PowerStatusCodec()),\n CodecRegistries.fromProviders(MonolightCodecProvider()),\n MongoClientSettings.getDefaultCodecRegistry()\n )\n val database = mongoClient.getDatabase(\"codecs_example_products\")\n val collection = database.getCollection(\"monolights\")\n .withCodecRegistry(codecRegistry)\n\n // Construct and insert an instance of Monolight\n val myMonolight = Monolight(PowerStatus.ON, 5200)\n collection.insertOne(myMonolight)\n\n // Retrieve one or more instances of Monolight\n val lights = collection.find().toList()\n println(lights)\n}\n" + }, + { + "lang": "none", + "value": "[Monolight [powerStatus=ON, colorTemperature=5200]]" + } + ], + "preview": "In this guide, you can learn about Codecs and the supporting classes that\nhandle the encoding and decoding of Kotlin objects to and from BSON data\nin the MongoDB Kotlin driver. The Codec abstraction allows you to map any Kotlin type to\na corresponding BSON type. 
You can use this to map your domain objects\ndirectly to and from BSON instead of using data classes or an intermediate\nmap-based object such as Document or BsonDocument.", + "tags": null, + "facets": { + "target_product": ["drivers"], + "programming_language": ["kotlin"] + } + }, + { + "slug": "fundamentals/data-formats/document-data-format-bson", + "title": "Document Data Format: BSON", + "headings": [ + "Overview", + "BSON Data Format", + "MongoDB and BSON", + "Install the BSON Library" + ], + "paragraphs": "In this guide, you can learn about the BSON data format, how MongoDB\nuses it, and how to install the BSON library independently of the\nMongoDB Kotlin driver. BSON , or Binary JSON, is the data format that MongoDB uses to organize\nand store data. This data format includes all JSON data structure types and\nadds support for types including dates, different size integers, ObjectIds, and\nbinary data. For a complete list of supported types, see the\n BSON Types server manual page. The binary format is not human-readable, but you can use the\n BSON library to convert it to a JSON\nrepresentation. You can read more about the relationship between these\nformats in our article on JSON and BSON . The MongoDB Kotlin driver, which uses the BSON library, allows you to work\nwith BSON data by using one of the object types that implements the\n BSON interface ,\nincluding: For more information on using these object types, see our\n Documents guide . Document (BSON library package) BsonDocument (BSON library package) RawBsonDocument (BSON library package) JsonObject (BSON library package) These instructions show you how to add the BSON library as a dependency to\nyour project. If you added the MongoDB Kotlin driver as a dependency to your\nproject, you can skip this step since the BSON library is already included\nas a required dependency of the driver. For instructions on how to add the\nMongoDB Kotlin driver as a dependency to your project, see the\n driver installation section of our Quick Start\nguide. We recommend that you use the Maven or\n Gradle build automation tool to manage your project's\ndependencies. Select from the following tabs to see the dependency declaration\nfor that tool: If you are not using one of the preceding tools, you can include it in\nyour project by downloading the JAR file directly from the\n sonatype repository . The following snippet shows the dependency declaration in the\n dependencies section of your pom.xml file. 
The following snippet shows the dependency declaration in the\n dependencies object in your build.gradle file.", + "code": [ + { + "lang": "xml", + "value": "\n \n org.mongodb\n bson\n 5.1.2\n \n" + }, + { + "lang": "kotlin", + "value": "dependencies {\n implementation(\"org.mongodb:bson:5.1.2\")\n}" + } + ], + "preview": "In this guide, you can learn about the BSON data format, how MongoDB\nuses it, and how to install the BSON library independently of the\nMongoDB Kotlin driver.", + "tags": null, + "facets": { + "target_product": ["drivers"], + "programming_language": ["kotlin"] + } + }, + { + "slug": "fundamentals/data-formats/document-data-format-data-class", + "title": "Document Data Format: Data Classes", + "headings": [ + "Overview", + "Serialize and Deserialize a Data Class", + "Example Data Class", + "Insert a Data Class", + "Retrieve a Data Class", + "Specify Component Conversion Using Annotations", + "Example Annotated Data Class", + "Insert an Annotated Data Class", + "Retrieve an Annotated Data Class", + "Operations with Recursive Types" + ], + "paragraphs": "In this guide, you can learn how to store and retrieve data in the\nMongoDB Kotlin Driver using Kotlin data classes . The driver natively supports encoding and decoding Kotlin data classes for\nMongoDB read and write operations using the default codec registry . The\ndefault codec registry is a collection of classes called codecs that\ndefine how to encode and decode Kotlin and Java types. The code examples in this section reference the following sample data class, which\ndescribes a data storage device: You can insert a DataStorage instance as shown in the following code: You can retrieve documents as DataStorage instances and print them\nas shown in the following code: You specify a class for documents returned from a collection, even if it\nis different than the class you specified when retrieving the\ncollection. The following example performs an update to the document\nrepresented by the DataStorage data class in the previous example\nand returns the updated document as a NewDataStorage type. The\noperation adds the releaseDate field to the document with a\n name value of tape : For more information about this feature, see Specify Return Type in the Databases and Collections guide. This section describes the annotations you can use to configure the\nserialization behavior of data classes and provides an example to\ndemonstrate the annotation behavior. You can use the following annotations on data classes: For reference information on these property annotations,\nrefer to the org.bson.codecs.pojo.annotations \npackage. Annotation Name Description BsonId Marks a property to serialize as the _id property. BsonProperty Specifies a custom document field name when converting the data class\nfield to BSON. BsonRepresentation Specifies the BSON type MongoDB uses to store the value. Use this\nannotation only when you need to store a value as a different\nBSON type than the data class property. Your code might throw an exception if you include the\n BsonRepresentation annotation on a property that you store\nas the same type as the data class property. 
The code examples in this section reference the following sample data class, which\ndescribes a network device: You can insert a NetworkDevice instance as shown in the following code: The inserted document in MongoDB should resemble the following: You can retrieve documents as NetworkDevice instances and print them\nas shown in the following code: The driver natively supports encoding and decoding of recursively\ndefined data classes without causing runtime recursion. This support extends\nto cycles of multiple data class types in type definitions. The following\ncode provides an example of a recursive data class design: You can perform read and write operations on recursively defined data classes the same\nway you would for other data classes. The following code shows how you can\nexecute a find operation on a collection of DataClassTree types:", + "code": [ + { + "lang": "kotlin", + "value": "data class DataStorage(val productName: String, val capacity: Double)\n" + }, + { + "lang": "kotlin", + "value": "val collection = database.getCollection(\"data_storage\")\nval record = DataStorage(\"tape\", 5.0)\ncollection.insertOne(record)\n" + }, + { + "lang": "kotlin", + "value": "val collection = database.getCollection(\"data_storage_devices\")\n\n// Retrieve and print the documents as data classes\nval resultsFlow = collection.find()\nresultsFlow.collect { println(it) }\n" + }, + { + "lang": "console", + "value": "DataStorage(productName=tape, capacity=5.0)" + }, + { + "lang": "kotlin", + "value": "// Define a data class for returned documents\ndata class NewDataStorage(\n val productName: String,\n val capacity: Double,\n val releaseDate: LocalDate\n)\n\nval filter = Filters.eq(DataStorage::productName.name, \"tape\")\nval update = Updates.currentDate(\"releaseDate\")\nval options = FindOneAndUpdateOptions().returnDocument(ReturnDocument.AFTER)\n\n// Specify the class for returned documents as the type parameter in withDocumentClass()\nval result = collection\n .withDocumentClass()\n .findOneAndUpdate(filter, update, options)\n\nprintln(\"Updated document: ${result}\")\n" + }, + { + "lang": "console", + "value": "Updated document: NewDataStorage(productName=tape, capacity=5.0, releaseDate=2023-06-15)" + }, + { + "lang": "kotlin", + "value": "data class NetworkDevice(\n @BsonId\n @BsonRepresentation(BsonType.OBJECT_ID)\n val deviceId: String,\n val name: String,\n @BsonProperty(\"type\")\n val deviceType: String\n)\n" + }, + { + "lang": "json", + "value": "{\n _id: ObjectId(\"fedc...\"),\n name: 'Enterprise Wi-fi',\n type: 'router'\n}" + }, + { + "lang": "kotlin", + "value": "val collection = database.getCollection(\"network_devices\")\n\n// Insert the record\nval deviceId = ObjectId().toHexString()\nval device = NetworkDevice(deviceId, \"Enterprise Wi-fi\", \"router\")\ncollection.insertOne(device)\n" + }, + { + "lang": "kotlin", + "value": "val collection = database.getCollection(\"network_devices\")\n\n// Return all documents in the collection as data classes\nval resultsFlow = collection.find()\nresultsFlow.collect { println(it) }\n" + }, + { + "lang": "console", + "value": "NetworkDevice(deviceId=645cf..., name=Enterprise Wi-fi, deviceType=router)" + }, + { + "lang": "kotlin", + "value": "data class DataClassTree(\n val content: String,\n val left: DataClassTree?,\n val right: DataClassTree?\n)\n" + }, + { + "lang": "kotlin", + "value": "val collection = database.getCollection(\"myCollection\")\n\nval filter = Filters.eq(\"left.left.right.content\", \"high german\")\nval resultsFlow = 
collection.find(filter)\nresultsFlow.collect { println(it) }\n" + }, + { + "lang": "console", + "value": "DataClassTree(content=indo-european, left=DataClassTree(content=germanic, left=DataClassTree(content=german, left=null, right=DataClassTree(content=high german, ...)), right=...)" + } + ], + "preview": "In this guide, you can learn how to store and retrieve data in the\nMongoDB Kotlin Driver using Kotlin data classes.", + "tags": null, + "facets": { + "target_product": ["drivers"], + "programming_language": ["kotlin"] + } + }, + { + "slug": "fundamentals/data-formats/document-data-format-extended-json", + "title": "Document Data Format: Extended JSON", + "headings": [ + "Overview", + "Extended JSON Formats", + "Extended JSON Examples", + "Read Extended JSON", + "Using the Document Classes", + "Using the BSON Library", + "Write Extended JSON", + "Using the Document Classes", + "Using the BSON Library", + "Custom BSON Type Conversion" + ], + "paragraphs": "In this guide, you can learn how to use the Extended JSON format in the\nMongoDB Kotlin driver. JSON is a data format that represents the values of objects, arrays, numbers,\nstrings, booleans, and nulls. The Extended JSON format defines a reserved\nset of keys prefixed with \" $ \" to represent field type information that\ndirectly corresponds to each type in BSON, the format that MongoDB uses to\nstore data. This guide explains the following topics: For more information on the difference between these formats, see our\n article on JSON and BSON . The different MongoDB Extended JSON formats How to use the BSON library to convert between Extended JSON and Kotlin objects How to create a custom conversion of BSON types MongoDB Extended JSON features different string formats to represent BSON data.\nEach of the different formats conform to the JSON RFC\nand meet specific use cases. The extended format, also known as the\n canonical format, features specific representations for every BSON type\nfor bidirectional conversion without loss of information. The Relaxed mode \nformat is more concise and closer to ordinary JSON, but does not represent\nall the type information such as the specific byte size of number fields. See the following table to see a description of each format: For more detailed information on these formats, see the following\nresources: Name Description Extended Relaxed Mode Shell Strict The driver parses the $uuid Extended JSON type from a string to a\n BsonBinary object of binary subtype 4. For more information about $uuid field\nparsing, see the\n special rules for parsing $uuid fields \nsection in the extended JSON specification. JSON RFC Official Documentation MongoDB Extended JSON Server Manual Entry BsonBinary API Documentation Extended JSON specification GitHub Documentation The following examples show a document containing an ObjectId, date, and long\nnumber field represented in each Extended JSON format. Click the tab that\ncorresponds to the format of the example you want to see: You can read an Extended JSON string into a Kotlin document object by calling\nthe parse() static method from either the Document or BsonDocument \nclass, depending on which object type you need. This method parses the Extended\nJSON string in any of the formats and returns an instance of that class\ncontaining the data. The following example shows how you can use the Document class to read\nan example Extended JSON string into a Document object using the\n parse() method: For more information, see our Fundamentals page\non Documents . 
You can also read an Extended JSON string into Kotlin objects without using\nthe MongoDB Kotlin driver's document classes by using the JsonReader class.\nThis class contains methods to sequentially parse the fields and values\nin any format of the Extended JSON string, and returns them as Kotlin objects.\nThe driver's document classes also use this class to parse Extended JSON. The following code example shows how you can use the JsonReader class to convert\nan Extended JSON string into Kotlin objects: For more information, see the JsonReader API Documentation. You can write an Extended JSON string from an instance of Document or\n BsonDocument by calling the toJson() method, optionally passing it an\ninstance of JsonWriterSettings to specify the Extended JSON format. In this example, we output the Extended JSON in the Relaxed mode format. You can also output an Extended JSON string from data in Kotlin objects using\nthe BSON library with the JsonWriter class. To construct an instance\nof JsonWriter , pass a subclass of a Java Writer to specify how\nyou want to output the Extended JSON. You can optionally pass a JsonWriterSettings \ninstance to specify options such as the Extended JSON format. By default, the\n JsonWriter uses the Relaxed mode format. The MongoDB Kotlin driver's\ndocument classes also use this class to convert BSON to Extended JSON. The following code example shows how you can use JsonWriter to create an\nExtended JSON string and output it to System.out . We specify the format\nby passing the outputMode() builder method the JsonMode.EXTENDED constant: For more information about the methods and classes mentioned in this section,\nsee the following API Documentation: JsonWriter JsonWriterSettings outputMode() In addition to specifying the outputMode() to format the JSON output, you\ncan further customize the output by adding converters to your\n JsonWriterSettings.Builder . These converter methods detect the Kotlin types\nand execute the logic defined by the Converter passed to them. The following sample code shows how to append converters, defined as lambda\nexpressions, to simplify the Relaxed mode JSON output. 
For more information about the methods and classes mentioned in this section,\nsee the following API Documentation: Converter JsonWriterSettings.Builder", + "code": [ + { + "lang": "json", + "value": "{\n \"_id\": { \"$oid\": \"573a1391f29313caabcd9637\" },\n \"createdAt\": { \"$date\": { \"$numberLong\": \"1601499609\" }},\n \"numViews\": { \"$numberLong\": \"36520312\" }\n}" + }, + { + "lang": "json", + "value": "{\n \"_id\": { \"$oid\": \"573a1391f29313caabcd9637\" },\n \"createdAt\": { \"$date\": \"2020-09-30T18:22:51.648Z\" },\n \"numViews\": 36520312\n}" + }, + { + "lang": "json", + "value": "{\n \"_id:\": ObjectId(\"573a1391f29313caabcd9637\"),\n \"createdAt\": ISODate(\"2020-09-30T18:22:51.648Z\"),\n \"numViews\": NumberLong(\"36520312\")\n}" + }, + { + "lang": "json", + "value": "{\n \"_id:\": { \"$oid\": \"573a1391f29313caabcd9637\" },\n \"createdAt\": { \"$date\": 1601499609 },\n \"numViews\": { \"$numberLong\": \"36520312\" }\n}" + }, + { + "lang": "kotlin", + "value": "val ejsonStr = \"\"\"\n { \"_id\": { \"${\"$\"}oid\": \"507f1f77bcf86cd799439011\"},\n \"myNumber\": {\"${\"$\"}numberLong\": \"4794261\" }}\n \"\"\".trimIndent()\n\nval doc = Document.parse(ejsonStr)\n\nprintln(doc)\n" + }, + { + "lang": "console", + "value": "Document{{_id=507f1f77bcf86cd799439011, myNumber=4794261}}" + }, + { + "lang": "kotlin", + "value": "val ejsonStr = \"\"\"\n { \"_id\": { \"${\"$\"}oid\": \"507f1f77bcf86cd799439011\"},\n \"myNumber\": {\"${\"$\"}numberLong\": \"4794261\" }}\n \"\"\".trimIndent()\n\nval jsonReader = JsonReader(ejsonStr)\n\njsonReader.readStartDocument()\n\njsonReader.readName(\"_id\")\nval id = jsonReader.readObjectId()\njsonReader.readName(\"myNumber\")\nval myNumber = jsonReader.readInt64()\n\njsonReader.readEndDocument()\n\nprintln(id.toString() + \" is type: \" + id.javaClass.name)\nprintln(myNumber.toString() + \" is type: \" + myNumber.javaClass.name)\n\njsonReader.close()\n" + }, + { + "lang": "console", + "value": "507f1f77bcf86cd799439011 is type: org.bson.types.ObjectId\n4794261 is type: java.lang.Long" + }, + { + "lang": "kotlin", + "value": "val myDoc = Document().append(\"_id\", ObjectId(\"507f1f77bcf86cd799439012\"))\n .append(\"myNumber\", 11223344)\n\nval settings = JsonWriterSettings.builder().outputMode(JsonMode.RELAXED).build()\nmyDoc.toJson(settings)\n" + }, + { + "lang": "javascript", + "value": "{\"_id\": {\"$oid\": \"507f1f77bcf86cd799439012\"}, \"myNumber\": 11223344}" + }, + { + "lang": "kotlin", + "value": "val settings = JsonWriterSettings.builder().outputMode(JsonMode.EXTENDED).build()\n\nJsonWriter(BufferedWriter(OutputStreamWriter(System.out)), settings).use { jsonWriter ->\n jsonWriter.writeStartDocument()\n jsonWriter.writeObjectId(\"_id\", ObjectId(\"507f1f77bcf86cd799439012\"))\n jsonWriter.writeInt64(\"myNumber\", 11223344)\n jsonWriter.writeEndDocument()\n jsonWriter.flush()\n}\n" + }, + { + "lang": "javascript", + "value": "{\"_id\": {\"$oid\": \"507f1f77bcf86cd799439012\"}, \"myNumber\": {\"$numberLong\": \"11223344\"}}" + }, + { + "lang": "kotlin", + "value": "val settings = JsonWriterSettings.builder()\n .outputMode(JsonMode.RELAXED)\n .objectIdConverter { value, writer -> writer.writeString(value.toHexString()) }\n .timestampConverter { value, writer ->\n val ldt = LocalDateTime.ofInstant(Instant.ofEpochSecond(value.time.toLong()), ZoneOffset.UTC)\n writer.writeString(ldt.format(DateTimeFormatter.ISO_DATE_TIME))\n }\n .build()\n\nval doc = Document()\n .append(\"_id\", ObjectId(\"507f1f77bcf86cd799439012\"))\n 
.append(\"createdAt\", BsonTimestamp(1601516589,1))\n .append(\"myNumber\", 4794261)\n\nprintln(doc.toJson(settings))\n" + }, + { + "lang": "javascript", + "value": "{\"_id\": \"507f1f77bcf86cd799439012\", \"createdAt\": \"2020-10-01T01:43:09\", \"myNumber\": 4794261}\n\n// Without specifying the converters, the Relaxed mode JSON output\n// should look something like this:\n{\"_id\": {\"$oid\": \"507f1f77bcf86cd799439012\"}, \"createdAt\": {\"$timestamp\": {\"t\": 1601516589, \"i\": 1}}, \"myNumber\": 4794261}" + } + ], + "preview": "In this guide, you can learn how to use the Extended JSON format in the\nMongoDB Kotlin driver.", + "tags": null, + "facets": { + "target_product": ["drivers"], + "programming_language": ["kotlin"] + } + }, + { + "slug": "fundamentals/data-formats/documents", + "title": "Documents", + "headings": [ + "Overview", + "Document", + "BsonDocument", + "JsonObject", + "Summary" + ], + "paragraphs": "In this guide, you can learn how to use documents in the\nMongoDB Kotlin driver. A MongoDB document is a data structure that contains key/value fields in\nbinary JSON (BSON) format. You can use documents and the data they contain\nin their fields to store data as well as issue commands or queries in\nMongoDB. For more information on the terminology, structure, and limitations of documents,\nread our page on Documents in the MongoDB manual. The MongoDB Kotlin driver and BSON library include the following classes that help you\naccess and manipulate the BSON data in documents: While you can use any of these classes in your application, we recommend\nthat you use the Document class since it can concisely represent\ndynamically structured documents of any complexity. It implements the\n Map interface which enables it to use loosely-typed\nvalues. Name Package Implements Map Recommended Usage Document org.bson Yes, implements Map When you want a flexible and concise data representation. BsonDocument org.bson Yes, implements Map When you need a type-safe API. JsonObject org.bson.json No When you only want to work with JSON strings. The Document class offers a flexible representation of a BSON document.\nYou can access and manipulate fields using Kotlin types from the standard\nlibrary with this class. See the following table for mappings between\nfrequently-used BSON and Kotlin types: In the following code snippet, we show how to instantiate and build a sample\n Document instance representing a document containing several\ndifferent field types: To insert this document into a collection, instantiate a collection\nusing the getCollection() method and call the insertOne operation as follows: Once you perform a successful insert, you can retrieve the sample document\ndata from the collection using the following code: For more information on retrieving and manipulating MongoDB data, see our\n CRUD guide . For more information about the methods and classes mentioned in this section,\nsee the following API Documentation: BSON type Kotlin type Array kotlin.collections.List Binary org.bson.types.Binary Boolean kotlin.Boolean Date java.time.LocalDateTime Document org.bson.Document Double kotlin.Double Int32 kotlin.Int Int64 kotlin.Long Null null ObjectId org.bson.types.ObjectId String kotlin.String The preceding code sample uses helper methods that check the returned type\nand throw an exception if it is unable to cast the field value.\nYou can call the get() method to retrieve values as type\n Object and to skip type checking. 
Document getCollection() get() The BsonDocument class provides a type-safe API to access and manipulate\na BSON document. You need to specify the BSON type from the BSON\nlibrary for each field. See the following table for mappings between\nfrequently-used BSON and BSON library types: In the following code snippet, we show how to instantiate and build a sample\n BsonDocument instance representing a document containing several\ndifferent field types: To insert this document into a collection, instantiate a collection\nusing the getCollection() method specifying the BsonDocument \nclass as the documentClass parameter. Then, call the\n insertOne operation as follows: Once you perform a successful insert, you can retrieve the sample document\ndata from the collection using the following code: For more information about the methods and classes mentioned in this section,\nsee the following API Documentation: BSON type BSON library type Array org.bson.BsonArray Binary org.bson.BsonBinary Boolean org.bson.Boolean Date (long value) org.bson.BsonDateTime Document org.bson.BsonDocument Double org.bson.BsonDouble Int32 org.bson.BsonInt32 Int64 org.bson.BsonInt64 Null org.bson.BsonNull ObjectId org.bson.BsonObjectId String org.bson.BsonString The preceding code sample uses helper methods that check the returned type\nand throw a BsonInvalidOperationException if it is unable to cast\nthe field value. You can call the get() method to retrieve values as type\n BsonValue and to skip type checking. BsonDocument getCollection() BsonInvalidOperationException get() BsonValue The JsonObject class acts as a wrapper for JSON strings.\nIf you only want to work with JSON data, you can use JsonObject \nto avoid unnecessary data conversion to a Map object. By default, JsonObject stores Extended JSON .\nYou can customize the format of JSON in JsonObject by specifying a\n JsonObjectCodec and passing it a JsonWriterSettings \nobject. For more information on JSON formats, see\nour Extended JSON guide . In the following code snippet, we show how to instantiate a sample JsonObject \ninstance wrapping an Extended JSON string containing different types of key value pairs: To insert this document into a collection, instantiate a collection\nusing the getCollection() method specifying the JsonObject class\nas the documentClass parameter. Then, call the\n insertOne operation as follows: Once you perform a successful insert, you can retrieve the sample JSON data from the\ncollection. While you can use any class that extends Bson to specify your query,\nhere is how to query your data using a JsonObject : For more information about the methods and classes mentioned in this section,\nsee the following API Documentation: JsonObject JsonObjectCodec JsonWriterSettings getCollection() In this guide, we covered the following topics on classes you can use to\nwork with BSON data: Described Kotlin classes you can use to work with MongoDB documents and\nwhy you might prefer one over the other. 
Provided usage examples for each class on building documents containing\nmultiple types, inserting them into a collection, and\nretrieving/accessing their typed fields.", + "code": [ + { + "lang": "kotlin", + "value": "val author = Document(\"_id\", ObjectId())\n .append(\"name\", \"Gabriel Garc\u00eda M\u00e1rquez\")\n .append(\n \"dateOfDeath\",\n LocalDateTime.of(2014, 4, 17, 4, 0)\n )\n .append(\n \"novels\", listOf(\n Document(\"title\", \"One Hundred Years of Solitude\").append(\"yearPublished\", 1967),\n Document(\"title\", \"Chronicle of a Death Foretold\").append(\"yearPublished\", 1981),\n Document(\"title\", \"Love in the Time of Cholera\").append(\"yearPublished\", 1985)\n )\n )\n" + }, + { + "lang": "kotlin", + "value": "// val mongoClient = \n\nval database = mongoClient.getDatabase(\"fundamentals_data\")\nval collection = database.getCollection(\"authors\")\nval result = collection.insertOne(author)\n" + }, + { + "lang": "kotlin", + "value": "val doc = collection.find(Filters.eq(\"name\", \"Gabriel Garc\u00eda M\u00e1rquez\")).firstOrNull()\ndoc?.let {\n println(\"_id: ${it.getObjectId(\"_id\")}, name: ${it.getString(\"name\")}, dateOfDeath: ${it.getDate(\"dateOfDeath\")}\")\n\n it.getList(\"novels\", Document::class.java).forEach { novel ->\n println(\"title: ${novel.getString(\"title\")}, yearPublished: ${novel.getInteger(\"yearPublished\")}\")\n }\n}\n" + }, + { + "lang": "none", + "value": "_id: 5fb5fad05f734e3794741a35, name: Gabriel Garc\u00eda M\u00e1rquez, dateOfDeath: Thu Apr 17 00:00:00 EDT 2014\ntitle: One Hundred Years of Solitude, yearPublished: 1967\ntitle: Chronicle of a Death Foretold, yearPublished: 1981\ntitle: Love in the Time of Cholera, yearPublished: 1985" + }, + { + "lang": "kotlin", + "value": "val author = BsonDocument()\n .append(\"_id\", BsonObjectId())\n .append(\"name\", BsonString(\"Gabriel Garc\u00eda M\u00e1rquez\"))\n .append(\n \"dateOfDeath\",\n BsonDateTime(\n LocalDateTime.of(2014, 4, 17, 0, 0).atZone(ZoneId.of(\"America/New_York\")).toInstant().toEpochMilli()\n )\n )\n .append(\n \"novels\", BsonArray(\n listOf(\n BsonDocument().append(\"title\", BsonString(\"One Hundred Years of Solitude\"))\n .append(\"yearPublished\", BsonInt32(1967)),\n BsonDocument().append(\"title\", BsonString(\"Chronicle of a Death Foretold\"))\n .append(\"yearPublished\", BsonInt32(1981)),\n BsonDocument().append(\"title\", BsonString(\"Love in the Time of Cholera\"))\n .append(\"yearPublished\", BsonInt32(1985))\n )\n )\n )\n" + }, + { + "lang": "kotlin", + "value": "// val mongoClient = \n\nval database = mongoClient.getDatabase(\"fundamentals_data\")\nval collection = database.getCollection(\"authors\")\n\nval result: InsertOneResult = collection.insertOne(author)\n" + }, + { + "lang": "kotlin", + "value": "// \n\nval doc = collection.find(Filters.eq(\"name\", \"Gabriel Garc\u00eda M\u00e1rquez\")).firstOrNull()\ndoc?.let {\n println(\"_id: ${it.getObjectId(\"_id\").value}, name: ${it.getString(\"name\").value}, dateOfDeath: ${Instant.ofEpochMilli(it.getDateTime(\"dateOfDeath\").value).atZone(ZoneId.of(\"America/New_York\")).toLocalDateTime()}\")\n\n it.getArray(\"novels\").forEach { novel ->\n val novelDocument = novel.asDocument()\n println(\"title: ${novelDocument.getString(\"title\").value}, yearPublished: ${novelDocument.getInt32(\"yearPublished\").value}\")\n }\n}\n" + }, + { + "lang": "none", + "value": "_id: 5fb5fad05f734e3794741a35, name: Gabriel Garc\u00eda M\u00e1rquez, dateOfDeath: 2014-04-17T00:00\ntitle: One Hundred Years of Solitude, 
yearPublished: 1967\ntitle: Chronicle of a Death Foretold, yearPublished: 1981\ntitle: Love in the Time of Cholera, yearPublished: 1985" + }, + { + "lang": "kotlin", + "value": "val ejsonStr = \"\"\"\n {\"_id\": {\"${\"$\"}oid\": \"6035210f35bd203721c3eab8\"},\n \"name\": \"Gabriel Garc\u00eda M\u00e1rquez\",\n \"dateOfDeath\": {\"${\"$\"}date\": \"2014-04-17T04:00:00Z\"},\n \"novels\": [\n {\"title\": \"One Hundred Years of Solitude\",\"yearPublished\": 1967},\n {\"title\": \"Chronicle of a Death Foretold\",\"yearPublished\": 1981},\n {\"title\": \"Love in the Time of Cholera\",\"yearPublished\": 1985}]}\n \"\"\".trimIndent()\n\nval author = JsonObject(ejsonStr)\n" + }, + { + "lang": "kotlin", + "value": "// val mongoClient = ;\n\nval database = mongoClient.getDatabase(\"fundamentals_data\")\nval collection= database.getCollection(\"authors\")\n\nval result = collection.insertOne(author)\n" + }, + { + "lang": "kotlin", + "value": "// val mongoClient = ;\n\nval query = JsonObject(\"{\\\"name\\\": \\\"Gabriel Garc\\\\u00eda M\\\\u00e1rquez\\\"}\")\nval jsonResult = collection.find(query).firstOrNull()\njsonResult?.let {\n println(\"query result in extended json format: \" + jsonResult.json)\n}\n" + }, + { + "lang": "none", + "value": "query result in extended json format: {\"_id\": {\"$oid\": \"6035210f35bd203721c3eab8\"}, \"name\": \"Gabriel Garc\u00eda M\u00e1rquez\", \"dateOfDeath\": {\"$date\": \"2014-04-17T04:00:00Z\"}, \"novels\": [{\"title\": \"One Hundred Years of Solitude\", \"yearPublished\": 1967}, {\"title\": \"Chronicle of a Death Foretold\", \"yearPublished\": 1981}, {\"title\": \"Love in the Time of Cholera\", \"yearPublished\": 1985}]}" + } + ], + "preview": "In this guide, you can learn how to use documents in the\nMongoDB Kotlin driver.", + "tags": null, + "facets": { + "target_product": ["drivers"], + "programming_language": ["kotlin"] + } + }, + { + "slug": "fundamentals/data-formats/serialization", + "title": "Kotlin Serialization", + "headings": [ + "Overview", + "Supported Types", + "Add Kotlin Serialization to Your Project", + "Annotate Data Classes", + "Custom Serializer Example", + "Customize the Serializer Configuration", + "Custom Codec Example", + "Polymorphic Serialization", + "Polymorphic Data Classes Example" + ], + "paragraphs": "The Kotlin driver supports the kotlinx.serialization library for\nserializing and deserializing Kotlin objects. The driver provides an efficient Bson serializer that you can use with\nclasses marked as @Serializable to handle the serialization of Kotlin objects\nto BSON data. You can also install the bson-kotlinx library to support\n custom codecs with configurations to encode\ndefaults, encode nulls, and define class discriminators. Although you can use the Kotlin driver with the Kotlin serialization Json \nlibrary, the Json serializer does not directly support BSON value types such\nas ObjectId . You must provide a custom serializer that can handle the\nconversion between BSON and JSON. To learn how to use the Codec interface instead of the\nKotlin serialization library to specify custom encoding and decoding\nof Kotlin objects to BSON data, see the Codecs guide. You might choose Kotlin serialization if you are already familiar\nwith the framework or if you prefer to use an idiomatic Kotlin approach. The Kotlin driver supports: All Kotlin types that are supported by the Kotlin serialization library All available BSON types Support for serialization in the Kotlin driver depends on the official Kotlin\nserialization library . 
Select from the following tabs to see how to add the serialization\ndependencies to your project by using the Gradle and\n Maven package managers: If you are using Gradle to manage your\ndependencies, add the following to your build.gradle.kts dependencies list: If you are using Maven to manage your\ndependencies, add the following to your pom.xml dependencies list: To declare a class as serializable, annotate your Kotlin data classes with the\n @Serializable annotation from the Kotlin serialization framework. You can use your data classes in your code as normal after you mark them as serializable.\nThe Kotlin driver and the Kotlin serialization framework handle the\nBSON serialization and deserialization. This example shows a simple data class annotated with the following: For more information on serializable classes and available annotation classes,\nsee the official Kotlin Serialization \ndocumentation. @Serializable to mark the class as serializable. @SerialName to specify the name of the id and manufacturer properties\nin the BSON document. This can be used in place of the @BsonId and\n @BsonProperty annotations, which are unsupported in serializable classes. @Contextual to mark the BSON id property to use the built-in ObjectIdSerializer .\nThis annotation is required for BSON types to be serialized correctly. You cannot use annotations \nfrom the org.bson.codecs.pojo.annotations package on @Serializable data classes. You can create a custom serializer to handle how your data is\nrepresented in BSON. The Kotlin driver uses the KSerializer \ninterface from the kotlinx.serialization package to implement custom\nserializers. You can specify the custom serializer as the parameter to\nthe @Serializable annotation for a specific field. The following example shows how to create a custom\n KSerializer instance to convert a kotlinx.datetime.Instant to a\n BsonDateTime : The following code shows the PaintOrder data class in which the\n orderDate field has an annotation that specifies the custom\nserializer class defined in the preceding code: For more information about the methods and classes mentioned in this section,\nsee the following API documentation: KSerializer Instant BsonEncoder BsonDecoder You can use the KotlinSerializerCodec class from the org.bson.codecs.kotlinx \npackage to create a codec for your @Serializable data classes and\ncustomize what is stored. Use the BsonConfiguration class to define the configuration,\nincluding whether to encode defaults, encode nulls, or define class discriminators. To create a custom codec, install the bson-kotlinx \ndependency to your project. Select from the following tabs to see how to\nadd the dependency to your project by using the Gradle and\n Maven package managers: Then, you can define your codec using the\n KotlinSerializerCodec.create() \nmethod and add it to the registry. If you are using Gradle to manage your\ndependencies, add the following to your build.gradle.kts dependencies list: If you are using Maven to manage your\ndependencies, add the following to your pom.xml dependencies list: You can also optionally install the bson-kotlin dependency\nthrough the default codec registry. This dependency uses reflection\nand the codec registry to support Kotlin data classes, but it does\nnot support certain POJO annotations such as BsonDiscriminator ,\n BsonExtraElements , and BsonConstructor . To learn more, see\nthe bson-kotlin API documentation . 
Generally, we recommend that you install and use the faster\n bson-kotlinx library for codec configuration. The following example shows how to create a codec using the\n KotlinSerializerCodec.create() method and configure it to not encode defaults: For more information about the methods and classes mentioned in this section,\nsee the following API documentation: KotlinSerializerCodec KotlinSerializerCodec.create() BsonConfiguration The Kotlin driver natively supports serialization and deserialization\nof polymorphic classes. When you mark a sealed interface and data\nclasses that inherit that interface with the @Serializable \nannotation, the driver uses a KSerializer implementation to handle\nconversion of your types to and from BSON. When you insert an instance of a polymorphic data class into MongoDB,\nthe driver adds the field _t , the\ndiscriminator field. The value of this field is the data class name. The following example creates an interface and two data classes that\ninherit that interface. In the data classes, the id field is marked\nwith the annotations described in the\n Annotate Data Classes section: Then, you can perform operations with data classes as usual. The\nfollowing example parametrizes the collection with the Person \ninterface, then performs operations with the polymorphic classes\n Teacher and Student . When you retrieve documents, the driver\nautomatically detects the type based on the discriminator value and\ndeserializes them accordingly.", + "code": [ + { + "lang": "kotlin", + "value": "implementation(\"org.jetbrains.kotlinx:kotlinx-serialization-core:1.5.1\")\nimplementation(\"org.mongodb:bson-kotlinx:5.1.2\")" + }, + { + "lang": "kotlin", + "value": "\n org.jetbrains.kotlinx\n kotlinx-serialization-core\n 1.5.1\n\n\n org.mongodb\n bson-kotlinx\n 5.1.2\n" + }, + { + "lang": "kotlin", + "value": "@Serializable\ndata class PaintOrder(\n @SerialName(\"_id\") // Use instead of @BsonId\n @Contextual val id: ObjectId?,\n val color: String,\n val qty: Int,\n @SerialName(\"brand\")\n val manufacturer: String = \"Acme\" // Use instead of @BsonProperty\n)\n" + }, + { + "lang": "kotlin", + "value": "object InstantAsBsonDateTime : KSerializer {\n override val descriptor: SerialDescriptor = PrimitiveSerialDescriptor(\"InstantAsBsonDateTime\", PrimitiveKind.LONG)\n\n override fun serialize(encoder: Encoder, value: Instant) {\n when (encoder) {\n is BsonEncoder -> encoder.encodeBsonValue(BsonDateTime(value.toEpochMilliseconds()))\n else -> throw SerializationException(\"Instant is not supported by ${encoder::class}\")\n }\n }\n\n override fun deserialize(decoder: Decoder): Instant {\n return when (decoder) {\n is BsonDecoder -> Instant.fromEpochMilliseconds(decoder.decodeBsonValue().asDateTime().value)\n else -> throw SerializationException(\"Instant is not supported by ${decoder::class}\")\n }\n }\n}\n" + }, + { + "lang": "kotlin", + "value": "@Serializable\ndata class PaintOrder(\n val color: String,\n val qty: Int,\n @Serializable(with = InstantAsBsonDateTime::class)\n val orderDate: Instant,\n)\n" + }, + { + "lang": "kotlin", + "value": "implementation(\"org.mongodb:bson-kotlinx:5.1.2\")" + }, + { + "lang": "kotlin", + "value": "\n org.jetbrains.kotlinx\n bson-kotlinx\n 5.1.2\n" + }, + { + "lang": "kotlin\n :copyable: true", + "value": "import org.bson.codecs.configuration.CodecRegistries\nimport org.bson.codecs.kotlinx.BsonConfiguration\nimport org.bson.codecs.kotlinx.KotlinSerializerCodec" + }, + { + "lang": "kotlin", + "value": "val myCustomCodec = 
KotlinSerializerCodec.create(\n bsonConfiguration = BsonConfiguration(encodeDefaults = false)\n)\n\nval registry = CodecRegistries.fromRegistries(\n CodecRegistries.fromCodecs(myCustomCodec), collection.codecRegistry\n)\n" + }, + { + "lang": "kotlin", + "value": "@Serializable\nsealed interface Person {\n val name: String\n}\n\n@Serializable\ndata class Student(\n @Contextual\n @SerialName(\"_id\")\n val id: ObjectId,\n override val name: String,\n val grade: Int,\n) : Person\n\n@Serializable\ndata class Teacher(\n @Contextual\n @SerialName(\"_id\")\n val id: ObjectId,\n override val name: String,\n val department: String,\n) : Person\n" + }, + { + "lang": "kotlin", + "value": "val collection = database.getCollection(\"school\")\n\nval teacherDoc = Teacher(ObjectId(), \"Vivian Lee\", \"History\")\nval studentDoc = Student(ObjectId(), \"Kate Parker\", 10)\n\ncollection.insertOne(teacherDoc)\ncollection.insertOne(studentDoc)\n\nprintln(\"Retrieving by using data classes\")\ncollection.withDocumentClass()\n .find(Filters.exists(\"department\"))\n .first().also { println(it) }\n\ncollection.withDocumentClass()\n .find(Filters.exists(\"grade\"))\n .first().also { println(it) }\n\nprintln(\"\\nRetrieving by using Person interface\")\nval resultsFlow = collection.withDocumentClass().find()\nresultsFlow.collect { println(it) }\n\nprintln(\"\\nRetrieving as Document type\")\nval resultsDocFlow = collection.withDocumentClass().find()\nresultsDocFlow.collect { println(it) }\n" + }, + { + "lang": "console", + "value": "Retrieving by using data classes\nTeacher(id=..., name=Vivian Lee, department=History)\nStudent(id=..., name=Kate Parker, grade=10)\n\nRetrieving by using Person interface\nTeacher(id=..., name=Vivian Lee, department=History)\nStudent(id=..., name=Kate Parker, grade=10)\n\nRetrieving as Document type\nDocument{{_id=..., _t=Teacher, name=Vivian Lee, department=History}}\nDocument{{_id=..., _t=Student, name=Kate Parker, grade=10}}" + } + ], + "preview": "The Kotlin driver supports the kotlinx.serialization library for\nserializing and deserializing Kotlin objects.", + "tags": "code example, data model, conversion, polymorphism", + "facets": { + "genre": ["reference"], + "target_product": ["drivers"], + "programming_language": ["kotlin"] + } + }, + { + "slug": "fundamentals/data-formats", + "title": "Data Formats", + "headings": [], + "paragraphs": "Document Data Format: Data Classes Document Data Format: BSON Document Data Format: Extended JSON Documents Kotlin Serialization Codecs", + "code": [], + "preview": null, + "tags": null, + "facets": { + "target_product": ["drivers"], + "programming_language": ["kotlin"] + } + }, + { + "slug": "fundamentals/databases-collections", + "title": "Databases and Collections", + "headings": [ + "Overview", + "Access a Database", + "Access a Collection", + "Specify Return Type", + "Create a Collection", + "Document Validation", + "Get a List of Collections", + "Drop a Collection", + "Specify Read Preferences, Read Concerns, and Write Concerns" + ], + "paragraphs": "In this guide, you can learn how to use MongoDB databases and\ncollections with the MongoDB Kotlin driver. MongoDB organizes data into a hierarchy of the following levels: With the MongoDB Kotlin driver, you can model data by using Kotlin data\nclasses or by using the Document class to store and\nretrieve data from MongoDB. To learn more about using data classes, see\nthe guide on the Data Class Data Format . 
To learn more about using the Document \nclass, see the guide on the Document Data Format . Databases : Databases are the top level of data organization in a MongoDB instance. Collections : Databases are organized into collections which contain documents . Documents : Documents contain literal data such as strings, numbers, and dates, as well as other embedded documents. For more information on document field types and structure, see the Server documentation on documents . Use the getDatabase() method of\na MongoClient instance to access a MongoDatabase in a MongoDB\ninstance. The following example accesses a database named testDatabase : Use the getCollection() \nmethod of a MongoDatabase instance to access a\n MongoCollection in a database of your connected MongoDB instance. The following example accesses a collection named testCollection from a\n MongoDatabase that contains documents of type ExampleDataClass : If the provided collection name does not already exist in the database,\nMongoDB implicitly creates the collection when you first insert data\ninto that collection. The driver provides a way for you to specify a class for documents\nreturned from a collection, even if it is different than the class you\nspecified when retrieving the collection. You can specify a return class\nby using the MongoCollection.withDocumentClass() \nmethod. Specifying a different return class could be useful in the following\nsituations: The following example retrieves a collection that\ncontains data represented by the Fruit data class but returns the result\nof a findOneAndUpdate() operation as an instance of the NewFruit \nclass. The operation changes the name of the qty field to\n quantity and adds an item to the seasons array field in the\ndocument with a name value of \"strawberry\" : Your collection contains multiple data types. You specify a projection that changes your data fields. You cannot directly specify a return type on a method that changes the data,\nsuch as findOneAndUpdate() or findOneAndReplace() . Use the createCollection() \nmethod of a MongoDatabase instance to create a collection\nin a database of your connected MongoDB instance. The following example creates a collection called exampleCollection : You can specify collection options like maximum size and document\nvalidation rules using the CreateCollectionOptions \nclass. The createCollection() method accepts an instance of\n CreateCollectionOptions as an optional second parameter. Document validation provides the ability to validate documents\nagainst a series of filters during writes to a collection. You can\nspecify these filters using the ValidationOptions \nclass, which accepts a series of Filters instances\nthat specify the validation rules and expressions: For more information, see the server documentation for document\nvalidation . You can query for a list of collections in a database using the\n MongoDatabase.listCollectionNames() method: You can remove a collection from the database using the\n MongoCollection.drop() method: Dropping a collection from your database also permanently deletes all\ndocuments within that collection and all indexes on that collection.\nOnly drop collections that contain data that is no longer needed. 
Read preferences , read concerns , and write concerns control\nhow the driver routes read operations and waits for acknowledgment for\nread and write operations when connected to a MongoDB replica set.\nRead preferences and read concerns apply to all read operations;\nwrite concerns apply to all write operations. MongoDatabase instances inherit their write concern, read concern,\nand write preference settings from the MongoClient used to create\nthem. MongoCollection instances inherit their write concern, read concern,\nand write preference settings from the MongoDatabase used to create\nthem. However, you can use the following methods to obtain an instance\nof a MongoDatabase or MongoCollection with a read preference,\nread concern, or write concern that differs from the setting they would\nnormally inherit: For more information on these topics, see the following pages in the\nServer manual: MongoDatabase.withReadConcern() MongoDatabase.withReadPreference() MongoDatabase.withWriteConcern() MongoCollection.withReadConcern() MongoCollection.withReadPreference() MongoCollection.withWriteConcern() The withReadConcern() , withReadPreference() , and\n withWriteConcern methods create a new instance of a\n MongoDatabase or MongoCollection with the desired preference\nor concern. The MongoDatabase or MongoCollection upon which\nthe method is called retains its original preference and concern\nsettings. Read Preference Read Concern Write Concern", + "code": [ + { + "lang": "kotlin", + "value": "val database = client.getDatabase(\"testDatabase\")\n" + }, + { + "lang": "kotlin", + "value": "data class ExampleDataClass(\n @BsonId val id: ObjectId = ObjectId(),\n val exampleProperty: String,\n)\n" + }, + { + "lang": "kotlin", + "value": "val collection = database.getCollection(\"testCollection\")\n" + }, + { + "lang": "kotlin", + "value": "data class Fruit(\n @BsonId val id: Int,\n val name: String,\n val qty: Int,\n val seasons: List\n)\n" + }, + { + "lang": "kotlin", + "value": "val collection =\n database.getCollection(\"fruits\")\n\n// Define a data class for returned documents\ndata class NewFruit(\n @BsonId val id: Int,\n val name: String,\n val quantity: Int,\n val seasons: List\n)\n\nval filter = Filters.eq(Fruit::name.name, \"strawberry\")\nval update = Updates.combine(\n Updates.rename(Fruit::qty.name, \"quantity\"),\n Updates.push(Fruit::seasons.name, \"fall\"),\n)\nval options = FindOneAndUpdateOptions()\n .returnDocument(ReturnDocument.AFTER)\n\n// Specify the class for returned documents as the type parameter in withDocumentClass()\nval result = collection\n .withDocumentClass()\n .findOneAndUpdate(filter, update, options)\nprintln(result)\n" + }, + { + "lang": "console", + "value": "NewFruit(id=1, name=strawberry, quantity=205, seasons=[summer, fall])" + }, + { + "lang": "kotlin", + "value": "database.createCollection(\"exampleCollection\")\n" + }, + { + "lang": "kotlin", + "value": "val collOptions: ValidationOptions = ValidationOptions().validator(\n Filters.or(\n Filters.exists(\"title\"),\n Filters.exists(\"name\")\n )\n)\ndatabase.createCollection(\n \"movies\",\n CreateCollectionOptions().validationOptions(collOptions)\n)\n" + }, + { + "lang": "kotlin", + "value": "val collectionList = database.listCollectionNames().toList()\n\nprintln(collectionList)\n" + }, + { + "lang": "console", + "value": "[movies, exampleCollection]" + }, + { + "lang": "kotlin", + "value": "val collection =\n database.getCollection(\"movies\")\ncollection.drop()\n" + } + ], + "preview": "In this guide, you 
can learn how to use MongoDB databases and\ncollections with the MongoDB Kotlin driver.", + "tags": null, + "facets": { + "target_product": ["drivers"], + "programming_language": ["kotlin"] + } + }, + { + "slug": "fundamentals/encrypt-fields", + "title": "In-Use Encryption", + "headings": [ + "Overview", + "Queryable Encryption", + "Client-side Field Level Encryption" + ], + "paragraphs": "You can use the Kotlin driver to encrypt specific document fields by using a\nset of features called in-use encryption . In-use encryption allows\nyour application to encrypt data before sending it to MongoDB\nand query documents with encrypted fields. In-use encryption prevents unauthorized users from viewing plaintext\ndata as it is sent to MongoDB or while it is in an encrypted database. To\nenable in-use encryption in an application and authorize it to decrypt\ndata, you must create encryption keys that only your application can\naccess. Only applications that have access to your encryption\nkeys can access the decrypted, plaintext data. If an attacker gains\naccess to the database, they can only see the encrypted ciphertext data\nbecause they lack access to the encryption keys. You might use in-use encryption to encrypt fields in your MongoDB\ndocuments that contain the following types of sensitive data: MongoDB offers the following features to enable in-use encryption: Credit card numbers Addresses Health information Financial information Any other sensitive or personally identifiable information (PII) Queryable Encryption Client-side Field Level Encryption Queryable Encryption is the next-generation in-use encryption feature,\nfirst introduced as a preview feature in MongoDB Server version 6.0 and\nas a generally available (GA) feature in MongoDB 7.0. Queryable\nEncryption supports searching encrypted fields for equality and encrypts\neach value uniquely. To learn more about Queryable Encryption, see Queryable\nEncryption in the Server manual. The implementation of Queryable Encryption in MongoDB 6.0 is incompatible with the GA version introduced in MongoDB 7.0. The Queryable Encryption preview feature is no longer supported. Client-side Field Level Encryption (CSFLE) was introduced in MongoDB\nServer version 4.2 and supports searching encrypted fields for equality.\nCSFLE differs from Queryable Encryption in that you can select either a\ndeterministic or random encryption algorithm to encrypt fields. You can only\nquery encrypted fields that use a deterministic encryption algorithm when\nusing CSFLE. When you use a random encryption algorithm to encrypt\nfields in CSFLE, they can be decrypted, but you cannot perform equality\nqueries on those fields. When you use Queryable Encryption, you cannot\nspecify the encryption algorithm, but you can query all encrypted\nfields. When you deterministically encrypt a value, the same input value\nproduces the same output value. While deterministic encryption allows\nyou to perform queries on those encrypted fields, encrypted data with\nlow cardinality is susceptible to code breaking by frequency analysis. To learn more about CSFLE, see CSFLE in the\nServer manual. To learn more about these concepts, see the following Wikipedia\nentries: Cardinality Frequency Analysis", + "code": [], + "preview": "You can use the Kotlin driver to encrypt specific document fields by using a\nset of features called in-use encryption. 
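The in-use encryption entry above carries an empty code array. As a hedged sketch of only the general shape of enabling automatic in-use encryption — the local test key, key vault namespace, and URI are illustrative, and a working setup additionally needs the mongodb-crypt dependency plus the crypt_shared library or mongocryptd:

    import com.mongodb.AutoEncryptionSettings
    import com.mongodb.ConnectionString
    import com.mongodb.MongoClientSettings
    import com.mongodb.kotlin.client.coroutine.MongoClient
    import java.security.SecureRandom

    // Illustrative local master key; production deployments should use a
    // remote KMS provider rather than the "local" provider.
    val localMasterKey = ByteArray(96).also { SecureRandom().nextBytes(it) }
    val kmsProviders = mapOf("local" to mapOf<String, Any>("key" to localMasterKey))

    val autoEncryptionSettings = AutoEncryptionSettings.builder()
        .keyVaultNamespace("encryption.__keyVault")
        .kmsProviders(kmsProviders)
        .build()

    val encryptedClient = MongoClient.create(
        MongoClientSettings.builder()
            .applyConnectionString(ConnectionString("<connection string>"))
            .autoEncryptionSettings(autoEncryptionSettings)
            .build()
    )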
In-use encryption allows\nyour application to encrypt data before sending it to MongoDB\nand query documents with encrypted fields.", + "tags": null, + "facets": { + "target_product": ["drivers"], + "programming_language": ["kotlin"] + } + }, + { + "slug": "fundamentals/enterprise-auth", + "title": "Enterprise Authentication Mechanisms", + "headings": [ + "Overview", + "Specify an Authentication Mechanism", + "Mechanisms", + "Kerberos (GSSAPI)", + "LDAP (PLAIN)", + "MONGODB-OIDC", + "Azure IMDS", + "GCP IMDS", + "Custom Callback" + ], + "paragraphs": "In this guide, you can learn how to authenticate with MongoDB using each\n authentication mechanism available exclusively in the MongoDB Enterprise\nEdition. You can use the following mechanisms with the latest version of MongoDB\nEnterprise Edition: Authentication Mechanisms guide . For more\ninformation on establishing a connection to your MongoDB cluster, read our\n Connection Guide . Kerberos (GSSAPI) LDAP (PLAIN) MONGODB-OIDC You can specify your authentication mechanism and credentials when connecting\nto MongoDB using either of the following: A connection string (also known as a connection URI ) specifies how to\nconnect and authenticate to your MongoDB cluster. To authenticate using a connection string, include your settings in your\nconnection string and pass it to the MongoClient.create() method to\ninstantiate your MongoClient . The Connection String \ntab in each section provides the syntax for authenticating using a\n connection string . Alternatively, you can use the MongoCredential class to specify your\nauthentication details. The MongoCredential class contains static factory\nmethods that construct instances containing your authentication mechanism and\ncredentials. When you use the MongoCredential helper class, you need\nto use the MongoClientSettings.Builder class to configure your\nconnection settings when constructing your MongoClient . The\n MongoCredential tab in each section provides the syntax for\nauthenticating using a MongoCredential . For more information on these classes and methods, refer to the following API\ndocumentation: A connection string A MongoCredential factory method MongoClient.create() MongoClient MongoClientSettings.Builder MongoCredential The Generic Security Services API ( GSSAPI ) authentication mechanism\nallows the user to authenticate to a Kerberos service using the user's\nprincipal name. The following code snippets show how to specify the authentication mechanism,\nusing the following placeholders: Select the Connection String or the MongoCredential \ntab below for instructions and sample code for specifying this authentication\nmechanism: In order to acquire a\n Kerberos ticket ,\nthe GSSAPI Java libraries require you to specify the realm and Key Distribution\nCenter (KDC) system properties. See the sample settings in the following example: You may need to specify one or more of the following additional\n MongoCredential mechanism properties depending on your Kerberos setup: By default, the Kotlin driver caches Kerberos tickets by MongoClient instance.\nIf your deployment needs to frequently create and destroy MongoClient instances,\nyou can change the default Kerberos ticket caching behavior to cache by process\nto improve performance. The method refers to the GSSAPI authentication mechanism instead\nof Kerberos because the driver authenticates using the\n GSSAPI RFC-4652 SASL\nmechanism. Kerberos principal - your URL-encoded principal name, e.g. 
\"username%40REALM.ME\" hostname - network address of your MongoDB server, accessible by your client port - port number of your MongoDB server To specify the GSSAPI authentication mechanism using a connection\nstring: Your code to instantiate a MongoClient should resemble the following: Assign the authMechanism URL parameter to the value GSSAPI (optional) Assign the authSource URL parameter to the value $external If you specify the GSSAPI mechanism, you cannot assign\n authSource to any value other than $external . To specify the GSSAPI authentication mechanism using the\n MongoCredential class, use the createGSSAPICredential() \nmethod. Your code to instantiate a MongoClient should resemble the following: SERVICE_NAME CANONICALIZE_HOST_NAME JAVA_SUBJECT JAVA_SASL_CLIENT_PROPERTIES JAVA_SUBJECT_PROVIDER To specify one of the GSSAPI additional properties, include it in the\nconnection string as a URL parameter using the format:\n : . Your code to instantiate a MongoClient using GSSAPI and additional\nproperties might resemble the following: You can only specify the following GSSAPI properties using the\n MongoCredential : Select the MongoCredential tab to see how to specify\nthem. JAVA_SUBJECT JAVA_SASL_CLIENT_PROPERTIES JAVA_SUBJECT_PROVIDER To specify one of the GSSAPI additional properties, call the\n withMechanismProperty() method on your MongoCredential \ninstance and pass the property name and value as parameters. Use the\nproperty name constants defined in the MongoCredential class: Select the SERVICE_NAME_KEY or JAVA_SUBJECT_KEY tab to\nsee sample code to instantiate a MongoCredential that uses GSSAPI and\nthe selected property: SERVICE_NAME_KEY CANONICALIZE_HOST_NAME_KEY JAVA_SUBJECT_KEY JAVA_SASL_CLIENT_PROPERTIES_KEY JAVA_SUBJECT_PROVIDER_KEY To cache Kerberos tickets by process, you must use the MongoCredential authentication\nmechanism, as the connection string authentication mechanism does not support the JAVA_SUBJECT_PROVIDER \nmechanism property. If you would like to cache Kerberos tickets by process, select the MongoCredential \ntab to learn how to accomplish this. To cache Kerberos tickets by process, you must specify the JAVA_SUBJECT_PROVIDER \nmechanism property and provide a\n KerberosSubjectProvider \nin your MongoCredential instance. The code to configure the Kotlin driver to cache Kerberos tickets\nby process should resemble the following: On Windows, Oracle\u2019s JRE uses LSA \nrather than SSPI \nin its implementation of GSSAPI which limits interoperability with\nWindows Active Directory and implementations of single sign-on. See the\nfollowing articles for more information: JDK-8054026 JDK-6722928 SO 23427343 Available in MongoDB Enterprise Edition 3.4 and later. You can authenticate to a Lightweight Directory Access Protocol (LDAP)\nserver using your directory server username and password. You can specify this authentication mechanism by setting the authMechanism \nparameter to PLAIN and including your LDAP username and password in the\n connection string . The following code snippets show how to specify the authentication mechanism,\nusing the following placeholders: Select the Connection String or the MongoCredential \ntab below for instructions and sample code for specifying this authentication\nmechanism: The authentication mechanism is named PLAIN instead of LDAP since it\nauthenticates using the PLAIN Simple Authentication and Security Layer\n(SASL) defined in RFC-4616 . 
LDAP username - your LDAP username password - your LDAP user's password hostname - network address of your MongoDB server, accessible by your client port - port number of your MongoDB server To specify the LDAP (PLAIN) authentication mechanism using a connection\nstring: Your code to instantiate a MongoClient should resemble the following: Assign the authMechanism URL parameter to the value PLAIN (optional) Assign the authSource URL parameter to the value $external If you specify the PLAIN mechanism, you cannot assign\n authSource to any value other than $external . To specify the LDAP (PLAIN) authentication mechanism using the\n MongoCredential class, use the createPlainCredential() \nmethod. Your code to instantiate a MongoClient should resemble the following: The following sections describe how to use the MONGODB-OIDC\nauthentication mechanism to authenticate to various platforms. For more information about the MONGODB-OIDC authentication mechanism, see\n OpenID Connect Authentication and\n MongoDB Server Parameters \nin the MongoDB Server manual. The MONGODB-OIDC authentication mechanism requires MongoDB server v7.0 or later running\non a Linux platform. If your application runs on an Azure VM, or otherwise uses the\n Azure Instance Metadata Service \n(IMDS), you can authenticate to MongoDB by using the Kotlin driver's built-in Azure\nsupport. You can specify Azure IMDS OIDC authentication either by\nusing a MongoCredential instance or by specifying your credentials\nin the connection string. Select from the Connection String or MongoCredential tabs to\nsee the corresponding syntax. Replace the placeholder in the\nfollowing code with the percent-encoded value of the audience server\nparameter configured on your MongoDB deployment. The comma ( , ) character and its encoding ( %2C ) are\nreserved, and using these characters in a value causes the\ndriver to interpret commas as delimiters of key-value pairs.\nYou must specify values that contain commas in a MongoCredential instance, as\ndemonstrated in the MongoCredential tab. Replace the placeholder with the client ID or application ID of the\nAzure managed identity or enterprise application. Replace the \nplaceholder with the value of the\n audience server parameter configured on your MongoDB deployment. If your application runs on a Google Compute Engine VM, or otherwise uses the\n GCP Instance Metadata Service ,\nyou can authenticate to MongoDB by using the Kotlin driver's built-in GCP\nsupport. You can specify GCP IMDS OIDC authentication either by\nusing a MongoCredential instance or by specifying your credentials\nin the connection string. Select from the Connection String or MongoCredential tabs to\nsee the corresponding syntax. Replace the placeholder in the\nfollowing code with the percent-encoded value of the audience server\nparameter configured on your MongoDB deployment. The comma ( , ) character and its encoding ( %2C ) are\nreserved, and using these characters in a value causes the\ndriver to interpret commas as delimiters of key-value pairs.\nYou must specify values that contain commas in a MongoCredential instance, as\ndemonstrated in the MongoCredential tab. Replace the placeholder with the value of the\n audience server parameter configured on your MongoDB deployment. The Kotlin driver doesn't offer built-in support for all platforms, including\nAzure Functions and Azure Kubernetes Service (AKS). 
Instead, you\nmust define a custom callback to use OIDC to authenticate from these platforms.\nTo do so, use the \"OIDC_CALLBACK\" authentication property, as shown in the following\ncode example: The value of the \"OIDC_CALLBACK\" property must be a lambda or other implementation\nof the OidcCallback functional interface that accepts an OidcCallbackContext \nas a parameter and returns an OidcCallbackResult . The following example uses an example callback to retrieve an OIDC token from a file\nnamed \"access-token.dat\" in the local file system:", + "code": [ + { + "lang": "none", + "value": "java.security.krb5.realm=MYREALM.ME\njava.security.krb5.kdc=mykdc.myrealm.me" + }, + { + "lang": "kotlin", + "value": "val connectionString = ConnectionString(\"@:/?authSource=$external&authMechanism=GSSAPI\")\nval mongoClient = MongoClient.create(connectionString)\n" + }, + { + "lang": "kotlin", + "value": "val credential = MongoCredential.createGSSAPICredential(\"\")\n\nval settings = MongoClientSettings.builder()\n .applyToClusterSettings { builder ->\n builder.hosts(listOf(ServerAddress(\"\", )))\n }\n .credential(credential)\n .build()\n\nval mongoClient = MongoClient.create(settings)\n" + }, + { + "lang": "kotlin", + "value": "val connectionString = ConnectionString(\"@:/?authSource=$external&authMechanism=GSSAPI&authMechanismProperties=SERVICE_NAME:myService\")\nval mongoClient = MongoClient.create(connectionString)\n" + }, + { + "lang": "kotlin", + "value": "val credential = MongoCredential.createGSSAPICredential(\"\")\n .withMechanismProperty(MongoCredential.SERVICE_NAME_KEY, \"myService\")\n" + }, + { + "lang": "kotlin", + "value": "val loginContext = LoginContext(\"\")\nloginContext.login()\nval subject: Subject = loginContext.subject\n\nval credential = MongoCredential.createGSSAPICredential(\"\")\n .withMechanismProperty(MongoCredential.JAVA_SUBJECT_KEY, subject)\n" + }, + { + "lang": "kotlin", + "value": "/* All MongoClient instances sharing this instance of KerberosSubjectProvider\nwill share a Kerberos ticket cache */\nval myLoginContext = \"myContext\"\n/* Login context defaults to \"com.sun.security.jgss.krb5.initiate\"\nif unspecified in KerberosSubjectProvider */\nval credential = MongoCredential.createGSSAPICredential(\"\")\n .withMechanismProperty(\n MongoCredential.JAVA_SUBJECT_PROVIDER_KEY,\n KerberosSubjectProvider(myLoginContext)\n )\n" + }, + { + "lang": "kotlin", + "value": "val connectionString = ConnectionString(\":@:/?authSource=$external&authMechanism=PLAIN\")\nval mongoClient = MongoClient.create(connectionString)\n" + }, + { + "lang": "kotlin", + "value": "val credential = MongoCredential.createPlainCredential(\"\", \"$external\", \"\".toCharArray())\n\nval settings = MongoClientSettings.builder()\n .applyToClusterSettings { builder ->\n builder.hosts(listOf(ServerAddress(\"\", )))\n }\n .credential(credential)\n .build()\n\nval mongoClient = MongoClient.create(settings)\n" + }, + { + "lang": "kotlin", + "value": "val connectionString = ConnectionString(\n \"mongodb://@:/?\" +\n \"?authMechanism=MONGODB-OIDC\" +\n \"&authMechanismProperties=ENVIRONMENT:azure,TOKEN_RESOURCE:\")\nval mongoClient = MongoClient.create(connectionString)\n" + }, + { + "lang": "kotlin", + "value": "val credential = MongoCredential.createOidcCredential(\"\")\n .withMechanismProperty(\"ENVIRONMENT\", \"azure\")\n .withMechanismProperty(\"TOKEN_RESOURCE\", \"\")\n\nval mongoClient = MongoClient.create(\n MongoClientSettings.builder()\n .applyToClusterSettings { builder ->\n 
builder.hosts(listOf(ServerAddress(\"\", )))\n }\n .credential(credential)\n .build())\n" + }, + { + "lang": "kotlin", + "value": "val connectionString = ConnectionString(\n \"mongodb://@:/?\" +\n \"authMechanism=MONGODB-OIDC\" +\n \"&authMechanismProperties=ENVIRONMENT:gcp,TOKEN_RESOURCE:\")\nval mongoClient = MongoClient.create(connectionString)\n" + }, + { + "lang": "kotlin", + "value": "val credential = MongoCredential.createOidcCredential(\"\")\n .withMechanismProperty(\"ENVIRONMENT\", \"gcp\")\n .withMechanismProperty(\"TOKEN_RESOURCE\", \"\")\n\nval mongoClient = MongoClient.create(\n MongoClientSettings.builder()\n .applyToClusterSettings { builder ->\n builder.hosts(listOf(ServerAddress(\"\", )))\n }\n .credential(credential)\n .build())\n" + }, + { + "lang": "kotlin", + "value": "val credential = MongoCredential.createOidcCredential(null)\n .withMechanismProperty(\"OIDC_CALLBACK\") { context: Context ->\n val accessToken = \"...\"\n OidcCallbackResult(accessToken)\n }\n" + }, + { + "lang": "kotlin", + "value": "val credential = MongoCredential.createOidcCredential(null)\n .withMechanismProperty(\"OIDC_CALLBACK\") { context: Context ->\n val accessToken = String(Files.readAllBytes(Paths.get(\"access-token.dat\")))\n OidcCallbackResult(accessToken)\n }\n\nval mongoClient = MongoClient.create(\n MongoClientSettings.builder()\n .applyToClusterSettings { builder ->\n builder.hosts(listOf(ServerAddress(\"\", )))\n }\n .credential(credential)\n .build()\n)\n" + } + ], + "preview": "In this guide, you can learn how to authenticate with MongoDB using each\nauthentication mechanism available exclusively in the MongoDB Enterprise\nEdition.", + "tags": "ldap, encryption, principal, tls", + "facets": { + "genre": ["reference"], + "target_product": ["drivers"], + "programming_language": ["kotlin"] + } + }, + { + "slug": "fundamentals/indexes", + "title": "Indexes", + "headings": [ + "Overview", + "Query Coverage and Performance", + "Operational Considerations", + "Index Types", + "Single Field and Compound Indexes", + "Single Field Indexes", + "Compound Indexes", + "Multikey Indexes (Indexes on Array Fields)", + "Atlas Search and Vector Search Indexes", + "Create a Search Index", + "List Search Indexes", + "Update a Search Index", + "Drop a Search Index", + "Text Indexes", + "Single Field", + "Multiple Fields", + "Geospatial Indexes", + "Unique Indexes", + "Clustered Indexes", + "Remove an Index", + "Remove an Index Using an Index Specification Document", + "Remove an Index Using a Name Field", + "Remove an Index Using a Wildcard Character" + ], + "paragraphs": "In this guide, you can learn how to create and manage indexes by\nusing the MongoDB Kotlin Driver. Indexes support the efficient execution of queries in MongoDB. Without\nindexes, MongoDB must scan every document in a collection (a\n collection scan ) to find the documents that match each query. These\ncollection scans are slow and can negatively affect the performance of\nyour application. If an appropriate index exists for a query, MongoDB\ncan use the index to limit the documents that the query must inspect. Indexes also have the following benefits: To learn more, see Indexes in the Server manual. Indexes allow efficient sorting. Indexes enable special capabilities such as geospatial queries . Indexes allow the creation of constraints to ensure a field value is unique . 
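The indexes overview above lists what indexes enable; the concrete pattern the rest of the entry builds on is a createIndex() call with the Indexes builder. A minimal sketch of that call shape, assuming moviesCollection is the typed collection handle used throughout the entry (the entry's own snippets appear later in its code array):

    import com.mongodb.client.model.Indexes
    import kotlinx.coroutines.runBlocking

    fun main() = runBlocking {
        // createIndex() returns the generated index name, e.g. "title_1".
        val indexName = moviesCollection.createIndex(Indexes.ascending("title"))
        println("Index created: $indexName")
    }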
Update operations use indexes when finding documents to update, and\ndelete operations use indexes when finding documents to delete.\n Certain stages in\nthe aggregation pipeline also use indexes to improve performance. When you execute a query against MongoDB, your command can include various elements: When all the fields specified in the query, projection, and sort are in the same index, MongoDB returns results directly\nfrom the index, also called a covered query . For more information on how to ensure your index covers your query criteria and projection, see the Server manual\narticles on query coverage . Query criteria that specify fields and values you are looking for Options that affect the query's execution, such as the read concern Projection criteria to specify the fields MongoDB returns (optional) Sort criteria to specify the order of documents returned from MongoDB (optional) Sort criteria must match or invert the order of the index. Consider an index on the field name in ascending order (A-Z) and age in descending order (9-0): MongoDB uses this index when you sort your data in either of the\nfollowing ways: Specifying a sort order of name and age ascending or name and age \ndescending requires an in-memory sort. name ascending, age descending name descending, age ascending The following guidelines describe how you can optimize the way\nyour application uses indexes: Since MongoDB supports dynamic schemas, applications can query against fields whose names cannot be known in advance or\nare arbitrary. MongoDB 4.2 introduced wildcard indexes to help support these queries.\nWildcard indexes are not designed to replace workload-based index planning. For more information on designing your data model and choosing indexes appropriate for your application, see the MongoDB\nserver documentation on Indexing Strategies and\n Data Modeling and Indexes . To improve query performance, build indexes on fields that appear often in\nyour application's queries and operations that return sorted results. Track index memory and disk usage for capacity planning, because each\nindex that you add consumes disk space and memory when active. Avoid adding indexes that you infrequently use. Note that when a write\noperation updates an indexed field, MongoDB updates the related index. MongoDB supports several different index types to support querying your data. The following sections describe the\nmost common index types and provide sample code for creating each index type. For a full list of index types, see\n Indexes in the Server manual. The following examples use the\n createIndex() \nmethod to create various indexes, and the following data classes to model data\nin MongoDB: The Kotlin driver provides the Indexes \nclass to create and manage indexes. This class includes static\nfactory methods to create index specification documents for different\nMongoDB index key types. Single field indexes are indexes with a reference to a single field within a collection's\ndocuments. They improve single field query and sort performance, and support TTL Indexes that\nautomatically remove documents from a collection after a certain amount of time or at a specific clock time. The following example creates an index in ascending order on the title field: The following is an example of a query that is covered by the index\ncreated in the preceding code snippet: See the MongoDB server manual section on single field indexes for more information. The _id_ index is an example of a single field index. 
This index is automatically created on the _id field\nwhen a new collection is created. Compound indexes hold references to multiple fields within a collection's documents,\nimproving query and sort performance. The following example creates a compound index on the type and rated fields: The following is an example of a query that is covered by the index\ncreated in the preceding code snippet: See the MongoDB server manual section on Compound indexes for more information. Read more about compound indexes, index prefixes , and sort order here . Multikey indexes are indexes that improve performance for queries that specify a field with an index that contains\nan array value. You can define a multikey index using the same syntax as a single field or compound index. The following example creates a compound, multikey index on the rated , genres (an array of\nStrings), and title fields: The following is an example of a query that is covered by the index\ncreated in the preceding code snippet: Multikey indexes behave differently from other indexes in terms of query coverage, index-bound computation, and\nsort behavior. To learn more about multikey indexes, including a discussion of their behavior and limitations,\nsee Multikey Indexes in the Server manual. You can programmatically manage your Atlas Search and Atlas Vector\nSearch indexes by using the Kotlin driver. The Atlas Search feature enables you to perform full-text searches on\ncollections hosted on MongoDB Atlas. To learn more about MongoDB Atlas\nSearch, see the Atlas Search Indexes documentation. Atlas Vector Search enables you to perform semantic searches on vector\nembeddings stored in MongoDB Atlas. To learn more about Atlas Vector Search, see the\n Atlas Vector Search section in the Aggregates Builder guide. You can call the following methods on a collection to manage your Atlas\nSearch and Vector Search indexes: The following sections provide code examples that demonstrate how to use\neach of the preceding methods. createSearchIndex() createSearchIndexes() listSearchIndexes() updateSearchIndex() dropSearchIndex() The Atlas Search index-management methods run asynchronously. The\ndriver methods can return before confirming that they ran\nsuccessfully. To determine the current status of the indexes, call the\n listSearchIndexes() method. You can use the createSearchIndex() \nand createSearchIndexes() \nmethods to create Atlas Search and Vector Search indexes on a\ncollection. The following code example shows how to create an Atlas Search index: The following code example shows how to create Search and\nVector Search indexes in one call: You can use the\n listSearchIndexes() \nmethod to return a list of the Atlas Search indexes on a collection. The following code example shows how to print a list of the search indexes on\na collection: You can use the\n updateSearchIndex() \nmethod to update an Atlas Search index. The following code shows how to update a search index: You can use the\n dropSearchIndex() \nmethod to remove an Atlas Search index. The following code shows how to delete a search index from a collection: Text indexes support text search queries on string content. These indexes can include any field whose value is a\nstring or an array of string elements. MongoDB supports text search for various languages. You can specify the default\nlanguage as an option when creating the index. MongoDB offers an improved full-text search solution,\n Atlas Search . 
To learn more about Atlas Search\nindexes and how to use them, see the Atlas Search and Vector Search Indexes section of this\nguide. The following example creates a text index on the plot field: The following is an example of a query that is covered by the index\ncreated in the preceding code snippet. Note that the sort is\nomitted because text indexes do not contain sort order. A collection can only contain one text index. If you want to create a\ntext index for multiple text fields, you must create a compound\nindex. A text search runs on all the text fields within the compound\nindex. The following snippet creates a compound text index for the title and genre \nfields: For more information, see the following Server Manual Entries: Compound Text Index Restrictions Text Indexes MongoDB supports queries of geospatial coordinate data using 2dsphere indexes . With a 2dsphere index, you can query\nthe geospatial data for inclusion, intersection, and proximity. For more information on querying geospatial data, see\n Geospatial Queries in the Server manual. To create a 2dsphere index, you must specify a field that contains\nonly GeoJSON objects . To learn more about this type, see\n GeoJSON objects in the Server manual. The location.geo field in the following sample document from the theaters collection in the sample_mflix \ndatabase is a GeoJSON Point object that describes the coordinates of the theater: The following example creates a 2dsphere index on the location.geo field: The following is an example of a geospatial query that is covered by the index\ncreated in the preceding code snippet: MongoDB also supports 2d indexes for calculating distances on a\nEuclidean plane and for working with the \"legacy coordinate pairs\"\nsyntax used in MongoDB 2.2 and earlier. To learn more, see\n Geospatial Queries in the Server manual. Attempting to create a geospatial index on a field that is already\ncovered by a geospatial index results in an error. Unique indexes ensure that the indexed fields do not store duplicate values. By default, MongoDB creates a unique index\non the _id field during the creation of a collection. To create a unique index, specify the field or combination of\nfields that you want to prevent duplication on and set the unique option to true . The following example creates a unique, descending index on the theaterId field: Refer to the Unique Indexes page in the MongoDB server manual for more information. If you perform a write operation that stores a duplicate value that\nviolates the unique index, the driver raises a DuplicateKeyException ,\nand MongoDB throws an error resembling the following: Clustered indexes instruct a collection to store documents ordered\nby a key value. To create a clustered index, specify the clustered index\noption with the _id field as the key and the unique field as\n true when you create your collection. The following example creates a clustered index on the _id field in\nthe vendors collection: See the MongoDB server manual sections for more information: Clustered Index Clustered Collections You can remove any unused index except the default unique index on the\n _id field. The following sections show the ways to remove indexes: Using an index specification document Using an indexed name field Using a wildcard character to remove all indexes Pass an index specification document to the dropIndex() method to\nremove an index from a collection. An index specification document is\na Bson instance that specifies the type of index on a\nspecified field. 
The following snippet removes an ascending index on the title field\nin a collection: If you want to drop a text index, you must use the name of the index\ninstead. See the Remove an Index Using a Name Field section for details. Pass the name field of the index to the dropIndex() method to\nremove an index from a collection. If you must find the name of your index, use the listIndexes() \nmethod to see the value of the name fields in your indexes. The following snippet retrieves and prints all the indexes in a\ncollection: If you call listIndex() on a collection that contains a text index,\nthe output might resemble the following: This output tells us the names of the existing indexes are \"_id\" and\n\"title_text\". The following snippet removes the \"title_text\" index from the collection: You cannot remove a single field from a compound text index. You must\ndrop the entire index and create a new one to update the indexed\nfields. Starting with MongoDB 4.2, you can drop all indexes by calling the\n dropIndexes() method on your collection: For prior versions of MongoDB, pass \"*\" as a parameter to your call to\n dropIndex() on your collection: For more information on the methods in this section, see the following API Documentation: dropIndex() dropIndexes()", + "code": [ + { + "lang": "none", + "value": "name_1_age_-1" + }, + { + "lang": "kotlin", + "value": "// Data class for the movies collection\ndata class Movie(\n val title: String,\n val year: Int,\n val cast: List,\n val genres: List,\n val type: String,\n val rated: String,\n val plot: String,\n val fullplot: String,\n)\n\n// Data class for the theaters collection\ndata class Theater(\n val theaterId: Int,\n val location: Location\n) {\n data class Location(\n val address: Address,\n val geo: Point\n ) {\n data class Address(\n val street1: String,\n val city: String,\n val state: String,\n val zipcode: String\n )\n }\n}\n" + }, + { + "lang": "kotlin", + "value": "val resultCreateIndex = moviesCollection.createIndex(Indexes.ascending(Movie::title.name))\nprintln(\"Index created: $resultCreateIndex\")\n" + }, + { + "lang": "console", + "value": "Index created: title_1" + }, + { + "lang": "kotlin", + "value": "val filter = Filters.eq(Movie::title.name, \"The Dark Knight\")\nval sort = Sorts.ascending(Movie::title.name)\nval projection = Projections.fields(\n Projections.include(Movie::title.name),\n Projections.excludeId()\n)\n\ndata class Results(val title: String)\n\nval resultsFlow = moviesCollection.find(filter).sort(sort).projection(projection)\n\nresultsFlow.collect { println(it) }\n" + }, + { + "lang": "kotlin", + "value": "val resultCreateIndex = moviesCollection.createIndex(Indexes.ascending(Movie::type.name, Movie::rated.name))\n\nprintln(\"Index created: $resultCreateIndex\")\n" + }, + { + "lang": "console", + "value": "Index created: type_1_rated_1" + }, + { + "lang": "kotlin", + "value": "val filter = Filters.and(\n Filters.eq(Movie::type.name, \"movie\"),\n Filters.eq(Movie::rated.name, \"G\")\n)\nval sort = Sorts.ascending(Movie::type.name, Movie::rated.name)\nval projection = Projections.fields(\n Projections.include(Movie::type.name, Movie::rated.name),\n Projections.excludeId()\n)\nval resultsFlow = moviesCollection.find(filter).sort(sort).projection(projection)\n\nresultsFlow.collect { println(it) }\n" + }, + { + "lang": "kotlin", + "value": "val resultCreateIndex =\n moviesCollection.createIndex(Indexes.ascending(Movie::rated.name, Movie::genres.name, Movie::title.name))\n\nprintln(\"Index created: 
$resultCreateIndex\")\n" + }, + { + "lang": "console", + "value": "Index created: rated_1_genres_1_title_1" + }, + { + "lang": "kotlin", + "value": "val filter = Filters.and(\n Filters.eq(Movie::genres.name, \"Animation\"),\n Filters.eq(Movie::rated.name, \"G\")\n)\nval sort = Sorts.ascending(Movie::title.name)\nval projection = Projections.fields(\n Projections.include(Movie::title.name, Movie::rated.name),\n Projections.excludeId()\n)\nval resultsFlow = moviesCollection.find(filter).sort(sort).projection(projection)\n\nresultsFlow.collect { println(it) }\n" + }, + { + "lang": "kotlin", + "value": "val searchIdx = Document(\n \"mappings\",\n Document(\"dynamic\", true)\n)\nval resultCreateIndex = moviesCollection.createSearchIndex(\"myIndex\", searchIdx)\n" + }, + { + "lang": "kotlin", + "value": "val searchIdxMdl = SearchIndexModel(\n \"searchIdx\",\n Document(\"analyzer\", \"lucene.standard\").append(\n \"mappings\", Document(\"dynamic\", true)\n ),\n SearchIndexType.search()\n)\n\nval vectorSearchIdxMdl = SearchIndexModel(\n \"vsIdx\",\n Document(\n \"fields\",\n listOf(\n Document(\"type\", \"vector\")\n .append(\"path\", \"embeddings\")\n .append(\"numDimensions\", 1536)\n .append(\"similarity\", \"dotProduct\")\n )\n ),\n SearchIndexType.vectorSearch()\n)\n\nval resultCreateIndexes = moviesCollection.createSearchIndexes(\n listOf(searchIdxMdl, vectorSearchIdxMdl)\n)\n" + }, + { + "lang": "kotlin", + "value": "val searchIndexesList = moviesCollection.listSearchIndexes().toList()\n" + }, + { + "lang": "kotlin", + "value": "moviesCollection.updateSearchIndex(\n \"myIndex\",\n Document(\"analyzer\", \"lucene.simple\").append(\n \"mappings\",\n Document(\"dynamic\", false)\n .append(\n \"fields\",\n Document(\n \"title\",\n Document(\"type\", \"string\")\n )\n )\n )\n)\n" + }, + { + "lang": "kotlin", + "value": "moviesCollection.dropSearchIndex(\"myIndex\");\n" + }, + { + "lang": "kotlin", + "value": "try {\n val resultCreateIndex = moviesCollection.createIndex(Indexes.text(Movie::plot.name))\n println(\"Index created: $resultCreateIndex\")\n} catch (e: MongoCommandException) {\n if (e.errorCodeName == \"IndexOptionsConflict\") {\n println(\"there's an existing text index with different options\")\n }\n}\n" + }, + { + "lang": "console", + "value": "Index created: plot_text" + }, + { + "lang": "kotlin", + "value": "val filter = Filters.text(\"Batman\")\nval projection = Projections.fields(\n Projections.include(Movie::fullplot.name),\n Projections.excludeId()\n)\n\ndata class Results(val fullplot: String)\n\nval resultsFlow = moviesCollection.find(filter).projection(projection)\n\nresultsFlow.collect { println(it) }\n" + }, + { + "lang": "kotlin", + "value": "try {\n val resultCreateIndex = moviesCollection.createIndex(\n Indexes.compoundIndex(\n Indexes.text(Movie::title.name), Indexes.text(Movie::genres.name)\n )\n )\n println(\"Index created: $resultCreateIndex\")\n} catch (e: MongoCommandException) {\n if (e.errorCodeName == \"IndexOptionsConflict\") {\n println(\"there's an existing text index with different options\")\n }\n}\n" + }, + { + "lang": "console", + "value": "Index created: title_text_genre_text" + }, + { + "lang": "javascript", + "value": "{\n \"_id\" : ObjectId(\"59a47286cfa9a3a73e51e75c\"),\n \"theaterId\" : 104,\n \"location\" : {\n \"address\" : {\n \"street1\" : \"5000 W 147th St\",\n \"city\" : \"Hawthorne\",\n \"state\" : \"CA\",\n \"zipcode\" : \"90250\"\n },\n \"geo\" : {\n \"type\" : \"Point\",\n \"coordinates\" : [\n -118.36559,\n 33.897167\n ]\n }\n }\n}" + }, 
+ { + "lang": "kotlin", + "value": "val resultCreateIndex = theatersCollection.createIndex(\n Indexes.geo2dsphere(\"${Theater::location.name}.${Theater.Location::geo.name}\")\n)\n\nprintln(\"Index created: $resultCreateIndex\")\n" + }, + { + "lang": "console", + "value": "Index created: location.geo_2dsphere" + }, + { + "lang": "kotlin", + "value": "// MongoDB Headquarters in New York, NY.\nval refPoint = Point(Position(-73.98456, 40.7612))\nval filter = Filters.near(\n \"${Theater::location.name}.${Theater.Location::geo.name}\",\n refPoint, 1000.0, 0.0\n)\nval resultsFlow = theatersCollection.find(filter)\n\nresultsFlow.collect { println(it) }\n" + }, + { + "lang": "kotlin", + "value": "try {\n val indexOptions = IndexOptions().unique(true)\n val resultCreateIndex = theatersCollection.createIndex(\n Indexes.descending(Theater::theaterId.name), indexOptions\n )\n println(\"Index created: $resultCreateIndex\")\n} catch (e: DuplicateKeyException) {\n println(\"duplicate field values encountered, couldn't create index: \\t${e.message}\")\n}\n" + }, + { + "lang": "console", + "value": "Index created: theaterId_-1" + }, + { + "lang": "none", + "value": "E11000 duplicate key error index" + }, + { + "lang": "kotlin", + "value": "val clusteredIndexOptions = ClusteredIndexOptions(Document(\"_id\", 1), true)\nval createCollectionOptions = CreateCollectionOptions().clusteredIndexOptions(clusteredIndexOptions)\n\ndatabase.createCollection(\"vendors\", createCollectionOptions)\n" + }, + { + "lang": "kotlin", + "value": "moviesCollection.dropIndex(Indexes.ascending(Movie::title.name));\n" + }, + { + "lang": "json", + "value": "{ \"v\": 2, \"key\": {\"_id\": 1}, \"name\": \"_id_\" }\n{ \"v\": 2, \"key\": {\"_fts\": \"text\", \"_ftsx\": 1}, \"name\": \"title_text\", \"weights\": {\"title\": 1},\n\"default_language\": \"english\", \"language_override\": \"language\", \"textIndexVersion\": 3 }" + }, + { + "lang": "kotlin", + "value": "val indexes = moviesCollection.listIndexes()\n\nindexes.collect { println(it.toJson()) }\n" + }, + { + "lang": "kotlin", + "value": "moviesCollection.dropIndex(\"title_text\")\n" + }, + { + "lang": "kotlin", + "value": "moviesCollection.dropIndexes()\n" + }, + { + "lang": "kotlin", + "value": "moviesCollection.dropIndex(\"*\")\n" + } + ], + "preview": "In this guide, you can learn how to create and manage indexes by\nusing the MongoDB Kotlin Driver.", + "tags": "code example, optimization, atlas search", + "facets": { + "genre": ["reference"], + "target_product": ["drivers"], + "programming_language": ["kotlin"] + } + }, + { + "slug": "fundamentals/logging", + "title": "Logging", + "headings": [ + "Overview", + "Set Up a Logger", + "Background", + "Example - Set Up", + "Configure Your Logger", + "Example - Configure", + "Logger Names", + "Example - Names" + ], + "paragraphs": "In this guide, you can learn how to set up and configure a logger in the\nMongoDB Kotlin driver. You will learn how to: This guide shows how to record events in the driver.\nIf you would like to learn how to use information about the activity of the\ndriver in code, consider reading our\n guide on monitoring . Set up a logger using the Simple Logging Facade For Java (SLF4J) Configure the log level of your logger This section gives background on the dependencies necessary to set up a\nlogger and provides an example logger setup. 
The MongoDB Kotlin driver uses the Simple Logging Facade For Java (SLF4J).\nSLF4J allows you to specify your logging framework of choice at deployment time.\nFor more information on SLF4J,\n see the SLF4J documentation . Setting up a logger is optional. When you start your application the MongoDB\nKotlin driver looks for the slf4j-api artifact in your classpath. If the driver\ncan't find the slf4j-api artifact, the driver logs the following warning with\n java.util.logging and disables all further logging: To set up a logger, you must include the following in your project. A binding is a piece of code that connects the slf4j-api artifact with a\nlogging framework. The following example shows how to bind the slf4j-api artifact\nto the two most popular logging frameworks, Log4j2 and Logback. The slf4j-api artifact A logging framework A binding For the most popular logging frameworks, there is often a single binding\nartifact that lists the slf4j-api and the logging framework as\ndependencies. This means that you can set up a logger by adding one artifact\nto your project's dependency list. You will see this in the example below. This example shows how to set up your logger. Click the\ntab corresponding to the logging framework you would like to use in your project. The following versions listed are illustrative rather than a\nsource of truth. You should check the official documentation for SLF4J and\nyour logging framework of choice for guaranteed up-to-date version\ninformation. SLF4J documentation Logback documentation Log4j2 documentation Select the build tool you are using in your project. Once you have included the preceding dependency, connect to your\nMongoDB instance and retrieve a document with the following code: For more information on Logback, see the\n Logback manual . Add the following dependency to your pom.xml file. Add the following dependency to your build.gradle.kts file: The default log level of Logback is DEBUG. To learn how to change your\nLogback logger's log level, see the\n example in the Configure Your Logger section of this page . Select the build tool you are using in your project. Once you have included the preceding dependency, log an error using the\nfollowing code: For more information on Log4j2, see the\n Log4j2 manual . Add the following dependency to your pom.xml file. Add the following dependency to your build.gradle.kts file. The default log level of Log4J2 is ERROR. This means that running\nstandard operations in the MongoDB Kotlin driver will not produce output\nfrom Log4J2 without configuration. To learn how to change your Log4J2\nlogger's log level, see the\n example in the Configure Your Logger section of this page . To configure your logger, you must use the configuration system of the logging\nframework bound to SLF4J. In the following example we show how you can use your logging framework's\nconfiguration system to set your logger's log level . A logger's log level specifies a lower bound for how urgent a message must be\nfor the logger to output that message. This example shows how to configure your logger's log level to INFO.\nSelect the tab corresponding to the logging framework you are using in your\nproject. Specify Logback configurations in a file named logback.xml . Your\n logback.xml file does not have to be in a specific location, but it must\nbe accessible from your classpath. The Logback framework defines the following log levels. 
The\nfollowing lists the log levels, ordered from most urgent to least\nurgent: Set your logback.xml file to the following. To test that your logger configuration was successful, run the following\ncode. For more information on configuring Logback, see the\n the Logback Manual . ERROR WARN INFO DEBUG TRACE Specify Log4j2 configurations in a file named log4j2.xml . Your\n log4j2.xml file does not have to be in a specific location, but it must\nbe accessible from your classpath. The Log4j2 framework defines the following log levels. The following lists the\nlog levels, ordered from most urgent to least urgent: Set your log4j2.xml file to the following. To test that your logger configuration was successful, run the following\ncode. For more information on configuring Log4j2, see the official\n Log4j2 configuration guide . FATAL ERROR WARN INFO DEBUG TRACE ALL Your logger uses logger names to help organize different logging events. Logger\nnames are strings that form a hierarchy. A logger is an ancestor of another logger if\nits name followed by a \".\" is a prefix of the other logger's name. For example,\n \"grandparent\" is an ancestor of \"grandparent.parent\" which is an\nancestor of \"grandparent.parent.child\" . For a concrete example, this is what a logger hierarchy looks like in code. A logger inherits the properties of its ancestor logger and can define\nits own. You can think of this as similar to class inheritance in Kotlin. The MongoDB Kotlin driver defines the following logger names to organize different\nlogging events in the driver. Here are the logger names defined in the driver\nand the logging events they correspond to: org.mongodb.driver.authenticator : authentication org.mongodb.driver.client : events related to MongoClient instances org.mongodb.driver.cluster : monitoring of MongoDB servers org.mongodb.driver.connection : connections and connection pools org.mongodb.driver.connection.tls : TLS/SSL org.mongodb.driver.operation : operations, including logging related to automatic retries org.mongodb.driver.protocol : commands sent to and replies received from MongoDB servers org.mongodb.driver.uri : connection string parsing org.mongodb.driver.management : JMX (Java Management Extensions) This example shows how to change the log level for a specific driver logger.\nWe set the root logger to OFF and the org.mongodb.driver.connection logger to\nINFO. This will cause the application to only log messages related to connecting\nto a MongoDB instance. Select the tab corresponding to the logging framework you are using in your\nproject. Set your logback.xml file to the following. To test that your logger configuration was successful, run the following\ncode: For more information on configuring Logback, see the\n official Logback configuration guide . Set your log4j2.xml file to the following. To test that your logger configuration was successful, run the following\ncode. For more information on configuring Log4j2, see the\n official Log4J2 configuration guide .", + "code": [ + { + "lang": "none", + "value": "WARNING: SLF4J not found on the classpath. 
Logging is disabled for the 'org.mongodb.driver' component" + }, + { + "lang": "xml", + "value": "\n \n ch.qos.logback\n logback-classic\n 1.2.11\n \n" + }, + { + "lang": "kotlin", + "value": "dependencies {\n implementation(\"ch.qos.logback:logback-classic:1.2.11\")\n}" + }, + { + "lang": "kotlin", + "value": "val mongoClient = MongoClient.create(\"\");\nval database = mongoClient.getDatabase(DB_NAME_PLACEHOLDER);\nval collection = database.getCollection(COLLECTION_NAME_PLACEHOLDER);\ncollection.find().firstOrNull()\n" + }, + { + "lang": "console", + "value": "...\n12:14:55.853 [main] DEBUG org.mongodb.driver.connection - Opened connection [connectionId{localValue:3, serverValue:3}] to \n12:14:55.861 [main] DEBUG org.mongodb.driver.protocol.command - Command \"find\" started on database using a connection with driver-generated ID 3 and server-generated ID 3 to . The request ID is 5. Command: {\"find\": \"\", \"filter\": {}, \"limit\": 1, \"singleBatch\": true, \"$db\": \"\", \"lsid\": {\"id\": {\"$binary\": {\"base64\": \"<_id>\", \"subType\": \"04\"}}}, \"$readPreference\": {\"mode\": \"primaryPreferred\"}}\n12:14:55.864 [main] DEBUG org.mongodb.driver.protocol.command - Command \"find\" succeeded in 4.34 ms using a connection with driver-generated ID 3 and server-generated ID 3 to .\", \"firstBatch\": []}, \"ok\": 1.0, \"$clusterTime\": {\"clusterTime\": {\"$timestamp\": {\"t\": 1673778535, \"i\": 1}}, \"signature\": {\"hash\": {\"$binary\": {\"base64\": \"<_id>\", \"subType\": \"00\"}}, \"keyId\": 0}}, \"operationTime\": {\"$timestamp\": {\"t\": 1673778535, \"i\": 1}}}" + }, + { + "lang": "xml", + "value": "\n \n org.apache.logging.log4j\n log4j-slf4j-impl\n 2.17.1\n \n" + }, + { + "lang": "groovy", + "value": "dependencies {\n implementation(\"org.apache.logging.log4j:log4j-slf4j-impl:2.17.1\")\n}" + }, + { + "lang": "kotlin", + "value": "val loggerParent = LoggerFactory.getLogger(\"parent\")\nval loggerChild = LoggerFactory.getLogger(\"parent.child\")\n" + }, + { + "lang": "kotlin", + "value": "val loggerParent = LoggerFactory.getLogger(\"parent\")\nval loggerChild = LoggerFactory.getLogger(\"parent.child\")\n" + }, + { + "lang": "console", + "value": "12:35:00.438 [main] ERROR - Logging an Error" + }, + { + "lang": "xml", + "value": "\n \n \n \n %-4relative [%thread] %-5level %logger{30} - %msg%n\n \n \n \n \n \n \n" + }, + { + "lang": "kotlin", + "value": "val mongoClient = MongoClient.create(\"\");\nval database = mongoClient.getDatabase(DB_NAME_PLACEHOLDER);\nval collection = database.getCollection(COLLECTION_NAME_PLACEHOLDER);\ncollection.find().firstOrNull()\n" + }, + { + "lang": "console", + "value": "...\n1317 [cluster-ClusterId{value='', description='null'}-] INFO org.mongodb.driver.cluster - Discovered replica set primary \n1568 [main] INFO org.mongodb.driver.connection - Opened connection [connectionId{localValue:7, serverValue:}] to " + }, + { + "lang": "xml", + "value": "\n\n \n \n \n \n \n \n \n \n \n \n" + }, + { + "lang": "kotlin", + "value": "val mongoClient = MongoClient.create(\"\");\nval database = mongoClient.getDatabase(DB_NAME_PLACEHOLDER);\nval collection = database.getCollection(COLLECTION_NAME_PLACEHOLDER);\ncollection.find().firstOrNull()\n" + }, + { + "lang": "console", + "value": "...\n10:14:57.633 [cluster-ClusterId{value=, description='null'}-] INFO org.mongodb.driver.cluster - Discovered replica set primary \n10:14:57.790 [main] INFO org.mongodb.driver.connection - Opened connection [connectionId{localValue:7, serverValue:}] to " + }, + { + "lang": 
"kotlin", + "value": "import org.slf4j.LoggerFactory\n" + }, + { + "lang": "kotlin", + "value": "val loggerParent = LoggerFactory.getLogger(\"parent\")\nval loggerChild = LoggerFactory.getLogger(\"parent.child\")\n" + }, + { + "lang": "xml", + "value": "\n \n \n \n %-4relative [%thread] %-5level %logger{30} - %msg%n\n \n \n \n \n \n \n \n" + }, + { + "lang": "kotlin", + "value": "val mongoClient = MongoClient.create(\"\");\nval database = mongoClient.getDatabase(DB_NAME_PLACEHOLDER);\nval collection = database.getCollection(COLLECTION_NAME_PLACEHOLDER);\ncollection.find().firstOrNull()\n" + }, + { + "lang": "console", + "value": "...\n829 [cluster-rtt-ClusterId{value='', description='null'}-] INFO org.mongodb.driver.connection - Opened connection [connectionId{localValue:2, serverValue:}] to \n977 [main] INFO org.mongodb.driver.connection - Opened connection [connectionId{localValue:7, serverValue:}] to " + }, + { + "lang": "xml", + "value": "\n\n \n \n \n \n \n \n \n \n \n \n \n" + }, + { + "lang": "kotlin", + "value": "val mongoClient = MongoClient.create(\"\");\nval database = mongoClient.getDatabase(DB_NAME_PLACEHOLDER);\nval collection = database.getCollection(COLLECTION_NAME_PLACEHOLDER);\ncollection.find().firstOrNull()\n" + }, + { + "lang": "console", + "value": "...\n15:40:23.005 [cluster-ClusterId{value='', description='null'}-] INFO org.mongodb.driver.connection - Opened connection [connectionId{localValue:3, serverValue:}] to \n15:40:23.159 [main] INFO org.mongodb.driver.connection - Opened connection [connectionId{localValue:7, serverValue:}] to " + } + ], + "preview": "In this guide, you can learn how to set up and configure a logger in the\nMongoDB Kotlin driver.", + "tags": null, + "facets": { + "target_product": ["drivers"], + "programming_language": ["kotlin"] + } + }, + { + "slug": "fundamentals/monitoring", + "title": "Monitoring", + "headings": [ + "Overview", + "Monitor Events", + "Command Events", + "Example", + "Server Discovery and Monitoring Events", + "Example", + "Connection Pool Events", + "Example", + "Monitor Connection Pool Events with JMX", + "JMX Support", + "JMX and JConsole Example", + "Include the Driver in Your Distributed Tracing System" + ], + "paragraphs": "In this guide, you can learn how to set up and configure monitoring in the\nMongoDB Kotlin driver. Monitoring is the process of getting information about the activities a running\nprogram performs for use in an application or an application performance\nmanagement library. Monitoring the MongoDB Kotlin driver lets you understand the\ndriver's resource usage and performance, and can help you make informed\ndecisions when designing and debugging your application. In this guide you will learn how to perform these tasks: This guide shows how to use information about the activity of the driver in code.\nIf you would like to learn how to record events in the driver,\nconsider reading our guide on logging . Monitor different types of events in the MongoDB Kotlin driver Monitor connection pool events with Java Management Extensions (JMX) and JConsole To monitor an event , you must register a listener on your MongoClient \ninstance. An event is any action that happens in a running program. The driver includes functionality\nfor listening to a subset of the events that occur when the driver is running. A listener is a class that performs some action when certain events occur.\nA listener's API defines the events it can respond to. Each method of a listener class represents a response to a certain event. 
Each\nmethod receives one argument: an object representing the event the method\nresponds to. The MongoDB Kotlin driver organizes the events it defines into three categories: The following sections show how to monitor each event category. For a full list of the events you can monitor,\n see the event package of the MongoDB Kotlin driver . Command Events Server Discovery and Monitoring Events Connection Pool Events A command event is an event related to a MongoDB database command. Some\nexamples of database commands that produce command events are find ,\n insert , delete , and count . To monitor command events, write a class that implements the\n CommandListener interface and register an instance of that class with your\n MongoClient instance. For more information on MongoDB database commands, see the\n MongoDB manual entry on database commands . The driver does not publish events for commands it calls internally. This\nincludes database commands the driver uses to monitor your cluster and\ncommands related to connection establishment (such as the initial hello \ncommand). As a security measure, the driver redacts the contents of some command events. This\nprotects the sensitive information contained in these command events. For a\nfull list of redacted command events, see the\n MongoDB command logging and monitoring specification . This example shows how to make a counter for database commands. The counter\nkeeps track of the number of times the driver successfully executes each database\ncommand, and prints this information every time a database command finishes. To make a counter, do the following: The following code defines the CommandCounter class which implements the\n CommandListener interface: The following code adds an instance of the CommandCounter class to a\n MongoClientSettings object, and configures a MongoClient instance with the\n MongoClientSettings object. The code then runs some database commands to test the\ncounter. For more information on the classes and methods mentioned in this section, see\nthe following API Documentation: Make a class with counter functionality that implements the CommandListener interface. Add an instance of the new class that implements CommandListener to a MongoClientSettings object. Configure a MongoClient instance with the MongoClientSettings object. CommandListener MongoClientSettings MongoClient CommandStartedEvent CommandSucceededEvent CommandFailedEvent A server discovery and monitoring (SDAM) event is an event related to a change\nin the state of the MongoDB instance or cluster you have connected the driver to. The driver defines nine SDAM events. The driver divides these nine events\nbetween three separate listener interfaces which each listen for three of the\nnine events. Here are the three interfaces and the events they listen for: To monitor a type of SDAM event, write a class that\nimplements one of the three preceding interfaces and register an instance of that\nclass with your MongoClient instance. For a detailed description of each SDAM event, see the MongoDB SDAM monitoring events specification . ClusterListener : topology-related events ServerListener : events related to mongod or mongos processes ServerMonitorListener : heartbeat related events This example shows how to make a listener class that prints a message that lets\nyou know if the driver can write to your MongoDB instance. The following code defines the IsWritable class which implements the\n ClusterListener interface. 
The following code adds an instance of the IsWritable class to a\n MongoClient object. The code then runs a find operation to test the\n IsWritable class. For more information on the classes and methods mentioned in this section, see\nthe following API Documentation: ClusterListener ServerListener ServerMonitorListener MongoClientSettings MongoClient ClusterDescriptionChangedEvent A connection pool event is an event related to a connection pool held by the driver.\nA connection pool is a set of open TCP connections your driver maintains with\na MongoDB instance. Connection pools help reduce the number of network handshakes\nyour application needs to perform with a MongoDB instance, and can help your\napplication run faster. To monitor connection pool events, write a class that implements the\n ConnectionPoolListener interface and register an instance of that class with your\n MongoClient instance. This example shows how to make a listener class that prints a message each time\nyou check out a connection from your connection pool. The following code defines the ConnectionPoolLibrarian class which implements the\n ConnectionPoolListener interface. The following code adds an instance of the ConnectionPoolLibrarian class to a\n MongoClient object. The code then runs a database command to test the\nlibrarian. For more information on the classes and methods mentioned in this section, see\nthe following API Documentation: ConnectionPoolListener MongoClientSettings MongoClient ConnectionCheckedOutEvent ConnectionCheckOutFailedEvent You can monitor connection pool events using Java Management Extensions (JMX) .\nJMX provides tools to monitor applications and devices. For more information on JMX, see\n the official Oracle JMX documentation . To enable JMX connection pool monitoring, add an instance of the\n JMXConnectionPoolListener class to your MongoClient object. The JMXConnectionPoolListener class performs the following actions: MXBeans registered on the platform MBean server have the following properties: All MXBean instances created by the driver are under the domain\n \"org.mongodb.driver\" . For more information on the topics discussed in this subsection, see the\nfollowing resources from Oracle: Creates MXBean instances for each mongod or mongos process the driver\nmaintains a connection pool with. Registers these MXBean instances with the platform MBean server. Property Description clusterId A client-generated unique identifier. This identifier ensures that\neach MXBean the driver makes has a unique name when an application has\nmultiple MongoClient instances connected to the same MongoDB deployment. host The hostname of the machine running the mongod or mongos process. port The port on which the mongod or mongos process is listening. minSize The minimum size of the connection pool, including idle and in-use connections. maxSize The maximum size of the connection pool, including idle and in-use connections. size The current size of the connection pool, including idle and in-use connections. checkedOutCount The current count of connections that are in use. Platform MBean Server Reference Documentation MXBean Documentation MBean Documentation This example shows how you can monitor the driver's connection pools using JMX\nand JConsole . JConsole is a JMX compliant GUI monitoring tool that comes with\nthe Java Platform. The following code snippet adds a JMXConnectionPoolListener to a\n MongoClient instance. 
The code then pauses execution so you can\nnavigate to JConsole and inspect your connection pools. Once you have started your server, open JConsole in your terminal using the\nfollowing command: Once JConsole is open, perform the following actions in the GUI: When you no longer want to inspect your connection pools in JConsole, do the\nfollowing: For more information on JMX and JConsole, see the following resources from\nOracle: For more information on the JMXConnectionPoolListener class, see\nthe API Documentation for\n JMXConnectionPoolListener . The descriptions of JMX and JConsole in this example are illustrative\nrather than a source of truth. For guaranteed up-to-date information, consult\nthe following official Oracle resources: JConsole documentation . JMX documentation Select the process running the preceding example code. Press Insecure Connection in the warning dialog box. Click on the MBeans tab. Inspect your connection pool events under the \"org.mongodb.driver\" domain. Exit JConsole by closing the JConsole window Stop the program running the preceding code snippet JConsole Documentation . Monitoring and Management Guide If you use a distributed tracing system , you can include event data from the\ndriver. A distributed tracing system is an application that\ntracks requests as they propagate throughout different services in a\nservice-oriented architecture. If you use the driver in a Spring Cloud \napplication, use\n Spring Cloud Sleuth to\ninclude MongoDB event data in the\n Zipkin distributed tracing system. If you do not use Spring Cloud or need to include driver event data in a distributed\ntracing system other than Zipkin, you must write a command event listener that\nmanages spans \nfor your desired distributed tracing system. To see an implementation of such a\nlistener, see the Java source code for the\n TraceMongoCommandListener \nclass in the Spring Cloud Sleuth source code. To learn more about Spring Cloud Sleuth, see\n Getting Started \nin the Spring Cloud Sleuth documentation. 
To view a detailed description of a distributed tracing system, see\n Dapper from Google Research.", + "code": [ + { + "lang": "kotlin", + "value": "class CommandCounter : CommandListener {\n private val commands = mutableMapOf()\n\n\n @Synchronized\n override fun commandSucceeded(event: CommandSucceededEvent) {\n val commandName = event.commandName\n val count = commands[commandName] ?: 0\n commands[commandName] = count + 1\n println(commands.toString())\n }\n\n override fun commandFailed(event: CommandFailedEvent) {\n println(\"Failed execution of command '${event.commandName}' with id ${event.requestId}\")\n }\n}\n" + }, + { + "lang": "kotlin", + "value": "val commandCounter = CommandCounter()\n\nval settings = MongoClientSettings.builder()\n .applyConnectionString(URI)\n .addCommandListener(commandCounter)\n .build()\nval mongoClient = MongoClient.create(settings)\nval database = mongoClient.getDatabase(DATABASE)\nval collection = database.getCollection(COLLECTION)\n\n// Run some commands to test the counter\ncollection.find().firstOrNull()\ncollection.find().firstOrNull()\n" + }, + { + "lang": "console", + "value": "{find=1}\n{find=2}\n{find=2, endSessions=1}" + }, + { + "lang": "kotlin", + "value": "class IsWriteable : ClusterListener {\n private var isWritable = false\n\n\n @Synchronized\n override fun clusterDescriptionChanged(event: ClusterDescriptionChangedEvent) {\n if (!isWritable) {\n if (event.newDescription.hasWritableServer()) {\n isWritable = true\n println(\"Able to write to cluster\")\n }\n } else {\n if (!event.newDescription.hasWritableServer()) {\n isWritable = false\n println(\"Unable to write to cluster\")\n }\n }\n }\n}\n" + }, + { + "lang": "kotlin", + "value": "val clusterListener = IsWriteable()\nval settings = MongoClientSettings.builder()\n .applyConnectionString(URI)\n .applyToClusterSettings { builder ->\n builder.addClusterListener(clusterListener)\n }\n .build()\nval mongoClient = MongoClient.create(settings)\nval database = mongoClient.getDatabase(DATABASE)\nval collection = database.getCollection(COLLECTION)\n// Run a command to trigger a ClusterDescriptionChangedEvent event\ncollection.find().firstOrNull()\n" + }, + { + "lang": "console", + "value": "Able to write to server" + }, + { + "lang": "kotlin", + "value": "class ConnectionPoolLibrarian : ConnectionPoolListener {\n\n override fun connectionCheckedOut(event: ConnectionCheckedOutEvent) {\n println(\"Let me get you the connection with id ${event.connectionId.localValue}...\")\n }\n\n override fun connectionCheckOutFailed(event: ConnectionCheckOutFailedEvent) {\n println(\"Something went wrong! Failed to checkout connection.\")\n }\n}\n" + }, + { + "lang": "kotlin", + "value": "val cpListener = ConnectionPoolLibrarian()\nval settings = MongoClientSettings.builder()\n .applyConnectionString(URI)\n .applyToConnectionPoolSettings { builder ->\n builder.addConnectionPoolListener(cpListener)\n }\n .build()\nval mongoClient = MongoClient.create(settings)\nval database = mongoClient.getDatabase(DATABASE)\nval collection = database.getCollection(COLLECTION)\n// Run a command to trigger connection pool events\ncollection.find().firstOrNull()\n" + }, + { + "lang": "console", + "value": "Let me get you the connection with id 21..." 
+ }, + { + "lang": "shell", + "value": "jconsole" + }, + { + "lang": "kotlin", + "value": "val connectionPoolListener = JMXConnectionPoolListener()\nval settings = MongoClientSettings.builder()\n .applyConnectionString(uri)\n .applyToConnectionPoolSettings {\n it.addConnectionPoolListener(connectionPoolListener)\n }\n .build()\nval mongoClient: MongoClient = MongoClient.create(settings)\n\ntry {\n println(\"Navigate to JConsole to see your connection pools...\")\n Thread.sleep(Long.MAX_VALUE)\n} catch (e: Exception) {\n e.printStackTrace()\n}\n" + }, + { + "lang": "console", + "value": "Navigate to JConsole to see your connection pools..." + } + ], + "preview": "In this guide, you can learn how to set up and configure monitoring in the\nMongoDB Kotlin driver.", + "tags": null, + "facets": { + "target_product": ["drivers"], + "programming_language": ["kotlin"] + } + }, + { + "slug": "fundamentals/stable-api", + "title": "Stable API", + "headings": [ + "Overview", + "Enable the Stable API on a MongoDB Client", + "Stable API Options" + ], + "paragraphs": "The Stable API feature requires MongoDB Server 5.0 or later. You should only use the Stable API feature if all the MongoDB\nservers you are connecting to support this feature. In this guide, you can learn how to specify the Stable API when connecting to\na MongoDB instance or replica set. You can use the Stable API feature to\nforce the server to run operations with behavior compatible with the\nspecified API version . An API version defines the expected behavior of the\noperations it covers and the format of server responses. If you change to\na different API version, the operations are not guaranteed to be\ncompatible and the server responses are not guaranteed to be similar. When you use the Stable API feature with an official MongoDB driver, you\ncan update your driver or server without worrying about backward compatibility\nissues of the commands covered by the Stable API. See the MongoDB reference page on the Stable API \nfor more information including a list of commands it covers. The following sections describe how you can enable the Stable API for\nyour MongoDB client and the options that you can specify. To enable the Stable API, you must specify an API version in the settings\nof your MongoDB client. Once you instantiate a MongoClient instance with\na specified API version, all commands you run with that client use that\nversion of the Stable API. The following example shows how you can instantiate a MongoClient that\nsets the Stable API version and connects to a server by performing the\nfollowing operations: For more information on the methods and classes referenced in this\nsection, see the following API Documentation: If you need to run commands using more than one version of the\nStable API, instantiate a separate client with that version. If you need to run commands not covered by the Stable API, make sure the\n\"strict\" option is disabled. See the section on\n Stable API Options for more information. Construct a ServerApi instance using the ServerApi.Builder \nhelper class. Specify a Stable API version using a constant from the\n ServerApiVersion class. Construct a MongoClientSettings instance using the\n MongoClientSettings.Builder class. Specify a server to connect to using a ServerAddress instance. Instantiate a MongoClient using the MongoClient.create() method\nand pass your MongoClientSettings instance as a parameter. 
If you specify an API version and connect to a MongoDB server that does\nnot support the Stable API, your application may raise an exception when\nexecuting a command on your MongoDB server. If you use a MongoClient \nthat specifies the API version to query a server that does not support it,\nyour query could fail with an exception message that includes the\nfollowing text: ServerApi ServerApi.Builder ServerApiVersion ServerAddress MongoClientSettings MongoClientSettings.Builder MongoClient.create() MongoClient You can enable or disable optional behavior related to the Stable API as\ndescribed in the following table. The following example shows how you can set the two options on an instance\nof ServerApi by chaining methods on the ServerApi.Builder : For more information on the options in this section, see the following\nAPI Documentation: Option Name Description Strict DeprecationErrors strict() deprecationErrors()", + "code": [ + { + "lang": "kotlin", + "value": "val serverApi = ServerApi.builder()\n .version(ServerApiVersion.V1)\n .build()\n\n// Replace the uri string placeholder with your MongoDB deployment's connection string\nval uri = \"\"\n\nval settings = MongoClientSettings.builder()\n .applyConnectionString(ConnectionString(uri))\n .serverApi(serverApi)\n .build()\n\nval client = MongoClient.create(settings)\n" + }, + { + "lang": "none", + "value": "'Unrecognized field 'apiVersion' on server..." + }, + { + "lang": "kotlin", + "value": "val serverApi = ServerApi.builder()\n .version(ServerApiVersion.V1)\n .strict(true)\n .deprecationErrors(true)\n .build()\n" + } + ], + "preview": "In this guide, you can learn how to specify the Stable API when connecting to\na MongoDB instance or replica set. You can use the Stable API feature to\nforce the server to run operations with behavior compatible with the\nspecified API version. An API version defines the expected behavior of the\noperations it covers and the format of server responses. If you change to\na different API version, the operations are not guaranteed to be\ncompatible and the server responses are not guaranteed to be similar.", + "tags": null, + "facets": { + "target_product": ["drivers"], + "programming_language": ["kotlin"] + } + }, + { + "slug": "fundamentals/time-series", + "title": "Time Series Collections", + "headings": [ + "Overview", + "Create a Time Series Collection", + "Query a Time Series Collection" + ], + "paragraphs": "In this guide, you can learn about time series collections in\nMongoDB, and how to interact with them in the MongoDB Kotlin driver. Time series collections efficiently store sequences of measurements over\na period of time. Time series data consists of any data collected over\ntime, metadata that describes the measurement, and the time of the\nmeasurement. Example Measurement Metadata Sales Data Revenue Company Infection Rates Amount of People Infected Location To create a time series collection, pass the following parameters to the\n createCollection() \nmethod: To check if you successfully created the collection, send the\n \"listCollections\" command to the runCommand() method. The name of the new collection to create The TimeSeriesOptions \nfor creating the collection in a CreateCollectionOptions object Versions prior to MongoDB 5.0 cannot create a time series collection. To query in a time series collection, use the same conventions as you\nwould for retrieving \nand aggregating data . For more information, see our\n Aggregates Builders guide . 
MongoDB version 5.0 introduces window functions into the aggregation\npipeline. You can use window functions to perform operations on a\ncontiguous span of time series data.", + "code": [ + { + "lang": "kotlin", + "value": "val database = mongoClient.getDatabase(\"fall_weather\")\nval tsOptions = TimeSeriesOptions(\"temperature\")\nval collOptions = CreateCollectionOptions().timeSeriesOptions(tsOptions)\n\ndatabase.createCollection(\"september2021\", collOptions)\n" + }, + { + "lang": "kotlin", + "value": "val commandResult = database.listCollections().toList()\n .find { it[\"name\"] == \"september2021\" }\n\nprintln(commandResult?.toJson(JsonWriterSettings.builder().indent(true).build()))\n" + }, + { + "lang": "json", + "value": "{\n \"name\": \"september2021\",\n \"type\": \"timeseries\",\n \"options\": {\n \"timeseries\": {\n \"timeField\": \"temperature\",\n \"granularity\": \"seconds\",\n \"bucketMaxSpanSeconds\": 3600\n }\n },\n \"info\": {\n \"readOnly\": false\n }\n}" + } + ], + "preview": "In this guide, you can learn about time series collections in\nMongoDB, and how to interact with them in the MongoDB Kotlin driver.", + "tags": null, + "facets": { + "target_product": ["drivers"], + "programming_language": ["kotlin"] + } + }, + { + "slug": "fundamentals", + "title": "Fundamentals", + "headings": [], + "paragraphs": "Learn how to perform the following tasks using the Kotlin driver in the\nFundamentals section: Connect to MongoDB Use the Stable API Authenticate with MongoDB Convert between MongoDB Data Formats and Kotlin Objects Read from and Write to MongoDB Simplify your Code with Builders Transform your Data Create Aggregation Expressions Create Indexes to Speed Up Queries Sort Using Collations Log Events in the Driver Monitor Driver Events Use a Time Series Collection Encrypt Fields in a Document", + "code": [], + "preview": null, + "tags": null, + "facets": { + "target_product": ["drivers"], + "programming_language": ["kotlin"] + } + }, + { + "slug": "", + "title": "MongoDB Kotlin Driver", + "headings": [ + "Introduction", + "Quick Start", + "Quick Reference", + "What's New", + "Usage Examples", + "Fundamentals", + "API Documentation", + "FAQ", + "Connection Troubleshooting", + "Issues & Help", + "Compatibility", + "Migrate from KMongo", + "Validate Driver Artifact Signatures", + "Learn", + "Developer Hub" + ], + "paragraphs": "Welcome to the documentation site for the Kotlin Driver, the official\nMongoDB driver for server-side Kotlin applications that use coroutines.\nDownload the driver by using Maven or Gradle , or set up a runnable project by following our\nQuick Start guide. If your Kotlin application requires synchronous processing, use the\n Sync Driver , which uses synchronous operations\nto make blocking calls to MongoDB. If you are developing an Android or Kotlin Multiplatform (KMP)\napplication, you can use the MongoDB Atlas Device Kotlin SDK \nto access Atlas App Services and to manage your Realm data. Learn how to establish a connection to MongoDB Atlas and begin\nworking with data in the Quick Start section. See driver syntax examples for common MongoDB commands in the\n Quick Reference section. For a list of new features and changes in each version, see the\n What's New section. For fully runnable code snippets and explanations for common\nmethods, see the Usage Examples section. 
Learn how to perform the following tasks using the Kotlin driver in the\nFundamentals section: Connect to MongoDB Use the Stable API Authenticate with MongoDB Convert between MongoDB Data Formats and Kotlin Objects Read from and Write to MongoDB Simplify your Code with Builders Transform your Data Create Aggregation Expressions Create Indexes to Speed Up Queries Sort Using Collations Log Events in the Driver Monitor Driver Events Use a Time Series Collection Encrypt Fields in a Document The MongoDB Kotlin driver API documentation contains several libraries\norganized by functionality. For detailed information about classes and\nmethods in each library, see the following table for their descriptions\nand links to the API documentation. Library Description BSON Base BSON classes BSON Record Codec Classes that support records Core Shared core classes Kotlin Driver API For answers to commonly asked questions about the MongoDB\nKotlin Driver, see the Frequently Asked Questions (FAQ) \nsection. For solutions to some issues you might experience when connecting to a MongoDB\ndeployment while using the MongoDB Kotlin Driver, see the\n Connection Troubleshooting section. Learn how to report bugs, contribute to the driver, and find\nadditional resources for asking questions and receiving help in the\n Issues & Help section. For the compatibility charts that show the recommended Kotlin\nDriver version for each MongoDB Server version, see the\n Compatibility section. Learn about the changes needed to migrate from the\ncommunity-developed KMongo driver to the MongoDB Kotlin Driver in the\n Migrate from KMongo section. Learn about how to validate signatures of Kotlin driver artifacts\npublished on Maven in the Validate Driver Artifact Signatures section. Visit the Developer Hub to learn more about the MongoDB Kotlin driver. The Developer Hub provides tutorials and social engagement for\ndevelopers. To learn how to use MongoDB features with the Kotlin driver, see the\n Kotlin Tutorials and Articles page, which\nfeatures our Getting Started with the MongoDB Kotlin Driver \ndeveloper tutorial. To ask questions and engage in discussions with fellow developers using\nthe Kotlin Driver, visit the MongoDB Developer Community .", + "code": [], + "preview": "Welcome to the documentation site for the Kotlin Driver, the official\nMongoDB driver for server-side Kotlin applications that use coroutines.\nDownload the driver by using Maven or Gradle, or set up a runnable project by following our\nQuick Start guide.", + "tags": null, + "facets": { + "target_product": ["drivers"], + "programming_language": ["kotlin"] + } + }, + { + "slug": "issues-and-help", + "title": "Issues & Help", + "headings": ["Bugs / Feature Requests", "Pull Requests"], + "paragraphs": "We are lucky to have a vibrant MongoDB Kotlin community that includes users\nwith varying levels of experience using the Kotlin driver. We find the quickest\nway to get support for general questions is through the MongoDB Community Forums . Refer to our support channels \ndocumentation for more information. If you think you've found a bug or want to see a new feature in the Kotlin\ndriver, please open a case in our issue management tool, JIRA: If you've identified a security vulnerability in a driver or any other\nMongoDB project, please report it according to the instructions found in the\n Create a Vulnerability Report page . Create an account and login . Navigate to the JAVA project . Click Create . 
Please provide as much information as possible\nabout the issue and the steps to reproduce it. Bug reports in JIRA for the Kotlin driver and the Core Server (i.e. SERVER)\nproject are public. We are happy to accept contributions to help improve the driver. We will guide\nuser contributions to ensure they meet the standards of the codebase. Please\nensure that any pull requests include documentation, tests, and pass the\n gradle checks. To get started check out the source and work on a branch: Finally, ensure that the code passes gradle checks.", + "code": [ + { + "lang": "bash", + "value": "$ git clone https://github.com/mongodb/mongo-java-driver.git\n$ cd mongo-java-driver\n$ git checkout -b myNewFeature" + }, + { + "lang": "bash", + "value": "$ ./gradlew check" + } + ], + "preview": "We are lucky to have a vibrant MongoDB Kotlin community that includes users\nwith varying levels of experience using the Kotlin driver. We find the quickest\nway to get support for general questions is through the MongoDB Community Forums.", + "tags": null, + "facets": { + "target_product": ["drivers"], + "programming_language": ["kotlin"] + } + }, + { + "slug": "migrate-kmongo", + "title": "Migrate from KMongo", + "headings": [ + "Overview", + "Connect to MongoDB Cluster", + "CRUD and Aggregation", + "Construct Queries", + "Data Typing", + "Data Serialization", + "Synchronous and Asynchronous Support", + "What Next?" + ], + "paragraphs": "This page contains a high-level comparison of most of the ways the official\nMongoDB Kotlin and the community-developed KMongo driver differ.\nYou can use this page to identify the changes you need to make to migrate from\nthe deprecated KMongo driver to the official MongoDB Kotlin driver. The MongoDB Kotlin driver is the officially supported and maintained MongoDB driver for\nKotlin. It is developed by the MongoDB team. Although both drivers support synchronous and asynchronous operations ,\nthe examples on this page will use asynchronous coroutine-based operations. KMongo is a popular community-developed library\nfor working with MongoDB from Kotlin applications.\nIt is a wrapper around the Java driver that was created prior to the creation of\nthe official Kotlin driver to serve the needs of the Kotlin community. As of July 2023, KMongo has been marked as deprecated. Both drivers let you connect to and communicate with MongoDB clusters from a\nKotlin application. To connect to a MongoDB cluster using the MongoDB Kotlin driver: See the Connect to MongoDB documentation for more\ninformation. To connect to a MongoDB cluster using KMongo with coroutines: Unlike the MongoDB Kotlin driver, KMongo allows the collection name to be\ninferred from the data class name. Both drivers provide support for all MongoDB CRUD APIs and aggregation\noperations. The MongoDB Kotlin driver also provides functions for all basic CRUD operations: Aggregation pipelines can be built using the aggregate method and the\n pipeline function: See the CRUD Operations and\n Aggregation documentation for more information. KMongo provides functions for all basic CRUD operations: Aggregation pipelines can be built using the aggregate method and the\n pipeline function: For more information on available methods, see the\n Extensions Overview KMongo\ndocumentation. Both drivers provide support for type-safe queries using property references. The MongoDB Kotlin driver uses the Builders API to construct queries.\nAlternatively, you can use the Document class. 
To map a KMongo string query to the Kotlin driver, you can use the JsonObject class. For more information, see the following Kotlin driver documentation: Builders Documents guide JsonObject API Documentation With KMongo, you can create queries using property references on the data class\nthat represents objects in a collection and infix operators that the library\nprovides. KMongo also supports string queries that let you construct queries with\nMongoDB Query Language: For more information, see the following KMongo documentation: Typed Queries Mongo Shell Queries Both drivers support the use of Kotlin data classes as well as the Document class to\nmodel the data stored in a MongoDB collection. The Document \nclass lets you model data represented in a MongoDB collection in a flexible format. You can use data classes and Document classes to model data with the\nMongoDB Kotlin driver: You can use data classes and Document classes to model data in KMongo: Both drivers provide support for serializing and deserializing data objects\nin Kotlin to and from BSON. You can serialize data classes in the Kotlin driver using both automatic\ndata class codecs as well as the kotlinx.serialization library. The\ndriver provides an efficient Bson serializer that handles the\nserialization of Kotlin objects to BSON data. To learn more, see the Kotlin Serialization \ndocumentation. If you use the Document class to represent your collection, you can\nserialize it to JSON and EJSON using the .toJson() method: To learn more about serializing data with the Document class, refer to\n Document Data Format - Extended JSON documentation. You can serialize data in KMongo using the following serialization libraries: To learn more about the KMongo serialization methods, refer to the\n Object Mapping \nKMongo documentation. Jackson (default) POJO Codec engine kotlinx.serialization Both drivers support synchronous and asynchronous operations. The MongoDB Kotlin driver also has separate libraries for synchronous and\nasynchronous operations. However, the Kotlin driver only has built-in support\nfor coroutines as an asynchronous paradigm. The MongoDB Kotlin driver does not\ncurrently provide support for other asynchronous paradigms such as Reactive\nStreams, Reactor, or RxJava2. Unlike KMongo, if you want to write asynchronous code, you only need to import\nthe relevant package. To write synchronous code: To write asynchronous coroutine code: Driver Package Sync Coroutines KMongo has a core library org.litote.kmongo:kmongo with main functionality and\nseparate companion libraries that provide asynchronous support to the core library. KMongo supports the following asynchronous paradigms: To write synchronous code with KMongo: To write async coroutine code with KMongo: To learn more, refer to the Quick Start \nin the KMongo documentation. 
Async Style Package Reactive Streams Coroutines Reactor RxJava2 Now that you have learned about the differences between KMongo and the MongoDB\nKotlin driver, see the Quick Start to get\nstarted using the MongoDB Kotlin driver.", + "code": [ + { + "lang": "kotlin", + "value": "import com.mongodb.kotlin.client.coroutine.MongoClient\n\ndata class Jedi(val name: String, val age: Int)\n\n// Replace the placeholder with your MongoDB deployment's connection string\nval uri = CONNECTION_STRING_URI_PLACEHOLDER\n\nval mongoClient = MongoClient.create(uri)\n\nval database = mongoClient.getDatabase(\"test\")\n// Get a collection of documents of type Jedi\nval collection = database.getCollection(\"jedi\")" + }, + { + "lang": "kotlin", + "value": "import org.litote.kmongo.reactivestreams.*\nimport org.litote.kmongo.coroutine.*\n\ndata class Jedi(val name: String, val age: Int)\n\n// Get new MongoClient instance using coroutine extension\nval client = KMongo.createClient().coroutine\n\nval database = client.getDatabase(\"test\")\n// Get a collection of documents of type Jedi\nval col = database.getCollection()" + }, + { + "lang": "kotlin", + "value": "// Insert a document\n val jedi = Jedi(\"Luke Skywalker\", 19)\n collection.insertOne(jedi)\n\n // Find a document\n val luke = collection.find(Jedi::name.name, \"Luke Skywalker\")\n val jedis = collection.find(lt(Jedi::age.name, 30)).toList()\n\n // Update a document\n val filter = Filters.eq(Jedi::name.name, \"Luke Skywalker\")\n val update = Updates.set(Jedi::age.name, 20)\n collection.updateOne(filter, update)\n\n // Delete a document\n val filter = Filters.eq(Jedi::name.name, \"Luke Skywalker\")\n collection.deleteOne(filter)" + }, + { + "lang": "kotlin", + "value": "data class Results(val avgAge: Double)\n\nval resultsFlow = collection.aggregate(\n listOf(\n Aggregates.match(Filters.ne(Jedi::name.name, \"Luke Skywalker\")),\n Aggregates.group(\"\\$${Jedi::name.name}\",\n Accumulators.avg(\"avgAge\", \"\\$${Jedi::age.name}\"))\n )\n)\nresultsFlow.collect { println(it) }" + }, + { + "lang": "kotlin", + "value": "// Insert a document\nval jedi = Jedi(\"Luke Skywalker\", 19)\ncol.insertOne(jedi)\n\n// Find a document\nval luke = col.findOne(Jedi::name eq \"Luke Skywalker\")\nval jedis = col.find(Jedi::age lt 30).toList()\n\n// Update a document\ncol.updateOne(Jedi::name eq \"Luke Skywalker\", setValue(Jedi::age, 20))\n\n// Delete a document\ncol.deleteOne(Jedi::name eq \"Luke Skywalker\")" + }, + { + "lang": "kotlin", + "value": "val avgAge = collection.aggregate(\n pipeline(\n match(Jedi::name ne \"Luke Skywalker\"),\n group(Jedi::name, avg(Jedi::age))\n )\n).toList()" + }, + { + "lang": "kotlin", + "value": "data class Person(val name: String, val email: String, val gender: String, val age: Int)\ndata class Results(val email: String)\n\nval collection = database.getCollection(\"people\")\n\n// Using Builders\nval filter = and(eq(\"gender\", \"female\"), gt(\"age\", 29))\nval projection = fields(excludeId(), include(\"email\"))\nval results = collection.find(filter).projection(projection)\n\n// Using Document class\nval filter = Document().append(\"gender\", \"female\").append(\"age\", Document().append(\"\\$gt\", 29))\nval projection = Document().append(\"_id\", 0).append(\"email\", 1)\nval results = collection.find(filter).projection(projection)" + }, + { + "lang": "kotlin", + "value": "val query = JsonObject(\"{\\\"name\\\": \\\"Gabriel Garc\\\\u00eda M\\\\u00e1rquez\\\"}\")\nval jsonResult = collection.find(query).firstOrNull()" + }, + { + "lang": 
"kotlin", + "value": "data class Jedi(val name: String)\n\nval yoda = col.findOne(Jedi::name eq \"Yoda\")\n\n// Compile error (2 is not a String)\nval error = col.findOne(Jedi::name eq 2)\n\n// Use property reference with instances\nval yoda2 = col.findOne(yoda::name regex \"Yo.*\")" + }, + { + "lang": "kotlin", + "value": "import org.litote.kmongo.MongoOperator.lt\nimport org.litote.kmongo.MongoOperator.match\nimport org.litote.kmongo.MongoOperator.regex\nimport org.litote.kmongo.MongoOperator.sample\n\nval yoda = col.findOne(\"{name: {$regex: 'Yo.*'}}\")!!\nval luke = col.aggregate(\"\"\"[ {$match:{age:{$lt : ${yoda.age}}}},\n {$sample:{size:1}}\n ]\"\"\").first()" + }, + { + "lang": "kotlin", + "value": "// With data class\ndata class Movie(val title: String, val year: Int, val rating: Float)\n\nval dataClassCollection = database.getCollection(\"movies\")\nval movieDataClass = dataClassCollection.findOneOrNull()\nval movieNameDataClass = movieDataClass.title\n\n// With Document class\nval documentCollection = database.getCollection(\"movies\")\nval movieDocument = documentCollection.findOneOrNull()\nval movieTitleDocument = movieDocument.getString(\"title\")" + }, + { + "lang": "kotlin", + "value": "// With data class\ndata class Movie(val title: String, val year: Int, val rating: Float)\n\nval collection = database.getCollection(\"movies\")\nval movieDataClass = dataClassCollection.findOne()\nval movieNameDataClass = movieDataClass.title\n\n// With Document class\nval documentCollection = database.getCollection(\"movies\")\nval movieDocument = documentCollection.findOne()\nval movieTitleDocument = movieDocument.getString(\"title\")" + }, + { + "lang": "kotlin", + "value": "@Serializable\ndata class LightSaber(\n @SerialName(\"_id\") // Use instead of @BsonId\n @Contextual val id: ObjectId?,\n val color: String,\n val qty: Int,\n @SerialName(\"brand\")\n val manufacturer: String = \"Acme\" // Use instead of @BsonProperty\n)" + }, + { + "lang": "kotlin", + "value": "val document = Document(\"_id\", 1).append(\"color\", \"blue\")\n\n// Serialize to JSON\ndocument.toJson()\n\n// Serialize to EJSON\nval settings = JsonWriterSettings.builder().outputMode(JsonMode.STRICT).build()\nval json = doc.toJson(settings)" + }, + { + "lang": "kotlin", + "value": "// Using KotlinX Serialization\n@Serializable\ndata class Data(@Contextual val _id: Id = newId())\n\nval json = Json { serializersModule = IdKotlinXSerializationModule }\nval data = Data()\nval json = json.encodeToString(data)" + }, + { + "lang": "kotlin", + "value": "import com.mongodb.kotlin.client.MongoClient\n\n// Instantiate your collection\ndata class Jedi(val name: String, val age: Int)\nval uri = \"\nval mongoClient = MongoClient.create(uri)\nval database = mongoClient.getDatabase(\"test\")\nval collection = database.getCollection(\"jedi\")\n\n// Synchronous operations\nval jedi =a Jedi(\"Luke Skywalker\", 19)\ncollection.insertOne(jedi)" + }, + { + "lang": "kotlin", + "value": "import com.mongodb.kotlin.client.coroutine.MongoClient\n\n// Instantiate your collection\ndata class Jedi(val name: String, val age: Int)\nval uri = \"\nval mongoClient = MongoClient.create(uri)\nval database = mongoClient.getDatabase(\"test\")\nval collection = database.getCollection(\"jedi\")\n\nrunBlocking {\n\n // Async operations\n val jedi =a Jedi(\"Luke Skywalker\", 19)\n collection.insertOne(jedi)\n}" + }, + { + "lang": "kotlin", + "value": "import org.litote.kmongo.*\n\n// Instantiate your collection\ndata class Jedi(val name: String, val age: 
Int)\n\nval client = KMongo.createClient()\nval database = client.getDatabase(\"test\")\nval col = database.getCollection()\n\n// Synchronous operations\ncol.insertOne(Jedi(\"Luke Skywalker\", 19))\nval yoda : Jedi? = col.findOne(Jedi::name eq \"Yoda\")" + }, + { + "lang": "kotlin", + "value": "import org.litote.kmongo.reactivestreams.*\nimport org.litote.kmongo.coroutine.*\n\n// Instantiate your collection\ndata class Jedi(val name: String, val age: Int)\n\nval client = KMongo.createClient()\nval database = client.getDatabase(\"test\")\nval col = database.getCollection()\n\nrunBlocking {\n\n // Async operations\n col.insertOne(Jedi(\"Luke Skywalker\", 19))\n val yoda : Jedi? = col.findOne(Jedi::name eq \"Yoda\")\n}" + } + ], + "preview": "This page contains a high-level comparison of most of the ways the official\nMongoDB Kotlin and the community-developed KMongo driver differ.\nYou can use this page to identify the changes you need to make to migrate from\nthe deprecated KMongo driver to the official MongoDB Kotlin driver.", + "tags": null, + "facets": { + "target_product": ["drivers"], + "programming_language": ["kotlin"] + } + }, + { + "slug": "quick-reference", + "title": "Quick Reference", + "headings": [], + "paragraphs": "This page shows the driver syntax for several MongoDB commands and links to\ntheir related reference and API documentation. The examples on the page use the following data class to represent MongoDB documents: Command Syntax", + "code": [ + { + "lang": "kotlin", + "value": "data class Movie(\n val title: String,\n val year: Int,\n val rated: String? = \"Not Rated\",\n val genres: List? = listOf()\n)\n" + }, + { + "lang": "kotlin", + "value": "collection.find(\n Filters.eq(Movie::title.name, \"Shrek\")\n).firstOrNull()\n" + }, + { + "lang": "console", + "value": "Movie(title=Shrek, year=2001, ...)" + }, + { + "lang": "kotlin", + "value": "collection.find(\n Filters.eq(Movie::year.name, 2004)\n)\n" + }, + { + "lang": "console", + "value": "[\n Movie(title=Shrek 2, year=2004, ...),\n Movie(title=Spider-Man 2, year=2004, ...),\n Movie(title=National Treasure, year=2004, ...),\n ...\n]" + }, + { + "lang": "kotlin", + "value": "collection.insertOne(Movie(\"Shrek\", 2001))\n" + }, + { + "lang": "kotlin", + "value": "collection.insertMany(\n listOf(\n Movie(\"Shrek\", 2001),\n Movie(\"Shrek 2\", 2004),\n Movie(\"Shrek the Third\", 2007),\n Movie(\"Shrek Forever After\", 2010),\n )\n)\n" + }, + { + "lang": "kotlin", + "value": "collection.updateOne(\n Filters.eq(Movie::title.name, \"Shrek\"),\n Updates.set(Movie::rated.name, \"PG\")\n)\n" + }, + { + "lang": "console", + "value": "Movie(title=Shrek, year=2001, rated=PG, genres=[])" + }, + { + "lang": "kotlin", + "value": "collection.updateMany(\n Filters.regex(Movie::title.name, \"Shrek\"),\n Updates.set(Movie::rated.name, \"PG\")\n)\n" + }, + { + "lang": "console", + "value": "[\n Movie(title=Shrek, year=2001, rated=PG, genres=[]),\n Movie(title=Shrek 2, year=2004, rated=PG, genres=[]),\n Movie(title=Shrek the Third, year=2007, rated=PG, genres=[]),\n Movie(title=Shrek Forever After, year=2010, rated=PG, genres=[])\n]" + }, + { + "lang": "kotlin", + "value": "collection.updateOne(\n Filters.eq(Movie::title.name, \"Shrek\"),\n Updates.addEachToSet(Movie::genres.name, listOf(\"Family\", \"Fantasy\"))\n)\n" + }, + { + "lang": "console", + "value": "Movie(title=Shrek, year=2001, rated=Not Rated, genres=[Family, Fantasy])" + }, + { + "lang": "kotlin", + "value": "collection.replaceOne(\n Filters.eq(Movie::title.name, 
\"Shrek\"),\n Movie(\"Kersh\", 1002, \"GP\")\n)\n" + }, + { + "lang": "console", + "value": "Movie(title=Kersh, year=1002, rated=GP, genres=[])" + }, + { + "lang": "kotlin", + "value": "collection.deleteOne(\n Filters.eq(Movie::title.name, \"Shrek\")\n)\n" + }, + { + "lang": "kotlin", + "value": "collection.deleteMany(\n Filters.regex(Movie::title.name, \"Shrek\")\n)\n" + }, + { + "lang": "kotlin", + "value": "collection.bulkWrite(\n listOf(\n InsertOneModel(Movie(\"Shrek\", 2001)),\n DeleteManyModel(Filters.lt(Movie::year.name, 2004)),\n )\n)\n" + }, + { + "lang": "kotlin", + "value": "val changeStream = collection.watch()\nchangeStream.collect {\n println(\"Change to ${it.fullDocument?.title}\")\n}\n" + }, + { + "lang": "kotlin", + "value": "collection.find().toList()\n" + }, + { + "lang": "console", + "value": "[\n Movie(title=Shrek, year=2001, rated=Not Rated, genres=[]),\n Movie(title=Shrek 2, year=2004, rated=Not Rated, genres=[]),\n Movie(title=Shrek the Third, year=2007, rated=Not Rated, genres=[]),\n Movie(title=Shrek Forever After, year=2010, rated=Not Rated, genres=[])\n]" + }, + { + "lang": "kotlin", + "value": "collection.countDocuments(Filters.eq(\"year\", 2001))\n" + }, + { + "lang": "console", + "value": "42" + }, + { + "lang": "kotlin", + "value": "collection.distinct(Movie::rated.name)\n" + }, + { + "lang": "console", + "value": "[Not Rated, PG, PG-13]" + }, + { + "lang": "kotlin", + "value": "collection.find()\n .limit(2)\n" + }, + { + "lang": "console", + "value": "[\n Movie(title=Shrek, year=2001, rated=Not Rated, genres=[]),\n Movie(title=Shrek 2, year=2004, rated=Not Rated, genres=[])\n]" + }, + { + "lang": "kotlin", + "value": "collection.find()\n .skip(2)\n" + }, + { + "lang": "console", + "value": "[\n Movie(title=Shrek the Third, year=2007, rated=Not Rated, genres=[]),\n Movie(title=Shrek Forever After, year=2010, rated=Not Rated, genres=[])\n]" + }, + { + "lang": "kotlin", + "value": "collection.find().sort(Sorts.descending(Movie::year.name))\n" + }, + { + "lang": "console", + "value": "[\n Movie(title=Shrek Forever After, year=2010, rated=Not Rated, genres=[]),\n Movie(title=Shrek the Third, year=2007, rated=Not Rated, genres=[]),\n Movie(title=Shrek 2, year=2004, rated=Not Rated, genres=[]),\n Movie(title=Shrek, year=2001, rated=Not Rated, genres=[])\n]" + }, + { + "lang": "kotlin", + "value": "data class Result(val title: String)\n collection.find()\n .projection(Projections.include(Movie::title.name))\n" + }, + { + "lang": "console", + "value": "Result(title=Shrek)" + }, + { + "lang": "kotlin", + "value": "collection.createIndex(Indexes.ascending(Movie::title.name))\n" + }, + { + "lang": "kotlin", + "value": "collection.find(Filters.text(\"Forever\"));\n" + }, + { + "lang": "console", + "value": "[Movie(title=Shrek Forever After, year=2010, rated=Not Rated, genres=[])]" + }, + { + "lang": "xml", + "value": "\n \n org.mongodb\n mongodb-driver-kotlin-coroutine\n 5.1.2\n \n" + }, + { + "lang": "kotlin", + "value": "dependencies {\n implementation(\"org.mongodb:mongodb-driver-kotlin-coroutine:5.1.2\")\n}" + }, + { + "lang": "kotlin", + "value": "val flow = collection.find(\n Filters.eq(Movie::year.name, 2004)\n)\nflow.collect { println(it) }\n" + }, + { + "lang": "console", + "value": "Movie(title=2001: A Space Odyssey, ...)\nMovie(title=The Sound of Music, ...)" + } + ], + "preview": "This page shows the driver syntax for several MongoDB commands and links to\ntheir related reference and API documentation.", + "tags": null, + "facets": { + "target_product": 
["drivers"], + "programming_language": ["kotlin"] + } + }, + { + "slug": "quick-start", + "title": "Kotlin Driver Quick Start", + "headings": [ + "Introduction", + "Set up Your Project", + "Install Kotlin", + "Create the Project", + "Add MongoDB as a Dependency", + "Add Serialization Library Dependencies", + "Create a MongoDB Cluster", + "Connect to your Cluster", + "Query Your MongoDB Cluster from Your Application", + "Working with the Document Class (Alternative)", + "Next Steps" + ], + "paragraphs": "This guide shows you how to create an application that uses the Kotlin driver \nto connect to a MongoDB Atlas cluster . If you prefer to connect to\nMongoDB by using a different driver or programming language, see the\n list of official MongoDB drivers . The Kotlin driver lets you connect to and communicate with MongoDB clusters\nfrom a Kotlin application. MongoDB Atlas is a fully managed cloud database service that hosts your data\non MongoDB clusters. In this guide, you can learn how to get started with your\nown free cluster. To view another example that demonstrates how to build an\napplication in Kotlin that connects to MongoDB Atlas, see the\n Getting Started with the MongoDB Kotlin Driver \ndeveloper tutorial. Make sure that your system has Kotlin installed and running on JDK 1.8 or later.\nFor more information on getting started with Kotlin/JVM development,\nrefer to Get started with Kotlin/JVM \nin the Kotlin language documentation. This guide shows you how to add the MongoDB Kotlin driver dependencies\nby using Gradle or Maven. We recommend that you use an integrated development\nenvironment (IDE) such as IntelliJ IDEA or Eclipse IDE to configure\nGradle or Maven to build and run your project. If you are not using an IDE, see the\n Creating New Gradle Builds guide\nor the Building Maven guide\nfor more information on how to set up your project. If you are using Gradle to manage your\npackages, add the following entry to your build.gradle.kts \ndependencies list: If you are using Maven to manage your\npackages, add the following entry to your pom.xml dependencies list: After you configure your dependencies, ensure that they are available to your\nproject by running the dependency manager and refreshing the\nproject in your IDE. To enable the driver to convert between Kotlin objects and BSON, the\ndata format for documents in MongoDB, you must also add one or both of the\nfollowing serialization packages to your application: If you are using Gradle to manage your packages, add one of the following\nentries to your build.gradle.kts dependencies list: If you are using Maven to manage your packages, add one of the following\nentries to your pom.xml dependencies list: After you configure your dependencies, ensure that they are available to your\nproject by running the dependency manager and refreshing the\nproject in your IDE. To learn more about these packages, see\n Kotlin Serialization . bson-kotlinx (Recommended) bson-kotlin After setting up your Kotlin project dependencies, create a MongoDB cluster\nin which you can store and manage your data. Complete the\n Get Started with Atlas tutorial\nto set up a new Atlas account, create and launch a free tier MongoDB cluster,\nand load sample datasets. After you complete the steps in the Get Started with Atlas tutorial, you\nhave a new MongoDB cluster deployed in Atlas, a new database user, and\nsample data loaded into your cluster. 
This step shows how to create and run an application that uses the\nKotlin driver to connect to your MongoDB cluster and run a query on\nthe sample data. First, you must specify how the driver connects to your MongoDB cluster\nby including a connection string in your code. This string includes\ninformation on the hostname or IP address and port of your cluster,\nauthentication mechanism, user credentials, and other connection\noptions. If you are connecting to an instance or cluster that is not hosted on Atlas,\nsee the Other Ways to Connect to MongoDB section of the Connection Guide for\ninstructions on how to format your connection string. To retrieve your connection string for the cluster and user you created in\nthe previous step, log into your Atlas account and navigate to the\n Database page under Deployment and click the\n Connect button for your cluster, which is shown in the following\nimage: Select the Drivers option for connection and select\n Kotlin from the list of drivers and 4.10 or\nlater from the version dropdown. Next, click the Copy icon, which is highlighted in the\nfollowing image, to copy your connection string to\nyour clipboard: Save your Atlas connection string in a safe location that you can access\nfor the next step. Next, create a file called QuickStartDataClassExample.kt in your\nproject. Copy the following sample code into the file and replace the value of\nthe uri variable with your MongoDB Atlas connection string that you\nsaved in the preceding step. Replace the \"\" placeholder of\nyour connection string with the password you set for your user that has\n atlasAdmin permissions: When you run the main function, the application prints the details\nof a movie document that matches the query, as shown in the following output: If you don't see any output or receive an error, check whether you\nincluded the proper connection string in your application. Also, confirm\nthat you successfully loaded the sample dataset into your MongoDB Atlas cluster. After completing this step, you have a working application that uses\nthe Kotlin driver to connect to your MongoDB cluster, run a query on the\nsample data, and print out the result. This example uses a Kotlin data class to model MongoDB data. To learn more about using data classes to store and retrieve data,\nsee the Document Data Format: Data Classes guide. If you encounter the following error while connecting to your MongoDB\ninstance, you must update your JDK to the latest patch release: This exception is a known issue when using the TLS 1.3 protocol with\nspecific versions of JDK, but this issue is fixed for the following\nJDK versions: To resolve this error, update your JDK to one of the preceding patch\nversions or a newer one. JDK 11.0.7 JDK 13.0.3 JDK 14.0.2 The preceding section demonstrates how to run a query on a sample\ncollection to retrieve data by using a Kotlin data class. This section\nshows how to use the Document class\nto store and retrieve data from MongoDB. In a new file called QuickStartDocumentExample.kt , paste the following sample\ncode to run a query on your sample dataset in MongoDB Atlas. Replace the\nvalue of the uri variable with your MongoDB Atlas connection string: When you run the main function, the application prints the details\nof a movie document that matches the query, as shown in the following output: If you don't see any output or receive an error, check whether you\nincluded the proper connection string in your application. 
Also, confirm\nthat you successfully loaded the sample dataset into your MongoDB Atlas cluster. To learn more about the Kotlin driver, see the\n Fundamentals guides, which describe relevant\nconcepts in detail and provide code examples for performing different tasks.", + "code": [ + { + "lang": "kotlin", + "value": "dependencies {\n implementation(\"org.mongodb:mongodb-driver-kotlin-coroutine:5.1.2\")\n}" + }, + { + "lang": "xml", + "value": "\n \n org.mongodb\n mongodb-driver-kotlin-coroutine\n 5.1.2\n \n" + }, + { + "lang": "kotlin", + "value": "implementation(\"org.mongodb:bson-kotlinx:5.1.2\")\n// OR\nimplementation(\"org.mongodb:bson-kotlin:5.1.2\")" + }, + { + "lang": "xml", + "value": "\n org.mongodb\n bson-kotlinx\n 5.1.2\n\n\n\n org.mongodb\n bson-kotlin\n 5.1.2\n" + }, + { + "lang": "none", + "value": "Movie(\n title=Back to the Future,\n year=1985,\n cast=[Michael J. Fox, Christopher Lloyd, Lea Thompson, Crispin Glover]\n)" + }, + { + "lang": "kotlin", + "value": "import com.mongodb.client.model.Filters.eq\nimport com.mongodb.kotlin.client.coroutine.MongoClient\nimport io.github.cdimascio.dotenv.dotenv\nimport kotlinx.coroutines.flow.firstOrNull\nimport kotlinx.coroutines.runBlocking\n\n// Create data class to represent a MongoDB document\ndata class Movie(val title: String, val year: Int, val cast: List)\n\nfun main() {\n\n // Replace the placeholder with your MongoDB deployment's connection string\n val uri = CONNECTION_STRING_URI_PLACEHOLDER\n\n val mongoClient = MongoClient.create(uri)\n val database = mongoClient.getDatabase(\"sample_mflix\")\n // Get a collection of documents of type Movie\n val collection = database.getCollection(\"movies\")\n\n runBlocking {\n val doc = collection.find(eq(\"title\", \"Back to the Future\")).firstOrNull()\n if (doc != null) {\n println(doc)\n } else {\n println(\"No matching documents found.\")\n }\n }\n\n mongoClient.close()\n}\n\n" + }, + { + "lang": "none", + "value": "javax.net.ssl.SSLHandshakeException: extension (5) should not be presented in certificate_request" + }, + { + "lang": "json", + "value": "{\n _id: ...,\n plot: 'A young man is accidentally sent 30 years into the past...',\n genres: [ 'Adventure', 'Comedy', 'Sci-Fi' ],\n ...\n title: 'Back to the Future',\n ...\n}" + }, + { + "lang": "kotlin", + "value": "import com.mongodb.client.model.Filters.eq\nimport com.mongodb.kotlin.client.coroutine.MongoClient\nimport io.github.cdimascio.dotenv.dotenv\nimport kotlinx.coroutines.flow.firstOrNull\nimport kotlinx.coroutines.runBlocking\nimport org.bson.Document\n\nfun main() {\n\n // Replace the placeholder with your MongoDB deployment's connection string\n val uri = CONNECTION_STRING_URI_PLACEHOLDER\n\n val mongoClient = MongoClient.create(uri)\n val database = mongoClient.getDatabase(\"sample_mflix\")\n val collection = database.getCollection(\"movies\")\n\n runBlocking {\n val doc = collection.find(eq(\"title\", \"Back to the Future\")).firstOrNull()\n if (doc != null) {\n println(doc.toJson())\n } else {\n println(\"No matching documents found.\")\n }\n }\n\n mongoClient.close()\n}\n\n" + } + ], + "preview": "This guide shows you how to create an application that uses the Kotlin driver\nto connect to a MongoDB Atlas cluster. 
If you prefer to connect to\nMongoDB by using a different driver or programming language, see the\nlist of official MongoDB drivers.", + "tags": "code example, get started, runnable app", + "facets": { + "genre": ["tutorial"], + "target_product": ["drivers"], + "programming_language": ["kotlin"] + } + }, + { + "slug": "usage-examples/bulkWrite", + "title": "Perform Bulk Operations", + "headings": ["Example"], + "paragraphs": "The bulkWrite() method performs batch write operations against a\n single collection. This method reduces the number of network round trips from\nyour application to your MongoDB instance which increases the performance of your\napplication. Since you only receive the success status after\nall the operations return, we recommend you use this if that meets the\nrequirements of your use case. You can specify one or more of the following write operations in\n bulkWrite() : The bulkWrite() method accepts the following parameters: The bulkWrite() method returns a BulkWriteResult object that\ncontains information about the write operation results including the number\nof documents inserted, modified, and deleted. If one or more of your operations attempts to set a value that violates a\nunique index on your collection, an exception is raised that should look\nsomething like this: Similarly, if you attempt to perform a bulk write against a collection\nthat uses schema validation and one or more of your write operations\nprovide an unexpected format, you may encounter exceptions. insertOne updateOne updateMany deleteOne deleteMany replaceOne A List of objects that implement WriteModel : the classes that\nimplement WriteModel correspond to the aforementioned write\noperations. For example, the InsertOneModel class wraps the insertOne \nwrite operation. See the links to the API documentation at the bottom of this\npage for more information on each class. BulkWriteOptions : optional object that specifies settings such as\nwhether to ensure your MongoDB instance orders your write operations. Retryable writes run on MongoDB server versions 3.6 or later in bulk\nwrite operations unless they include one or more instances of\n UpdateManyModel or DeleteManyModel . By default, MongoDB executes bulk write operations one-by-one in the\nspecified order (i.e. serially). During an ordered bulk write, if\nan error occurs during the processing of an operation, MongoDB returns\nwithout processing the remaining operations in the list. In contrast,\nwhen you set ordered to false , MongoDB continues to process remaining\nwrite operations in the list in the event of an error. Unordered operations\nare theoretically faster since MongoDB can execute them in parallel, but\nyou should only use them if your writes do not depend on order. The following code sample performs an ordered bulk write operation on the\n movies collection in the sample_mflix database. The example call\nto bulkWrite() includes examples of the InsertOneModel ,\n UpdateOneModel , and DeleteOneModel . For additional information on the classes and methods mentioned on this\npage, see the following resources: This example connects to an instance of MongoDB using a connection URI.\nTo learn more about connecting to your MongoDB instance, see the\n connection guide . 
Unique Index Server Manual Entry Schema Validation Server Manual Entry bulkWrite() API Documentation BulkWriteOptions API Documentation BulkWriteResult API Documentation InsertOneModel API Documentation UpdateOneModel API Documentation UpdateManyModel API Documentation DeleteOneModel API Documentation DeleteManyModel API Documentation ReplaceOneModel API Documentation", + "code": [ + { + "lang": "sh", + "value": "The bulk write operation failed due to an error: Bulk write operation error on server . Write errors: [BulkWriteError{index=0, code=11000, message='E11000 duplicate key error collection: ... }]." + }, + { + "lang": "kotlin", + "value": "import com.mongodb.MongoException\nimport com.mongodb.client.model.DeleteOneModel\nimport com.mongodb.client.model.Filters\nimport com.mongodb.client.model.InsertOneModel\nimport com.mongodb.client.model.ReplaceOneModel\nimport com.mongodb.client.model.UpdateOneModel\nimport com.mongodb.client.model.UpdateOptions\nimport com.mongodb.client.model.Updates\nimport com.mongodb.kotlin.client.coroutine.MongoClient\nimport kotlinx.coroutines.runBlocking\n\ndata class Movie(val title: String, val runtime: Int? = null)\n\nfun main() = runBlocking {\n // Replace the uri string with your MongoDB deployment's connection string\n val uri = \"\"\n val mongoClient = MongoClient.create(uri)\n val database = mongoClient.getDatabase(\"sample_mflix\")\n val collection = database.getCollection<Movie>(\"movies\")\n\n try {\n val result = collection.bulkWrite(\n listOf(\n InsertOneModel(Movie(\"A Sample Movie\")),\n InsertOneModel(Movie(\"Another Sample Movie\")),\n InsertOneModel(Movie(\"Yet Another Sample Movie\")),\n UpdateOneModel(\n Filters.eq(Movie::title.name,\"A Sample Movie\"),\n Updates.set(Movie::title.name, \"An Old Sample Movie\"),\n UpdateOptions().upsert(true)\n ),\n DeleteOneModel(Filters.eq(\"title\", \"Another Sample Movie\")),\n ReplaceOneModel(\n Filters.eq(Movie::title.name, \"Yet Another Sample Movie\"),\n Movie(\"The Other Sample Movie\", 42)\n )\n )\n )\n println(\n \"\"\"\n Result statistics:\n inserted: ${result.insertedCount}\n updated: ${result.modifiedCount}\n deleted: ${result.deletedCount}\n \"\"\".trimIndent()\n )\n } catch (e: MongoException) {\n System.err.println(\"The bulk write operation failed due to an error: $e\")\n }\n mongoClient.close()\n}\n" + }, + { + "lang": "console", + "value": "Result statistics:\ninserted: 3\nupdated: 2\ndeleted: 1" + } + ], + "preview": "The bulkWrite() method performs batch write operations against a\nsingle collection. This method reduces the number of network round trips from\nyour application to your MongoDB instance, which increases the performance of your\napplication. Since you only receive the success status after\nall the operations return, we recommend using this method if it meets the\nrequirements of your use case.", + "tags": null, + "facets": { + "target_product": ["drivers"], + "programming_language": ["kotlin"] + } + }, + { + "slug": "usage-examples/command", + "title": "Run a Command", + "headings": ["Example"], + "paragraphs": "You can run all raw database operations using the\n MongoDatabase.runCommand() method. A raw database operation is a\ncommand you can execute directly on the MongoDB server CLI. These\ncommands include administrative and diagnostic tasks, such as fetching\nserver stats or initializing a replica set. Call the runCommand() \nmethod with a Bson command object on an instance of a MongoDatabase \nto run your raw database operation. 
The runCommand() method accepts a command in the form of a Bson object.\nBy default, runCommand returns an object of type\n org.bson.Document containing the output of the database command. You\ncan specify a return type for runCommand() as an optional second\nparameter. Use the MongoDB Shell for\nadministrative tasks instead of the Kotlin driver whenever possible,\nsince these tasks are often quicker and easier to implement with the\nshell than in a Kotlin application. In the following sample code, we send the dbStats command to request\nstatistics from a specific MongoDB database. For additional information on the classes and methods mentioned on this\npage, see the following resources: This example connects to an instance of MongoDB using a connection URI.\nTo learn more about connecting to your MongoDB instance, see the\n connection guide . runCommand() API Documentation Database Commands Server Manual Entry dbStats Server Manual Entry", + "code": [ + { + "lang": "kotlin", + "value": "\nimport com.mongodb.MongoException\nimport com.mongodb.kotlin.client.coroutine.MongoClient\nimport kotlinx.coroutines.runBlocking\nimport org.bson.BsonDocument\nimport org.bson.BsonInt64\nimport org.bson.json.JsonWriterSettings\n\nfun main() = runBlocking {\n // Replace the uri string with your MongoDB deployment's connection string\n val uri = \"\"\n val mongoClient = MongoClient.create(uri)\n val database = mongoClient.getDatabase(\"sample_mflix\")\n try {\n val command = BsonDocument(\"dbStats\", BsonInt64(1))\n val commandResult = database.runCommand(command)\n println(commandResult.toJson(JsonWriterSettings.builder().indent(true).build()))\n } catch (me: MongoException) {\n System.err.println(\"An error occurred: $me\")\n }\n mongoClient.close()\n}\n" + }, + { + "lang": "json", + "value": "{\n \"db\": \"sample_mflix\",\n \"collections\": 5,\n \"views\": 0,\n \"objects\": 75595,\n \"avgObjSize\": 692.1003770090614,\n \"dataSize\": 52319328,\n \"storageSize\": 29831168,\n \"numExtents\": 0,\n \"indexes\": 9,\n \"indexSize\": 14430208,\n \"fileSize\": 0,\n \"nsSizeMB\": 0,\n \"ok\": 1\n}" + } + ], + "preview": "You can run all raw database operations using the\nMongoDatabase.runCommand() method. A raw database operation is a\ncommand you can execute directly on the MongoDB server CLI. These\ncommands include administrative and diagnostic tasks, such as fetching\nserver stats or initializing a replica set. Call the runCommand()\nmethod with a Bson command object on an instance of a MongoDatabase\nto run your raw database operation.", + "tags": null, + "facets": { + "target_product": ["drivers"], + "programming_language": ["kotlin"] + } + }, + { + "slug": "usage-examples/count", + "title": "Count Documents", + "headings": ["Example"], + "paragraphs": "There are two instance methods in the MongoCollection class that you can\ncall to count the number of documents in a collection: The estimatedDocumentCount() method returns more quickly than the\n countDocuments() method because it uses the collection's metadata rather\nthan scanning the entire collection. The countDocuments() method returns\nan accurate count of the number of documents and supports specifying\na filter. When you call the countDocuments() method, you can optionally pass a\n query filter parameter. You cannot pass any parameters when you call\n estimatedDocumentCount() . 
You can also pass an optional parameter to either of these methods to\nspecify the behavior of the call: Both methods return the number of matching documents as a Long primitive. countDocuments() returns an accurate count of the number of documents\nin the collection that match a specified query. If you specify an empty query\nfilter, the method returns the total number of documents in the collection. estimatedDocumentCount() returns an estimation of the number of\ndocuments in the collection based on the collection metadata. You cannot\nspecify a query when using this method. When using countDocuments() to return the total number of documents in a\ncollection, you can improve performance by avoiding a collection scan. To do\nthis, use a hint to take advantage\nof the built-in index on the _id field. Use this technique only when\ncalling countDocuments() with an empty query parameter: If you are using the Stable API V1 with the \"strict\" option and a\nMongoDB server version between 5.0.0 and 5.0.8 inclusive, method calls to\n estimatedDocumentCount() may error due to a server bug. Upgrade to MongoDB server 5.0.9 or set the Stable API \"strict\" option to\n false to avoid this issue. Method Optional Parameter Class Description countDocuments() CountOptions You can specify a maximum number of documents to count by using the\n limit() method or the maximum amount of execution time using the\n maxTime() method. estimatedDocumentCount() EstimatedDocumentCountOptions You can specify the maximum execution time using the maxTime() \nmethod. The following example estimates the number of documents in the\n movies collection in the sample_mflix database, and then returns\nan accurate count of the number of documents in the movies \ncollection with Spain in the countries field.\nIf you run the preceding sample code, you should see output that looks something\nlike this (exact numbers may vary depending on your data): For additional information on the classes and methods mentioned on this\npage, see the following API Documentation: This example connects to an instance of MongoDB using a connection URI.\nTo learn more about connecting to your MongoDB instance, see the\n connection guide . 
countDocuments() estimatedDocumentCount() CountOptions EstimatedDocumentCountOptions", + "code": [ + { + "lang": "kotlin", + "value": "val options = CountOptions().hintString(\"_id_\")\nval numDocuments = collection.countDocuments(BsonDocument(), options)\n" + }, + { + "lang": "kotlin", + "value": "\nimport com.mongodb.MongoException\nimport com.mongodb.client.model.Filters\nimport com.mongodb.kotlin.client.coroutine.MongoClient\nimport kotlinx.coroutines.runBlocking\n\n\ndata class Movie(val countries: List<String>)\n\nfun main() = runBlocking {\n // Replace the uri string with your MongoDB deployment's connection string\n val uri = \"\"\n val mongoClient = MongoClient.create(uri)\n val database = mongoClient.getDatabase(\"sample_mflix\")\n val collection = database.getCollection<Movie>(\"movies\")\n\n\n val query = Filters.eq(Movie::countries.name, \"Spain\")\n try {\n val estimatedCount = collection.estimatedDocumentCount()\n println(\"Estimated number of documents in the movies collection: $estimatedCount\")\n val matchingCount = collection.countDocuments(query)\n println(\"Number of movies from Spain: $matchingCount\")\n } catch (e: MongoException) {\n System.err.println(\"An error occurred: $e\")\n }\n\n mongoClient.close()\n}\n" + }, + { + "lang": "console", + "value": "Estimated number of documents in the movies collection: 23541\nNumber of movies from Spain: 755" + } + ], + "preview": "There are two instance methods in the MongoCollection class that you can\ncall to count the number of documents in a collection:", + "tags": null, + "facets": { + "target_product": ["drivers"], + "programming_language": ["kotlin"] + } + }, + { + "slug": "usage-examples/delete-operations", + "title": "Delete Operations", + "headings": [], + "paragraphs": "Delete a Document Delete Multiple Documents", + "code": [], + "preview": null, + "tags": null, + "facets": { + "target_product": ["drivers"], + "programming_language": ["kotlin"] + } + }, + { + "slug": "usage-examples/deleteMany", + "title": "Delete Multiple Documents", + "headings": ["Example"], + "paragraphs": "You can delete multiple documents from a collection in a single operation\nby calling the deleteMany() method on a MongoCollection object. To specify which documents to delete, pass a query filter that matches\nthe documents you want to delete. If you provide an empty document,\nMongoDB matches all documents in the collection and deletes them. While\nyou can use deleteMany() to delete all documents in a collection,\nconsider using the drop() method instead for better performance. Upon successful deletion, this method returns an instance of\n DeleteResult . You can retrieve information such as the number of\ndocuments deleted by calling the getDeletedCount() method on the\n DeleteResult instance. If your delete operation fails, the driver raises an exception. For more\ninformation on the types of exceptions raised under specific conditions,\nsee the API documentation for deleteMany() , linked at the bottom of\nthis page. The following snippet deletes multiple documents from the movies \ncollection in the sample_mflix database. The query filter passed to the deleteMany() method matches all\nmovie documents that contain a rating of less than 2.9 in the imdb \nsub-document. When you run the example, you should see output that reports the number of\ndocuments deleted in your call to deleteMany() . 
For additional information on the classes and methods mentioned on this\npage, see the following API Documentation: This example connects to an instance of MongoDB using a connection URI.\nTo learn more about connecting to your MongoDB instance, see the\n connection guide . deleteMany() DeleteResult drop()", + "code": [ + { + "lang": "kotlin", + "value": "\nimport com.mongodb.MongoException\nimport com.mongodb.client.model.Filters\nimport com.mongodb.kotlin.client.coroutine.MongoClient\nimport kotlinx.coroutines.runBlocking\n\ndata class Movie(val imdb: IMDB){\n data class IMDB(val rating: Double)\n}\n\nfun main() = runBlocking {\n // Replace the uri string with your MongoDB deployment's connection string\n val uri = \"\"\n val mongoClient = MongoClient.create(uri)\n val database = mongoClient.getDatabase(\"sample_mflix\")\n val collection = database.getCollection<Movie>(\"movies\")\n\n val query = Filters.lt(\"${Movie::imdb.name}.${Movie.IMDB::rating.name}\", 2.9)\n try {\n val result = collection.deleteMany(query)\n println(\"Deleted document count: \" + result.deletedCount)\n } catch (e: MongoException) {\n System.err.println(\"Unable to delete due to an error: $e\")\n }\n mongoClient.close()\n}\n" + }, + { + "lang": "console", + "value": "Deleted document count: 4" + } + ], + "preview": "You can delete multiple documents from a collection in a single operation\nby calling the deleteMany() method on a MongoCollection object.", + "tags": null, + "facets": { + "target_product": ["drivers"], + "programming_language": ["kotlin"] + } + }, + { + "slug": "usage-examples/deleteOne", + "title": "Delete a Document", + "headings": ["Example"], + "paragraphs": "You can delete a single document from a collection using the deleteOne() \nmethod on a MongoCollection object. The method accepts a query filter\nthat matches the document you want to delete. If you do not specify\na filter, MongoDB matches the first document in the collection. The\n deleteOne() method only deletes the first document matched. This method returns an instance of DeleteResult which contains information\nincluding how many documents were deleted as a result of the operation. If your delete operation fails, the driver raises an exception. For more\ninformation on the types of exceptions raised under specific conditions,\nsee the API documentation for deleteOne() , linked at the bottom of\nthis page. The following snippet deletes a single document from the movies \ncollection of the sample_mflix database. The example uses the eq() \nfilter to match movies with the title exactly matching the text\n 'The Garbage Pail Kids Movie' . When you run the example, if the query filter you passed in your call to\n deleteOne() matches a document and removes it, you should see output\nthat looks something like this: If your query filter does not match a document in your collection,\nyour call to deleteOne() removes no documents and returns the following: For additional information on the classes and methods mentioned on this\npage, see the following API Documentation: This example connects to an instance of MongoDB using a connection URI.\nTo learn more about connecting to your MongoDB instance, see the\n connection guide . 
deleteOne() DeleteResult eq()", + "code": [ + { + "lang": "none", + "value": "Deleted document count: 1" + }, + { + "lang": "none", + "value": "Deleted document count: 0" + }, + { + "lang": "kotlin", + "value": "\nimport com.mongodb.MongoException\nimport com.mongodb.client.model.Filters\nimport com.mongodb.kotlin.client.coroutine.MongoClient\nimport kotlinx.coroutines.runBlocking\n\n\ndata class Movie(val title: String)\n\nfun main() = runBlocking {\n // Replace the uri string with your MongoDB deployment's connection string\n val uri = \"\"\n val mongoClient = MongoClient.create(uri)\n val database = mongoClient.getDatabase(\"sample_mflix\")\n val collection = database.getCollection<Movie>(\"movies\")\n\n val query = Filters.eq(Movie::title.name, \"The Garbage Pail Kids Movie\")\n\n try {\n val result = collection.deleteOne(query)\n println(\"Deleted document count: \" + result.deletedCount)\n } catch (e: MongoException) {\n System.err.println(\"Unable to delete due to an error: $e\")\n }\n mongoClient.close()\n}\n" + } + ], + "preview": "You can delete a single document from a collection using the deleteOne()\nmethod on a MongoCollection object. The method accepts a query filter\nthat matches the document you want to delete. If you do not specify\na filter, MongoDB matches the first document in the collection. The\ndeleteOne() method only deletes the first document matched.", + "tags": null, + "facets": { + "target_product": ["drivers"], + "programming_language": ["kotlin"] + } + }, + { + "slug": "usage-examples/distinct", + "title": "Retrieve Distinct Values of a Field", + "headings": ["Example"], + "paragraphs": "You can retrieve a list of distinct values for a field across a\ncollection by calling the distinct() method on a MongoCollection \nobject. Pass the document field name as the first parameter and the class\nyou want to cast the results to as the type parameter. The following snippets demonstrate the distinct() method using the movies \ncollection in the sample_mflix sample database. Documents are modeled\nwith the following Kotlin data class: The following method call returns each distinct value of the countries \nfield in the movies collection: You can specify a field on the document or one within an embedded document \nusing dot notation . The following method call returns each distinct\nvalue of the wins field in the awards embedded document: You can also limit the set of documents from which your MongoDB instance retrieves\ndistinct values with a query filter as a second parameter, as follows: The distinct() method returns an object that implements the\n DistinctFlow class, which contains methods to access, organize, and traverse\nthe results. DistinctFlow delegates to the Flow interface\nfrom the Kotlin Coroutines library, allowing access to methods such as first() and\n firstOrNull() . For more information, see our\n guide on Accessing Data From a Flow . The following example retrieves a list of distinct values for the year \ndocument field from the movies collection. It uses a query filter to\nmatch movies that include \"Carl Franklin\" as one of the values in the\n directors array. When you run the example, you should see output that reports each distinct\nyear for all the movies that include Carl Franklin as a director. 
For additional information on the classes and methods mentioned on this\npage, see the following resources: This example connects to an instance of MongoDB using a connection URI.\nTo learn more about connecting to your MongoDB instance, see the\n connection guide . distinct() API Documentation distinctFlow API Documentation Dot Notation Server Manual Entry", + "code": [ + { + "lang": "kotlin", + "value": "data class Movie(\n val type: String,\n val languages: List<String>,\n val countries: List<String>,\n val awards: Awards){\n data class Awards(val wins: Int)\n }\n" + }, + { + "lang": "kotlin", + "value": "collection.distinct<String>(Movie::countries.name)\n" + }, + { + "lang": "kotlin", + "value": "collection.distinct<Int>(\"${Movie::awards.name}.${Movie.Awards::wins.name}\")\n" + }, + { + "lang": "kotlin", + "value": "collection.distinct<String>(Movie::type.name, Filters.eq(Movie::languages.name, \"French\"))\n" + }, + { + "lang": "kotlin", + "value": "import com.mongodb.MongoException\nimport com.mongodb.client.model.Filters\nimport com.mongodb.kotlin.client.coroutine.MongoClient\nimport kotlinx.coroutines.runBlocking\n\ndata class Movie(val year: Int, val directors: List<String>)\n\nfun main() = runBlocking {\n // Replace the uri string with your MongoDB deployment's connection string\n val uri = \"\"\n val mongoClient = MongoClient.create(uri)\n val database = mongoClient.getDatabase(\"sample_mflix\")\n val collection = database.getCollection<Movie>(\"movies\")\n\n try {\n val resultsFlow = collection.distinct<Int>(\n Movie::year.name, Filters.eq(Movie::directors.name, \"Carl Franklin\")\n )\n resultsFlow.collect { println(it) }\n } catch (e: MongoException) {\n System.err.println(\"An error occurred: $e\")\n }\n\n mongoClient.close()\n}\n" + }, + { + "lang": "console", + "value": "1992\n1995\n1998\n..." + } + ], + "preview": "You can retrieve a list of distinct values for a field across a\ncollection by calling the distinct() method on a MongoCollection\nobject. Pass the document field name as the first parameter and the class\nyou want to cast the results to as the type parameter.", + "tags": null, + "facets": { + "target_product": ["drivers"], + "programming_language": ["kotlin"] + } + }, + { + "slug": "usage-examples/find-operations", + "title": "Find Operations", + "headings": [], + "paragraphs": "Find a Document Find Multiple Documents", + "code": [], + "preview": null, + "tags": null, + "facets": { + "target_product": ["drivers"], + "programming_language": ["kotlin"] + } + }, + { + "slug": "usage-examples/find", + "title": "Find Multiple Documents", + "headings": ["Example"], + "paragraphs": "You can query for multiple documents in a collection by calling the find() \nmethod on a MongoCollection object. Pass a query filter to the\n find() method to query for and return documents that match the filter in\nthe collection. If you do not include a filter, MongoDB returns all the\ndocuments in the collection. For more information on querying MongoDB with the Kotlin driver, see our\n guide on Querying Documents . You can also chain methods to the find() method such as sort() which\norganizes the matched documents in a specified order and\n projection() which configures the included fields in the\nreturned documents. For more information on the sort() method, see our\n guide on Sorting .\nFor more information on the projection() method, see our\n guide on Projections The find() method returns an instance of FindFlow , a class\nthat offers several methods to access, organize, and traverse the results. 
FindFlow also obtains methods from its delegate interface Flow from the\nKotlin Coroutines library.\nYou can call the collect() method to iterate through the fetched results.\nYou can also call terminal methods, such as firstOrNull() to return either\nthe first document or null if there are no results, or first() to return\nthe first document in the collection. If no documents match the query,\ncalling first() throws a NoSuchElementException exception. For more information on accessing data from a flow with the Kotlin driver, see our\n guide on Accessing Data From a Flow . The following snippet finds and prints all documents that match a query on\nthe movies collection. It uses the following objects and methods: For additional information on the classes and methods mentioned on this\npage, see the following API Documentation: A query filter that is passed to the find() method. The lt() \nfilter matches only movies with a runtime of less than 15 minutes. A sort that organizes returned documents in descending order by\ntitle (\"Z\" before \"A\"). A projection that includes the objects in the title and imdb \nfields and excludes the _id field using the helper method\n excludeId() . This example connects to an instance of MongoDB using a connection URI.\nTo learn more about connecting to your MongoDB instance, see the\n connection guide . FindFlow find()", + "code": [ + { + "lang": "kotlin", + "value": "import com.mongodb.client.model.Filters.lt\nimport com.mongodb.client.model.Projections\nimport com.mongodb.client.model.Sorts\nimport com.mongodb.kotlin.client.coroutine.MongoClient\nimport kotlinx.coroutines.runBlocking\n\ndata class Movie(val title: String, val runtime: Int, val imdb: IMDB){\n data class IMDB(val rating: Double)\n}\n\ndata class Results(val title: String)\n\nfun main() = runBlocking {\n // Replace the uri string with your MongoDB deployment's connection string\n val uri = \"\"\n val mongoClient = MongoClient.create(uri)\n val database = mongoClient.getDatabase(\"sample_mflix\")\n val collection = database.getCollection<Movie>(\"movies\")\n\n val projectionFields = Projections.fields(\n Projections.include(Movie::title.name, Movie::imdb.name),\n Projections.excludeId()\n )\n val resultsFlow = collection.withDocumentClass<Results>()\n .find(lt(Movie::runtime.name, 15))\n .projection(projectionFields)\n .sort(Sorts.descending(Movie::title.name))\n\n resultsFlow.collect { println(it) }\n\n mongoClient.close()\n}\n" + } + ], + "preview": "You can query for multiple documents in a collection by calling the find()\nmethod on a MongoCollection object. Pass a query filter to the\nfind() method to query for and return documents that match the filter in\nthe collection. If you do not include a filter, MongoDB returns all the\ndocuments in the collection.", + "tags": null, + "facets": { + "target_product": ["drivers"], + "programming_language": ["kotlin"] + } + }, + { + "slug": "usage-examples/findOne", + "title": "Find a Document", + "headings": ["Example"], + "paragraphs": "You can retrieve a single document in a collection by chaining together\nthe find() and first() methods on a MongoCollection object.\nYou can pass a query filter to the find() method to query for and\nreturn documents that match the filter in the collection. If you do not\ninclude a filter, MongoDB returns all the documents in the collection. For more information on querying MongoDB with the Kotlin driver, see our\n guide on Querying Documents . 
You can also chain other methods to the find() method\nsuch as sort() which organizes the matched documents in a specified order, and\n projection() which configures the fields included in the returned documents. For more information on the sort() method, see our\n guide on Sorting .\nFor more information on the projection() method, see our\n guide on Projections The find() method returns an instance of FindFlow , a class\nthat offers several methods to access, organize, and traverse the results. FindFlow also obtains methods from its delegate interface Flow from the\nKotlin Coroutines library, such as first() and firstOrNull() .\nThe firstOrNull() method returns the first document from the retrieved results\nor null if there are no results. The first() method returns\nthe first document or throws a NoSuchElementException exception if no\ndocuments match the query. For more information on accessing data from a flow with the Kotlin driver, see our\n guide on Accessing Data From a Flow . The following snippet finds a single document from the movies collection.\nIt uses the following objects and methods: For additional information on the classes and methods mentioned on this\npage, see the following API Documentation: A query filter that is passed to the find() method. The eq \nfilter matches only movies with the title exactly matching the text\n \"The Room\" . A sort that organizes matched documents in descending order by\nrating, so if our query matches multiple documents the returned\ndocument is the one with the highest rating. A projection that includes the objects in the title and imdb \nfields and excludes the _id field using the helper method\n excludeId() . This example connects to an instance of MongoDB using a connection URI.\nTo learn more about connecting to your MongoDB instance, see the\n connection guide . FindFlow find()", + "code": [ + { + "lang": "kotlin", + "value": "import com.mongodb.client.model.Filters.eq\nimport com.mongodb.client.model.Projections\nimport com.mongodb.client.model.Sorts\nimport com.mongodb.kotlin.client.coroutine.MongoClient\nimport kotlinx.coroutines.flow.firstOrNull\nimport kotlinx.coroutines.runBlocking\n\ndata class Movie(val title: String, val runtime: Int, val imdb: IMDB) {\n data class IMDB(val rating: Double)\n}\n\ndata class Results(val title: String, val imdb: Movie.IMDB)\n\nfun main() = runBlocking {\n // Replace the uri string with your MongoDB deployment's connection string\n val uri = \"\"\n val mongoClient = MongoClient.create(uri)\n val database = mongoClient.getDatabase(\"sample_mflix\")\n val collection = database.getCollection<Movie>(\"movies\")\n\n val projectionFields = Projections.fields(\n Projections.include(Movie::title.name, Movie::imdb.name),\n Projections.excludeId()\n )\n val resultsFlow = collection.withDocumentClass<Results>()\n .find(eq(Movie::title.name, \"The Room\"))\n .projection(projectionFields)\n .sort(Sorts.descending(\"${Movie::imdb.name}.${Movie.IMDB::rating.name}\"))\n .firstOrNull()\n\n if (resultsFlow == null) {\n println(\"No results found.\")\n } else {\n println(resultsFlow)\n }\n\n mongoClient.close()\n}\n" + } + ], + "preview": "You can retrieve a single document in a collection by chaining together\nthe find() and first() methods on a MongoCollection object.\nYou can pass a query filter to the find() method to query for and\nreturn documents that match the filter in the collection. 
If you do not\ninclude a filter, MongoDB returns all the documents in the collection.", + "tags": null, + "facets": { + "target_product": ["drivers"], + "programming_language": ["kotlin"] + } + }, + { + "slug": "usage-examples/insert-operations", + "title": "Insert Operations", + "headings": [], + "paragraphs": "Insert a Document Insert Multiple Documents", + "code": [], + "preview": null, + "tags": null, + "facets": { + "target_product": ["drivers"], + "programming_language": ["kotlin"] + } + }, + { + "slug": "usage-examples/insertMany", + "title": "Insert Multiple Documents", + "headings": ["Example"], + "paragraphs": "You can insert multiple documents into a collection in a single\noperation by calling the insertMany() method on a MongoCollection \nobject. To insert them, add your Document objects to a List and pass\nthat List as an argument to insertMany() . If you call the insertMany() method\non a collection that does not exist yet, the server creates it for you. Upon successful insertion, insertMany() returns an instance of\n InsertManyResult . You can retrieve information such as the _id \nfields of the documents you inserted by calling the getInsertedIds() \nmethod on the InsertManyResult instance. If your insert operation fails, the driver raises an exception. For more\ninformation on the types of exceptions raised under specific conditions,\nsee the API documentation for insertMany() , linked at the bottom of\nthis page. The following snippet inserts multiple documents into the movies \ncollection. When you run the example, you should see output with the inserted documents'\n ObjectId values in each of the value fields: For additional information on the classes and methods mentioned on this\npage, see the following API Documentation: This example connects to an instance of MongoDB using a connection URI.\nTo learn more about connecting to your MongoDB instance, see the\n connection guide . insertMany() Document InsertManyResult", + "code": [ + { + "lang": "kotlin", + "value": "import com.mongodb.MongoException\nimport com.mongodb.kotlin.client.coroutine.MongoClient\nimport kotlinx.coroutines.runBlocking\n\ndata class Movie(val title: String)\n\nfun main() = runBlocking {\n // Replace the uri string with your MongoDB deployment's connection string\n val uri = \"\"\n val mongoClient = MongoClient.create(uri)\n val database = mongoClient.getDatabase(\"sample_mflix\")\n val collection = database.getCollection<Movie>(\"movies\")\n\n val movieList = listOf(\n Movie(\"Short Circuit 3\"),\n Movie(\"The Lego Frozen Movie\")\n )\n\n try {\n val result = collection.insertMany(movieList)\n println(\"Success! Inserted document ids: \" + result.insertedIds)\n } catch (e: MongoException) {\n System.err.println(\"Unable to insert due to an error: $e\")\n }\n mongoClient.close()\n}\n" + }, + { + "lang": "console", + "value": "Success! Inserted document ids: {0=BsonObjectId{value=...}, 1=BsonObjectId{value=...}}" + } + ], + "preview": "You can insert multiple documents into a collection in a single\noperation by calling the insertMany() method on a MongoCollection\nobject. To insert them, add your Document objects to a List and pass\nthat List as an argument to insertMany(). 
If you call the insertMany() method\non a collection that does not exist yet, the server creates it for you.", + "tags": null, + "facets": { + "target_product": ["drivers"], + "programming_language": ["kotlin"] + } + }, + { + "slug": "usage-examples/insertOne", + "title": "Insert a Document", + "headings": ["Example"], + "paragraphs": "You can insert a single document into a collection using the insertOne() \nmethod on a MongoCollection object. To insert a document, construct a\n Document object that contains the fields and values that you want to\nstore. If you call the insertOne() method on a collection that does\nnot exist yet, the server automatically creates it for you. Upon a successful insertion, insertOne() returns an instance of\n InsertOneResult . You can retrieve information such as the _id \nfield of the document you inserted by calling the getInsertedId() \nmethod on the InsertOneResult instance. If your insert operation fails, the driver raises an exception. For more\ninformation on the types of exceptions raised under specific conditions,\nsee the API documentation for insertOne() , linked at the bottom of\nthis page. The following snippet inserts a single document into the movies \ncollection. When you run the example, you should see output with the inserted document's\n ObjectId in the value field: For additional information on the classes and methods mentioned on this\npage, see the following API Documentation: This example connects to an instance of MongoDB using a connection URI.\nTo learn more about connecting to your MongoDB instance, see the\n connection guide . insertOne() Document InsertOneResult", + "code": [ + { + "lang": "kotlin", + "value": "import com.mongodb.MongoException\nimport com.mongodb.kotlin.client.coroutine.MongoClient\nimport kotlinx.coroutines.runBlocking\nimport org.bson.codecs.pojo.annotations.BsonId\nimport org.bson.types.ObjectId\n\ndata class Movie(@BsonId val id: ObjectId, val title: String, val genres: List<String>)\n\nfun main() = runBlocking {\n // Replace the uri string with your MongoDB deployment's connection string\n val uri = \"\"\n val mongoClient = MongoClient.create(uri)\n val database = mongoClient.getDatabase(\"sample_mflix\")\n val collection = database.getCollection<Movie>(\"movies\")\n\n try {\n val result = collection.insertOne(\n Movie(ObjectId(), \"Ski Bloopers\", listOf(\"Documentary\", \"Comedy\"))\n )\n println(\"Success! Inserted document id: \" + result.insertedId)\n } catch (e: MongoException) {\n System.err.println(\"Unable to insert due to an error: $e\")\n }\n mongoClient.close()\n}\n" + }, + { + "lang": "console", + "value": "Success! Inserted document id: BsonObjectId{value=...}" + } + ], + "preview": "You can insert a single document into a collection using the insertOne()\nmethod on a MongoCollection object. To insert a document, construct a\nDocument object that contains the fields and values that you want to\nstore. If you call the insertOne() method on a collection that does\nnot exist yet, the server automatically creates it for you.", + "tags": null, + "facets": { + "target_product": ["drivers"], + "programming_language": ["kotlin"] + } + }, + { + "slug": "usage-examples/replaceOne", + "title": "Replace a Document", + "headings": ["Example"], + "paragraphs": "You can replace a single document using the replaceOne() method on\na MongoCollection object. This method removes all the existing fields\nand values from a document (except the _id field) and substitutes it\nwith your replacement document. 
The replaceOne() method accepts a query filter that matches the\ndocument you want to replace and a replacement document that contains the\ndata you want to save in place of the matched document. The replaceOne() \nmethod only replaces the first document that matches the filter. You can optionally pass an instance of ReplaceOptions to the replaceOne() method in\norder to specify the method's behavior. For example, if you set the upsert \nfield of the ReplaceOptions object to true , the operation inserts\na new document from the fields in the replacement document if no documents\nmatch the query filter. See the link to the ReplaceOptions API\ndocumentation at the bottom of this page for more information. Upon successful execution, the replaceOne() method returns an instance\nof UpdateResult . You can retrieve information such as the number of\ndocuments modified by calling the getModifiedCount() method. You can also\nretrieve the value of the document's _id field by calling the\n getUpsertedId() method if you set upsert(true) in the\n ReplaceOptions instance and the operation resulted in the insertion of a new document. If your replacement operation fails, the driver raises an exception.\nFor example, if you try to specify a value for the immutable field\n _id in your replacement document that differs from the original\ndocument, the method throws a MongoWriteException with the message: If your replacement document contains a change that violates unique index\nrules, the method throws a MongoWriteException with an error\nmessage that should look something like this: For more information on the types of exceptions raised under specific\nconditions, see the API documentation for replaceOne() , linked at the\nbottom of this page. In this example, we replace the first match of our query filter in the\n movies collection of the sample_mflix database with a replacement\ndocument. All the fields except for the _id field are deleted from the\noriginal document and are substituted by the replacement document. Before the replaceOne() operation runs, the original document contains\nseveral fields describing the movie. After the operation runs, the resulting\ndocument contains only the fields specified by the replacement document\n( title and fullplot ) and the _id field. The following snippet uses the following objects and methods: After you run the example, you should see output that looks something like\nthis: Or if the example resulted in an upsert: If you query the replaced document, it should look something like this: For additional information on the classes and methods mentioned on this\npage, see the following API Documentation: A query filter that is passed to the replaceOne() method. The eq \nfilter matches only movies with the title exactly matching the text\n 'Music of the Heart' . A replacement document that contains the document that replaces the\nmatching document if it exists. A ReplaceOptions object with the upsert option set to true .\nThis option specifies that the method should insert the data contained in\nthe replacement document if the query filter does not match any documents. This example connects to an instance of MongoDB using a connection URI.\nTo learn more about connecting to your MongoDB instance, see the\n connection guide . 
ReplaceOne ReplaceOptions UpdateResult eq()", + "code": [ + { + "lang": "none", + "value": "After applying the update, the (immutable) field '_id' was found to have been altered to _id: ObjectId('...)" + }, + { + "lang": "none", + "value": "E11000 duplicate key error collection: ..." + }, + { + "lang": "none", + "value": "Modified document count: 1\nUpserted id: null" + }, + { + "lang": "none", + "value": "Modified document count: 0\nUpserted id: BsonObjectId{value=...}" + }, + { + "lang": "none", + "value": "Movie(title=50 Violins, fullplot= A dramatization of the true story of Roberta Guaspari who co-founded the Opus 118 Harlem School of Music)" + }, + { + "lang": "kotlin", + "value": "import com.mongodb.MongoException\nimport com.mongodb.client.model.Filters\nimport com.mongodb.client.model.ReplaceOptions\nimport com.mongodb.kotlin.client.coroutine.MongoClient\nimport kotlinx.coroutines.runBlocking\n\ndata class Movie(val title: String, val fullplot: String)\n\nfun main() = runBlocking {\n // Replace the uri string with your MongoDB deployment's connection string\n val uri = \"\"\n val mongoClient = MongoClient.create(uri)\n val database = mongoClient.getDatabase(\"sample_mflix\")\n val collection = database.getCollection<Movie>(\"movies\")\n\n try {\n val query = Filters.eq(\"title\", \"Music of the Heart\")\n val replaceDocument = Movie( \"50 Violins\", \" A dramatization of the true story of Roberta Guaspari who co-founded the Opus 118 Harlem School of Music\")\n val options = ReplaceOptions().upsert(true)\n val result = collection.replaceOne(query, replaceDocument, options)\n println(\"Modified document count: \" + result.modifiedCount)\n println(\"Upserted id: \" + result.upsertedId) // only contains a non-null value when an upsert is performed\n } catch (e: MongoException) {\n System.err.println(\"Unable to replace due to an error: $e\")\n }\n mongoClient.close()\n}\n" + } + ], + "preview": "You can replace a single document using the replaceOne() method on\na MongoCollection object. This method removes all the existing fields\nand values from a document (except the _id field) and substitutes it\nwith your replacement document.", + "tags": null, + "facets": { + "target_product": ["drivers"], + "programming_language": ["kotlin"] + } + }, + { + "slug": "usage-examples/update-operations", + "title": "Update & Replace Operations", + "headings": [], + "paragraphs": "Update a Document Update Multiple Documents Replace a Document", + "code": [], + "preview": null, + "tags": null, + "facets": { + "target_product": ["drivers"], + "programming_language": ["kotlin"] + } + }, + { + "slug": "usage-examples/updateMany", + "title": "Update Multiple Documents", + "headings": ["Example"], + "paragraphs": "You can update multiple documents using the updateMany() method on\na MongoCollection object. The method accepts a filter that matches the\ndocument you want to update and an update statement that instructs the\ndriver how to change the matching document. The updateMany() method updates\nall the documents in the collection that match the filter. To perform an update with the updateMany() method, you must pass\na query filter and an update document. The query filter specifies which\ndocuments in the collection to match and the update document provides\ninstructions on what changes to make to them. You can optionally pass an instance of UpdateOptions to the updateMany() method in\norder to modify the behavior of the call. 
For example, if you set the\n upsert field of the UpdateOptions object to true and no documents\nmatch the specified query filter, the operation inserts a new document\ncomposed of the fields from both the query and update document. Upon successful execution, the updateMany() method returns an instance\nof UpdateResult . You can retrieve information such as the number of\ndocuments modified by calling the getModifiedCount() method. If you\nspecified upsert(true) in an UpdateOptions object and the\noperation results in an insert, you can retrieve the _id field of the\nnew document by calling the getUpsertedId() method on the\n UpdateResult instance. If your update operation fails, the driver raises an exception and does not update\nany of the documents matching the filter. For example, if you try to set\na value for the immutable field _id in your update document, the\n updateMany() method does not update any documents and throws a\n MongoWriteException with the message: If your update document contains a change that violates unique index\nrules, the method throws a MongoWriteException with an error\nmessage that should look something like this: For more information on the types of exceptions raised under specific\nconditions, see the API documentation for updateMany() , linked at the\nbottom of this page. In this example, we use a Filter builder to match movies that have\nmore than 50 comments in the num_mflix_comments field. Next, we update documents that match our query in the movies collection of the\n sample_mflix database. We perform the following\nupdates to the matching documents: We use the Updates builder, a factory class that contains static\nhelper methods to construct the update document. While you can pass an update\ndocument instead of using the builder, the builder provides type checking and\nsimplified syntax. Read our\n guide on Updates in the Builders\nsection for more information. After you run the example, you should see output similar to the following. If you query the updated document or documents, they should look something like\nthis: For additional information on the classes and methods mentioned on this\npage, see the following API Documentation: Add Frequently Discussed to the array of genres only if it does not\nalready exist Set the value of lastUpdated to the current time. This example connects to an instance of MongoDB using a connection URI.\nTo learn more about connecting to your MongoDB instance, see the\n connection guide . UpdateMany UpdateOptions combine() addToSet() currentDate() UpdateResult", + "code": [ + { + "lang": "none", + "value": "Performing an update on the path '_id' would modify the immutable field '_id'" + }, + { + "lang": "none", + "value": "E11000 duplicate key error collection: ..." + }, + { + "lang": "none", + "value": "Movie(num_mflix_comments=100, genres=[ ... Frequently Discussed], lastUpdated= ... 
)" + }, + { + "lang": "kotlin", + "value": "import com.mongodb.MongoException\nimport com.mongodb.client.model.Filters\nimport com.mongodb.client.model.Updates\nimport com.mongodb.kotlin.client.coroutine.MongoClient\nimport kotlinx.coroutines.runBlocking\nimport java.time.LocalDateTime\n\ndata class Movie(\n val num_mflix_comments: Int,\n val genres: List,\n val lastUpdated: LocalDateTime\n)\n\nfun main() = runBlocking {\n // Replace the uri string with your MongoDB deployment's connection string\n val uri = \"\"\n val mongoClient = MongoClient.create(uri)\n val database = mongoClient.getDatabase(\"sample_mflix\")\n val collection = database.getCollection(\"movies\")\n\n val query = Filters.gt(Movie::num_mflix_comments.name, 50)\n val updates = Updates.combine(\n Updates.addToSet(Movie::genres.name, \"Frequently Discussed\"),\n Updates.currentDate(Movie::lastUpdated.name)\n )\n try {\n val result = collection.updateMany(query, updates)\n println(\"Modified document count: \" + result.modifiedCount)\n } catch (e: MongoException) {\n System.err.println(\"Unable to update due to an error: $e\")\n }\n mongoClient.close()\n}\n" + }, + { + "lang": "console", + "value": "Modified document count: 53" + } + ], + "preview": "You can update multiple documents using the updateMany() method on\na MongoCollection object. The method accepts a filter that matches the\ndocument you want to update and an update statement that instructs the\ndriver how to change the matching document. The updateMany() method updates\nall the documents in the collection that match the filter.", + "tags": null, + "facets": { + "target_product": ["drivers"], + "programming_language": ["kotlin"] + } + }, + { + "slug": "usage-examples/updateOne", + "title": "Update a Document", + "headings": ["Example"], + "paragraphs": "You can update a single document using the updateOne() method on\na MongoCollection object. The method accepts a filter that matches the\ndocument you want to update and an update statement that instructs the\ndriver how to change the matching document. The updateOne() method only\nupdates the first document that matches the filter. To perform an update with the updateOne() method, you must pass\na query filter and an update document. The query filter specifies the criteria\nfor which document to perform the update on and the update document provides\ninstructions on what changes to make to it. You can optionally pass an instance of UpdateOptions to the updateOne() method in\norder to specify the method's behavior. For example, if you set the upsert field of\nthe UpdateOptions object to true , the operation inserts a new\ndocument from the fields in both the query and update document if no documents\nmatch the query filter. See the link to the UpdateOptions API\ndocumentation at the bottom of this page for more information. Upon successful execution, the updateOne() method returns an instance\nof UpdateResult . You can retrieve information such as the number of\ndocuments modified by calling the getModifiedCount() method, or the\nvalue of the _id field by calling the getUpsertedId() method if you\nspecified upsert(true) in an UpdateOptions instance. 
If your update operation fails, the driver raises an exception.\nFor example, if you try to set a value for the immutable field _id in\nyour update document, the method throws a MongoWriteException with the\nmessage: If your update document contains a change that violates unique index\nrules, the method throws a MongoWriteException with an error\nmessage that should look something like this: For more information on the types of exceptions raised under specific\nconditions, see the updateOne() API documentation linked at the\nbottom of this page. In this example, we use a Filter builder to query the collection for\na movie with the title \"Cool Runnings 2\". Next, we perform the following updates to the first match for our query\nin the movies collection of the sample_mflix database: We use the Updates builder, a factory class that contains static\nhelper methods, to construct the update document. While you can pass an update\ndocument instead of using the builder, the builder provides type checking and\nsimplified syntax. See the guide on the Updates builder \nfor more information. After you run the example, you should see output that looks something like this: Or if the example resulted in an upsert: If you query the updated document, it should look something like this: For additional information on the classes and methods mentioned on this\npage, see the following API Documentation: Set the value of runtime to 99 Add Sports to the array of genres only if it does not already exist Set the value of lastUpdated to the current time This example connects to an instance of MongoDB using a connection URI.\nTo learn more about connecting to your MongoDB instance, see the\n connection guide . UpdateOne UpdateOptions combine() set() addToSet() currentDate() UpdateResult", + "code": [ + { + "lang": "none", + "value": "Performing an update on the path '_id' would modify the immutable field '_id'" + }, + { + "lang": "none", + "value": "E11000 duplicate key error collection: ..." + }, + { + "lang": "none", + "value": "Modified document count: 1\nUpserted id: null" + }, + { + "lang": "none", + "value": "Modified document count: 0\nUpserted id: BsonObjectId{value=...}" + }, + { + "lang": "none", + "value": "Movie(title=Cool Runnings 2, runtime=99, genres=[ ... Sports], lastUpdated= ... 
)" + }, + { + "lang": "kotlin", + "value": "import com.mongodb.MongoException\nimport com.mongodb.client.model.Filters\nimport com.mongodb.client.model.UpdateOptions\nimport com.mongodb.client.model.Updates\nimport com.mongodb.kotlin.client.coroutine.MongoClient\nimport kotlinx.coroutines.runBlocking\nimport java.time.LocalDateTime\n\ndata class Movie(\n val title: String,\n val runtime: Int,\n val genres: List,\n val lastUpdated: LocalDateTime\n)\n\nfun main() = runBlocking {\n // Replace the uri string with your MongoDB deployment's connection string\n val uri = \"\"\n val mongoClient = MongoClient.create(uri)\n val database = mongoClient.getDatabase(\"sample_mflix\")\n val collection = database.getCollection(\"movies\")\n\n val query = Filters.eq(Movie::title.name, \"Cool Runnings 2\")\n val updates = Updates.combine(\n Updates.set(Movie::runtime.name, 99),\n Updates.addToSet(Movie::genres.name, \"Sports\"),\n Updates.currentDate(Movie::lastUpdated.name)\n )\n val options = UpdateOptions().upsert(true)\n try {\n val result = collection.updateOne(query, updates, options)\n println(\"Modified document count: \" + result.modifiedCount)\n println(\"Upserted id: \" + result.upsertedId) // only contains a non-null value when an upsert is performed\n } catch (e: MongoException) {\n System.err.println(\"Unable to update due to an error: $e\")\n }\n mongoClient.close()\n}\n" + } + ], + "preview": "You can update a single document using the updateOne() method on\na MongoCollection object. The method accepts a filter that matches the\ndocument you want to update and an update statement that instructs the\ndriver how to change the matching document. The updateOne() method only\nupdates the first document that matches the filter.", + "tags": null, + "facets": { + "target_product": ["drivers"], + "programming_language": ["kotlin"] + } + }, + { + "slug": "usage-examples/watch", + "title": "Watch for Changes", + "headings": ["Process Change Stream Events with .collect()", "Example"], + "paragraphs": "You can keep track of changes to data in MongoDB, such as changes to a\ncollection, database, or deployment, by opening a change stream . A change\nstream allows applications to watch for changes to data and react to them. The change stream returns change event documents when changes occur. A\nchange event contains information about the updated data. Open a change stream by calling the watch() method on a\n MongoCollection , MongoDatabase , or MongoClient object as shown in\nthe following code example: The watch() method optionally takes an aggregation pipeline which\nconsists of an array of stages as the first parameter to filter and\ntransform the change event output as follows: The watch() method returns an instance of ChangeStreamFlow , a class\nthat offers several methods to access, organize, and traverse the results.\n ChangeStreamFlow also inherits methods from its parent class Flow \nfrom the Kotlin Coroutines library. You can call collect() on the ChangeStreamFlow to handle\nevents as they occur. Alternatively, you can use other methods built in to Flow \nto work with the results. To configure options for processing the documents returned from the change\nstream, use member methods of the ChangeStreamFlow object returned\nby watch() . See the link to the ChangeStreamFlow API\ndocumentation at the bottom of this example for more details on the\navailable methods. 
To capture events from a change stream, call the collect() method\nas shown below: The .collect() function triggers when a change event is emitted. You can\nspecify logic in the function to process the event document when it is\nreceived. For update operation change events, change streams only return the modified\nfields by default rather than the entire updated document. You can configure\nyour change stream to also return the most current version of the document\nby calling the fullDocument() member method of the ChangeStreamFlow \nobject with the value FullDocument.UPDATE_LOOKUP as follows: The following example application opens a change stream on the movies collection\nin the sample_mflix database. The application uses an aggregation pipeline\nto filter changes based on operationType so that it only receives insert and update\nevents. Deletes are excluded by omission. The application uses the .collect() method\nto receive and print the filtered change events that occur on the collection. The application launches the collect() operation in a separate coroutine job,\nwhich allows the application to continue running while the change stream is open.\nOnce the operations are complete, the application closes the change stream and exits. For additional information on the classes and methods mentioned on this\npage, see the following resources: This example connects to an instance of MongoDB using a connection URI.\nTo learn more about connecting to your MongoDB instance, see the\n connection guide . Change Streams Server Manual Entry Change Events Server Manual Entry Aggregation Pipeline Server Manual Entry Aggregation Stages Server Manual Entry ChangeStreamFlow API Documentation MongoCollection.watch() API Documentation MongoDatabase.watch() API Documentation MongoClient.watch() API Documentation", + "code": [ + { + "lang": "kotlin", + "value": "val changeStream = collection.watch()\n" + }, + { + "lang": "kotlin", + "value": "val pipeline = listOf(Aggregates.match(Filters.lt(\"fullDocument.runtime\", 15)))\nval changeStream = collection.watch(pipeline)\n" + }, + { + "lang": "kotlin", + "value": "val changeStream = collection.watch()\nchangeStream.collect {\n println(\"Change observed: $it\")\n}\n" + }, + { + "lang": "kotlin", + "value": "val changeStream = collection.watch()\n .fullDocument(FullDocument.UPDATE_LOOKUP)\n" + }, + { + "lang": "kotlin", + "value": "\nimport com.mongodb.client.model.Aggregates\nimport com.mongodb.client.model.Filters\nimport com.mongodb.client.model.Updates\nimport com.mongodb.client.model.changestream.FullDocument\nimport com.mongodb.kotlin.client.coroutine.MongoClient\nimport kotlinx.coroutines.launch\nimport kotlinx.coroutines.runBlocking\nimport java.lang.Thread.sleep\n\ndata class Movie(val title: String, val year: Int)\n\nfun main() = runBlocking {\n // Replace the uri string with your MongoDB deployment's connection string\n val uri = \"\"\n val mongoClient = MongoClient.create(uri)\n val database = mongoClient.getDatabase(\"sample_mflix\")\n val collection = database.getCollection<Movie>(\"movies\")\n\n\n\n val job = launch {\n val pipeline = listOf(\n Aggregates.match(\n Filters.`in`(\"operationType\", mutableListOf(\"insert\", \"update\"))\n )\n )\n val changeStreamFlow = collection.watch(pipeline)\n .fullDocument(FullDocument.DEFAULT)\n changeStreamFlow.collect { event ->\n println(\"Received a change to the collection: $event\")\n }\n }\n\n // Insert events captured by the change stream watcher\n collection.insertOne(Movie(\"Back to the Future\", 1985))\n 
collection.insertOne(Movie(\"Freaky Friday\", 2003))\n\n // Update event captured by the change stream watcher\n collection.updateOne(\n Filters.eq(Movie::title.name, \"Back to the Future\"),\n Updates.set(Movie::year.name, 1986)\n )\n\n // Delete event not captured by the change stream watcher\n collection.deleteOne(Filters.eq(Movie::title.name, \"Freaky Friday\"))\n\n sleep(1000) // Give time for the change stream watcher to process all events\n\n // Cancel coroutine job to stop the change stream watcher\n job.cancel()\n mongoClient.close()\n}\n" + }, + { + "lang": "console", + "value": "Received a change to the collection: ChangeStreamDocument{ operationType=insert, resumeToken={\"_data\": \"82646518C0000000022B022C0100296E5A1004782683FAB5A741B0B0805C207A7FCCED46645F69640064646518C0E6873977DD9059EE0004\"}, namespace=sample_mflix.movies, destinationNamespace=null, fullDocument=Movie(title=Back to the Future, year=1985), fullDocumentBeforeChange=null, documentKey={\"_id\": {\"$oid\": \"646518c0e6873977dd9059ee\"}}, clusterTime=Timestamp{value=7234215589353357314, seconds=1684347072, inc=2}, updateDescription=null, txnNumber=null, lsid=null, wallTime=BsonDateTime{value=1684347072952}}\nReceived a change to the collection: ChangeStreamDocument{ operationType=insert, resumeToken={\"_data\": \"82646518C1000000012B022C0100296E5A1004782683FAB5A741B0B0805C207A7FCCED46645F69640064646518C1E6873977DD9059EF0004\"}, namespace=sample_mflix.movies, destinationNamespace=null, fullDocument=Movie(title=Freaky Friday, year=2003), fullDocumentBeforeChange=null, documentKey={\"_id\": {\"$oid\": \"646518c1e6873977dd9059ef\"}}, clusterTime=Timestamp{value=7234215593648324609, seconds=1684347073, inc=1}, updateDescription=null, txnNumber=null, lsid=null, wallTime=BsonDateTime{value=1684347073112}}\nReceived a change to the collection: ChangeStreamDocument{ operationType=update, resumeToken={\"_data\": \"8264651D4A000000042B022C0100296E5A1004CAEADF0D7376406A8197E3082CDB3D3446645F6964006464651D4A8C2D2556BA204FB40004\"}, namespace=sample_mflix.movies, destinationNamespace=null, fullDocument=null, fullDocumentBeforeChange=null, documentKey={\"_id\": {\"$oid\": \"64651d4a8c2d2556ba204fb4\"}}, clusterTime=Timestamp{value=7234220580105355268, seconds=1684348234, inc=4}, updateDescription=UpdateDescription{removedFields=[], updatedFields={\"year\": 1986}, truncatedArrays=[], disambiguatedPaths=null}, txnNumber=null, lsid=null, wallTime=BsonDateTime{value=1684348234958}}" + } + ], + "preview": "You can keep track of changes to data in MongoDB, such as changes to a\ncollection, database, or deployment, by opening a change stream. A change\nstream allows applications to watch for changes to data and react to them.", + "tags": null, + "facets": { + "target_product": ["drivers"], + "programming_language": ["kotlin"] + } + }, + { + "slug": "usage-examples", + "title": "Usage Examples", + "headings": ["Overview", "How to Use the Usage Examples"], + "paragraphs": "Usage examples provide convenient starting points for popular MongoDB\noperations. Each example provides the following information: An explanation of the operation in the example showing the\npurpose and a sample use case for the method An explanation of how to use the operation, including parameters,\nreturn values, and common exceptions you might encounter A full Kotlin file that you can copy and paste to run the example\nin your own environment These examples use the sample datasets \nprovided by Atlas. 
You can load them into your database on the free tier of\nMongoDB Atlas by following the\n Get Started with Atlas Guide \nor you can\n import the sample dataset into a local MongoDB instance . Once you have imported the dataset, you can copy and paste a usage\nexample into your development environment of choice. You can follow the\n quick start guide to learn more about getting\nstarted with the MongoDB Kotlin driver. Once you've copied a usage example,\nyou'll need to edit the connection URI to get the example connected to\nyour MongoDB instance: You can use the Atlas Connectivity Guide \nto learn how to allow connections to your instance of Atlas and to find the\n connection string you use to replace the\n uri variable in usage examples. If your instance uses\n SCRAM authentication , you can replace\n with your username, with your password, and\n with the IP address or URL of your instance. For more information about connecting to your MongoDB instance, see our\n Connection Guide .", + "code": [ + { + "lang": "kotlin", + "value": "// Replace the following with your MongoDB deployment's connection string.\nval uri = \"\"" + } + ], + "preview": "Usage examples provide convenient starting points for popular MongoDB\noperations. Each example provides the following information:", + "tags": null, + "facets": { + "target_product": ["drivers"], + "programming_language": ["kotlin"] + } + }, + { + "slug": "validate-signatures", + "title": "Validate Driver Artifact Signatures", + "headings": [ + "Overview", + "Procedure", + "Install Encryption Software", + "Download and Import the Public Key", + "Download the Signed File", + "Download the File Signature", + "Verify the Signature", + "Additional Information" + ], + "paragraphs": "You can validate the signature of a Kotlin driver artifact published\non Maven. This process can enhance the security of your system or\nnetwork by allowing you to confirm the authenticity of the driver. The following steps describe how you can validate driver artifact\nsignatures. You must first install the GnuPG encryption suite to use GPG\non the command line. You can install GnuPG by using Homebrew . As an alternative, you can install GPG Suite ,\nwhich provides a GUI to use GPG. There is a Homebrew installation \nfor GPG Suite. Navigate to the Releases page\nin the MongoDB JVM drivers GitHub repository. Each version release contains instructions on\nhow to download and import the public key for verifying signatures. In your terminal, run the curl command to download the signed\nfile corresponding to a version of the driver. For example,\nrunning the following command downloads the signed file for the\nv5.1.0 driver: In your terminal, run the curl command to download the file\nsignature corresponding to a version of the driver. 
For example,\nrunning the following command downloads the file signature for the\nv5.1.0 driver: Finally, you can verify the signature by using the encryption package.\nThe following terminal command uses gpg to verify the artifact signature of the v5.1.0\ndriver: If you successfully verify the signature, you see a message\nsimilar to the following: To learn more about verifying signatures, see Verify Integrity\nof MongoDB Packages in the Server\nmanual.", + "code": [ + { + "lang": "sh", + "value": "curl -LO https://repo.maven.apache.org/maven2/org/mongodb/mongodb-driver-core/5.1.0/mongodb-driver-core-5.1.0.jar" + }, + { + "lang": "sh", + "value": "curl -LO https://repo.maven.apache.org/maven2/org/mongodb/mongodb-driver-core/5.1.0/mongodb-driver-core-5.1.0.jar.asc" + }, + { + "lang": "sh", + "value": "gpg --verify mongodb-driver-core-5.1.0.jar.asc mongodb-driver-core-5.1.0.jar" + }, + { + "lang": "none", + "value": "gpg: Signature made Tue 30 Apr 12:05:34 2024 MDT\ngpg: using RSA key 76E0008D166740A8\ngpg: Good signature from \"MongoDB Java Driver Release Signing Key \" [unknown]\ngpg: WARNING: This key is not certified with a trusted signature!\ngpg: There is no indication that the signature belongs to the owner.\nPrimary key fingerprint: 1A75 005E 1421 9222 3D6A 7C3B 76E0 008D 1667 40A8" + } + ], + "preview": "You can validate the signature of a Kotlin driver artifact published\non Maven. This process can enhance the security of your system or\nnetwork by allowing you to confirm the authenticity of the driver.", + "tags": "java, kotlin, security, SSDLC, encryption", + "facets": { + "genre": ["tutorial"], + "target_product": ["drivers"], + "programming_language": ["kotlin"] + } + }, + { + "slug": "whats-new", + "title": "What's New", + "headings": [ + "What's New in 5.2", + "What's New in 5.1.3", + "What's New in 5.1.2", + "What's New in 5.1.1", + "What's New in 5.1", + "Deprecations in 5.1", + "Improvements in 5.1", + "New Features in 5.1", + "What's New in 5.0", + "What's New in 4.11", + "Deprecations in 4.11", + "New Features in 4.11", + "What's New in 4.10" + ], + "paragraphs": "Learn what's new in: Version 5.2 Version 5.1.3 Version 5.1.2 Version 5.1.1 Version 5.1 Version 5.0 Version 4.11 Version 4.10 New features of the 4.11 driver release include: Atlas Search and Vector Search Indexes in the Indexes guide Adds the SearchIndexType class, which you can pass\nwhen constructing a SearchIndexModel instance. This change\nallows you to specify the index type when creating an Atlas\nSearch or Vector Search index. To learn more, see Atlas Search and Vector Search Indexes in the Indexes guide . The 5.1.3 driver patch release includes the following changes: Fixes an issue that could cause assertion errors when using Cursor \ntypes. The 5.1.2 driver patch release includes the following changes: Support for encoding Kotlin data classes with nullable\ngeneric parameter types. For example, you can encode the Container class\nin the following code: The 5.1.1 driver patch release includes the following changes: When using the MONGODB-OIDC authentication mechanism, you must not\ninclude comma characters in the authMechanismProperties connection\nstring value. To learn more about this behavior, see the\n MONGODB-OIDC section of the Enterprise\nAuthentication guide. This section includes the following information: To avoid breaking changes in future major releases of the driver,\nreplace any application code that depends on deprecated program elements. 
Deprecations in 5.1 Improvements in 5.1 New Features in 5.1 Support for MongoDB server v3.6 is deprecated and will be removed in the\nnext driver version release. To learn how to upgrade your MongoDB server\ndeployment, see Release Notes in the MongoDB server\nmanual. Internal testing of GraalVM native image technology. These tests involve building\nnative applications by using the GraalVM native-image tool. Enhanced support for the MONGODB-OIDC authentication mechanism.\nTo learn more about OIDC, see the MONGODB-OIDC section of the\nEnterprise Authentication Mechanisms guide. Fixes an issue in which operations used the incorrect codec when using\na polymorphic MongoCollection instance. This ensures that\ndiscriminator information is not lost when using bson-kotlinx . Fixes an issue in which the class discriminator was the first field\nwhen decoding, resulting in field type errors when using a polymorphic\n MongoCollection instance. Support for polymorphic serialization. To learn more, see the\n Polymorphic Serialization section of the Kotlin Serialization guide. Introduces the serverMonitoringMode connection URI option. To\nlearn more, see the Connection Options guide. New features of the 5.0 driver release include: The KotlinSerializerCodecProvider constructor now accepts\n serializersModule and bsonConfiguration objects: This makes it easier to customize your configuration. Fixes a Kotlin reflection bug that resulted in container type erasure. This section includes the following information: Deprecations in 4.11 New Features in 4.11 The 4.11 driver release deprecates the following items: To avoid breaking changes in future major releases of the driver,\nreplace any application code that depends on deprecated methods and types. The following network address-related methods are deprecated and will be removed\nin v5.0: The ServerAddress \nmethods getSocketAddress() and getSocketAddresses() . Instead of getSocketAddress() , use the getByName() instance\nmethod of java.net.InetAddress . Instead of getSocketAddresses() , use the getAllByName() instance\nmethod of java.net.InetAddress . The UnixServerAddress \nmethod getUnixSocketAddress() . Instead of getUnixSocketAddress() , construct an instance of\n jnr.unixsocket.UnixSocketAddress . Pass the full path of the UNIX\nsocket file to the constructor. By default, MongoDB creates a UNIX\nsocket file located at \"/tmp/mongodb-27017.sock\" . To learn more\nabout the UnixSocketAddress , see the UnixSocketAddress API documentation. The following methods and types related to the\n StreamFactory \ninterface are deprecated and scheduled for removal in v5.0: If you configure Netty by using\n MongoClientSettings.Builder.streamFactoryFactory() , your code might resemble\nthe following: Replace this code with the TransportSettings.nettyBuilder() \nas shown in the following example: streamFactoryFactory() method from MongoClientSettings.Builder getStreamFactoryFactory() method from MongoClientSettings NettyStreamFactoryFactory class NettyStreamFactory class AsynchronousSocketChannelStreamFactory class AsynchronousSocketChannelStreamFactoryFactory class BufferProvider class SocketStreamFactory class Stream class StreamFactory class StreamFactoryFactory class TlsChannelStreamFactoryFactory class New features of the 4.11 driver release include: Support for connecting to MongoDB by using a SOCKS5 proxy. Added the getSplitEvent() method to the ChangeStreamDocument class\nto identify fragments of a change stream event that exceeds 16MB. 
You must\nuse the aggregation stage $changeStreamSplitLargeEvent in your change\nstream to handle events that exceed 16MB. Added an aggregation stage builder for $vectorSearch . Added Atlas Search index management helpers. Updated Snappy and Zstd compression library dependency versions. To learn\nmore about the current dependency versions, see Network Compression . Added getElapsedTime() methods to the following classes to monitor the\nduration of connection pool events: ConnectionCheckOutFailedEvent ConnectionCheckedOutEvent ConnectionReadyEvent Support for Java 21 virtual threads and structured concurrency. The driver\ninternals were updated to avoid unnecessary pinning of virtual threads\nand to preserve interrupted status of a thread, as the latter matters for\nstructured concurrency where it is used for cancellation. To learn more about virtual threads, see the Virtual Threads \nJDK enhancement proposal. To learn more about structured concurrency, see the\n Structured Concurrency \nJDK enhancement proposal. Updated API documentation for the following types: ClusterListener ServerListener ServerMonitorListener Starting in version 4.10.1 of the Kotlin driver, you must add\nthe bson-kotlinx library as an explicit dependency to use the\n kotlinx-serialization library. Support for Kotlin server-side usage, both for coroutines and for synchronous applications. Codec support for Kotlin data classes. Support for the kotlinx.serialization library", + "code": [ + { + "lang": "kotlin", + "value": "@Serializable\ndata class Box(\n val boxed: T\n)\n\n@Serializable\ndata class Container(\n val box: Box\n)" + }, + { + "lang": "kotlin", + "value": "KotlinSerializerCodec.create(clazz.kotlin, serializersModule=serializersModule, bsonConfiguration=bsonConfiguration)" + }, + { + "lang": "java", + "value": "import com.mongodb.connection.netty.NettyStreamFactoryFactory;\n\n// ...\n\nMongoClientSettings settings = MongoClientSettings.builder()\n .streamFactoryFactory(NettyStreamFactoryFactory.builder().build())\n .build();" + }, + { + "lang": "java", + "value": "import com.mongodb.connection.TransportSettings;\n\n// ...\n\nMongoClientSettings settings = MongoClientSettings.builder()\n .transportSettings(TransportSettings.nettyBuilder().build())\n .build();" + } + ], + "preview": "Learn what's new in:", + "tags": null, + "facets": { + "target_product": ["drivers"], + "programming_language": ["kotlin"] + } + } + ] } diff --git a/search-manifest/tests/resources/s3Manifests/node-current.json b/search-manifest/tests/resources/s3Manifests/node-current.json index 9d55ae7c7..63ecf23d5 100644 --- a/search-manifest/tests/resources/s3Manifests/node-current.json +++ b/search-manifest/tests/resources/s3Manifests/node-current.json @@ -1,3930 +1,3930 @@ { - "url": "http://mongodb.com/docs/drivers/node/current", - "includeInGlobalSearch": true, - "documents": [ - { - "slug": "aggregation-tutorials/filtered-subset", - "title": "Filtered Subset", - "headings": [ - "Introduction", - "Aggregation Task Summary", - "Before You Get Started", - "Tutorial", - "Add a match stage for people who are engineers", - "Add a sort stage to sort from youngest to oldest", - "Add a limit stage to see only three results", - "Add an unset stage to remove unneeded fields", - "Run the aggregation pipeline", - "Interpret results" - ], - "paragraphs": "In this tutorial, you can learn how to use the Node.js driver to\nconstruct an aggregation pipeline, perform the\naggregation on a collection, and print the results by completing and\nrunning a sample 
app. This aggregation performs the following operations: Matches a subset of documents by a field value Formats result documents You can also query for a subset of documents in a collection by using the\nQuery API. To learn how to specify a query, see the\n Read Operations guides . This tutorial demonstrates how to query a collection for a specific\nsubset of documents in a collection. The results contain\ndocuments that describe the three youngest people who are engineers. This example uses one collection, persons , which contains\ndocuments describing people. Each document includes a person's name,\ndate of birth, vocation, and other details. Before you start this tutorial, complete the\n Aggregation Template App instructions to set up a working\nNode.js application. After you set up the app, access the persons collection by adding the\nfollowing code to the application: Delete any existing data in the collections and insert sample data into\nthe persons collection as shown in the following code: To view the complete code for this tutorial, see the Completed Filtered Subset App \non GitHub. First, add a $match stage that finds documents in which\nthe value of the vocation field is \"ENGINEER\" : Next, add a $sort stage that sorts the\ndocuments in descending order by the dateofbirth field to\nlist the youngest people first: Next, add a $limit \nstage to the pipeline to output only the first three documents in\nthe results. Finally, add an $unset stage. The\n $unset stage removes unnecessary fields from the result documents: Use the $unset operator instead of $project to avoid\nmodifying the aggregation pipeline if documents with\ndifferent fields are added to the collection. Add the following code to the end of your application to perform\nthe aggregation on the persons collection: Finally, run the following command in your shell to start your\napplication: The aggregated result contains three documents. The documents\nrepresent the three youngest people with the vocation of \"ENGINEER\" ,\nordered from youngest to oldest. 
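Because the tutorial builds the pipeline one push() at a time, it can help to see the finished array in one place. The following sketch restates the four stages above as a single literal pipeline; it is illustrative only, with the database name taken from the tutorial template and a placeholder URI:

import { MongoClient, type Document } from "mongodb";

// Sketch: the filtered-subset pipeline assembled as one literal array.
const pipeline: Document[] = [
  { $match: { vocation: "ENGINEER" } }, // keep only engineers
  { $sort: { dateofbirth: -1 } },       // youngest first
  { $limit: 3 },                        // keep the three youngest
  { $unset: ["_id", "address"] },       // drop fields not needed in output
];

async function run(uri: string): Promise<void> {
  const client = new MongoClient(uri);
  try {
    const personColl = client.db("agg_tutorials_db").collection("persons");
    for await (const doc of personColl.aggregate(pipeline)) {
      console.log(doc);
    }
  } finally {
    await client.close();
  }
}

run("<connection string>").catch(console.dir); // placeholder URI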
The results omit the _id and address \nfields.", - "code": [ - { - "lang": "javascript", - "value": "const personColl = aggDB.collection(\"persons\");" - }, - { - "lang": "javascript", - "value": "await personColl.deleteMany({});\n\nconst personData = [\n {\n person_id: \"6392529400\",\n firstname: \"Elise\",\n lastname: \"Smith\",\n dateofbirth: new Date(\"1972-01-13T09:32:07Z\"),\n vocation: \"ENGINEER\",\n address: {\n number: 5625,\n street: \"Tipa Circle\",\n city: \"Wojzinmoj\",\n },\n },\n {\n person_id: \"1723338115\",\n firstname: \"Olive\",\n lastname: \"Ranieri\",\n dateofbirth: new Date(\"1985-05-12T23:14:30Z\"),\n gender: \"FEMALE\",\n vocation: \"ENGINEER\",\n address: {\n number: 9303,\n street: \"Mele Circle\",\n city: \"Tobihbo\",\n },\n },\n {\n person_id: \"8732762874\",\n firstname: \"Toni\",\n lastname: \"Jones\",\n dateofbirth: new Date(\"1991-11-23T16:53:56Z\"),\n vocation: \"POLITICIAN\",\n address: {\n number: 1,\n street: \"High Street\",\n city: \"Upper Abbeywoodington\",\n },\n },\n {\n person_id: \"7363629563\",\n firstname: \"Bert\",\n lastname: \"Gooding\",\n dateofbirth: new Date(\"1941-04-07T22:11:52Z\"),\n vocation: \"FLORIST\",\n address: {\n number: 13,\n street: \"Upper Bold Road\",\n city: \"Redringtonville\",\n },\n },\n {\n person_id: \"1029648329\",\n firstname: \"Sophie\",\n lastname: \"Celements\",\n dateofbirth: new Date(\"1959-07-06T17:35:45Z\"),\n vocation: \"ENGINEER\",\n address: {\n number: 5,\n street: \"Innings Close\",\n city: \"Basilbridge\",\n },\n },\n {\n person_id: \"7363626383\",\n firstname: \"Carl\",\n lastname: \"Simmons\",\n dateofbirth: new Date(\"1998-12-26T13:13:55Z\"),\n vocation: \"ENGINEER\",\n address: {\n number: 187,\n street: \"Hillside Road\",\n city: \"Kenningford\",\n },\n },\n];\n\nawait personColl.insertMany(personData);" - }, - { - "lang": "javascript", - "value": "pipeline.push({\n $match: {\n \"vocation\": \"ENGINEER\"\n },\n});" - }, - { - "lang": "javascript", - "value": "pipeline.push({\n $sort: {\n \"dateofbirth\": -1,\n }\n});" - }, - { - "lang": "javascript", - "value": "pipeline.push({\n $limit: 3\n});" - }, - { - "lang": "javascript", - "value": "pipeline.push({\n $unset: [\n \"_id\",\n \"address\",\n ]\n});" - }, - { - "lang": "bash", - "value": "node agg_tutorial.js" - }, - { - "lang": "javascript", - "value": "const aggregationResult = await personColl.aggregate(pipeline);" - }, - { - "lang": "javascript", - "value": "{\n person_id: '7363626383',\n firstname: 'Carl',\n lastname: 'Simmons',\n dateofbirth: 1998-12-26T13:13:55.000Z,\n vocation: 'ENGINEER'\n}\n{\n person_id: '1723338115',\n firstname: 'Olive',\n lastname: 'Ranieri',\n dateofbirth: 1985-05-12T23:14:30.000Z,\n gender: 'FEMALE',\n vocation: 'ENGINEER'\n}\n{\n person_id: '6392529400',\n firstname: 'Elise',\n lastname: 'Smith',\n dateofbirth: 1972-01-13T09:32:07.000Z,\n vocation: 'ENGINEER'\n}" - } - ], - "preview": "In this tutorial, you can learn how to use the Node.js driver to\nconstruct an aggregation pipeline, perform the\naggregation on a collection, and print the results by completing and\nrunning a sample app. 
This aggregation performs the following operations:", - "tags": "code example, node.js, sort, limit, aggregation", - "facets": { - "genre": ["tutorial"], - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "aggregation-tutorials/group-total", - "title": "Group and Total", - "headings": [ - "Introduction", - "Aggregation Task Summary", - "Before You Get Started", - "Tutorial", - "Add a match stage for orders in 2020", - "Add a sort stage to sort by order date", - "Add a group stage to group by email address", - "Add a sort stage to sort by first order date", - "Add a set stage to display the email address", - "Add an unset stage to remove unneeded fields", - "Run the aggregation pipeline", - "Interpret results" - ], - "paragraphs": "In this tutorial, you can learn how to use the Node.js driver to\nconstruct an aggregation pipeline, perform the\naggregation on a collection, and print the results by completing and\nrunning a sample app. This aggregation performs the following operations: Matches a subset of documents by a field value Groups documents by common field values Adds computed fields to each result document This tutorial demonstrates how to group and analyze customer order data. The\nresults show the list of customers who purchased items in 2020 and\nincludes each customer's order history for 2020. This example uses one collection, orders , which contains documents\ndescribing individual product orders. Since each order can correspond to\nonly one customer, the order documents are grouped by the\n customer_id field, which contains customer email addresses. Before you start this tutorial, complete the\n Aggregation Template App instructions to set up a working\nNode.js application. After you set up the app, access the orders collection by adding the\nfollowing code to the application: Delete any existing data and insert sample data into\nthe orders collection as shown in the following code: To view the complete code for this tutorial, see the Completed Group and Total App \non GitHub. First, add a $match stage that matches\norders placed in 2020: Next, add a $sort stage to set an\nascending sort on the orderdate field to surface the earliest\n2020 purchase for each customer in the next stage: Add a $group stage to group\norders by the value of the customer_id field. In this\nstage, add aggregation operations that create the\nfollowing fields in the result documents: first_purchase_date : the date of the customer's first purchase total_value : the total value of all the customer's purchases total_orders : the total number of the customer's purchases orders : the list of all the customer's purchases,\nincluding the date and value of each purchase Next, add another $sort stage to set an\nascending sort on the first_purchase_date field: Add a $set stage to recreate the\n customer_id field from the values in the _id field\nthat were set during the $group stage: Finally, add an $unset stage. 
The\n $unset stage removes the _id field from the result\ndocuments: Add the following code to the end of your application to perform\nthe aggregation on the orders collection: Finally, run the following command in your shell to start your\napplication: The aggregation returns the following summary of customers' orders\nfrom 2020: The result documents contain details from all the orders from\na given customer, grouped by the customer's email address.", - "code": [ - { - "lang": "javascript", - "value": "const ordersColl = aggDB.collection(\"orders\");" - }, - { - "lang": "javascript", - "value": "await ordersColl.deleteMany({});\n\nconst orderData = [\n {\n customer_id: \"elise_smith@myemail.com\",\n orderdate: new Date(\"2020-05-30T08:35:52Z\"),\n value: 231,\n },\n {\n customer_id: \"elise_smith@myemail.com\",\n orderdate: new Date(\"2020-01-13T09:32:07Z\"),\n value: 99,\n },\n {\n customer_id: \"oranieri@warmmail.com\",\n orderdate: new Date(\"2020-01-01T08:25:37Z\"),\n value: 63,\n },\n {\n customer_id: \"tj@wheresmyemail.com\",\n orderdate: new Date(\"2019-05-28T19:13:32Z\"),\n value: 2,\n },\n {\n customer_id: \"tj@wheresmyemail.com\",\n orderdate: new Date(\"2020-11-23T22:56:53Z\"),\n value: 187,\n },\n {\n customer_id: \"tj@wheresmyemail.com\",\n orderdate: new Date(\"2020-08-18T23:04:48Z\"),\n value: 4,\n },\n {\n customer_id: \"elise_smith@myemail.com\",\n orderdate: new Date(\"2020-12-26T08:55:46Z\"),\n value: 4,\n },\n {\n customer_id: \"tj@wheresmyemail.com\",\n orderdate: new Date(\"2021-02-29T07:49:32Z\"),\n value: 1024,\n },\n {\n customer_id: \"elise_smith@myemail.com\",\n orderdate: new Date(\"2020-10-03T13:49:44Z\"),\n value: 102,\n },\n];\n\nawait ordersColl.insertMany(orderData);" - }, - { - "lang": "javascript", - "value": "pipeline.push({\n $match: {\n orderdate: {\n $gte: new Date(\"2020-01-01T00:00:00Z\"),\n $lt: new Date(\"2021-01-01T00:00:00Z\"),\n },\n },\n});" - }, - { - "lang": "javascript", - "value": "pipeline.push({\n $sort: {\n orderdate: 1,\n },\n});" - }, - { - "lang": "javascript", - "value": "pipeline.push({\n $group: {\n _id: \"$customer_id\",\n first_purchase_date: { $first: \"$orderdate\" },\n total_value: { $sum: \"$value\" },\n total_orders: { $sum: 1 },\n orders: { $push: \n { \n orderdate: \"$orderdate\", \n value: \"$value\" \n }\n },\n },\n});" - }, - { - "lang": "javascript", - "value": "pipeline.push({\n $sort: {\n first_purchase_date: 1,\n },\n});" - }, - { - "lang": "javascript", - "value": "pipeline.push({\n $set: {\n customer_id: \"$_id\",\n },\n});" - }, - { - "lang": "javascript", - "value": "pipeline.push({ $unset: [\"_id\"] });" - }, - { - "lang": "bash", - "value": "node agg_tutorial.js" - }, - { - "lang": "javascript", - "value": "const aggregationResult = await ordersColl.aggregate(pipeline);" - }, - { - "lang": "javascript", - "value": "{\n first_purchase_date: 2020-01-01T08:25:37.000Z,\n total_value: 63,\n total_orders: 1,\n orders: [ { orderdate: 2020-01-01T08:25:37.000Z, value: 63 } ],\n customer_id: 'oranieri@warmmail.com'\n}\n{\n first_purchase_date: 2020-01-13T09:32:07.000Z,\n total_value: 436,\n total_orders: 4,\n orders: [\n { orderdate: 2020-01-13T09:32:07.000Z, value: 99 },\n { orderdate: 2020-05-30T08:35:52.000Z, value: 231 },\n { orderdate: 2020-10-03T13:49:44.000Z, value: 102 },\n { orderdate: 2020-12-26T08:55:46.000Z, value: 4 }\n ],\n customer_id: 'elise_smith@myemail.com'\n}\n{\n first_purchase_date: 2020-08-18T23:04:48.000Z,\n total_value: 191,\n total_orders: 2,\n orders: [\n { orderdate: 
2020-08-18T23:04:48.000Z, value: 4 },\n { orderdate: 2020-11-23T22:56:53.000Z, value: 187 }\n ],\n customer_id: 'tj@wheresmyemail.com'\n}" - } - ], - "preview": "In this tutorial, you can learn how to use the Node.js driver to\nconstruct an aggregation pipeline, perform the\naggregation on a collection, and print the results by completing and\nrunning a sample app. This aggregation performs the following operations:", - "tags": "code example, node.js, analyze, aggregation", - "facets": { - "genre": ["tutorial"], - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "aggregation-tutorials/multi-field-join", - "title": "Multi-Field Join", - "headings": [ - "Introduction", - "Aggregation Task Summary", - "Before You Get Started", - "Tutorial", - "Add a lookup stage to link the collections and import fields", - "Add a match stage for products ordered in 2020", - "Add an unset stage to remove unneeded fields", - "Run the aggregation pipeline", - "Interpret results" - ], - "paragraphs": "In this tutorial, you can learn how to use the Node.js driver to\nconstruct an aggregation pipeline, perform the\naggregation on a collection, and print the results by completing and\nrunning a sample app. This aggregation performs a multi-field join. A multi-field join occurs when there are\nmultiple corresponding fields in the documents of two collections that you use to\nmatch documents together. The aggregation matches these documents on the\nfield values and combines information from both into one document. A one-to-many join is a variety of a multi-field join. When you\nperform a one-to-many join, you select one field from a document that\nmatches a field value in multiple documents on the other side of the\njoin. To learn more about these data relationships,\nsee the Wikipedia entries about One-to-many (data model) and\n Many-to-many (data model) . This tutorial demonstrates how to combine data from a collection that\ndescribes product information with another collection that describes\ncustomer orders. The results show a list of products ordered in 2020\nthat also contains details about each order. This example uses two collections: An order can only contain one product, so the aggregation uses a\nmulti-field join to match a product document to documents representing orders of\nthat product. The collections are joined by the name and\n variation fields in documents in the products collection, corresponding\nto the product_name and product_variation fields in documents in\nthe orders collection. products , which contains documents describing the products that\na shop sells orders , which contains documents describing individual orders\nfor products in a shop Before you start this tutorial, complete the\n Aggregation Template App instructions to set up a working\nNode.js application. After you set up the app, access the products and orders \ncollections by adding the following code to the application: Delete any existing data and insert sample data into\nthe products collection as shown in the following code: Delete any existing data and insert sample data into\nthe orders collection as shown in the following code: To view the complete code for this tutorial, see the Completed Multi-field Join App \non GitHub. The first stage of the pipeline is a $lookup stage to join the\n orders collection to the products collection by two\nfields in each collection. The lookup stage contains an\nembedded pipeline to configure the join. 
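Before the stage-by-stage walkthrough that follows, it may help to see the overall shape of that $lookup stage. The sketch below is an outline of the stage built later in this tutorial; the let aliases prdname and prdvartn come from the tutorial's own code:

import type { Document } from "mongodb";

// Sketch: skeleton of a multi-field $lookup. The `let` block exposes
// product-side fields as variables that the embedded pipeline matches on.
const embedded_pl: Document[] = []; // $match and $unset stages are pushed here

const lookupStage: Document = {
  $lookup: {
    from: "orders",                                    // collection to join
    let: { prdname: "$name", prdvartn: "$variation" }, // product-side fields
    pipeline: embedded_pl,                             // performs the matching
    as: "orders",                                      // joined docs land here
  },
};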
Within the embedded pipeline, add a $match stage to match the\nvalues of two fields on each side of the join. Note that the following\ncode uses aliases for the name and variation fields\nset when creating the $lookup stage : Within the embedded pipeline, add another $match stage to match\norders placed in 2020: Within the embedded pipeline, add an $unset stage to remove\nunneeded fields from the orders collection side of the join: After the embedded pipeline is completed, add the\n $lookup stage to the main aggregation pipeline.\nConfigure this stage to store the processed lookup fields in\nan array field called orders : Next, add a $match stage to only show\nproducts for which there is at least one order in 2020,\nbased on the orders array calculated in the previous step: Finally, add an $unset stage. The\n $unset stage removes the _id and description \nfields from the result documents: Add the following code to the end of your application to perform\nthe aggregation on the products collection: Finally, run the following command in your shell to start your\napplication: The aggregated result contains two documents. The documents\nrepresent products for which there were orders placed in 2020.\nEach document contains an orders array field that lists details\nabout each order for that product: The result documents contain details from documents in the\n orders collection and the products collection, joined by\nthe product names and variations.", - "code": [ - { - "lang": "javascript", - "value": "const productsColl = aggDB.collection(\"products\");\nconst ordersColl = aggDB.collection(\"orders\");" - }, - { - "lang": "javascript", - "value": "await productsColl.deleteMany({});\n\nconst productsData = [\n {\n name: \"Asus Laptop\",\n variation: \"Ultra HD\",\n category: \"ELECTRONICS\",\n description: \"Great for watching movies\",\n },\n {\n name: \"Asus Laptop\",\n variation: \"Standard Display\",\n category: \"ELECTRONICS\",\n description: \"Good value laptop for students\",\n },\n {\n name: \"The Day Of The Triffids\",\n variation: \"1st Edition\",\n category: \"BOOKS\",\n description: \"Classic post-apocalyptic novel\",\n },\n {\n name: \"The Day Of The Triffids\",\n variation: \"2nd Edition\",\n category: \"BOOKS\",\n description: \"Classic post-apocalyptic novel\",\n },\n {\n name: \"Morphy Richards Food Mixer\",\n variation: \"Deluxe\",\n category: \"KITCHENWARE\",\n description: \"Luxury mixer turning good cakes into great\",\n },\n];\n\nawait productsColl.insertMany(productsData);" - }, - { - "lang": "javascript", - "value": "await ordersColl.deleteMany({});\n\nconst orderData = [\n {\n customer_id: \"elise_smith@myemail.com\",\n orderdate: new Date(\"2020-05-30T08:35:52Z\"),\n product_name: \"Asus Laptop\",\n product_variation: \"Standard Display\",\n value: 431.43,\n },\n {\n customer_id: \"tj@wheresmyemail.com\",\n orderdate: new Date(\"2019-05-28T19:13:32Z\"),\n product_name: \"The Day Of The Triffids\",\n product_variation: \"2nd Edition\",\n value: 5.01,\n },\n {\n customer_id: \"oranieri@warmmail.com\",\n orderdate: new Date(\"2020-01-01T08:25:37Z\"),\n product_name: \"Morphy Richards Food Mixer\",\n product_variation: \"Deluxe\",\n value: 63.13,\n },\n {\n customer_id: \"jjones@tepidmail.com\",\n orderdate: new Date(\"2020-12-26T08:55:46Z\"),\n product_name: \"Asus Laptop\",\n product_variation: \"Standard Display\",\n value: 429.65,\n },\n];\n\nawait ordersColl.insertMany(orderData);" - }, - { - "lang": "javascript", - "value": "const embedded_pl = 
[];\n\nembedded_pl.push({\n $match: {\n $expr: {\n $and: [\n { $eq: [\"$product_name\", \"$$prdname\"] },\n { $eq: [\"$product_variation\", \"$$prdvartn\"] },\n ],\n },\n },\n});" - }, - { - "lang": "javascript", - "value": "embedded_pl.push({\n $match: {\n orderdate: {\n $gte: new Date(\"2020-01-01T00:00:00Z\"),\n $lt: new Date(\"2021-01-01T00:00:00Z\"),\n },\n },\n});" - }, - { - "lang": "javascript", - "value": "embedded_pl.push({\n $unset: [\"_id\", \"product_name\", \"product_variation\"],\n});" - }, - { - "lang": "javascript", - "value": "pipeline.push({\n $lookup: {\n from: \"orders\",\n let: {\n prdname: \"$name\",\n prdvartn: \"$variation\",\n },\n pipeline: embedded_pl,\n as: \"orders\",\n },\n});" - }, - { - "lang": "javascript", - "value": "pipeline.push({\n $match: {\n orders: { $ne: [] },\n },\n});" - }, - { - "lang": "javascript", - "value": "pipeline.push({\n $unset: [\"_id\", \"description\"],\n});" - }, - { - "lang": "bash", - "value": "node agg_tutorial.js" - }, - { - "lang": "javascript", - "value": "const aggregationResult = await productsColl.aggregate(pipeline);" - }, - { - "lang": "javascript", - "value": "{\n name: 'Asus Laptop',\n variation: 'Standard Display',\n category: 'ELECTRONICS',\n orders: [\n {\n customer_id: 'elise_smith@myemail.com',\n orderdate: 2020-05-30T08:35:52.000Z,\n value: 431.43\n },\n {\n customer_id: 'jjones@tepidmail.com',\n orderdate: 2020-12-26T08:55:46.000Z,\n value: 429.65\n }\n ]\n}\n{\n name: 'Morphy Richards Food Mixer',\n variation: 'Deluxe',\n category: 'KITCHENWARE',\n orders: [\n {\n customer_id: 'oranieri@warmmail.com',\n orderdate: 2020-01-01T08:25:37.000Z,\n value: 63.13\n }\n ]\n}" - } - ], - "preview": "In this tutorial, you can learn how to use the Node.js driver to\nconstruct an aggregation pipeline, perform the\naggregation on a collection, and print the results by completing and\nrunning a sample app.", - "tags": "code example, node.js, lookup, aggregation", - "facets": { - "genre": ["tutorial"], - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "aggregation-tutorials/one-to-one-join", - "title": "One-to-One Join", - "headings": [ - "Introduction", - "Aggregation Task Summary", - "Before You Get Started", - "Tutorial", - "Add a match stage for orders in 2020", - "Add a lookup stage to link the collections", - "Add set stages to create new document fields", - "Add an unset stage to remove unneeded fields", - "Run the aggregation pipeline", - "Interpret results" - ], - "paragraphs": "In this tutorial, you can learn how to use the Node.js driver to\nconstruct an aggregation pipeline, perform the\naggregation on a collection, and print the results by completing and\nrunning a sample app. This aggregation performs a one-to-one join. A one-to-one join occurs\nwhen a document in one collection has a field value that matches a\nsingle document in another collection that has the same field value. The\naggregation matches these documents on the field value and combines\ninformation from both sources into one result. A one-to-one join does not require the documents to have a\none-to-one relationship. To learn more about this data relationship,\nsee the Wikipedia entry about One-to-one (data model) . This tutorial demonstrates how to combine data from a collection that\ndescribes product information with another collection that describes\ncustomer orders. The results show a list of all orders placed in 2020 that\nincludes the product details associated with each order. 
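In contrast to the multi-field join above, a join on a single field needs no embedded pipeline; the localField/foreignField form of $lookup is sufficient. The following sketch outlines the stage this tutorial builds below:

import type { Document } from "mongodb";

// Sketch: single-field $lookup joining orders.product_id to products.id.
// Because the join is one-to-one, the resulting array holds at most one
// element, which a later $set stage unwraps with the $first operator.
const lookupStage: Document = {
  $lookup: {
    from: "products",          // collection to join against
    localField: "product_id",  // field in the orders documents
    foreignField: "id",        // matching field in the products documents
    as: "product_mapping",     // joined documents land in this array
  },
};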
This example uses two collections: An order can only contain one product, so the aggregation uses a\none-to-one join to match an order document to the document for the\nproduct. The collections are joined by a field called product_id \nthat exists in documents in both collections. orders : contains documents describing individual orders\nfor products in a shop products : contains documents describing the products that\na shop sells Before you start this tutorial, complete the\n Aggregation Template App instructions to set up a working\nNode.js application. After you set up the app, access the orders and products \ncollections by adding the following code to the application: Delete any existing data and insert sample data into\nthe orders collection as shown in the following code: Delete any existing data and insert sample data into\nthe products collection as shown in the following code: To view the complete code for this tutorial, see the Completed One-to-one Join App \non GitHub. Add a $match stage that matches\norders placed in 2020: Next, add a $lookup stage. The\n $lookup stage joins the product_id field in the orders \ncollection to the id field in the products collection: Next, add two $set \nstages to the pipeline. The first $set stage sets the product_mapping field\nto the first element in the product_mapping object\ncreated in the previous $lookup stage. The second $set stage creates two new fields, product_name \nand product_category , from the values in the\n product_mapping object field: Because this is a one-to-one join, the $lookup stage\nadds only one array element to the input document. The pipeline\nuses the $first \noperator to retrieve the data from this element. Finally, add an $unset stage. The\n $unset stage removes unnecessary fields from the document: Add the following code to the end of your application to perform\nthe aggregation on the orders collection: Finally, run the following command in your shell to start your\napplication: The aggregated result contains three documents. 
The documents\nrepresent customer orders that occurred in 2020, with the\n product_name and product_category of the ordered product: The result consists of documents that contain fields from\ndocuments in the orders collection and the products \ncollection, joined by matching the product_id field present in\neach original document.", - "code": [ - { - "lang": "javascript", - "value": "const ordersColl = aggDB.collection(\"orders\");\nconst productsColl = aggDB.collection(\"products\");" - }, - { - "lang": "javascript", - "value": "await ordersColl.deleteMany({});\n\nconst orderData = [\n {\n customer_id: \"elise_smith@myemail.com\",\n orderdate: new Date(\"2020-05-30T08:35:52Z\"),\n product_id: \"a1b2c3d4\",\n value: 431.43,\n },\n {\n customer_id: \"tj@wheresmyemail.com\",\n orderdate: new Date(\"2019-05-28T19:13:32Z\"),\n product_id: \"z9y8x7w6\",\n value: 5.01,\n },\n {\n customer_id: \"oranieri@warmmail.com\",\n orderdate: new Date(\"2020-01-01T08:25:37Z\"),\n product_id: \"ff11gg22hh33\",\n value: 63.13,\n },\n {\n customer_id: \"jjones@tepidmail.com\",\n orderdate: new Date(\"2020-12-26T08:55:46Z\"),\n product_id: \"a1b2c3d4\",\n value: 429.65,\n },\n];\n\nawait ordersColl.insertMany(orderData);" - }, - { - "lang": "javascript", - "value": "await productsColl.deleteMany({});\n\nconst productData = [\n {\n id: \"a1b2c3d4\",\n name: \"Asus Laptop\",\n category: \"ELECTRONICS\",\n description: \"Good value laptop for students\",\n },\n {\n id: \"z9y8x7w6\",\n name: \"The Day Of The Triffids\",\n category: \"BOOKS\",\n description: \"Classic post-apocalyptic novel\",\n },\n {\n id: \"ff11gg22hh33\",\n name: \"Morphy Richardds Food Mixer\",\n category: \"KITCHENWARE\",\n description: \"Luxury mixer turning good cakes into great\",\n },\n {\n id: \"pqr678st\",\n name: \"Karcher Hose Set\",\n category: \"GARDEN\",\n description: \"Hose + nosels + winder for tidy storage\",\n },\n];\n\nawait productsColl.insertMany(productData);" - }, - { - "lang": "javascript", - "value": "pipeline.push({\n $match: {\n orderdate: {\n $gte: new Date(\"2020-01-01T00:00:00Z\"),\n $lt: new Date(\"2021-01-01T00:00:00Z\"),\n },\n },\n});" - }, - { - "lang": "javascript", - "value": "pipeline.push({\n $lookup: {\n from: \"products\",\n localField: \"product_id\",\n foreignField: \"id\",\n as: \"product_mapping\",\n },\n});" - }, - { - "lang": "javascript", - "value": "pipeline.push(\n {\n $set: {\n product_mapping: { $first: \"$product_mapping\" },\n },\n },\n {\n $set: {\n product_name: \"$product_mapping.name\",\n product_category: \"$product_mapping.category\",\n },\n }\n );" - }, - { - "lang": "javascript", - "value": "pipeline.push({ $unset: [\"_id\", \"product_id\", \"product_mapping\"] });" - }, - { - "lang": "bash", - "value": "node agg_tutorial.js" - }, - { - "lang": "javascript", - "value": "const aggregationResult = await ordersColl.aggregate(pipeline);" - }, - { - "lang": "javascript", - "value": "{\n customer_id: 'elise_smith@myemail.com',\n orderdate: 2020-05-30T08:35:52.000Z,\n value: 431.43,\n product_name: 'Asus Laptop',\n product_category: 'ELECTRONICS'\n}\n{\n customer_id: 'oranieri@warmmail.com',\n orderdate: 2020-01-01T08:25:37.000Z,\n value: 63.13,\n product_name: 'Morphy Richardds Food Mixer',\n product_category: 'KITCHENWARE'\n}\n{\n customer_id: 'jjones@tepidmail.com',\n orderdate: 2020-12-26T08:55:46.000Z,\n value: 429.65,\n product_name: 'Asus Laptop',\n product_category: 'ELECTRONICS'\n}" - } - ], - "preview": "In this tutorial, you can learn how to use the Node.js driver to\nconstruct 
an aggregation pipeline, perform the\naggregation on a collection, and print the results by completing and\nrunning a sample app.", - "tags": "code example, node.js, lookup, aggregation", - "facets": { - "genre": ["tutorial"], - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "aggregation-tutorials/unpack-arrays", - "title": "Unpack Arrays and Group", - "headings": [ - "Introduction", - "Aggregation Task Summary", - "Before You Get Started", - "Tutorial", - "Add an unwind stage to unpack the array of product orders", - "Add a match stage for products that cost more than $15", - "Add a group stage to group by product type", - "Add a set stage to display the product ID", - "Add an unset stage to remove unneeded fields", - "Run the aggregation pipeline", - "Interpret results" - ], - "paragraphs": "In this tutorial, you can learn how to use the Node.js driver to\nconstruct an aggregation pipeline, perform the\naggregation on a collection, and print the results by completing and\nrunning a sample app. This aggregation performs the following operations: Unwinds an array field into separate documents Matches a subset of documents by a field value Groups documents by common field values Adds computed fields to each result document This tutorial demonstrates how to create insights from customer order\ndata. The results show the list of products ordered that cost more than\n$15, and each document contains the number of units sold and the total\nsale value for each product. This example uses one collection, orders , which contains documents\ndescribing product orders. Since each order contains multiple products,\nthe first step of the aggregation is unpacking the products array\ninto individual product order documents. Before you start this tutorial, complete the\n Aggregation Template App instructions to set up a working\nNode.js application. After you set up the app, access the orders collection by adding the\nfollowing code to the application: Delete any existing data and insert sample data into\nthe orders collection as shown in the following code: To view the complete code for this tutorial, see the Completed Unpack Arrays App \non GitHub. First, add an $unwind stage to separate the\nentries in the products array into individual documents: Next, add a $match stage that matches\nproducts with a products.price value greater than 15 : Add a $group stage to group\norders by the value of the prod_id field. In this\nstage, add aggregation operations that create the\nfollowing fields in the result documents: product : the product name total_value : the total value of all the sales of the product quantity : the number of orders for the product Add a $set stage to recreate the\n product_id field from the values in the _id field\nthat were set during the $group stage: Finally, add an $unset stage. 
The\n $unset stage removes the _id field from the result\ndocuments: Add the following code to the end of your application to perform\nthe aggregation on the orders collection: Finally, run the following command in your shell to start your\napplication: The aggregation returns the following summary of\nthe product orders: The result documents contain details about the total value and\nquantity of orders for products that cost more than $15.", + "code": [ + { + "lang": "javascript", + "value": "const ordersColl = aggDB.collection(\"orders\");" + }, + { + "lang": "javascript", + "value": "await ordersColl.deleteMany({});\n\nconst orderData = [\n {\n order_id: 6363763262239,\n products: [\n {\n prod_id: \"abc12345\",\n name: \"Asus Laptop\",\n price: 431,\n },\n {\n prod_id: \"def45678\",\n name: \"Karcher Hose Set\",\n price: 22,\n },\n ],\n },\n {\n order_id: 1197372932325,\n products: [\n {\n prod_id: \"abc12345\",\n name: \"Asus Laptop\",\n price: 429,\n },\n ],\n },\n {\n order_id: 9812343774839,\n products: [\n {\n prod_id: \"pqr88223\",\n name: \"Morphy Richards Food Mixer\",\n price: 431,\n },\n {\n prod_id: \"def45678\",\n name: \"Karcher Hose Set\",\n price: 21,\n },\n ],\n },\n {\n order_id: 4433997244387,\n products: [\n {\n prod_id: \"def45678\",\n name: \"Karcher Hose Set\",\n price: 23,\n },\n {\n prod_id: \"jkl77336\",\n name: \"Picky Pencil Sharpener\",\n price: 1,\n },\n {\n prod_id: \"xyz11228\",\n name: \"Russell Hobbs Chrome Kettle\",\n price: 16,\n },\n ],\n },\n];\n\nawait ordersColl.insertMany(orderData);" + }, + { + "lang": "javascript", + "value": "pipeline.push({\n $unwind: {\n path: \"$products\",\n },\n});" + }, + { + "lang": "javascript", + "value": "pipeline.push({\n $match: {\n \"products.price\": {\n $gt: 15,\n },\n },\n});" + }, + { + "lang": "javascript", + "value": "pipeline.push({\n $group: {\n _id: \"$products.prod_id\",\n product: { $first: \"$products.name\" },\n total_value: { $sum: \"$products.price\" },\n quantity: { $sum: 1 },\n },\n});" + }, + { + "lang": "javascript", + "value": "pipeline.push({\n $set: {\n product_id: \"$_id\",\n },\n});" + }, + { + "lang": "javascript", + "value": "pipeline.push({ $unset: [\"_id\"] });" + }, + { + "lang": "bash", + "value": "node agg_tutorial.js" + }, + { + "lang": "javascript", + "value": "const aggregationResult = await ordersColl.aggregate(pipeline);" + }, + { + "lang": "javascript", + "value": "{\n product: 'Asus Laptop',\n total_value: 860,\n quantity: 2,\n product_id: 'abc12345'\n}\n{\n product: 'Morphy Richards Food Mixer',\n total_value: 431,\n quantity: 1,\n product_id: 'pqr88223'\n}\n{\n product: 'Russell Hobbs Chrome Kettle',\n total_value: 16,\n quantity: 1,\n product_id: 'xyz11228'\n}\n{\n product: 'Karcher Hose Set',\n total_value: 66,\n quantity: 3,\n product_id: 'def45678'\n}" + } + ], + "preview": "In this tutorial, you can learn how to use the Node.js driver to\nconstruct an aggregation pipeline, perform the\naggregation on a collection, and print the results by completing and\nrunning a sample app.
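One detail the prose describes but never shows directly is what $unwind does to a single document. The following sketch models its effect in plain TypeScript on one of the tutorial's order documents; it is illustrative only:

// Sketch: effect of { $unwind: { path: "$products" } } on one order document.
const input = {
  order_id: 6363763262239,
  products: [
    { prod_id: "abc12345", name: "Asus Laptop", price: 431 },
    { prod_id: "def45678", name: "Karcher Hose Set", price: 22 },
  ],
};

// $unwind emits one output document per array element, replacing the array
// field with that element; all other fields are copied through unchanged.
const unwound = input.products.map((p) => ({ ...input, products: p }));
console.log(unwound); // two documents, each with a single embedded product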
This aggregation performs the following operations:", - "tags": "code example, node.js, analyze, array", - "facets": { - "genre": ["tutorial"], - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "aggregation-tutorials", - "title": "Aggregation Tutorials", - "headings": [ - "Overview", - "Aggregation Template App", - "Available Tutorials" - ], - "paragraphs": "Aggregation tutorials provide detailed explanations of common\naggregation tasks in a step-by-step format. The tutorials are adapted\nfrom examples in the Practical MongoDB Aggregations book by Paul Done. Each tutorial includes the following sections: At the end of each aggregation tutorial, you can find a link to a fully\nrunnable Node.js code file that you can run in your environment. Introduction , which describes the purpose and common use cases of the\naggregation type. This section also describes the example and desired\noutcome that the tutorial demonstrates. Before You Get Started , which describes the necessary databases,\ncollections, and sample data that you must have before building the\naggregation pipeline and performing the aggregation. Tutorial , which describes how to build and run the aggregation\npipeline. This section describes each stage of the completed\naggregation tutorial, and then explains how to run and interpret the\noutput of the aggregation. To learn more about performing aggregations, see the\n Aggregation guide. Before you begin following an aggregation tutorial, you must set up a\nnew Node.js app. You can use this app to connect to a MongoDB\ndeployment, insert sample data into MongoDB, and run the aggregation\npipeline in each tutorial. Once you install the driver, create a file called\n agg_tutorial.js . Paste the following code in this file to create an\napp template for the aggregation tutorials: For every tutorial, you must replace the connection string placeholder with\nyour deployment's connection string. For example, if your connection string is\n \"mongodb+srv://mongodb-example:27017\" , your connection string assignment resembles\nthe following: To run the completed file after you modify the template for a\ntutorial, run the following command in your shell: To learn how to install the driver and connect to MongoDB,\nsee the Download and Install and\n Create a MongoDB Deployment steps of the\nQuick Start guide. In the preceding code, read the code comments to find the sections of\nthe code that you must modify for the tutorial you are following. If you attempt to run the code without making any changes, you will\nencounter a connection error. To learn how to locate your deployment's connection string, see the\n Create a Connection String step of the Quick Start guide. Filtered Subset Group and Total Unpack Arrays and Group One-to-One Join Multi-Field Join", - "code": [ - { - "lang": "javascript", - "value": "const uri = \"mongodb+srv://mongodb-example:27017\";" - }, - { - "lang": "bash", - "value": "node agg_tutorial.js" - }, - { - "lang": "javascript", - "value": "const { MongoClient } = require(\"mongodb\");\n\n// Replace the placeholder with your connection string.\nconst uri = \"\";\nconst client = new MongoClient(uri);\n\nasync function run() {\n try {\n const aggDB = client.db(\"agg_tutorials_db\");\n\n // Get a reference to relevant collections.\n // ... const someColl =\n // ... const anotherColl =\n\n // Delete any existing documents in collections.\n // ... 
await someColl.deleteMany({});\n\n // Insert sample data into the collection or collections.\n // ... const someData = [ ... ];\n\n // ... await someColl.insertMany(someData);\n\n // Create an empty pipeline array.\n const pipeline = [];\n\n // Add code to create pipeline stages.\n // ... pipeline.push({ ... })\n\n // Run the aggregation.\n // ... const aggregationResult = ...\n\n // Print the aggregation results.\n for await (const document of aggregationResult) {\n console.log(document);\n }\n } finally {\n await client.close();\n }\n}\n\nrun().catch(console.dir);\n" - } - ], - "preview": "Aggregation tutorials provide detailed explanations of common\naggregation tasks in a step-by-step format. The tutorials are adapted\nfrom examples in the Practical MongoDB Aggregations book by Paul Done.", - "tags": "node.js, code example, runnable app", - "facets": { - "genre": ["reference"], - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "compatibility", - "title": "Compatibility", - "headings": [ - "MongoDB Compatibility", - "Compatibility Table Legend", - "Language Compatibility", - "Component Compatibility" - ], - "paragraphs": "The following compatibility table specifies the recommended versions of\nthe MongoDB Node.js driver for use with MongoDB. The first column lists the driver version. MongoDB ensures compatibility between the MongoDB Server and the drivers\nfor three years after the server version's end of life (EOL) date. To learn\nmore about the MongoDB release and EOL dates, see\n MongoDB Software Lifecycle Schedules . Icon Explanation \u2713 All features are supported. \u229b The Driver version will work with the MongoDB version, but not all\nnew MongoDB features are supported. No mark The Driver version is not tested with the MongoDB version. Node.js Driver Version MongoDB 7.0 MongoDB 6.0 MongoDB 5.0 MongoDB 4.4 MongoDB 4.2 MongoDB 4.0 MongoDB 3.6 MongoDB 3.4 MongoDB 3.2 MongoDB 3.0 MongoDB 2.6 6.0 to 6.8 \u2713 \u2713 \u2713 \u2713 \u2713 \u2713 \u2713 5.7 to 5.9 \u2713 \u2713 \u2713 \u2713 \u2713 \u2713 \u2713 5.0 to 5.6 \u229b \u2713 \u2713 \u2713 \u2713 \u2713 \u2713 4.8 to 4.17 \u229b \u2713 \u2713 \u2713 \u2713 \u2713 \u2713 4.2 to 4.7 \u229b \u229b \u2713 \u2713 \u2713 \u2713 \u2713 4.0 to 4.1 \u229b \u229b \u2713 \u2713 \u2713 \u2713 \u2713 \u2713 \u2713 \u2713 \u2713 3.7 \u229b \u2713 \u2713 \u2713 \u2713 \u2713 \u2713 \u2713 \u2713 \u2713 3.6 \u229b \u229b \u2713 \u2713 \u2713 \u2713 \u2713 \u2713 \u2713 \u2713 3.3 to 3.5 \u229b \u229b \u229b \u2713 \u2713 \u2713 \u2713 \u2713 \u2713 \u2713 3.1 to 3.2 \u229b \u229b \u229b \u229b \u2713 \u2713 \u2713 \u2713 \u2713 \u2713 3.0 \u2713 \u2713 \u2713 \u2713 \u2713 2.2.12 \u2713 \u2713 \u2713 \u2713 2.0.14 \u2713 \u2713 1.4.29 \u2713 \u2713 When using Node.js Driver version 3.7, you must set the useUnifiedTopology flag to true for certain features. The following compatibility table specifies the recommended versions of\nthe MongoDB Node.js driver for use with a specific version of Node.js. The first column lists the driver version. 
Node.js Driver Version Node.js v20.x.x Node.js v18.x.x Node.js v16.x.x Node.js v14.x.x Node.js v12.x.x Node.js v10.x.x Node.js v8.X.X Node.js v6.X.X Node.js v4.X.X Node.js v0.12.X Node.js v0.10.X Node.js v0.8.X 6.X \u2713 \u2713 \u2713 5.6.X to 5.9.X \u2713 \u2713 \u2713 \u2713 5.0.0 to 5.5.X \u2713 \u2713 \u2713 4.X \u2713 \u2713 \u2713 \u2713 3.X \u2713 \u2713 \u2713 \u2713 \u2713 \u2713 2.X \u2713 \u2713 \u2713 \u2713 \u2713 \u2713 >= 1.4.18 \u2713 \u2713 \u2713 1.4.X \u2713 \u2713 Versions 6.0 and later of the Node.js driver require Node.js v16.20.1 or later. The following table describes add-on component version compatibility for\nversions of the MongoDB Node.js driver. Any other combination of packages might be\nunstable. For more information on how to read the compatibility tables, see our guide\nabout MongoDB Compatibility Tables . Component Node.js Driver v6.x Node.js Driver v5.x Node.js Driver v4.x Node.js Driver v3.x bson ^6.0.0 ^5.0.0 ^4.0.0 ^1.0.0 bson-ext ^4.0.0 ^1.0.0 or ^2.0.0 kerberos ^2.0.1 ^1.0.0 or ^2.0.0 ^1.0.0 or ^2.0.0 ^1.0.0 mongodb-client-encryption ^6.0.0 ^2.3.0 ^1.0.0 or ^2.0.0 ^1.0.0 mongodb-legacy ^6.0.0 ^5.0.0 ^4.0.0 @mongodb-js/zstd ^1.1.0 ^1.0.0 ^1.0.0", - "code": [], - "preview": "Find the recommended versions of the Node.js driver that work with your version of MongoDB.", - "tags": "node.js", - "facets": { - "genre": ["reference"], - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "connection-troubleshooting", - "title": "Connection Troubleshooting", - "headings": [ - "Connection Error", - "Check Your Connection String", - "Configure Your Firewall", - "ECONNREFUSED Error", - "Ensure MongoDB and Your Client Use the Same Protocol", - "ECONNRESET Error", - "Control the Number of File Descriptors", - "Authentication Error", - "Check Your Connection String", - "Verify the User Is in the Authentication Database", - "Error Sending Message", - "Check the User Permissions", - "Configure Your Firewall", - "Check the Number of Connections", - "Too Many Open Connections", - "Check the Number of Connections", - "Timeout Error", - "Set connectTimeoutMS", - "Check the Number of Connections" - ], - "paragraphs": "This page offers potential solutions to issues you might encounter when\nusing the MongoDB Node.js driver to connect to a MongoDB deployment. This page addresses only connection issues. If you encounter any other issues\nwith MongoDB or the driver, visit the following resources: The Frequently Asked Questions (FAQ) for the\nNode.js driver The Issues & Help page, which has\ninformation about reporting bugs, contributing to the driver, and\nfinding more resources The MongoDB Community Forums for\nquestions, discussions, or general technical support The following error message indicates that the driver cannot connect to a server\non the specified hostname or port. Multiple situations can generate this error\nmessage. In this sample error message, the hostname is 127.0.0.1 and the\nport is 27017 : The following sections describe actions you can take to potentially resolve the\nissue. Verify that the hostname and port number in the connection string are both\naccurate. The default port value for a MongoDB instance is\n 27017 , but you can configure MongoDB to communicate on another port. Verify that the ports your MongoDB deployment listens on are not blocked by a\nfirewall on the same network. MongoDB uses port 27017 by default. 
To learn\nmore about the default ports MongoDB uses and how to change them, see\n Default MongoDB Port . Do not open a port in your firewall unless you are sure it's the port\nused by your MongoDB deployment. If the connection is refused when the driver attempts to connect to the MongoDB\ninstance, it generates this error message: The following sections describe actions you can take to potentially resolve the\nissue. In Node.js v17 and later, the DNS resolver uses IPv6 by default when both\nthe client and host support both. For example, if MongoDB uses IPv4 and your\nclient uses IPv6, the driver returns the previous error message. You can configure your MongoDB deployment to use IPv6 mode when starting\nwith mongod or mongos . For more information about how to specify\n IPv6 mode, see\n IP Binding in the server\nmanual. As an alternative, you can explicitly use IPv4 with your client by\nspecifying family: 4 as an\n option to your MongoClient . If the connection is reset when the driver calls client.connect() , it\ngenerates this error message: The following section describes a method that may help resolve the issue. A file descriptor is a unique identifier associated with an open process. In most\noperating systems, each open connection from the driver is associated with a\nfile descriptor. Operating systems typically have a limit on the number of file\ndescriptors used by a single process. An ECONNRESET error can occur\nif the number of connections exceeds this limit. You can set the maximum number of connections by setting maxPoolSize . To\nresolve this error, you can decrease the number of maximum allowed connections\nby setting the value of maxPoolSize . Alternatively, you could increase the\nfile descriptor limit in your operating system. Always be cautious when changing the configuration of your operating system. The Node.js driver can fail to connect to a MongoDB instance if\nthe authorization is not configured correctly. If you are using SCRAM-SHA-256 \nfor authentication and the driver fails to connect, the driver might raise an\nerror message similar to one of the following messages: The following sections describe actions you can take to potentially resolve the\nissue. An invalid connection string is the most common cause of authentication\nissues when attempting to connect to MongoDB using SCRAM-SHA-256 . If your connection string contains a username and password, ensure that they\nare in the correct format. If the username or password includes any of the\nfollowing characters, they must be\n percent encoded : The following example shows how to percent encode \"#MyP@assword?\": This results in the following output: For more information about connection strings,\nsee Connection URI in the Connection Guide. To successfully authenticate a connection by using a username and password with\n SCRAM-SHA-256 , the username must be defined in the authentication database.\nThe default authentication database is the admin database. To use a different\ndatabase for authentication, specify the authSource in the connection string.\nThe following example instructs the driver to use users as the authentication\ndatabase: You can check if this is the issue by attempting to connect to a MongoDB\ninstance hosted on the local machine with the same code. A deployment on\nthe same machine doesn't require any authorization to connect. 
When the driver fails to send a command after you make a request,\nit may display the following error message: The following sections describe actions you can take to potentially resolve the\nissue. Verify that you've accessed the MongoDB deployment with the correct user. The\nterm \"message\" in the error can be a command sent by the driver.\nIf you are using a user that doesn't have permissions to send the command, the\ndriver could generate this error. Also ensure that the user has the appropriate permissions for the message you\nare sending. MongoDB uses Role-Based Access Control (RBAC) to control access\nto a MongoDB deployment. For more information about how to configure RBAC in MongoDB,\nsee Role-Based Access Control . The firewall needs to have an open port for communicating with the MongoDB\ninstance. For more information about configuring the firewall, see\n Configure Your Firewall in\nthe Connection Error section. Each MongoClient instance supports a maximum number of concurrent open\nconnections in its connection pool. You can configure the parameter maxPoolSize \nwhich defines this limit. The default value is 100 . If there are already a\nnumber of open connections equal to maxPoolSize , the server waits until\na connection becomes available. If this wait time exceeds the maxIdleTimeMS \nvalue, the driver responds with an error. For more information about how connection pooling works, see\n How Does Connection Pooling Work in the Node Driver? \nin the FAQ. The driver creates the following error message when it attempts to open a\nconnection, but it's reached the maximum number of connections: The following section describes a method that may help resolve the issue. To create more open connections, increase the value of maxPoolSize . For more\ninformation about checking the number of connections, see\n Check the Number of Connections \nin the Error Sending Message section. When the network is not able to deliver a request from the driver to the server\nquickly enough, it can time out. When this happens, you might receive an error message\nsimilar to the following message: If you receive this error, try the following action to resolve the\nissue. The driver may hang when it's unable to establish a connection because it\ntakes too long attempting to reach unreachable replica set nodes. You can limit the\ntime the driver spends attempting to establish the connection by using the\n connectTimeoutMS setting. To learn more about this setting, see the\n Timeout Options in\nthe Server manual. Ensure the connectTimeoutMS setting is not lower than\nthe highest network latency you have for a member of the set. If one of the\nsecondary members has a latency of 10000 milliseconds, setting the\n connectTimeoutMS to 9000 prevents the driver from ever connecting to that\nmember. The following example sets connectTimeoutMS to 10000 milliseconds. The number of connections to the server may exceed maxPoolSize . 
For more\ninformation about checking the number of connections, see\n Check the Number of Connections \nin the Error Sending Message section.", - "code": [ - { - "lang": "none", - "value": "Error: couldn't connect to server 127.0.0.1:27017" - }, - { - "lang": "none", - "value": "MongoServerSelectionError: connect ECONNREFUSED :" - }, - { - "lang": "js", - "value": "const client = new MongoClient(uri, {\n family: 4,\n});" - }, - { - "lang": "none", - "value": "MongoServerSelectionError: connect ECONNRESET :::" - }, - { - "lang": "none", - "value": "Command failed with error 18 (AuthenticationFailed): 'Authentication\nfailed.' on server :." - }, - { - "lang": "none", - "value": "connection() error occurred during connection handshake: auth error:\nsasl conversation error: unable to authenticate using mechanism\n\"SCRAM-SHA-256\": (AuthenticationFailed) Authentication failed." - }, - { - "lang": "none", - "value": ": / ? # [ ] @" - }, - { - "lang": "javascript", - "value": "console.log(encodeURIComponent('#MyP@assword?'));" - }, - { - "lang": "none", - "value": "\"%23MyP%40assword%3F\"" - }, - { - "lang": "javascript", - "value": "const { MongoClient } = require(\"mongodb\");\nconst uri = \"mongodb://:@:/?authSource=users\";\nconst client = new MongoClient(uri);" - }, - { - "lang": "none", - "value": "com.mongodb.MongoSocketWriteException: Exception sending message" - }, - { - "lang": "none", - "value": "connection refused because too many open connections" - }, - { - "lang": "none", - "value": "timed out while checking out a connection from connection pool: context canceled" - }, - { - "lang": "javascript", - "value": "const client = new MongoClient(uri, {\n connectTimeoutMS: 10000,\n});" - } - ], - "preview": "This page offers potential solutions to issues you might encounter when\nusing the MongoDB Node.js driver to connect to a MongoDB deployment.", - "tags": "code example, node.js, disconnected, help", - "facets": { - "genre": ["reference"], - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "faq", - "title": "FAQ", - "headings": [ - "Why Am I Getting Errors While Connecting to MongoDB?", - "How Does Connection Pooling Work in the Node Driver?", - "What Is the Difference Between \"connectTimeoutMS\", \"socketTimeoutMS\" and \"maxTimeMS\"?", - "What Happens to Running Operations if the Client Disconnects?", - "How Can I Confirm That the Driver Closed Unusable Sockets?", - "How Can I Prevent Sockets From Timing Out Before They Become Active?", - "What Does a Value of \"0\" Mean for \"connectTimeoutMS\" and \"socketTimeoutMS\"?", - "How Can I Prevent Long-Running Operations From Slowing Down the Server?", - "What Does the keepAlive Option Do?", - "What Can I Do If I'm Experiencing Unexpected Network Behavior?", - "How Can I Prevent a Slow Operation From Delaying Other Operations?", - "How Can I Ensure my Connection String Is Valid for a Replica Set?" - ], - "paragraphs": "This page contains frequently asked questions and their corresponding answers. If you can't find an answer to your problem on this page,\nsee the Issues & Help page for next steps and more\nresources. If you have trouble connecting to a MongoDB deployment, see\nthe Connection Troubleshooting Guide \nfor possible solutions. Every MongoClient instance has a built-in connection pool for each server\nin your MongoDB topology. Connection pools open sockets on demand to\nsupport concurrent requests to MongoDB in your application. 
The maximum size of each connection pool is set by the maxPoolSize option, which\ndefaults to 100 . If the number of in-use connections to a server reaches\nthe value of maxPoolSize , the next request to that server will wait\nuntil a connection becomes available. In addition to the sockets needed to support your application's requests,\neach MongoClient instance opens two more sockets per server\nin your MongoDB topology for monitoring the server's state.\nFor example, a client connected to a three-node replica set opens six\nmonitoring sockets. If the application uses the default setting for\n maxPoolSize and only queries the primary (default) node, then\nthere can be at most 106 total connections in the connection pool. If the\napplication uses a read preference to query the\nsecondary nodes, those connection pools grow and there can be\n 306 total connections. To support high numbers of concurrent MongoDB requests\nwithin one process, you can increase maxPoolSize . Connection pools are rate-limited. The maxConnecting option\ndetermines the number of connections that the pool can create in\nparallel at any time. For example, if the value of maxConnecting is\n 2 , the third request that attempts to concurrently check out a\nconnection succeeds only when one of the following cases occurs: You can set the minimum number of concurrent connections to\neach server with the minPoolSize option, which defaults to 0 .\nThe driver initializes the connection pool with this number of sockets. If\nsockets are closed, causing the total number\nof sockets (both in use and idle) to drop below the minimum, more\nsockets are opened until the minimum is reached. You can set the maximum number of milliseconds that a connection can\nremain idle in the pool by setting the maxIdleTimeMS option.\nOnce a connection has been idle for maxIdleTimeMS , the connection\npool removes and replaces it. This option defaults to 0 (no limit). The following default configuration for a MongoClient works for most\napplications: MongoClient supports multiple concurrent requests. For each process,\ncreate a client and reuse it for all operations in a process. This\npractice is more efficient than creating a client for each request. The driver does not limit the number of requests that\ncan wait for sockets to become available, and it is the application's\nresponsibility to limit the size of its pool to bound queuing\nduring a load spike. Requests wait for the amount of time specified in\nthe waitQueueTimeoutMS option, which defaults to 0 (no limit). A request that waits more than the length of time defined by\n waitQueueTimeoutMS for a socket raises a connection error. Use this\noption if it is more important to bound the duration of operations\nduring a load spike than it is to complete every operation. When MongoClient.close() is called by any request, the driver\ncloses all idle sockets and closes all sockets that are in\nuse as they are returned to the pool. Calling MongoClient.close() \ncloses only inactive sockets, so you cannot interrupt or terminate\nany ongoing operations by using this method. The driver closes these\nsockets only when the process completes. The connection pool finishes creating a connection and there are fewer\nthan maxPoolSize connections in the pool. An existing connection is checked back into the pool. The driver's ability to reuse existing connections improves due to\nrate-limits on connection creation. 
To specify the optional settings for your MongoClient , declare one or\nmore available settings in the options object of the constructor as\nfollows: To see all the available settings, see the\n MongoClientOptions \nAPI Documentation. To specify maxTimeMS , chain the maxTimeMS() method with a\ntimeout specification to an operation that returns a Cursor : Setting Description connectTimeoutMS connectTimeoutMS is a connection option that sets the time, in milliseconds,\nfor an individual connection from your connection pool to\nestablish a TCP connection to the MongoDB Server before\ntiming out. Default: 30000 To modify the allowed time for MongoClient.connect to establish a\nconnection to a MongoDB Server, use the serverSelectionTimeoutMS option instead. socketTimeoutMS socketTimeoutMS specifies the amount of time the driver waits\nfor an inactive socket before closing it. The default value is to\nnever time out the socket. This option applies only to sockets that\nhave already been connected. maxTimeMS maxTimeMS \nspecifies the maximum amount of time that the server\nwaits for an operation to complete after it has reached the\nserver. If an operation runs over the specified time limit, it\nreturns a timeout error. You can pass maxTimeMS only to an\nindividual operation or to a cursor. Starting in MongoDB Server version 4.2, the server terminates\nrunning operations such as aggregations and find operations if the\nclient disconnects. To see a full list of operations affected by this\nbehavior, see the Server version 4.2 release notes in the Server manual. Other operations, such as write operations, continue to run on the\nMongoDB Server even if the client disconnects. This behavior can cause data\ninconsistencies if your application retries the operation after the\nclient disconnects. If you experience unexpected network behavior or if a MongoDB process\nfails with an error, you may not receive confirmation that the\ndriver correctly closed the corresponding socket. To make sure that the driver correctly closes the socket in these cases,\nset the socketTimeoutMS option. When a MongoDB process times out, the driver\nwill close the socket. We recommend that you select a value\nfor socketTimeoutMS that is two to three times longer than the\nexpected duration of the slowest operation that your application executes. Having a large connection pool does not always reduce reconnection\nrequests. Consider the following example: An application has a connection pool size of 5 sockets and has the\n socketTimeoutMS option set to 5000 milliseconds. Operations occur,\non average, every 3000 milliseconds, and reconnection requests are\nfrequent. Each socket times out after 5000 milliseconds, which means\nthat all sockets must do something during those 5000 milliseconds to\navoid closing. One message every 3000 milliseconds is not enough to keep the sockets\nactive, so several of the sockets will time out after 5000 milliseconds.\nTo avoid excessive socket timeouts, reduce the number of connections\nthat the driver can maintain in the connection pool by specifying the\n maxPoolSize option. To specify the optional maxPoolSize setting for your MongoClient , declare\nit in the options object of the constructor as follows: If you set the value of connectTimeoutMS or socketTimeoutMS to\n 0 , your application will use the operating system's default socket\ntimeout value. You can prevent long-running operations from slowing down the server by\nspecifying a timeout value. 
You can chain the maxTimeMS() method to\nan operation that returns a Cursor to set a timeout on a specific action. The following example shows how you can chain the maxTimeMS() method\nto an operation that returns a Cursor : The keepAlive connection option specifies whether to enable\n Transmission Control Protocol (TCP) keepalives on a TCP socket. If you enable keepalives,\nthe driver checks whether the connection is active by sending periodic pings\nto your MongoDB deployment. This functionality only works if your\noperating system supports the SO_KEEPALIVE socket option. The keepAliveInitialDelay option specifies the number of\nmilliseconds that the driver waits before initiating a keepalive. The 5.3 driver version release deprecated these options. Starting in\nversion 6.0 of the driver, the keepAlive option is permanently set\nto true , and the keepAliveInitialDelay is set to 300000\nmilliseconds (300 seconds). If your firewall ignores or drops the keepalive messages, you might\nnot be able to identify dropped connections. You might experience unexpected network behavior if the firewall between\nyour application and MongoDB is misconfigured. These firewalls can be\noverly aggressive in their removal of connections, which can lead to\nunexpected errors. Confirm that your firewall exhibits the following behavior: The firewall sends a FIN packet when closing a connection,\ninforming the driver that the socket is closed. The firewall allows keepalive messages. To learn more about keepalive messages, see the What Does the\nkeepAlive Option Do? FAQ entry. When you use the same MongoClient instance to run multiple MongoDB\noperations concurrently, a slow operation can cause delays to other\noperations. Slow operations keep a connection to MongoDB occupied,\nwhich can cause other operations to wait until an additional connection\nbecomes available. If you suspect that slow MongoDB operations are causing delays, you\ncan check the performance of all in-progress operations by using the\nfollowing methods: After you determine which operations are causing delays, try to improve\nthe performance of these operations. Read the Best Practices\nGuide for MongoDB Performance for possible solutions. If you implement performance best practices but still\nexperience delays, you can modify your connection settings to increase\nthe size of the connection pool. A connection pool is the group of\nconnections to the server that the driver maintains at any time. To specify the maximum size of a\nconnection pool, you can set the maxPoolSize option in the\n connection options for your\n MongoClient instance. The default value\nof maxPoolSize is 100 . If the number of in-use connections to a\nserver reaches maxPoolSize , the next operation sent to the server\npauses until a connection to the driver becomes available. The following\ncode sets maxPoolSize to 150 when creating a new MongoClient : Enable the database profiler on your deployment. To learn more, see\n Database Profiler \nin the Server manual. Run the db.currentOp() MongoDB Shell command. To learn more, see the\n db.currentOp() \ndocumentation in the Server manual. Enable connection pool monitoring. To learn more, see\n Connection Pool Monitoring . To learn more about connection pooling, see the How Does Connection\nPooling Work in the Node Driver? FAQ entry. 
The connection string passed to the driver must use exact hostnames for\nthe servers as set in the Replica Set Config .\nGiven the following configuration settings for your Replica Set, in\norder for the Replica Set discovery and failover to work, the driver must have access\nto server1 , server2 , and server3 . If you are unable to find the answer to your question here, try our forums and\nsupport channels listed in the Issues and Help \nsection.", - "code": [ - { - "lang": "js", - "value": "const client = new MongoClient(\"\");" - }, - { - "lang": "javascript", - "value": "const client = new MongoClient(uri, {\n connectTimeoutMS: ,\n socketTimeoutMS: \n});" - }, - { - "lang": "javascript", - "value": "const cursor = myColl.find({}).maxTimeMS(50);" - }, - { - "lang": "javascript", - "value": "const client = new MongoClient(uri, {\n maxPoolSize: ,\n});" - }, - { - "lang": "javascript", - "value": "// Execute a find command\nawait collection\n .find({ $where: \"sleep(100) || true\" })\n .maxTimeMS(50);\n" - }, - { - "lang": "js", - "value": "const client = new MongoClient(uri, { maxPoolSize: 150 });" - }, - { - "lang": "JSON", - "value": "{\n \"_id\": \"testSet\",\n \"version\": 1,\n \"protocolVersion\": 1,\n \"members\": [\n {\n \"_id\": 1,\n \"host\": \"server1:31000\"\n },\n {\n \"_id\": 2,\n \"host\": \"server2:31001\"\n },\n {\n \"_id\": 3,\n \"host\": \"server3:31002\"\n }\n ]\n}" - } - ], - "preview": "This page contains frequently asked questions and their corresponding answers.", - "tags": null, - "facets": { - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "fundamentals/aggregation", - "title": "Aggregation", - "headings": [ - "Overview", - "Analogy", - "Comparing Aggregation and Query Operations", - "References", - "Runnable Examples", - "Aggregation Example", - "Additional Examples" - ], - "paragraphs": "In this guide, you can learn how to use aggregation operations in\nthe MongoDB Node.js driver. Aggregation operations are expressions you can use to produce reduced\nand summarized results in MongoDB. MongoDB's aggregation framework\nallows you to create a pipeline that consists of one or more stages,\neach of which performs a specific operation on your data. You can think of the aggregation pipeline as similar to an automobile factory.\nAutomobile manufacturing requires the use of assembly stations organized\ninto assembly lines. Each station has specialized tools, such as\ndrills and welders. The factory transforms and\nassembles the initial parts and materials into finished products. The aggregation pipeline is the assembly line, aggregation\nstages are the assembly stations, and expression operators are the\nspecialized tools. Using query operations, such as the find() method, you can perform the following actions: Using aggregation operations, you can perform the following actions: Aggregation operations have some limitations : Select which documents to return Select which fields to return Sort the results Perform all query operations Rename fields Calculate fields Summarize data Group values Returned documents must not violate the BSON-document size limit \nof 16 megabytes. Pipeline stages have a memory limit of 100 megabytes by default. You can exceed this\nlimit by setting the allowDiskUse property of AggregateOptions to true . See\nthe AggregateOptions API documentation \nfor more details. The $graphLookup stage has a strict\nmemory limit of 100 megabytes and will ignore allowDiskUse . 
To view a full list of expression operators, see Aggregation\nOperators in the Server manual. To learn about assembling an aggregation pipeline and view examples, see\n Aggregation Pipeline in the\nServer manual. To learn more about creating pipeline stages, see Aggregation\nStages in the Server manual. The example uses sample data about restaurants. The following code\ninserts data into the restaurants collection of the aggregation \ndatabase: For more information on connecting to your MongoDB deployment, see the Connection Guide . To perform an aggregation, pass a list of aggregation stages to the\n collection.aggregate() method. In the example, the aggregation pipeline uses the following aggregation stages: This example produces the following output: For more information, see the aggregate() API documentation . A $match stage to filter for documents whose\n categories array field contains the element Bakery . A $group stage to group the matching documents by the stars \nfield, accumulating a count of documents for each distinct value of stars . To view step-by-step explanations of common aggregation tasks, see the\n Aggregation Tutorials . You can find another aggregation pipeline example in the Aggregation\nFramework with Node.js Tutorial \nblog post on the MongoDB website.", - "code": [ - { - "lang": "javascript", - "value": "const db = client.db(\"aggregation\");\nconst coll = db.collection(\"restaurants\");\n\n// Create sample documents\nconst docs = [\n { stars: 3, categories: [\"Bakery\", \"Sandwiches\"], name: \"Rising Sun Bakery\" },\n { stars: 4, categories: [\"Bakery\", \"Cafe\", \"Bar\"], name: \"Cafe au Late\" },\n { stars: 5, categories: [\"Coffee\", \"Bakery\"], name: \"Liz's Coffee Bar\" },\n { stars: 3, categories: [\"Steak\", \"Seafood\"], name: \"Oak Steakhouse\" },\n { stars: 4, categories: [\"Bakery\", \"Dessert\"], name: \"Petit Cookie\" },\n];\n\n// Insert documents into the restaurants collection\nconst result = await coll.insertMany(docs);" - }, - { - "lang": "json", - "value": "{ _id: 4, count: 2 }\n{ _id: 3, count: 1 }\n{ _id: 5, count: 1 }" - }, - { - "lang": "javascript", - "value": "// Define an aggregation pipeline with a match stage and a group stage\nconst pipeline = [\n { $match: { categories: \"Bakery\" } },\n { $group: { _id: \"$stars\", count: { $sum: 1 } } }\n];\n\n// Execute the aggregation\nconst aggCursor = coll.aggregate(pipeline);\n\n// Print the aggregated results\nfor await (const doc of aggCursor) {\n console.log(doc);\n}" - } - ], - "preview": "In this guide, you can learn how to use aggregation operations in\nthe MongoDB Node.js driver.", - "tags": null, - "facets": { - "genre": ["tutorial"], - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "fundamentals/authentication/enterprise-mechanisms", - "title": "Enterprise Authentication Mechanisms", - "headings": [ - "Kerberos (GSSAPI/SSPI)", - "LDAP (PLAIN)", - "MONGODB-OIDC", - "Azure IMDS", - "GCP IMDS", - "Custom Callback", - "API Documentation" - ], - "paragraphs": "In this guide, you can find sample code for connection to MongoDB with each\nauthentication mechanism available in the MongoDB Enterprise Edition:\n Kerberos (GSSAPI/SSPI) , LDAP (PLAIN) , and MONGODB-OIDC . The GSSAPI authentication mechanism uses your user principal to\nauthenticate to a Kerberos service. 
You can specify this authentication mechanism by performing the\nfollowing actions while specifying options on your\n connection string : The following code sample authenticates to Kerberos for UNIX using GSSAPI . The Node.js driver supports Kerberos on UNIX using the MIT Kerberos library\nand on Windows using the SSPI API. Set the authMechanism parameter to GSSAPI . Set the SERVICE_NAME value in the authMechanismProperties \nparameter if using a value other than mongodb . Specify a SERVICE_REALM value in the authMechanismProperties \nparameter if a custom service realm is required. Specify a CANONICALIZE_HOST_NAME value in the authMechanismProperties \nparameter if canonicalization of the hostname is required. This property can take\nthe following values: none : (Default) Does not perform hostname canonicalization forward : Performs a forward DNS lookup to canonicalize the hostname forwardAndReverse : Performs a forward DNS lookup and then a\nreverse lookup on that value to canonicalize the hostname The gssapiServiceName parameter is deprecated and may be removed\nin future versions of the driver. Use\n authMechanismProperties=SERVICE_NAME: in the\nconnection URI instead.\nSee the\n authMechanismProperties \nparameter documentation for more information. Always URI encode the principal using the encodeURIComponent method\nto ensure it is correctly parsed. The method refers to the GSSAPI authentication mechanism instead\nof Kerberos because the driver authenticates through\n GSSAPI RFC-4752 , the SASL\nmechanism. The PLAIN authentication mechanism uses your username and password to\nauthenticate to a Lightweight Directory Access Protocol (LDAP) server. You can specify this authentication mechanism by setting the authMechanism \nparameter to PLAIN and including your LDAP username and password in the\n connection string as shown\nin the following sample code. The authentication mechanism is named PLAIN instead of LDAP since it\nauthenticates using the PLAIN Simple Authentication and Security Layer\n(SASL) defined in RFC-4616 . The following sections describe how to use the MONGODB-OIDC authentication mechanism to\nauthenticate from various platforms. For more information about the MONGODB-OIDC authentication mechanism, see\n OpenID Connect Authentication and\n MongoDB Server Parameters \nin the MongoDB Server manual. The MONGODB-OIDC authentication mechanism requires MongoDB Server v7.0 or later running\non a Linux platform. If your application runs on an Azure VM, or otherwise uses the\n Azure Instance Metadata Service \n(IMDS), you can authenticate to MongoDB by using the Node.js driver's built-in Azure\nsupport. To specify Azure IMDS OIDC as the authentication mechanism, set the following options\nin your connection string: The following code example shows how to set the preceding connection options: username : If you're using an Azure managed identity, set this to the client ID\nof the managed identity. If you're using a service principal to represent an\nenterprise application, set this to the application ID of the service principal.\nOtherwise, omit this option. authMechanism : Set to MONGODB-OIDC . authMechanismProperties : Set to\n ENVIRONMENT:azure,TOKEN_RESOURCE: .\nReplace the placeholder with the\nvalue of the audience parameter configured on your MongoDB deployment. If your application runs on a Google Compute Engine VM, or otherwise uses the\n GCP Instance Metadata Service ,\nyou can authenticate to MongoDB by using the Node.js driver's built-in GCP\nsupport. 
To specify GCP IMDS OIDC as the authentication mechanism, set the following options\nin your connection string: The following code example shows how to set the preceding connection options: authMechanism : Set to MONGODB-OIDC . authMechanismProperties : Set to\n ENVIRONMENT:gcp,TOKEN_RESOURCE: .\nReplace the placeholder with the\nvalue of the audience parameter configured on your MongoDB deployment. The Node.js driver doesn't offer built-in support for all platforms, including\nAzure Functions and Azure Kubernetes Service (AKS). Instead, you\nmust define a custom callback to use OIDC to authenticate from these platforms. First, define a function that retrieves the access token to use for OIDC authentication.\nThis function must have the following signature: The OIDCCallbackParams parameter contains the following properties, which you can\naccess inside the function: The callback function must return an OIDCResponse object. This object contains the\nfollowing properties: The following example shows a callback function that retrieves an OIDC access token\nfrom a file named access-token.dat in the local file system: After you define your callback function, pass it to the MongoClient constructor\nas part of the authMechanismProperties parameter. The Node.js driver supports\nthe following authentication patterns: Property Value timeoutContext An AbortSignal that aborts the authentication workflow after 30 seconds version The current OIDC API version idpInfo The identity-provider information returned from the server username The username included in the connection string, if any refreshToken The refresh token to request a new access token from the issuer, if any Property Value accessToken The access token to use for authentication. expiresInSeconds Optional. The number of seconds until the access token expires. refreshToken Optional. The refresh token to request a new access token from the issuer. Machine authentication: Used by web services and other applications that require\nno human interaction. Select the Machine Callback tab to see an example of\nthis syntax. Human authentication: Used by database tools, command-line utilities, and other\napplications that involve direct human interaction. Select the Human Callback \ntab to see an example of this syntax. 
For machine authentication, assign the callback function to the\n authMechanismProperties.OIDC_CALLBACK property, as shown in the following\nexample: For human authentication, assign the callback function to the\n authMechanismProperties.OIDC_HUMAN_CALLBACK property, as shown in the following\nexample: To learn more about the methods and types discussed in this\nguide, see the following API documentation: MongoClient OIDCCallbackParams OIDCResponse", - "code": [ - { - "lang": "js", - "value": "const { MongoClient } = require(\"mongodb\");\n\n// specify the placeholder values for your environment in the following lines\nconst clusterUrl = \"\";\nconst principal = encodeURIComponent(\"\");\nconst serviceRealm = \"\";\nconst canonicalizationSetting = \"\";\nconst authMechanismProperties = `SERVICE_REALM:${serviceRealm},CANONICALIZE_HOST_NAME:${canonicalizationSetting}`;\n\nconst authMechanism = \"GSSAPI\";\n\n// Connection URI\nconst uri = `mongodb+srv://${principal}@${clusterUrl}/?authMechanism=${authMechanism}&authMechanismProperties=${authMechanismProperties}`;\n\nconst client = new MongoClient(uri);\n\n// Function to connect to the server\nasync function run() {\n try {\n // Establish and verify connection\n await client.db(\"admin\").command({ ping: 1 });\n console.log(\"Connected successfully to server\");\n } finally {\n // Ensures that the client will close when you finish/error\n await client.close();\n }\n}\nrun().catch(console.dir);" - }, - { - "lang": "js", - "value": "const { MongoClient } = require(\"mongodb\");\n\n// specify the placeholder values for your environment in the following lines\nconst clusterUrl = \"\";\nconst ldapUsername = \"\";\nconst ldapPassword = \"\";\nconst authMechanism = \"PLAIN\";\n\n// Connection URI\nconst uri = `mongodb+srv://${ldapUsername}:${ldapPassword}@${clusterUrl}/?authMechanism=${authMechanism}`;\n\nconst client = new MongoClient(uri);\n\n// Function to connect to the server\nasync function run() {\n try {\n // Establish and verify connection\n await client.db(\"admin\").command({ ping: 1 });\n console.log(\"Connected successfully to server\");\n } finally {\n // Ensures that the client will close when you finish/error\n await client.close();\n }\n}\nrun().catch(console.dir);" - }, - { - "lang": "js", - "value": "const { MongoClient } = require(\"mongodb\");\n\nconst uri = \"mongodb+srv://@:/?authMechanism=MONGODB-OIDC\"\n + \"&authMechanismProperties=ENVIRONMENT:azure,TOKEN_RESOURCE:\";\nconst client = new MongoClient(uri);" - }, - { - "lang": "js", - "value": "const { MongoClient } = require(\"mongodb\");\n\nconst uri = \"mongodb+srv://:/?authMechanism=MONGODB-OIDC\"\n + \"&authMechanismProperties=ENVIRONMENT:gcp,TOKEN_RESOURCE:\";\nconst client = new MongoClient(uri);" - }, - { - "lang": "js", - "value": "const myCallback = (params: OIDCCallbackParams): Promise => { }" - }, - { - "lang": "js", - "value": "const fs = require(\"node:fs\");\n\nconst myCallback = (params: OIDCCallbackParams): Promise => {\n const token = fs.readFileSync(\"access-token.dat\", \"utf8\");\n\n return {\n accessToken: token,\n expiresInSeconds: 300,\n refreshToken: token\n };\n}" - }, - { - "lang": "js", - "value": "const { MongoClient } = require(\"mongodb\");\n\nconst uri = \"mongodb+srv://:/?authMechanism=MONGODB-OIDC\";\nconst client = new MongoClient(uri, {\n authMechanismProperties: {\n OIDC_CALLBACK: myCallback\n }\n});" - }, - { - "lang": "js", - "value": "const { MongoClient } = require(\"mongodb\");\n\nconst uri = 
\"mongodb+srv://:/?authMechanism=MONGODB-OIDC\";\nconst client = new MongoClient(uri, {\n authMechanismProperties: {\n OIDC_HUMAN_CALLBACK: myCallback\n }\n});" - } - ], - "preview": "In this guide, you can find sample code for connection to MongoDB with each\nauthentication mechanism available in the MongoDB Enterprise Edition:\nKerberos (GSSAPI/SSPI), LDAP (PLAIN), and MONGODB-OIDC.", - "tags": "ldap, encryption, principal, tls", - "facets": { - "genre": ["reference"], - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "fundamentals/authentication/mechanisms", - "title": "Authentication Mechanisms", - "headings": [ - "DEFAULT", - "SCRAM-SHA-256", - "SCRAM-SHA-1", - "MONGODB-CR", - "MONGODB-AWS", - "X.509", - "TLS Options" - ], - "paragraphs": "In this guide, you can find sample code for connection to MongoDB with each\nauthentication mechanism available in the MongoDB Community Edition:\n DEFAULT , SCRAM-SHA-256 , SCRAM-SHA-1 , MONGODB-CR ,\n MONGODB-AWS , and X.509 . The DEFAULT authentication mechanism is a fallback setting that instructs\nthe driver to negotiate the first authentication mechanism supported by the\nserver in the following order of preference: If the DEFAULT option is specified, the driver first attempts to\nauthenticate using SCRAM-SHA-256 . If the version of the MongoDB instance\ndoes not support that mechanism, the driver attempts to authenticate using\n SCRAM-SHA-1 . If the instance does not support that mechanism either,\nthe driver attempts to authenticate using MONGODB-CR . You can specify this authentication mechanism by setting the authMechanism \nparameter to DEFAULT in the\n connection string , or by omitting\nthe parameter since it is the default value. Also include your username and\npassword as shown in the code below. For more information on the challenge-response (CR) and salted\nchallenge-response authentication mechanisms (SCRAM) that MongoDB supports,\nsee the SCRAM section of the manual. SCRAM-SHA-256 SCRAM-SHA-1 MONGODB-CR Always URI encode the username and password using the\n encodeURIComponent method to ensure they are correctly parsed. SCRAM-SHA-256 is a salted challenge-response authentication mechanism\n(SCRAM) that uses your username and password, encrypted with the SHA-256 \nalgorithm to authenticate your user. You can specify this authentication mechanism by setting the authMechanism \nto the value SCRAM-SHA-256 in the\n connection string as shown in the\nfollowing sample code. SCRAM-SHA-256 is the default authentication method for MongoDB starting\nin version 4.0 Always URI encode the username and password using the\n encodeURIComponent method to ensure they are correctly parsed. SCRAM-SHA-1 is a salted challenge-response mechanism (SCRAM) that uses your\nusername and password, encrypted with the SHA-1 algorithm to authenticate\nyour user. You can specify this authentication mechanism by setting the authMechanism \nparameter to the value SCRAM-SHA-1 in the\n connection string as shown\nin the following sample code. SCRAM-SHA-1 is the default authentication method for MongoDB versions\n3.0, 3.2, 3.4, and 3.6. Always URI encode the username and password using the\n encodeURIComponent method to ensure they are correctly parsed. MONGODB-CR is a challenge-response authentication mechanism that uses your\nusername and password to authenticate your user. 
You can specify this option by setting the authMechanism parameter to value\n MONGODB-CR in the\n connection string as shown\nin the following sample code. Always URI encode the username and password using the\n encodeURIComponent method to ensure they are correctly parsed. If you have upgraded the authentication schema from MONGODB-CR to\nSCRAM , any MONGODB-CR user\nauthentication requests fail. The MONGODB-AWS authentication mechanism uses your Amazon Web Services\nIdentity and Access Management (AWS IAM) credentials to authenticate your\nuser. If you do not already have the AWS signature library , use the following\n npm command to install it: To connect to a MongoDB instance with MONGODB-AWS authentication\nenabled, specify the MONGODB-AWS authentication mechanism. The driver checks for your credentials in the following sources in order: The MONGODB-AWS authentication mechanism is available only in MongoDB\nversions 4.4 and later. Connection string Environment variables Web identity token file AWS ECS endpoint specified in AWS_CONTAINER_CREDENTIALS_RELATIVE_URI AWS EC2 endpoint. For more information, see IAM Roles for Tasks . The driver only reads the credentials from the first method that it detects\nin the order as given by the preceding list. For example, if you specify\nyour AWS credentials in the connection string, the driver ignores any\ncredentials that you specified in environment variables. To connect to your MongoDB instance with a connection string, pass\nyour AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY \ncredentials to the driver when you attempt to connect. If your AWS\nlogin requires a session token, include your AWS_SESSION_TOKEN as well. The following code shows an example of specifying the MONGODB-AWS \nauthentication mechanism and credentials with a connection string: Always URI encode the username and certificate file path using the\n encodeURIComponent method to ensure they are correctly parsed. To authenticate to your MongoDB instance using AWS credentials stored in\nenvironment variables, set the following variables by using\na shell: After you've set the preceding environment variables, specify the MONGODB-AWS \nauthentication mechanism in your connection string as shown in the following example: Omit the line containing AWS_SESSION_TOKEN if you don't need an AWS\nsession token for that role. You can use the OpenID Connect (OIDC) token obtained from a web identity\nprovider to authenticate to Amazon Elastic Kubernetes Service (EKS) or\nother services. To authenticate with your OIDC token you must first install\n @aws-sdk/credential-providers . You can\ninstall this dependency using the following npm command: Next, create a file that contains your OIDC token. Then\nset the absolute path to this file in an environment variable by using\na shell as shown in the following example: After you've set the preceding environment variable, specify the MONGODB-AWS \nauthentication mechanism in your connection string as shown in the following example: Starting in version 4.11, when you install the optional\n aws-sdk/credential-providers dependency, the driver uses the AWS SDK\nto retrieve credentials from the environment. As a result, if you\nhave a shared AWS credentials file or config file, the driver will\nuse those credentials by default. You can override this behavior by performing one of the following\nactions: Set AWS_SHARED_CREDENTIALS_FILE variable in your shell to point\nto your credentials file. 
Set the equivalent environment variable in your application to point\nto your credentials file. Create an AWS profile for your MongoDB credentials and set the\n AWS_PROFILE environment variable to that profile name. The X.509 authentication mechanism uses\n TLS with X.509 certificates to\nauthenticate by retrieving the distinguished name (DN) from the\nclient certificate. You can specify this authentication mechanism by setting the following\nparameters of your connection string : Pass the location of your client certificate file as the value of\n tlsCertificateKeyFile as a parameter of the connection URI. The X.509 authentication mechanism is only available in MongoDB versions\n2.6 and later. Set the authMechanism parameter to MONGODB-X509 Set the tls parameter to true Always URI encode the certificate file path using the\n encodeURIComponent method to ensure it is parsed correctly. To learn more about enabling TLS on a connection, see\n Enable TLS on a Connection . The following table describes the TLS options that you can set in a\nconnection URI. Parameter Name Type Default Value Description tls boolean false Specifies whether to enable TLS on the connection. tlsInsecure boolean false Specifies whether to allow invalid certificates and mismatched\nhostnames. When set to true , this is equivalent to setting\n tlsAllowInvalidCertificates and tlsAllowInvalidHostnames to\n true . tlsCAFile string Path to file that contains a single or bundle of trusted certificate\nauthorities used in a TLS connection. tlsCertificateKeyFile string Path to the client certificate file or the client private key file. If\nboth are required, the two must be concatenated into a single file. tlsCertificateKeyFilePassword buffer or string String or buffer that contains the password to decrypt the client\nprivate key. tlsAllowInvalidCertificates boolean false Specifies whether the driver permits an invalid certificate to be used\nto connect. 
tlsAllowInvalidHostnames boolean false Specifies whether the driver raises an error when there is a mismatch between the\nserver hostname and TLS certificate hostname.", - "code": [ - { - "lang": "javascript", - "value": "const { MongoClient } = require(\"mongodb\");\n\n// Replace the following with values for your environment.\nconst username = encodeURIComponent(\"\");\nconst password = encodeURIComponent(\"\");\nconst clusterUrl = \"\";\n\nconst authMechanism = \"DEFAULT\";\n\n// Replace the following with your MongoDB deployment's connection string.\nconst uri =\n  `mongodb+srv://${username}:${password}@${clusterUrl}/?authMechanism=${authMechanism}`;\n\n// Create a new MongoClient\nconst client = new MongoClient(uri);\n\n// Function to connect to the server\nasync function run() {\n try {\n // Establish and verify connection\n await client.db(\"admin\").command({ ping: 1 });\n console.log(\"Connected successfully to server\");\n } finally {\n // Ensures that the client will close when you finish/error\n await client.close();\n }\n}\nrun().catch(console.dir);\n" - }, - { - "lang": "javascript", - "value": "const { MongoClient } = require(\"mongodb\");\n\n// Replace the following with values for your environment.\nconst username = encodeURIComponent(\"\");\nconst password = encodeURIComponent(\"\");\nconst clusterUrl = \"\";\n\nconst authMechanism = \"SCRAM-SHA-256\";\n\n// Replace the following with your MongoDB deployment's connection string.\nconst uri =\n  `mongodb+srv://${username}:${password}@${clusterUrl}/?authMechanism=${authMechanism}`;\n\n// Create a new MongoClient\nconst client = new MongoClient(uri);\n\n// Function to connect to the server\nasync function run() {\n try {\n // Establish and verify connection\n await client.db(\"admin\").command({ ping: 1 });\n console.log(\"Connected successfully to server\");\n } finally {\n // Ensures that the client will close when you finish/error\n await client.close();\n }\n}\nrun().catch(console.dir);\n" - }, - { - "lang": "javascript", - "value": "const { MongoClient } = require(\"mongodb\");\n\n// Replace the following with values for your environment.\nconst username = encodeURIComponent(\"\");\nconst password = encodeURIComponent(\"\");\nconst clusterUrl = \"\";\n\nconst authMechanism = \"SCRAM-SHA-1\";\n\n// Replace the following with your MongoDB deployment's connection string.\nconst uri =\n  `mongodb+srv://${username}:${password}@${clusterUrl}/?authMechanism=${authMechanism}`;\n\n// Create a new MongoClient\nconst client = new MongoClient(uri);\n\n// Function to connect to the server\nasync function run() {\n try {\n // Establish and verify connection\n await client.db(\"admin\").command({ ping: 1 });\n console.log(\"Connected successfully to server\");\n } finally {\n // Ensures that the client will close when you finish/error\n await client.close();\n }\n}\nrun().catch(console.dir);\n" - }, - { - "lang": "javascript", - "value": "const { MongoClient } = require(\"mongodb\");\n\n// Replace the following with values for your environment.\nconst username = encodeURIComponent(\"\");\nconst password = encodeURIComponent(\"\");\nconst clusterUrl = \"\";\n\nconst authMechanism = \"MONGODB-CR\";\n\n// Replace the following with your MongoDB deployment's connection string.\nconst uri =\n  `mongodb+srv://${username}:${password}@${clusterUrl}/?authMechanism=${authMechanism}`;\n\n// Create a new MongoClient\nconst client = new MongoClient(uri);\n\n// Function to connect to the server\nasync function run() {\n try {\n // Establish and 
verify connection\n await client.db(\"admin\").command({ ping: 1 });\n console.log(\"Connected successfully to server\");\n } finally {\n // Ensures that the client will close when you finish/error\n await client.close();\n }\n}\nrun().catch(console.dir);\n" - }, - { - "lang": "bash", - "value": "npm install aws4" - }, - { - "lang": "javascript", - "value": "const { MongoClient } = require(\"mongodb\");\n\n// Replace the following with values for your environment.\nconst accessKeyId = encodeURIComponent(\"\");\nconst secretAccessKey = encodeURIComponent(\"\");\nconst clusterUrl = \"\";\n\nconst authMechanism = \"MONGODB-AWS\";\n\nlet uri =\n `mongodb+srv://${accessKeyId}:${secretAccessKey}@${clusterUrl}/?authSource=%24external&authMechanism=${authMechanism}`;\n \n// Uncomment the following lines if your AWS authentication setup requires a session token.\n// const sessionToken = encodeURIComponent(\"\");\n// uri = uri.concat(`&authMechanismProperties=AWS_SESSION_TOKEN:${sessionToken}`);\n\n// Create a new MongoClient.\nconst client = new MongoClient(uri);\n\nasync function run() {\n try {\n // Establish and verify connection.\n await client.db(\"admin\").command({ ping: 1 });\n console.log(\"Connected successfully to server.\");\n } finally {\n // Ensure that the client closes when it finishes/errors.\n await client.close();\n }\n}\nrun().catch(console.dir);\n" - }, - { - "lang": "bash", - "value": "export AWS_ACCESS_KEY_ID=\nexport AWS_SECRET_ACCESS_KEY=\nexport AWS_SESSION_TOKEN=" - }, - { - "lang": "javascript", - "value": "const { MongoClient } = require(\"mongodb\");\n\n// Remember to specify your AWS credentials in environment variables.\nconst clusterUrl = \"\";\nconst authMechanism = \"MONGODB-AWS\";\n\nlet uri =\n `mongodb+srv://${clusterUrl}/?authSource=%24external&authMechanism=${authMechanism}`;\n\n// Create a new MongoClient.\nconst client = new MongoClient(uri);\n\nasync function run() {\n try {\n // Establish and verify connection.\n await client.db(\"admin\").command({ ping: 1 });\n console.log(\"Connected successfully to server.\");\n } finally {\n // Ensure that the client closes when it finishes/errors.\n await client.close();\n }\n}\nrun().catch(console.dir);\n" - }, - { - "lang": "bash", - "value": "npm install @aws-sdk/credential-providers" - }, - { - "lang": "bash", - "value": "export AWS_WEB_IDENTITY_TOKEN_FILE=" - }, - { - "lang": "javascript", - "value": "const { MongoClient } = require(\"mongodb\");\n\n// Remember to specify your AWS credentials in environment variables.\nconst clusterUrl = \"\";\nconst authMechanism = \"MONGODB-AWS\";\n\nlet uri =\n `mongodb+srv://${clusterUrl}/?authSource=%24external&authMechanism=${authMechanism}`;\n\n// Create a new MongoClient.\nconst client = new MongoClient(uri);\n\nasync function run() {\n try {\n // Establish and verify connection.\n await client.db(\"admin\").command({ ping: 1 });\n console.log(\"Connected successfully to server.\");\n } finally {\n // Ensure that the client closes when it finishes/errors.\n await client.close();\n }\n}\nrun().catch(console.dir);\n" - }, - { - "lang": "javascript", - "value": "const { MongoClient } = require(\"mongodb\");\n\n// Replace the following with values for your environment.\nconst clusterUrl = \"\";\nconst clientPEMFile = encodeURIComponent(\"\");\n\nconst authMechanism = \"MONGODB-X509\";\n\n// Replace the following with your MongoDB deployment's connection string.\nconst uri =\n 
`mongodb+srv://${clusterUrl}/?authMechanism=${authMechanism}&tls=true&tlsCertificateKeyFile=${clientPEMFile}`;\n\n// Create a new MongoClient\nconst client = new MongoClient(uri);\n\n// Function to connect to the server\nasync function run() {\n try {\n // Establish and verify connection\n await client.db(\"admin\").command({ ping: 1 });\n console.log(\"Connected successfully to server\");\n } finally {\n // Ensures that the client will close when you finish/error\n await client.close();\n }\n}\nrun().catch(console.dir);\n" - } - ], - "preview": "In this guide, you can find sample code for connection to MongoDB with each\nauthentication mechanism available in the MongoDB Community Edition:\nDEFAULT, SCRAM-SHA-256, SCRAM-SHA-1, MONGODB-CR,\nMONGODB-AWS, and X.509.", - "tags": null, - "facets": { - "genre": ["tutorial"], - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "fundamentals/authentication", - "title": "Authentication", - "headings": ["Overview"], - "paragraphs": "These guides show you how to authenticate to a MongoDB instance using the\nNode.js driver. The Authentication Mechanisms guide contains\nsample connection code using each authentication mechanism supported in the\nMongoDB Community Edition which includes: The Enterprise Authentication Mechanisms guide contains sample\nconnection code using authentication mechanisms available only in MongoDB\nEnterprise Edition which includes: DEFAULT SCRAM-SHA-256 SCRAM-SHA-1 MONGODB-CR MONGODB-AWS X.509 Kerberos (GSSAPI/SSPI) LDAP (PLAIN) MONGODB-OIDC For instructions on MongoDB driver installation and deployment setup, see\nour Connect to MongoDB guide . Select your\nMongoDB deployment type and the Node.js client.", - "code": [], - "preview": "These guides show you how to authenticate to a MongoDB instance using the\nNode.js driver.", - "tags": null, - "facets": { - "genre": ["tutorial"], - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "fundamentals/bson/undefined-values", - "title": "Undefined Values", - "headings": [ - "Overview", - "Ignore Undefined Values", - "Set the Scope for Serializing Undefined Values" - ], - "paragraphs": "In this guide, you can learn to control how the driver serializes\n undefined values. By default, the driver serializes undefined values\nas null values during write operations. To make the driver ignore fields with\n undefined values during serialization, set the\n ignoreUndefined setting to true . When you specify this setting,\nthe driver does not serialize fields with undefined values. The following example inserts two documents. The first insert operation has\nthe ignoreUndefined setting set to true , so the driver does not\nserialize the salesTax field in that operation. The second operation\ninserts a document that has the salesTax field with a null value: The documents appear in the collection as follows: You can specify the ignoreUndefined setting at the following levels: The ignoreUndefined setting automatically applies to the scope of the\nobject instance in which you specified it and any other objects created\nfrom that instance. For example, if you set the ignoreUndefined setting when\ninstantiating a database object, any collection instance created from\nthat object inherits the setting. Furthermore, any operations that you\ncall on that collection instance also inherit the setting. 
The following example performs a find-and-update operation that\ninherits the ignoreUndefined setting from the myDB database\nobject. This operation does not produce any data changes because the\ndriver ignores the gasTax field: You can specify the ignoreUndefined setting again at any level to\noverride any inherited settings. For example, if you set ignoreUndefined to true on your\ncollection object, you can override the setting in individual write\noperations that you execute on that collection. The client level The database level The collection level The operation level", - "code": [ - { - "lang": "javascript", - "value": "await myColl.insertOne(\n {\n state: \"Montana\",\n salesTax: undefined,\n },\n { ignoreUndefined: true }\n);\n\nawait myColl.insertOne({\n state: \"New Hampshire\",\n salesTax: undefined,\n});" - }, - { - "lang": "javascript", - "value": "{\n _id: ...,\n state: \"Montana\",\n},\n{\n _id: ...,\n state: \"New Hampshire\",\n salesTax: null\n}" - }, - { - "lang": "javascript", - "value": "const myDB = client.db(\"test\", { ignoreUndefined: true });\n\n// The collection inherits the ignoreUndefined setting\nconst myColl = myDB.collection(\"states\");\n\n// Any write operation will not serialize undefined values\nawait myColl.findOneAndUpdate(\n { state: \"Georgia\" },\n { $set: { gasTax: undefined } }\n);" - }, - { - "lang": "javascript", - "value": "const myColl = myDB.collection(\"states\", { ignoreUndefined: true });\n\n// The insert operation will not serialize undefined values\nawait myColl.insertOne({\n state: \"South Dakota\",\n capitalGainsTax: undefined,\n});\n\n// The insert operation will serialize undefined values\nawait myColl.insertOne(\n { state: \"Texas\", capitalGainsTax: undefined },\n { ignoreUndefined: false }\n);" - } - ], - "preview": "In this guide, you can learn to control how the driver serializes\nundefined values. By default, the driver serializes undefined values\nas null values during write operations.", - "tags": null, - "facets": { - "genre": ["tutorial"], - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "fundamentals/bson/utf8-validation", - "title": "UTF-8 Validation", - "headings": [ - "Overview", - "Specify the UTF-8 Validation Setting", - "Set the Validation Scope" - ], - "paragraphs": "In this guide, you can learn how to enable or disable the Node.js driver's\n UTF-8 validation feature. UTF-8 is a character encoding specification\nthat ensures compatibility and consistent presentation across most operating\nsystems, applications, and language character sets. If you enable validation, the driver throws an error when it attempts to\nconvert data that contains invalid UTF-8 characters. The validation adds\nprocessing overhead since it needs to check the data. If you disable validation, your application avoids the validation processing\noverhead, but cannot guarantee consistent presentation of invalid UTF-8 data. The driver enables UTF-8 validation by default. It checks documents for any\ncharacters that are not encoded in a valid UTF-8 format when it transfers data\nbetween your application and MongoDB. Read the sections below to learn how to set UTF-8 validation using the\nNode.js driver. The current version of the Node.js driver automatically substitutes\ninvalid UTF-8 characters with alternate valid UTF-8 ones before\nvalidation when you send data to MongoDB. 
Therefore, the validation\nonly throws an error when the setting is enabled and the driver\nreceives invalid UTF-8 document data from MongoDB. You can specify whether the driver performs UTF-8 validation by\ndefining the enableUtf8Validation setting in the options parameter\nwhen you create a client, reference a database or collection, or call a\nCRUD operation. If you omit the setting, the driver enables UTF-8 validation. See the following for code examples that demonstrate how to disable UTF-8\nvalidation on the client, database, collection, or CRUD operation: If your application reads invalid UTF-8 from MongoDB while the\n enableUtf8Validation option is enabled, it throws a BSONError that\ncontains the following message: The enableUtf8Validation setting automatically applies to the scope of the\nobject instance on which you included it, and any other objects created by\ncalls on that instance. For example, if you include the option on the call to instantiate a database\nobject, any collection instance you construct from that object inherits\nthe setting. Any operations you call on that collection instance also\ninherit the setting. You can override the setting at any level of scope by including it when\nconstructing the object instance or when calling an operation. For example, if you disable validation on the collection object, you can\noverride the setting in individual CRUD operation calls on that\ncollection.", - "code": [ - { - "lang": "javascript", - "value": "// disable UTF-8 validation on the client\nnew MongoClient('', { enableUtf8Validation: false });\n\n// disable UTF-8 validation on the database\nclient.db('', { enableUtf8Validation: false });\n\n// disable UTF-8 validation on the collection\ndb.collection('', { enableUtf8Validation: false });\n\n// disable UTF-8 validation on a specific operation call\nawait myColl.findOne({ title: 'Cam Jansen'}, { enableUtf8Validation: false });" - }, - { - "lang": null, - "value": "Invalid UTF-8 string in BSON document" - }, - { - "lang": "javascript", - "value": "const database = client.db('books', { enableUtf8Validation: false });\n\n// The collection inherits the UTF-8 validation disabled setting from the database\nconst myColl = database.collection('mystery');\n\n// CRUD operation runs with UTF-8 validation disabled\nawait myColl.findOne({ title: 'Encyclopedia Brown' });" - }, - { - "lang": "javascript", - "value": "const collection = database.collection('mystery', { enableUtf8Validation: false });\n\n// CRUD operation runs with UTF-8 validation enabled\nawait myColl.findOne({ title: 'Trixie Belden' }, { enableUtf8Validation: true });\n\n// CRUD operation runs with UTF-8 validation disabled\nawait myColl.findOne({ title: 'Enola Holmes' });" - } - ], - "preview": "In this guide, you can learn how to enable or disable the Node.js driver's\nUTF-8 validation feature. 
UTF-8 is a character encoding specification\nthat ensures compatibility and consistent presentation across most operating\nsystems, applications, and language character sets.", - "tags": null, - "facets": { - "genre": ["tutorial"], - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "fundamentals/bson", - "title": "BSON Settings", - "headings": ["Overview"], - "paragraphs": "Learn how to configure your application's BSON serialization settings.\nThe guides in this section describe the following topics: Undefined Values : Control how the\ndriver serializes undefined values UTF-8 Validation : Enable or disable\nthe UTF-8 validation feature", - "code": [], - "preview": "Learn how to configure your application's BSON serialization settings.\nThe guides in this section describe the following topics:", - "tags": null, - "facets": { - "genre": ["tutorial"], - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "fundamentals/collations", - "title": "Collations", - "headings": [ - "Overview", - "Usage", - "Collation Parameters", - "Collation Examples", - "Set a Default Collation on a Collection", - "Assign a Collation to an Index", - "Collation Query Examples", - "find() and sort() Example", - "findOneAndUpdate() Example", - "findOneAndDelete() Example", - "Aggregation Example" - ], - "paragraphs": "Collations are available in MongoDB 3.4 and later. This guide shows you how to use collations , a set of sorting rules, to\nrun operations using string ordering for specific languages and locales (a\ncommunity or region that shares common language idioms). MongoDB sorts strings using binary collation by default. This collation\nmethod uses the ASCII standard \ncharacter values to compare and order strings. Languages and locales\nhave specific character ordering conventions that differ from the ASCII\nstandard. For example, in Canadian French, the right-most accented character determines\nthe ordering for strings when the other characters are the same. Consider the\nfollowing French words: cote , cot\u00e9 , c\u00f4te , and c\u00f4t\u00e9 . MongoDB sorts them in the following order using the default binary collation: MongoDB sorts them in the following order using the Canadian French collation: You can specify a collation when you create a new collection or new index. You\ncan also specify a collation for CRUD operations \nand aggregations. When you create a new collection with a collation, you define the default\ncollation for any of the operations that support collation called on that\ncollection. You can override the collation for an operation by specifying a\ndifferent one. When you create an index with a collation, you specify the sort order for\noperations that use that index. To use the collation in the index, you\nmust provide a matching collation in the operation, and the operation must\nuse the index. While most index types support collation, the following\ntypes support only binary comparison: Currently, you cannot create a collation on an existing collection. To use\ncollations with an existing collection, create an index with the collation\nand specify the same collation in your operations on it. text 2d geoHaystack The collation object contains the following parameters: You must specify the locale field in the collation; all other fields\nare optional. 
For a complete list of supported locales and the default values\nfor the locale fields, see Supported Languages and Locales .\nFor descriptions of each field, see the Collation Document MongoDB\nmanual entry . In the following example, we create a new collection called souvenirs and\nassign a default collation with the \"fr_CA\" locale. The collation applies\nto all operations that support collation performed on that\ncollection. Any of the operations that support collations automatically apply the collation\ndefined on the collection. The query below searches the souvenirs \ncollection and applies the \"fr_CA\" locale collation: You can specify a different collation as a parameter in an operation that\nsupports collations. The following query specifies the \"is\" Iceland locale\nand caseFirst optional parameter with the value \"upper\" : In the following example, we create a new index on the title field of\na collection with a collation set to the \"en_US\" locale. The following query uses the index we created: The following queries do not use the index that we created. The first\nquery does not include a collation and the second contains a different\nstrength value than the collation on the index. Operations that read, update, and delete documents from a collection can use\ncollations. This section includes examples of a selection of these. See the\nMongoDB manual for a full list of operations that support collation . The following example calls both find() and sort() on a collection\nthat uses the default binary collation. We use the German collation by\nsetting the value of the locale parameter to \"de\" . The following example calls the findOneAndUpdate() operation on a\ncollection that uses the default binary collation. The collection contains the\nfollowing documents: Consider the following findOneAndUpdate() operation on this collection\nwhich does not specify a collation: Since \"Gunter\" is the first sorted result when using a binary collation, none\nof the documents come lexically before and match the $lt comparison\noperator in the query document. As a result, the operation does not update any\ndocuments. Consider the same operation with a collation specified with the locale set to\n de@collation=phonebook . This locale specifies the collation=phonebook \noption which contains rules for prioritizing proper nouns, identified by\ncapitalization of the first letter. The de@collation=phonebook locale and\noption sorts characters with umlauts before the same characters without\numlauts. Since \"G\u00fcnter\" lexically comes before \"Gunter\" using the\n de@collation=phonebook collation specified in findOneAndUpdate() ,\nthe operation returns the following updated document: The following example calls the findOneAndDelete() operation on a\ncollection that uses the default binary collation and contains the following\ndocuments: In this example, we set the numericOrdering collation parameter to true \nto sort numeric strings based on their numerical order instead of their\nlexical order. After you run the operation above, the collection contains the following\ndocuments: If you perform the same operation without collation on the original\ncollection of three documents, it matches documents based on the lexical value\nof the strings ( \"16\" , \"84\" , and \"179\" ), and deletes the first\ndocument it finds that matches the query criteria. 
Since all the documents contain lexical values in the a field that\nmatch the criteria (greater than the lexical value of \"100\" ), the operation\nremoves the first result. After you run the operation above, the collection\ncontains the following documents: To use collation with the aggregate \noperation, pass the collation document in the options field, after the\narray of pipeline stages. The following example shows an aggregation pipeline on a collection that uses\nthe default binary collation. The aggregation groups the first_name field,\ncounts the total number of results in each group, and sorts the results by\nthe German phonebook ( \"de@collation=phonebook\" locale) order. You can specify only one collation on an aggregation.", - "code": [ - { - "lang": "none", - "value": "cote\ncot\u00e9\nc\u00f4te\nc\u00f4t\u00e9" - }, - { - "lang": "none", - "value": "cote\nc\u00f4te\ncot\u00e9\nc\u00f4t\u00e9" - }, - { - "lang": "javascript", - "value": "collation: {\n locale: ,\n caseLevel: ,\n caseFirst: ,\n strength: ,\n numericOrdering: ,\n alternate: ,\n maxVariable: ,\n backwards: \n}" - }, - { - "lang": "javascript", - "value": "db.createCollection(\"souvenirs\", {\n collation: { locale: \"fr_CA\" },\n});" - }, - { - "lang": "javascript", - "value": "myColl.find({type: \"photograph\"});" - }, - { - "lang": "javascript", - "value": " myColl.find({type: \"photograph\"},\n { collation: { locale: \"is\", caseFirst: \"upper\" } }\n );" - }, - { - "lang": "javascript", - "value": "myColl.createIndex(\n { 'title' : 1 },\n { 'collation' : { 'locale' : 'en_US' } });" - }, - { - "lang": "javascript", - "value": "myColl.find({\"year\": 1980}, {\"collation\" : {\"locale\" : \"en_US\" }})\n .sort({\"title\": -1});" - }, - { - "lang": "javascript", - "value": "myColl.find({\"year\": 1980}, {\"collation\" : {\"locale\" : \"en_US\", \"strength\": 2 }})\n .sort({\"title\": -1});" - }, - { - "lang": "javascript", - "value": "myColl.find({\"year\": 1980})\n .sort({\"title\": -1});" - }, - { - "lang": "javascript", - "value": "myColl.find({ city: \"New York\" }, { collation: { locale: \"de\" } })\n .sort({ name: 1 });" - }, - { - "lang": "none", - "value": "{ \"_id\" : 1, \"first_name\" : \"Hans\" }\n{ \"_id\" : 2, \"first_name\" : \"Gunter\" }\n{ \"_id\" : 3, \"first_name\" : \"G\u00fcnter\" }\n{ \"_id\" : 4, \"first_name\" : \"J\u00fcrgen\" }" - }, - { - "lang": "none", - "value": "{ lastErrorObject: { updatedExisting: true, n: 1 },\n value: { _id: 3, first_name: 'G\u00fcnter' },\n ok: 1 }" - }, - { - "lang": "javascript", - "value": "myColl.findOneAndUpdate(\n { first_name : { $lt: \"Gunter\" } },\n { $set: { verified: true } }\n);" - }, - { - "lang": "javascript", - "value": "myColl.findOneAndUpdate(\n { first_name: { $lt: \"Gunter\" } },\n { $set: { verified: true } },\n { collation: { locale: \"de@collation=phonebook\" } },\n);" - }, - { - "lang": "none", - "value": "{ \"_id\" : 1, \"a\" : \"16\" }\n{ \"_id\" : 2, \"a\" : \"84\" }\n{ \"_id\" : 3, \"a\" : \"179\" }" - }, - { - "lang": "none", - "value": "{ \"_id\" : 1, \"a\" : \"16\" }\n{ \"_id\" : 2, \"a\" : \"84\" }" - }, - { - "lang": "none", - "value": "{ \"_id\" : 2, \"a\" : \"84\" }\n{ \"_id\" : 3, \"a\" : \"179\" }" - }, - { - "lang": "javascript", - "value": "myColl.findOneAndDelete(\n { a: { $gt: \"100\" } },\n { collation: { locale: \"en\", numericOrdering: true } },\n);" - }, - { - "lang": "javascript", - "value": "await myColl.findOneAndDelete({ a: { $gt: \"100\" } });" - }, - { - "lang": "javascript", - "value": "myColl.aggregate(\n [\n { 
$group: { \"_id\": \"$first_name\", \"nameCount\": { \"$sum\": 1 } } },\n { $sort: { \"_id\": 1 } },\n ],\n { collation: { locale: \"de@collation=phonebook\" } },\n);" - } - ], - "preview": "Collations are available in MongoDB 3.4 and later.", - "tags": null, - "facets": { - "genre": ["tutorial"], - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "fundamentals/connection/connect", - "title": "Connection Guide", - "headings": [ - "Connection URI", - "Atlas Connection Example", - "Other Ways to Connect to MongoDB", - "Connect to a MongoDB Server on Your Local Machine", - "Connect to a Replica Set", - "Direct Connection" - ], - "paragraphs": "This guide shows you how to connect to a\n MongoDB Atlas deployment ,\na MongoDB instance, or a replica set using the Node.js driver. The connection URI is the set of instructions that the driver uses to connect to a\nMongoDB deployment. It tells the driver how to connect to MongoDB and how to behave\nwhile connected. The following example shows each part of the connection URI: In this example, we connect to an Atlas MongoDB deployment that has a\nDNS SRV record. For more details, see the\n DNS Seed List Connection Format \ndocumentation. This format offers flexibility in deployment and the\nability to change the servers in rotation without reconfiguring clients. If you are connecting to an instance or replica set that does not have a\nDNS SRV address, you must use mongodb for the protocol, which specifies\nthe Standard Connection String Format . After the protocol, the next part of the connection string contains credentials\nif you are using password-based authentication. Replace the value of user \nwith your username and pass with your password. If you are using an\nauthentication mechanism that does not require a username and password, omit\nthis part of the connection URI. The next part of the connection string specifies the hostname or IP address of\nyour MongoDB instance, followed by the port number. In the example above, we use\n sample.host as the hostname and 27017 as the port. Replace these values\nto point to your MongoDB instance. The last part of the connection string contains connection and authentication\noptions as parameters. In the example above, we set two connection options:\n maxPoolSize=20 and w=majority . For more information on connection\noptions, skip to the Connection Options section. To learn how to retrieve your connection string in Atlas, see the\n Atlas driver connection guide . You must create a client to connect to a MongoDB deployment on Atlas. To create\na client, construct an instance of MongoClient , passing in your\nURI and a MongoClientOptions object. Use the serverApi option in your MongoClientOptions object to\nenable the Stable API feature, which forces the server to run operations\nwith behavior compatible with the specified API version. The following code shows how you can specify the connection string and the\nStable API client option when connecting to a MongoDB deployment on Atlas and\nverify that the connection is successful: To learn more about the Stable\nAPI feature, see the Stable API page . As each MongoClient represents a pool of connections to the\ndatabase, most applications only require a single instance of a\n MongoClient , even across multiple requests. To learn more about\nhow connection pools work in the driver, see the FAQ page . 
The Node.js driver automatically calls the MongoClient.connect() \nmethod when using the client to perform CRUD operations on your MongoDB deployment.\nCall the MongoClient.connect() method explicitly if you want to verify that the\nconnection is successful. If you are connecting to a single MongoDB Server instance or replica set\nthat is not hosted on Atlas, see the following sections to find out how to\nconnect. To test whether you can connect to your server, replace the connection\nstring in the Connect to MongoDB code\nexample and run it. To connect to a MongoDB deployment on your local machine, complete the following\nsteps: After you successfully start your MongoDB Server, specify your connection\nstring in your driver connection code. If your MongoDB Server is running locally, you can use the following\nconnection string: In this connection string, is the port number on which you\nconfigured your server to listen for incoming connections. If you want to specify a different hostname or IP address, see our Server\nManual entry on Connection Strings . Download the Community \nor Enterprise version\nof MongoDB Server. Install and configure MongoDB Server. Start the server. Always secure your MongoDB Server from malicious attacks. See our\n Security Checklist for a\nlist of security recommendations. A MongoDB replica set deployment is a group of connected instances that\nstore the same set of data. This configuration of instances provides data\nredundancy and high data availability. To connect to a replica set deployment, specify the hostname and port numbers\nof each instance, separated by a comma, and the replica set name as the value\nof the replicaSet parameter in the connection string. When making a connection, the driver takes the following actions by default: Discovers all replica set members when given the address of any one member. Dispatches operations to the appropriate member, such as writes against the primary . To ensure connectivity if one host is unavailable, provide the full\nlist of hosts when connecting to a replica set. To force your operations to run on the host specified in your connection\nURI, you can specify the directConnection connection option. If you\nspecify this option, you must use the standard connection URI format. The\ndriver does not accept the DNS seedlist connection format (SRV) when you\nspecify this option. When you specify directConnection and connect to a secondary member of the\nreplica set, your write operations fail because the client isn't\nconnected to the primary member. To perform read operations, you must\nenable secondary reads. See the read preference options \nfor more information.", - "code": [ - { - "lang": "javascript", - "value": "const { MongoClient, ServerApiVersion } = require(\"mongodb\");\n\n// Replace the placeholder with your Atlas connection string\nconst uri = \"\";\n\n// Create a MongoClient with a MongoClientOptions object to set the Stable API version\nconst client = new MongoClient(uri, {\n  serverApi: {\n    version: ServerApiVersion.v1,\n    strict: true,\n    deprecationErrors: true,\n  }\n  }\n);\n\nasync function run() {\n  try {\n    // Connect the client to the server (optional starting in v4.7)\n    await client.connect();\n\n    // Send a ping to confirm a successful connection\n    await client.db(\"admin\").command({ ping: 1 });\n    console.log(\"Pinged your deployment. 
You successfully connected to MongoDB!\");\n } finally {\n // Ensures that the client will close when you finish/error\n await client.close();\n }\n}\nrun().catch(console.dir);\n" - }, - { - "lang": "none", - "value": "mongodb://localhost:" - }, - { - "lang": "none", - "value": "mongodb://host1:27017,host2:27017,host3:27017/?replicaSet=myRs" - } - ], - "preview": "Learn how to connect to a MongoDB Atlas or local MongoDB deployment by using the Node.js driver.", - "tags": "node.js, code example, connection string, local connection, Stable API, Atlas", - "facets": { - "genre": ["reference"], - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "fundamentals/connection/connection-options", - "title": "Connection Options", - "headings": [], - "paragraphs": "This section explains the MongoDB connection and authentication options\nsupported by the driver. You can pass the connection options as\nparameters of the connection URI to specify the behavior of the client. Name Accepted Values Default Value Description appName string null Specifies the app name the driver passes to the server in the client\nmetadata as part of the connection handshake. The driver sends the\n appName value to MongoDB when establishing the connection.\nThis value is recorded in the log file, the slow query logs, and\nprofile collections. authMechanism string null Specifies the authentication mechanism method to use for connection to the\nserver. If you do not specify a value, the driver uses the default mechanism,\neither SCRAM-SHA-1 or SCRAM-SHA-256 depending on the server version. See\n authentication mechanism for available\nauthentication mechanisms. authMechanismProperties comma separated key:value pairs, for example, \"opt1:val1,opt2:val2\" null Specifies other options provided for authentication, such as the option to enable\nhostname canonicalization for GSSAPI. authSource string null Specifies the database that connections authenticate against. compressors comma separated list of strings, for example, \"snappy,zlib,zstd\" null Specifies the allowed compression types for wire protocol messages\nsent to or received from the server. See Network Compression \nfor more information. connectTimeoutMS non-negative integer 30000 Specifies the amount of time, in milliseconds, to wait to establish a single TCP\nsocket connection to the server before raising an error. Specifying\n 0 disables the connection timeout. directConnection boolean false Specifies whether to force dispatch all operations to the host\nspecified in the connection URI. enableUtf8Validation boolean true Specifying true enables UTF-8 validation for the\nconnection. MongoDB throws an error when\nit attempts to serialize string data that contains invalid\nUTF-8 characters to BSON. This applies to both document keys and\ndocument values, this validation adds processing overhead. Specifying false disables UTF-8 validation for the\nconnection. MongoDB does not throw errors when\ndata contains invalid UTF-8 data. If you disable validation,\nyour application avoids the validation processing overhead.\n Editing data while validation is disabled\ncan result in loss of data. Disabling UTF-8 validation is a\ntemporary workaround to query or export data only. You can also set UTF-8 validation in your Node.js code . To learn more about UTF-8 characters,\nsee UTF-8 on Wikipedia. 
heartbeatFrequencyMS integer greater than or equal to 500 null Specifies the interval, in milliseconds, between regular server monitoring checks. journal boolean null Specifies the journal write concern for the client. See\n write concern for more information. loadBalanced boolean null Specifies whether the driver is connecting to a load balancer. localThresholdMS non-negative integer 15 Specifies the size of the latency window, in milliseconds, on round trip time for\nselecting between suitable servers. Specifying 0 means no wait,\nmeaning the fastest available server. maxIdleTimeMS non-negative integer 0 Specifies the amount of time, in milliseconds, a connection can be idle before it's closed.\nSpecifying 0 means no minimum. maxPoolSize non-negative integer 100 Specifies the maximum number of clients or connections the driver\ncan create in its connection pool. This count includes connections\nin use. maxConnecting non-negative integer 2 Specifies the maximum number of connections a driver's connection\npool may be establishing concurrently. maxStalenessSeconds -1, or an integer greater than or equal to 90 null Specifies the maximum replication lag, in wall clock time, that\na secondary can experience and still be eligible for server selection.\nSpecifying -1 means no maximum. minPoolSize non-negative integer 0 Specifies the number of connections the driver creates and\nmaintains in the connection pool even when no operations are occurring.\nThis count includes connections in use. proxyHost string null Specifies the SOCKS5 proxy IPv4 address, IPv6 address, or domain\nname. proxyPort non-negative integer null Specifies the TCP port number of the SOCKS5 proxy server. If you\nset the proxyHost option, the value of this option defaults\nto 1080 . proxyUsername string null Specifies the username for authentication to the SOCKS5\nproxy server. If you set\nthis option to a zero-length string, the driver ignores it. proxyPassword string null Specifies the password for authentication to the SOCKS5\nproxy server. If you set\nthis option to a zero-length string, the driver ignores it. readConcernLevel string null Specifies the default read concern for the client. See read concern for more information. readPreference string \"primary\" Specifies the default read preference for the client (excluding tags). See read preference for more information. readPreferenceTags comma-separated key:value pairs, for example, \"dc:ny,rack:1\" and \"dc:ny\ncan be specified multiple times, each instance of this key is a\nseparate tag set null Specifies the default read preference tags for the client. This option is\nvalid only if the read preference mode is not primary. The driver uses the order of the tags in the URI as the order\nfor the read preference. replicaSet string null Specifies the name of the replica set to connect to. retryReads boolean true Enables retryable reads. retryWrites boolean true Enables retryable writes. serverMonitoringMode auto , stream , poll auto Specifies the monitoring mode that the driver's monitors use. When\nthis option is set to auto , the monitoring mode is determined\nby the environment in which the driver is running. The driver\nuses polling mode in function-as-a-service (FaaS) environments\nand the streaming mode in other environments. serverSelectionTimeoutMS non-negative integer 30000 Specifies the timeout, in milliseconds, to block for server selection\nbefore raising an error. 
serverSelectionTryOnce boolean true Specifies to scan the topology only once after a server selection\nfailure instead of repeatedly until the server selection times out. socketTimeoutMS non-negative integer 0 Specifies the amount of time, in milliseconds, spent attempting to send or receive on a\nsocket before timing out. Specifying 0 means no timeout. srvMaxHosts non-negative integer 0 Specifies the maximum number of SRV results to randomly select when initially\npopulating the seedlist or, during SRV polling, adding new hosts to the\ntopology. srvServiceName a valid SRV service name according to RFC 6335 \"mongodb\" Specifies the service name to use for SRV lookup in initial DNS seedlist discovery. ssl boolean false The ssl option is an alias for the tls option. tls boolean false Specifies whether TLS is required for connections to the server.\nUsing a srvServiceName of \"mongodb+srv\" , or specifying other\n tls -prefixed options implicitly sets the value of tls to\n true . tlsAllowInvalidCertificates boolean false Specifies whether the driver generates an error when the server's\nTLS certificate is invalid. Set this option to true for testing\npurposes only. tlsAllowInvalidHostnames boolean false Specifies whether the driver generates an error when there is a mismatch\nbetween the server's hostname and the hostname specified by the\nTLS certificate. Set this option to true for testing purposes only. tlsCAFile string null Specifies the path to a file with either a single or bundle of certificate\nauthorities to trust when making a TLS connection. To learn more\nabout setting this connection option, see the Provide\nCertificate Filepaths section of the TLS guide. tlsCertificateKeyFile string null Specifies the path to the client certificate file or the client\nprivate key file. If you need both, you must concatenate the\nfiles. To learn more about setting this connection option, see\nthe Provide Certificate Filepaths \nsection of the TLS guide. tlsCertificateKeyFilePassword string null Specifies the password to decrypt the client private key to be used\nfor TLS connections. tlsInsecure boolean false Specifies to relax TLS constraints as much as possible, such as\nallowing invalid certificates or hostname mismatches. Set this option\nto true for testing purposes only. w non-negative integer or string null Specifies the default write concern \"w\" field for the client. waitQueueTimeoutMS non-negative integer 0 Specifies the amount of time, in milliseconds, spent attempting to check out a connection\nfrom a server's connection pool before timing out. wTimeoutMS non-negative integer null Specifies the default write concern timeout field for the client. zlibCompressionLevel integer between -1 and 9 (inclusive) -1 Specifies the level of compression when using zlib to compress wire\nprotocol messages. -1 signifies the default level, 0 signifies\nno compression, 1 signifies the fastest speed, and 9 signifies\nthe best compression. See Network Compression for more information.", - "code": [], - "preview": "This section explains the MongoDB connection and authentication options\nsupported by the driver. 
You can pass the connection options as\nparameters of the connection URI to specify the behavior of the client.", - "tags": "node.js, customize", - "facets": { - "genre": ["reference"], - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "fundamentals/connection/network-compression", - "title": "Network Compression", - "headings": [ - "Specify Compression Algorithms", - "Compression Algorithm Dependencies" - ], - "paragraphs": "You can enable a driver option to compress messages, which reduces the amount\nof data passed over the network between MongoDB and your application. The driver supports the following compression algorithms: If you specify multiple compression algorithms, the driver selects the\nfirst one in the list supported by your MongoDB instance. Snappy : available in MongoDB 3.6 and later. Zlib : available in MongoDB 3.6 and later. Zstandard : available in MongoDB 4.2 and later. When using the Snappy or Zstandard compression algorithm, you must\n add explicit dependencies . You can enable compression for the connection to your MongoDB instance\nby specifying the algorithms in one of two ways: Specify compression algorithms using the following strings: Adding the parameter to your connection string. Specifying the compressors option in your MongoClientOptions . To enable compression using the connection string, add the\n compressors parameter in the connection string. You can\nspecify one or more compression algorithms, separating them with\ncommas: To enable compression using the MongoClientOptions ,\npass the compressors option and the compression\nalgorithm you want to use. You can specify one or more compression\nalgorithms, separating them with commas: \"snappy\" for Snappy compression \"zlib\" for Zlib compression \"zstd\" for Zstandard compression To add the Snappy compression algorithm to your application, run the\nfollowing code: To add the Zstandard compression algorithm to your application, run the\nfollowing code:", - "code": [ - { - "lang": "javascript", - "value": "const uri =\n  \"mongodb+srv://:@/?compressors=snappy,zlib\";\n\nconst client = new MongoClient(uri);" - }, - { - "lang": "javascript", - "value": "const uri =\n  \"mongodb+srv://:@\";\n\nconst client = new MongoClient(uri,\n  {\n    compressors: [\"snappy\"]\n  });" - }, - { - "lang": "bash", - "value": "npm install --save snappy" - }, - { - "lang": "bash", - "value": "npm install --save @mongodb-js/zstd" - } - ], - "preview": "You can enable a driver option to compress messages, which reduces the amount\nof data passed over the network between MongoDB and your application.", - "tags": null, - "facets": { - "genre": ["tutorial"], - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "fundamentals/connection/socks", - "title": "Enable SOCKS5 Proxy Support", - "headings": [ - "Overview", - "Install the socks Package", - "SOCKS5 Client Options", - "Example", - "Additional Information", - "API Documentation" - ], - "paragraphs": "In this guide, you can learn how to connect to MongoDB instances by using\na SOCKS5 proxy. SOCKS5 is a standardized protocol for connecting\nto network services through a proxy server. To learn more about the SOCKS5 protocol, see the Wikipedia entry on\n SOCKS . Starting in version 6.0 of the Node.js driver, you must install\nthe socks package to use SOCKS5 proxy support in your\napplication. 
You can install socks by running the following command\nin your shell: You can set options in your MongoClientOptions instance or\nin your connection URI to configure SOCKS5 proxy support for\nyour connection. The following table describes the client options\nrelated to SOCKS5: Name Accepted Values Default Value Description proxyHost string null Specifies the SOCKS5 proxy IPv4 address, IPv6 address, or domain\nname. proxyPort non-negative integer null Specifies the TCP port number of the SOCKS5 proxy server. If you\nset the proxyHost option, the value of this option defaults\nto 1080 . proxyUsername string null Specifies the username for authentication to the SOCKS5\nproxy server. If you set\nthis option to a zero-length string, the driver ignores it. proxyPassword string null Specifies the password for authentication to the SOCKS5\nproxy server. If you set\nthis option to a zero-length string, the driver ignores it. The driver throws an error if you set the proxyPort ,\n proxyUsername , or proxyPassword options without setting the\n proxyHost option. This example shows how to instantiate a MongoClient that uses SOCKS5\nproxy support. The following example code specifies proxy server options\nand connects to MongoDB: The preceding sample code uses placeholders for the connection URI\nand proxy server details. To run this code, you must replace these\nplaceholders with the information for your deployment and proxy server. For more information about SOCKS5 proxy support, see the\n MongoDB SOCKS5 specification . To learn more about the methods and types discussed in this\nguide, see the following API Documentation: MongoClientOptions MongoClient ProxyOptions", - "code": [ - { - "lang": "bash", - "value": "npm i socks" - }, - { - "lang": "javascript", - "value": "// Replace the placeholder with your connection string\nconst uri = \"\";\n\n// Replace the placeholders with your SOCKS5 proxy server details\nconst socksOptions = {\n proxyHost: \"\",\n proxyPort: 1080,\n proxyUsername: \"\",\n proxyPassword: \"\",\n};\n\n// Create a new client with the proxy server details\nconst client = new MongoClient(uri, socksOptions);" - } - ], - "preview": "In this guide, you can learn how to connect to MongoDB instances by using\na SOCKS5 proxy. SOCKS5 is a standardized protocol for connecting\nto network services through a proxy server.", - "tags": null, - "facets": { - "genre": ["tutorial"], - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "fundamentals/connection/tls", - "title": "Enable TLS on a Connection", - "headings": [ - "Overview", - "Enable TLS", - "Configure Certificates", - "Reference Certificates in a Client", - "Create a SecureContext Object to Store Certificates", - "Provide Certificate Filepaths", - "Create Buffer Objects to Store Certificates", - "SecureContext Example", - "Additional Information", - "API Documentation" - ], - "paragraphs": "In this guide, you can learn how to connect to MongoDB instances with\nthe TLS security protocol. To configure your connection to use TLS, enable\nthe TLS option and provide your certificates for validation. To learn more about TLS, see the Wikipedia entry on\n Transport Layer Security . You can enable TLS on a connection to your MongoDB instance\nin the following ways: In addition to the tls client option, the driver provides more\noptions to configure TLS on your connection. 
For testing purposes ,\nyou can set the tlsAllowInvalidHostnames ,\n tlsAllowInvalidCertificates , and tlsInsecure client options. Setting the tlsAllowInvalidHostnames option to true disables\nhostname verification, and setting the\n tlsAllowInvalidCertificates option to true disables certificate\nvalidation. Setting the tlsInsecure option to true disables\nboth certificate and hostname validation. For a full list of client options, see Connection Options . Setting the tls option to true in your MongoClientOptions object Setting the tls option to true in your connection string A MongoClient instance can connect with TLS if you set tls \nto true in your MongoClientOptions object: A MongoClient instance can connect with TLS if you set the\n tls option to true in your connection string: If you use a DNS SRV record when connecting to MongoDB by specifying\nthe +srv modification in your connection string, you enable\nTLS on your connection by default. To disable it, set the tls or ssl parameter\nvalue to false in your connection string or MongoClientOptions object. To learn more about connection behavior when you use a DNS seedlist,\nsee the SRV Connection Format \nsection in the Server manual. Specifying any of these options in a production environment makes\nyour application insecure and potentially\nvulnerable to expired certificates and to foreign processes posing\nas valid client instances. To successfully initiate a TLS request, an application must prove its\nidentity by referencing cryptographic certificates. To connect to\nMongoDB with TLS, your certificates must be stored as PEM\nfiles. The following list describes the components required to establish\na connection with TLS: For production use, we recommend that your MongoDB deployment use valid\ncertificates generated and signed by the same certificate authority.\nFor testing, you can use self-signed certificates. TLS Component Description Certificate Authority (CA) One or more certificate authorities to\ntrust when making a TLS connection. Client Certificate A digital certificate and key that allow the server to verify the identity\nof your application to establish an encrypted network connection. Certificate Key The client certificate private key file. This key is often\nincluded within the certificate file itself. Passphrase The password to decrypt the private client key if it is encrypted. To learn more about the PEM format, see the Wikipedia entry on\n Privacy-Enhanced Mail . You must reference your certificates in your MongoClientOptions \nobject so that the server can validate them before the client connects.\nYou can reference your certificates in the following ways: Create a SecureContext object to store certificates (Recommended) Provide filepath strings that point to your certificates Create Buffer objects to store certificates We recommend that you use the secureContext option to configure\nyour TLS connection. SecureContext objects are native to Node.js\nand allow you to keep all your TLS options in a single reusable object. To create a SecureContext object, import the createSecureContext() \nmethod from the tls module. Next, call the createSecureContext() \nmethod and pass the contents of your certificates in the options parameter.\nThis method returns a SecureContext object that you can use in your\n MongoClientOptions object. 
The following code shows how to create a SecureContext object and\npass it to your client: To learn more about the createSecureContext() method and the\n tls package, see the Node.js TLS API documentation . For a runnable example that uses a SecureContext object, see\nthe SecureContext Example . You can include the filepaths for your certificates as client options to\nretrieve your certificates while connecting with TLS. The driver reads\nthese files when you call the connect() method on your\n MongoClient instance. The following code shows how to provide certificate filepaths as options\nin your MongoClient : Your TLS configuration might require that you present a certificate\nrevocation list (CRL) when connecting to MongoDB. Starting in version\n6.0 of the driver, you can pass the filepath of your CRL file to the\n tlsCRLFile option in your connection string or your\n MongoClientOptions instance. You can pass the contents of your certificate files as Buffer \nobjects in your client options to connect with TLS. The following code shows how to read the contents of your certificate\nfiles and pass the resulting Buffer objects as options in your\n MongoClient : This example shows how to create a SecureContext object and\na MongoClient instance that includes TLS options. The example\nconnects to MongoDB and executes a find query: For more information about enabling TLS on a connection, see the\nfollowing Server manual documentation: TLS/SSL (Transport Encryption) TLS/SSL Configuration for Clients MongoClientOptions MongoClient tlsAllowInvalidHostnames client option tlsAllowInvalidCertificates client option secureContext client option tlsCAFile client option tlsCertificateKeyFile client option ca client option cert client option key client option", - "code": [ - { - "lang": "js", - "value": "const client = new MongoClient(uri, { tls: true });" - }, - { - "lang": "js", - "value": "const uri = \"mongodb://:?tls=true\";\nconst client = new MongoClient(uri, myClientSettings);" - }, - { - "lang": "js", - "value": "// Create a SecureContext object\nconst secureContext = tls.createSecureContext({\n ca: fs.readFileSync(``),\n cert: fs.readFileSync(``),\n key: fs.readFileSync(``),\n});\n\n// Pass the SecureContext as a client option\nconst client = new MongoClient(uri, { tls: true, secureContext });" - }, - { - "lang": "js", - "value": "// Pass filepaths as client options\nconst client = new MongoClient(uri, {\n tls: true,\n tlsCAFile: ``,\n tlsCertificateKeyFile: ``,\n});" - }, - { - "lang": "js", - "value": "// Read file contents\nconst ca = fs.readFileSync(``);\nconst cert = fs.readFileSync(``);\nconst key = fs.readFileSync(``);\n\n// Pass Buffers as client options\nconst client = new MongoClient(uri, { tls: true, ca, cert, key });" - }, - { - "lang": "js", - "value": "import { MongoClient } from \"mongodb\";\nimport * as fs from \"fs\";\nimport * as tls from \"tls\";\n\n// Replace the uri string with your connection string.\nconst uri = \"\";\n\n// Replace the filepaths with your certificate filepaths.\nconst secureContext = tls.createSecureContext({\n ca: fs.readFileSync(``),\n cert: fs.readFileSync(``),\n key: fs.readFileSync(``),\n});\n\n// Create a client with the secureContext option\nconst client = new MongoClient(uri, { tls: true, secureContext });\n\nasync function run() {\n try {\n const db = client.db(\"myDB\");\n const myColl = db.collection(\"myColl\");\n const doc = await myColl.findOne({});\n console.log(doc);\n } finally {\n await client.close();\n }\n}\nrun().catch(console.dir);" - 
} - ], - "preview": "In this guide, you can learn how to connect to MongoDB instances with\nthe TLS security protocol.", - "tags": "code example, node.js, security, encrypt", - "facets": { - "genre": ["reference"], - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "fundamentals/connection", - "title": "Connection", - "headings": ["Overview", "Compatibility"], - "paragraphs": "Learn how to configure your application's connection to a MongoDB\ndeployment using the Node.js driver. In the following sections, you will\nlearn: How to Connect to MongoDB The Available Connection Options How to Enable Network Compression How to Enable TLS on a Connection How to Enable SOCKS5 Proxy Support How to Connect to MongoDB Atlas from AWS Lambda For information about authenticating to MongoDB,\nsee Authentication and\n Enterprise Authentication Mechanisms . You can use the Node.js driver to connect to deployments hosted in the\nfollowing environments: MongoDB Atlas : The fully\nmanaged service for MongoDB deployments in the cloud MongoDB Enterprise : The\nsubscription-based, self-managed version of MongoDB MongoDB Community : The\nsource-available, free-to-use, and self-managed version of MongoDB To learn more about using drivers to connect to deployments hosted in MongoDB\nAtlas, see Connect Your Application .", - "code": [], - "preview": "Learn how to configure your application's connection to a MongoDB deployment by using the Node.js driver.", - "tags": "node.js", - "facets": { - "genre": ["reference"], - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "fundamentals/crud/compound-operations", - "title": "Compound Operations", - "headings": [ - "Overview", - "Built-in Methods", - "includeResultMetadata Option" - ], - "paragraphs": "Most database requests either read data from a database or write data into\na database. However, there are instances where you may require a single\noperation that reads and writes data. Compound operations combine read and write operations\nin a single atomic statement, so there's no chance of data changing in\nbetween a read and a subsequent write. If you execute each operation separately, another request may alter the\ndata between the read and write operations. These data changes may not\nprevent your operation from succeeding, but they can make error handling\nmore difficult. When your application handles potential errors at\nany stage of the process, it can become brittle and difficult\nto test. The Node.js driver provides the following methods to perform compound\noperations: These methods accept an optional options object with\nconfigurable sort and\n projection options. You can also set the includeResultMetadata \noption to specify the return type of each\nof these methods. To learn more about this option, see the\n includeResultMetadata Option \nsection of this guide. The findOneAndUpdate() and findOneAndDelete() methods take the\n returnDocument setting, which specifies if the method returns the\npre-update or post-update version of the modified document. findOneAndDelete() findOneAndUpdate() findOneAndReplace() The includeResultMetadata option determines the return type of the\ncompound methods. This setting defaults to false , which means that each method returns the matched\ndocument. If no document is matched, each method returns null . 
If you set\n includeResultMetadata to true , the method returns a ModifyResult type that\ncontains the found document and metadata. Suppose a collection contains only the following document: The following table shows how the value of the\n includeResultMetadata option changes the return type of\nthe findOneAndDelete() method: Option Value Syntax and Output Default: false Document matched No document matched true", - "code": [ - { - "lang": "json", - "value": "{ _id: 1, x: \"on\" }" - }, - { - "lang": "js", - "value": "await coll.findOneAndDelete({ x: \"on\" });" - }, - { - "lang": "js", - "value": "{ _id: 1, x: 'on' }" - }, - { - "lang": "js", - "value": "await coll.findOneAndDelete({ x: \"off\" });" - }, - { - "lang": "js", - "value": "null" - }, - { - "lang": "js", - "value": "await coll.findOneAndDelete({ x: \"on\" }, { includeResultMetadata: true });" - }, - { - "lang": "js", - "value": "{ lastErrorObject: { n: 1 }, value: { _id: 1, x: 'on' }, ok: 1, ... }" - } - ], - "preview": "Most database requests either read data from a database or write data into\na database. However, there are instances where you may require a single\noperation that reads and writes data.", - "tags": "node.js, atomic operation, read, write", - "facets": { - "genre": ["reference"], - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "fundamentals/crud/query-document", - "title": "Specify a Query", - "headings": [ - "Overview", - "Literal Value Queries", - "Comparison Operators", - "Logical Operators", - "Element Operators", - "Evaluation Operators" - ], - "paragraphs": "Most CRUD operations allow you to narrow the set of matched documents by\nspecifying matching criteria in a query document . Query documents contain\none or more query operators that apply to specific fields which determine which\ndocuments to include in the result set. In a query document, you can match fields against literal values, such as\n { title: 'The Room' } , or you can compose\n query operators to express more\ncomplex matching criteria. In this guide, we cover the following categories\nof query operators in MongoDB and show examples on how to use them: To follow the examples in this guide, use the following code\nsnippet to insert documents that describe fruits into the myDB.fruits collection: Comparison Operators Logical Operators Element Operators Evaluation Operators Your query operation may return a reference to a\ncursor that contains matching documents. To learn how to\nexamine data stored in the cursor, see the\n Cursor Fundamentals page . Literal value queries allow you to query for data that exactly matches\na value you provide in the query document. A literal value query has two\nparts: a field name and a value. Documents returned from such a query\nmust contain a field that has exactly the same name as the provided name\nand a value for that field that is exactly the same as the provided\nvalue. The following operation uses a literal query to search for\ndocuments containing a field called \"name\" that has a value of \"apples\": This code snippet returns the following results: Literal value queries are equivalent to the $eq comparison\noperator. As a result, the following two queries are equivalent: Comparison operators allow you to query for data based on comparisons\nwith values in a collection. Common comparison operators include\n $gt for \"greater than\" comparisons, $lt for \"less than\" comparisons,\nand $ne for \"not equal to\" comparisons. 
The following operation uses\nthe comparison operator $gt to search for documents in which the qty \nfield value is greater than 5 and prints them out: This code snippet returns the following results: Logical operators allow you to query for data using logic applied to the\nresults of field-level operators. For instance, you can use the $or \nmethod to query for documents that match either a $gt comparison\noperator or a literal value query. The following operation uses the\nlogical operator $not to search for documents with a quantity value\nthat is not greater than 5 and prints them out: This code snippet returns the following results: For more information on comparison operators, see the reference manual\nentry for Comparison Query Operators . Whenever a query document contains multiple elements, those elements\nare combined together with an implicit $and logical operator to\nfigure out which documents match the query. As a result, the following\ntwo queries are equivalent: Element operators allow you to query based on the presence, absence, or\ntype of a field. The following operation uses the element operator\n $exists to search for documents containing the color \nfield: This code snippet returns the following results: For more information on this operator, see the reference manual entry for\nthe $exists operator . Evaluation operators allow you to execute higher level logic, like\nregex and text searches, when querying for documents in a collection.\nCommon evaluation operators include $regex and $text .\nThe following operation uses the evaluation operator $mod to search\nfor documents in which the qty field value is divisible by 3 with\na remainder of 0: This code snippet returns the following results: For more information on this operator, see the reference manual entry for\nthe $mod operator .", - "code": [ - { - "lang": "javascript", - "value": "const myDB = client.db(\"myDB\");\nconst myColl = myDB.collection(\"fruits\");\n\nawait myColl.insertMany([\n { \"_id\": 1, \"name\": \"apples\", \"qty\": 5, \"rating\": 3 },\n { \"_id\": 2, \"name\": \"bananas\", \"qty\": 7, \"rating\": 1, \"color\": \"yellow\" },\n { \"_id\": 3, \"name\": \"oranges\", \"qty\": 6, \"rating\": 2 },\n { \"_id\": 4, \"name\": \"avocados\", \"qty\": 3, \"rating\": 5 },\n]);" - }, - { - "lang": "javascript", - "value": "const query = { \"name\": \"apples\" };\nconst cursor = myColl.find(query);\nfor await (const doc of cursor) {\n console.dir(doc);\n}" - }, - { - "lang": "javascript", - "value": "{ \"_id\": 1, \"name\": \"apples\", \"qty\": 5, \"rating\": 3 }" - }, - { - "lang": "javascript", - "value": "myColl.find({\n rating: { $eq: 5 }\n});" - }, - { - "lang": "javascript", - "value": "myColl.find({\n rating: 5\n});" - }, - { - "lang": "javascript", - "value": "// $gt means \"greater than\"\nconst query = { qty: { $gt : 5 } };\nconst cursor = myColl.find(query);\nfor await (const doc of cursor) {\n console.dir(doc);\n}" - }, - { - "lang": "javascript", - "value": "{ \"_id\": 2, \"name\": \"bananas\", \"qty\": 7, \"rating\": 1 }\n{ \"_id\": 3, \"name\": \"oranges\", \"qty\": 6, \"rating\": 2 }" - }, - { - "lang": "javascript", - "value": "const query = { qty: { $not: { $gt: 5 }}};\nconst cursor = myColl.find(query);\nfor await (const doc of cursor) {\n console.dir(doc);\n}" - }, - { - "lang": "javascript", - "value": "{ \"_id\": 4, \"name\": \"avocados\", \"qty\": 3, \"rating\": 5 }\n{ \"_id\": 1, \"name\": \"apples\", \"qty\": 5, \"rating\": 3 }" - }, - { - "lang": "javascript", - "value": 
"myColl.find({\n rating: { $eq: 5 },\n qty: { $gt: 4 }\n});" - }, - { - "lang": "javascript", - "value": "myColl.find({\n $and: [\n { rating: { $eq: 5 }},\n { qty: { $gt: 4 }}\n ]\n});" - }, - { - "lang": "javascript", - "value": "const query = { color: { $exists: true } };\nconst cursor = myColl.find(query);\nfor await (const doc of cursor) {\n console.dir(doc);\n}" - }, - { - "lang": "javascript", - "value": "{ \"_id\": 2, \"name\": \"bananas\", \"qty\": 7, \"rating\": 1, \"color\": \"yellow\" }" - }, - { - "lang": "javascript", - "value": "// $mod means \"modulo\" and returns the remainder after division\nconst query = { qty: { $mod: [ 3, 0 ] } };\nconst cursor = myColl.find(query);\nfor await (const doc of cursor) {\n console.dir(doc);\n}" - }, - { - "lang": "javascript", - "value": "{ \"_id\": 3, \"name\": \"oranges\", \"qty\": 6, \"rating\": 2 }\n{ \"_id\": 4, \"name\": \"avocados\", \"qty\": 3, \"rating\": 5 }" - } - ], - "preview": "Most CRUD operations allow you to narrow the set of matched documents by\nspecifying matching criteria in a query document. Query documents contain\none or more query operators that apply to specific fields which determine which\ndocuments to include in the result set.", - "tags": null, - "facets": { - "genre": ["tutorial"], - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "fundamentals/crud/read-operations/cursor", - "title": "Access Data From a Cursor", - "headings": [ - "Overview", - "Cursor Paradigms", - "Asynchronous Iteration", - "Manual Iteration", - "Return an Array of All Documents", - "Stream API", - "Event API", - "Cursor Utility Methods", - "Rewind", - "Close" - ], - "paragraphs": "Read operations that return multiple documents do not immediately return all values\nmatching the query. Because a query can potentially match very large sets of documents,\nthese operations return an object called a cursor, which references documents identified\nby the query. A cursor fetches documents in batches to reduce both memory consumption and\nnetwork bandwidth usage. Cursors are highly configurable and offer multiple interaction\nparadigms for different use cases. The following functions directly return cursors: Other methods such as Collection.findOne() \nand Collection.watch() use\ncursors internally, and return the results of the operations instead of\na cursor. Collection.find() Collection.aggregate() Collection.listIndexes() Collection.listSearchIndexes() Db.aggregate() Db.listCollections() You can use several different cursor paradigms to access data.\nMost cursor paradigms allow you to access query results one document at\na time, abstracting away network and caching logic. However, since use\ncases differ, other paradigms offer different access patterns, like\npulling all matching documents into a collection in process memory. Do not combine different cursor paradigms on a single cursor.\nOperations such as hasNext() and toArray() \neach predictably modify the original cursor. If you mix these calls\non a single cursor, you may receive unexpected results. Because asynchronous calls directly modify the cursor, executing\nasynchronous calls on a single cursor simultaneously can also cause\nundefined behavior. Always wait for the previous\nasynchronous operation to complete before running another. When you reach the last result through iteration or through an at-once\nfetch, the cursor is exhausted which means it ceases to respond to methods\nthat access the results. 
Cursors implement the AsyncIterator interface, which\nallows you to use cursors in for await...of loops: You can use the hasNext() \nmethod to check if a cursor can retrieve more data, and then use\nthe next() \nmethod to retrieve the subsequent element of the cursor: For use cases that require all documents matched by a query to be held\nin memory at the same time, use the toArray() \nmethod. Note that large numbers of matched documents can cause performance issues\nor failures if the operation exceeds memory constraints. Consider using\nthe for await...of syntax to iterate\nthrough results rather than returning all documents at once. Cursors expose the stream() method to convert them to Node Readable Streams. These streams operate in Object\nMode , which passes JavaScript objects rather than Buffers or Strings through the pipeline. As Readable Streams, cursors also support the Event API's\n close , data , end , and readable events: To reset a cursor to its initial position in the set of returned\ndocuments, use rewind() . Cursors consume memory and network resources both in the client\napplication and in the connected instance of MongoDB. Use\n close() \nto free up a cursor's resources in both the client application\nand the MongoDB Server:", - "code": [ - { - "lang": "javascript", - "value": " const cursor = myColl.find({});\n console.log(\"async\");\n for await (const doc of cursor) {\n console.log(doc);\n }" - }, - { - "lang": "javascript", - "value": " const cursor = myColl.find({});\n\n while (await cursor.hasNext()) {\n console.log(await cursor.next());\n }" - }, - { - "lang": "javascript", - "value": " const cursor = myColl.find({});\n const allValues = await cursor.toArray();" - }, - { - "lang": "javascript", - "value": " const cursor = myColl.find({});\n cursor.stream().on(\"data\", doc => console.log(doc));" - }, - { - "lang": "javascript", - "value": " const cursor = myColl.find({});\n // the \"data\" event is fired once per document\n cursor.on(\"data\", data => console.log(data));" - }, - { - "lang": "javascript", - "value": " const cursor = myColl.find({});\n const firstResult = await cursor.toArray();\n console.log(\"First count: \" + firstResult.length);\n await cursor.rewind();\n const secondResult = await cursor.toArray();\n console.log(\"Second count: \" + secondResult.length);" - }, - { - "lang": "javascript", - "value": " await cursor.close();" - } - ], - "preview": "Read operations that return multiple documents do not immediately return all values\nmatching the query. Because a query can potentially match very large sets of documents,\nthese operations return an object called a cursor, which references documents identified\nby the query. A cursor fetches documents in batches to reduce both memory consumption and\nnetwork bandwidth usage. Cursors are highly configurable and offer multiple interaction\nparadigms for different use cases.", - "tags": null, - "facets": { - "genre": ["tutorial"], - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "fundamentals/crud/read-operations/distinct", - "title": "Retrieve Distinct Values", - "headings": [ - "Overview", - "Sample Documents", - "Distinct", - "Document Field Parameter", - "Example", - "Query Parameter", - "Example", - "Options Parameter", - "Example", - "Additional Information", - "API Documentation" - ], - "paragraphs": "Use the distinct() method to retrieve all distinct values for a specified field\nacross a collection. 
To follow the examples in this guide, use the following code snippet to insert documents\nthat describe restaurants into the myDB.restaurants collection: Your query operation may return a reference to a\ncursor that contains matching documents. To learn how to\nexamine data stored in the cursor, see the\n Cursor Fundamentals page . The distinct() method requires a document field as a parameter. You can specify the\nfollowing optional parameters to adjust the method output: A query parameter to refine your results An options parameter to set collation rules Pass the name of the document field to return a list of the field's unique values. The \"Queens\" and \"Manhattan\" borough values each appear more than\nonce in the sample documents. However, the following example retrieves the\nunique values of the borough field: This code outputs the following borough values: You can specify a query parameter to return unique values for documents that match\nyour query. Visit Specify a Query for more information on constructing a\nquery filter. The following example outputs the distinct values of the cuisine field but\nexcludes restaurants in \"Brooklyn\" : In this case, the query filter matches every borough value except for \"Brooklyn\" . This\nprevents distinct() from outputting one cuisine value, \"Middle Eastern\" .\nThe code outputs the following values: You can specify a collation for the distinct() method by defining a\n collation field as an options parameter. This field allows you to set\nregional rules for string ordering and comparisons. See Collations for instructions on applying collations. When using the options parameter, you must also specify a query parameter. If\nyou don't want to use a query filter, define the query as {} . The following example uses a collation field to specify German language ordering\nconventions when outputting the distinct restaurant values: In this case, German string ordering conventions place words beginning with \"\u00c4\" before\nthose beginning with \"B\". The code outputs the following: If you do not specify the collation field, the output order follows default\nbinary collation rules. These rules place words beginning with \"\u00c4\" after those\nwith unaccented first letters: For a runnable example of retrieving distinct values, see Retrieve Distinct Values of a Field . 
To learn more about the distinct() method and its parameters, you can visit the\n API documentation .", - "code": [ - { - "lang": "javascript", - "value": "const myDB = client.db(\"myDB\");\nconst myColl = myDB.collection(\"restaurants\");\n\nawait myColl.insertMany([\n { \"_id\": 1, \"restaurant\": \"White Bear\", \"borough\": \"Queens\", \"cuisine\": \"Chinese\" },\n { \"_id\": 2, \"restaurant\": \"Via Carota\", \"borough\": \"Manhattan\", \"cuisine\": \"Italian\" },\n { \"_id\": 3, \"restaurant\": \"Borgatti's\", \"borough\": \"Bronx\", \"cuisine\": \"Italian\" },\n { \"_id\": 4, \"restaurant\": \"Tanoreen\", \"borough\": \"Brooklyn\", \"cuisine\": \"Middle Eastern\" },\n { \"_id\": 5, \"restaurant\": \"\u00c4pfel\", \"borough\": \"Queens\", \"cuisine\": \"German\" },\n { \"_id\": 6, \"restaurant\": \"Samba Kitchen\", \"borough\": \"Manhattan\", \"cuisine\": \"Brazilian\" },\n]);" - }, - { - "lang": "javascript", - "value": "// specify \"borough\" as the field to return values for\nconst distinctValues = await myColl.distinct(\"borough\");\nconsole.dir(distinctValues);" - }, - { - "lang": "json", - "value": "[ \"Bronx\", \"Brooklyn\", \"Manhattan\", \"Queens\" ]" - }, - { - "lang": "javascript", - "value": "// exclude Brooklyn restaurants from the output\nconst query = { borough: { $ne: \"Brooklyn\" }};\n\n// find the filtered distinct values of \"cuisine\"\nconst distinctValues = await myColl.distinct(\"cuisine\", query);\nconsole.dir(distinctValues);" - }, - { - "lang": "json", - "value": "[ \"Brazilian\", \"Chinese\", \"German\", \"Italian\" ]" - }, - { - "lang": "javascript", - "value": "// define an empty query document\nconst query = {};\n// specify German string ordering conventions\nconst options = { collation: { locale: \"de\" }};\n\nconst distinctValues = await myColl.distinct(\"restaurant\", query, options);\nconsole.dir(distinctValues);" - }, - { - "lang": "json", - "value": "[ \"\u00c4pfel\", \"Borgatti's\", \"Samba Kitchen\", \"Tanoreen\", \"Via Carota\", \"White Bear\" ]" - }, - { - "lang": "json", - "value": "[ \"Borgatti's\", \"Samba Kitchen\", \"Tanoreen\", \"Via Carota\", \"White Bear\", \"\u00c4pfel\" ]" - } - ], - "preview": "Use the distinct() method to retrieve all distinct values for a specified field\nacross a collection.", - "tags": null, - "facets": { - "genre": ["tutorial"], - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "fundamentals/crud/read-operations/geo", - "title": "Search Geospatially", - "headings": [ - "Overview", - "Coordinates on an Earth-like Sphere", - "Coordinates on a 2D Plane", - "Examples", - "Query by Proximity", - "Query Within a Range" - ], - "paragraphs": "You can query data based on geographical location using geospatial query\noperators. You can format geospatial queries using one of the following\ncoordinate systems: This section contains examples of geospatial queries using different\nquery operators that you can run against your Atlas sample dataset. Coordinates on an Earth-like Sphere Coordinates on a 2D Plane For geospatial queries using longitude and latitude coordinates\non an Earth-like sphere, use the GeoJSON \nquery format. While GeoJSON has multiple types , all GeoJSON data\ntypes use some form of the following structure: The object type determines the number of coordinates. 
For instance, a\n Point requires only one coordinate: a longitude and a latitude.\nA Line uses two coordinates: a longitude and a latitude for each end.\nA Polygon consists of a list of coordinates in which the first and last\ncoordinate are the same, effectively closing the polygon. To learn more\nabout the GeoJSON shapes you can use in MongoDB, consult the\n GeoJSON manual entry . To enable querying GeoJSON data, you must add the field to a 2dsphere \nindex. The following snippet creates an index on the location.geo field in\nthe theaters collection using the createIndex() method: You can also express geospatial queries using x and y coordinates in\na two-dimensional Euclidean plane. In earlier versions of MongoDB, this was the only format\ncompatible with geospatial queries; such coordinates are now referred to as\n\"legacy coordinate pairs\". Legacy coordinate pairs use the following structure: The field contains an array of two values in which the first represents\nthe x axis value and the second represents the y axis value. To enable querying using legacy coordinate pairs, create a 2d index on\nthe field in the collection. The following snippet creates an index on the\n coordinates field in the shipwrecks collection using the\n createIndex() method: See the\n MongoDB Server manual page on legacy coordinate pairs \nfor more information. Spherical ( 2dsphere ) and flat ( 2d ) indexes support some, but\nnot all, of the same query operators. For a full list of operators\nand their index compatibility, consult the\n manual entry for geospatial queries . The following examples use the MongoDB Atlas sample dataset. You can learn how to set up your own free-tier Atlas cluster and how to load the sample dataset in our\n quick start guide . The examples use the theaters collection in the sample_mflix database\nfrom the sample dataset. The theaters collection contains a 2dsphere index\non the location.geo field. The $near \noperator accepts a set of longitude-latitude coordinates and returns\ndocuments ordered from nearest to farthest. To limit the results to a\nmaximum distance in meters, use the $maxDistance option. For a\ncomplete list of options, see the reference documentation for $near .\nThe following example queries for theaters within 10,000 meters of\n [ -73.9667, 40.78 ] . The $geoWithin operator\nselects documents with geospatial data that exist within a specified\nshape. 
The following example searches for movie theaters in the New\nEngland area: See the MongoDB Server manual page on geospatial query operators \nfor more information on the operators you can use in your query.", - "code": [ - { - "lang": "javascript", - "value": "<field> : {\n type: <GeoJSON type>,\n coordinates: [\n [longitude_1, latitude_1],\n ...\n [longitude_n, latitude_n]\n ]\n}" - }, - { - "lang": "javascript", - "value": "db.theaters.createIndex({ \"location.geo\": \"2dsphere\" });" - }, - { - "lang": "javascript", - "value": "<field> : [ x, y ]" - }, - { - "lang": "javascript", - "value": "db.shipwrecks.createIndex({ coordinates: \"2d\" });" - }, - { - "lang": "javascript", - "value": "// Find theaters within a certain proximity\nasync function proximity(theaters) {\n // Define the query to find theaters near a specific location\n const query = {\n \"location.geo\": {\n $near: {\n $geometry: { type: \"Point\", coordinates: [-73.9667, 40.78] },\n $maxDistance: 10000,\n },\n },\n };\n // Find documents based on our query\n const cursor = theaters.find(query);" - }, - { - "lang": "javascript", - "value": "// Find theaters within a specific geographic range\nasync function range(theaters) {\n // Define the query to find theaters within a specified polygon\n const query = {\n \"location.geo\": {\n $geoWithin: {\n $geometry: {\n type: \"Polygon\",\n coordinates: [\n [\n [-72, 40], // Polygon coordinates defining the range\n [-74, 41],\n [-72, 39],\n [-72, 40],\n ],\n ],\n },\n },\n },\n };\n\n // Find documents based on our query\n const cursor = theaters.find(query);" - } - ], - "preview": "You can query data based on geographical location using geospatial query\noperators. You can format geospatial queries using one of the following\ncoordinate systems:", - "tags": null, - "facets": { - "genre": ["tutorial"], - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "fundamentals/crud/read-operations/limit", - "title": "Limit the Number of Returned Results", - "headings": ["Overview", "Sample Documents", "Limit", "Skip"], - "paragraphs": "Use limit to cap the number of documents that can be returned from a\nread operation. limit functions as a cap on the maximum number of\ndocuments that the operation can return, but the operation can return\na smaller number of documents if there are not enough documents present\nto reach the limit. If limit is used with the\n skip method, the skip applies\nfirst and the limit only applies to the documents left over after\nthe skip. To follow the examples in this guide, use the following code snippet to insert documents\nthat describe books into the myDB.books collection: Your query operation may return a reference to a\ncursor that contains matching documents. To learn how to\nexamine data stored in the cursor, see the\n Cursor Fundamentals page . The following example queries the collection to return the top three\nlongest books. It matches all documents because the query filter is\nempty. Then, it applies a descending sort on the length field to\nreturn longer books before shorter books and a limit to\nreturn only the first 3 results: The code example above outputs the following three documents, sorted by\nlength: You can also apply sort and limit by specifying them in an\n options object in your call to the find() method. The following two\ncalls are equivalent: For more information on the options settings for the find() \nmethod, see the\n API documentation on find() . 
The order in which you call limit and sort does not matter\nbecause the driver reorders the calls to apply the sort first and the\nlimit after it. The following two calls are equivalent: To see the next three books in the results, append the skip() method,\npassing the number of documents to bypass as shown below: This operation returns the documents that describe the fourth through sixth\nbooks in order of longest-to-shortest length: You can combine skip and limit in this way to implement paging for your\ncollection, returning only small \"slices\" of the collection at once.", - "code": [ - { - "lang": "javascript", - "value": "const myDB = client.db(\"myDB\");\nconst myColl = myDB.collection(\"books\");\n\nawait myColl.insertMany([\n { \"_id\": 1, \"name\": \"The Brothers Karamazov\", \"author\": \"Dostoyevsky\", \"length\": 824 },\n { \"_id\": 2, \"name\": \"Les Mis\u00e9rables\", \"author\": \"Hugo\", \"length\": 1462 },\n { \"_id\": 3, \"name\": \"Atlas Shrugged\", \"author\": \"Rand\", \"length\": 1088 },\n { \"_id\": 4, \"name\": \"Infinite Jest\", \"author\": \"Wallace\", \"length\": 1104 },\n { \"_id\": 5, \"name\": \"Cryptonomicon\", \"author\": \"Stephenson\", \"length\": 918 },\n { \"_id\": 6, \"name\": \"A Dance With Dragons\", \"author\": \"Martin\", \"length\": 1104 },\n]);" - }, - { - "lang": "javascript", - "value": "// define an empty query document\nconst query = {};\n// sort in descending (-1) order by length\nconst sort = { length: -1 };\nconst limit = 3;\nconst cursor = myColl.find(query).sort(sort).limit(limit);\nfor await (const doc of cursor) {\n console.dir(doc);\n}" - }, - { - "lang": "json", - "value": "{ \"_id\": 2, \"name\": \"Les Mis\u00e9rables\", \"author\": \"Hugo\", \"length\": 1462 }\n{ \"_id\": 6, \"name\": \"A Dance With Dragons\", \"author\": \"Martin\", \"length\": 1104 }\n{ \"_id\": 4, \"name\": \"Infinite Jest\", \"author\": \"Wallace\", \"length\": 1104 }" - }, - { - "lang": "javascript", - "value": "myColl.find(query).sort({ length: -1 }).limit(3);\nmyColl.find(query, { sort: { length: -1 }, limit: 3 });" - }, - { - "lang": "javascript", - "value": "myColl.find(query).sort({ length: -1 }).limit(3);\nmyColl.find(query).limit(3).sort({ length: -1 });" - }, - { - "lang": "javascript", - "value": "// define an empty query document\nconst query = {};\n// sort in descending (-1) order by length\nconst sort = { length: -1 };\nconst limit = 3;\nconst skip = 3;\nconst cursor = myColl.find(query).sort(sort).limit(limit).skip(skip);\nfor await (const doc of cursor) {\n console.dir(doc);\n}" - }, - { - "lang": "json", - "value": "{ \"_id\": 3, \"name\": \"Atlas Shrugged\", \"author\": \"Rand\", \"length\": 1088 }\n{ \"_id\": 5, \"name\": \"Cryptonomicon\", \"author\": \"Stephenson\", \"length\": 918 }\n{ \"_id\": 1, \"name\": \"The Brothers Karamazov\", \"author\": \"Dostoyevsky\", \"length\": 824 }" - } - ], - "preview": "Use limit to cap the number of documents that can be returned from a\nread operation. limit functions as a cap on the maximum number of\ndocuments that the operation can return, but the operation can return\na smaller number of documents if there are not enough documents present\nto reach the limit. 
If limit is used with the\nskip method, the skip applies\nfirst and the limit only applies to the documents left over after\nthe skip.", - "tags": null, - "facets": { - "genre": ["tutorial"], - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "fundamentals/crud/read-operations/project", - "title": "Specify Which Fields to Return", - "headings": [ - "Overview", - "Sample Documents", - "Single Field", - "Multiple Fields" - ], - "paragraphs": "Use a projection to control which fields appear in the documents\nreturned by read operations. Many requests only require certain fields,\nso projections can help you limit unnecessary network bandwidth usage.\nProjections work in two ways: These two methods of projection are mutually exclusive: if you\nexplicitly include fields, you cannot explicitly exclude fields, and\nvice versa. Explicitly include fields with a value of 1 . This has the\nside-effect of implicitly excluding all unspecified fields. Explicitly exclude fields with a value of 0 . This has the\nside-effect of implicitly including all unspecified fields. To follow the examples in this guide, use the following code snippet to insert documents\nthat describe fruits into the myDB.fruits collection: Your query operation may return a reference to a\ncursor that contains matching documents. To learn how to\nexamine data stored in the cursor, see the\n Cursor Fundamentals page . In the following query, pass the projection to only return the name \nfield of each document: The projection document specifies a value of 1 for name . This instructs\nthe operation to include the name field of each returned document in\nthe results and exclude the qty and rating fields. Passing this projection\nto find() with an empty query document and no sort document yields the following\nresults: Although this projection only explicitly included the name field, the query returned\nthe _id field as well. The _id field is a special case because it is always included in every query unless\nexplicitly specified otherwise. This is because _id is a unique identifier for each\ndocument, a property that is often used when constructing queries. The movies \ncollection data demonstrates why this property is necessary: two or more movies can share\nthe same title, such as movie remakes. Because of this, you need a unique _id value to\nreliably reference a specific movie. _id is the only exception to the mutually\nexclusive include-exclude behavior in projections: you can explicitly exclude _id \neven when explicitly including other fields if you do not want _id to be present in\nreturned documents. The projection document specifies a value of 1 for name and 0 for\n _id . This instructs the operation to include the name field of each\nreturned document in the results and exclude the _id , qty , and rating \nfields. Passing this projection to find() with an empty query document and\nno sort document yields the following results: You can also specify multiple fields to include in your projection. Note: the\norder in which you specify the fields in the projection does not alter the\norder in which they are returned. 
This example, which identifies two fields to include in the projection, yields\nthe following results: For more projection examples, see the\n MongoDB Manual page on Project Fields to Return from Query .", - "code": [ - { - "lang": "javascript", - "value": "const myDB = client.db(\"myDB\");\nconst myColl = myDB.collection(\"fruits\");\n\nawait myColl.insertMany([\n { \"_id\": 1, \"name\": \"apples\", \"qty\": 5, \"rating\": 3 },\n { \"_id\": 2, \"name\": \"bananas\", \"qty\": 7, \"rating\": 1 },\n { \"_id\": 3, \"name\": \"oranges\", \"qty\": 6, \"rating\": 2 },\n { \"_id\": 4, \"name\": \"avocados\", \"qty\": 3, \"rating\": 5 },\n]);" - }, - { - "lang": "javascript", - "value": "// return only the name field\nconst projection = { name: 1 };\nconst cursor = myColl.find().project(projection);\nfor await (const doc of cursor) {\n console.dir(doc);\n}" - }, - { - "lang": "json", - "value": "{ \"_id\": 1, \"name\": \"apples\" }\n{ \"_id\": 2, \"name\": \"bananas\" }\n{ \"_id\": 3, \"name\": \"oranges\" }\n{ \"_id\": 4, \"name\": \"avocados\" }" - }, - { - "lang": "javascript", - "value": "// return only the name field\nconst projection = { _id: 0, name: 1 };\nconst cursor = myColl.find().project(projection);\nfor await (const doc of cursor) {\n console.dir(doc);\n}" - }, - { - "lang": "json", - "value": "{ \"name\": \"apples\" }\n{ \"name\": \"bananas\" }\n{ \"name\": \"oranges\" }\n{ \"name\": \"avocados\" }" - }, - { - "lang": "javascript", - "value": "const projection = { _id: 0, rating: 1, name: 1 };\nconst cursor = myColl.find().project(projection);\nfor await (const doc of cursor) {\n console.dir(doc);\n}" - }, - { - "lang": "json", - "value": " { \"name\": \"apples\", \"rating\": 3 }\n { \"name\": \"bananas\", \"rating\": 1 }\n { \"name\": \"oranges\", \"rating\": 2 }\n { \"name\": \"avocados\", \"rating\": 5 }" - } - ], - "preview": "Use a projection to control which fields appear in the documents\nreturned by read operations. Many requests only require certain fields,\nso projections can help you limit unnecessary network bandwidth usage.\nProjections work in two ways:", - "tags": null, - "facets": { - "genre": ["tutorial"], - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "fundamentals/crud/read-operations/retrieve", - "title": "Retrieve Data", - "headings": [ - "Overview", - "Find Documents", - "Additional Information", - "Aggregate Data from Documents", - "Additional Information", - "Monitor Data Changes", - "Additional Information" - ], - "paragraphs": "You can perform find operations to retrieve data from your MongoDB database.\nYou can perform a find operation to match documents on a set of criteria\nby calling the find() or findOne() method. You can also further specify the information that the find operation\nreturns by specifying optional parameters or by chaining other methods,\nas shown in the following guides: You can also use an aggregation operation to retrieve data. This type of\noperation allows you to apply an ordered pipeline of transformations to the\nmatched data. If you want to monitor the database for incoming data that matches a set of\ncriteria, you can use the watch operation to be notified in real-time when\nmatching data is inserted. This page includes a short interactive lab that demonstrates how to\nretrieve data by using the find() method. You can complete this lab\ndirectly in your browser window without installing MongoDB or a code editor. 
To start the lab, click the Open Interactive Tutorial button at the\ntop of the page. To expand the lab to a full-screen format, click the\nfull-screen button ( \u26f6 ) in the top-right corner of the lab pane. Sort Results Skip Returned Results Limit the Number of Returned Results Specify Which Fields to Return Your query operation may return a reference to a\ncursor that contains matching documents. To learn how to\nexamine data stored in the cursor, see the\n Cursor Fundamentals page . You can use the Node.js driver to connect and perform read operations for\ndeployments hosted in the following environments: MongoDB Atlas : The fully\nmanaged service for MongoDB deployments in the cloud MongoDB Enterprise : The\nsubscription-based, self-managed version of MongoDB MongoDB Community : The\nsource-available, free-to-use, and self-managed version of MongoDB To learn more about performing read operations in the Atlas UI for deployments hosted in MongoDB\nAtlas, see View, Filter, and Sort Documents . You can call the find() method on a Collection object. The\nmethod accepts a query document that describes the documents you want to\nretrieve. For more information on how to specify your query document,\nsee the Specify a Query guide. The find() method returns a Cursor instance from which you can\naccess the matched documents. The findOne() method returns a Promise \ninstance, which you can resolve to access either the matching document or\na null value if there are no matches. To execute a find operation that has no query criteria, you can\npass an empty query or omit the query document in your find\nmethod parameters. The following operations both return all documents in the\n myColl collection: If you don't pass a query or pass an empty query\nto the findOne() method, the operation returns a single\ndocument from a collection. You can specify options in a find operation even when you pass an\nempty query. For example, the following code shows how you can\nspecify a projection as an option while executing a find operation\nthat receives an empty query parameter: For more information about projecting document fields, see the\n Specify Which Fields to Return guide. A pizza restaurant wants to find all pizzas ordered by Lemony Snicket\nyesterday. They run the following find() query on the\n orders collection: Once the operation returns, the findResult variable references a\n Cursor . You can print the documents retrieved using the for await...of \nsyntax as shown below: The output might resemble the following: For runnable code examples that demonstrate find operations, see the following\nusage examples: For more information about the findOne() and find() methods, see the\nfollowing Server manual documentation: Find a Document Find Multiple Documents findOne() find() If you want to run a custom processing pipeline to retrieve data from your\ndatabase, you can use the aggregate() method. This method accepts\naggregation expressions to run in sequence. These expressions let you filter,\ngroup, and arrange the result data from a collection. A pizza restaurant wants to run a status report on-demand to\nsummarize pizza orders over the past week. They run the following\n aggregate() query on the orders collection to fetch the\ntotals for each distinct \"status\" field: Once the operation returns, the aggregateResult variable references a\n Cursor . 
You can print the documents retrieved using the for await...of \nsyntax as shown below: The output might resemble the following: For more information on how to construct an aggregation pipeline, see\nthe Aggregation guide or Aggregation Operations \nin the Server manual. You can use the watch() method to monitor a collection for changes\nthat match certain criteria. These changes include inserted,\nupdated, replaced, and deleted documents. You can pass this method\na pipeline of aggregation commands that sequentially runs on the changed\ndata whenever write operations are executed on the collection. A pizza restaurant wants to receive a notification whenever a new pizza\norder comes in. To accomplish this, they create an aggregation pipeline\nto filter on insert operations and return specific fields. They pass\nthis pipeline to the watch() method called on the orders \ncollection as shown below: For a runnable example of the watch() method, see the\n Watch for Changes usage example.", - "code": [ - { - "lang": "javascript", - "value": "myColl.find(); // no query\nmyColl.find({}); // empty query" - }, - { - "lang": "javascript", - "value": "const options = {\n projection: { _id: 0, field1: 1 },\n};\n\nconst findResult = await myColl.findOne({}, options);" - }, - { - "lang": "javascript", - "value": "for await (const doc of findResult) {\n console.log(doc);\n}" - }, - { - "lang": "javascript", - "value": "[\n { name: \"Lemony Snicket\", type: \"horseradish pizza\", qty: 1, status: \"delivered\", date: ... },\n { name: \"Lemony Snicket\", type: \"coal-fired oven pizza\", qty: 3, status: \"canceled\", date: ...},\n ...\n]" - }, - { - "lang": "javascript", - "value": " // Search for orders by name and within a specific date range\n const findResult = orders.find({\n name: \"Lemony Snicket\",\n date: {\n $gte: new Date(new Date().setHours(0, 0, 0)),\n $lt: new Date(new Date().setHours(23, 59, 59)),\n },\n });" - }, - { - "lang": "javascript", - "value": "for await (const doc of aggregateResult) {\n console.log(doc);\n}" - }, - { - "lang": "javascript", - "value": "[\n { _id: 'delivering', count: 5 },\n { _id: 'delivered', count: 37 },\n { _id: 'created', count: 9 }\n]" - }, - { - "lang": "javascript", - "value": " // Group orders by status within the last week\n const aggregateResult = orders.aggregate([\n {\n $match: {\n date: {\n $gte: new Date(new Date().getTime() - 1000 * 3600 * 24 * 7),\n $lt: new Date(),\n },\n },\n },\n {\n $group: {\n _id: \"$status\",\n count: {\n $sum: 1,\n },\n },\n },\n ]);" - }, - { - "lang": "javascript", - "value": " // Set up a change stream to listen for new order insertions\n const changeStream = orders.watch([\n { $match: { operationType: \"insert\" } },\n {\n $project: {\n \"fullDocument.name\": 1,\n \"fullDocument.address\": 1,\n },\n },\n ]);\n changeStream.on(\"change\", change => {\n const { name, address } = change.fullDocument;\n console.log(`New order for ${name} at ${address}.`);\n });" - } - ], - "preview": "Learn how to retrieve data, aggregate data, and monitor data changes in MongoDB by using the Node.js driver.", - "tags": "node.js, code example, find one, find many", - "facets": { - "genre": ["reference"], - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "fundamentals/crud/read-operations/skip", - "title": "Skip Returned Results", - "headings": ["Overview", "Sample Documents", "Example"], - "paragraphs": "Use skip to omit documents from the beginning of the list 
of\nreturned documents for a read operation. You can combine skip with\n sort to omit the top\n(for descending order) or bottom (for ascending order) results for a\ngiven query. Since the order of documents returned is not guaranteed in\nthe absence of a sort, using skip without using sort omits\narbitrary documents. If the value of skip exceeds the number of matched documents for\na query, then that query returns no documents. To follow the examples in this guide, use the following code snippet to insert documents\nthat describe fruits into the myDB.fruits collection: Your query operation may return a reference to a\ncursor that contains matching documents. To learn how to\nexamine data stored in the cursor, see the\n Cursor Fundamentals page . In the following example, we query the collection with a filter that\nmatches all the documents and pass options that specify sort and\n skip commands as query options. The sort option specifies that fruit\ndocuments that have higher rating values are returned before ones with lower\nratings. The skip option specifies that the first 2 documents are\nomitted from the result: Since we specified that the query skip the first 2 documents, the third and fourth highest\nrating documents are printed by the code snippet above: The sort and skip options can also be specified as methods chained to\nthe find method. The following two commands are equivalent:", - "code": [ - { - "lang": "javascript", - "value": "const myDB = client.db(\"myDB\");\nconst myColl = myDB.collection(\"fruits\");\n\nawait myColl.insertMany([\n { \"_id\": 1, \"name\": \"apples\", \"qty\": 5, \"rating\": 3 },\n { \"_id\": 2, \"name\": \"bananas\", \"qty\": 7, \"rating\": 1 },\n { \"_id\": 3, \"name\": \"oranges\", \"qty\": 6, \"rating\": 2 },\n { \"_id\": 4, \"name\": \"avocados\", \"qty\": 3, \"rating\": 5 },\n]);" - }, - { - "lang": "javascript", - "value": "// define an empty query document\nconst query = {};\nconst options = {\n // sort in descending (-1) order by rating\n sort : { rating: -1 },\n // omit the first two documents\n skip : 2,\n}\n\nconst cursor = myColl.find(query, options);\nfor await (const doc of cursor) {\n console.dir(doc);\n}" - }, - { - "lang": "json", - "value": "{ \"_id\": 3, \"name\": \"oranges\", \"qty\": 6, \"rating\": 2 }\n{ \"_id\": 2, \"name\": \"bananas\", \"qty\": 7, \"rating\": 1 }" - }, - { - "lang": "javascript", - "value": "myColl.find(query, { sort: { rating: -1}, skip: 2});\nmyColl.find(query).sort({rating: -1}).skip(2);" - } - ], - "preview": "Use skip to omit documents from the beginning of the list of\nreturned documents for a read operation. You can combine skip with\nsort to omit the top\n(for descending order) or bottom (for ascending order) results for a\ngiven query. Since the order of documents returned is not guaranteed in\nthe absence of a sort, using skip without using sort omits\narbitrary documents.", - "tags": null, - "facets": { - "genre": ["tutorial"], - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "fundamentals/crud/read-operations/sort", - "title": "Sort Results", - "headings": ["Overview", "Sample Documents", "Example"], - "paragraphs": "Use sort to change the order in which read operations return\ndocuments. Sort tells MongoDB to order returned documents by the\nvalues of one or more fields in a certain direction. To sort returned\ndocuments by a field in ascending (lowest first) order, use a value of\n 1 . 
To sort in descending (greatest first) order instead, use -1 .\nIf you do not specify a sort, MongoDB does not guarantee the order of\nquery results. Follow the instructions in the examples below to insert data into\nthe myDB.books collection and perform a sort on the results of a query.\nConsider a collection containing documents that describe books. To\ninsert this data into a collection, run the following operation: Your query operation may return a reference to a\ncursor that contains matching documents. To learn how to\nexamine data stored in the cursor, see the\n Cursor Fundamentals page . Pass the following sort document to a read operation to ensure that the\noperation returns books with longer lengths before books with shorter\nlengths: In this case, the number -1 tells the read operation to sort the\nbooks in descending order by length. find() returns the following\ndocuments when this sort is used with an empty query: Sometimes, the order of two or more documents is ambiguous using a\nspecified sort. In the above case, both \"A Dance with Dragons\" and\n\"Infinite Jest\" have 1104 pages, so the order in which they are\nreturned is not guaranteed. To resolve ties in your sorted results in a\nrepeatable way, add more fields to the sort document: With the addition of the author field to the sort document, the read operation sorts\nmatching documents first by length then, if there is a tie, by author . Matched\ndocument fields are compared in the same order as fields are specified in the sort\ndocument. find() returns the following ordering of documents when this sort is used on\nthe documents matching the query, sorting \"Martin\" before \"Wallace\" for the two books with\nthe same length:", - "code": [ - { - "lang": "javascript", - "value": "const myDB = client.db(\"myDB\");\nconst myColl = myDB.collection(\"books\");\n\nawait myColl.insertMany([\n { \"_id\": 1, \"name\": \"The Brothers Karamazov\", \"author\": \"Dostoyevsky\", \"length\": 824 },\n { \"_id\": 2, \"name\": \"Les Mis\u00e9rables\", \"author\": \"Hugo\", \"length\": 1462 },\n { \"_id\": 3, \"name\": \"Atlas Shrugged\", \"author\": \"Rand\", \"length\": 1088 },\n { \"_id\": 4, \"name\": \"Infinite Jest\", \"author\": \"Wallace\", \"length\": 1104 },\n { \"_id\": 5, \"name\": \"Cryptonomicon\", \"author\": \"Stephenson\", \"length\": 918 },\n { \"_id\": 6, \"name\": \"A Dance with Dragons\", \"author\": \"Martin\", \"length\": 1104 },\n]);" - }, - { - "lang": "javascript", - "value": "// define an empty query document\nconst query = {};\n// sort in descending (-1) order by length\nconst sort = { length: -1 };\nconst cursor = myColl.find(query).sort(sort);\nfor await (const doc of cursor) {\n console.dir(doc);\n}" - }, - { - "lang": "json", - "value": "{ \"_id\": 2, \"name\": \"Les Mis\u00e9rables\", \"author\": \"Hugo\", \"length\": 1462 }\n{ \"_id\": 4, \"name\": \"Infinite Jest\", \"author\": \"Wallace\", \"length\": 1104 }\n{ \"_id\": 6, \"name\": \"A Dance with Dragons\", \"author\": \"Martin\", \"length\": 1104 }\n{ \"_id\": 3, \"name\": \"Atlas Shrugged\", \"author\": \"Rand\", \"length\": 1088 }\n{ \"_id\": 5, \"name\": \"Cryptonomicon\", \"author\": \"Stephenson\", \"length\": 918 }\n{ \"_id\": 1, \"name\": \"The Brothers Karamazov\", \"author\": \"Dostoyevsky\", \"length\": 824 }" - }, - { - "lang": "javascript", - "value": "// define an empty query document\nconst query = {};\n// sort in ascending (1) order by length\nconst sort = { length: 1, author: 1 };\nconst cursor = 
myColl.find(query).sort(sort);\nfor await (const doc of cursor) {\n console.dir(doc);\n}" - }, - { - "lang": "json", - "value": "{ \"_id\": 1, \"name\": \"The Brothers Karamazov\", \"author\": \"Dostoyevsky\", \"length\": 824 }\n{ \"_id\": 5, \"name\": \"Cryptonomicon\", \"author\": \"Stephenson\", \"length\": 918 }\n{ \"_id\": 3, \"name\": \"Atlas Shrugged\", \"author\": \"Rand\", \"length\": 1088 }\n{ \"_id\": 6, \"name\": \"A Dance with Dragons\", \"author\": \"Martin\", \"length\": 1104 }\n{ \"_id\": 4, \"name\": \"Infinite Jest\", \"author\": \"Wallace\", \"length\": 1104 }\n{ \"_id\": 2, \"name\": \"Les Mis\u00e9rables\", \"author\": \"Hugo\", \"length\": 1462 }" - } - ], - "preview": "Use sort to change the order in which read operations return\ndocuments. Sort tells MongoDB to order returned documents by the\nvalues of one or more fields in a certain direction. To sort returned\ndocuments by a field in ascending (lowest first) order, use a value of\n1. To sort in descending (greatest first) order instead, use -1.\nIf you do not specify a sort, MongoDB does not guarantee the order of\nquery results.", - "tags": null, - "facets": { - "genre": ["tutorial"], - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "fundamentals/crud/read-operations/text", - "title": "Search Text", - "headings": [ - "Overview", - "Examples", - "Query for Words", - "Query By Phrase", - "Query with Negations", - "Sort by Relevance" - ], - "paragraphs": "Text searches let you search string type fields in your collection for specified words or\nphrases. You can perform a text search by using the $text operator, which performs a\nlogical OR on each term separated by a space in the search string. You can also\nspecify more options to the operator to handle case sensitivity, stop words, and word\nstemming (such as plural forms or other tenses) for a supported language.\nThis is often used for unstructured text such as transcripts, essays, or web pages. The $text query operator requires that you specify the search field in\na text index on your collection. See the examples below for sample\ncode for creating a text index and using the $text query operator. Atlas Search helps you build fast,\nrelevance-based search capabilities on top of your MongoDB data. Try it today on\n MongoDB Atlas , our\nfully managed database as a service. The following examples use sample data from the movies collection in the\n sample_mflix database. To enable text searches on the title field, create a\n text index by using the following command: We use a single field text index for the examples in this guide, but you can\ncreate a compound text index that broadens your text queries to multiple\nfields. The following command creates a text index on two fields in the\n movies collection: You can only create one text index per collection. Every text search\nqueries all the fields specified in that index for matches. To learn more about text indexes, see Text Indexes in the Server manual. When creating a compound text index, you can specify a weight option to\nprioritize certain text fields in your index. When you execute a text\nsearch, the field weights influence how MongoDB calculates the\n text search score for each matching\ndocument. To learn more about specifying field weights when creating a text\nindex, see the Text Indexes \nsection in the Indexes guide. This example queries for Star Trek movies by searching for titles\ncontaining the word \"trek\". 
If you want to query using multiple words,\nseparate your words with spaces to query for documents that match any of\nthe search terms (logical OR ). This operation returns the following documents: Success! The query found every document in the movies collection\nwith a title including the word \"trek\". Unfortunately, the search included\none unintended item: \"Trek Nation,\" which is a movie about Star Trek and not\npart of the Star Trek movie series. To solve this, we can query with a more\nspecific phrase . To make your query more specific, try using the phrase \"star trek\"\ninstead of just the word \"trek\". To search by phrase, surround your\nmulti-word phrase with escaped quotes ( \\\"\\\" ): Querying by the phrase \"star trek\" instead of just the term \"trek\" \nmatches the following documents: These results include all movies in the database that contain the phrase\n \"star trek\" , which in this case results in only fictional Star Trek\nmovies. Unfortunately, this query returned \"Star Trek Into\nDarkness\" , a movie that was not part of the original series of movies. To\nresolve this issue, we can omit that document with a negation . To use a negated term, place a negative sign, - , in front of the term\nyou want to omit from the result set. The query operation omits any\ndocuments that contain this term from the search result. Since this query\nincludes two distinct terms, separate them with a space. Querying with the negated term yields the following documents: Your query operation may return a reference to a\ncursor that contains matching documents. To learn how to\nexamine data stored in the cursor, see the\n Cursor Fundamentals page . Now that the result set reflects the desired results, you can use the\ntext search textScore , accessed using the $meta operator in the query\nprojection, to order the results by relevance: Querying in this way returns the following documents in the following\norder. In general, text relevance increases as a string matches more\nterms and decreases as the unmatched portion of the string lengthens. 
For more information about the $text operator and its options, see the\n manual entry .", - "code": [ - { - "lang": "javascript", - "value": "db.movies.createIndex({ title: \"text\" });" - }, - { - "lang": "javascript", - "value": "db.movies.createIndex({ title: \"text\", plot: \"text\" });" - }, - { - "lang": "javascript", - "value": "{ title: 'Trek Nation' }\n{ title: 'Star Trek' }\n{ title: 'Star Trek Into Darkness' }\n{ title: 'Star Trek: Nemesis' }\n{ title: 'Star Trek: Insurrection' }\n{ title: 'Star Trek: Generations' }\n{ title: 'Star Trek: First Contact' }\n{ title: 'Star Trek: The Motion Picture' }\n{ title: 'Star Trek VI: The Undiscovered Country' }\n{ title: 'Star Trek V: The Final Frontier' }\n{ title: 'Star Trek IV: The Voyage Home' }\n{ title: 'Star Trek III: The Search for Spock' }\n{ title: 'Star Trek II: The Wrath of Khan' }" - }, - { - "lang": "javascript", - "value": " // Create a query that searches for the string \"trek\"\n const query = { $text: { $search: \"trek\" } };\n\n // Return only the `title` of each matched document\n const projection = {\n _id: 0,\n title: 1,\n };\n\n // Find documents based on our query and projection\n const cursor = movies.find(query).project(projection);" - }, - { - "lang": "javascript", - "value": "{ title: 'Star Trek' }\n{ title: 'Star Trek Into Darkness' }\n{ title: 'Star Trek: Nemesis' }\n{ title: 'Star Trek: Insurrection' }\n{ title: 'Star Trek: Generations' }\n{ title: 'Star Trek: First Contact' }\n{ title: 'Star Trek: The Motion Picture' }\n{ title: 'Star Trek VI: The Undiscovered Country' }\n{ title: 'Star Trek V: The Final Frontier' }\n{ title: 'Star Trek IV: The Voyage Home' }\n{ title: 'Star Trek III: The Search for Spock' }\n{ title: 'Star Trek II: The Wrath of Khan' }" - }, - { - "lang": "javascript", - "value": " // Create a query that searches for the phrase \"star trek\"\n const query = { $text: { $search: \"\\\"star trek\\\"\" } };\n\n // Return only the `title` of each matched document\n const projection = {\n _id: 0,\n title: 1,\n };\n\n // Find documents based on the query and projection\n const cursor = movies.find(query).project(projection);" - }, - { - "lang": "javascript", - "value": "{ title: 'Star Trek' }\n{ title: 'Star Trek: Nemesis' }\n{ title: 'Star Trek: Insurrection' }\n{ title: 'Star Trek: Generations' }\n{ title: 'Star Trek: First Contact' }\n{ title: 'Star Trek: The Motion Picture' }\n{ title: 'Star Trek VI: The Undiscovered Country' }\n{ title: 'Star Trek V: The Final Frontier' }\n{ title: 'Star Trek IV: The Voyage Home' }\n{ title: 'Star Trek III: The Search for Spock' }\n{ title: 'Star Trek II: The Wrath of Khan' }" - }, - { - "lang": "javascript", - "value": " // Create a query that searches for the phrase \"star trek\" while omitting \"into darkness\"\n const query = { $text: { $search: \"\\\"star trek\\\" -\\\"into darkness\\\"\" } };\n\n // Include only the `title` field of each matched document\n const projection = {\n _id: 0,\n title: 1,\n };\n\n // Find documents based on the query and projection\n const cursor = movies.find(query).project(projection);" - }, - { - "lang": "javascript", - "value": "{ title: 'Star Trek', score: 1.5 }\n{ title: 'Star Trek: Generations', score: 1.3333333333333333 }\n{ title: 'Star Trek: Insurrection', score: 1.3333333333333333 }\n{ title: 'Star Trek: Nemesis', score: 1.3333333333333333 }\n{ title: 'Star Trek: The Motion Picture', score: 1.25 }\n{ title: 'Star Trek: First Contact', score: 1.25 }\n{ title: 'Star Trek II: The Wrath of Khan', score: 1.2 }\n{ title: 
'Star Trek III: The Search for Spock', score: 1.2 }\n{ title: 'Star Trek IV: The Voyage Home', score: 1.2 }\n{ title: 'Star Trek V: The Final Frontier', score: 1.2 }\n{ title: 'Star Trek VI: The Undiscovered Country', score: 1.2 }" - }, - { - "lang": "javascript", - "value": " // Create a query that searches for the phrase \"star trek\" while omitting \"into darkness\"\n const query = { $text: { $search: \"\\\"star trek\\\" -\\\"into darkness\\\"\" } };\n\n // Sort returned documents by descending text relevance score\n const sort = { score: { $meta: \"textScore\" } };\n\n // Include only the `title` and `score` fields in each returned document\n const projection = {\n _id: 0,\n title: 1,\n score: { $meta: \"textScore\" },\n };\n\n // Find documents based on the query, sort, and projection\n const cursor = movies\n .find(query)\n .sort(sort)\n .project(projection);" - } - ], - "preview": "Text searches let you search string type fields in your collection for specified words or\nphrases. You can perform a text search by using the $text operator, which performs a\nlogical OR on each term separated by a space in the search string. You can also\nspecify more options to the operator to handle case sensitivity, stop words, and word\nstemming (such as plural forms or other tenses) for a supported language.\nThis is often used for unstructured text such as transcripts, essays, or web pages.", - "tags": null, - "facets": { - "genre": ["tutorial"], - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "fundamentals/crud/read-operations", - "title": "Read Operations", - "headings": [], - "paragraphs": "Retrieve Data Access Data From a Cursor Retrieve Distinct Values Sort Results Skip Returned Results Limit the Number of Returned Results Specify Which Fields to Return Search Geospatially Search Text", - "code": [], - "preview": "Learn about the commands for running MongoDB read operations by using the MongoDB Node.js driver.", - "tags": null, - "facets": { - "genre": ["reference"], - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "fundamentals/crud/read-write-pref", - "title": "Specify How CRUD Operations Run on Replica Sets", - "headings": [ - "Overview", - "Write Concern", - "Example: Set the Write Concern for a Single Write Operation", - "Example: Retrieve and Apply an Existing Write Concern", - "Read Concern", - "Example: Set the Read Concern Level of an Aggregation", - "Example: Change the Read Concern of a Database", - "Read Preference", - "Example: Set Read Preference and Concerns for a Transaction", - "Example: Set the Read Preference of a Cluster in the Connection String", - "API Documentation" - ], - "paragraphs": "In this guide, you can learn how to use the write concern , read concern , and\n read preference configurations to modify the way that MongoDB runs\ncreate, read, update, and delete (CRUD) operations on replica sets. You can set write concern, read concern, and read preference options at the following\nlevels: This list also indicates the increasing order of precedence of the option settings. For\nexample, if you set a read concern level for a transaction, it will override a read\nconcern level set for the client. These options allow you to customize the causal consistency and availability of the data\nin your replica sets. 
Client, which sets the default for all operation executions unless overridden Session Transaction Database Collection The write concern specifies the level of acknowledgement requested from MongoDB for write\noperations, such as an insert or update, before the operation successfully returns.\nOperations that do not specify an explicit write concern inherit the global default write\nconcern settings. For more information, see Write Concern in the\nServer manual. For detailed API documentation, see the WriteConcern API documentation . The following table describes the WriteConcern parameters: Parameter Type Description w (optional) W Requests acknowledgment that the write operation has propagated to a specified\nnumber of mongod instances or to mongod instances that are labelled with specified tags wtimeoutMS (optional) number Specifies a time limit to prevent write operations from blocking indefinitely journal (optional) boolean Requests acknowledgment that the write operation has been written to the on-disk journal This code uses custom WriteConcern settings while creating a new document: This code uses the fromOptions() method to construct a WriteConcern from the\noptions of an existing database reference, myDB . Note that myDB could be replaced\nwith a reference to any entity that accepts a write concern option. Then the new write\nconcern is applied to a document, myDoc . The read concern specifies the following behaviors: You can specify the read concern setting by using the level parameter. The default\nread concern level is local . This means that the client returns the data from the\nreplica set member that the client is connected to, with no guarantee that the data has\nbeen written to all replica set members. Note that lower read concern level requirements\nmay reduce latency. For more information about read concerns or read concern levels, see\n Read Concern in the Server manual. For more detail on\nthe ReadConcern type and definitions of the read concern levels, see the ReadConcern in\nthe API documentation. Level of causal consistency across replica sets Isolation guarantees maintained during a query This code sets the read concern level of an aggregation to \"available\" : For more information about aggregates, see the Aggregation page. This code changes the read concern level of a database to \"local\" : The read preference determines which member of a replica set MongoDB reads from when running a\nquery. You can also customize how the server evaluates members. For more detailed API documentation, see the ReadPreference API\ndocumentation . The following table describes the ReadPreference parameters: Parameter Type Description mode ReadPreferenceMode Specifies a requirement or preference for which replica set\nmember the server reads from. The default mode, primary , specifies that\noperations read from the primary member of the replica set. tags (optional) TagSet List Assigns tags to secondary replica set members to customize how the server evaluates\nthem. Tags cannot be used with the primary read preference mode setting. options (optional) ReadPreferenceOptions Sets various options, including hedge \nand maxStalenessSeconds that can be\napplied to your read preference. This code sets the read preference, read concern, and write concern for the operations in\na transaction: For more information about transactions, see Transactions . 
This code example creates a MongoClient that uses the \"secondary\" read preference mode\nwhen performing queries on a cluster: This example also sets the maxStalenessSeconds option. For more information about connection string options, see the Connection String Options \nsection in the manual. To learn more about the methods and types mentioned in this guide, see the following API\ndocumentation: API WriteConcern API ReadConcern API ReadPreference", - "code": [ - { - "lang": "js", - "value": "myDB.myCollection.insertOne(\n { name: \"anotherDocumentName\" },\n { writeConcern:\n { w: 2, wtimeoutMS: 5000 }\n }\n);" - }, - { - "lang": "js", - "value": "const newWriteConcern = WriteConcern.fromOptions(myDB);\nconst myDoc = { name: \"New Document\" };\nWriteConcern.apply(myDoc, newWriteConcern);" - }, - { - "lang": "js", - "value": "const pipeline = [\n {\"$match\": {\n category: \"KITCHENWARE\",\n }},\n {\"$unset\": [\n \"_id\",\n \"category\",\n ]}\n ];\n\nconst result = await myDB.collection(\"mycollection\")\n .aggregate(\n pipeline,\n { readConcern:\n { level: \"available\" }\n }\n );" - }, - { - "lang": "js", - "value": "const options = { readConcern: { level: \"local\" } };\nconst myDB = client.db(\"mydb\", options);" - }, - { - "lang": "js", - "value": "const transactionOptions = {\n readPreference: \"primary\",\n readConcern: { level: \"local\" },\n writeConcern: { w: \"majority\" },\n};\n\nconst session = client.startSession();\nsession.startTransaction(transactionOptions);\n// ...\nawait session.commitTransaction();\nawait session.endSession();" - }, - { - "lang": "js", - "value": "const uri = \"mongodb+srv://<user>:<password>@<cluster-url>?readPreference=secondary&maxStalenessSeconds=120\";\nconst client = new MongoClient(uri);" - } - ], - "preview": "In this guide, you can learn how to use the write concern, read concern, and\nread preference configurations to modify the way that MongoDB runs\ncreate, read, update, and delete (CRUD) operations on replica sets.", - "tags": "node.js, customize, preferences, replica set, consistency", - "facets": { - "genre": ["reference"], - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "fundamentals/crud/write-operations/delete", - "title": "Delete Documents", - "headings": ["Overview", "Delete"], - "paragraphs": "In this section, we show you how to call the write operations to remove \ndocuments from a collection in your MongoDB database. If you want to remove existing documents from a collection, you can\nuse deleteOne() to remove one document or deleteMany() for one or\nmore documents. These methods accept a query document that matches the\ndocuments you want to delete. You can specify the document or documents to be deleted by the\n deleteOne() or deleteMany() write operations in a JSON object as\nfollows: To delete the first matching document using the deleteOne() method or\nto delete all matching documents using the deleteMany() method, pass the\ndocument as the method parameter: You can print the number of documents deleted by the operation by\naccessing the deletedCount field of the result for each of the\nmethod calls above as follows: If the delete operation is successful, these statements print the number of documents\ndeleted by the associated operation.
To see fully runnable examples and more information on the available options, see the usage\nexamples for deleteOne() and\n deleteMany() .", - "code": [ - { - "lang": "javascript", - "value": "const doc = {\n pageViews: {\n $gt: 10,\n $lt: 32768\n }\n};" - }, - { - "lang": "javascript", - "value": "const deleteResult = await myColl.deleteOne(doc);\nconst deleteManyResult = await myColl.deleteMany(doc);" - }, - { - "lang": "javascript", - "value": "console.dir(deleteResult.deletedCount);\nconsole.dir(deleteManyResult.deletedCount);" - } - ], - "preview": "In this section, we show you how to call the write operations to remove\ndocuments from a collection in your MongoDB database.", - "tags": null, - "facets": { - "genre": ["tutorial"], - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "fundamentals/crud/write-operations/embedded-arrays", - "title": "Update Arrays in a Document", - "headings": [ - "Overview", - "Specifying Array Elements", - "The First Matching Array Element", - "Example", - "Matching All Array Elements", - "Example", - "Matching Multiple Array Elements", - "Usage", - "Example" - ], - "paragraphs": "In this guide, you can learn how to use the following array update\noperators to modify an array embedded within a document: For a list of array update operators, see Update Operators in the Server\nManual documentation. Positional Operator : $ All Positional Operator : $[] Filtered Positional Operator : $[<identifier>] Positional operators specify which array elements to update. You can use these operators to apply updates to the first element, all elements, or\ncertain elements of an array that match a criterion. To specify elements in an array with positional operators, use dot\nnotation . Dot notation is a property access syntax for navigating BSON\nobjects. To learn more, see dot notation . To update the first array element of each document that matches your\nquery, use the positional operator $ . The positional operator $ references the array matched by the query.\nYou cannot use this operator to reference a nested array. If you want to\naccess a nested array, use the filtered positional operator . Do not use the $ operator in an upsert call because the\ndriver treats $ as a field name in the insert document. This example uses the following sample document to show how to update\nthe first matching array element: The following code shows how to increment a value in the first array\nelement that matches a query. The query matches elements in the entries array where the value of\n x is a string type. The update increases the y value by\n 33 in the first matching element. After you run the update operation, the document resembles the\nfollowing: The example includes the entries.x field in the\nquery to match the array that the $ operator applies an update to. If you\nomit the entries.x field from the query while using the\n $ operator in an update, the driver is unable to identify the\nmatching array and raises the following error: To perform the update on all array elements of each document that\nmatches your query, use the all positional operator $[] .
This example uses the following sample documents, which describe phone\ncall logs, to show how to update all matching array elements: The following code shows how to remove the duration field from\nall calls array entries in the document whose date is\n \"5/15/2023\" : After you run the update operation, the documents resemble the following: To perform an update on all embedded array elements of each document\nthat matches your query, use the filtered positional operator\n $[<identifier>] . The filtered positional operator $[<identifier>] specifies the\nmatching array elements in the update document. To identify which array\nelements to match, pair this operator with <identifier> in an\n arrayFilters object. The <identifier> placeholder represents an element of the array\nfield. You must select a value for <identifier> that starts with a\nlowercase letter and contains only alphanumeric characters. You can use a filtered positional operator in an update operation.\nAn update operation takes a query, an update document, and\noptionally, an options object as its parameters. The following steps describe how to use a filtered positional operator\nin an update operation: Format your update document as follows: This update document contains the following placeholders: $<update operator> : The array update operator <array field> : The array in the document to update <identifier> : The identifier for the filtered positional operator <field to update> : The field in the array element to update <update value> : The value that describes the update Add the matching criteria in the arrayFilters object. This object\nis an array of queries that specify which array elements to include\nin the update. Set this object in an options parameter: Pass the query, the update document, and options to an\nupdate method. The following sample code shows how to call the\n updateOne() method with these parameters: This example uses the following sample documents, which describe\nshopping lists for specific recipes, to show how to update certain matching array elements: Suppose you want to increase the quantity of items you purchase for a\nrecipe on your \"11/12/2023\" grocery trip. You want to double the quantity if\nthe item meets all the following criteria: To double the quantity value in the matching array\nentries, use the filtered positional operator as shown in the following\ncode: The update multiplied the quantity value by 2 for\nitems that matched the criteria. The item \"Sesame oil\" did not match\nthe criteria in the arrayFilters object and therefore was excluded\nfrom the update. The following documents reflect these changes: The item is for the \"Fried rice\" recipe. The item name does not include the word \"oil\" .", - "code": [ - { - "lang": "javascript", - "value": "{\n _id: ...,\n entries: [\n { x: false, y: 1 },\n { x: \"hello\", y: 100 },\n { x: \"goodbye\", y: 1000 }\n ]\n}" - }, - { - "lang": "javascript", - "value": "{\n _id: ...,\n entries: [\n { x: false, y: 1 },\n { x: \"hello\", y: 133 },\n { x: \"goodbye\", y: 1000 }\n ]\n}" - }, - { - "lang": "none", - "value": "MongoServerError: The positional operator did not find the match needed from the query."
- }, - { - "lang": "javascript", - "value": "// Query for all elements in entries array where the value of x is a string\nconst query = { \"entries.x\": { $type : \"string\" } };\n\n// On first matched element, increase value of y by 33\nconst updateDocument = {\n $inc: { \"entries.$.y\": 33 }\n};\n\n// Execute the update operation\nconst result = await myColl.updateOne(query, updateDocument);" - }, - { - "lang": "javascript", - "value": "{\n _id: ...,\n date: \"5/15/2023\",\n calls: [\n { time: \"10:08 AM\", caller: \"Mom\", duration: 67 },\n { time: \"04:11 PM\", caller: \"Dad\", duration: 121 },\n { time: \"06:36 PM\", caller: \"Grandpa\", duration: 13 }\n ]\n},\n{\n _id: ...,\n date: \"5/16/2023\",\n calls: [\n { time: \"11:47 AM\", caller: \"Mom\", duration: 4 },\n ]\n}" - }, - { - "lang": "javascript", - "value": "{\n _id: ...,\n date: \"5/15/2023\",\n calls: [\n { time: \"10:08 AM\", caller: \"Mom\" },\n { time: \"04:11 PM\", caller: \"Dad\" },\n { time: \"06:36 PM\", caller: \"Grandpa\" }\n ]\n},\n{\n _id: ...,\n date: \"5/16/2023\",\n calls: [\n { time: \"11:47 AM\", caller: \"Mom\", duration: 4 },\n ]\n}" - }, - { - "lang": "javascript", - "value": "// Query for all documents where date is the string \"5/15/2023\"\nconst query = { date: \"5/15/2023\" };\n\n// For each matched document, remove duration field from all entries in calls array \nconst updateDocument = {\n $unset: { \"calls.$[].duration\": \"\" }\n};\n\n// Execute the update operation\nconst result = await myColl.updateOne(query, updateDocument);" - }, - { - "lang": "javascript", - "value": "{ $<update operator>: { \"<array field>.$[<identifier>].<field to update>\": <update value> } }" - }, - { - "lang": "javascript", - "value": "arrayFilters: [\n { \"<identifier>.<field>\": <expression> },\n { \"<identifier>.<field>\": <expression> },\n ...\n]" - }, - { - "lang": "javascript", - "value": "await myColl.updateOne(query, updateDocument, options);" - }, - { - "lang": "javascript", - "value": "{\n _id: ...,\n date: \"11/12/2023\",\n items: [\n { item: \"Scallions\", quantity: 3, recipe: \"Fried rice\" },\n { item: \"Mangos\", quantity: 4, recipe: \"Salsa\" },\n { item: \"Pork shoulder\", quantity: 1, recipe: \"Fried rice\" },\n { item: \"Sesame oil\", quantity: 1, recipe: \"Fried rice\" }\n ]\n},\n{\n _id: ...,\n date: \"11/20/2023\",\n items: [\n { item: \"Coffee beans\", quantity: 1, recipe: \"Coffee\" }\n ]\n}" - }, - { - "lang": "javascript", - "value": "{\n _id: ...,\n date: \"11/12/2023\",\n items: [\n { item: \"Scallions\", quantity: 6, recipe: \"Fried rice\" },\n { item: \"Mangos\", quantity: 4, recipe: \"Salsa\" },\n { item: \"Pork shoulder\", quantity: 2, recipe: \"Fried rice\" },\n { item: \"Sesame oil\", quantity: 1, recipe: \"Fried rice\" }\n ]\n},\n{\n _id: ...,\n date: \"11/20/2023\",\n items: [\n { item: \"Coffee beans\", quantity: 1, recipe: \"Coffee\" }\n ]\n}" - }, - { - "lang": "javascript", - "value": "// Query for all documents where date is the string \"11/12/2023\"\nconst query = { date: \"11/12/2023\" };\n\n// For each matched document, multiply the quantity of matching items by 2\nconst updateDocument = {\n $mul: { \"items.$[i].quantity\": 2 }\n};\n\n// Update only non-oil items used for fried rice \nconst options = {\n arrayFilters: [\n {\n \"i.recipe\": \"Fried rice\",\n \"i.item\": { $not: { $regex: \"oil\" } },\n }\n ]\n};\n\n// Execute the update operation\nconst result = await myColl.updateOne(query, updateDocument, options);" - } - ], - "preview": "In this guide, you can learn how to use the following array update\noperators to modify an array embedded within a document:", - "tags": null, - "facets": { - "genre": ["tutorial"], -
"target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "fundamentals/crud/write-operations/insert", - "title": "Insert Documents", - "headings": [ - "Overview", - "A Note About _id", - "Insert a Single Document", - "Example", - "Insert Multiple Documents", - "Example" - ], - "paragraphs": "In this guide, you can learn how to insert documents into MongoDB. You can use MongoDB to retrieve, update, and delete information that is already stored\nin MongoDB. To store information, use an insert operation . An insert operation inserts one or more documents into a MongoDB collection.\nThe Node.js driver provides the following methods to perform insert\noperations: The following sections focus on insertOne() and insertMany() . For an\nexample on how to use the bulkWrite() method, see our runnable Bulk\nOperations Example . insertOne() insertMany() bulkWrite() This page includes a short interactive lab that demonstrates how to\ninsert data by using the insertOne() method. You can complete this lab\ndirectly in your browser window without installing MongoDB or a code editor. To start the lab, click the Open Interactive Tutorial button at the\ntop of the page. To expand the lab to a full-screen format, click the\nfull-screen button ( \u26f6 ) in the top-right corner of the lab pane. When inserting a document, MongoDB enforces one constraint on your\ndocuments by default. Each document must contain a unique _id \nfield. There are two ways to manage this field: Unless you have provided strong guarantees for uniqueness, we recommend\nyou let the driver automatically generate _id values. For more information about _id , see the Server manual entry on\n Unique Indexes . You can manage this field yourself, ensuring each value you use is unique. You can let the driver automatically generate unique ObjectId values\nwith the primary key factory . Duplicate _id values violate unique index constraints, resulting\nin a WriteError . Use the insertOne() method when you want to insert a single\ndocument. On successful insertion, the method returns an\n InsertOneResult instance representing the _id of\nthe new document. The following example uses the insertOne() method to insert a new\ndocument into the myDB.pizzaMenu collection: Your output looks similar to the following text: For more information on the classes and methods mentioned in this\nsection, see the following resources: API Documentation on insertOne() API Documentation on InsertOneResult Server manual entry on insertOne() Runnable Insert a Document Example Use the insertMany() method when you want to insert multiple\ndocuments. This method inserts documents in the order specified until an\nexception occurs, if any. For example, assume you want to insert the following documents: If you attempt to insert these documents, a WriteError occurs when the third\ndocument is processed, but the documents before the error are inserted into your\ncollection. On successful insertion, the method returns an\n InsertManyResult instance representing the number of\ndocuments inserted and the _id of the new document. 
Use a try-catch block to get an acknowledgment for successfully\nprocessed documents before the error occurs: The output consists of documents MongoDB can process and looks similar to the\nfollowing: If you look inside your collection, you see the following documents: The following example uses the insertMany() method to insert three new\ndocuments into the myDB.pizzaMenu collection: Your output looks similar to the following: For more information on the classes and methods mentioned in this\nsection, see the following resources: API Documentation on insertMany() API Documentation on InsertManyResult API Documentation on PkFactory Server manual entry on insertMany() Runnable Insert Multiple Documents Example", - "code": [ - { - "lang": "javascript", - "value": "const myDB = client.db(\"myDB\");\nconst myColl = myDB.collection(\"pizzaMenu\");\n\nconst doc = { name: \"Neapolitan pizza\", shape: \"round\" };\nconst result = await myColl.insertOne(doc);\nconsole.log(\n `A document was inserted with the _id: ${result.insertedId}`,\n);" - }, - { - "lang": null, - "value": "A document was inserted with the _id: 60c79c0f4cc72b6bb31e3836" - }, - { - "lang": "json", - "value": "{ \"_id\": 1, \"color\": \"red\" }\n{ \"_id\": 2, \"color\": \"purple\" }\n{ \"_id\": 1, \"color\": \"yellow\" }\n{ \"_id\": 3, \"color\": \"blue\" }" - }, - { - "lang": "javascript", - "value": "const myDB = client.db(\"myDB\");\nconst myColl = myDB.collection(\"colors\");\n\ntry {\n const docs = [\n { \"_id\": 1, \"color\": \"red\"},\n { \"_id\": 2, \"color\": \"purple\"},\n { \"_id\": 1, \"color\": \"yellow\"},\n { \"_id\": 3, \"color\": \"blue\"}\n ];\n\n const insertManyresult = await myColl.insertMany(docs);\n let ids = insertManyresult.insertedIds;\n\n console.log(`${insertManyresult.insertedCount} documents were inserted.`);\n for (let id of Object.values(ids)) {\n console.log(`Inserted a document with id ${id}`);\n }\n} catch(e) {\n console.log(`A MongoBulkWriteException occurred, but there are successfully processed documents.`);\n let ids = e.result.result.insertedIds;\n for (let id of Object.values(ids)) {\n console.log(`Processed a document with id ${id._id}`);\n }\n console.log(`Number of documents inserted: ${e.result.result.nInserted}`);\n}" - }, - { - "lang": null, - "value": "A MongoBulkWriteException occurred, but there are successfully processed documents.\nProcessed a document with id 1\nProcessed a document with id 2\nProcessed a document with id 1\nProcessed a document with id 3\nNumber of documents inserted: 2" - }, - { - "lang": "json", - "value": "{ \"_id\": 1, \"color\": \"red\" }\n{ \"_id\": 2, \"color\": \"purple\" }" - }, - { - "lang": "javascript", - "value": "const myDB = client.db(\"myDB\");\nconst myColl = myDB.collection(\"pizzaMenu\");\n\nconst docs = [\n { name: \"Sicilian pizza\", shape: \"square\" },\n { name: \"New York pizza\", shape: \"round\" },\n { name: \"Grandma pizza\", shape: \"square\" }\n];\n\nconst insertManyresult = await myColl.insertMany(docs);\nlet ids = insertManyresult.insertedIds;\n\nconsole.log(`${insertManyresult.insertedCount} documents were inserted.`);\n\nfor (let id of Object.values(ids)) {\n console.log(`Inserted a document with id ${id}`);\n}" - }, - { - "lang": null, - "value": "3 documents were inserted.\nInserted a document with id 60ca09f4a40cf1d1afcd93a2\nInserted a document with id 60ca09f4a40cf1d1afcd93a3\nInserted a document with id 60ca09f4a40cf1d1afcd93a4" - } - ], - "preview": "In this guide, you can learn how to insert documents into MongoDB.", - 
"tags": "code example, node.js, add data", - "facets": { - "genre": ["reference"], - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "fundamentals/crud/write-operations/modify", - "title": "Modify Documents", - "headings": [ - "Overview", - "Update Documents", - "Example", - "Replace a Document", - "Example" - ], - "paragraphs": "You can modify documents in a MongoDB collection by using update \nand replace operations. Update operations modify the fields and\nvalues of a document while keeping other fields and values\nunchanged. Replace operations substitute all fields and values\nin an existing document with specified fields and values while keeping\nthe _id field value unchanged. The Node.js driver provides the following methods to change documents: updateOne() updateMany() replaceOne() This page includes a short interactive lab that demonstrates how to\nmodify data by using the updateMany() method. You can complete this lab\ndirectly in your browser window without installing MongoDB or a code editor. To start the lab, click the Open Interactive Tutorial button at the\ntop of the page. To expand the lab to a full-screen format, click the\nfull-screen button ( \u26f6 ) in the top-right corner of the lab pane. To perform an update to one or more documents, create an update\ndocument that specifies the update operator (the type of update to\nperform) and the fields and values that describe the change. Update\ndocuments use the following format: The top level of an update document contains one or more of the following\nupdate operators: See the MongoDB Server manual for a complete list of update operators\nand their usage . The update operators apply only to the fields associated with them in your\nupdate document. $set : replaces the value of a field with a specified one $inc : increments or decrements field values $rename : renames fields $unset : removes fields $mul : multiplies a field value by a specified number If you are using MongoDB Version 4.2 or later, you can use aggregation\npipelines made up of a subset of aggregation stages in update operations. For\nmore information on the aggregation stages MongoDB supports in\naggregation pipelines used in update operations, see our tutorial on building\n updates with aggregation pipelines . Consider a document in the myDB.items collection with fields\ndescribing an item for sale, its price, and the quantity available: If you apply the $set update operator with a new value for\n quantity , you can use the following update document: The updated document resembles the following, with an updated value in\nthe quantity field and all other values unchanged: If an update operation fails to match any documents in a collection, it\ndoes not make any changes. Update operations can be configured to perform\nan upsert which\nattempts to perform an update, but if no documents are matched, inserts\na new document with the specified fields and values. You cannot modify the _id field of a document nor change a field to\na value that violates a unique index constraint. See the MongoDB Server manual\nfor more information on unique indexes . To perform a replacement operation, create a replacement document that\nconsists of the fields and values that you want to use in your\n replace operation. Replacement documents use the following format: Replacement documents are the documents that you want to take the place of\nexisting documents that match the query filters. 
Consider a document in the myDB.items collection with fields\ndescribing an item for sale, its price, and the quantity available: Suppose you wanted to replace this document with one that contains a\ndescription for an entirely different item. Your replacement operation might\nresemble the following: The replaced document contains the contents of the replacement document\nand the immutable _id field as follows: If a replace operation fails to match any documents in a collection, it\ndoes not make any changes. Replace operations can be configured to perform\nan upsert which\nattempts to perform the replacement, but if no documents are matched, it\ninserts a new document with the specified fields and values. You cannot modify the _id field of a document nor change a field to\na value that violates a unique index constraint. See the MongoDB Server manual\nfor more information on unique indexes .", - "code": [ - { - "lang": "javascript", - "value": "{\n <update operator>: {\n <field>: {\n ...\n },\n <field>: {\n }\n },\n <update operator>: {\n ...\n }\n}" - }, - { - "lang": "javascript", - "value": "{\n _id: 465,\n item: \"Hand-thrown ceramic plate\",\n price: 32.50,\n quantity: 7,\n}" - }, - { - "lang": "javascript", - "value": "const myDB = client.db(\"myDB\");\nconst myColl = myDB.collection(\"items\");\n\nconst filter = { _id: 465 };\n\n// update the value of the 'quantity' field to 5\nconst updateDocument = {\n $set: {\n quantity: 5,\n },\n};\nconst result = await myColl.updateOne(filter, updateDocument);" - }, - { - "lang": "javascript", - "value": "{\n _id: 465,\n item: \"Hand-thrown ceramic plate\",\n price: 32.50,\n quantity: 5,\n}" - }, - { - "lang": "javascript", - "value": "{\n <field>: {\n <value>\n },\n <field>: {\n ...\n }\n}" - }, - { - "lang": "javascript", - "value": "{\n _id: 501,\n item: \"3-wick beeswax candle\",\n price: 18.99,\n quantity: 10,\n}" - }, - { - "lang": "javascript", - "value": "const myDB = client.db(\"myDB\");\nconst myColl = myDB.collection(\"items\");\n\nconst filter = { _id: 501 };\n\n// replace the matched document with the replacement document\nconst replacementDocument = {\n item: \"Vintage silver flatware set\",\n price: 79.15,\n quantity: 1,\n};\nconst result = await myColl.replaceOne(filter, replacementDocument);" - }, - { - "lang": "javascript", - "value": "{\n _id: 501,\n item: \"Vintage silver flatware set\",\n price: 79.15,\n quantity: 1,\n}" - } - ], - "preview": "You can modify documents in a MongoDB collection by using update\nand replace operations. Update operations modify the fields and\nvalues of a document while keeping other fields and values\nunchanged. Replace operations substitute all fields and values\nin an existing document with specified fields and values while keeping\nthe _id field value unchanged.", - "tags": null, - "facets": { - "genre": ["tutorial"], - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "fundamentals/crud/write-operations/pkFactory", - "title": "Generate Custom Values for _id", - "headings": [ - "Overview", - "Specify a Primary Key Factory", - "Additional Information" - ], - "paragraphs": "In this guide, you can learn how to use the MongoDB Node.js driver to generate your\nown _id values using the primary key factory . The primary key factory allows you to create unique identifiers in your\ndocuments when you choose not to specify an _id during an\n insert operation . The\ndefault primary key factory generates ObjectId values.
The driver doesn't use the primary key factory for\n upsert operations because it's\nunable to determine whether to apply the primary key factory. If you\nspecified the primary key factory in an upsert operation and it\nperforms an insert operation, the server autogenerates an\n ObjectId for that document. If you want to use your specified primary key factory, perform a\n find operation , then an\n update or\n insert operation. To specify a primary key factory, apply the pkFactory option to your\n MongoClient instance. The following code snippet applies the pkFactory option to\ngenerate _id values of type uuid : If you insert a document with an _id field with a different\ntype than the type specified by the primary key factory, then you\nwill have inconsistent data. For example, if you run the following insert operation on a primary\nkey factory that generates uuid types, your _id values will\ncontain both the uuid and string types: To learn more about the types, interfaces, and classes discussed in this\nsection, see the following resources: pkFactory The _id Field Insert or Update in a Single Operation Retrieve Data Modify Documents Insert Documents", - "code": [ - { - "lang": "javascript", - "value": "const { UUID } = require('bson');\n...\nconst client = new MongoClient(uri, {\n pkFactory: { createPk: () => new UUID().toBinary() }\n});" - }, - { - "lang": "javascript", - "value": "myColl.insertOne({ _id: \"user1388\", ... });" - } - ], - "preview": "In this guide, you can learn how to use the MongoDB Node.js driver to generate your\nown _id values using the primary key factory.", - "tags": null, - "facets": { - "genre": ["tutorial"], - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "fundamentals/crud/write-operations/upsert", - "title": "Insert or Update in a Single Operation", - "headings": ["Overview", "Performing an Update", "Performing an Upsert"], - "paragraphs": "If your application stores and modifies data in MongoDB, you probably use\ninsert and update operations. In certain workflows, whether you perform\nan insert or update operation depends on whether the document exists.\nIn these cases, you can streamline your application logic by using the\n upsert option available in the following methods: If the query filter passed to these methods does not find any matches and\nyou set the upsert option to true , MongoDB inserts the update\ndocument. Let's go through an example. updateOne() replaceOne() updateMany() Suppose your application tracks the current location of food trucks,\nstoring the nearest address data in the myDB.foodTrucks collection,\nwhich resembles the following: As an application user, you read about a food truck changing its regular\nlocation and want to apply the update. This update might resemble the\nfollowing: If a food truck named \"Deli Llama\" exists, the method call above updates\nthe document in the collection. However, if there are no food trucks named\n\"Deli Llama\" in your collection, no changes are made. Consider the case in which you want to add information about the food\ntruck even if it does not yet exist in your collection. 
Rather than\nfirst querying whether it exists to determine whether to insert or\nupdate the document, we can set upsert to true in our call to\n updateOne() as follows: After you run the operation above, your collection looks similar to the\nfollowing, even if the \"Deli Llama\" document did not exist in your collection\nbefore the operation:", - "code": [ - { - "lang": "javascript", - "value": "[\n { name: \"Haute Skillet\", address: \"42 Avenue B\" },\n { name: \"Lady of the Latke\", address: \"35 Fulton Rd\" },\n ...\n]" - }, - { - "lang": "javascript", - "value": "const myDB = client.db(\"myDB\");\nconst myColl = myDB.collection(\"foodTrucks\");\n\nconst query = { name: \"Deli Llama\" };\nconst update = { $set: { name: \"Deli Llama\", address: \"3 Nassau St\" }};\nconst options = {};\nmyColl.updateOne(query, update, options);" - }, - { - "lang": "javascript", - "value": "const query = { name: \"Deli Llama\" };\nconst update = { $set: { name: \"Deli Llama\", address: \"3 Nassau St\" }};\nconst options = { upsert: true };\nmyColl.updateOne(query, update, options);" - }, - { - "lang": "javascript", - "value": "[\n { name: \"Haute Skillet\", address: \"42 Avenue B\" },\n { name: \"Lady of the Latke\", address: \"35 Fulton Rd\" },\n { name: \"Deli Llama\", address: \"3 Nassau St\" },\n ...\n]" - } - ], - "preview": "If your application stores and modifies data in MongoDB, you probably use\ninsert and update operations. In certain workflows, whether you perform\nan insert or update operation depends on whether the document exists.\nIn these cases, you can streamline your application logic by using the\nupsert option available in the following methods:", - "tags": "code example, node.js, write, add data", - "facets": { - "genre": ["reference"], - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "fundamentals/crud/write-operations", - "title": "Write Operations", - "headings": [], - "paragraphs": "Insert Documents Generate Custom Values for _id Delete Documents Modify Documents Update Arrays in a Document Insert or Update in a Single Operation", - "code": [], - "preview": "Learn about the commands for running MongoDB write operations by using the MongoDB Node.js driver.", - "tags": null, - "facets": { - "genre": ["tutorial"], - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "fundamentals/crud", - "title": "CRUD Operations", - "headings": ["Compatibility"], - "paragraphs": "CRUD (Create, Read, Update, Delete) operations allow you to work with\nthe data stored in MongoDB. The CRUD operation documentation is categorized in two sections: Some operations combine aspects of read and write operations. See our\nguide on compound operations \nto learn more about these hybrid methods. Read Operations find and return\ndocuments stored within your MongoDB database. Write Operations insert, modify,\nor delete documents in your MongoDB database. You can use the Node.js driver to connect and perform CRUD operations for\ndeployments hosted in the following environments: MongoDB Atlas : The fully\nmanaged service for MongoDB deployments in the cloud MongoDB Enterprise : The\nsubscription-based, self-managed version of MongoDB MongoDB Community : The\nsource-available, free-to-use, and self-managed version of MongoDB To learn more about performing CRUD operations in the Atlas UI for deployments hosted in MongoDB\nAtlas, see Create, View, Update, and Delete Documents . 
To learn more about performing CRUD operations, see the following posts on the MongoDB\nDeveloper Hub : Learn how to apply CRUD Operations \nwith an example scenario. Analyze data in MongoDB Atlas using the Aggregation Pipeline .", - "code": [], - "preview": "Learn how to perform create, read, update, and delete (CRUD) operations to work with the data stored in MongoDB by using the Node.js driver.", - "tags": "node.js", - "facets": { - "genre": ["reference"], - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "fundamentals/encrypt-fields", - "title": "In-Use Encryption", - "headings": [ - "Overview", - "Queryable Encryption", - "Client-side Field Level Encryption" - ], - "paragraphs": "You can use the Node.js driver to encrypt specific document fields by using a\nset of features called in-use encryption . In-use encryption allows\nyour application to encrypt data before sending it to MongoDB\nand query documents with encrypted fields. In-use encryption prevents unauthorized users from viewing plaintext\ndata as it is sent to MongoDB or while it is in an encrypted database. To\nenable in-use encryption in an application and authorize it to decrypt\ndata, you must create encryption keys that only your application can\naccess. Only applications that have access to your encryption\nkeys can access the decrypted, plaintext data. If an attacker gains\naccess to the database, they can only see the encrypted ciphertext data\nbecause they lack access to the encryption keys. You might use in-use encryption to encrypt fields in your MongoDB\ndocuments that contain the following types of sensitive data: MongoDB offers the following features to enable in-use encryption: Credit card numbers Addresses Health information Financial information Any other sensitive or personally identifiable information (PII) Queryable Encryption Client-side Field Level Encryption Queryable Encryption is the next-generation in-use encryption feature,\nfirst introduced as a preview feature in MongoDB Server version 6.0 and\nas a generally available (GA) feature in MongoDB 7.0. Queryable\nEncryption supports searching encrypted fields for equality and encrypts\neach value uniquely. To learn more about Queryable Encryption, see Queryable\nEncryption in the Server manual. The implementation of Queryable Encryption in MongoDB 6.0 is incompatible with the GA version introduced in MongoDB 7.0. The Queryable Encryption preview feature is no longer supported. Client-side Field Level Encryption (CSFLE) was introduced in MongoDB\nServer version 4.2 and supports searching encrypted fields for equality.\nCSFLE differs from Queryable Encryption in that you can select either a\ndeterministic or random encryption algorithm to encrypt fields. You can only\nquery encrypted fields that use a deterministic encryption algorithm when\nusing CSFLE. When you use a random encryption algorithm to encrypt\nfields in CSFLE, they can be decrypted, but you cannot perform equality\nqueries on those fields. When you use Queryable Encryption, you cannot\nspecify the encryption algorithm, but you can query all encrypted\nfields. When you deterministically encrypt a value, the same input value\nproduces the same output value. While deterministic encryption allows\nyou to perform queries on those encrypted fields, encrypted data with\nlow cardinality is susceptible to code breaking by frequency analysis. To learn more about CSFLE, see CSFLE in the\nServer manual. 
To learn more about these concepts, see the following Wikipedia\nentries: Cardinality Frequency Analysis", - "code": [], - "preview": "You can use the Node.js driver to encrypt specific document fields by using a\nset of features called in-use encryption. In-use encryption allows\nyour application to encrypt data before sending it to MongoDB\nand query documents with encrypted fields.", - "tags": "node.js", - "facets": { - "genre": ["reference"], - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "fundamentals/gridfs", - "title": "GridFS", - "headings": [ - "Overview", - "How GridFS Works", - "Create a GridFS Bucket", - "Upload Files", - "Retrieve File Information", - "Download Files", - "Rename Files", - "Delete Files", - "Delete a GridFS Bucket", - "Additional Resources" - ], - "paragraphs": "In this guide, you can learn how to store and retrieve large files in\nMongoDB using GridFS . GridFS is a specification that describes how\nto split files into chunks during storage\nand reassemble them during retrieval. The driver implementation of\nGridFS manages the operations and organization of\nthe file storage. Use GridFS if the size of your file exceeds the BSON-document\nsize limit of 16 megabytes. For more detailed information on whether GridFS is\nsuitable for your use case, see the GridFS Server manual page . Navigate the following sections to learn more about GridFS operations\nand implementation: Create a GridFS Bucket Upload Files Retrieve File Information Download Files Rename Files Delete Files Delete a GridFS Bucket GridFS organizes files in a bucket , a group of MongoDB collections\nthat contain the chunks of files and descriptive information.\nBuckets contain the following collections, named using the convention\ndefined in the GridFS specification: When you create a new GridFS bucket, the driver creates the chunks \nand files collections, prefixed with the default bucket name fs , unless\nyou specify a different name. The driver also creates an index on each\ncollection to ensure efficient retrieval of files and related\nmetadata. The driver only creates the GridFS bucket on the first write\noperation if it does not already exist. The driver only creates indexes if\nthey do not exist and when the bucket is empty. For more information on\nGridFS indexes, see the Server manual page on GridFS Indexes . When storing files with GridFS, the driver splits the files into smaller\npieces, each represented by a separate document in the chunks collection.\nIt also creates a document in the files collection that contains\na unique file id, file name, and other file metadata. You can upload the file from\nmemory or from a stream. The following diagram describes how GridFS splits\nfiles when uploading to a bucket: When retrieving files, GridFS fetches the metadata from the files \ncollection in the specified bucket and uses the information to reconstruct\nthe file from documents in the chunks collection. You can read the file\ninto memory or output it to a stream. The chunks collection stores the binary file chunks. The files collection stores the file metadata. Create a bucket or get a reference to an existing one to begin storing\nor retrieving files from GridFS. Create a GridFSBucket \ninstance, passing a database as the parameter. 
You can then use the\n GridFSBucket instance to call read and write operations on the files\nin your bucket: Pass your bucket name as the second parameter to the create() method\nto create or reference a bucket with a custom name other than the\ndefault name fs , as shown in the following example: For more information, see the GridFSBucket API documentation . Use the openUploadStream() method from GridFSBucket to create an upload\nstream for a given file name. You can use the pipe() method to\nconnect a Node.js read stream to the upload stream. The\n openUploadStream() method allows you to specify configuration information\nsuch as file chunk size and other field/value pairs to store as metadata. The following example shows how to pipe a Node.js read stream, represented by the\nvariable fs , to the openUploadStream() method of a GridFSBucket instance: See the openUploadStream() API documentation for more information. In this section, you can learn how to retrieve file metadata stored in the\n files collection of the GridFS bucket. The metadata contains information\nabout the file it refers to, including: Call the find() method on the GridFSBucket instance to retrieve\nfiles from a GridFS bucket. The method returns a FindCursor instance\nfrom which you can access the results. The following code example shows you how to retrieve and print file metadata\nfrom all your files in a GridFS bucket. Among the different ways that you can\ntraverse the retrieved results from the FindCursor iterable, the\nfollowing example uses the for await...of syntax to display the results: The find() method accepts various query specifications and can be\ncombined with other methods such as sort() , limit() , and project() . For more information on the classes and methods mentioned in this section,\nsee the following resources: The _id of the file The name of the file The length/size of the file The upload date and time A metadata document in which you can store any other information find() API documentation FindCursor API documentation Cursor Fundamentals page Read Operations page You can download files from your MongoDB database by using the\n openDownloadStreamByName() method from GridFSBucket to create a\ndownload stream. The following example shows you how to download a file referenced\nby the file name, stored in the filename field, into your working\ndirectory: Alternatively, you can use the openDownloadStream() \nmethod, which takes the _id field of a file as a parameter: For more information on the openDownloadStreamByName() method, see\nits API documentation . If there are multiple documents with the same filename value,\nGridFS will stream the most recent file with the given name (as\ndetermined by the uploadDate field). The GridFS streaming API cannot load partial chunks. When a download\nstream needs to pull a chunk from MongoDB, it pulls the entire chunk\ninto memory. The 255 kilobyte default chunk size is usually\nsufficient, but you can reduce the chunk size to reduce memory\noverhead. Use the rename() method to update the name of a GridFS file in your\nbucket. You must specify the file to rename by its _id field\nrather than its file name. The following example shows how to update the filename field to\n\"newFileName\" by referencing a document's _id field: For more information on this method, see the rename() \nAPI documentation. The rename() method only supports updating the name of one file at\na time. 
To rename multiple files, retrieve a list of files matching the\nfile name from the bucket, extract the _id field from the files you\nwant to rename, and pass each value in separate calls to the rename() \nmethod. Use the delete() method to remove a file from your bucket. You must\nspecify the file by its _id field rather than its file name. The following example shows you how to delete a file by referencing its _id field: For more information on this method, see the delete() \nAPI documentation. The delete() method only supports deleting one file at a time. To\ndelete multiple files, retrieve the files from the bucket, extract\nthe _id field from the files you want to delete, and pass each value\nin separate calls to the delete() method. Use the drop() method to remove a bucket's files and chunks \ncollections, which effectively deletes the bucket. The following\ncode example shows you how to delete a GridFS bucket: For more information on this method, see the drop() \nAPI documentation. MongoDB GridFS specification", - "code": [ - { - "lang": "javascript", - "value": "const db = client.db(dbName);\nconst bucket = new mongodb.GridFSBucket(db);" - }, - { - "lang": "javascript", - "value": "const bucket = new mongodb.GridFSBucket(db, { bucketName: 'myCustomBucket' });" - }, - { - "lang": "javascript", - "value": "fs.createReadStream('./myFile').\n pipe(bucket.openUploadStream('myFile', {\n chunkSizeBytes: 1048576,\n metadata: { field: 'myField', value: 'myValue' }\n }));" - }, - { - "lang": "javascript", - "value": "const cursor = bucket.find({});\nfor await (const doc of cursor) {\n console.log(doc);\n}" - }, - { - "lang": "javascript", - "value": "bucket.openDownloadStreamByName('myFile').\n pipe(fs.createWriteStream('./outputFile'));" - }, - { - "lang": "javascript", - "value": "bucket.openDownloadStream(ObjectId(\"60edece5e06275bf0463aaf3\")).\n pipe(fs.createWriteStream('./outputFile'));" - }, - { - "lang": "javascript", - "value": "bucket.rename(ObjectId(\"60edece5e06275bf0463aaf3\"), \"newFileName\");" - }, - { - "lang": "javascript", - "value": "bucket.delete(ObjectId(\"60edece5e06275bf0463aaf3\"));" - }, - { - "lang": "javascript", - "value": "bucket.drop();" - } - ], - "preview": "In this guide, you can learn how to store and retrieve large files in\nMongoDB using GridFS. GridFS is a specification that describes how\nto split files into chunks during storage\nand reassemble them during retrieval. The driver implementation of\nGridFS manages the operations and organization of\nthe file storage.", - "tags": "node.js, code example, file storage", - "facets": { - "genre": ["reference"], - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "fundamentals/indexes", - "title": "Indexes", - "headings": [ - "Overview", - "Query Coverage and Performance", - "Operational Considerations", - "List Indexes", - "Index Types", - "Single Field Indexes", - "Compound Indexes", - "Multikey Indexes (Indexes on Array Fields)", - "Clustered Indexes", - "Text Indexes", - "Geospatial Indexes", - "Unique Indexes", - "Search Indexes", - "Create a Search Index", - "List Search Indexes", - "Update a Search Index", - "Drop a Search Index" - ], - "paragraphs": "Indexes are data structures that support the efficient execution of queries in\nMongoDB. They contain copies of parts of the data in documents to make\nqueries more efficient. Without indexes, MongoDB must scan every document in a collection to find\nthe documents that match each query. 
These collection scans are slow and can\nnegatively affect the performance of your application. By using an index to\nlimit the number of documents MongoDB scans, queries can be more efficient\nand therefore return faster. When you execute a query against MongoDB, your query can include three\nparts: When all the fields specified in the query criteria and projection of a\nquery are indexed, MongoDB returns results directly from the index\nwithout scanning any documents in the collection or loading them into\nmemory. For more information on how to ensure your index covers your query\ncriteria and projection, see the MongoDB manual articles on\n query coverage \nand index intersection . Query criteria that specify one or more fields and values that you are looking for Options that affect the query's execution, such as read concern Projection criteria to specify the fields you want MongoDB to return (optional) To improve query performance, build indexes on fields that appear often in your\napplication's queries and operations that return sorted results. Each index that you add\nconsumes disk space and memory when active, so it might be necessary to track index memory\nand disk usage for capacity planning. In addition, when a write operation updates an\nindexed field, MongoDB also updates the related index. For more information on designing your data model and choosing indexes\nappropriate for your application, see the MongoDB Server documentation on\n Indexing Strategies and\n Data Modeling and Indexes . You can use the listIndexes() method to list all the indexes\nfor a collection. The listIndexes() method takes an\noptional ListIndexesOptions parameter. The listIndexes() method returns an\nobject of type ListIndexesCursor . The following code uses the listIndexes() method to list all the\nindexes in a collection: MongoDB supports several different index types to support querying\nyour data. The following sections describe the most common index types\nand provide sample code for creating each index type. Single field indexes are indexes that improve performance for queries\nthat specify ascending or descending sort order on a single field of a\ndocument. The following example uses the createIndex() method to create an\nascending order index on the title field in the movies collection in\nthe sample_mflix database. The following is an example of a query that is covered by the index\ncreated above. To learn more, see Single Field Indexes . Compound indexes are indexes that improve performance for queries that\nspecify ascending or descending sort order for multiple fields of\na document. You must specify the direction (ascending or descending) for\neach field in the index. The following example uses the createIndex() method to create a compound\nindex on the type and genre fields in the movies collection in the\n sample_mflix database. The following is an example of a query that is covered by the index\ncreated above. To learn more, see Compound Indexes . Multikey indexes are indexes that improve the performance of queries on\nfields that contain array values. You can create a multikey index on a field with an array value by\ncalling the createIndex() method. 
The following code creates an ascending\nindex on the cast field in the movies collection of the\n sample_mflix database: The following code queries the multikey index to find\ndocuments in which the cast field value contains \"Viola Davis\" : Multikey indexes behave differently from non-multikey indexes in terms of\nquery coverage, index bound computation, and sort behavior. For a full\nexplanation of multikey indexes, including a discussion of their behavior\nand limitations, see the Multikey Indexes page in the MongoDB Server manual. Clustered indexes are indexes that improve the performance of\ninsert, update, and delete operations on clustered collections .\nClustered collections store documents ordered by the clustered index key\nvalue. To create a clustered index, specify the clusteredIndex option in\nthe CollectionOptions . The clusteredIndex option must specify the\n _id field as the key and the unique field as true . The following example uses the createCollection() method to create a\nclustered index on the _id field in the ratings collection of the\n tea database. To learn more, see\n Clustered Indexes and\n Clustered Collections . Text indexes support text search queries on string content. These indexes\ncan include any field whose value is a string or an array of string elements. MongoDB supports text search for various languages, so you can specify the\ndefault language as an option when creating the index. You can also\nspecify a weight option to prioritize certain text fields in your\nindex. These weights denote the significance of fields relative to the\nother indexed fields. To learn more about text searches, see our guide on text search queries . The following example uses the createIndex() method to perform the\nfollowing actions: The following query uses the text index created in the preceding code: To learn more about text indexes, see Text Indexes in the Server manual. Create a text index on the title and body fields in the\n blogPosts collection Specify english as the default language Set the field weight of body to 10 and title to 3 MongoDB supports queries of geospatial coordinate data using 2dsphere\nindexes . With a 2dsphere index, you can query the geospatial data for\ninclusion, intersection, and proximity. For more information on querying\ngeospatial data with the MongoDB Node.js driver, read our\n Search Geospatial guide. To create a 2dsphere index, you must specify a field that contains\nonly GeoJSON objects . For more details on this type, see the MongoDB\nServer manual page on GeoJSON objects . The location.geo field in the following sample document from the\n theaters collection in the sample_mflix database is a GeoJSON Point\nobject that describes the coordinates of the theater: The following example uses the createIndex() method to create a\n 2dsphere index on the location.geo field in the theaters \ncollection in the sample_mflix database to enable geospatial searches. MongoDB also supports 2d indexes for calculating distances on a\nEuclidean plane and for working with the \"legacy coordinate pairs\" syntax\nused in MongoDB 2.2 and earlier. To learn more, see\n Geospatial Queries . Unique indexes ensure that the indexed fields do not store duplicate\nvalues. By default, MongoDB creates a unique index on the _id field\nduring the creation of a collection. To create a unique index, specify the\nfield or combination of fields that you want to prevent duplication on and\nset the unique option to true .
The following example uses the createIndex() method to create a unique\nindex on the theaterId field in the theaters collection of the\n sample_mflix database. If you attempt to perform a write operation that stores a duplicate value\nthat violates the unique index, MongoDB will throw an error that resembles\nthe following: To learn more, see Unique Indexes . Atlas Search is a feature that allows you to perform full-text\nsearches. To learn more, see the Atlas Search \ndocumentation. Before you can perform a search on an Atlas collection, you must first\ncreate an Atlas Search index on the collection. An Atlas Search\nindex is a data structure that categorizes data in a searchable format. You can use the following methods to manage your Search indexes: The following sections provide code samples that use each of the preceding\nmethods to manage Search indexes. createSearchIndex() createSearchIndexes() listSearchIndexes() updateSearchIndex() dropSearchIndex() You can use the createSearchIndex() and\n createSearchIndexes() \nmethods to create new Search indexes. The following code shows how to\nuse the createSearchIndex() method to create an index called\n search1 : When connecting to MongoDB Server v6.0.11 and later v6 versions, or\nv7.0.2 and later v7 versions, you can use the driver to create an Atlas\nVector Search index on a collection. Learn more about this feature in\nthe Atlas Vector Search documentation . The following code shows how to use the createSearchIndex() method\nto create a search index in which the type field is\n vectorSearch : You can use the listSearchIndexes() \nmethod to return a cursor that contains the Search indexes of a given\ncollection. The listSearchIndexes() method takes an optional string\nparameter, name , to return only the indexes with matching names. It\nalso takes an optional aggregateOptions parameter. The following code uses the listSearchIndexes() method to list the\nSearch indexes in a collection: You can use the updateSearchIndex() method to update a Search\nindex. The following code shows how to\nuse the updateSearchIndex() method to update an index called\n search1 to specify a string type for the description field: You can use the dropSearchIndex() method to remove a Search\nindex. 
The following code shows how to\nuse the dropSearchIndex() method to remove an index called\n search1 :", - "code": [ - { - "lang": "javascript", - "value": "// List the indexes on the collection and output them as an array\nconst result = await collection.listIndexes().toArray();\n\n// Print the list of indexes\nconsole.log(\"Existing indexes:\\n\");\nfor (const doc of result) {\n console.log(doc);\n}" - }, - { - "lang": "js", - "value": "const database = client.db(\"sample_mflix\");\nconst movies = database.collection(\"movies\");\n\n// Create an ascending index on the \"title\" field in the\n// \"movies\" collection.\nconst result = await movies.createIndex({ title: 1 });\nconsole.log(`Index created: ${result}`);" - }, - { - "lang": "js", - "value": "// Define the query parameters\nconst query = { title: \"Batman\" };\nconst sort = { title: 1 };\nconst projection = { _id: 0, title: 1 };\n// Execute the query using the defined parameters\nconst cursor = movies\n .find(query)\n .sort(sort)\n .project(projection);" - }, - { - "lang": "js", - "value": "// Connect to the \"sample_mflix\" database\nconst database = client.db(\"sample_mflix\");\n// Access the database's \"movies\" collection\nconst movies = database.collection(\"movies\");\n\n// Create an ascending index on the \"type\" and \"genre\" fields\n// in the \"movies\" collection.\nconst result = await movies.createIndex({ type: 1, genre: 1 });\nconsole.log(`Index created: ${result}`);" - }, - { - "lang": "js", - "value": "// Define a query to find movies in the \"Drama\" genre\nconst query = { type: \"movie\", genre: \"Drama\" };\n// Define sorting criteria for the query results\nconst sort = { type: 1, genre: 1 };\n// Include only the type and genre fields in the query results\nconst projection = { _id: 0, type: 1, genre: 1 };\n\n// Execute the query using the defined criteria and projection\nconst cursor = movies\n .find(query)\n .sort(sort)\n .project(projection);" - }, - { - "lang": "js", - "value": "const database = client.db(\"sample_mflix\");\nconst movies = database.collection(\"movies\");\n\n// Create a multikey index on the \"cast\" field in the \"movies\" collection\nconst result = await movies.createIndex({ cast: 1 });" - }, - { - "lang": "js", - "value": "const query = { cast: \"Viola Davis\" };\nconst projection = { _id: 0, cast: 1, title: 1 };\n\n// Perform a find operation with the preceding filter and projection\nconst cursor = movies\n .find(query)\n .project(projection);" - }, - { - "lang": "javascript", - "value": "const db = client.db('tea');\nawait db.createCollection('ratings', {\n clusteredIndex: {\n key: { _id: 1 },\n unique: true\n }\n});" - }, - { - "lang": "js", - "value": "// Get the database and collection on which to create the index \nconst myDB = client.db(\"testDB\");\nconst myColl = myDB.collection(\"blogPosts\");\n\n// Create a text index on the \"title\" and \"body\" fields\nconst result = await myColl.createIndex(\n { title: \"text\", body: \"text\" },\n { default_language: \"english\",\n weights: { body: 10, title: 3 } }\n);" - }, - { - "lang": "js", - "value": "// Query for documents where body or title contain \"life ahead\"\nconst query = { $text: { $search: \"life ahead\" } };\n\n// Show only the title field\nconst projection = { _id: 0, title: 1 };\n\n// Execute the find operation\nconst cursor = myColl.find(query).project(projection);" - }, - { - "lang": "json", - "value": "{\n \"_id\" : ObjectId(\"59a47286cfa9a3a73e51e75c\"),\n \"theaterId\" : 104,\n \"location\" : {\n \"address\" :
{\n \"street1\" : \"5000 W 147th St\",\n \"city\" : \"Hawthorne\",\n \"state\" : \"CA\",\n \"zipcode\" : \"90250\"\n },\n \"geo\" : {\n \"type\" : \"Point\",\n \"coordinates\" : [\n -118.36559,\n 33.897167\n ]\n }\n }\n}" - }, - { - "lang": "js", - "value": "const database = client.db(\"sample_mflix\");\nconst theaters = database.collection(\"theaters\");\n\n/* Create a 2dsphere index on the \"location.geo\" field in the\n\"theaters\" collection */\nconst result = await theaters.createIndex({ \"location.geo\": \"2dsphere\" });\n\n// Print the result of the index creation\nconsole.log(`Index created: ${result}`);" - }, - { - "lang": "none", - "value": "E11000 duplicate key error index" - }, - { - "lang": "js", - "value": "const database = client.db(\"sample_mflix\");\nconst theaters = database.collection(\"theaters\");\n\n// Create a unique index on the \"theaterId\" field in the \"theaters\" collection.\nconst result = await theaters.createIndex({ theaterId: 1 }, { unique: true });\nconsole.log(`Index created: ${result}`);" - }, - { - "lang": "javascript", - "value": "// Create a search index\nconst index1 = {\n name: \"search1\",\n definition: {\n \"mappings\": {\n \"dynamic\": true\n }\n }\n}\nawait collection.createSearchIndex(index1);" - }, - { - "lang": "javascript", - "value": "// Create a Vector Search index\nconst vectorSearchIdx = {\n name: \"vsidx1\",\n type: \"vectorSearch\",\n definition: {\n fields: [{\n type: \"vector\",\n numDimensions: 384,\n path: \"summary\",\n similarity: \"dotProduct\"\n }]\n }\n}\n\nawait collection.createSearchIndex(vectorSearchIdx);" - }, - { - "lang": "javascript", - "value": "// List search indexes\nconst result = await collection.listSearchIndexes().toArray();\nconsole.log(\"Existing search indexes:\\n\");\nfor (const doc of result) {\n console.log(doc);\n}" - }, - { - "lang": "javascript", - "value": "// Update a search index\nconst index2 = {\n \"mappings\": {\n \"dynamic\": true,\n \"fields\": {\n \"description\": {\n \"type\": \"string\"\n }\n }\n }\n}\nawait collection.updateSearchIndex(\"search1\", index2);" - }, - { - "lang": "javascript", - "value": "// Dropping (deleting) a search index\nawait collection.dropSearchIndex(\"search1\");" - } - ], - "preview": "Indexes are data structures that support the efficient execution of queries in\nMongoDB. They contain copies of parts of the data in documents to make\nqueries more efficient.", - "tags": "node.js, code example, Atlas search", - "facets": { - "genre": ["reference"], - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "fundamentals/logging", - "title": "Logging", - "headings": ["Temporary Alternative"], - "paragraphs": "The driver doesn't use the logger in versions 4.0 and later.\nAttempting to use prior logger settings in this version won't print\nanything in the log. Instead, see our monitoring guides: Command Monitoring Cluster Monitoring Connection Pool Monitoring We are developing a new logging framework. In the meantime, you can output monitor events\nby using the following snippet:", - "code": [ - { - "lang": "javascript", - "value": "const uri = \"mongodb+srv://<user>:<password>@<cluster-url>?writeConcern=majority\";\nconst client = new MongoClient(uri, { monitorCommands: true });\n\nclient.on('commandStarted', (event) => console.debug(event));\nclient.on('commandSucceeded', (event) => console.debug(event));\nclient.on('commandFailed', (event) => console.debug(event));" - } - ], - "preview": "We are developing a new logging framework.
In the meantime, you can output monitor events\nby using the following snippet:", - "tags": "code example, deprecated, replace", - "facets": { - "genre": ["reference"], - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "fundamentals/monitoring/cluster-monitoring", - "title": "Cluster Monitoring", - "headings": [ - "Overview", - "Event Subscription Example", - "Event Descriptions", - "Example Event Documents", - "serverDescriptionChanged", - "serverHeartbeatStarted", - "serverHeartbeatSucceeded", - "serverHeartbeatFailed", - "serverOpening", - "serverClosed", - "topologyOpening", - "topologyClosed", - "topologyDescriptionChanged" - ], - "paragraphs": "This guide shows you how to monitor topology events in a MongoDB instance,\nreplica set, or sharded cluster. The driver creates topology events, also\nknown as Server Discovery and Monitoring (SDAM) events, when there is\na change in the state of the instance or cluster that you connected to.\nFor example, the driver creates an event when you establish a new connection\nor if the cluster elects a new primary. The following sections demonstrate how to record topology changes in your application\nand explore the information provided in these events. You can access one or more SDAM events using the driver by subscribing to them\nin your application. The following example demonstrates connecting to a\nreplica set and subscribing to one of the SDAM events created by the MongoDB\ndeployment: You can subscribe to any of the following SDAM events: Event Name Description serverOpening Created when a connection to an instance opens. serverClosed Created when a connection to an instance closes. serverDescriptionChanged Created when an instance state changes (such as from secondary to\nprimary). topologyOpening Created before attempting a connection to an instance. topologyClosed Created after all instance connections in the topology close. topologyDescriptionChanged Created when the topology changes, such as an election of a new\nprimary or a mongos proxy disconnecting. serverHeartbeatStarted Created before issuing a hello command to a MongoDB instance. serverHeartbeatSucceeded Created when the hello command returns successfully from a\nMongoDB instance. serverHeartbeatFailed Created when a hello command issued to a specific MongoDB\ninstance fails to return a successful response. The following sections show sample output for each type of SDAM event. The type field of the ServerDescription object in this event contains\none of the following possible values: Type Description Unknown Unknown instance Standalone Standalone instance Mongos Mongos proxy instance PossiblePrimary At least one server recognizes this as the primary, but is not yet\nverified by all instances. 
RSPrimary Primary instance RSSecondary Secondary instance RSArbiter Arbiter instance RSOther See the RSGhost and RSOther specification \nfor more details RSGhost See the RSGhost and RSOther specification \nfor more details The type field of the TopologyDescription object in this event contains\none of the following possible values: Type Description Single Standalone instance ReplicaSetWithPrimary Replica set with a primary ReplicaSetNoPrimary Replica set with no primary Sharded Sharded cluster Unknown Unknown topology", - "code": [ - { - "lang": "javascript", - "value": "/* Subscribe to SDAM event */\n\nconst { MongoClient } = require(\"mongodb\");\n\n// Replace the following with your MongoDB deployment's connection string\nconst uri = \"mongodb+srv:///?replicaSet=rs&writeConcern=majority\";\n\nconst client = new MongoClient(uri);\n\n// Replace with the name of the event you are subscribing to\nconst eventName = \"\";\n\n// Subscribe to a specified event and print a message when the event is received\nclient.on(eventName, event => {\n console.log(`received ${eventName}: ${JSON.stringify(event, null, 2)}`);\n});\n\nasync function run() {\n try {\n // Establish and verify connection to the database\n await client.db(\"admin\").command({ ping: 1 });\n console.log(\"Connected successfully\");\n } finally {\n // Close the database connection on completion or error\n await client.close();\n }\n}\nrun().catch(console.dir);\n" - }, - { - "lang": "javascript", - "value": "ServerDescriptionChangedEvent {\n topologyId: 0,\n address: 'localhost:27017',\n previousDescription: ServerDescription {\n address: 'localhost:27017',\n error: null,\n roundTripTime: 0,\n lastUpdateTime: 1571251089030,\n lastWriteDate: null,\n opTime: null,\n type: 'Unknown',\n minWireVersion: 0,\n maxWireVersion: 0,\n hosts: [],\n passives: [],\n arbiters: [],\n tags: []\n },\n newDescription: ServerDescription {\n address: 'localhost:27017',\n error: null,\n roundTripTime: 0,\n lastUpdateTime: 1571251089051,\n lastWriteDate: 2019-10-16T18:38:07.000Z,\n opTime: { ts: Timestamp, t: 18 },\n type: 'RSPrimary',\n minWireVersion: 0,\n maxWireVersion: 7,\n maxBsonObjectSize: 16777216,\n maxMessageSizeBytes: 48000000,\n maxWriteBatchSize: 100000,\n me: 'localhost:27017',\n hosts: [ 'localhost:27017' ],\n passives: [],\n arbiters: [],\n tags: [],\n setName: 'rs',\n setVersion: 1,\n electionId: ObjectID,\n primary: 'localhost:27017',\n logicalSessionTimeoutMinutes: 30,\n '$clusterTime': ClusterTime\n }\n}" - }, - { - "lang": "javascript", - "value": "ServerHeartbeatStartedEvent {\n connectionId: 'localhost:27017'\n}" - }, - { - "lang": "javascript", - "value": "ServerHeartbeatSucceededEvent {\n duration: 1.939997,\n reply:{\n hosts: [ 'localhost:27017' ],\n setName: 'rs',\n setVersion: 1,\n isWritablePrimary: true,\n secondary: false,\n primary: 'localhost:27017',\n me: 'localhost:27017',\n electionId: ObjectID,\n lastWrite: {\n opTime: { ts: [Timestamp], t: 18 },\n lastWriteDate: 2019-10-16T18:38:17.000Z,\n majorityOpTime: { ts: [Timestamp], t: 18 },\n majorityWriteDate: 2019-10-16T18:38:17.000Z\n },\n maxBsonObjectSize: 16777216,\n maxMessageSizeBytes: 48000000,\n maxWriteBatchSize: 100000,\n localTime: 2019-10-16T18:38:19.589Z,\n logicalSessionTimeoutMinutes: 30,\n minWireVersion: 0,\n maxWireVersion: 7,\n readOnly: false,\n ok: 1,\n operationTime: Timestamp,\n '$clusterTime': ClusterTime\n },\n connectionId: 'localhost:27017'\n}" - }, - { - "lang": "javascript", - "value": "ServerHeartbeatFailed {\n duration: 20,\n failure: 
MongoError('some error'),\n connectionId: 'localhost:27017'\n}" - }, - { - "lang": "javascript", - "value": "ServerOpeningEvent {\n topologyId: 0,\n address: 'localhost:27017'\n}" - }, - { - "lang": "javascript", - "value": "ServerClosedEvent {\n topologyId: 0,\n address: 'localhost:27017'\n}" - }, - { - "lang": "javascript", - "value": "TopologyOpeningEvent {\n topologyId: 0\n}" - }, - { - "lang": "javascript", - "value": "TopologyClosedEvent {\n topologyId: 0\n}" - }, - { - "lang": "javascript", - "value": "TopologyDescriptionChangedEvent {\n topologyId: 0,\n previousDescription: TopologyDescription {\n type: 'ReplicaSetNoPrimary',\n setName: null,\n maxSetVersion: null,\n maxElectionId: null,\n servers: Map {\n 'localhost:27017' => ServerDescription\n },\n stale: false,\n compatible: true,\n compatibilityError: null,\n logicalSessionTimeoutMinutes: null,\n heartbeatFrequencyMS: 10000,\n localThresholdMS: 15,\n options: Object,\n error: undefined,\n commonWireVersion: null\n },\n newDescription: TopologyDescription {\n type: 'ReplicaSetWithPrimary',\n setName: 'rs',\n maxSetVersion: 1,\n maxElectionId: null,\n servers: Map {\n 'localhost:27017' => ServerDescription\n },\n stale: false,\n compatible: true,\n compatibilityError: null,\n logicalSessionTimeoutMinutes: 30,\n heartbeatFrequencyMS: 10000,\n localThresholdMS: 15,\n options: Object,\n error: undefined,\n commonWireVersion: 7\n }\n}" - } - ], - "preview": "This guide shows you how to monitor topology events in a MongoDB instance,\nreplica set, or sharded cluster. The driver creates topology events, also\nknown as Server Discovery and Monitoring (SDAM) events, when there is\na change in the state of the instance or cluster that you connected to.\nFor example, the driver creates an event when you establish a new connection\nor if the cluster elects a new primary.", - "tags": "code example, node.js, watch", - "facets": { - "genre": ["reference"], - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "fundamentals/monitoring/command-monitoring", - "title": "Command Monitoring", - "headings": [ - "Overview", - "Event Subscription Example", - "Event Descriptions", - "Example Event Documents", - "commandStarted", - "commandSucceeded", - "commandFailed" - ], - "paragraphs": "This guide shows you how to monitor the success or failure of commands\nsent by the driver to your MongoDB deployment. The following sections demonstrate how to record command status in your\napplication and explore the information provided in these events. You can access one or more command monitoring events using the driver by\nsubscribing to them in your application. The following example demonstrates\nconnecting to a replica set and subscribing to one of the command monitoring\nevents created by the MongoDB deployment: Command monitoring is disabled by default. To enable command\nmonitoring, pass the monitorCommands option as true to\nyour MongoClient constructor. You can subscribe to any of the following command monitoring events: Event Name Description commandStarted Created when a command is started. commandSucceeded Created when a command succeeded. commandFailed Created when a command failed. 
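As a hedged illustration of how these events can be put to work (this sketch is not part of the original page), the following code subscribes to commandSucceeded and commandFailed and flags commands that run longer than an arbitrary threshold. The commandName and duration fields it reads appear in the sample event documents that follow:

import { MongoClient } from "mongodb";

// Assumed connection string; monitorCommands must be enabled to receive these events
const client = new MongoClient("mongodb://localhost:27017", { monitorCommands: true });

const SLOW_MS = 100; // illustrative threshold, not a driver default

// CommandSucceededEvent carries the command name and its duration in milliseconds
client.on("commandSucceeded", (event) => {
  if (event.duration > SLOW_MS) {
    console.warn(`Slow command ${event.commandName}: ${event.duration}ms`);
  }
});

// CommandFailedEvent carries the error that ended the command
client.on("commandFailed", (event) => {
  console.error(`Command ${event.commandName} failed:`, event.failure);
});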
The following sections show sample output for each type of command monitoring event.", - "code": [ - { - "lang": "javascript", - "value": "/* Subscribe to an event */\n\nconst { MongoClient } = require(\"mongodb\");\n\n// Replace the following with your MongoDB deployment's connection string\nconst uri = \"mongodb+srv:///?replicaSet=rs&writeConcern=majority\";\n\nconst client = new MongoClient(uri, { monitorCommands:true });\n\n// Replace with the name of the event you are subscribing to\nconst eventName = \"\";\n\n// Subscribe to a specified event and print a message when the event is received\nclient.on(eventName, event => {\n console.log(`received ${eventName}: ${JSON.stringify(event, null, 2)}`);\n});\n\nasync function run() {\n try {\n // Establish and verify connection to the \"admin\" database\n await client.db(\"admin\").command({ ping: 1 });\n console.log(\"Connected successfully\");\n } finally {\n // Close the database connection on completion or error\n await client.close();\n }\n}\nrun().catch(console.dir);\n" - }, - { - "lang": "javascript", - "value": "CommandStartedEvent {\n requestId: 1534,\n databaseName: \"app\",\n commandName: \"find\",\n address: 'localhost:27017',\n connectionId: 812613,\n command: {\n find: { firstName: \"Jane\", lastName: \"Doe\" }\n }\n}" - }, - { - "lang": "javascript", - "value": "CommandSucceededEvent {\n requestId: 1534,\n commandName: \"find\",\n address: 'localhost:27017',\n connectionId: 812613,\n duration: 15,\n reply: {\n cursor: {\n firstBatch: [\n {\n _id: ObjectId(\"5e8e2ca217b5324fa9847435\"),\n firstName: \"Jane\",\n lastName: \"Doe\"\n }\n ],\n _id: 0,\n ns: \"app.users\"\n },\n ok: 1,\n operationTime: 1586380205\n }\n}" - }, - { - "lang": "javascript", - "value": "CommandFailedEvent {\n requestId: 1534,\n commandName: \"find\",\n address: 'localhost:27017',\n connectionId: 812613,\n failure: Error(\"something failed\"),\n duration: 11\n}" - } - ], - "preview": "This guide shows you how to monitor the success or failure of commands\nsent by the driver to your MongoDB deployment.", - "tags": "code example, node.js, watch, command status", - "facets": { - "genre": ["reference"], - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "fundamentals/monitoring/connection-monitoring", - "title": "Connection Pool Monitoring", - "headings": [ - "Overview", - "Event Subscription Examples", - "Event Descriptions", - "Example Event Documents", - "connectionPoolCreated", - "connectionPoolReady", - "connectionPoolClosed", - "connectionCreated", - "connectionReady", - "connectionClosed", - "connectionCheckOutStarted", - "connectionCheckOutFailed", - "connectionCheckedOut", - "connectionCheckedIn", - "connectionPoolCleared" - ], - "paragraphs": "This guide shows you how to monitor the driver's connection pool . A\nconnection pool is a set of open TCP connections your driver maintains\nwith a MongoDB instance. Connection pools help reduce the number of\nnetwork handshakes your application needs to perform and can help your\napplication run faster. The following sections demonstrate how to record connection pool events in your\napplication and explore the information provided in these events. You can access one or more connection pool events using the driver by\nsubscribing to them in your application. 
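Before the subscription examples, a brief aside the original page does not cover: the pool that these events describe is configured through MongoClient options. The option names below (maxPoolSize, minPoolSize, maxIdleTimeMS) are documented driver options; the values are illustrative only:

import { MongoClient } from "mongodb";

// Assumed connection string; tune the values to your workload
const client = new MongoClient("mongodb://localhost:27017", {
  maxPoolSize: 20,       // upper bound on concurrent connections in the pool
  minPoolSize: 5,        // connections the driver keeps open even when idle
  maxIdleTimeMS: 60_000, // close connections that sit idle longer than a minute
});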
The following example demonstrates\nconnecting to a replica set and subscribing to one of the connection\npool monitoring events created by the MongoDB deployment: Connection pool monitoring events can aid you in debugging and understanding\nthe behavior of your application's connection pool. The following example uses connection\npool monitoring events to return a count of checked-out connections in the pool: You can subscribe to any of the following connection pool monitoring events: Event Name Description connectionPoolCreated Created when a connection pool is created. connectionPoolReady Created when a connection pool is ready. connectionPoolClosed Created when a connection pool is closed before server\ninstance destruction. connectionCreated Created when a connection is created, but not necessarily\nwhen it is used for an operation. connectionReady Created after a connection has successfully completed a\nhandshake and is ready to be used for operations. connectionClosed Created when a connection is closed. connectionCheckOutStarted Created when an operation attempts to acquire a connection for\nexecution. connectionCheckOutFailed Created when an operation fails to acquire a connection for\nexecution. connectionCheckedOut Created when an operation successfully acquires a connection for\nexecution. connectionCheckedIn Created when a connection is checked back into the pool after an operation\nis executed. connectionPoolCleared Created when a connection pool is cleared. The following sections show sample output for each type of connection\npool monitoring event.", - "code": [ - { - "lang": "javascript", - "value": "const { MongoClient } = require(\"mongodb\");\n\n// Replace the following with your MongoDB deployment's connection string\nconst uri =\n \"mongodb+srv:///?replicaSet=rs&writeConcern=majority\";\n\nconst client = new MongoClient(uri);\n\n// Replace with the name of the event you are subscribing to\nconst eventName = \"\";\n\n// Subscribe to the event\nclient.on(eventName, (event) =>\n console.log(\"\\nreceived event:\\n\", event)\n);\n\nasync function run() {\n try {\n // Establish and verify connection\n await client.db(\"admin\").command({ ping: 1 });\n console.log(\"\\nConnected successfully!\\n\");\n } finally {\n // Ensures that the client will close when you finish/error\n await client.close();\n }\n}\nrun().catch(console.dir);\n" - }, - { - "lang": "javascript", - "value": "function connectionPoolStatus(client) {\n let checkedOut = 0;\n\n function onCheckout() {\n checkedOut++;\n }\n\n function onCheckin() {\n checkedOut--;\n }\n\n function onClose() {\n client.removeListener('connectionCheckedOut', onCheckout);\n client.removeListener('connectionCheckedIn', onCheckin);\n\n checkedOut = NaN;\n }\n\n // Decreases count of connections checked out of the pool when connectionCheckedIn event is triggered\n client.on('connectionCheckedIn', onCheckin);\n\n // Increases count of connections checked out of the pool when connectionCheckedOut event is triggered\n client.on('connectionCheckedOut', onCheckout);\n\n // Cleans up event listeners when client is closed\n client.on('close', onClose);\n\n return {\n count: () => checkedOut,\n cleanUp: onClose\n };\n}" - }, - { - "lang": "none", - "value": "ConnectionPoolCreatedEvent {\n time: 2023-02-13T15:54:06.944Z,\n address: '...',\n options: {...}\n}" - }, - { - "lang": "none", - "value": "ConnectionPoolReadyEvent {\n time: 2023-02-13T15:56:38.440Z,\n address: '...'\n}" - }, - { - "lang": "none", - "value": "ConnectionPoolClosedEvent 
{\n time: 2023-02-13T15:56:38.440Z,\n address: '...'\n}" - }, - { - "lang": "none", - "value": "ConnectionCreatedEvent {\n time: 2023-02-13T15:56:38.291Z,\n address: '...',\n connectionId: 1\n}" - }, - { - "lang": "none", - "value": "ConnectionReadyEvent {\n time: 2023-02-13T15:56:38.291Z,\n address: '...',\n connectionId: 1\n}" - }, - { - "lang": "none", - "value": "ConnectionClosedEvent {\n time: 2023-02-13T15:56:38.439Z,\n address: '...',\n connectionId: 1,\n reason: 'poolClosed',\n serviceId: undefined\n}" - }, - { - "lang": "none", - "value": "ConnectionCheckOutStartedEvent {\n time: 2023-02-13T15:56:38.291Z,\n address: '...',\n}" - }, - { - "lang": "none", - "value": "ConnectionCheckOutFailedEvent {\n time: 2023-02-13T15:56:38.291Z,\n address: '...',\n reason: ...\n}" - }, - { - "lang": "none", - "value": "ConnectionCheckedOutEvent {\n time: 2023-02-13T15:54:07.188Z,\n address: '...',\n connectionId: 1\n}" - }, - { - "lang": "none", - "value": "ConnectionCheckedInEvent {\n time: 2023-02-13T15:54:07.189Z,\n address: '...',\n connectionId: 1\n}" - }, - { - "lang": "none", - "value": "ConnectionPoolClearedEvent {\n time: 2023-02-13T15:56:38.439Z,\n address: '...',\n serviceId: undefined,\n interruptInUseConnections: true,\n}" - } - ], - "preview": "This guide shows you how to monitor the driver's connection pool. A\nconnection pool is a set of open TCP connections your driver maintains\nwith a MongoDB instance. Connection pools help reduce the number of\nnetwork handshakes your application needs to perform and can help your\napplication run faster.", - "tags": "code example, node.js, watch, deployment", - "facets": { - "genre": ["reference"], - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "fundamentals/monitoring", - "title": "Monitoring", - "headings": [], - "paragraphs": "Cluster Monitoring : monitoring\nchanges in a cluster Command Monitoring : monitoring\nthe execution status of commands Connection Pool Monitoring : monitoring\nthe driver's connection pool", - "code": [], - "preview": null, - "tags": null, - "facets": { - "genre": ["reference"], - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "fundamentals/promises", - "title": "Promises", - "headings": [ - "Overview", - "Promises", - "Await", - "Operational Considerations" - ], - "paragraphs": "The Node.js driver uses the asynchronous Javascript API to communicate with\nyour MongoDB cluster. Asynchronous Javascript allows you to execute operations without waiting for\nthe processing thread to become free. This helps prevent your application\nfrom becoming unresponsive when\nexecuting long-running operations. For more information about asynchronous\nJavascript, see the MDN web documentation on\n Asynchronous Javascript . This section describes Promises that you can use with the Node.js driver to\naccess the results of your method calls to your MongoDB cluster. A Promise is an object returned by the asynchronous method call that allows\nyou to access information on the eventual success or failure of the operation\nthat they wrap. The Promise is in the Pending state if the operation is\nstill running, Fulfilled if the operation completed successfully, and\n Rejected if the operation threw an exception. For more information on\nPromises and related terminology, see the MDN documentation on\n Promises . 
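The following sketch, added for illustration and not part of the original page, ties those three states to a driver call; the deployment and collection names are assumptions:

import { MongoClient } from "mongodb";

const client = new MongoClient("mongodb://localhost:27017"); // assumed deployment
const myColl = client.db("testDB").collection("blogPosts");  // assumed names

// The method returns immediately with a Promise in the Pending state
const pending = myColl.findOne({ title: "My Post" });

// then() callbacks run once the Promise settles: the first when it is
// Fulfilled, the second when it is Rejected
pending.then(
  (doc) => console.log("Fulfilled:", doc),
  (err) => console.error("Rejected:", err),
);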
Most driver methods that communicate with your MongoDB cluster, such as\n findOneAndUpdate() and countDocuments() , return Promise\nobjects and already contain logic to handle the success or failure of the\noperation. You can define your own logic that executes once the Promise reaches the\n Fulfilled or Rejected state by appending the then() method.\nThe first parameter of then() is the method that gets called when the\nPromise reaches the Fulfilled state and the optional second parameter is\nthe method that gets called when it reaches the Rejected state. The\n then() method returns a Promise to which you can append more\n then() methods. When you append one or more then() methods to a Promise, each call passes\nits execution result to the next one. This pattern is called\n Promise chaining . The following code snippet shows an example of Promise\nchaining by appending a single then() method. To handle only Promise transitions to the Rejected state, use the catch() method\nrather than passing a null first parameter to then() . The catch() method\naccepts a single callback that is executed when the Promise transitions to the Rejected \nstate. The catch() method is often appended at the end of a Promise chain to\nhandle any exceptions thrown. The following code snippet demonstrates appending\na catch() method to the end of a Promise chain. Certain methods in the driver such as find() return a Cursor \ninstead of a Promise. To determine what type each method returns, see\nthe Node.js API documentation . If you are using async functions, you can use the await operator on\na Promise to pause further execution until the Promise reaches either the\n Fulfilled or Rejected state and returns. Since the await operator\nwaits for the resolution of the Promise, you can use it in place of\nPromise chaining to sequentially execute your logic. The following code\nsnippet uses await to execute the same logic as the first Promise\nchaining example. For more information, see the MDN documentation on\n await . One common mistake when using async methods is to forget to use await \noperator on Promises to get the value of the result rather than the Promise\nobject. Consider the following example in which we iterate over a cursor\nusing hasNext() , which returns a Promise that resolves to a boolean that\nindicates whether more results exist, and next() which returns a\nPromise that resolves to the next entry the cursor is pointing to. Since the call to hasNext() returns a Promise , the conditional\nstatement returns true regardless of the value that it resolves to. If we alter the code to await the call to next() only, as demonstrated\nin the following code snippet, it throws the following error:\n MongoError: Cursor is closed . While hasNext() is not called until after the result of next() returns,\nthe call to hasNext() returns a Promise which evaluates to true rather\nthan the value it resolves to, similar to the prior example. The code\nattempts to call next() on a Cursor that has already returned its results\nand closed as a result. If we alter the code to only await the call to hasNext() as shown in\nthe following example, the console prints Promise objects rather than the\ndocument objects. 
Use await before both the hasNext() and next() method calls to\nensure that you are operating on the correct return values as demonstrated\nin the following code:", - "code": [ - { - "lang": "js", - "value": "collection\n .updateOne({ name: \"Mount McKinley\" }, { $set: { meters: 6190 } })\n .then(\n res => console.log(`Updated ${res.result.n} documents`),\n err => console.error(`Something went wrong: ${err}`),\n );" - }, - { - "lang": "js", - "value": "deleteOne({ name: \"Mount Doom\" })\n .then(result => {\n if (result.deletedCount !== 1) {\n throw \"Could not find Mount Doom!\";\n }\n return new Promise((resolve, reject) => {\n ...\n });\n })\n .then(result => console.log(`Vanquished ${result.quantity} Nazgul`))\n .catch(err => console.error(`Fatal error occurred: ${err}`));" - }, - { - "lang": "js", - "value": "async function run() {\n ...\n try {\n res = await myColl.updateOne(\n { name: \"Mount McKinley\" },\n { $set: { meters: 6190 } },\n );\n console.log(`Updated ${res.result.n} documents`);\n } catch (err) {\n console.error(`Something went wrong: ${err}`);\n }\n}" - }, - { - "lang": "js", - "value": "async function run() {\n ...\n // WARNING: this snippet may cause an infinite loop\n const cursor = myColl.find();\n\n while (cursor.hasNext()) {\n console.log(cursor.next());\n }\n}" - }, - { - "lang": "js", - "value": "async function run() {\n ...\n // WARNING: this snippet throws a MongoError\n const cursor = myColl.find();\n\n while (cursor.hasNext()) {\n console.log(await cursor.next());\n }\n}" - }, - { - "lang": "js", - "value": "async function run() {\n ...\n // WARNING: this snippet prints Promises instead of the objects they resolve to\n const cursor = myColl.find();\n\n while (await cursor.hasNext()) {\n console.log(cursor.next());\n }\n}" - }, - { - "lang": "js", - "value": "async function run() {\n ...\n const cursor = myColl.find();\n\n while (await cursor.hasNext()) {\n console.log(await cursor.next());\n }\n}" - } - ], - "preview": "The Node.js driver uses the asynchronous Javascript API to communicate with\nyour MongoDB cluster.", - "tags": "code example, node.js, operation status, chain", - "facets": { - "genre": ["reference"], - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "fundamentals/run-command", - "title": "Run a Command", - "headings": [ - "Overview", - "Execute a Command", - "Command Options", - "Response", - "Example", - "Output", - "Additional Information" - ], - "paragraphs": "In this guide, you can learn how to run a database command with the\nNode.js driver. You can use database commands to perform a variety of\nadministrative and diagnostic tasks, such as fetching server statistics,\ninitializing a replica set, or running an aggregation pipeline. The driver provides wrapper methods for many database commands.\nWe recommend using driver methods instead of executing database\ncommands when possible. To perform administrative tasks, use the MongoDB Shell \ninstead of the Node.js driver. Calling the db.runCommand() \nmethod inside the shell is the preferred method to issue database\ncommands, as it provides a consistent interface between the shell and\ndrivers. To run a database command, you must specify the command and any relevant\nparameters in a document, then pass this document to a\ncommand execution method. 
The Node.js driver provides the following methods\nto run database commands: The following code shows how you can use the command() \nmethod to run the hello command, which returns information about\nthe current member's role in the replica set, on a database: For a full list of database commands and corresponding parameters, see\nthe Additional Information section . command() , which returns the command response as a\n Document type. You can use this method with any database command. runCursorCommand() , which returns the command response as an iterable\n RunCommandCursor type. You can use this method only if your database command\nreturns multiple result documents. You can specify optional command behavior for the command() \nand runCursorCommand() methods. The command() method accepts a RunCommandOptions object. To learn\nmore about the RunCommandOptions type, see the API documentation . The runCursorCommand() method accepts a RunCursorCommandOptions \nobject. To learn more about the RunCursorCommandOptions type, see\nthe API documentation . Starting in version 6.0 of the Node.js driver, you can pass only the\nfollowing options to the command() method: You can set more options in the document that you pass to the command() method. To\nlearn more about a command and the options that it accepts, locate the command and follow\nthe link on the Database Commands section of the Server\nmanual. The following code shows how to specify a grantRolesToUser command\nthat executes with a majority write concern: comment enableUtf8Validation raw readPreference session The command() and runCursorCommand() methods ignore\nthe read preference setting you may have set on your Db object.\nBy default, these methods use the primary read preference. The following code shows how to specify a read preference and pass it\nas an option to the command() method: For more information on read preference options, see Read\nPreference in the Server manual. Each method returns a Document object or a cursor that contains\nthe response from the database after the command has been executed. Each\ndatabase command performs a different function, so the response content\ncan vary across commands. However, every response contains documents\nwith the following fields: Field Description Provides fields specific to the database command. For example,\n count returns the n field and explain returns the\n queryPlanner field. ok Indicates whether the command has succeeded ( 1 )\nor failed ( 0 ). operationTime Indicates the logical time of the operation. MongoDB uses the\nlogical time to order operations. To learn more about logical time, see our blog post about\nthe Global Logical Clock . $clusterTime Provides a document that returns the signed cluster time. Cluster time is a\nlogical time used for ordering of operations. The document contains the following fields: clusterTime , which is the timestamp of the highest known cluster time for the member. signature , which is a document that contains the hash of the cluster time and the ID\nof the key used to sign the cluster time. The following code shows how you can use the runCursorCommand() method to\nrun the checkMetadataConsistency command on the testDB database\nand iterate through the results: The output contains the contents of the cursor object. The documents\ndescribe any metadata inconsistencies in the database: If you store the command response in a cursor, you see only the\ncommand result documents when you access the contents of the cursor. 
You won't\nsee the ok , operationTime , and $clusterTime fields. For more information about the concepts in this guide, see the following documentation: To learn how to retrieve data from a cursor, see the\n Access Data From a Cursor fundamentals page. db.runCommand() Database Commands hello Command find Command", - "code": [ - { - "lang": "javascript", - "value": "const result = await myDB.command({ hello: 1 });" - }, - { - "lang": "javascript", - "value": "const commandDoc = {\n grantRolesToUser: \"user011\",\n roles: [ \"readWrite\" ],\n writeConcern: { w: \"majority\" }\n};\nconst result = await myDB.command(commandDoc);" - }, - { - "lang": "javascript", - "value": "const commandOptions = { readPreference: \"nearest\" };\nconst result = await myDB.command(commandDoc, commandOptions);" - }, - { - "lang": "javascript", - "value": "// Connect to the \"testDB\" database\nconst db = client.db(\"testDB\");\n\n// Run a cursor command to check metadata consistency within the database\nconst cursor = await db.runCursorCommand({\n checkMetadataConsistency: 1,\n});\n// Iterate through the cursor's results and print the contents\nfor await (const doc of cursor) {\n console.log(doc);\n}" - }, - { - "lang": "javascript", - "value": "{\n type: ...,\n description: ...,\n details: {\n namespace: ...,\n info: ...\n }\n}\n{\n type: ...,\n description: ...,\n details: {\n namespace: ...,\n collectionUUID: ...,\n maxKeyObj: ...,\n ...\n }\n}" - } - ], - "preview": "In this guide, you can learn how to run a database command with the\nNode.js driver. You can use database commands to perform a variety of\nadministrative and diagnostic tasks, such as fetching server statistics,\ninitializing a replica set, or running an aggregation pipeline.", - "tags": null, - "facets": { - "genre": ["tutorial"], - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "fundamentals/stable-api", - "title": "Stable API", - "headings": [ - "Overview", - "Enable the Stable API on a MongoDB Client", - "Stable API Options" - ], - "paragraphs": "The Stable API feature requires MongoDB Server 5.0 or later. Use the Stable API feature only if all the MongoDB\nservers you are connecting to support this feature. In this guide, you can learn how to specify the Stable API when\nconnecting to a MongoDB instance or replica set. You can use the\nStable API feature to force the server to run operations with behavior\ncompatible with the specified API version . An API version defines the\nexpected behavior of the operations it covers and the format of server\nresponses. If you change to a different API version, the operations are not\nguaranteed to be compatible and the server responses are not guaranteed to\nbe similar. When you use the Stable API feature with an official MongoDB driver, you\ncan update your driver or server without worrying about backward compatibility\nissues of the commands covered by the Stable API. See the MongoDB reference page on the Stable API \nfor more information including a list of commands it covers. The following sections describe how you can enable the Stable API for\nyour MongoDB client and the options that you can specify. To enable the Stable API, you must specify an API version in the MongoClientOptions \npassed to your MongoClient . Once you instantiate a MongoClient instance with\na specified API version, all commands you run with that client use that\nversion of the Stable API. 
The example below shows how you can instantiate a MongoClient that\nsets the Stable API version and connects to a server by performing the\nfollowing operations: For more information on the methods and classes referenced in this\nsection, see the following API Documentation: To run commands that are not covered by the Stable API, make sure the\n\"strict\" option is disabled. See the section on\n Stable API Options for more\ninformation. which you want to run a command. Specify a server URI to connect to. Specify a Stable API version in the MongoClientOptions object, using a\nconstant from the ServerApiVersion object. Instantiate a MongoClient , passing the URI and the MongoClientOptions \nto the constructor. If you specify an API version and connect to a MongoDB Server that does\nnot support the Stable API, your application may throw an error when\nconnecting to your MongoDB Server with the following text: ServerApiVersion MongoClientOptions MongoClient You can enable or disable optional behavior related to the Stable API as\ndescribed in the following table. The following example shows how you can set the options of the ServerApi \ninterface. For more information on the options in this section, see the following\nAPI Documentation: Option Name Description version strict deprecationErrors ServerApi", - "code": [ - { - "lang": "javascript", - "value": "const { MongoClient, ServerApiVersion } = require(\"mongodb\");\n\n// Replace the placeholders in the connection string uri with your credentials\nconst uri = \"mongodb+srv://:@?retryWrites=true&w=majority\";\n\n// Create a client with options to specify Stable API Version 1\nconst client = new MongoClient(uri, { serverApi: ServerApiVersion.v1 });" - }, - { - "lang": "none", - "value": "MongoParseError: Invalid server API version=..." - }, - { - "lang": "javascript", - "value": "const { MongoClient, ServerApiVersion } = require(\"mongodb\");\n\n// Replace the placeholders in the connection string uri with your credentials\nconst uri = \"mongodb+srv://:@?retryWrites=true&w=majority\";\n\n/* Create a client with options to specify Stable API Version 1, return\nerrors for commands outside of the API version, and raise exceptions\nfor deprecated commands */\nconst client = new MongoClient(uri,\n {\n serverApi: {\n version: ServerApiVersion.v1,\n strict: true,\n deprecationErrors: true,\n }\n });" - } - ], - "preview": "In this guide, you can learn how to specify the Stable API when\nconnecting to a MongoDB instance or replica set. You can use the\nStable API feature to force the server to run operations with behavior\ncompatible with the specified API version. An API version defines the\nexpected behavior of the operations it covers and the format of server\nresponses. If you change to a different API version, the operations are not\nguaranteed to be compatible and the server responses are not guaranteed to\nbe similar.", - "tags": "code example, node.js, safe, breaking change", - "facets": { - "genre": ["reference"], - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "fundamentals/time-series", - "title": "Time Series", - "headings": [ - "Overview", - "Create a Time Series Collection", - "Query a Time Series Collection" - ], - "paragraphs": "In this guide, you can learn about time series collections in the MongoDB\nNode.js driver. 
We recommend that you create a time series collection using the MongoDB Shell.\nLearn more about how to install and run the MongoDB Shell in the MongoDB Shell documentation .\nFor detailed instructions on creating a time series collection\nusing the MongoDB Shell, see our\n MongoDB Manual entry on time series collections . Since you query a time series collection in the same way you query other\ncollection types in MongoDB, the Node.js driver has no features specifically for\nquerying time series data. For more information on querying data in the MongoDB Node.js driver, see the\nfollowing resources: Guide On Read Operations Guide On Aggregation MongoDB version 5.0 introduces window functions into the MongoDB aggregation\npipeline. You can use window functions to perform operations on a\ncontiguous span of time series data. For more information, see\n the reference documentation for the $setWindowFields aggregation stage .", - "code": [], - "preview": "In this guide, you can learn about time series collections in the MongoDB\nNode.js driver.", - "tags": null, - "facets": { - "genre": ["tutorial"], - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "fundamentals/transactions", - "title": "Transactions", - "headings": [ - "Overview", - "Transaction APIs", - "Core API", - "Convenient Transaction API", - "Transaction Options", - "Transaction Errors" - ], - "paragraphs": "In this guide, you can learn how to use the\nNode.js driver to perform transactions . Transactions allow you\nto run a series of operations that do not change any data until the\nentire transaction is committed. If any operation in the transaction fails, the\ndriver ends the transaction and discards all data changes before they\never become visible. This feature is called atomicity . Since all write operations on a single document in MongoDB are atomic, you\nmight want to use transactions to make an atomic change that\nmodifies multiple documents. This situation requires a multi-document transaction.\nMulti-document transactions are ACID compliant because MongoDB\nguarantees that the data involved in your transaction operations remains\nconsistent, even if the driver encounters unexpected errors. To learn more about ACID compliance and transactions, see our article on\nACID transactions . In MongoDB, multi-document transactions run within a client session .\nA client session is a grouping of related read or write operations that\nyou want to execute sequentially. We recommend you reuse\nyour client for multiple sessions and transactions instead of\ninstantiating a new client each time. When combined with majority read and\nwrite concerns, the driver guarantees causal consistency between the\noperations. To learn more, see Client Sessions and Causal Consistency Guarantees in the\nServer manual. Learn more about how to use the driver to perform multi-document\ntransactions in the following sections of this guide: To execute a multi-document transaction, you must be connected to a\ndeployment running MongoDB Server version 4.0 or later. For a detailed list of limitations, see the Transactions and\nOperations section in\nthe Server manual. Transaction APIs Transaction Options Transaction Errors The driver provides two APIs for performing transactions, the Core\nAPI and the Convenient Transaction API . The Core API is a framework that enables\nyou to create, commit, and end transactions. 
When using this API,\nyou must explicitly perform the following actions: The Convenient Transaction API is a\nframework that enables you to perform transactions without being\nresponsible for committing or ending them. This API automatically\nincorporates error-handling logic to retry operations when the server\nraises certain error types. To learn more about this behavior, see the\n Transaction Errors section of this guide. Create, commit, and end the transaction. Create and end the session in which you run the transaction. Implement error-handling logic. When you connect to MongoDB Server version 4.2 or\nearlier, you can perform write operations in a transaction only on\ncollections that already exist. When you connect to MongoDB Server\nversion 4.4 and later, the server automatically creates collections\nas necessary when you perform write operations in a transaction. To\nlearn more about this behavior, see Create Collections and\nIndexes in a Transaction \nin the Server manual. The Core API provides the following methods to implement transactions: You must perform the following steps when using this API: The following code demonstrates how to perform a transaction by using\nthe Core API: To see a fully runnable example that uses this API, see the\n Use the Core API usage example. startSession() :\ncreates a new ClientSession instance startTransaction() : starts a new\ntransaction commitTransaction() : commits the\nactive transaction in the session that it was created in abortTransaction() : ends the\nactive transaction in the session that it was created in endSession() : ends the\nactive session Pass the session instance to each operation that\nyou want to run in that session. Implement a catch block in which you identify\nserver transaction errors and implement error-handling logic. The driver throws an error if you provide a session from one MongoClient \ninstance to a different client instance. For example, the following code generates an\n MongoInvalidArgumentError error because it creates\na ClientSession instance from the client1 client, but provides\nthis session to the client2 client for a write operation: The Convenient Transaction API provides the following methods to\nimplement transactions: These methods return the value that the callback returns. For example,\nif a callback you pass to the withTransaction() method returns the\ndocument { hello: \"world\" } , then the withTransaction() method\nalso returns that document. When you use the Convenient Transaction API, you\ncan propagate return values from the callback as the return values of\nthe withTransaction() and withSession() methods to\nwork with them elsewhere in your code. You must perform the following steps when using this API: The following code demonstrates how to perform a transaction by using\nthe Convenient Transaction API: To see a fully runnable example that uses this API, see the\n Use the Convenient Transaction API usage example. withSession() : Runs\nthe callback passed to it within a session. The API handles the creation and\ntermination of the session automatically. withTransaction() :\nRuns the callback passed to it within a transaction and calls the\n commitTransaction() method when the callback returns. Pass the session instance to each operation that\nyou want to run in that session. Implement the async await syntax for each operation in the\nsession. Avoid parallelism, such as calling the Promise.all() method.\nUsing sessions in parallel usually leads to server errors. 
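To make the Core API's error-handling step concrete, here is a sketch of a retry loop. It is illustrative rather than the driver's own implementation: the hasErrorLabel() method and the TransientTransactionError label are real driver APIs, but the policy shown (retry the whole transaction on a transient error) is a simplification that omits UnknownTransactionCommitResult handling:

import { ClientSession, MongoError } from "mongodb";

// Sketch only: the Convenient Transaction API implements this loop for you
async function runTransactionWithRetry(
  session: ClientSession,
  txnFn: (session: ClientSession) => Promise<void>,
): Promise<void> {
  for (;;) {
    session.startTransaction();
    try {
      await txnFn(session);
      await session.commitTransaction();
      return;
    } catch (error) {
      try {
        await session.abortTransaction();
      } catch {
        // ignore abort errors in this sketch
      }
      // A TransientTransactionError label means the whole transaction
      // is safe to retry from the beginning
      if (error instanceof MongoError && error.hasErrorLabel("TransientTransactionError")) {
        continue;
      }
      throw error;
    }
  }
}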
You can pass a TransactionOptions instance to the\n startTransaction() and withTransaction() methods to configure\nhow the driver performs a transaction. When you specify an option,\nit overrides the value of the option that you might have set on your\n MongoClient instance. The following table includes options that you can specify\nin a TransactionOptions instance: For a full list of options, see the API documentation for\n TransactionOptions . The following code shows how to define and pass transaction options to\nthe startTransaction() method: Setting Description readConcern writeConcern readPreference maxCommitTimeMS Specifies the length of time that a commit action on a\ntransaction can run, in milliseconds. The transaction inherits settings from your MongoClient instance unless you\nspecify them in your transaction options. If you are using the Core API to perform a transaction, you must incorporate\nerror-handling logic into your application for the following errors: The Convenient Transaction API incorporates retry logic for these error\ntypes, so the driver retries the transaction until there is a successful commit. TransientTransactionError : Raised if a write operation errors\nbefore the driver commits the transaction. To learn more about this error, see the\n TransientTransactionError description on\nthe Driver API page in the Server manual. UnknownTransactionCommitResult : Raised if the commit operation\nencounters an error. To learn more about this error, see the\n UnknownTransactionCommitResult description on\nthe Driver API page in the Server manual.", - "code": [ - { - "lang": "javascript", - "value": "async function coreTest(client) {\n const session = client.startSession();\n try {\n session.startTransaction();\n\n const savingsColl = client.db(\"bank\").collection(\"savings_accounts\");\n await savingsColl.findOneAndUpdate(\n {account_id: \"9876\"}, \n {$inc: {amount: -100 }}, \n { session });\n\n const checkingColl = client.db(\"bank\").collection(\"checking_accounts\");\n await checkingColl.findOneAndUpdate(\n {account_id: \"9876\"}, \n {$inc: {amount: 100 }}, \n { session });\n\n // ... perform other operations\n\n await session.commitTransaction();\n console.log(\"Transaction committed.\");\n } catch (error) {\n console.log(\"An error occurred during the transaction:\" + error);\n await session.abortTransaction();\n } finally {\n await session.endSession();\n }\n}" - }, - { - "lang": "js", - "value": "const session = client1.startSession();\nclient2.db('myDB').collection('myColl').insertOne({ name: 'Jane Eyre' }, { session });" - }, - { - "lang": "javascript", - "value": "async function convTest(client) {\n let txnRes = await client.withSession(async (session) =>\n session.withTransaction(async (session) => {\n const savingsColl = client.db(\"bank\").collection(\"savings_accounts\");\n await savingsColl.findOneAndUpdate(\n {account_id: \"9876\"}, \n {$inc: {amount: -100 }}, \n { session });\n \n const checkingColl = client.db(\"bank\").collection(\"checking_accounts\");\n await checkingColl.findOneAndUpdate(\n {account_id: \"9876\"}, \n {$inc: {amount: 100 }}, \n { session });\n\n // ... 
perform other operations\n\n return \"Transaction committed.\";\n }, null)\n );\n console.log(txnRes);\n}" - }, - { - "lang": "javascript", - "value": "const txnOpts = {\n readPreference: 'primary',\n readConcern: { level: 'local' },\n writeConcern: { w: 'majority' },\n maxCommitTimeMS: 1000\n};\nsession.startTransaction(txnOpts);" - } - ], - "preview": "In this guide, you can learn how to use the\nNode.js driver to perform transactions. Transactions allow you\nto run a series of operations that do not change any data until the\nentire transaction is committed. If any operation in the transaction fails, the\ndriver ends the transaction and discards all data changes before they\never become visible. This feature is called atomicity.", - "tags": "modify, customize", - "facets": { - "genre": ["reference"], - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "fundamentals/typescript", - "title": "TypeScript", - "headings": [ - "Overview", - "Features", - "Type Parameters that Extend Document", - "Type Parameters of Any Type", - "Type Safety and Dot Notation", - "Referencing Keys that Incorporate Variables", - "Working with the _id Field", - "Insert Operations and the _id Field", - "Find Methods and the _id Field", - "Known Limitations", - "Recursive Types and Dot Notation", - "Mutual Recursion" - ], - "paragraphs": "In this guide, you can learn about the TypeScript features and limitations\nof the MongoDB Node.js driver. TypeScript is a strongly typed programming\nlanguage that compiles to JavaScript. The TypeScript compiler offers type checking in real time. Code editors that\nsupport TypeScript can provide autocomplete suggestions, display documentation\ninline, and identify type-related errors. All TypeScript features of the driver are optional. All valid JavaScript\ncode written with the driver is also valid TypeScript code. For more information, see the\n TypeScript website . If you use TypeScript, you can specify a type for some classes in the driver.\nAll classes that accept a type parameter in the driver have the default type\n Document . The Document interface has the following definition: All object types extend the Document interface. For more information on object types, see the\n TypeScript handbook . The following classes accept all types that extend the Document interface: You can pass a type parameter that extends the Document interface like this: Collection ChangeStream Keys not listed in your specified type parameter receive the any type.\nThe following code snippet demonstrates this behavior: The following classes accept all type parameters: You can find a code snippet that shows how to specify a type for the FindCursor \nclass in the\n Find Multiple Documents Usage Example . FindCursor AggregationCursor Starting in version 5.0, by default, the Node.js driver does not provide type\nsafety for operations that search on fields expressed in dot notation . Dot\nnotation is a syntax you can use to navigate nested JSON objects. When\nyou construct a filter to pass to a query, the driver will not raise a\ntype error even if you specify an incorrectly typed value for a field expressed\nin dot notation. 
The following code snippet defines the ClassificationPet interface,\nwhich includes a classification field that enables you to specify the\ngenus and color of dogs and cats: The driver does not raise a type error for the following code sample,\neven though the value of classification.color is a boolean\ninstead of a string: You can enable type-checking by constructing filters as StrictFilter or\n StrictUpdateFilter types. In the following code sample, the filter is assigned a\n StrictFilter type. Given this filter type, the Node.js driver\nreports a type error because the value of classification.color is a\nboolean instead of a string. The following example assigns a StrictUpdateFilter type to an update\nfilter. The Node.js driver reports a type error because the value of\n classification.color is a boolean instead of a string. The StrictFilter and StrictUpdateFilter types are experimental and\nmight incorrectly show type errors in valid queries. To query a collection or perform another operation with a key that incorporates\nvariables, you must use an as const assertion when specifying the key. This\nmechanism allows your code to compile successfully if the input types are\ncorrect. The following code snippet defines the ClassificationPet interface\nand the Mealtime interface. ClassificationPet includes a\n mealtimes field that contains an array of Mealtime interfaces,\neach of which includes a time field: The following code snippet performs a find-and-update operation on a\ncollection of ClassificationPet documents. The operation\nupdates the nested time field of the Mealtime instance at index\n 1 . The index position is specified by the variable mealCounter : To learn more about dot notation, see\n Dot Notation \nin the MongoDB manual. To learn more about the limitations of dot notation in the\nNode.js driver, see the\n Recursive Types and Dot Notation \nsection. MongoDB does not recommend specifying the _id as a part of your model.\nOmitting the _id field makes the model more generic and reusable and more accurately\nmodels the data important to an application. The Node driver\u2019s TypeScript integration\ntakes care of adding the _id field to the return types for relevant methods. The following sections provide information about write and read operations that\nuse the _id field. How you specify the _id field in type parameters passed to your\n Collection instance affects the behavior\nof insert operations. The following table describes how different\n _id field specifications affect insert operations: If you must specify the _id field as required in the type you define to represent\ndocuments in your collection but you do not want to specify values for the\n _id field in insert operations, use the OptionalId helper type when you\ncreate your collection. The OptionalId type accepts a type parameter as an\nargument and returns that type with an optional _id field. The following code snippet defines the IdPet interface, which\nincludes a type for the _id field: The following code uses the preceding interface and the\n OptionalId type to insert a document without specifying a value for the\n _id field: To learn more about the _id field, see\n The _id Field in the MongoDB\nmanual. 
To learn more about the types, interfaces, and classes discussed in this section, see the\nfollowing resources: _id field type Example Type Required on insert Behavior on insert OptionalId API documentation PkFactory API documentation ObjectId source code The find and findOne methods of the Collection class include\nthe _id field in their return type. The driver infers the type of the\nreturned _id field based on the type parameter you passed to your\n Collection instance. If the type parameter you passed to your Collection instance includes the\n _id field in its schema, the driver infers that the _id field returned\nfrom the method is of the type specified in the schema. However, if the type parameter you passed to your Collection instance does not\ninclude the _id field in its schema, the driver infers that the type of the\n _id field returned from the method is ObjectId . The following code uses the Pet \ninterface to return a document with an _id inferred to be of type ObjectId : The following code uses the IdNumberPet interface to return a\ndocument with an _id inferred to be of type number : To learn more about the classes and methods discussed in this section, see the following\nAPI documentation: The type parameter passed to your Collection influences only the type\ninference of the fields returned from the method. The driver does not convert\nthe field to the specified type. The type of each field in your type\nparameter's schema must match the type of the corresponding field in the\ncollection. If you specify a projection in a find\nmethod, you must pass a type parameter to your find method that reflects\nthe structure of your projected documents.\nWithout a type parameter, TypeScript cannot check at compile time that you\nare using your projected documents safely. To show this behavior, the following code snippet passes type checking but\nraises an error at runtime: To catch this error at compile time, pass a type parameter that does not include\nthe _id field to your find method: To view a runnable TypeScript example that includes a find method applying a\nprojection, see the\n Find a Document page. Collection find findOne Learn about the following TypeScript specific limitations of the Node.js driver: No type safety for dot notation references to nested instances of recursive types Depth limitations on type safety for mutually recursive types The Node.js driver cannot provide type safety within nested instances of\n recursive types referenced through dot notation. A recursive type is a type that references itself. You can update\nthe Pet interface\nto be recursive by allowing a pet to have its own pet. The following is the\nrecursive Pet interface: The following code snippet references a nested instance of the\n RecursivePet interface\nwith an incorrect type using dot notation, but the TypeScript compiler\ndoes not raise a type error: The following code snippet references a top-level instance of the\n RecursivePet interface with an incorrect type and raises a type error: The error raised by the preceding code snippet is as follows: If you must have type safety within nested instances of recursive types,\nyou must write your query or update without dot notation. To learn more about dot notation, see\n Dot Notation \nin the MongoDB manual. The Node.js driver does not traverse nested recursive types when\ntype checking dot notation keys to avoid hitting\nTypeScript's recursive depth limit. 
A mutually recursive type exists when two types contain a property that is of\nthe other's type. You can update the Pet \ninterface to be mutually recursive by allowing a pet to have a handler, and\ndefining a handler to have a pet. The following examples reference the mutually\nrecursive Pet and Handler interfaces: The Node.js driver provides type safety for mutually recursive types\nreferenced through dot notation up to a depth of eight. The following code\nsnippet assigns a string to a number and raises a type error because\nthe referenced property is at a depth of four: The error raised by the preceding code snippet is as follows: At a depth greater than or equal to eight, TypeScript compiles your code but no\nlonger type checks it. The following code assigns a string to a number \nproperty but does not cause a compilation error because the referenced property\nis at a depth of 10:", - "code": [ - { - "lang": "typescript", - "value": "interface Document {\n [key: string]: any;\n}" - }, - { - "lang": "typescript", - "value": "interface Pet {\n name: string;\n age: number;\n}\n\nconst database = client.db(\"\");\nconst collection = database.collection(\"\");\n" - }, - { - "lang": "typescript", - "value": "interface User {\n email: string;\n}\n\nconst database = client.db(\"\");\nconst myColl = db.collection(\"\");\nmyColl.find({ age: \"Accepts any type!\" });" - }, - { - "lang": "typescript", - "value": "interface ClassificationPet {\n name: string;\n age: number;\n classification: { genus: \"Canis\" | \"Felis\"; color: string };\n}" - }, - { - "lang": "typescript", - "value": "await myColl.findOneAndDelete({ \"classification.color\": false });" - }, - { - "lang": "typescript", - "value": "const filterPredicate: StrictFilter = { \"classification.color\": false };\nawait myColl.findOneAndDelete(filterPredicate);" - }, - { - "lang": "typescript", - "value": "const updateFilter: StrictUpdateFilter = { $set: { \"classification.color\": false } }\nawait pets.updateOne({}, updateFilter);" - }, - { - "lang": "typescript", - "value": "interface ClassificationPet {\n name: string;\n mealtimes: Mealtime[];\n}\n\ninterface Mealtime{\n time: string;\n amount: number;\n}" - }, - { - "lang": "typescript", - "value": "const mealCounter = 1;\n\nawait myColl.findOneAndUpdate(\n { name: \"Lassie\" },\n { $set: { [`mealtimes.${mealCounter}.time` as const]: '04:00 PM' } },\n);" - }, - { - "lang": "typescript", - "value": "interface IdPet {\n _id: ObjectId;\n name: string;\n age: number;\n}" - }, - { - "lang": "typescript", - "value": "const database = client.db(\"\");\nconst collection = db.collection>(\"\");\n\nmyColl.insertOne({\n name: \"Spot\",\n age: 2\n});" - }, - { - "lang": "typescript", - "value": "const database = client.db(\"\");\nconst collection = db.collection(\"\");\n\nconst document = await myColl.findOne({\n name: \"Spot\",\n});\nconst id : ObjectId = document._id;" - }, - { - "lang": "typescript", - "value": "interface IdNumberPet {\n _id: number;\n name: string;\n age: number;\n}\n\nconst database = client.db(\"\");\nconst collection = db.collection(\"\");\n\nconst document = await myColl.findOne({\n name: \"Spot\",\n});\nconst id : number = document._id;" - }, - { - "lang": "typescript", - "value": "const doc = await myColl.findOne(\n {},\n { projection: { _id: 0, name: 1 } }\n);\nconsole.log(doc._id.generationTime);" - }, - { - "lang": "typescript", - "value": "interface ProjectedDocument {\n name: string\n}\n\nconst doc = await myColl.findOne(\n {},\n { projection: { _id: 0, name: 1 } 
}\n);\n// Compile time error: Property '_id' does not exist on type 'ProjectedDocument'.\nconsole.log(doc._id.generationTime);" - }, - { - "lang": "typescript", - "value": "interface RecursivePet {\n pet?: RecursivePet;\n name: string;\n age: number;\n}" - }, - { - "lang": "typescript", - "value": "database\n .collection(\"\")\n .findOne({ \"pet.age\": \"Spot\" });" - }, - { - "lang": "typescript", - "value": "database\n .collection(\"\")\n .findOne({ pet: \"Spot\" });" - }, - { - "lang": "none", - "value": "index.ts(19,59): error TS2769: No overload matches this call.\nThe last overload gave the following error.\nType 'string' is not assignable to type 'Condition'." - }, - { - "lang": "typescript", - "value": "interface Pet {\n handler?: Handler;\n name: string;\n age: number;\n}\n\ninterface Handler {\n pet: Pet;\n name: string;\n}" - }, - { - "lang": "typescript", - "value": "database\n .collection(\"\")\n .findOne({'handler.pet.handler.pet.age': \"four\"});" - }, - { - "lang": "none", - "value": "index.ts(19,59): error TS2769: No overload matches this call.\nThe last overload gave the following error.\nType 'string' is not assignable to type 'Condition | undefined'." - }, - { - "lang": "typescript", - "value": "database\n .collection(\"\")\n .findOne({'handler.pet.handler.pet.handler.pet.handler.pet.handler.pet.age': \"four\"});" - } - ], - "preview": "In this guide, you can learn about the TypeScript features and limitations\nof the MongoDB Node.js driver. TypeScript is a strongly typed programming\nlanguage that compiles to JavaScript.", - "tags": "code example, node.js, static typing", - "facets": { - "genre": ["reference"], - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "fundamentals", - "title": "Fundamentals", - "headings": [], - "paragraphs": "Learn how to perform the following tasks using the Node.js driver in the\nFundamentals section: Connect to MongoDB Use the Stable API Authenticate with MongoDB Read from and Write to MongoDB Access Return Values Transform your Data Create and Manage Transactions Run a Database Command Create Indexes to Speed Up Queries Sort Using Collations Log Events in the Driver Monitor Driver Events Store and Retrieve Large Files in MongoDB Encrypt Fields from the Client Create and Query Time Series Collection Specify Type Parameters with TypeScript Specify BSON Serialization Settings", - "code": [], - "preview": null, - "tags": null, - "facets": { - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "", - "title": "MongoDB Node Driver", - "headings": [ - "Introduction", - "Quick Start", - "Quick Reference", - "What's New", - "Usage Examples", - "Fundamentals", - "Aggregation Tutorials", - "API", - "FAQ", - "Connection Troubleshooting", - "Issues & Help", - "Compatibility", - "Upgrade Driver Versions", - "Related Tools and Libraries", - "Object Document Mappers", - "Packages", - "Learn", - "Developer Hub", - "MongoDB University", - "Take the Following Free Online Courses Taught by MongoDB Instructors" - ], - "paragraphs": "Welcome to the documentation site for the official MongoDB Node.js driver.\nYou can add the driver to your application to work with MongoDB\nin JavaScript or TypeScript. For more information about downloading and\ninstalling the Node.js driver, see\n Download and Install in the\nQuick Start guide. 
You can connect using the Node.js driver for\ndeployments hosted in the following environments: MongoDB Atlas : The fully\nmanaged service for MongoDB deployments in the cloud MongoDB Enterprise : The\nsubscription-based, self-managed version of MongoDB MongoDB Community : The\nsource-available, free-to-use, and self-managed version of MongoDB Learn how to establish a connection to MongoDB Atlas and begin\nworking with data in the step-by-step Quick Start . See driver syntax examples for common MongoDB commands in the\n Quick Reference section. For a list of new features and changes in each version, see the\n What's New section. For fully runnable code snippets and explanations for common\nmethods, see the Usage Examples section. Learn how to perform the following tasks using the Node.js driver in the\nFundamentals section: Connect to MongoDB Use the Stable API Authenticate with MongoDB Read from and Write to MongoDB Access Return Values Transform your Data Create and Manage Transactions Run a Database Command Create Indexes to Speed Up Queries Sort Using Collations Log Events in the Driver Monitor Driver Events Store and Retrieve Large Files in MongoDB Encrypt Fields from the Client Create and Query Time Series Collection Specify Type Parameters with TypeScript Specify BSON Serialization Settings For step-by-step explanations of common\naggregation tasks, see the Aggregation Tutorials \nsection. For detailed information about classes and methods in the MongoDB\nNode.js driver, see the MongoDB Node.js driver API documentation . For answers to commonly asked questions about the MongoDB\nNode.js Driver, see the Frequently Asked Questions (FAQ) \nsection. For solutions to issues you might encounter when using the driver to connect to\na MongoDB deployment, see the Connection Troubleshooting section. Learn how to report bugs, contribute to the driver, and to find help in the\n Issues & Help section. For the compatibility tables that show the recommended Node.js driver\nversion for each MongoDB Server version, see the\n Compatibility section. Learn what changes you must make to your application to upgrade\ndriver versions in the Upgrade Driver Versions section. MongoDB and our partners provide several object-document mappers (ODMs) for Node.js that\nlet developers work with MongoDB data as objects. One popular ODM is Mongoose ,\nwhich helps enforce a semi-rigid schema at the application level and provides features\nto assist with data modeling and manipulation. Prisma , another ODM, helps\nensure data consistency by offering a type-safe database client and an intuitive schema. For more information about using ODMs with MongoDB, see the following resources: MongoDB ORMs, ODMs, and Libraries Mongoose official documentation Prisma official documentation You can install the following packages to expand the functionality of the Node.js driver: For information about each package's version compatibility, see the Component Support Matrix in the Node.js driver Github\nrepository. Package Description kerberos C++ extension for Node.js that provides support for Kerberos authentication mongodb-legacy Legacy Node.js driver with optional callback support Visit the Developer Hub and MongoDB University to learn more about the\nNode.js driver. The Developer Hub provides tutorials and social engagement for developers. To learn how to use MongoDB features with the Node.js driver, see the\n How To's and Articles page . 
To ask questions and engage in discussions with fellow developers using\nthe Node.js driver, see the Developer Community forums . MongoDB University provides free courses to teach everyone how to use MongoDB. Using MongoDB with Node.js Learn the essentials of Node.js application development with MongoDB. MongoDB Node.js Developer Path Gain a comprehensive understanding of Node.js application development, complex operations, interactions\nwith MongoDB Atlas datasets, and more.", - "code": [], - "preview": "Learn how to connect to and interact with data stored in MongoDB by using JavaScript or TypeScript with the Node.js driver.", - "tags": "node.js, object-relational, object-document", - "facets": { - "genre": ["reference"], - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "issues-and-help", - "title": "Issues & Help", - "headings": ["Bugs / Feature Requests", "Pull Requests"], - "paragraphs": "Our developer community is vibrant and highly engaged, with extensive experience using Node.js with MongoDB. Often, the quickest way to get support for general questions is through the\n MongoDB Community Forums . Refer to our support channels documentation for more information. To report a bug or to request a new feature in the Node.js driver,\nplease open a case in our issue management tool, JIRA: Bug reports in JIRA for the Node.js driver and the Core Server (SERVER) project are public . If you\u2019ve identified a security vulnerability in a driver or any other\nMongoDB project, please report it according to the instructions found in\nthe Create a Vulnerability Report . Create an account and login . Navigate to the NODE project . Click Create Issue . Please provide as much information as possible about the\nissue and the steps to reproduce it. We are happy to accept contributions to help improve the driver. We will review user\ncontributions to ensure they meet the standards of the codebase. Pull requests must pass\nthe travis.ci checks, include documentation, and include tests. To get started check out the source and work on a branch: To run the test suite, you must have a server topology running and provide the URI to the command.\nFor example, if you have a single server running at \"mongodb://localhost:27017\" , you can run the following: Note that the tests run on your feature are different depending on the type of topology\nthat you are running, such as for a standalone instance or replica set. There are many tools that can help you with setting up different topologies for local testing.\nSome examples are mtools and mongo-orchestration .", - "code": [ - { - "lang": "bash", - "value": "git clone https://github.com/mongodb/node-mongodb-native.git\ncd node-mongodb-native\nnpm install\ngit checkout -b myNewFeature" - }, - { - "lang": "bash", - "value": "MONGODB_URI=\"mongodb://localhost:27017\" npm test" - } - ], - "preview": "Our developer community is vibrant and highly engaged, with extensive experience using Node.js with MongoDB.", - "tags": null, - "facets": { - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "quick-reference", - "title": "Quick Reference", - "headings": ["Compatibility"], - "paragraphs": "This page shows the driver syntax for several MongoDB commands and links to\ntheir related reference and API documentation. 
You can use the Node.js driver to connect and execute commands for\ndeployments hosted in the following environments: MongoDB Atlas : The fully\nmanaged service for MongoDB deployments in the cloud MongoDB Enterprise : The\nsubscription-based, self-managed version of MongoDB MongoDB Community : The\nsource-available, free-to-use, and self-managed version of MongoDB To learn more about performing common CRUD operations in the Atlas UI for deployments hosted in MongoDB\nAtlas, see Create, View, Update, and Delete Documents . Command Syntax", - "code": [ - { - "lang": "js", - "value": "await coll.findOne({ title: 'Hamlet' });" - }, - { - "lang": "js", - "value": "{ title: 'Hamlet', type: 'movie', ... }" - }, - { - "lang": "js", - "value": "coll.find({ year: 2005 });" - }, - { - "lang": "js", - "value": "[\n { title: 'Christmas in Boston', year: 2005, ... },\n { title: 'Chicken Little', year: 2005, ... },\n ...\n]" - }, - { - "lang": "javascript", - "value": "await coll.insertOne({ title: 'Jackie Robinson' });" - }, - { - "lang": "javascript", - "value": "await coll.insertMany([\n { title: 'Dangal', rating: 'Not Rated' },\n { title: 'The Boss Baby', rating: 'PG' }\n ]);" - }, - { - "lang": "js", - "value": "await coll.updateOne(\n { title: 'Amadeus' },\n { $set: { 'imdb.rating': 9.5 } }\n);" - }, - { - "lang": "js", - "value": "{ title: 'Amadeus', imdb: { rating: 9.5, ... } }" - }, - { - "lang": "js", - "value": "await coll.updateMany(\n { year: 2001 },\n { $inc: { 'imdb.votes': 100 } }\n);" - }, - { - "lang": "js", - "value": "[\n { title: 'A Beautiful Mind', year: 2001, imdb: { votes: 826257, ... },\n { title: 'Shaolin Soccer', year: 2001, imdb: { votes: 65442, ... },\n ...\n]" - }, - { - "lang": "js", - "value": "await coll.updateOne(\n { title: 'Cosmos' },\n { $push: { genres: 'Educational' } }\n);" - }, - { - "lang": "js", - "value": "{ title: 'Cosmos', genres: [ 'Documentary', 'Educational' ] }" - }, - { - "lang": "js", - "value": "await coll.replaceOne(\n { name: 'Deli Llama', address: '2 Nassau St' },\n { name: 'Lord of the Wings', zipcode: 10001 }\n);" - }, - { - "lang": "js", - "value": "{ name: 'Lord of the Wings', zipcode: 10001 }" - }, - { - "lang": "javascript", - "value": "await coll.deleteOne({ title: 'Congo' });" - }, - { - "lang": "javascript", - "value": "await coll.deleteMany({ title: { $regex: /^Shark.*/ } });" - }, - { - "lang": "js", - "value": "await coll.bulkWrite([\n {\n insertOne: {\n document: {\n title: 'A New Movie',\n year: 2022\n }\n }\n },\n {\n deleteMany: {\n filter: { year: { $lt: 1970 } }\n }\n }\n]);" - }, - { - "lang": "js", - "value": "BulkWriteResult {\n result: {\n ...\n },\n ...\n}" - }, - { - "lang": "javascript", - "value": "coll.watch([ { $match: { year: { $gte: 2022 } } } ]);" - }, - { - "lang": "js", - "value": "const cursor = coll.find();\nfor await (const doc of cursor) {\n console.dir(doc);\n}" - }, - { - "lang": "js", - "value": "[\n { title: '2001: A Space Odyssey', ... },\n { title: 'The Sound of Music', ... },\n ...\n]" - }, - { - "lang": "js", - "value": "const cursor = coll.find();\nconst results = await cursor.toArray();" - }, - { - "lang": "js", - "value": "[\n { title: '2001: A Space Odyssey', ... },\n { title: 'The Sound of Music', ... },\n ...\n]" - }, - { - "lang": "js", - "value": "await coll.countDocuments({ year: 2000 });" - }, - { - "lang": "js", - "value": "618" - }, - { - "lang": "js", - "value": "await coll.distinct('year');" - }, - { - "lang": "js", - "value": "[ 1891, 1893, 1894, 1896, 1903, ... 
]" - }, - { - "lang": "js", - "value": "coll.find().limit(2);" - }, - { - "lang": "js", - "value": "[\n { title: 'My Neighbor Totoro', ... },\n { title: 'Am\u00e9lie', ... }\n]" - }, - { - "lang": "js", - "value": "coll.find({ title: { $regex: /^Rocky/} }, { skip: 2 });" - }, - { - "lang": "js", - "value": "[\n { title: 'Rocky III', ... },\n { title: 'Rocky IV', ... },\n { title: 'Rocky V'}, ... }\n]" - }, - { - "lang": "js", - "value": "coll.find().sort({ year: 1});" - }, - { - "lang": "js", - "value": "[\n { title: 'Newark Athlete', year: 1891, ... },\n { title: 'Blacksmith Scene', year: 1893, ...},\n { title: 'Dickson Experimental Sound Film', year: 1894},\n ...\n]" - }, - { - "lang": "js", - "value": "coll.find().project({ _id: 0, year: 1, imdb: 1 });" - }, - { - "lang": "js", - "value": "[\n { year: 2012, imdb: { rating: 5.8, votes: 230, id: 8256 }},\n { year: 1985, imdb: { rating: 7.0, votes: 447, id: 1654 }},\n ...\n]" - }, - { - "lang": "javascript", - "value": "await coll.createIndex({ title: 1, year: -1 });" - }, - { - "lang": "js", - "value": "// only searches fields with text indexes\ncoll.find({ $text: { $search: 'zissou' } });" - }, - { - "lang": "js", - "value": "[\n { title: 'The Life Aquatic with Steve Zissou', ... }\n]" - }, - { - "lang": "javascript", - "value": "\"dependencies\": {\n \"mongodb\": \"^6.8\",\n ...\n}" - } - ], - "preview": "See Node.js driver code examples of frequently-used MongoDB commands and links to their related reference and API documentation.", - "tags": "node.js, code example", - "facets": { - "genre": ["reference"], - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "quick-start/connect-to-mongodb", - "title": "Connect to MongoDB", - "headings": [ - "Create your Node.js Application", - "Assign the Connection String", - "Run your Node.js Application" - ], - "paragraphs": "After you complete these steps, you have a working application that\nuses the driver to connect to your MongoDB deployment, runs a query on\nthe sample data, and prints out the result. Create a file to contain your application called index.js in your\n node_quickstart project directory. Copy and paste the following code into the index.js file: Replace the placeholder with the\nconnection string that you copied from the Create a Connection String \nstep of this guide. In your shell, run the following command to start this application: The output includes details of the retrieved movie document: If you encounter an error or see no output, check whether you specified the\nproper connection string in the index.js file, and that you loaded the\nsample data. 
If you run into issues on this step, ask for help in the\n MongoDB Community Forums \nor submit feedback by using the Rate this page \ntab on the right or bottom right side of this page.", - "code": [ - { - "lang": "js", - "value": "const { MongoClient } = require(\"mongodb\");\n\n// Replace the uri string with your connection string.\nconst uri = \"\";\n\nconst client = new MongoClient(uri);\n\nasync function run() {\n try {\n const database = client.db('sample_mflix');\n const movies = database.collection('movies');\n\n // Query for a movie that has the title 'Back to the Future'\n const query = { title: 'Back to the Future' };\n const movie = await movies.findOne(query);\n\n console.log(movie);\n } finally {\n // Ensures that the client will close when you finish/error\n await client.close();\n }\n}\nrun().catch(console.dir);" - }, - { - "lang": "none", - "value": "node index.js" - }, - { - "lang": "none", - "value": "{\n _id: ...,\n plot: 'A young man is accidentally sent 30 years into the past...',\n genres: [ 'Adventure', 'Comedy', 'Sci-Fi' ],\n ...\n title: 'Back to the Future',\n ...\n}" - } - ], - "preview": "After you complete these steps, you have a working application that\nuses the driver to connect to your MongoDB deployment, runs a query on\nthe sample data, and prints out the result.", - "tags": null, - "facets": { - "genre": ["tutorial"], - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "quick-start/create-a-connection-string", - "title": "Create a Connection String", - "headings": [ - "Find your MongoDB Atlas Connection String", - "Copy your Connection String", - "Update the Placeholders" - ], - "paragraphs": "You can connect to your MongoDB deployment by providing a\n connection URI , also called a connection string , which\ninstructs the driver on how to connect to a MongoDB deployment\nand how to behave while connected. The connection string includes the hostname or IP address and\nport of your deployment, the authentication mechanism, user credentials\nwhen applicable, and connection options. To connect to an instance or deployment not hosted on Atlas, see\n Other Ways to Connect to MongoDB . After completing these steps, you have a connection string that\ncontains your database username and password. To retrieve your connection string for the deployment that\nyou created in the previous step ,\nlog into your Atlas account and navigate to the\n Database section and click the Connect button\nfor your new deployment. Proceed to the Connect your application section and select\n"Node.js" from the Driver selection menu and the version\nthat best matches the version you installed from the Version \nselection menu. Select the Password (SCRAM) authentication mechanism. Deselect the Include full driver code example to view\nthe connection string. Click the button on the right of the connection string to copy it to\nyour clipboard as shown in the following screenshot: Paste this connection string into a file in your preferred text editor\nand replace the \"\" and \"\" placeholders with\nyour database user's username and password. Save this file to a safe location for use in the next step. 
If you run into issues on this step, ask for help in the\n MongoDB Community Forums \nor submit feedback by using the Rate this page \ntab on the right or bottom right side of this page.", - "code": [], - "preview": "You can connect to your MongoDB deployment by providing a\nconnection URI, also called a connection string, which\ninstructs the driver on how to connect to a MongoDB deployment\nand how to behave while connected.", - "tags": null, - "facets": { - "genre": ["tutorial"], - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "quick-start/create-a-deployment", - "title": "Create a MongoDB Deployment", - "headings": [ - "Create a Free MongoDB deployment on Atlas", - "Save your Credentials" - ], - "paragraphs": "You can create a free tier MongoDB deployment on MongoDB Atlas\nto store and manage your data. MongoDB Atlas hosts and manages\nyour MongoDB database in the cloud. After you complete these steps, you have a new free tier MongoDB\ndeployment on Atlas, database user credentials, and sample data loaded\nin your database. Complete the Get Started with Atlas \nguide to set up a new Atlas account and load sample data into a new free\ntier MongoDB deployment. After you create your database user, save that user's\nusername and password to a safe location for use in an upcoming step. If you run into issues on this step, ask for help in the\n MongoDB Community Forums \nor submit feedback by using the Rate this page \ntab on the right or bottom right side of this page.", - "code": [], - "preview": "You can create a free tier MongoDB deployment on MongoDB Atlas\nto store and manage your data. MongoDB Atlas hosts and manages\nyour MongoDB database in the cloud.", - "tags": null, - "facets": { - "genre": ["tutorial"], - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "quick-start/download-and-install", - "title": "Download and Install", - "headings": [ - "Install Node and npm", - "Create a Project Directory", - "Install the Node.js Driver" - ], - "paragraphs": "After you complete these steps, you have Node.js and npm installed\nand a new project directory with the driver dependencies installed. Ensure you have Node.js v16 or later and\nnpm (Node Package Manager) installed in your development environment. For information on how to install Node.js and npm, see\n downloading and installing Node.js and npm . In your shell, run the following command to create a\ndirectory called node_quickstart for this project: Run the following command to navigate into the project\ndirectory: Run the following command to initialize your Node.js project: When this command successfully completes, you have a package.json \nfile in your node_quickstart directory. 
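Initializing with the -y flag accepts the default value for every prompt, so the generated package.json uses defaults such as the directory name, node_quickstart, for the package name and 1.0.0 for the version.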
Run the following command in your shell to install\nthe driver in your project directory: This command performs the following actions: Downloads the mongodb package and the dependencies it requires Saves the package in the node_modules directory Records the dependency information in the package.json file If you run into issues on this step, ask for help in the\n MongoDB Community Forums \nor submit feedback by using the Rate this page \ntab on the right or bottom right side of this page.", - "code": [ - { - "lang": "bash", - "value": "mkdir node_quickstart" - }, - { - "lang": "bash", - "value": "cd node_quickstart" - }, - { - "lang": "bash", - "value": "npm init -y" - }, - { - "lang": "bash", - "value": "npm install mongodb@6.8" - } - ], - "preview": "After you complete these steps, you have Node.js and npm installed\nand a new project directory with the driver dependencies installed.", - "tags": null, - "facets": { - "genre": ["tutorial"], - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "quick-start/next-steps", - "title": "Next Steps", - "headings": [], - "paragraphs": "Congratulations on completing the quick start tutorial! In this tutorial, you created a Node.js application that\nconnects to a MongoDB deployment hosted on MongoDB Atlas\nand retrieves a document that matches a query. Learn more about the MongoDB Node.js driver from the following resources: Discover how to perform read and write operations in the\n CRUD Operations section. See examples of frequently-used operations in the\n Usage Examples section.", - "code": [], - "preview": "Congratulations on completing the quick start tutorial!", - "tags": null, - "facets": { - "genre": ["tutorial"], - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "quick-start", - "title": "Node Driver Quick Start", - "headings": ["Overview"], - "paragraphs": "This guide shows you how to create an application that uses the\nMongoDB Node.js driver to connect to a MongoDB cluster hosted on MongoDB Atlas. If\nyou prefer to connect to MongoDB using a different driver or programming\nlanguage, see our list of official drivers . The Node.js driver is a library of functions that you can use to connect\nto and communicate with MongoDB. MongoDB Atlas is a fully managed cloud database service that hosts your\nMongoDB deployments. You can create your own free (no credit card\nrequired) MongoDB Atlas deployment by following the steps in this guide. Follow the steps in this guide to connect a sample Node.js application to\na MongoDB Atlas deployment.", - "code": [], - "preview": "Learn how to create an app to connect to MongoDB deployment by using the Node.js driver.", - "tags": "node.js", - "facets": { - "genre": ["tutorial"], - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "upgrade", - "title": "Upgrade Driver Versions", - "headings": [ - "Overview", - "How to Upgrade", - "Breaking Changes", - "Version 6.0 Breaking Changes", - "Version 5.0 Breaking Changes", - "Version 4.0 Breaking Changes", - "Server Release Compatibility Changes", - "Version 4.2 Server Release Support Changes" - ], - "paragraphs": "On this page, you can learn how to upgrade your driver to a new version. This page also\nincludes the changes you must make to your application to upgrade your driver\nwithout losing functionality, if applicable. 
Before you upgrade, perform the following actions: To upgrade your driver version, run the following command in your application's\ndirectory: To upgrade to a different version of the driver, replace the information after the\n @ symbol with your preferred version number. For more information about the\n npm install command, see the npm-install \nnpm documentation. Ensure the new driver version is compatible with the MongoDB Server version\nyour application connects to and the version of Node.js that your\napplication runs on. See the Compatibility \npage for this information. Address any breaking changes between the version of the driver\nyour application uses now and your planned upgrade version in the\n Breaking Changes section of this guide. To learn\nmore about the MongoDB Server release compatibility changes, see the\n Server Release Compatibility Changes section. You can minimize the amount of changes that you must make to your\napplication when upgrading driver versions by using the\n Stable API . A breaking change is a modification in a convention or behavior in\na specific version of the driver that may prevent your application from\nworking as expected. The breaking changes in this section are categorized by the major\nversion releases that introduced them. When upgrading driver versions,\naddress all the breaking changes between your current version and the\nplanned upgrade version. For example, if you are upgrading the driver\nfrom v3.x to v5.x, address all breaking changes listed under v4.0 and\nv5.0. Version 6.0 of the Node.js driver requires Node.js v16.20.1 or later. The driver removes support for the addUser() helper command. Use the\n createUser MongoDB Shell command instead. The driver removes support for the collStats operation. Use the\n $collStats aggregation operator\ninstead. The driver removes all the deprecated ssl -prefixed options and the\n tlsCertificateFile option in the MongoClientOptions type.\nCreate a SecureContext object or set the tls -prefixed options\nin your MongoClientOptions instance instead. The driver reads files set in the tlsCAFile and\n tlsCertificateKeyFile connection options when you call the\n MongoClient.connect() method, not when you create the\n MongoClient instance. The driver removes the keepAlive and keepAliveInitialDelay connection\noptions. The value of keepAlive is permanently set to true and the\nvalue of keepAliveInitialDelay is set to 300000 milliseconds (300\nseconds). The Db.command() method accepts only options that are not related\nto a specific command. To learn more about these options, see the\n Command Options section of the Run a\nCommand guide. If you add mongodb-client-encryption as a dependency,\nthe major version number must match that of the Node.js driver. For example,\nNode.js driver v6.x.x requires mongodb-client-encryption v6.x.x. Automatic Encryption methods are now in the Node.js driver. You must\nimport these methods from the driver instead of from\n mongodb-client-encryption . Removed the ObjectId constructor that accepted a 12-character string. Modified abortTransaction() and commitTransaction() methods to return\n null instead of the raw command results. Removed connection option helpers that accepted values other than true \nor false as booleans. You must provide either true or false values in\nthe connection string or to the MongoClient constructor. Removed the Binary BSON type constructor that accepted a string. The Binary.write() method no longer accepts a string to write to the binary\nBSON object. 
The ClientEncryption API returns promises instead of callbacks. The socks package, which enables SOCKS5 proxy support, is a\npeer-optional dependency. You must install the package to enable\nSOCKS5 in your application. To learn more, see Enable SOCKS5 Proxy Support . If you start a session on a client, then pass that session to a\ndifferent client, the driver throws an error when you\nperform any operations in the session. The includeResultMetadata option for compound operation methods is\n false by default. See the Built-in Methods \nsection of the Compound Operations guide for more information. The withSession() method returns the value that the provided\nfunction returns. In previous driver versions, this method returns\n undefined . The withTransaction() method returns the value that the\ncallback returns. In previous driver versions, this method\nreturns the server command response, which varies depending on the MongoDB\nServer version or type that the driver connects to. To learn more\nabout transactions, see the Perform a Transaction usage\nexamples and the Transactions guide. Raised the optional kerberos dependency minimum version to 2.0.1 and\nremoved support for version 1.x. Raised the optional zstd dependency minimum version to 1.1.0. The driver is no longer compatible with Node.js v12 or earlier. If you want to use\nthis version of the driver, you must use Node.js v14.20.1 or greater. The driver removes support for callbacks in favor of a promise-based API.\nThe following list provides some strategies for callback users to adopt this\nversion: For more information about these strategies, see\n the v5.0 changelog . Migrate to the promise-based API (recommended) Use the promise-based API and util.callbackify Add mongodb-legacy to continue using callbacks The driver removes support for the Collection.insert() ,\n Collection.update() , and Collection.remove() helper methods.\nThe following list provides instructions on how to replace the\nfunctionality of the removed methods: Migrate from Collection.insert() to insertOne() or insertMany() Migrate from Collection.update() to updateOne() or updateMany() Migrate from Collection.remove() to deleteOne() or deleteMany() The driver no longer includes AWS SDK modules by default. The driver no longer automatically imports the bson-ext package. The driver removes support for custom Promise libraries. The driver no\nlonger supports the promiseLibrary option of MongoClient and the Promise.set \nexport that allows specifying a custom Promise library. The driver removes support for the Collection.mapReduce() helper. The BulkWriteResult type no longer has the publicly enumerable\n result property. The following types, options, and methods have been removed: BulkResult.lastOp() method opTime property of BulkResult BulkWriteOptions.keepGoing option WriteConcernError.err() method AddUserOptions.digestPassword option Kerberos gssapiCanonicalizeHostName option slaveOk options and method removed in favor of secondaryOk ObjectID type removed in favor of ObjectId AsyncIterator interface removed in favor of AsyncGenerator For more information about these changes, see\n the v4.0 changelog . The driver is no longer compatible with Node.js v12.8 or earlier. If you\nwant to use this version of the driver, you must use Node.js v12.9 or greater. Cursor types no longer extend Readable directly. You cannot use a ChangeStream instance as an iterator after using\nit as an EventEmitter . 
You also cannot do the reverse\u2014using an\n EventEmitter instance as an iterator after using it as a ChangeStream . The following methods no longer accept a callback parameter: Collection.find() Collection.aggregate() Db.aggregate() The default value of the maxPoolSize connection option is now\n 100 . The driver no longer supports the gssapiServiceName Kerberos\noption. Use authMechanismProperties.SERVICE_NAME instead. The driver no longer accepts non-boolean types, such as 0 or\n 1 , for boolean options. The db.collection type no longer accepts a callback. The Db type is no longer an EventEmitter . You can listen to\nany events directly from the MongoClient instance. The driver removes support for the Collection.group() helper. The driver no longer includes the deprecated GridStore API. A server release compatibility change is a modification\nto the driver that discontinues support for a set of\nMongoDB Server versions. The driver discontinues support for a MongoDB Server version after it reaches\nend-of-life (EOL). To learn more about the MongoDB support for EOL products,\nsee the Legacy Support Policy . The v4.2 driver drops support for MongoDB Server v3.4 and earlier.\nTo use the v4.2 driver, your MongoDB Server must be v3.6 or later. To learn\nhow to upgrade your MongoDB Server deployment, see Release\nNotes in the MongoDB Server manual.", - "code": [ - { - "lang": "bash", - "value": "npm install mongodb@6.8" - } - ], - "preview": "On this page, you can learn how to upgrade your driver to a new version. This page also\nincludes the changes you must make to your application to upgrade your driver\nwithout losing functionality, if applicable.", - "tags": "backwards compatibility, update", - "facets": { - "genre": ["reference"], - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "usage-examples/bulkWrite", - "title": "Perform Bulk Operations", - "headings": ["Example"], - "paragraphs": "The bulkWrite() method performs batch write operations against a\n single collection. This method reduces the number of network round trips from\nyour application to the server which therefore increases the throughput and\nperformance. Bulk writes return a collection of results for all operations\nonly after all operations passed to the method complete. You can specify one or more of the following write operations in\n bulkWrite() : The bulkWrite() method accepts the following parameters: If you create an index with a unique index \nconstraint, you might encounter a duplicate key write error during an\noperation in the following format: Similarly, if you attempt to perform a bulk write against a collection\nthat uses schema validation , you may\nencounter warnings or errors related to the formatting of inserted or\nmodified documents. insertOne updateOne updateMany deleteOne deleteMany replaceOne operations : specifies the bulk write operations to\nperform. Pass each operation to bulkWrite() as an object in\nan array. For examples that show the syntax for each write operation, see\nthe bulkWrite API documentation . options : optional settings that affect the execution\nof the operation, such as whether the write operations executes in\nsequential order and the write concern. By default, MongoDB executes bulk write operations one-by-one in the specified order\n(serially). During an ordered bulk write, if an error occurs during the processing of an\noperation, MongoDB returns without processing the remaining operations in the list. 
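For example, if the second of five queued operations raises a duplicate key error during an ordered bulk write, the driver reports the error and the last three operations never run.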
In\ncontrast, when ordered is false , MongoDB continues to process remaining write\noperations in the list. Unordered operations are theoretically faster since MongoDB can\nexecute them in parallel, but only use them if the writes do not depend on order. The following code sample performs a bulk write operation on the\n theaters collection in the sample_mflix database. The example call\nto bulkWrite() includes examples of insertOne , updateMany , and\n deleteOne write operations: Running the preceding example results in the following output: You can use this example to connect to an instance of MongoDB\nand interact with a database that contains sample data. To learn more about connecting to your MongoDB\ninstance and loading a sample dataset, see the Usage Examples\nguide .", - "code": [ - { - "lang": "sh", - "value": "Error during bulkWrite, BulkWriteError: E11000 duplicate key error collection: ..." - }, - { - "lang": "javascript", - "value": "BulkWriteResult {\n insertedCount: 2,\n matchedCount: 1,\n modifiedCount: 1,\n deletedCount: 0,\n upsertedCount: 0,\n upsertedIds: {},\n insertedIds: {\n '0': new ObjectId(\"...\"),\n '1': new ObjectId(\"...\")\n }\n}" - }, - { - "lang": "javascript", - "value": "// Bulk write operation\n\n// Import MongoClient from the MongoDB node driver package\nconst { MongoClient } = require(\"mongodb\");\n\n// Replace the uri string with your MongoDB deployment's connection string\nconst uri = \"\";\n\nconst client = new MongoClient(uri);\n\nasync function run() {\n try {\n const database = client.db(\"sample_mflix\");\n const theaters = database.collection(\"theaters\");\n\n // Insert a new document into the \"theaters\" collection\n const result = await theaters.bulkWrite([\n {\n insertOne: {\n document: {\n location: {\n address: {\n street1: \"3 Main St.\",\n city: \"Anchorage\",\n state: \"AK\",\n zipcode: \"99501\",\n },\n },\n },\n },\n },\n {\n insertOne: {\n document: {\n location: {\n address: {\n street1: \"75 Penn Plaza\",\n city: \"New York\",\n state: \"NY\",\n zipcode: \"10001\",\n },\n },\n },\n },\n },\n {\n // Update documents that match the specified filter\n updateMany: {\n filter: { \"location.address.zipcode\": \"44011\" },\n update: { $set: { is_in_ohio: true } },\n upsert: true,\n },\n },\n {\n // Delete a document that matches the specified filter\n deleteOne: { filter: { \"location.address.street1\": \"221b Baker St\" } },\n },\n ]);\n // Log the result of the bulk write operation \n console.log(result);\n } finally {\n // Close the database connection when the operations are completed or if an error occurs\n await client.close();\n }\n}\nrun().catch(console.dir);\n" - }, - { - "lang": "typescript", - "value": "import { MongoClient } from \"mongodb\";\n\n// Replace the uri string with your MongoDB deployment's connection string.\nconst uri = \"\";\n\nconst client = new MongoClient(uri);\n\ninterface Address {\n street1: string;\n city: string;\n state: string;\n zipcode: string;\n}\n\ninterface Theater {\n location: { address: Address };\n is_in_ohio?: boolean;\n}\n\nasync function run() {\n try {\n const database = client.db(\"sample_mflix\");\n const theaters = database.collection(\"theaters\");\n\n const result = await theaters.bulkWrite([\n {\n insertOne: {\n document: {\n location: {\n address: {\n street1: \"3 Main St.\",\n city: \"Anchorage\",\n state: \"AK\",\n zipcode: \"99501\",\n },\n },\n },\n },\n },\n {\n insertOne: {\n document: {\n location: {\n address: {\n street1: \"75 Penn Plaza\",\n city: \"New York\",\n state: 
\"NY\",\n zipcode: \"10001\",\n },\n },\n },\n },\n },\n {\n updateMany: {\n // Important: You lose type safety when you use dot notation in queries\n filter: { \"location.address.zipcode\": \"44011\" },\n update: { $set: { is_in_ohio: true } },\n upsert: true,\n },\n },\n {\n deleteOne: {\n filter: { \"location.address.street1\": \"221b Baker St\" },\n },\n },\n ]);\n\n console.log(result);\n } finally {\n await client.close();\n }\n}\nrun().catch(console.dir);\n" - } - ], - "preview": "The bulkWrite() method performs batch write operations against a\nsingle collection. This method reduces the number of network round trips from\nyour application to the server which therefore increases the throughput and\nperformance. Bulk writes return a collection of results for all operations\nonly after all operations passed to the method complete.", - "tags": null, - "facets": { - "genre": ["tutorial"], - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "usage-examples/changeStream", - "title": "Watch for Changes", - "headings": [ - "Open a Change Stream", - "Examples", - "Iteration", - "Listener Function" - ], - "paragraphs": "You can watch for changes in MongoDB using the watch() method on the\nfollowing objects: For each object, the watch() method opens a change stream to\nemit change event documents when they occur. The watch() method optionally takes an aggregation pipeline which consists of an array of aggregation stages \nas the first parameter. The aggregation stages filter and transform the change events. In the following snippet, the $match stage matches all change event documents with a runtime value of less than\n15, filtering all others out. The watch() method accepts an options object as the second parameter. Refer to the links at the end of this\nsection for more information on the settings you can configure with this object. The watch() method returns an instance of a ChangeStream . You can read events from\nchange streams by iterating over them or listening for events. Select the tab that corresponds to the way you want to\nread events from the change stream: Collection Database MongoClient Starting in version 4.12, ChangeStream objects are async\niterables. With this change, you can use for-await loops to\nretrieve events from an open change stream: You can call methods on the ChangeStream object such as: hasNext() to check for remaining documents in the stream next() to request the next document in the stream close() to close the ChangeStream You can attach listener functions to the ChangeStream object\nby calling the on() method. This method is inherited from the\nJavascript EventEmitter class. Pass the string \"change\" as\nthe first parameter and your listener function as the second parameter as shown below: The listener function triggers when a change event is emitted. You\ncan specify logic in the listener to process the change event document\nwhen it is received. You can control the change stream by calling pause() to stop emitting events or resume() to continue to emit events. To stop processing change events, call the close() method on the\n ChangeStream instance. This closes the change stream and frees resources. Using a ChangeStream in EventEmitter and Iterator mode\nconcurrently is not supported by the driver and causes an error. This\nis to prevent undefined behavior, where the driver cannot guarantee\nwhich consumer receives documents first. 
The following example opens a change stream on the haikus collection in\nthe insertDB database and prints change events as they occur: When you run this code and then make a change to the haikus \ncollection, such as performing an insert or delete operation, you can\nsee the change event document printed in your terminal. For example, if you insert a document to the collection, the code prints\nthe following output: You can use this example to connect to an instance of MongoDB\nand interact with a database that contains sample data. To learn more about connecting to your MongoDB\ninstance and loading a sample dataset, see the Usage Examples\nguide . The JavaScript and TypeScript code snippets above are identical. There are no\nTypeScript specific features of the driver relevant to this use case. Change events that contain information on update operations only return the modified\nfields by default rather than the full updated document. You can configure\nyour change stream to also return the most current version of the document\nby setting the fullDocument field of the options object to\n \"updateLookup\" as follows: The following example opens a change stream on the haikus collection in\nthe insertDB database. Let's create a listener function to receive and\nprint change events that occur on the collection. First, open the change stream on the collection and then define a listener\non the change stream using the on() method. Once you set the\nlistener, generate a change event by performing a change to the collection. To generate the change event on the collection, let's use the insertOne() \nmethod to add a new document. Since insertOne() may run before the\nlistener function can register, we use a timer, defined as\n simulateAsyncPause to wait 1 second before executing the insert. We also use simulateAsyncPause after the insertion of the document.\nThis provides ample time for the listener function to receive the change\nevent and for the listener to complete its execution before\nclosing the ChangeStream instance using the close() method. Visit the following resources for more material on the classes and\nmethods mentioned on this page: The timers used in this example are only for demonstration\npurposes. They make sure that there is enough time to register\nthe listener and have the listener process the change event before\nexiting. The JavaScript and TypeScript code snippets above are identical. There are no\nTypeScript specific features of the driver relevant to this use case. 
Change streams Change events Aggregation pipeline Aggregation stages ChangeStream class API documentation Collection.watch() , Db.watch() , MongoClient.watch() API documentation", - "code": [ - { - "lang": "javascript", - "value": "const pipeline = [ { $match: { runtime: { $lt: 15 } } } ];\nconst changeStream = myColl.watch(pipeline);" - }, - { - "lang": "js", - "value": "for await (const change of changeStream) {\n console.log(\"Received change: \", change);\n}" - }, - { - "lang": "javascript", - "value": "changeStream.on(\"change\", (changeEvent) => { /* your listener function */ });" - }, - { - "lang": "javascript", - "value": "changeStream.close();" - }, - { - "lang": "none", - "value": "Received change:\n{\n _id: {\n _data: '...'\n },\n operationType: 'insert',\n clusterTime: new Timestamp({ t: 1675800603, i: 31 }),\n fullDocument: {\n _id: new ObjectId(\"...\"),\n ...\n },\n ns: { db: 'insertDB', coll: 'haikus' },\n documentKey: { _id: new ObjectId(\"...\") }\n}" - }, - { - "lang": "javascript", - "value": "// Watch for changes in a collection by using a change stream\nimport { MongoClient } from \"mongodb\";\n\n// Replace the uri string with your MongoDB deployment's connection string.\nconst uri = \"\";\n\nconst client = new MongoClient(uri);\n\n// Declare a variable to hold the change stream\nlet changeStream;\n\n// Define an asynchronous function to manage the change stream\nasync function run() {\n try {\n const database = client.db(\"insertDB\");\n const haikus = database.collection(\"haikus\");\n\n // Open a Change Stream on the \"haikus\" collection\n changeStream = haikus.watch();\n\n // Print change events as they occur\n for await (const change of changeStream) {\n console.log(\"Received change:\\n\", change);\n }\n // Close the change stream when done\n await changeStream.close();\n \n } finally {\n // Close the MongoDB client connection\n await client.close();\n }\n}\nrun().catch(console.dir);\n" - }, - { - "lang": "javascript", - "value": "// Watch for changes in a collection by using a change stream\nimport { MongoClient } from \"mongodb\";\n\n// Replace the uri string with your MongoDB deployment's connection string.\nconst uri = \"\";\n\nconst client = new MongoClient(uri);\n\n// Declare a variable to hold the change stream\nlet changeStream;\n\n// Define an asynchronous function to manage the change stream\nasync function run() {\n try {\n const database = client.db(\"insertDB\");\n const haikus = database.collection(\"haikus\");\n\n // Open a Change Stream on the \"haikus\" collection\n changeStream = haikus.watch();\n\n // Print change events as they occur\n for await (const change of changeStream) {\n console.log(\"Received change:\\n\", change);\n }\n // Close the change stream when done\n await changeStream.close();\n \n } finally {\n // Close the MongoDB client connection\n await client.close();\n }\n}\nrun().catch(console.dir);\n" - }, - { - "lang": "javascript", - "value": "const options = { fullDocument: \"updateLookup\" };\n// This could be any pipeline.\nconst pipeline = [];\n\nconst changeStream = myColl.watch(pipeline, options);" - }, - { - "lang": "javascript", - "value": "/* Change stream listener */\n\nimport { MongoClient } from \"mongodb\";\n\n// Replace the uri string with your MongoDB deployment's connection string\nconst uri = \"\";\n\nconst client = new MongoClient(uri);\n\nconst simulateAsyncPause = () =>\n new Promise(resolve => {\n setTimeout(() => resolve(), 1000);\n });\n\nlet changeStream;\nasync function run() {\n try {\n const database 
= client.db(\"insertDB\");\n const haikus = database.collection(\"haikus\");\n\n // Open a Change Stream on the \"haikus\" collection\n changeStream = haikus.watch();\n\n // Set up a change stream listener when change events are emitted\n changeStream.on(\"change\", next => {\n // Print any change event\n console.log(\"received a change to the collection: \\t\", next);\n });\n\n // Pause before inserting a document\n await simulateAsyncPause();\n\n // Insert a new document into the collection\n await myColl.insertOne({\n title: \"Record of a Shriveled Datum\",\n content: \"No bytes, no problem. Just insert a document, in MongoDB\",\n });\n\n // Pause before closing the change stream\n await simulateAsyncPause();\n\n // Close the change stream and print a message to the console when it is closed\n await changeStream.close(); \n console.log(\"closed the change stream\");\n } finally {\n // Close the database connection on completion or error\n await client.close();\n }\n}\nrun().catch(console.dir);" - }, - { - "lang": "javascript", - "value": "/* Change stream listener */\n\nimport { MongoClient } from \"mongodb\";\n\n// Replace the uri string with your MongoDB deployment's connection string\nconst uri = \"\";\n\nconst client = new MongoClient(uri);\n\nconst simulateAsyncPause = () =>\n new Promise(resolve => {\n setTimeout(() => resolve(), 1000);\n });\n\nlet changeStream;\nasync function run() {\n try {\n const database = client.db(\"insertDB\");\n const haikus = database.collection(\"haikus\");\n\n // Open a Change Stream on the \"haikus\" collection\n changeStream = haikus.watch();\n\n // Set up a change stream listener when change events are emitted\n changeStream.on(\"change\", next => {\n // Print any change event\n console.log(\"received a change to the collection: \\t\", next);\n });\n\n // Pause before inserting a document\n await simulateAsyncPause();\n\n // Insert a new document into the collection\n await myColl.insertOne({\n title: \"Record of a Shriveled Datum\",\n content: \"No bytes, no problem. Just insert a document, in MongoDB\",\n });\n\n // Pause before closing the change stream\n await simulateAsyncPause();\n\n // Close the change stream and print a message to the console when it is closed\n await changeStream.close(); \n console.log(\"closed the change stream\");\n } finally {\n // Close the database connection on completion or error\n await client.close();\n }\n}\nrun().catch(console.dir);" - } - ], - "preview": "You can watch for changes in MongoDB using the watch() method on the\nfollowing objects:", - "tags": null, - "facets": { - "genre": ["tutorial"], - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "usage-examples/command", - "title": "Run a Command", - "headings": ["Example"], - "paragraphs": "You can execute database commands by using the\n command() method on a Db \ninstance. You can specify a command and options in a document. To run the\ncommand, pass this document to the command() method. To see a full\nlist of database commands, see Database Commands in the Server manual. You can specify optional command behavior by passing a\n RunCommandOptions object to the command() method. To learn more\nabout the supported options, see the\n Db.command() API documentation . Use the MongoDB Shell for administrative tasks instead of\nthe Node.js driver whenever possible. 
Running the preceding command, you see the following output: You can use this example to connect to an instance of MongoDB\nand interact with a database that contains sample data. To learn more about connecting to your MongoDB\ninstance and loading a sample dataset, see the Usage Examples\nguide . The JavaScript and TypeScript code snippets above are identical. There are no\nTypeScript specific features of the driver relevant to this use case.", - "code": [ - { - "lang": "javascript", - "value": "{\n db: 'sample_mflix',\n collections: 6,\n views: 0,\n objects: 75620,\n ...\n}" - }, - { - "lang": "javascript", - "value": "/* Run a database command */\n\nimport { MongoClient } from \"mongodb\";\n\n// Replace the uri string with your MongoDB deployment's connection string\nconst uri = \"\";\n\nconst client = new MongoClient(uri);\n\nasync function run() {\n try {\n // Get the \"sample_mflix\" database\n const db = client.db(\"sample_mflix\");\n\n // Find and print the storage statistics for the \"sample_mflix\" database using the 'dbStats' command\n const result = await db.command({\n dbStats: 1,\n });\n console.log(result);\n } finally {\n // Close the database connection on completion or error\n await client.close();\n }\n}\nrun().catch(console.dir);\n" - }, - { - "lang": "javascript", - "value": "/* Run a database command */\n\nimport { MongoClient } from \"mongodb\";\n\n// Replace the uri string with your MongoDB deployment's connection string\nconst uri = \"\";\n\nconst client = new MongoClient(uri);\n\nasync function run() {\n try {\n // Get the \"sample_mflix\" database\n const db = client.db(\"sample_mflix\");\n\n // Find and print the storage statistics for the \"sample_mflix\" database using the 'dbStats' command\n const result = await db.command({\n dbStats: 1,\n });\n console.log(result);\n } finally {\n // Close the database connection on completion or error\n await client.close();\n }\n}\nrun().catch(console.dir);\n" - } - ], - "preview": "You can execute database commands by using the\ncommand() method on a Db\ninstance.", - "tags": "code example, multiple, modify, customize, debug", - "facets": { - "genre": ["tutorial"], - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "usage-examples/count", - "title": "Count Documents", - "headings": ["Example"], - "paragraphs": "The Node.js driver provides two methods for counting documents in a\ncollection: estimatedDocumentCount() is faster than countDocuments() because\nthe estimation uses the collection's metadata rather than scanning the\ncollection. In contrast, countDocuments() takes longer to return, but\nprovides an accurate count of the number of documents and supports\nspecifying a filter. Choose the appropriate method for your workload. To specify which documents you wish to count, countDocuments() \naccepts a query parameter.\n countDocuments() counts the documents that match the specified query. countDocuments() and estimatedDocumentCount() support optional\nsettings that affect the method's execution. Refer to the reference\ndocumentation for each method for more information. collection.countDocuments() returns the number of documents in\nthe collection that match the specified query. If you specify an empty\nquery document, countDocuments() returns the total number of\ndocuments in the collection. collection.estimatedDocumentCount() returns an\n estimation of the number of documents in the collection based on\ncollection metadata. 
You can improve performance when using countDocuments() to return the\ntotal number of documents in a collection by avoiding a collection scan. To\ndo this, use a hint to take\nadvantage of the built-in index on the _id field. Use this technique only\nwhen calling countDocuments() with an empty query parameter. The following example estimates the number of documents in the\n movies collection in the sample_mflix database, and then returns\nan accurate count of the number of documents in the movies \ncollection with Canada in the countries field. Running the preceding sample code, you see the following output: You can use this example to connect to an instance of MongoDB\nand interact with a database that contains sample data. To learn more about connecting to your MongoDB\ninstance and loading a sample dataset, see the Usage Examples\nguide . The JavaScript and TypeScript code snippets above are identical. There are no\nTypeScript specific features of the driver relevant to this use case.", - "code": [ - { - "lang": "javascript", - "value": "collection.countDocuments({}, { hint: \"_id_\" });" - }, - { - "lang": "none", - "value": "Estimated number of documents in the movies collection: 23541\nNumber of movies from Canada: 1349" - }, - { - "lang": "javascript", - "value": "// Count documents in a collection\n\nimport { MongoClient } from \"mongodb\";\n\n// Replace the uri string with your MongoDB deployment's connection string\nconst uri = \"\";\n\nconst client = new MongoClient(uri);\n\nasync function run() {\n try {\n const database = client.db(\"sample_mflix\");\n const movies = database.collection(\"movies\");\n\n /* Print the estimate of the number of documents in the\n \"movies\" collection */\n const estimate = await movies.estimatedDocumentCount();\n console.log(`Estimated number of documents in the movies collection: ${estimate}`);\n\n /* Print the number of documents in the \"movies\" collection that\n match the specified query */\n const query = { countries: \"Canada\" };\n const countCanada = await movies.countDocuments(query);\n console.log(`Number of movies from Canada: ${countCanada}`);\n } finally {\n // Close the connection after the operations complete\n await client.close();\n }\n}\n// Run the program and print any thrown exceptions\nrun().catch(console.dir);\n" - }, - { - "lang": "javascript", - "value": "// Count documents in a collection\n\nimport { MongoClient } from \"mongodb\";\n\n// Replace the uri string with your MongoDB deployment's connection string\nconst uri = \"\";\n\nconst client = new MongoClient(uri);\n\nasync function run() {\n try {\n const database = client.db(\"sample_mflix\");\n const movies = database.collection(\"movies\");\n\n /* Print the estimate of the number of documents in the\n \"movies\" collection */\n const estimate = await movies.estimatedDocumentCount();\n console.log(`Estimated number of documents in the movies collection: ${estimate}`);\n\n /* Print the number of documents in the \"movies\" collection that\n match the specified query */\n const query = { countries: \"Canada\" };\n const countCanada = await movies.countDocuments(query);\n console.log(`Number of movies from Canada: ${countCanada}`);\n } finally {\n // Close the connection after the operations complete\n await client.close();\n }\n}\n// Run the program and print any thrown exceptions\nrun().catch(console.dir);\n" - } - ], - "preview": "The Node.js driver provides two methods for counting documents in a\ncollection:", - "tags": null, - "facets": { - "genre": ["tutorial"], 
- "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "usage-examples/delete-operations", - "title": "Delete Operations", - "headings": [], - "paragraphs": "Delete a Document Delete Multiple Documents", - "code": [], - "preview": null, - "tags": null, - "facets": { - "genre": ["tutorial"], - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "usage-examples/deleteMany", - "title": "Delete Multiple Documents", - "headings": ["Example"], - "paragraphs": "You can delete multiple documents in a collection at once using the\n collection.deleteMany() method.\nPass a query document to the deleteMany() method to specify a subset\nof documents in the collection to delete. If you do not provide a query\ndocument (or if you provide an empty document), MongoDB matches all documents\nin the collection and deletes them. While you can use deleteMany() \nto delete all documents in a collection, consider using\n drop() instead for better performance\nand clearer code. You can specify more options in the options object passed in\nthe second parameter of the deleteMany() method. For more detailed\ninformation, see the\n deleteMany() API documentation . The following snippet deletes multiple documents from the movies \ncollection. It uses a query document that configures the query to\nmatch and delete movies with the title \"Santa Claus\". Running the preceding example for the first time, you see the following output: If you run the example more than once, you see the following output because\nyou deleted the matching documents in the first run: You can use this example to connect to an instance of MongoDB\nand interact with a database that contains sample data. 
To learn more about connecting to your MongoDB\ninstance and loading a sample dataset, see the Usage Examples\nguide .", - "code": [ - { - "lang": "none", - "value": "Deleted 19 documents" - }, - { - "lang": "none", - "value": "Deleted 0 documents" - }, - { - "lang": "javascript", - "value": "// Delete multiple documents\n\nimport { MongoClient } from \"mongodb\";\n\n// Replace the uri string with your MongoDB deployment's connection string.\nconst uri = \"\";\n\nconst client = new MongoClient(uri);\n\nasync function run() {\n try {\n const database = client.db(\"sample_mflix\");\n const movies = database.collection(\"movies\");\n\n /* Delete all documents that match the specified regular\n expression in the title field from the \"movies\" collection */\n const query = { title: { $regex: \"Santa\" } };\n const result = await movies.deleteMany(query);\n\n // Print the number of deleted documents\n console.log(\"Deleted \" + result.deletedCount + \" documents\");\n } finally {\n // Close the connection after the operation completes\n await client.close();\n }\n}\n// Run the program and print any thrown exceptions\nrun().catch(console.dir);\n" - }, - { - "lang": "typescript", - "value": "// Delete multiple documents\n\nimport { MongoClient } from \"mongodb\";\n\n// Replace the uri string with your MongoDB deployment's connection string\nconst uri = \"\";\n\nconst client = new MongoClient(uri);\n\nasync function run() {\n try {\n const database = client.db(\"sample_mflix\");\n const movies = database.collection(\"movies\");\n\n /* Delete all documents that match the specified regular\n expression in the title field from the \"movies\" collection */\n const result = await movies.deleteMany({ title: { $regex: \"Santa\" } });\n \n // Print the number of deleted documents\n console.log(\"Deleted \" + result.deletedCount + \" documents\");\n } finally {\n // Close the connection after the operation completes\n await client.close();\n }\n}\n// Run the program and print any thrown exceptions\nrun().catch(console.dir);\n" - } - ], - "preview": "You can delete multiple documents in a collection at once using the\ncollection.deleteMany() method.\nPass a query document to the deleteMany() method to specify a subset\nof documents in the collection to delete. If you do not provide a query\ndocument (or if you provide an empty document), MongoDB matches all documents\nin the collection and deletes them. While you can use deleteMany()\nto delete all documents in a collection, consider using\ndrop() instead for better performance\nand clearer code.", - "tags": null, - "facets": { - "genre": ["tutorial"], - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "usage-examples/deleteOne", - "title": "Delete a Document", - "headings": ["Example"], - "paragraphs": "You can delete a single document in a collection with\n collection.deleteOne() .\nThe deleteOne() method uses a query document that you provide\nto match the subset of the documents in the collection that match\nthe query. If you do not provide a query document (or if you provide an\nempty document), MongoDB matches all documents in the collection and\ndeletes the first match. You can specify more query options using the\n options object passed as the second parameter of the\n deleteOne method. For more information on this method,\nsee the\n deleteOne() API documentation . 
If your application requires the deleted document after deletion,\nconsider using the\n collection.findOneAndDelete() \nmethod, which has a similar interface to deleteOne() but also\nreturns the deleted document. The following snippet deletes a single document from the movies \ncollection. It uses a query document that configures the query\nto match movies with a title value of \"Annie Hall\". Running the preceding example, you see the following output: If you run the example more than once, you see the following output because\nyou deleted the matching document in the first run: You can use this example to connect to an instance of MongoDB\nand interact with a database that contains sample data. To learn more about connecting to your MongoDB\ninstance and loading a sample dataset, see the Usage Examples\nguide . The JavaScript and TypeScript code snippets above are identical. There are no\nTypeScript specific features of the driver relevant to this use case.", - "code": [ - { - "lang": "none", - "value": "Successfully deleted one document." - }, - { - "lang": "none", - "value": "No documents matched the query. Deleted 0 documents." - }, - { - "lang": "javascript", - "value": "// Delete a document\n\nimport { MongoClient } from \"mongodb\";\n\n// Replace the uri string with your MongoDB deployment's connection string\nconst uri = \"\";\n\nconst client = new MongoClient(uri);\n\nasync function run() {\n try {\n const database = client.db(\"sample_mflix\");\n const movies = database.collection(\"movies\");\n\n /* Delete the first document in the \"movies\" collection that matches\n the specified query document */\n const query = { title: \"Annie Hall\" };\n const result = await movies.deleteOne(query);\n\n /* Print a message that indicates whether the operation deleted a\n document */\n if (result.deletedCount === 1) {\n console.log(\"Successfully deleted one document.\");\n } else {\n console.log(\"No documents matched the query. Deleted 0 documents.\");\n }\n } finally {\n // Close the connection after the operation completes\n await client.close();\n }\n}\n// Run the program and print any thrown exceptions\nrun().catch(console.dir);\n" - }, - { - "lang": "javascript", - "value": "// Delete a document\n\nimport { MongoClient } from \"mongodb\";\n\n// Replace the uri string with your MongoDB deployment's connection string\nconst uri = \"\";\n\nconst client = new MongoClient(uri);\n\nasync function run() {\n try {\n const database = client.db(\"sample_mflix\");\n const movies = database.collection(\"movies\");\n\n /* Delete the first document in the \"movies\" collection that matches\n the specified query document */\n const query = { title: \"Annie Hall\" };\n const result = await movies.deleteOne(query);\n\n /* Print a message that indicates whether the operation deleted a\n document */\n if (result.deletedCount === 1) {\n console.log(\"Successfully deleted one document.\");\n } else {\n console.log(\"No documents matched the query. Deleted 0 documents.\");\n }\n } finally {\n // Close the connection after the operation completes\n await client.close();\n }\n}\n// Run the program and print any thrown exceptions\nrun().catch(console.dir);\n" - } - ], - "preview": "You can delete a single document in a collection with\ncollection.deleteOne().\nThe deleteOne() method uses a query document that you provide\nto match the subset of the documents in the collection that match\nthe query. 
If you do not provide a query document (or if you provide an\nempty document), MongoDB matches all documents in the collection and\ndeletes the first match.", - "tags": null, - "facets": { - "genre": ["tutorial"], - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "usage-examples/distinct", - "title": "Retrieve Distinct Values of a Field", - "headings": ["Example"], - "paragraphs": "You can retrieve a list of distinct values for a field across a collection by using\nthe collection.distinct() \nmethod. Call the distinct() method on a Collection object with a document\nfield name parameter as a String to produce a list that contains one of each\nof the different values found in the specified document field as shown below: You can specify a document field within an embedded document using\n dot notation . If you call\n distinct() on a document field that contains an array, the method\ntreats each element as a separate value. See the following example of\na method call to the wins field in the awards subdocument: You can specify more query options using the options object passed\nas the third parameter to the distinct() method. For details on the\nquery parameters, see the\n distinct() method in the API documentation . If you specify a value for the document field name that is not of type\n String such as a Document , Array , Number , or null ,\nthe method does not execute and returns a TypeMismatch error with a\nmessage that resembles the following: Visit Retrieve Distinct Values for more information about the distinct() \nmethod. \"key\" had the wrong type. Expected string, found The following snippet retrieves a list of distinct values for the year \ndocument field from the movies collection. It uses a query document to\nmatch movies that include \"Barbra Streisand\" as a director . Running the preceding example, you see the following output: You can use this example to connect to an instance of MongoDB\nand interact with a database that contains sample data. 
To learn more about connecting to your MongoDB\ninstance and loading a sample dataset, see the Usage Examples\nguide .", - "code": [ - { - "lang": "javascript", - "value": "const distinctValues = myColl.distinct(\"countries\", query);" - }, - { - "lang": "javascript", - "value": "const distinctValues = myColl.distinct(\"awards.wins\", query);" - }, - { - "lang": "json", - "value": "[ 1983, 1991, 1996 ]" - }, - { - "lang": "javascript", - "value": "import { MongoClient } from \"mongodb\";\n\n// Replace the uri string with your MongoDB deployment's connection string.\nconst uri = \"\";\n\nconst client = new MongoClient(uri);\n\nasync function run() {\n try {\n \n // Get the database and collection on which to run the operation\n const database = client.db(\"sample_mflix\");\n const movies = database.collection(\"movies\");\n\n // Specify the document field to find distinct values for\n const fieldName = \"year\";\n\n // Specify an optional query document to narrow results\n const query = { directors: \"Barbra Streisand\" };\n\n // Execute the distinct operation\n const distinctValues = await movies.distinct(fieldName, query);\n\n // Print the result\n console.log(distinctValues);\n } finally {\n await client.close();\n }\n}\nrun().catch(console.dir);\n" - }, - { - "lang": "typescript", - "value": "import { MongoClient } from \"mongodb\";\n\n// Replace the uri string with your MongoDB deployment's connection string.\nconst uri = \"\";\n\nconst client = new MongoClient(uri);\n\ninterface Movie {\n directors: string;\n year: number;\n}\n\nasync function run() {\n try {\n // define a database and collection on which to run the method\n const database = client.db(\"sample_mflix\");\n const movies = database.collection<Movie>(\"movies\");\n\n const distinctValues = await movies.distinct(\"year\", {\n directors: \"Barbra Streisand\",\n });\n\n console.log(distinctValues);\n } finally {\n await client.close();\n }\n}\nrun().catch(console.dir);\n" - } - ], - "preview": "You can retrieve a list of distinct values for a field across a collection by using\nthe collection.distinct()\nmethod. Call the distinct() method on a Collection object with a document\nfield name parameter as a String to produce a list that contains one of each\nof the different values found in the specified document field as shown below:", - "tags": null, - "facets": { - "genre": ["tutorial"], - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "usage-examples/find-operations", - "title": "Find Operations", - "headings": [], - "paragraphs": "Find a Document Find Multiple Documents", - "code": [], - "preview": "Learn by example: how to create queries and retrieve data from MongoDB by using the MongoDB Node.js driver.", - "tags": null, - "facets": { - "genre": ["tutorial"], - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "usage-examples/find", - "title": "Find Multiple Documents", - "headings": ["Compatibility", "Example"], - "paragraphs": "You can query for multiple documents in a collection with\n collection.find() . The find() method uses a query document that you\nprovide to match the subset of the documents in the collection that match the\nquery. If you don't provide a query document (or if you provide an empty\ndocument), MongoDB returns all documents in the collection. For more\ninformation on querying MongoDB, see our\n documentation on query documents . 
You can also define more query options such as\n sort \nand\n projection \nto configure the result set. You can specify these in the options\nparameter in your find() method call in sort and projection \nobjects. See collection.find() for more\ninformation on the parameters you can pass to the method. The find() method returns a FindCursor that\nmanages the results of your query. You can iterate through the matching\ndocuments using the for await...of syntax, or one of the following\n cursor methods : If no documents match the query, find() returns an empty cursor. next() toArray() You can use the Node.js driver to connect and use the find() method for\ndeployments hosted in the following environments: MongoDB Atlas : The fully\nmanaged service for MongoDB deployments in the cloud MongoDB Enterprise : The\nsubscription-based, self-managed version of MongoDB MongoDB Community : The\nsource-available, free-to-use, and self-managed version of MongoDB To learn more about finding documents in the Atlas UI for deployments hosted in MongoDB\nAtlas, see Create, View, Update, and Delete Documents . The following snippet finds documents from the movies collection. It\nuses the following parameters: Running the preceding example, you see the following output: The sort and projection options can also be specified as methods\n( sort() and project() ) chained to the find() method.\nThe following two commands are equivalent: A query document that configures the query to return only\nmovies with a runtime of less than 15 minutes. A sort that organizes returned documents in ascending order by\ntitle (alphabetical order in which \"A\" comes before \"Z\" and \"1\" before\n\"9\"). A projection that explicitly excludes the _id field from\nreturned documents and explicitly includes only the title and\n imdb object (and its embedded fields). You can use this example to connect to an instance of MongoDB\nand interact with a database that contains sample data. To learn more about connecting to your MongoDB\ninstance and loading a sample dataset, see the Usage Examples\nguide .", - "code": [ - { - "lang": "javascript", - "value": "{ title: '10 Minutes', imdb: { rating: 7.9, votes: 743, id: 339976 } }\n{ title: '3x3', imdb: { rating: 6.9, votes: 206, id: 1654725 } }\n{ title: '7:35 in the Morning', imdb: { rating: 7.3, votes: 1555, id: 406501 } }\n{ title: '8', imdb: { rating: 7.8, votes: 883, id: 1592502 } }\n..." 
- }, - { - "lang": "javascript", - "value": "movies.find({ runtime: { $lt: 15 } }, { sort: { title: 1 }, projection: { _id: 0, title: 1, imdb: 1 }});\nmovies.find({ runtime: { $lt: 15 } }).sort({ title: 1}).project({ _id: 0, title: 1, imdb: 1 });" - }, - { - "lang": "javascript", - "value": "import { MongoClient } from \"mongodb\";\n\n// Replace the uri string with your MongoDB deployment's connection string.\nconst uri = \"\";\n\nconst client = new MongoClient(uri);\n\nasync function run() {\n try {\n \n // Get the database and collection on which to run the operation\n const database = client.db(\"sample_mflix\");\n const movies = database.collection(\"movies\");\n\n // Query for movies that have a runtime less than 15 minutes\n const query = { runtime: { $lt: 15 } };\n\n const options = {\n // Sort returned documents in ascending order by title (A->Z)\n sort: { title: 1 },\n // Include only the `title` and `imdb` fields in each returned document\n projection: { _id: 0, title: 1, imdb: 1 },\n };\n\n // Execute query \n const cursor = movies.find(query, options);\n\n // Print a message if no documents were found\n if ((await movies.countDocuments(query)) === 0) {\n console.log(\"No documents found!\");\n }\n\n // Print returned documents\n for await (const doc of cursor) {\n console.dir(doc);\n }\n\n } finally {\n await client.close();\n }\n}\nrun().catch(console.dir);\n" - }, - { - "lang": "typescript", - "value": "import { MongoClient } from \"mongodb\";\n\n// Replace the uri string with your MongoDB deployment's connection string.\nconst uri = \"\";\n\nconst client = new MongoClient(uri);\n\ntype Minutes = number;\n\ninterface IMDB {\n rating: number;\n votes: number;\n id: number;\n}\n\ninterface Movie {\n title: string;\n imdb: IMDB;\n runtime: Minutes;\n}\n\nasync function run() {\n try {\n const database = client.db(\"sample_mflix\");\n const movies = database.collection(\"movies\");\n\n const query = { runtime: { $lt: 15 } };\n const cursor = movies.find(\n query,\n {\n sort: { title: 1 },\n projection: { _id: 0, title: 1, imdb: 1 },\n }\n );\n\n if ((await movies.countDocuments(query)) === 0) {\n console.warn(\"No documents found!\");\n }\n\n for await (const doc of cursor) {\n console.dir(doc);\n } \n } finally {\n await client.close();\n }\n}\nrun().catch(console.dir);\n" - } - ], - "preview": "Learn how to retrieve multiple documents from MongoDB by using the Node.js driver.", - "tags": "code example, node.js, sample dataset", - "facets": { - "genre": ["reference"], - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "usage-examples/findOne", - "title": "Find a Document", - "headings": ["Compatibility", "Example"], - "paragraphs": "You can query for a single document in a collection with the\n collection.findOne() method. The findOne() method uses a query\ndocument that you provide to match only the subset of the documents in the\ncollection that match the query. If you don't provide a query document or if\nyou provide an empty document, MongoDB matches all documents in the\ncollection. The findOne() operation only returns the first matched\ndocument. For more information on querying MongoDB, see our\n documentation on query documents . You can also define more query options such as\n sort \nand projection \nto configure the returned document. You can specify the more options\nin the options object passed as the second parameter of the\n findOne method. For detailed reference documentation, see\n collection.findOne() . 
You can use the Node.js driver to connect and use the findOne() method for\ndeployments hosted in the following environments: MongoDB Atlas : The fully\nmanaged service for MongoDB deployments in the cloud MongoDB Enterprise : The\nsubscription-based, self-managed version of MongoDB MongoDB Community : The\nsource-available, free-to-use, and self-managed version of MongoDB To learn more about finding documents in the Atlas UI for deployments hosted in MongoDB\nAtlas, see Create, View, Update, and Delete Documents . The following snippet finds a single document from the movies \ncollection. It uses the following parameters: Running the preceding example, you see the following output: A query document that configures the query to return only\nmovies with the title of exactly the text 'The Room' . A sort that organizes matched documents in descending order by\nrating, so if our query matches multiple documents the returned\ndocument will be the document with the highest rating. A projection that explicitly excludes the _id field from\nreturned documents and explicitly includes only the title and\n imdb object (and its embedded fields). You can use this example to connect to an instance of MongoDB\nand interact with a database that contains sample data. To learn more about connecting to your MongoDB\ninstance and loading a sample dataset, see the Usage Examples\nguide .", - "code": [ - { - "lang": "javascript", - "value": "{ title: 'The Room', imdb: { rating: 3.5, votes: 25673, id: 368226 } }" - }, - { - "lang": "javascript", - "value": "import { MongoClient } from \"mongodb\";\n\n// Replace the uri string with your MongoDB deployment's connection string.\nconst uri = \"\";\n\nconst client = new MongoClient(uri);\n\nasync function run() {\n try {\n \n // Get the database and collection on which to run the operation\n const database = client.db(\"sample_mflix\");\n const movies = database.collection(\"movies\");\n\n // Query for a movie that has the title 'The Room'\n const query = { title: \"The Room\" };\n\n const options = {\n // Sort matched documents in descending order by rating\n sort: { \"imdb.rating\": -1 },\n // Include only the `title` and `imdb` fields in the returned document\n projection: { _id: 0, title: 1, imdb: 1 },\n };\n\n // Execute query\n const movie = await movies.findOne(query, options);\n\n // Print the document returned by findOne()\n console.log(movie);\n } finally {\n await client.close();\n }\n}\nrun().catch(console.dir);\n" - }, - { - "lang": "typescript", - "value": "import { MongoClient } from \"mongodb\";\n\n// Replace the uri string with your MongoDB deployment's connection string.\nconst uri = \"\";\n\nconst client = new MongoClient(uri);\n\ninterface IMDB {\n rating: number;\n votes: number;\n id: number;\n}\n\nexport interface Movie {\n title: string;\n year: number;\n released: Date;\n plot: string;\n type: \"movie\" | \"series\";\n imdb: IMDB;\n}\n\ntype MovieSummary = Pick<Movie, \"title\" | \"imdb\">;\n\nasync function run(): Promise<void> {\n try {\n const database = client.db(\"sample_mflix\");\n // Specifying a Schema is always optional, but it enables type hinting on\n // finds and inserts\n const movies = database.collection<Movie>(\"movies\");\n\n const movie = await movies.findOne<MovieSummary>(\n { title: \"The Room\" },\n {\n sort: { \"imdb.rating\": -1 },\n projection: { _id: 0, title: 1, imdb: 1 },\n }\n );\n console.log(movie);\n } finally {\n await client.close();\n }\n}\nrun().catch(console.dir);\n" - } - ], - "preview": "Learn how to retrieve one document from MongoDB by using the Node.js driver.", - 
"tags": "code example, node.js, sample dataset", - "facets": { - "genre": ["tutorial"], - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "usage-examples/insert-operations", - "title": "Insert Operations", - "headings": [], - "paragraphs": "Insert a Document Insert Multiple Documents", - "code": [], - "preview": "Learn by example: how to insert data into MongoDB by using the MongoDB Node.js driver.", - "tags": null, - "facets": { - "genre": ["tutorial"], - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "usage-examples/insertMany", - "title": "Insert Multiple Documents", - "headings": ["Example"], - "paragraphs": "You can insert multiple documents using the\n collection.insertMany() method. The insertMany() takes an array\nof documents to insert into the specified collection. You can specify more options in the options object passed as the\nsecond parameter of the insertMany() method. Specify ordered:true \nto prevent inserting the remaining documents if the insertion failed for a\nprevious document in the array. Specifying incorrect parameters for your insertMany() operation can\ncause problems. Attempting to insert a field with a value that violates\nunique index rules results in a duplicate key error . Running the preceding example, you see the following output: You can use this example to connect to an instance of MongoDB\nand interact with a database that contains sample data. To learn more about connecting to your MongoDB\ninstance and loading a sample dataset, see the Usage Examples\nguide .", - "code": [ - { - "lang": "none", - "value": "3 documents were inserted" - }, - { - "lang": "javascript", - "value": "import { MongoClient } from \"mongodb\";\n\n// Replace the uri string with your MongoDB deployment's connection string.\nconst uri = \"\";\n\nconst client = new MongoClient(uri);\n\nasync function run() {\n try {\n\n // Get the database and collection on which to run the operation\n const database = client.db(\"insertDB\");\n const foods = database.collection(\"foods\");\n\n // Create an array of documents to insert\n const docs = [\n { name: \"cake\", healthy: false },\n { name: \"lettuce\", healthy: true },\n { name: \"donut\", healthy: false }\n ];\n\n // Prevent additional documents from being inserted if one fails\n const options = { ordered: true };\n\n // Execute insert operation\n const result = await foods.insertMany(docs, options);\n \n // Print result\n console.log(`${result.insertedCount} documents were inserted`);\n } finally {\n await client.close();\n }\n}\nrun().catch(console.dir);\n" - }, - { - "lang": "typescript", - "value": "import { MongoClient } from \"mongodb\";\n\n// Replace the uri string with your MongoDB deployment's connection string.\nconst uri = \"\";\n\nconst client = new MongoClient(uri);\n\ninterface Food {\n name: string;\n healthy: boolean;\n}\n\nasync function run() {\n try {\n const database = client.db(\"insertDB\");\n // Specifying a schema is optional, but it enables type hints on\n // finds and inserts\n const foods = database.collection(\"foods\");\n\n const result = await foods.insertMany(\n [\n { name: \"cake\", healthy: false },\n { name: \"lettuce\", healthy: true },\n { name: \"donut\", healthy: false },\n ],\n { ordered: true }\n );\n console.log(`${result.insertedCount} documents were inserted`);\n } finally {\n await client.close();\n }\n}\nrun().catch(console.dir);\n" - } - ], - "preview": "You can insert multiple 
documents using the\ncollection.insertMany() method. The insertMany() method takes an array\nof documents to insert into the specified collection.", - "tags": null, - "facets": { - "genre": ["tutorial"], - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "usage-examples/insertOne", - "title": "Insert a Document", - "headings": ["Compatibility", "Example"], - "paragraphs": "You can insert a document into a collection using the\n collection.insertOne() method. To\ninsert a document, define an object that contains the fields and values that\nyou want to store. If the specified collection does not exist, the\n insertOne() method creates the collection. You can specify more query options using the options parameter.\nFor more information on the method parameters, see the\n insertOne() API documentation . If the operation successfully inserts a document, it appends an\n insertedId field to the object passed in the method call, and sets the\nvalue of the field to the _id of the inserted document. You can use the Node.js driver to connect and use the insertOne() method for\ndeployments hosted in the following environments: MongoDB Atlas : The fully\nmanaged service for MongoDB deployments in the cloud MongoDB Enterprise : The\nsubscription-based, self-managed version of MongoDB MongoDB Community : The\nsource-available, free-to-use, and self-managed version of MongoDB To learn more about inserting documents in the Atlas UI for deployments hosted in MongoDB\nAtlas, see Create, View, Update, and Delete Documents . Running the preceding example, you see the following output: You can use this example to connect to an instance of MongoDB\nand interact with a database that contains sample data. To learn more about connecting to your MongoDB\ninstance and loading a sample dataset, see the Usage Examples\nguide .", - "code": [ - { - "lang": "none", - "value": "A document was inserted with the _id: " - }, - { - "lang": "javascript", - "value": "import { MongoClient } from \"mongodb\";\n\n// Replace the uri string with your MongoDB deployment's connection string.\nconst uri = \"\";\n\n// Create a new client and connect to MongoDB\nconst client = new MongoClient(uri);\n\nasync function run() {\n try {\n // Connect to the \"insertDB\" database and access its \"haiku\" collection\n const database = client.db(\"insertDB\");\n const haiku = database.collection(\"haiku\");\n \n // Create a document to insert\n const doc = {\n title: \"Record of a Shriveled Datum\",\n content: \"No bytes, no problem. 
Just insert a document, in MongoDB\",\n }\n // Insert the defined document into the \"haiku\" collection\n const result = await haiku.insertOne(doc);\n\n // Print the ID of the inserted document\n console.log(`A document was inserted with the _id: ${result.insertedId}`);\n } finally {\n // Close the MongoDB client connection\n await client.close();\n }\n}\n// Run the function and handle any errors\nrun().catch(console.dir);\n" - }, - { - "lang": "typescript", - "value": "import { MongoClient } from \"mongodb\";\n\n// Replace the uri string with your MongoDB deployment's connection string.\nconst uri = \"\";\n\nconst client = new MongoClient(uri);\n\ninterface Haiku {\n title: string;\n content: string;\n}\n\nasync function run() {\n try {\n const database = client.db(\"insertDB\");\n // Specifying a Schema is optional, but it enables type hints on\n // finds and inserts\n const haiku = database.collection<Haiku>(\"haiku\");\n const result = await haiku.insertOne({\n title: \"Record of a Shriveled Datum\",\n content: \"No bytes, no problem. Just insert a document, in MongoDB\",\n });\n console.log(`A document was inserted with the _id: ${result.insertedId}`);\n } finally {\n await client.close();\n }\n}\nrun().catch(console.dir);\n" - } - ], - "preview": "Learn how to insert a document into MongoDB by using the Node.js driver.", - "tags": "code example, node.js, sample dataset", - "facets": { - "genre": ["tutorial"], - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "usage-examples/replaceOne", - "title": "Replace a Document", - "headings": ["Example"], - "paragraphs": "You can replace a single document using the\n collection.replaceOne() method.\n replaceOne() accepts a query document and a replacement document. If\nthe query matches a document in the collection, it replaces the first\ndocument that matches the query with the provided replacement document.\nThis operation removes all fields and values in the original document and\nreplaces them with the fields and values in the replacement document. The\nvalue of the _id field remains the same unless you explicitly specify\na new value for _id in the replacement document. You can specify more options, such as upsert , using the\noptional options parameter. If you set the upsert option field to\n true , the method inserts a new document if no document matches the query. The replaceOne() method throws an exception if an error occurs\nduring execution. For example, if you specify a value that violates a\nunique index rule, replaceOne() throws a duplicate key error . If your application requires the document after updating,\nuse the collection.findOneAndReplace() \nmethod which has a similar interface to replaceOne() .\nYou can configure findOneAndReplace() to return either the\noriginal matched document or the replacement document. Running the preceding example, you see the following output: You can use this example to connect to an instance of MongoDB\nand interact with a database that contains sample data. 
To learn more about connecting to your MongoDB\ninstance and loading a sample dataset, see the Usage Examples\nguide .", - "code": [ - { - "lang": "none", - "value": "Modified 1 document(s)" - }, - { - "lang": "javascript", - "value": "import { MongoClient } from \"mongodb\";\n\n// Replace the uri string with your MongoDB deployment's connection string.\nconst uri = \"\";\n\nconst client = new MongoClient(uri);\n\nasync function run() {\n try {\n \n // Get the database and collection on which to run the operation\n const database = client.db(\"sample_mflix\");\n const movies = database.collection(\"movies\");\n\n // Create a query for documents where the title contains \"The Cat from\"\n const query = { title: { $regex: \"The Cat from\" } };\n \n // Create the document that will replace the existing document\n const replacement = {\n title: `The Cat from Sector ${Math.floor(Math.random() * 1000) + 1}`,\n };\n\n // Execute the replace operation\n const result = await movies.replaceOne(query, replacement);\n \n // Print the result \n console.log(`Modified ${result.modifiedCount} document(s)`);\n } finally {\n await client.close();\n }\n}\nrun().catch(console.dir);\n" - }, - { - "lang": "typescript", - "value": "import { MongoClient } from \"mongodb\";\n\n// Replace the uri string with your MongoDB deployment's connection string.\nconst uri = \"\";\n\nconst client = new MongoClient(uri);\n\ninterface Movie {\n title: string;\n}\n\nasync function run() {\n try {\n const database = client.db(\"sample_mflix\");\n const movies = database.collection<Movie>(\"movies\");\n\n const result = await movies.replaceOne(\n { title: { $regex: \"The Cat from\" } },\n {\n title: `The Cat from Sector ${Math.floor(Math.random() * 1000) + 1}`,\n }\n );\n console.log(`Modified ${result.modifiedCount} document(s)`);\n } finally {\n await client.close();\n }\n}\nrun().catch(console.dir);\n" - } - ], - "preview": "You can replace a single document using the\ncollection.replaceOne() method.\nreplaceOne() accepts a query document and a replacement document. If\nthe query matches a document in the collection, it replaces the first\ndocument that matches the query with the provided replacement document.\nThis operation removes all fields and values in the original document and\nreplaces them with the fields and values in the replacement document. The\nvalue of the _id field remains the same unless you explicitly specify\na new value for _id in the replacement document.", - "tags": null, - "facets": { - "genre": ["tutorial"], - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "usage-examples/transaction-conv", - "title": "Use the Convenient Transaction API", - "headings": [ - "Example", - "Sample Data", - "Implementation", - "Sample Orders and Transaction Results", - "API Documentation" - ], - "paragraphs": "You can perform a transaction to run a series of operations that do not\nchange any data until the entire transaction is committed. This usage\nexample uses the Convenient Transaction API to perform a transaction. To learn more about performing transactions in the\nNode.js driver, see the Transactions guide. The Node.js driver also provides the Core API to perform\ntransactions. To learn more about the Core API, see the\n Use the Core API usage example. Consider a situation in which a customer purchases items from your shop.\nTo record the purchase, your application must update\nyour inventory and record the order information. 
The following table describes the collections that store purchase data\nand how a purchase changes the data in each collection. Collection Operation Description of the Change orders insert Inserts a document that describes the order inventory update Updates the quantities of items available after a purchase The inventory collection contains the\nfollowing documents: You store purchase records in the orders collection of the\n testdb database. This collection is empty, as there have been no\npurchases. The code example in this section demonstrates how to use the Convenient\nTransaction API to perform a multi-document transaction in a session. In\nthis example, the transaction makes the changes needed when a\ncustomer purchases items from your shop. This example code performs a transaction through the following actions: Calls the withSession() method on the client to implicitly create\nthe session and run the callback passed to it within the session. Calls the withTransaction() method on the session to create a\ntransaction, run the callback passed to it, and commit the\ntransaction. If the transaction fails, this method ends the\ntransaction and returns an error message. Performs the following operations within the transaction: Updates the inventory and orders collections if there is\nsufficient inventory to fulfill the purchase Ends the transaction and throws an exception if there isn't\nsufficient inventory for any item in the order Returns a message acknowledging that the transaction\ncommitted successfully with a copy of the purchase record Prints the return type of withSession() , which is either the\nerror message or the acknowledgment that the transaction completed. This section describes the results of the transactions performed for two\nsample orders. Sufficient inventory exists for the following order, so the transaction\nsuccessfully completes: After passing this order to the example transaction code, the code\noutputs the following result: In the inventory collection, the quantity of\n \"sunblock\" is now 82 and the quantity of \"beach chair\" \nis 29 . The orders collection contains the record of the\npurchase. There is not sufficient inventory for the following order, so the\ndriver ends the transaction: After passing this order to the example transaction code, the code\noutputs the following result: Since the driver ends the transaction, there are no changes to\nthe inventory and orders collections. To learn more about any of the methods or types discussed in this\nusage example, see the following API Documentation: withSession() withTransaction() abortTransaction()", - "code": [ - { - "lang": "javascript", - "value": "{ item: \"sunblock\", qty: 85, price: 6.0 },\n{ item: \"beach chair\", qty: 30, price: 25.0 }" - }, - { - "lang": "javascript", - "value": "const txnResult = await client.withSession(async (session) =>\n session\n .withTransaction(async (session) => {\n const invColl = client.db(\"testdb\").collection(\"inventory\");\n const recColl = client.db(\"testdb\").collection(\"orders\");\n\n let total = 0;\n for (const item of order) {\n /* Update the inventory for the purchased items. End the\n transaction if the quantity of an item in the inventory is\n insufficient to complete the purchase. 
*/\n const inStock = await invColl.findOneAndUpdate(\n {\n item: item.item,\n qty: { $gte: item.qty },\n },\n { $inc: { qty: -item.qty } },\n { session }\n );\n if (inStock === null) {\n await session.abortTransaction();\n return \"Item not found or insufficient quantity.\";\n }\n const subTotal = item.qty * inStock.price;\n total = total + subTotal;\n }\n\n // Create a record of the purchase\n const receipt = {\n date: new Date(),\n items: order,\n total: total,\n };\n await recColl.insertOne(receipt, { session });\n return (\n \"Order successfully completed and recorded!\\nReceipt:\\n\" +\n JSON.stringify(receipt, null, 1)\n );\n }, null)\n .finally(async () => await client.close())\n);\n\nconsole.log(txnResult);" - }, - { - "lang": "none", - "value": "Order successfully completed and recorded!\nReceipt:\n{\n \"date\": \"2023-08-25T20:06:52.564Z\",\n \"items\": [\n { \"item\": \"sunblock\", \"qty\": 3 },\n { \"item\": \"beach chair\", \"qty\": 1 }\n ],\n \"total\": 43,\n \"_id\": \"...\"\n}" - }, - { - "lang": "none", - "value": "Item not found or insufficient quantity." - }, - { - "lang": "javascript", - "value": "{ item: \"sunblock\", qty: 3 },\n{ item: \"beach chair\", qty: 1 }" - }, - { - "lang": "javascript", - "value": "{ item: \"volleyball\", qty: 1 }" - } - ], - "preview": "You can perform a transaction to run a series of operations that do not\nchange any data until the entire transaction is committed. This usage\nexample uses the Convenient Transaction API to perform a transaction.", - "tags": null, - "facets": { - "genre": ["tutorial"], - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "usage-examples/transaction-core", - "title": "Use the Core API", - "headings": [ - "Example", - "Sample Data", - "Implementation", - "Transaction Results", - "API Documentation" - ], - "paragraphs": "You can perform a transaction to run a series of operations that do not\nchange any data until the entire transaction is committed. This usage\nexample uses the Core API to perform a transaction. To learn more about performing transactions in the\nNode.js driver, see the Transactions guide. The Node.js driver also provides the Convenient Transaction API to\nperform transactions. To learn more about the Convenient Transaction\nAPI, see the Use the Convenient Transaction API usage example. Consider a situation in which a customer purchases items from your online\nshop. To record the purchase, your application must update\nyour inventory and the customer's orders. Your\napplication also needs to save the order details. The following table describes the collections that store purchase data\nand how a purchase changes the data in each collection. Collection Operation Description of the Change orders insert Inserts a document that describes the order customers update or upsert Appends the _id from the order document to the order history\nin the customer document inventory update Updates the quantities of items available after a purchase The code examples use the following sample data in the testdb \ndatabase: The following document is in the customers collection: The inventory collection contains the following documents: You store purchase records in the orders collection of the\n testdb database. This collection is empty, as there have been no\npurchases. The code examples use the cart and payment variables to represent\na sample list of items purchased and the order payment details. 
The\nfollowing code describes the contents of the cart and payment variables: Documents in the customers collection that describe customers and\ntheir past orders Documents in the inventory collection that include quantities and\ndescriptions of all items The code example in this section demonstrates how to use the Core API to\nperform a multi-document transaction in a session. In this example, the\ntransaction makes the changes needed when a customer purchases items from\nyour shop. This example code performs a transaction through the following actions: Calls the startSession() method to create a new session Calls the startTransaction() method with an options parameter to\ncreate a new transaction Performs the following operations within the transaction: Inserts a document to the orders collection that contains\ninformation about the purchase and customer Updates the inventory collection if there is\nsufficient inventory to fulfill the purchase Ends the transaction and throws an exception if there isn't\nsufficient inventory for any item in the order Adds the ID of the order to the list of past orders for the customer Returns a message acknowledging that the transaction\ncommitted successfully with a copy of the purchase record Calls the commitTransaction() method to commit the transaction if\nall operations complete successfully Implements a catch block that contains error-handling logic Calls the abortTransaction() method to end the transaction Calls the endSession() method to end the session This section describes the data changes created by the transaction. The customers collection contains the customer document with an\norder _id appended to the orders field: The inventory collection contains updated quantities for the\nitems \"sunblock\" and \"beach towel\" : The orders collection contains the order and payment\ninformation: To learn more about any of the methods or types discussed in this\nusage example, see the following API Documentation: TransactionOptions ClientSession startSession() startTransaction() commitTransaction() abortTransaction() endSession()", - "code": [ - { - "lang": "json", - "value": "{ _id: 98765, orders: [] }" - }, - { - "lang": "json", - "value": "{ item: \"sunblock\", item_id: 5432, qty: 85 },\n{ item: \"beach towel\", item_id: 7865, qty: 41 }" - }, - { - "lang": "javascript", - "value": "const cart = [\n { item: 'sunblock', item_id: 5432, qty: 1, price: 5.19 },\n { item: 'beach towel', item_id: 7865, qty: 2, price: 15.99 }\n];\nconst payment = { customer: 98765, total: 37.17 };" - }, - { - "lang": "javascript", - "value": "async function placeOrder(client, cart, payment) {\n const transactionOptions = {\n readConcern: { level: 'snapshot' },\n writeConcern: { w: 'majority' },\n readPreference: 'primary'\n };\n\n // Start the session\n const session = client.startSession();\n try {\n // Start the transaction in the session, specifying the transaction options\n session.startTransaction(transactionOptions);\n\n const ordersCollection = client.db('testdb').collection('orders');\n /* Within the session, insert an order that contains information about the\n customer, items purchased, and the total payment */\n const orderResult = await ordersCollection.insertOne(\n {\n customer: payment.customer,\n items: cart,\n total: payment.total,\n },\n { session }\n );\n\n const inventoryCollection = client.db('testdb').collection('inventory');\n \n for (const item of cart) { \n /* Update the inventory for the purchased items. 
End the\n transaction if the quantity of an item in the inventory is\n insufficient to complete the purchase. */\n const inStock = await inventoryCollection.findOneAndUpdate(\n {\n item_id: item.item_id,\n qty: { $gte: item.qty }\n },\n { $inc: { 'qty': -item.qty }},\n { session }\n )\n if (inStock === null) {\n throw new Error('Insufficient quantity or item ID not found.');\n }\n }\n\n const customerCollection = client.db('testdb').collection('customers');\n\n // Within the session, add the order details to the \"orders\" array of the customer document\n await customerCollection.updateOne(\n { _id: payment.customer },\n { $push: { orders: orderResult.insertedId }},\n { session }\n );\n\n // Commit the transaction to apply all updates performed within it\n await session.commitTransaction();\n console.log('Transaction successfully committed.');\n\n } catch (error) {\n /*\n Handle any exceptions thrown during the transaction and end the\n transaction. Roll back all the updates performed in the transaction.\n */\n if (error instanceof MongoError && error.hasErrorLabel('UnknownTransactionCommitResult')) {\n // Add your logic to retry or handle the error\n }\n else if (error instanceof MongoError && error.hasErrorLabel('TransientTransactionError')) {\n // Add your logic to retry or handle the error\n } else {\n console.log('An error occurred in the transaction, performing a data rollback:' + error);\n }\n await session.abortTransaction();\n } finally {\n // End the session\n await session.endSession();\n }\n}" - }, - { - "lang": "json", - "value": "{\n \"_id\": 98765,\n \"orders\": [\n \"61dc...\"\n ]\n}" - }, - { - "lang": "json", - "value": "[\n {\n \"_id\": ...,\n \"item\": \"sunblock\",\n \"item_id\": 5432,\n \"qty\": 84\n },\n {\n \"_id\": ...,\n \"item\": \"beach towel\",\n \"item_id\": 7865,\n \"qty\": 39\n }\n]" - }, - { - "lang": "json", - "value": "[\n {\n \"_id\": \"...\",\n \"customer\": 98765,\n \"items\": [\n {\n \"item\": \"sunblock\",\n \"item_id\": 5432,\n \"qty\": 1,\n \"price\": 5.19\n },\n {\n \"item\": \"beach towel\",\n \"item_id\": 7865,\n \"qty\": 2,\n \"price\": 15.99\n }\n ],\n \"total\": 37.17\n }\n]" - } - ], - "preview": "You can perform a transaction to run a series of operations that do not\nchange any data until the entire transaction is committed. 
This usage\nexample uses the Core API to perform a transaction.", - "tags": null, - "facets": { - "genre": ["tutorial"], - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "usage-examples/transactions", - "title": "Perform a Transaction", - "headings": [], - "paragraphs": "The following usage examples demonstrate how to perform transactions by\nusing the transaction APIs in the Node.js driver: Use the Convenient Transaction API Use the Core API", - "code": [], - "preview": "The following usage examples demonstrate how to perform transactions by\nusing the transaction APIs in the Node.js driver:", - "tags": null, - "facets": { - "genre": ["tutorial"], - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "usage-examples/update-and-replace-operations", - "title": "Update & Replace Operations", - "headings": [], - "paragraphs": "Update a Document Update Multiple Documents Replace a Document", - "code": [], - "preview": "Learn by example: how to update and replace data in MongoDB by using the MongoDB Node.js driver.", - "tags": null, - "facets": { - "genre": ["tutorial"], - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "usage-examples/updateMany", - "title": "Update Multiple Documents", - "headings": ["Example"], - "paragraphs": "You can update multiple documents using the\n collection.updateMany() method.\nThe updateMany() method accepts a filter document and an update document. If the query matches documents in the\ncollection, the method applies the updates from the update document to fields\nand values of the matching documents. The update document requires an update operator to modify a field in a document. You can specify more options in the options object passed in\nthe third parameter of the updateMany() method. For more detailed\ninformation, see\n the updateMany() API documentation . Running the preceding example, you see the following output: You can use this example to connect to an instance of MongoDB\nand interact with a database that contains sample data. 
To learn more about connecting to your MongoDB\ninstance and loading a sample dataset, see the Usage Examples\nguide .", - "code": [ - { - "lang": "none", - "value": "Updated 477 documents" - }, - { - "lang": "javascript", - "value": "/* Update multiple documents */\n\nimport { MongoClient } from \"mongodb\";\n\n// Replace the uri string with your MongoDB deployment's connection string\nconst uri = \"\";\n\nconst client = new MongoClient(uri);\n\nasync function run() {\n try {\n // Get the \"movies\" collection in the \"sample_mflix\" database\n const database = client.db(\"sample_mflix\");\n const movies = database.collection(\"movies\");\n\n // Create a filter to update all movies with a 'G' rating\n const filter = { rated: \"G\" };\n\n // Create an update document specifying the change to make\n const updateDoc = {\n $set: {\n random_review: `After viewing I am ${\n 100 * Math.random()\n }% more satisfied with life.`,\n },\n };\n // Update the documents that match the specified filter\n const result = await movies.updateMany(filter, updateDoc);\n console.log(`Updated ${result.modifiedCount} documents`);\n } finally {\n // Close the database connection on completion or error\n await client.close();\n }\n}\nrun().catch(console.dir);\n" - }, - { - "lang": "typescript", - "value": "/* Update multiple documents */\n\nimport { MongoClient } from \"mongodb\";\n\n// Replace the uri string with your MongoDB deployment's connection string.\nconst uri = \"\";\n\nconst client = new MongoClient(uri);\n\nenum Rating {\n G = \"G\",\n PG = \"PG\",\n PG_13 = \"PG-13\",\n R = \"R\",\n NR = \"NOT RATED\",\n}\n\n// Create a Movie interface\ninterface Movie {\n rated: Rating;\n random_review?: string;\n}\n\nasync function run() {\n try {\n // Get the \"movies\" collection in the \"sample_mflix\" database\n const database = client.db(\"sample_mflix\");\n const movies = database.collection<Movie>(\"movies\");\n\n // Update all documents that match the specified filter\n const result = await movies.updateMany(\n { rated: Rating.G },\n {\n $set: {\n random_review: `After viewing I am ${\n 100 * Math.random()\n }% more satisfied with life.`,\n },\n }\n );\n console.log(`Updated ${result.modifiedCount} documents`);\n } finally {\n // Close the database connection on completion or error\n await client.close();\n }\n}\nrun().catch(console.dir);\n" - } - ], - "preview": "You can update multiple documents using the\ncollection.updateMany() method.\nThe updateMany() method accepts a filter document and an update document. If the query matches documents in the\ncollection, the method applies the updates from the update document to fields\nand values of the matching documents. The update document requires an update operator to modify a field in a document.", - "tags": null, - "facets": { - "genre": ["tutorial"], - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "usage-examples/updateOne", - "title": "Update a Document", - "headings": ["Example"], - "paragraphs": "You can update a single document using the\n collection.updateOne() \nmethod. The updateOne() method accepts a filter\ndocument and an update document. If the query matches documents in the\ncollection, the method applies the updates from the update document to fields\nand values of the matching documents. The update document contains update operators that instruct the method\non the changes to make to the matches.
You can specify more query options using the options object\npassed as the third parameter of the updateOne() method.\nSet the upsert option to true to create a new document\nif no documents match the filter. For more information, see the\n updateOne() API documentation . updateOne() throws an exception if an error occurs during execution.\nIf you specify a value in your update document for the immutable field\n _id , the method throws an exception. If your update document contains\na value that violates unique index rules, the method throws a duplicate\nkey error exception. If your application requires the document after updating,\nconsider using the\n collection.findOneAndUpdate() \nmethod, which has a similar\ninterface to updateOne() but also returns the original or updated\ndocument. The following example uses the $set update operator which specifies\nupdate values for document fields. For more information on update operators,\nsee the MongoDB update operator reference documentation . If you run the example above, you see the following output: You can use this example to connect to an instance of MongoDB\nand interact with a database that contains sample data. To learn more about connecting to your MongoDB\ninstance and loading a sample dataset, see the Usage Examples\nguide .", - "code": [ - { - "lang": "none", - "value": "1 document(s) matched the filter, updated 1 document(s)" - }, - { - "lang": "javascript", - "value": "// Update a document\n\nimport { MongoClient } from \"mongodb\";\n\n// Replace the uri string with your MongoDB deployment's connection string\nconst uri = \"\";\n\nconst client = new MongoClient(uri);\n\nasync function run() {\n try {\n const database = client.db(\"sample_mflix\");\n const movies = database.collection(\"movies\");\n\n // Create a filter for movies with the title \"Random Harvest\"\n const filter = { title: \"Random Harvest\" };\n\n /* Set the upsert option to insert a document if no documents match\n the filter */\n const options = { upsert: true };\n\n // Specify the update to set a value for the plot field\n const updateDoc = {\n $set: {\n plot: `A harvest of random numbers, such as: ${Math.random()}`\n },\n };\n\n // Update the first document that matches the filter\n const result = await movies.updateOne(filter, updateDoc, options);\n \n // Print the number of matching and modified documents\n console.log(\n `${result.matchedCount} document(s) matched the filter, updated ${result.modifiedCount} document(s)`,\n );\n } finally {\n // Close the connection after the operation completes\n await client.close();\n }\n}\n// Run the program and print any thrown errors\nrun().catch(console.dir);\n" - }, - { - "lang": "typescript", - "value": "// Update a document\n\nimport { MongoClient } from \"mongodb\";\n\n// Replace the uri string with your MongoDB deployment's connection string\nconst uri = \"\";\n\nconst client = new MongoClient(uri);\n\n// Define the Movie interface\ninterface Movie {\n plot: string;\n title: string;\n}\n\nasync function run() {\n try {\n const database = client.db(\"sample_mflix\");\n const movies = database.collection<Movie>(\"movies\");\n\n /* Update a document that has the title \"Random Harvest\" to have a\n plot field with the specified value */\n const result = await movies.updateOne(\n { title: \"Random Harvest\" },\n {\n $set: {\n plot: `A harvest of random numbers, such as: ${Math.random()}`,\n },\n },\n /* Set the upsert option to insert a document if no documents\n match the filter */\n { upsert: true }\n );\n\n // Print
the number of matching and modified documents\n console.log(\n `${result.matchedCount} document(s) matched the filter, updated ${result.modifiedCount} document(s)`\n );\n } finally {\n // Close the connection after the operation completes\n await client.close();\n }\n}\n// Run the program and print any thrown errors\nrun().catch(console.dir);\n" - } - ], - "preview": "You can update a single document using the\ncollection.updateOne()\nmethod. The updateOne() method accepts a filter\ndocument and an update document. If the query matches documents in the\ncollection, the method applies the updates from the update document to fields\nand values of the matching documents. The update document contains update operators that instruct the method\non the changes to make to the matches.", - "tags": null, - "facets": { - "genre": ["tutorial"], - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "usage-examples", - "title": "Usage Examples", - "headings": [ - "Overview", - "How to Use the Usage Examples", - "Available Usage Examples" - ], - "paragraphs": "Usage examples provide convenient starting points for popular MongoDB\noperations. Each example provides the following information: Explanation of the operation in the example, including the\npurpose and a sample use case for the method Explanation of how to use the operation, including parameters,\nreturn values, and common exceptions you might encounter Full Node.js program that you can copy and paste to run the example\nin your own environment These examples use the\n MongoDB Atlas sample data \ndatabase. You can use this sample data on the free tier\nof MongoDB Atlas by following the Get Started with Atlas guide or you\ncan import the sample dataset into a local MongoDB instance . Once you have imported the dataset, you can copy and paste a usage\nexample into your development environment of choice. You can follow the\n quick start guide to learn more about getting\nstarted with Node.js, npm, and the Node.js driver. Once you've copied\na usage example, you must edit one line to get the example running\nwith your instance of MongoDB: All examples use ES module imports. You can\n enable ES module imports \nby adding the following key-value pair to your package.json file: You can use the Atlas Connectivity Guide to enable connectivity to your instance of\nAtlas and find the connection string to replace the uri variable in the\nusage example. If your instance uses SCRAM authentication , you can replace <username> with your username,\n <password> with your password, and <hostname> with the IP\naddress or URL of your instance. Consult the\n Connection Guide for more information\nabout getting connected to your MongoDB instance. You can use any usage example with CommonJS require . To use CommonJS require , you\nmust swap out the ES module import statement for your CommonJS require \nstatement.
Click on the tabs to see the syntax for importing the driver with ES module\n import and CommonJS require : Find Operations Insert Operations Update Operations Delete Operations Count Documents Retrieve Distinct Values of a Field Run a Command Watch for Changes Perform Bulk Operations Perform a Transaction", - "code": [ - { - "lang": "javascript", - "value": "// Replace the following with your MongoDB deployment's connection string.\nconst uri =\n \"mongodb+srv://<username>:<password>@<cluster-url>?retryWrites=true&writeConcern=majority\";" - }, - { - "lang": "json", - "value": "\"type\": \"module\"" - }, - { - "lang": "javascript", - "value": "import { MongoClient } from 'mongodb'" - }, - { - "lang": "javascript", - "value": "const { MongoClient } = require('mongodb')" - } - ], - "preview": "Learn how to load sample data into a MongoDB Atlas deployment and run Node.js driver usage examples.", - "tags": "node.js", - "facets": { - "genre": ["reference"], - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - }, - { - "slug": "whats-new", - "title": "What's New", - "headings": [ - "What's New in 6.8", - "What's New in 6.7", - "What's New in 6.6", - "What's New in 6.5", - "What's New in 6.4", - "What's New in 6.3", - "What's New in 6.2", - "What's New in 6.1", - "What's New in 6.0", - "What's New in 5.9", - "What's New in 5.8", - "What's New in 5.7", - "What's New in 5.6", - "What's New in 5.5", - "What's New in 5.4", - "What's New in 5.3", - "What's New in 5.2", - "What's New in 5.1", - "What's New in 5.0", - "What's New in 4.17", - "What's New in 4.16", - "What's New in 4.15", - "What's New in 4.14", - "What's New in 4.13", - "What's New in 4.12", - "What's New in 4.11", - "Prioritization Order in Monitoring", - "Changes to AWS Authentication", - "Mutually Recursive Schema Type Checking", - "Example", - "What's New in 4.10", - "What's New in 4.9", - "What's New in 4.8", - "What's New in 4.7", - "What's New in 4.6", - "What's New in 4.5", - "What's New in 4.4", - "What's New in 4.3", - "What's New in 4.2", - "What's New in 4.1", - "What's New in 4.0", - "TypeScript", - "Key Changes", - "Node.js Version", - "Cursor Improvements", - "Cursor Stream API", - "MongoClientOptions Interface", - "createCollection()", - "BulkWriteError \u2192 MongoBulkWriteError", - "DB", - "Collection.group()", - "Authentication", - "GridStore Removal", - "Construction", - "File Seeking", - "File Upload & Download", - "File Deletion", - "Finding File Metadata", - "Unified Topology", - "Explain", - "Command Monitoring", - "What's New in 3.7", - "What's New in 3.6" - ], - "paragraphs": "Learn what's new in: Version 6.8 Version 6.7 Version 6.6 Version 6.5 Version 6.4 Version 6.3 Version 6.2 Version 6.1 Version 6.0 Version 5.9 Version 5.8 Version 5.7 Version 5.6 Version 5.5 Version 5.4 Version 5.3 Version 5.2 Version 5.1 Version 5.0 Version 4.17 Version 4.16 Version 4.15 Version 4.14 Version 4.13 Version 4.12 Version 4.11 Version 4.10 Version 4.9 Version 4.8 Version 4.7 Version 4.6 Version 4.5 Version 4.4 Version 4.3 Version 4.2 Version 4.1 Version 4.0 Version 3.7 Version 3.6 The Node.js driver v6.8 release includes the following features: To learn more about this release, see the\n v6.8.0 Release Notes on\nGitHub. Fixes a bug where a local KMS provider accepted a BSON Binary instance at\nruntime, but the TypeScript compiler allowed only values of type Buffer and\n string . The ReadConcernMajorityNotAvailableYet error is now a retryable read error.
You can now associate a name with, and provide multiple keys for, KMS providers.\nThis feature requires mongodb-client-encryption v6.0.1 or later.\nYou can't use named KMS providers if your application uses the automatic\nKMS provider refresh capability. The following code example shows how to configure a ClientEncryption object with\nmultiple AWS keys: When you create a KMIP data key, you can now specify the delegated option. If this\noption is set to true , the KMIP provider performs encryption and decryption of\nthe data key locally, ensuring that the encryption key never leaves the KMIP server.\nThis feature requires mongodb-client-encryption v6.0.1 or later. The following code example shows how to specify this option: The driver now decodes BSON responses as the cursor iterates over them,\nrather than decoding the entire BSON response when it is received. The GitHub release for the mongodb package now contains a detached signature file,\n mongodb-X.Y.Z.tgz.sig , for the NPM package. This change applies to every major\nand patch release for versions 5.x and 6.x of the driver. To verify the package signature,\nfollow the instructions in the Release Integrity section of the\n README.md \nfile in the driver's GitHub repository. The Node.js driver v6.7 release includes the following features: To learn more about this release, see the\n v6.7.0 Release Notes on\nGitHub. Adds support for the MONGODB-OIDC authentication mechanism when connected to\nMongoDB Server v7.0 and later. The driver supports Azure machine authentication, GCP machine\nauthentication, callback authentication, and human interaction callback\nauthentication. Fixes an issue where setting the useBigInt64 flag to true caused the internal\n compareTopologyVersion function to generate an error. The Node.js driver v6.6 release includes the following features: To learn more about this release, see the\n v6.6.0 Release Highlights on\nGitHub. Upgrades to using BSON 6.7.0. For details about the new BSON features, see the\nrelease notes for BSON 6.5.0 ,\n BSON 6.6.0 , and\n BSON 6.7.0 . Adds the addStage() method to the fluid aggregation API. You can use this method to\nadd aggregation pipeline stages individually, as shown in the following\nexample: Adds the cause and dependencyName fields to the MongoMissingDependencyError \nclass. You can use these fields to programmatically determine if a package is missing\nor why a package didn't load. Adds the minRoundTripTime property to the ServerDescription class. This\nproperty contains the minimum round-trip time over the last 10 heartbeats. Adds the toJSON() method to the TopologyDescription class. Although you can use\nthis method to stringify TopologyDescription objects to JSON, we\nrecommend using Node's util.inspect() method instead, because it properly handles\nall types used in JavaScript and the driver. Adds cursor options support for the Collection.indexExists() ,\n Collection.indexes() , and Collection.indexInformation() methods in TypeScript. Removes support for the readConcern and writeConcern options from the\n Collection.listSearchIndexes() method. listSearchIndexes() is an Atlas-specific method, and Atlas\nsearch indexes don't support these options. Redefines the ServerDescription.roundTripTime property as a moving average. Previously,\nit was a weighted average of the most recently observed heartbeat duration and the\nprevious duration.
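Returning to the v6.7 MONGODB-OIDC support noted above, the callback form can be sketched as follows. This is a hedged, minimal sketch: `fetchAccessToken()` is a hypothetical helper standing in for your own token source; only the `{ accessToken }` return shape is required by the driver.

```js
// Minimal sketch of MONGODB-OIDC callback authentication (driver v6.7+).
// fetchAccessToken() is a hypothetical helper you would implement yourself.
import { MongoClient } from "mongodb";

const client = new MongoClient("mongodb://<host>", {
  authMechanism: "MONGODB-OIDC",
  authMechanismProperties: {
    OIDC_CALLBACK: async () => {
      const accessToken = await fetchAccessToken(); // your token source (assumption)
      return { accessToken };
    },
  },
});
```

For Azure or GCP machine authentication, the driver instead takes an `ENVIRONMENT` value (for example, `ENVIRONMENT: 'azure'`) in `authMechanismProperties` rather than a callback.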
You can specify the type of a search index when creating the index, as shown\nin the following example: The UpdateFilter.$currentDate property no longer throws an error when you pass\nit to a compound method, such as findOneAndUpdate() , on a collection with a limited schema. The driver throws a MongoTransactionError only if you provide a\n ReadPreferenceMode other than primary and then try to perform a command that\ninvolves a read operation. The data type of the TopologyDescription.error property is MongoError . The Collection.indexExists() method no longer supports the full option. The Collection.indexInformation() , Collection.indexes() , and\n Db.indexInformation() methods have a return type of\n IndexDescriptionCompact | IndexDescriptionInfo[] in TypeScript. When retrieving AWS KMS (Key Management System) credentials, the driver no longer\nthrows an error when it receives an access key that includes\nan expiration timestamp. The ClusterTime interface no longer defines the signature field as required in\nTypeScript. The Node.js driver v6.5 release includes the following features: To learn more about this release, see the\n v6.5.0 Release Highlights on\nGitHub. Updates bulk write operations to use the pkFactory class for document\nID generation. If you previously specified an instance of a pkFactory to handle\nbulk writes, the _id fields of the documents inserted by using bulk\nwrites may be inconsistent with the behavior in this version. Fixes the read preference that is sent with read operations to\n primaryPreferred when the driver is connected to a secondary node in\nthe replica set. Fixes a memory leak in promise creation for socket operations. Reduces first-time connection latency when connecting to a DNS seedlist by\nquerying the SRV and TXT records in parallel. Adds tracking to container metadata when running a client in Kubernetes\nor a container environment in the client.env.container field of the\nhandshake document. Adds the original error document returned by the server to the\n errorResponse field of the MongoServerError document. Deprecates the CloseOptions interface which is unused by the driver. The Node.js driver v6.4 release includes the following features: To learn more about this release, see the\n v6.4.0 Release Highlights on\nGitHub. When multiple mongos instances are available, different servers are used\nfor read and write retry attempts. Caches AWS credentials at the client level, rather than for each\nauthentication. Upgrades to using BSON 6.4.0. For details about the new BSON features, see the\nrelease notes for BSON 6.3.0 and BSON 6.4.0 . Read operations that result in an ExceededTimeLimit error are retried. Fixes a request issue related to TLS sockets and\n KMS Providers . Fixes the base64 padding on the saslContinue command to allow for mongosh\nauthentication. Types countDocuments using Filter rather than Document ,\nwhich enables autocompletion and helps prevent downstream typing issues. Fixes a type error in the $addToSet option of the bulkWrite command.\nThe driver skips $addToSet validation if you extend your types from\n Document or any , or use properties of any type. Fixes the ServerHeartbeatSucceeded and ServerHeartbeatFailed event\nheartbeat duration so that it does not include the time to create the socket. Appropriately emits errors from cursor transform streams, rather than\nabsorbing them. Makes AWS session tokens optional when a username and password are provided,\nand allows the AWS SDK to handle the authentication requests.
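As a companion to the v6.4 AWS authentication notes above, the following hedged sketch shows MONGODB-AWS authentication that leaves credential resolution to the environment; the cluster host is a placeholder, and an async context is assumed.

```js
// Sketch: MONGODB-AWS authentication with credentials resolved from the
// environment (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, and an optional
// AWS_SESSION_TOKEN when using temporary credentials).
import { MongoClient } from "mongodb";

const client = new MongoClient(
  "mongodb+srv://<cluster-host>/?authMechanism=MONGODB-AWS"
);
await client.connect();
```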
The Node.js driver v6.3 release includes the following features: To learn more about this release, see the\n v6.3.0 Release Highlights . Adds the serverMonitoringMode client option to control the\nbehavior of the monitoring connection among the nodes in a topology.\nThis option takes a value of auto (default), poll , or\n stream . To learn more, see the entry for this option in the\n Connection Options guide. You can set the serverMonitoringMode option in a\n MongoClientOptions instance or as a connection string option. The\nfollowing example shows how to create a client with the option set to\n stream : Fixes a connection leak when the serverApi client option is set. Deprecates the contentType and aliases GridFS options. To\nstore the content type and aliases of a file, add contentType and aliases \nfields to the metadata document. The Node.js driver v6.2 release includes the following features: To learn more about this release, see the\n v6.2.0 Release Highlights . Updates the bson package version to 6.2.0 to include\ncolor visualization of types, as shown in the following image: To learn more, see the bson v6.2.0 release notes . Ensures that the result.insertedIds property of a bulk write error type\ncontains the _id values of successfully inserted documents. In\nprevious versions, when a bulk write operation rejected an insert\noperation, the result.insertedIds property contained the\n _id values for all attempted inserts. Closes the implicit session created when running the findOne() \nmethod on a time series collection regardless of the outcome of the\noperation. Allows the creation of collections that have names that start or end with the\n . character. This change aligns the driver's database and\ncollection name-checking behavior with the server's. The Node.js driver v6.1 release includes the following features: To learn more about this release, see the\n v6.1.0 Release Highlights . Updates the bson package version to 6.1.0 to expose the\n Decimal128.fromStringWithRounding() method. To learn more, see the\n v6.1.0 bson release notes . Detects environment variables for region settings when\nauthenticating by using the MONGODB-AWS authentication mechanism.\nTo instruct the driver to use your region options, you must set both\nof the following environment variables: AWS_STS_REGIONAL_ENDPOINTS AWS_REGION Fixes a memory leak issue caused by recursive calls to the next() \nmethod of the ChangeStream type. The Node.js driver v6.0 release includes the following features: To learn more about this release, see the\n v6.0.0 Release Highlights . This driver version introduces breaking changes. For a list of these changes, see\nthe Version 6.0 Breaking Changes section in the\nUpgrade guide. All of the ssl -prefixed options in the MongoClientOptions \ntype are deprecated. In addition, the tlsCertificateFile option\nis deprecated. Instead, you should store your certificates in a SecureContext \nobject or set the tls -prefixed options in your\n MongoClientOptions instance. To learn more, see Enable TLS on a Connection . Removal of support for the addUser() helper command. Use the\n createUser MongoDB Shell command instead. Removal of support for the collStats operation. Use the\n $collStats aggregation operator\ninstead. 
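For the removed collStats command mentioned above, a rough equivalent using the $collStats aggregation stage looks like the following sketch; the database and collection names are placeholders, and a connected client is assumed.

```js
// Sketch: replacing the collStats command with the $collStats aggregation
// stage, which must be the first stage in the pipeline.
const stats = await client
  .db("sample_mflix")
  .collection("movies")
  .aggregate([{ $collStats: { storageStats: {} } }])
  .toArray();
console.log(stats[0].storageStats);
```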
The options field of the ConnectionPoolCreatedEvent type\ncontains only the following fields, which are the non-default pool\noptions: maxConnecting maxPoolSize minPoolSize maxIdleTimeMS waitQueueTimeoutMS The driver asynchronously reads files set in the tlsCAFile and\n tlsCertificateKeyFile connection options when you call\nthe MongoClient.connect() method, not when you create a\n MongoClient instance. Removal of the keepAlive and keepAliveInitialDelay connection\noptions. The value of keepAlive is permanently set to true and the\nvalue of keepAliveInitialDelay is set to 300000 milliseconds (300\nseconds). To learn how to set keepalive settings at a system level,\nsee the Does TCP keepalive time affect MongoDB Deployments? \nFAQ entry in the Server manual. Removes the following options for the Db.command() method: Although you cannot pass these options to the\n Db.command() method, you can still set them in the command\ndocument. To learn more, see the Command Options section of the Run a Command guide. willRetryWrite omitReadPreference writeConcern explain readConcern collation maxTimeMS comment retryWrites dbName authdb noResponse The Node.js driver v5.9 release includes the following features: To learn more about this release, see the\n v5.9.0 Release Highlights . This version includes a fix to a memory leak introduced in v5.7.\nWe recommend upgrading to v5.9. Fixed a memory leak introduced in v5.7. The Decimal128 constructor and fromString() methods now throw an exception\nwhen they detect a loss of precision of more than 34 significant digits.\nThe Decimal128 class exposes a new fromStringWithRounding() static method that\nuses the rounding behavior from previous versions of the driver. For more information,\nsee the Release Notes for v5.5 of the js-bson package \non GitHub. Added support for detecting the AWS_STS_REGIONAL_ENDPOINTS and AWS_REGION \nenvironment variables and setting the appropriate options when calling the\n fromNodeProviderChain() method in the AWS SDK. The Node.js driver v5.8 release includes the following features: To learn more about this release, see the\n v5.8.0 Release Highlights . This version includes a fix to a memory leak introduced in v5.7.\nWe recommend upgrading to v5.9. The AutoEncrypter interface is deprecated. Support for Kerberos versions 1.x and 2.x. Deprecation errors are not emitted for the\n tlsCertificateFile property when you set the\n tlsCertificateKeyFile property. Removes credential availability in the\n ConnectionPoolCreatedEvent type. You can still access credentials\nthrough the credentials property of a MongoOptions instance. Lowers the @aws-sdk/credential-providers version to 3.188.0\nand zstd to ^1.0.0. The Node.js driver v5.7 release includes the following features: To learn more about this release, see the\n v5.7.0 Release Highlights . The following Write Concern options are deprecated: To specify the write concern behavior, use the wtimeoutMS and\n journal options instead. To learn more about these options, see the\n Connection Options page. wtimeout j fsync SSL options and other transport encryption options are deprecated.\nTo learn more about the deprecated options and which options to use\ninstead, see the Legacy SSL options deprecated section in the\nv5.7.0 Release Highlights linked at the end of this section. A new option for compound operation methods. The\n includeResultMetadata \noption allows you to specify whether to include information about the\noperation result.
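A hedged sketch of the includeResultMetadata option described above, shown with findOneAndUpdate; the collection, filter, and update are assumptions made for illustration.

```js
// Sketch: with includeResultMetadata: true, the compound operation returns
// the full result document ({ value, ok, ... }) instead of only the
// matched document.
const result = await movies.findOneAndUpdate(
  { title: "Random Harvest" },
  { $set: { seen: true } },
  { includeResultMetadata: true }
);
console.log(result.value);
```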
See the Built-in Methods section of the Compound Operations\nguide for more information. Support for change stream split events which enables processing change\nstream documents that exceed the 16MB maximum BSON size limit. An API to manage Search indexes from within your application. To\nlearn more, see Search Indexes . The Node.js driver v5.6 release includes the following features: To learn more about this release, see the v5.6.0 Release Highlights . The driver now supports Node.js v20. The driver can return a cursor as the response to a server command when you\ncall the runCursorCommand() method. To learn more about this feature,\nsee the runCursorCommand API documentation . Support for specifying time series collection creation options\n bucketMaxSpanSeconds \nand\n bucketRoundingSeconds .\nTo learn more about these time series collection options, see\n Set Granularity for Time Series Data \nin the Server manual. New features of the 5.5 Node.js driver release include: To learn more about this release, see the v5.5.0 Release Highlights . The driver now accurately detects Function-as-a-Service (FaaS)\nenvironments in AWS by considering AWS environment variables only if\nthey begin with AWS_Lambda_ . You must upgrade mongodb-client-encryption to version 2.8.0 or\nlater if you want to create an encrypted collection by using the\nQueryable Encryption feature. New features of the 5.4 Node.js driver release include: To learn more, see the v5.4.0 Release Highlights . The collStats operation is deprecated. Use the $collStats aggregation operator instead. The TypeScript interface passed to the db.command() method incorrectly\nincludes certain options. These options have been deprecated. The ChangeStream.tryNext method now uses the schema-specific\n TChange generic type instead of the Document interface. New features of the 5.3 Node.js driver release include: To learn more, see the v5.3.0 Release Highlights . The forEach() cursor method, which allows you to iteratively access\nresults from queries and aggregations, is deprecated. Use the\n for await...of syntax instead, as shown\n here. The addUser() method is deprecated. Use createUser() instead. The keepAlive and keepAliveInitialDelay connection options are\ndeprecated. Methods that contain duplicated functionality in the BulkWriteResult class are deprecated.\nSee the\n API documentation \nfor a full list of deprecated methods and the preferred alternatives. Client metadata now includes function as a service (FaaS) environment information\nand alternative runtime detection. The driver now allows SRV record addresses that contain a trailing dot. UpdateResult.upsertedId now returns null when no documents are updated. New features of the 5.2 Node.js driver release include: To learn more, see the v5.2.0 Release Highlights . The driver now supports automatically obtaining Azure credentials when using\nautomatic Queryable Encryption. New features of the 5.1 Node.js driver release include: To learn more, see the v5.1.0 Release Highlights . The driver now supports automatic serialization of JavaScript bigint to\n BSON.Long . It also supports the deserialization of BSON.Long values returned\nfrom the server to bigint values when the useBigInt64 flag is passed\nas true. New features of the 5.0 Node.js driver release include: This driver version introduces breaking changes. For a list of these changes, see\nthe Version 5.0 Breaking Changes section in the\nUpgrade guide. 
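Stepping back to the v5.6 runCursorCommand() feature noted above, a minimal sketch might look like the following; it assumes an already connected `client`, and listCollections is used only because it is a command whose reply contains a cursor.

```js
// Sketch: run a cursor-returning command and iterate the results lazily.
const db = client.db("admin");
const cursor = db.runCursorCommand({ listCollections: 1 });
for await (const doc of cursor) {
  console.log(doc.name);
}
```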
By default, the driver no longer checks types referenced in dot notation\nunless the StrictFilter type annotation is explicitly\nused. To learn more about this change, see the Typescript fundamentals\npage . This change is for Typescript only, and does not affect queries or operations\nat runtime. Optional installation of @aws-sdk/credential-providers as a peer dependency. The driver no longer includes AWS SDK modules by default. Use the\nfollowing npm command to install the SDK: If you install the SDK, npm notifies you if the version of the SDK you\ninstalled is incompatible with the driver. Once you install the\ndependency successfully, the driver uses the AWS SDK itself to\nmanage credentials from the environment. New features of the 4.17 Node.js driver release include: To learn more, see the v4.17.0 Release Highlights . Adds the mongodb-js/saslprep package as a driver dependency. Improves compatibility with the Queryable Encryption feature. New features of the 4.16 Node.js driver release include: To learn more, see the v4.16.0 Release Highlights . Includes Function-as-a-Service (FaaS) platform information in the driver\nhandshake metadata. Identifies Deno runtime usage in the client metadata. New features of the 4.15 Node.js driver release include: To learn more, see the v4.15.0 Release Highlights . Support for AWS IAM roles for service accounts. New features of the 4.14 Node.js driver release include: This version includes a fix to a memory leak introduced in v4.13.\nWe recommend upgrading to v4.14. Fixed a memory leak introduced in v4.13. Deprecated methods and options that reference the legacy Logger. New features of the 4.13 Node.js driver release include: Automatic cancellation of in-flight operations in the connection pool when\nthe driver encounters network timeout errors. Disabled causal consistency in implicit sessions to prevent conflicting\nwith the linearizable and available read concern settings. Fixed a potential memory leak by ensuring that the driver destroys\n MessageStream instances whenever their connections are destroyed. New features of the 4.12 Node.js driver release include: To learn more, see the v4.12.0 Release Highlights . The 4.12.1 Node.js driver includes a fix to a regression in monitoring logic\nthat could cause processes to crash. Redefinition of the ChangeStream class as an async iterable.\nYou can use ChangeStream instances in any context that expects an\n AsyncIterator . Notably, change streams can now be used in Javascript for-await \nloops: Fix to server monitoring when the driver skips monitoring events. In\nthis release, the driver always updates its view of the topology when\nprocessing monitoring events. Performance improvements with buffering as a result of modification to\ndata structures used internally in the driver. When connecting to MongoDB Server version 6.0 or later, the driver prioritizes\n electionId settings before setVersion settings during Server Discovery and\nMonitoring events. In previous versions, the prioritization order was reversed. When you install the optional aws-sdk/credential-providers \ndependency, the driver uses the AWS SDK to retrieve AWS credentials from the\nenvironment. To learn more about this behavior, see the MONGODB-AWS section of the Authentication Mechanisms guide. This release includes added support for mutually\nrecursive collection schema types. The driver also provides type safety for\ndot-notation queries up to a depth of eight in this release. 
At a depth greater\nthan or equal to eight, Typescript successfully compiles your code but does not\nprovide type safety. This depth limit on recursive types is a current limitation\nof TypeScript. Suppose we have a collection of type Collection that contains the\nfollowing mutually recursive types: TypeScript enforces type checking up to a depth of eight. The following\ncode causes a TypeScript compilation error because the name property\nvalue must be a string type: At a depth greater than or equal to eight, TypeScript compiles your code but no\nlonger type checks it. For example, the following code assigns a number to a\n string property but does not cause a compilation error because the\nreferenced property is at a depth of 10: To learn more, see the v4.11.0 Release Highlights . New features of the 4.10 Node.js driver release include: To learn more, see v4.10.0 Release Highlights . Callback Deprecation Callbacks are now deprecated in favor of Promises. Callbacks will\nbe removed in the next major release. The Node driver team recommends\nmigrating to promises where possible: Use async/await syntax. Use the Node.js callbackify utility : Use then syntax: If you are unable to migrate to Promises in a large codebase, you can\nuse the legacy Node.js driver with optional callback support . New features of the 4.9 Node.js driver release include: To learn more, see v4.9.0 Release Highlights . Fixed an inconsistency with writeConcern options in the type definitions. Included the latest BSON release, which adds automatic UUID support. See the\nBSON release notes here . New features of the 4.8 Node.js driver release include: To learn more, see v4.8.0 Release Highlights . Version 4.8.1 fixes a type regression issue introduced in v4.8.0. By\nupgrading to v4.8.1, you can specify _id values and sub-documents\nwhen performing updates with the $set or $setOnInsert operators. Added auto-completion and type safety for nested keys in an update filter client.startSession() can now be called before connecting to MongoDB estimatedDocumentCount() method can now accept a comment New features of the 4.7 Node.js driver release include: The MongoClient.connect() method is now optional when connecting to your MongoDB instance Ability to compress messages with the Zstandard compression algorithm Added support for the maxConnecting connection option Ability for change stream documents to show your documents before and after an update Added support for new change stream fields related to Cluster to Cluster Replication The estimatedDocumentCount() method now uses the $count database command Improved connecting to MongoDB in the AWS Lambda Init phase The ResumeOptions interface is deprecated. Use the\n ChangeStreamCursorOptions interface instead. New features of the 4.6 Node.js driver release include: To learn more, see v4.6.0 Release Highlights . Improved the ChangeStreamDocument in TypeScript. Even distribution of server selection based on load across servers. See v4.5.0 Release Highlights \non GitHub. New features of the 4.4 Node.js driver release include: KMIP provider support when using CSFLE. TLS support when using CSFLE. Hostname canonicalization now accepts \"none\", \"forward\", and \"forwardAndReverse\" as authMechanismProperties when using GSSAPI. 
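A hedged sketch of the hostname canonicalization values named above, set through authMechanismProperties in a GSSAPI connection string; the principal and host are placeholders.

```js
// Sketch: CANONICALIZE_HOST_NAME accepts "none", "forward", or
// "forwardAndReverse" in driver v4.4 and later.
import { MongoClient } from "mongodb";

const uri =
  "mongodb://<principal>@<host>/?authMechanism=GSSAPI" +
  "&authMechanismProperties=SERVICE_NAME:mongodb,CANONICALIZE_HOST_NAME:forward";
const client = new MongoClient(uri);
```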
In the 4.0.0 release of the driver, the deprecated collection.count() method was inadvertently changed to behave like collection.countDocuments() .\nIn this release, the collection.count() method is updated to match legacy behavior: If a query is provided, collection.count() behaves the same as collection.countDocuments() and performs a collection scan. If no query is provided, collection.count() behaves the same as collection.estimatedDocumentCount() and relies on\ncollection metadata. The cursor.count() method is deprecated and will be removed in the next major version, along with collection.count() .\nUse the collection.estimatedDocumentCount() or collection.countDocuments() \nmethods instead. New features of the 4.3 Node.js driver release include: SOCKS5 support Option to disable UTF-8 validation Type inference for nested documents New features of the 4.2 Node.js driver release include: srvMaxHosts and srvServiceName DNS seedlist connection options New features of the 4.1 Node.js driver release include: Added load balanced connection support for all cluster types including\nthe beta Serverless platform . Added support for the advanceClusterTime() method to determine if\nthe ClientSession should update its cluster time. New features of the 4.0 Node.js driver release include: This driver version introduces breaking changes. For a list of these changes, see\nthe Version 4.0 Breaking Changes section in\nthe Upgrade guide. In this release of the driver, the deprecated collection.count() method was\ninadvertently changed to behave like collection.countDocuments() . This behavior\nis corrected in version 4.4 . We'd love to hear your TypeScript related feature requests. Please submit\nideas on our JIRA project here . We've migrated the driver to TypeScript. You can now harness the type\nhinting and intellisense features in editors that support it to develop\nyour MongoDB applications. Enjoy the benefits of this work in pure JavaScript\nprojects as well. The underlying BSON library used by this version is now migrated to\nTypeScript. Inline documentation is now consistently formatted to improve display\nin editors. If you are a user of the community types @types/mongodb , there will\n likely be issues adopting the types from our codebase. We could not\nachieve a one to one match in types due to the details of writing the\ncodebase in TypeScript. The minimum supported version of Node.js is now v12.9 or greater for\nversion 4 of the driver. Support for our 3.x branches will continue\nuntil mid-year 2022 to allow time for users to upgrade. 3.x supports back to Node.js v4. Our Cursor implementation is now updated to make it clear what is possible\nbefore and after execution of an operation. There was inconsistency surrounding how the cursor would error if a\nsetting was applied after cursor execution began. Now, the cursor will\nthrow an error when attempting to apply operations in an invalid state,\nsimilar to the following: MongoError: Cursor is already initialized Affected classes: AbstractCursor FindCursor AggregationCursor ChangeStreamCursor (This is the underlying cursor for ChangeStream ) ListCollectionsCursor Our Cursor types no longer extend Readable directly. They must be\ntransformed into a stream by calling cursor.stream() . Use hasNext() and next() for manual iteration.\nUse for await of syntax or any Promise helpers for\nasynchronous iteration. With type hinting, you should find that options passed to a MongoClient \nare enumerated and discoverable. 
We've made a large effort to process\nall options in the driver to give early warnings about incompatible settings\nto get your app up and running in a correct state quickly. checkServerIdentity is no longer checked before being passed to the\nunderlying Node API. Previously, accepted values were false , or\na function. Now, the argument must be a function. Specifying a\nboolean will result in an error being thrown. It is no longer required to specify useUnifiedTopology or useNewUrlParser . This method no longer supports a strict option, which returned\nan error if the collection did not exist. To assert the existence of\na collection, use the listCollections() method instead. BulkWriteError is now renamed to MongoBulkWriteError . When running bulk operations that make writes you can encounter errors\ndepending on your settings. Import the new class name MongoBulkWriteError \nwhen testing for errors in bulk operations. DB is no longer an EventEmitter . Listen for events directly from your\n MongoClient instance. The Collection.group() helper, deprecated since MongoDB 3.4,\nis now removed. Use the aggregation pipeline $group \noperator instead. gssapiServiceName is now removed. Use authMechanismProperties.SERVICE_NAME in the URI or as an option on MongoClientOptions . Specifying username and password as options is only supported in the URI\nor as an option on MongoClientOptions . The GridStore API (already deprecated in 3.x) is now replaced with GridFSBucket .\nFor more information on GridFS , see the mongodb manual . Below are some snippets that represent equivalent operations. GridFSBucket uses the Node.js Stream API. You can replicate file seeking\nby using the start and end options, creating a download stream\nfrom your GridFSBucket . GridFSBucket does not need to be closed like GridStore . File metadata that used to be accessible on the GridStore instance can be\nfound by querying the bucket. We internally now only manage a unifiedTopology when you connect\nto a mongod . The differences between this and previous versions\nis detailed here . It is no longer required to specify useUnifiedTopology or useNewUrlParser . You must use the new directConnection option \nto connect to uninitialized replica set members. Support is now added for fine-grained verbosity modes. You can learn more\nabout each mode here . The instrument() method is now removed. Use command monitoring instead.\nSee our guide on command monitoring \nfor more information. 
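Because instrument() is removed, command monitoring events are the replacement, as referenced above; a minimal sketch with a placeholder connection string:

```js
// Sketch: command monitoring replaces instrument(). The client must be
// created with monitorCommands: true for these events to fire.
import { MongoClient } from "mongodb";

const client = new MongoClient("<connection string>", {
  monitorCommands: true,
});
client.on("commandStarted", (ev) => console.log("started:", ev.commandName));
client.on("commandSucceeded", (ev) =>
  console.log("succeeded:", ev.commandName, `${ev.duration}ms`)
);
client.on("commandFailed", (ev) => console.log("failed:", ev.commandName));
```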
New features of the 3.7 Node.js driver release include: Added support for load balancer mode while enabling the useUnifiedTopology option Added support for Stable API while enabling the useUnifiedTopology option New features of the 3.6 Node.js driver release include: Added support for the MONGODB-AWS authentication mechanism using Amazon Web Services (AWS) Identity and Access Management (IAM) credentials The find() method supports allowDiskUse() for sorts that require too much memory to execute in RAM The update() and replaceOne() methods support index hints A reduction in recovery time for topology changes and failover events Improvements in validation testing for the default writeConcern Authentication requires fewer round trips to the server, resulting in faster connection setup Shorter Salted Challenge Response Authentication Mechanism ( SCRAM ) conversations Ability to create collections and indexes for multiple document transactions Running validation for a collection in the background", - "code": [ - { - "lang": "javascript", - "value": "const clientEncryption = new ClientEncryption(keyVaultClient, {\n 'aws:key1': {\n accessKeyId: ...,\n secretAccessKey: ...\n },\n 'aws:key2': {\n accessKeyId: ...,\n secretAccessKey: ...\n }\n});\n\nclientEncryption.createDataKey('aws:key1', { ... });" - }, - { - "lang": "javascript", - "value": "clientEncryption.createDataKey('kmip', { masterKey: { delegated: true } } );" - }, - { - "lang": "javascript", - "value": "const documents = await users.aggregate().addStage({ $project: { name: true } }).toArray();" - }, - { - "lang": "js", - "value": "const indexName = await collection.createSearchIndex({\n name: 'my-vector-search-index',\n type: 'vectorSearch',\n definition: {\n mappings: { dynamic: false }\n }\n});" - }, - { - "lang": "js", - "value": "new MongoClient('', { serverMonitoringMode: 'stream' });" - }, - { - "lang": "bash", - "value": "npm install --save \"@aws-sdk/credential-providers@^3.201.0\"" - }, - { - "lang": "js", - "value": "const changeStream = myColl.watch();\nfor await (const change of changeStream) {\n console.log(\"Received change: \", change);\n}" - }, - { - "lang": "js", - "value": "interface Author {\n name: string;\n bestBook: Book;\n}\n\ninterface Book {\n title: string;\n author: Author;\n }" - }, - { - "lang": "js", - "value": "myColl.findOne({ 'bestBook.author.bestBook.title': 25 });" - }, - { - "lang": "js", - "value": "myColl.findOne({\n 'bestBook.author.bestBook.author.bestBook.author.bestBook.author.bestBook.author.name': 25\n});" - }, - { - "lang": "js", - "value": "require('util').callbackify(() => myColl.findOne())(callback)" - }, - { - "lang": "js", - "value": "myColl.findOne().then(res => callback(null, res), err => callback(err))" - }, - { - "lang": "js", - "value": "const fc = myColl.find({a: 2.3}).skip(1)\nfor await (const doc of fc) {\n console.log(doc)\n fc.limit(1) // incorrect usage, cursor already executing\n}" - }, - { - "lang": "js", - "value": "const cursor = myColl.find({});\nconst stream = cursor.stream();\nstream.on(\"data\", data => console.log);\nstream.on(\"error\", () => client.close());" - }, - { - "lang": "js", - "value": "const collections = (await db.listCollections({}, { nameOnly: true })\n .toArray()).map(\n ({name}) => name\n );\nif (!collections.includes(myNewCollectionName)) {\n throw new Error(`${myNewCollectionName} doesn't exist`);\n}" - }, - { - "lang": "js", - "value": "?authMechanismProperties.SERVICE_NAME\n// or\nnew MongoClient(url, { SERVICE_NAME: \"alternateServiceName\" })" -
}, - { - "lang": "js", - "value": "new MongoClient(\"mongodb://username:password@\")\n// or\nnew MongoClient(url, { auth: { username: \"<>\", password: \"<>\" } })" - }, - { - "lang": "javascript", - "value": "// old way\nconst gs = new GridStore(db, filename, mode[, options])\n// new way\nconst bucket = new GridFSBucket(client.db('test')[,options])" - }, - { - "lang": "js", - "value": "bucket.openDownloadStreamByName(filename, { start: 0, end: 100 })" - }, - { - "lang": "javascript", - "value": "await client.connect();\nconst filename = 'test.txt'; // whatever local file name you want\nconst db = client.db();\nconst bucket = new GridFSBucket(db);\n\nfs.createReadStream(filename)\n .pipe(bucket.openUploadStream(filename))\n .on('error', console.error)\n .on('finish', () => {\n console.log('done writing to db!');\n\n bucket\n .find()\n .toArray()\n .then(files => {\n console.log(files);\n\n bucket\n .openDownloadStreamByName(filename)\n .pipe(fs.createWriteStream('downloaded_' + filename))\n .on('error', console.error)\n .on('finish', () => {\n console.log('done downloading!');\n client.close();\n });\n });\n });" - }, - { - "lang": "js", - "value": "// old way\nGridStore.unlink(db, name, callback);\n// new way\nbucket.delete(file_id);" - }, - { - "lang": "js", - "value": "const fileMetaDataList: GridFSFile[] = bucket.find({}).toArray();" - } - ], - "preview": "Learn what's new in:", - "tags": "version, update, upgrade, backwards compatibility", - "facets": { - "genre": ["reference"], - "target_product": ["drivers"], - "programming_language": ["javascript/typescript"] - } - } - ] + "url": "http://mongodb.com/docs/drivers/node/current", + "includeInGlobalSearch": true, + "documents": [ + { + "slug": "aggregation-tutorials/filtered-subset", + "title": "Filtered Subset", + "headings": [ + "Introduction", + "Aggregation Task Summary", + "Before You Get Started", + "Tutorial", + "Add a match stage for people who are engineers", + "Add a sort stage to sort from youngest to oldest", + "Add a limit stage to see only three results", + "Add an unset stage to remove unneeded fields", + "Run the aggregation pipeline", + "Interpret results" + ], + "paragraphs": "In this tutorial, you can learn how to use the Node.js driver to\nconstruct an aggregation pipeline, perform the\naggregation on a collection, and print the results by completing and\nrunning a sample app. This aggregation performs the following operations: Matches a subset of documents by a field value Formats result documents You can also query for a subset of documents in a collection by using the\nQuery API. To learn how to specify a query, see the\n Read Operations guides . This tutorial demonstrates how to query a collection for a specific\nsubset of documents in a collection. The results contain\ndocuments that describe the three youngest people who are engineers. This example uses one collection, persons , which contains\ndocuments describing people. Each document includes a person's name,\ndate of birth, vocation, and other details. Before you start this tutorial, complete the\n Aggregation Template App instructions to set up a working\nNode.js application. After you set up the app, access the persons collection by adding the\nfollowing code to the application: Delete any existing data in the collections and insert sample data into\nthe persons collection as shown in the following code: To view the complete code for this tutorial, see the Completed Filtered Subset App \non GitHub. 
First, add a $match stage that finds documents in which\nthe value of the vocation field is \"ENGINEER\" : Next, add a $sort stage that sorts the\ndocuments in descending order by the dateofbirth field to\nlist the youngest people first: Next, add a $limit \nstage to the pipeline to output only the first three documents in\nthe results. Finally, add an $unset stage. The\n $unset stage removes unnecessary fields from the result documents: Use the $unset operator instead of $project to avoid\nmodifying the aggregation pipeline if documents with\ndifferent fields are added to the collection. Add the following code to the end of your application to perform\nthe aggregation on the persons collection: Finally, run the following command in your shell to start your\napplication: The aggregated result contains three documents. The documents\nrepresent the three youngest people with the vocation of \"ENGINEER\" ,\nordered from youngest to oldest. The results omit the _id and address \nfields.", + "code": [ + { + "lang": "javascript", + "value": "const personColl = aggDB.collection(\"persons\");" + }, + { + "lang": "javascript", + "value": "await personColl.deleteMany({});\n\nconst personData = [\n {\n person_id: \"6392529400\",\n firstname: \"Elise\",\n lastname: \"Smith\",\n dateofbirth: new Date(\"1972-01-13T09:32:07Z\"),\n vocation: \"ENGINEER\",\n address: {\n number: 5625,\n street: \"Tipa Circle\",\n city: \"Wojzinmoj\",\n },\n },\n {\n person_id: \"1723338115\",\n firstname: \"Olive\",\n lastname: \"Ranieri\",\n dateofbirth: new Date(\"1985-05-12T23:14:30Z\"),\n gender: \"FEMALE\",\n vocation: \"ENGINEER\",\n address: {\n number: 9303,\n street: \"Mele Circle\",\n city: \"Tobihbo\",\n },\n },\n {\n person_id: \"8732762874\",\n firstname: \"Toni\",\n lastname: \"Jones\",\n dateofbirth: new Date(\"1991-11-23T16:53:56Z\"),\n vocation: \"POLITICIAN\",\n address: {\n number: 1,\n street: \"High Street\",\n city: \"Upper Abbeywoodington\",\n },\n },\n {\n person_id: \"7363629563\",\n firstname: \"Bert\",\n lastname: \"Gooding\",\n dateofbirth: new Date(\"1941-04-07T22:11:52Z\"),\n vocation: \"FLORIST\",\n address: {\n number: 13,\n street: \"Upper Bold Road\",\n city: \"Redringtonville\",\n },\n },\n {\n person_id: \"1029648329\",\n firstname: \"Sophie\",\n lastname: \"Celements\",\n dateofbirth: new Date(\"1959-07-06T17:35:45Z\"),\n vocation: \"ENGINEER\",\n address: {\n number: 5,\n street: \"Innings Close\",\n city: \"Basilbridge\",\n },\n },\n {\n person_id: \"7363626383\",\n firstname: \"Carl\",\n lastname: \"Simmons\",\n dateofbirth: new Date(\"1998-12-26T13:13:55Z\"),\n vocation: \"ENGINEER\",\n address: {\n number: 187,\n street: \"Hillside Road\",\n city: \"Kenningford\",\n },\n },\n];\n\nawait personColl.insertMany(personData);" + }, + { + "lang": "javascript", + "value": "pipeline.push({\n $match: {\n \"vocation\": \"ENGINEER\"\n },\n});" + }, + { + "lang": "javascript", + "value": "pipeline.push({\n $sort: {\n \"dateofbirth\": -1,\n }\n});" + }, + { + "lang": "javascript", + "value": "pipeline.push({\n $limit: 3\n});" + }, + { + "lang": "javascript", + "value": "pipeline.push({\n $unset: [\n \"_id\",\n \"address\",\n ]\n});" + }, + { + "lang": "bash", + "value": "node agg_tutorial.js" + }, + { + "lang": "javascript", + "value": "const aggregationResult = await personColl.aggregate(pipeline);" + }, + { + "lang": "javascript", + "value": "{\n person_id: '7363626383',\n firstname: 'Carl',\n lastname: 'Simmons',\n dateofbirth: 1998-12-26T13:13:55.000Z,\n vocation: 'ENGINEER'\n}\n{\n 
person_id: '1723338115',\n firstname: 'Olive',\n lastname: 'Ranieri',\n dateofbirth: 1985-05-12T23:14:30.000Z,\n gender: 'FEMALE',\n vocation: 'ENGINEER'\n}\n{\n person_id: '6392529400',\n firstname: 'Elise',\n lastname: 'Smith',\n dateofbirth: 1972-01-13T09:32:07.000Z,\n vocation: 'ENGINEER'\n}" + } + ], + "preview": "In this tutorial, you can learn how to use the Node.js driver to\nconstruct an aggregation pipeline, perform the\naggregation on a collection, and print the results by completing and\nrunning a sample app. This aggregation performs the following operations:", + "tags": "code example, node.js, sort, limit, aggregation", + "facets": { + "genre": ["tutorial"], + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "aggregation-tutorials/group-total", + "title": "Group and Total", + "headings": [ + "Introduction", + "Aggregation Task Summary", + "Before You Get Started", + "Tutorial", + "Add a match stage for orders in 2020", + "Add a sort stage to sort by order date", + "Add a group stage to group by email address", + "Add a sort stage to sort by first order date", + "Add a set stage to display the email address", + "Add an unset stage to remove unneeded fields", + "Run the aggregation pipeline", + "Interpret results" + ], + "paragraphs": "In this tutorial, you can learn how to use the Node.js driver to\nconstruct an aggregation pipeline, perform the\naggregation on a collection, and print the results by completing and\nrunning a sample app. This aggregation performs the following operations: Matches a subset of documents by a field value Groups documents by common field values Adds computed fields to each result document This tutorial demonstrates how to group and analyze customer order data. The\nresults show the list of customers who purchased items in 2020 and\ninclude each customer's order history for 2020. This example uses one collection, orders , which contains documents\ndescribing individual product orders. Since each order can correspond to\nonly one customer, the order documents are grouped by the\n customer_id field, which contains customer email addresses. Before you start this tutorial, complete the\n Aggregation Template App instructions to set up a working\nNode.js application. After you set up the app, access the orders collection by adding the\nfollowing code to the application: Delete any existing data and insert sample data into\nthe orders collection as shown in the following code: To view the complete code for this tutorial, see the Completed Group and Total App \non GitHub. First, add a $match stage that matches\norders placed in 2020: Next, add a $sort stage to set an\nascending sort on the orderdate field to surface the earliest\n2020 purchase for each customer in the next stage: Add a $group stage to group\norders by the value of the customer_id field. In this\nstage, add aggregation operations that create the\nfollowing fields in the result documents: first_purchase_date : the date of the customer's first purchase total_value : the total value of all the customer's purchases total_orders : the total number of the customer's purchases orders : the list of all the customer's purchases,\nincluding the date and value of each purchase Next, add another $sort stage to set an\nascending sort on the first_purchase_date field: Add a $set stage to recreate the\n customer_id field from the values in the _id field\nthat were set during the $group stage: Finally, add an $unset stage.
The\n $unset stage removes the _id field from the result\ndocuments: Add the following code to the end of your application to perform\nthe aggregation on the orders collection: Finally, run the following command in your shell to start your\napplication: The aggregation returns the following summary of customers' orders\nfrom 2020: The result documents contain details from all the orders from\na given customer, grouped by the customer's email address.", + "code": [ + { + "lang": "javascript", + "value": "const ordersColl = aggDB.collection(\"orders\");" + }, + { + "lang": "javascript", + "value": "await ordersColl.deleteMany({});\n\nconst orderData = [\n {\n customer_id: \"elise_smith@myemail.com\",\n orderdate: new Date(\"2020-05-30T08:35:52Z\"),\n value: 231,\n },\n {\n customer_id: \"elise_smith@myemail.com\",\n orderdate: new Date(\"2020-01-13T09:32:07Z\"),\n value: 99,\n },\n {\n customer_id: \"oranieri@warmmail.com\",\n orderdate: new Date(\"2020-01-01T08:25:37Z\"),\n value: 63,\n },\n {\n customer_id: \"tj@wheresmyemail.com\",\n orderdate: new Date(\"2019-05-28T19:13:32Z\"),\n value: 2,\n },\n {\n customer_id: \"tj@wheresmyemail.com\",\n orderdate: new Date(\"2020-11-23T22:56:53Z\"),\n value: 187,\n },\n {\n customer_id: \"tj@wheresmyemail.com\",\n orderdate: new Date(\"2020-08-18T23:04:48Z\"),\n value: 4,\n },\n {\n customer_id: \"elise_smith@myemail.com\",\n orderdate: new Date(\"2020-12-26T08:55:46Z\"),\n value: 4,\n },\n {\n customer_id: \"tj@wheresmyemail.com\",\n orderdate: new Date(\"2021-02-29T07:49:32Z\"),\n value: 1024,\n },\n {\n customer_id: \"elise_smith@myemail.com\",\n orderdate: new Date(\"2020-10-03T13:49:44Z\"),\n value: 102,\n },\n];\n\nawait ordersColl.insertMany(orderData);" + }, + { + "lang": "javascript", + "value": "pipeline.push({\n $match: {\n orderdate: {\n $gte: new Date(\"2020-01-01T00:00:00Z\"),\n $lt: new Date(\"2021-01-01T00:00:00Z\"),\n },\n },\n});" + }, + { + "lang": "javascript", + "value": "pipeline.push({\n $sort: {\n orderdate: 1,\n },\n});" + }, + { + "lang": "javascript", + "value": "pipeline.push({\n $group: {\n _id: \"$customer_id\",\n first_purchase_date: { $first: \"$orderdate\" },\n total_value: { $sum: \"$value\" },\n total_orders: { $sum: 1 },\n orders: { $push: \n { \n orderdate: \"$orderdate\", \n value: \"$value\" \n }\n },\n },\n});" + }, + { + "lang": "javascript", + "value": "pipeline.push({\n $sort: {\n first_purchase_date: 1,\n },\n});" + }, + { + "lang": "javascript", + "value": "pipeline.push({\n $set: {\n customer_id: \"$_id\",\n },\n});" + }, + { + "lang": "javascript", + "value": "pipeline.push({ $unset: [\"_id\"] });" + }, + { + "lang": "bash", + "value": "node agg_tutorial.js" + }, + { + "lang": "javascript", + "value": "const aggregationResult = await ordersColl.aggregate(pipeline);" + }, + { + "lang": "javascript", + "value": "{\n first_purchase_date: 2020-01-01T08:25:37.000Z,\n total_value: 63,\n total_orders: 1,\n orders: [ { orderdate: 2020-01-01T08:25:37.000Z, value: 63 } ],\n customer_id: 'oranieri@warmmail.com'\n}\n{\n first_purchase_date: 2020-01-13T09:32:07.000Z,\n total_value: 436,\n total_orders: 4,\n orders: [\n { orderdate: 2020-01-13T09:32:07.000Z, value: 99 },\n { orderdate: 2020-05-30T08:35:52.000Z, value: 231 },\n { orderdate: 2020-10-03T13:49:44.000Z, value: 102 },\n { orderdate: 2020-12-26T08:55:46.000Z, value: 4 }\n ],\n customer_id: 'elise_smith@myemail.com'\n}\n{\n first_purchase_date: 2020-08-18T23:04:48.000Z,\n total_value: 191,\n total_orders: 2,\n orders: [\n { orderdate: 
2020-08-18T23:04:48.000Z, value: 4 },\n { orderdate: 2020-11-23T22:56:53.000Z, value: 187 }\n ],\n customer_id: 'tj@wheresmyemail.com'\n}" + } + ], + "preview": "In this tutorial, you can learn how to use the Node.js driver to\nconstruct an aggregation pipeline, perform the\naggregation on a collection, and print the results by completing and\nrunning a sample app. This aggregation performs the following operations:", + "tags": "code example, node.js, analyze, aggregation", + "facets": { + "genre": ["tutorial"], + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "aggregation-tutorials/multi-field-join", + "title": "Multi-Field Join", + "headings": [ + "Introduction", + "Aggregation Task Summary", + "Before You Get Started", + "Tutorial", + "Add a lookup stage to link the collections and import fields", + "Add a match stage for products ordered in 2020", + "Add an unset stage to remove unneeded fields", + "Run the aggregation pipeline", + "Interpret results" + ], + "paragraphs": "In this tutorial, you can learn how to use the Node.js driver to\nconstruct an aggregation pipeline, perform the\naggregation on a collection, and print the results by completing and\nrunning a sample app. This aggregation performs a multi-field join. A multi-field join occurs when there are\nmultiple corresponding fields in the documents of two collections that you use to\nmatch documents together. The aggregation matches these documents on the\nfield values and combines information from both into one document. A one-to-many join is a variety of a multi-field join. When you\nperform a one-to-many join, you select one field from a document that\nmatches a field value in multiple documents on the other side of the\njoin. To learn more about these data relationships,\nsee the Wikipedia entries about One-to-many (data model) and\n Many-to-many (data model) . This tutorial demonstrates how to combine data from a collection that\ndescribes product information with another collection that describes\ncustomer orders. The results show a list of products ordered in 2020\nthat also contains details about each order. This example uses two collections: An order can only contain one product, so the aggregation uses a\nmulti-field join to match a product document to documents representing orders of\nthat product. The collections are joined by the name and\n variation fields in documents in the products collection, corresponding\nto the product_name and product_variation fields in documents in\nthe orders collection. products , which contains documents describing the products that\na shop sells orders , which contains documents describing individual orders\nfor products in a shop Before you start this tutorial, complete the\n Aggregation Template App instructions to set up a working\nNode.js application. After you set up the app, access the products and orders \ncollections by adding the following code to the application: Delete any existing data and insert sample data into\nthe products collection as shown in the following code: Delete any existing data and insert sample data into\nthe orders collection as shown in the following code: To view the complete code for this tutorial, see the Completed Multi-field Join App \non GitHub. The first stage of the pipeline is a $lookup stage to join the\n orders collection to the products collection by two\nfields in each collection. The lookup stage contains an\nembedded pipeline to configure the join. 
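Before walking through the stages one at a time, it may help to see the shape of the stage being assembled. The following sketch (illustrative only, using the field and collection names from this tutorial) shows a $lookup that exposes outer fields through let variables and delegates the matching logic to an embedded pipeline:

```javascript
// Sketch only: the overall structure of a $lookup stage that joins on
// multiple fields by pairing `let` bindings with an embedded pipeline.
const lookupStage = {
  $lookup: {
    from: "orders", // collection on the other side of the join
    let: { prdname: "$name", prdvartn: "$variation" }, // outer-field aliases
    pipeline: [], // embedded stages, built step by step below
    as: "orders", // array field that receives the joined documents
  },
};
```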
Within the embedded pipeline, add a $match stage to match the\nvalues of two fields on each side of the join. Note that the following\ncode uses aliases for the name and variation fields\nset when creating the $lookup stage : Within the embedded pipeline, add another $match stage to match\norders placed in 2020: Within the embedded pipeline, add an $unset stage to remove\nunneeded fields from the orders collection side of the join: After the embedded pipeline is completed, add the\n $lookup stage to the main aggregation pipeline.\nConfigure this stage to store the processed lookup fields in\nan array field called orders : Next, add a $match stage to only show\nproducts for which there is at least one order in 2020,\nbased on the orders array calculated in the previous step: Finally, add an $unset stage. The\n $unset stage removes the _id and description \nfields from the result documents: Add the following code to the end of your application to perform\nthe aggregation on the products collection: Finally, run the following command in your shell to start your\napplication: The aggregated result contains two documents. The documents\nrepresent products for which there were orders placed in 2020.\nEach document contains an orders array field that lists details\nabout each order for that product: The result documents contain details from documents in the\n orders collection and the products collection, joined by\nthe product names and variations.", + "code": [ + { + "lang": "javascript", + "value": "const productsColl = aggDB.collection(\"products\");\nconst ordersColl = aggDB.collection(\"orders\");" + }, + { + "lang": "javascript", + "value": "await productsColl.deleteMany({});\n\nconst productsData = [\n {\n name: \"Asus Laptop\",\n variation: \"Ultra HD\",\n category: \"ELECTRONICS\",\n description: \"Great for watching movies\",\n },\n {\n name: \"Asus Laptop\",\n variation: \"Standard Display\",\n category: \"ELECTRONICS\",\n description: \"Good value laptop for students\",\n },\n {\n name: \"The Day Of The Triffids\",\n variation: \"1st Edition\",\n category: \"BOOKS\",\n description: \"Classic post-apocalyptic novel\",\n },\n {\n name: \"The Day Of The Triffids\",\n variation: \"2nd Edition\",\n category: \"BOOKS\",\n description: \"Classic post-apocalyptic novel\",\n },\n {\n name: \"Morphy Richards Food Mixer\",\n variation: \"Deluxe\",\n category: \"KITCHENWARE\",\n description: \"Luxury mixer turning good cakes into great\",\n },\n];\n\nawait productsColl.insertMany(productsData);" + }, + { + "lang": "javascript", + "value": "await ordersColl.deleteMany({});\n\nconst orderData = [\n {\n customer_id: \"elise_smith@myemail.com\",\n orderdate: new Date(\"2020-05-30T08:35:52Z\"),\n product_name: \"Asus Laptop\",\n product_variation: \"Standard Display\",\n value: 431.43,\n },\n {\n customer_id: \"tj@wheresmyemail.com\",\n orderdate: new Date(\"2019-05-28T19:13:32Z\"),\n product_name: \"The Day Of The Triffids\",\n product_variation: \"2nd Edition\",\n value: 5.01,\n },\n {\n customer_id: \"oranieri@warmmail.com\",\n orderdate: new Date(\"2020-01-01T08:25:37Z\"),\n product_name: \"Morphy Richards Food Mixer\",\n product_variation: \"Deluxe\",\n value: 63.13,\n },\n {\n customer_id: \"jjones@tepidmail.com\",\n orderdate: new Date(\"2020-12-26T08:55:46Z\"),\n product_name: \"Asus Laptop\",\n product_variation: \"Standard Display\",\n value: 429.65,\n },\n];\n\nawait ordersColl.insertMany(orderData);" + }, + { + "lang": "javascript", + "value": "const embedded_pl = 
[];\n\nembedded_pl.push({\n $match: {\n $expr: {\n $and: [\n { $eq: [\"$product_name\", \"$$prdname\"] },\n { $eq: [\"$product_variation\", \"$$prdvartn\"] },\n ],\n },\n },\n});" + }, + { + "lang": "javascript", + "value": "embedded_pl.push({\n $match: {\n orderdate: {\n $gte: new Date(\"2020-01-01T00:00:00Z\"),\n $lt: new Date(\"2021-01-01T00:00:00Z\"),\n },\n },\n});" + }, + { + "lang": "javascript", + "value": "embedded_pl.push({\n $unset: [\"_id\", \"product_name\", \"product_variation\"],\n});" + }, + { + "lang": "javascript", + "value": "pipeline.push({\n $lookup: {\n from: \"orders\",\n let: {\n prdname: \"$name\",\n prdvartn: \"$variation\",\n },\n pipeline: embedded_pl,\n as: \"orders\",\n },\n});" + }, + { + "lang": "javascript", + "value": "pipeline.push({\n $match: {\n orders: { $ne: [] },\n },\n});" + }, + { + "lang": "javascript", + "value": "pipeline.push({\n $unset: [\"_id\", \"description\"],\n});" + }, + { + "lang": "bash", + "value": "node agg_tutorial.js" + }, + { + "lang": "javascript", + "value": "const aggregationResult = await productsColl.aggregate(pipeline);" + }, + { + "lang": "javascript", + "value": "{\n name: 'Asus Laptop',\n variation: 'Standard Display',\n category: 'ELECTRONICS',\n orders: [\n {\n customer_id: 'elise_smith@myemail.com',\n orderdate: 2020-05-30T08:35:52.000Z,\n value: 431.43\n },\n {\n customer_id: 'jjones@tepidmail.com',\n orderdate: 2020-12-26T08:55:46.000Z,\n value: 429.65\n }\n ]\n}\n{\n name: 'Morphy Richards Food Mixer',\n variation: 'Deluxe',\n category: 'KITCHENWARE',\n orders: [\n {\n customer_id: 'oranieri@warmmail.com',\n orderdate: 2020-01-01T08:25:37.000Z,\n value: 63.13\n }\n ]\n}" + } + ], + "preview": "In this tutorial, you can learn how to use the Node.js driver to\nconstruct an aggregation pipeline, perform the\naggregation on a collection, and print the results by completing and\nrunning a sample app.", + "tags": "code example, node.js, lookup, aggregation", + "facets": { + "genre": ["tutorial"], + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "aggregation-tutorials/one-to-one-join", + "title": "One-to-One Join", + "headings": [ + "Introduction", + "Aggregation Task Summary", + "Before You Get Started", + "Tutorial", + "Add a match stage for orders in 2020", + "Add a lookup stage to link the collections", + "Add set stages to create new document fields", + "Add an unset stage to remove unneeded fields", + "Run the aggregation pipeline", + "Interpret results" + ], + "paragraphs": "In this tutorial, you can learn how to use the Node.js driver to\nconstruct an aggregation pipeline, perform the\naggregation on a collection, and print the results by completing and\nrunning a sample app. This aggregation performs a one-to-one join. A one-to-one join occurs\nwhen a document in one collection has a field value that matches a\nsingle document in another collection that has the same field value. The\naggregation matches these documents on the field value and combines\ninformation from both sources into one result. A one-to-one join does not require the documents to have a\none-to-one relationship. To learn more about this data relationship,\nsee the Wikipedia entry about One-to-one (data model) . This tutorial demonstrates how to combine data from a collection that\ndescribes product information with another collection that describes\ncustomer orders. The results show a list of all orders placed in 2020 that\nincludes the product details associated with each order. 
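As a preview of the join described above, a single-field equality join can be expressed with the localField and foreignField form of $lookup. The following sketch uses the field names from this tutorial; the full stage is assembled in the steps below:

```javascript
// Sketch only: join each order to its product by matching the
// product_id field in orders against the id field in products.
const lookupStage = {
  $lookup: {
    from: "products",
    localField: "product_id", // field in the input (orders) documents
    foreignField: "id", // field in the joined (products) documents
    as: "product_mapping", // holds a single element in a one-to-one join
  },
};
```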
This example uses two collections: An order can only contain one product, so the aggregation uses a\none-to-one join to match an order document to the document for the\nproduct. The collections are joined by a field called product_id \nthat exists in documents in both collections. orders : contains documents describing individual orders\nfor products in a shop products : contains documents describing the products that\na shop sells Before you start this tutorial, complete the\n Aggregation Template App instructions to set up a working\nNode.js application. After you set up the app, access the orders and products \ncollections by adding the following code to the application: Delete any existing data and insert sample data into\nthe orders collection as shown in the following code: Delete any existing data and insert sample data into\nthe products collection as shown in the following code: To view the complete code for this tutorial, see the Completed One-to-one Join App \non GitHub. Add a $match stage that matches\norders placed in 2020: Next, add a $lookup stage. The\n $lookup stage joins the product_id field in the orders \ncollection to the id field in the products collection: Next, add two $set \nstages to the pipeline. The first $set stage sets the product_mapping field\nto the first element in the product_mapping object\ncreated in the previous $lookup stage. The second $set stage creates two new fields, product_name \nand product_category , from the values in the\n product_mapping object field: Because this is a one-to-one join, the $lookup stage\nadds only one array element to the input document. The pipeline\nuses the $first \noperator to retrieve the data from this element. Finally, add an $unset stage. The\n $unset stage removes unnecessary fields from the document: Add the following code to the end of your application to perform\nthe aggregation on the orders collection: Finally, run the following command in your shell to start your\napplication: The aggregated result contains three documents. 
The documents\nrepresent customer orders that occurred in 2020, with the\n product_name and product_category of the ordered product: The result consists of documents that contain fields from\ndocuments in the orders collection and the products \ncollection, joined by matching the product_id field present in\neach original document.", + "code": [ + { + "lang": "javascript", + "value": "const ordersColl = aggDB.collection(\"orders\");\nconst productsColl = aggDB.collection(\"products\");" + }, + { + "lang": "javascript", + "value": "await ordersColl.deleteMany({});\n\nconst orderData = [\n {\n customer_id: \"elise_smith@myemail.com\",\n orderdate: new Date(\"2020-05-30T08:35:52Z\"),\n product_id: \"a1b2c3d4\",\n value: 431.43,\n },\n {\n customer_id: \"tj@wheresmyemail.com\",\n orderdate: new Date(\"2019-05-28T19:13:32Z\"),\n product_id: \"z9y8x7w6\",\n value: 5.01,\n },\n {\n customer_id: \"oranieri@warmmail.com\",\n orderdate: new Date(\"2020-01-01T08:25:37Z\"),\n product_id: \"ff11gg22hh33\",\n value: 63.13,\n },\n {\n customer_id: \"jjones@tepidmail.com\",\n orderdate: new Date(\"2020-12-26T08:55:46Z\"),\n product_id: \"a1b2c3d4\",\n value: 429.65,\n },\n];\n\nawait ordersColl.insertMany(orderData);" + }, + { + "lang": "javascript", + "value": "await productsColl.deleteMany({});\n\nconst productData = [\n {\n id: \"a1b2c3d4\",\n name: \"Asus Laptop\",\n category: \"ELECTRONICS\",\n description: \"Good value laptop for students\",\n },\n {\n id: \"z9y8x7w6\",\n name: \"The Day Of The Triffids\",\n category: \"BOOKS\",\n description: \"Classic post-apocalyptic novel\",\n },\n {\n id: \"ff11gg22hh33\",\n name: \"Morphy Richardds Food Mixer\",\n category: \"KITCHENWARE\",\n description: \"Luxury mixer turning good cakes into great\",\n },\n {\n id: \"pqr678st\",\n name: \"Karcher Hose Set\",\n category: \"GARDEN\",\n description: \"Hose + nosels + winder for tidy storage\",\n },\n];\n\nawait productsColl.insertMany(productData);" + }, + { + "lang": "javascript", + "value": "pipeline.push({\n $match: {\n orderdate: {\n $gte: new Date(\"2020-01-01T00:00:00Z\"),\n $lt: new Date(\"2021-01-01T00:00:00Z\"),\n },\n },\n});" + }, + { + "lang": "javascript", + "value": "pipeline.push({\n $lookup: {\n from: \"products\",\n localField: \"product_id\",\n foreignField: \"id\",\n as: \"product_mapping\",\n },\n});" + }, + { + "lang": "javascript", + "value": "pipeline.push(\n {\n $set: {\n product_mapping: { $first: \"$product_mapping\" },\n },\n },\n {\n $set: {\n product_name: \"$product_mapping.name\",\n product_category: \"$product_mapping.category\",\n },\n }\n );" + }, + { + "lang": "javascript", + "value": "pipeline.push({ $unset: [\"_id\", \"product_id\", \"product_mapping\"] });" + }, + { + "lang": "bash", + "value": "node agg_tutorial.js" + }, + { + "lang": "javascript", + "value": "const aggregationResult = await ordersColl.aggregate(pipeline);" + }, + { + "lang": "javascript", + "value": "{\n customer_id: 'elise_smith@myemail.com',\n orderdate: 2020-05-30T08:35:52.000Z,\n value: 431.43,\n product_name: 'Asus Laptop',\n product_category: 'ELECTRONICS'\n}\n{\n customer_id: 'oranieri@warmmail.com',\n orderdate: 2020-01-01T08:25:37.000Z,\n value: 63.13,\n product_name: 'Morphy Richardds Food Mixer',\n product_category: 'KITCHENWARE'\n}\n{\n customer_id: 'jjones@tepidmail.com',\n orderdate: 2020-12-26T08:55:46.000Z,\n value: 429.65,\n product_name: 'Asus Laptop',\n product_category: 'ELECTRONICS'\n}" + } + ], + "preview": "In this tutorial, you can learn how to use the Node.js driver to\nconstruct 
an aggregation pipeline, perform the\naggregation on a collection, and print the results by completing and\nrunning a sample app.", + "tags": "code example, node.js, lookup, aggregation", + "facets": { + "genre": ["tutorial"], + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "aggregation-tutorials/unpack-arrays", + "title": "Unpack Arrays and Group", + "headings": [ + "Introduction", + "Aggregation Task Summary", + "Before You Get Started", + "Tutorial", + "Add an unwind stage to unpack the array of product orders", + "Add a match stage for products that cost more than $15", + "Add a group stage to group by product type", + "Add a set stage to display the product ID", + "Add an unset stage to remove unneeded fields", + "Run the aggregation pipeline", + "Interpret results" + ], + "paragraphs": "In this tutorial, you can learn how to use the Node.js driver to\nconstruct an aggregation pipeline, perform the\naggregation on a collection, and print the results by completing and\nrunning a sample app. This aggregation performs the following operations: Unwinds an array field into separate documents Matches a subset of documents by a field value Groups documents by common field values Adds computed fields to each result document This tutorial demonstrates how to create insights from customer order\ndata. The results show the list of products ordered that cost more than\n$15, and each document contains the number of units sold and the total\nsale value for each product. This example uses one collection, orders , which contains documents\ndescribing product orders. Since each order contains multiple products,\nthe first step of the aggregation is unpacking the products array\ninto individual product order documents. Before you start this tutorial, complete the\n Aggregation Template App instructions to set up a working\nNode.js application. After you set up the app, access the orders collection by adding the\nfollowing code to the application: Delete any existing data and insert sample data into\nthe orders collection as shown in the following code: To view the complete code for this tutorial, see the Completed Unpack Arrays App \non GitHub. First, add an $unwind stage to separate the\nentries in the products array into individual documents: Next, add a $match stage that matches\nproducts with a products.price value greater than 15 : Add a $group stage to group\norders by the value of the prod_id field. In this\nstage, add aggregation operations that create the\nfollowing fields in the result documents: product : the product name total_value : the total value of all the sales of the product quantity : the number of orders for the product Add a $set stage to recreate the\n product_id field from the values in the _id field\nthat were set during the $group stage: Finally, add an $unset stage. 
The\n $unset stage removes the _id field from the result\ndocuments: Add the following code to the end of your application to perform\nthe aggregation on the orders collection: Finally, run the following command in your shell to start your\napplication: The aggregation returns the following summary of customers' orders\nfrom 2020: The result documents contain details about the total value and\nquantity of orders for products that cost more than $15.", + "code": [ + { + "lang": "javascript", + "value": "const ordersColl = aggDB.collection(\"orders\");" + }, + { + "lang": "javascript", + "value": "await ordersColl.deleteMany({});\n\nconst orderData = [\n {\n order_id: 6363763262239,\n products: [\n {\n prod_id: \"abc12345\",\n name: \"Asus Laptop\",\n price: 431,\n },\n {\n prod_id: \"def45678\",\n name: \"Karcher Hose Set\",\n price: 22,\n },\n ],\n },\n {\n order_id: 1197372932325,\n products: [\n {\n prod_id: \"abc12345\",\n name: \"Asus Laptop\",\n price: 429,\n },\n ],\n },\n {\n order_id: 9812343774839,\n products: [\n {\n prod_id: \"pqr88223\",\n name: \"Morphy Richards Food Mixer\",\n price: 431,\n },\n {\n prod_id: \"def45678\",\n name: \"Karcher Hose Set\",\n price: 21,\n },\n ],\n },\n {\n order_id: 4433997244387,\n products: [\n {\n prod_id: \"def45678\",\n name: \"Karcher Hose Set\",\n price: 23,\n },\n {\n prod_id: \"jkl77336\",\n name: \"Picky Pencil Sharpener\",\n price: 1,\n },\n {\n prod_id: \"xyz11228\",\n name: \"Russell Hobbs Chrome Kettle\",\n price: 16,\n },\n ],\n },\n];\n\nawait ordersColl.insertMany(orderData);" + }, + { + "lang": "javascript", + "value": "pipeline.push({\n $unwind: {\n path: \"$products\",\n },\n});" + }, + { + "lang": "javascript", + "value": "pipeline.push({\n $match: {\n \"products.price\": {\n $gt: 15,\n },\n },\n});" + }, + { + "lang": "javascript", + "value": "pipeline.push({\n $group: {\n _id: \"$products.prod_id\",\n product: { $first: \"$products.name\" },\n total_value: { $sum: \"$products.price\" },\n quantity: { $sum: 1 },\n },\n});" + }, + { + "lang": "javascript", + "value": "pipeline.push({\n $set: {\n product_id: \"$_id\",\n },\n});" + }, + { + "lang": "javascript", + "value": "pipeline.push({ $unset: [\"_id\"] });" + }, + { + "lang": "bash", + "value": "node agg_tutorial.js" + }, + { + "lang": "javascript", + "value": "const aggregationResult = await ordersColl.aggregate(pipeline);" + }, + { + "lang": "javascript", + "value": "{\n product: 'Asus Laptop',\n total_value: 860,\n quantity: 2,\n product_id: 'abc12345'\n}\n{\n product: 'Morphy Richards Food Mixer',\n total_value: 431,\n quantity: 1,\n product_id: 'pqr88223'\n}\n{\n product: 'Russell Hobbs Chrome Kettle',\n total_value: 16,\n quantity: 1,\n product_id: 'xyz11228'\n}\n{\n product: 'Karcher Hose Set',\n total_value: 66,\n quantity: 3,\n product_id: 'def45678'\n}" + } + ], + "preview": "In this tutorial, you can learn how to use the Node.js driver to\nconstruct an aggregation pipeline, perform the\naggregation on a collection, and print the results by completing and\nrunning a sample app. 
This aggregation performs the following operations:", + "tags": "code example, node.js, analyze, array", + "facets": { + "genre": ["tutorial"], + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "aggregation-tutorials", + "title": "Aggregation Tutorials", + "headings": [ + "Overview", + "Aggregation Template App", + "Available Tutorials" + ], + "paragraphs": "Aggregation tutorials provide detailed explanations of common\naggregation tasks in a step-by-step format. The tutorials are adapted\nfrom examples in the Practical MongoDB Aggregations book by Paul Done. Each tutorial includes the following sections: At the end of each aggregation tutorial, you can find a link to a fully\nrunnable Node.js code file that you can run in your environment. Introduction , which describes the purpose and common use cases of the\naggregation type. This section also describes the example and desired\noutcome that the tutorial demonstrates. Before You Get Started , which describes the necessary databases,\ncollections, and sample data that you must have before building the\naggregation pipeline and performing the aggregation. Tutorial , which describes how to build and run the aggregation\npipeline. This section describes each stage of the completed\naggregation tutorial, and then explains how to run and interpret the\noutput of the aggregation. To learn more about performing aggregations, see the\n Aggregation guide. Before you begin following an aggregation tutorial, you must set up a\nnew Node.js app. You can use this app to connect to a MongoDB\ndeployment, insert sample data into MongoDB, and run the aggregation\npipeline in each tutorial. Once you install the driver, create a file called\n agg_tutorial.js . Paste the following code in this file to create an\napp template for the aggregation tutorials: For every tutorial, you must replace the connection string placeholder with\nyour deployment's connection string. For example, if your connection string is\n \"mongodb+srv://mongodb-example:27017\" , your connection string assignment resembles\nthe following: To run the completed file after you modify the template for a\ntutorial, run the following command in your shell: To learn how to install the driver and connect to MongoDB,\nsee the Download and Install and\n Create a MongoDB Deployment steps of the\nQuick Start guide. In the preceding code, read the code comments to find the sections of\nthe code that you must modify for the tutorial you are following. If you attempt to run the code without making any changes, you will\nencounter a connection error. To learn how to locate your deployment's connection string, see the\n Create a Connection String step of the Quick Start guide. Filtered Subset Group and Total Unpack Arrays and Group One-to-One Join Multi-Field Join", + "code": [ + { + "lang": "javascript", + "value": "const uri = \"mongodb+srv://mongodb-example:27017\";" + }, + { + "lang": "bash", + "value": "node agg_tutorial.js" + }, + { + "lang": "javascript", + "value": "const { MongoClient } = require(\"mongodb\");\n\n// Replace the placeholder with your connection string.\nconst uri = \"\";\nconst client = new MongoClient(uri);\n\nasync function run() {\n try {\n const aggDB = client.db(\"agg_tutorials_db\");\n\n // Get a reference to relevant collections.\n // ... const someColl =\n // ... const anotherColl =\n\n // Delete any existing documents in collections.\n // ... 
await someColl.deleteMany({});\n\n // Insert sample data into the collection or collections.\n // ... const someData = [ ... ];\n\n // ... await someColl.insertMany(someData);\n\n // Create an empty pipeline array.\n const pipeline = [];\n\n // Add code to create pipeline stages.\n // ... pipeline.push({ ... })\n\n // Run the aggregation.\n // ... const aggregationResult = ...\n\n // Print the aggregation results.\n for await (const document of aggregationResult) {\n console.log(document);\n }\n } finally {\n await client.close();\n }\n}\n\nrun().catch(console.dir);\n" + } + ], + "preview": "Aggregation tutorials provide detailed explanations of common\naggregation tasks in a step-by-step format. The tutorials are adapted\nfrom examples in the Practical MongoDB Aggregations book by Paul Done.", + "tags": "node.js, code example, runnable app", + "facets": { + "genre": ["reference"], + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "compatibility", + "title": "Compatibility", + "headings": [ + "MongoDB Compatibility", + "Compatibility Table Legend", + "Language Compatibility", + "Component Compatibility" + ], + "paragraphs": "The following compatibility table specifies the recommended versions of\nthe MongoDB Node.js driver for use with MongoDB. The first column lists the driver version. MongoDB ensures compatibility between the MongoDB Server and the drivers\nfor three years after the server version's end of life (EOL) date. To learn\nmore about the MongoDB release and EOL dates, see\n MongoDB Software Lifecycle Schedules . Icon Explanation \u2713 All features are supported. \u229b The Driver version will work with the MongoDB version, but not all\nnew MongoDB features are supported. No mark The Driver version is not tested with the MongoDB version. Node.js Driver Version MongoDB 7.0 MongoDB 6.0 MongoDB 5.0 MongoDB 4.4 MongoDB 4.2 MongoDB 4.0 MongoDB 3.6 MongoDB 3.4 MongoDB 3.2 MongoDB 3.0 MongoDB 2.6 6.0 to 6.8 \u2713 \u2713 \u2713 \u2713 \u2713 \u2713 \u2713 5.7 to 5.9 \u2713 \u2713 \u2713 \u2713 \u2713 \u2713 \u2713 5.0 to 5.6 \u229b \u2713 \u2713 \u2713 \u2713 \u2713 \u2713 4.8 to 4.17 \u229b \u2713 \u2713 \u2713 \u2713 \u2713 \u2713 4.2 to 4.7 \u229b \u229b \u2713 \u2713 \u2713 \u2713 \u2713 4.0 to 4.1 \u229b \u229b \u2713 \u2713 \u2713 \u2713 \u2713 \u2713 \u2713 \u2713 \u2713 3.7 \u229b \u2713 \u2713 \u2713 \u2713 \u2713 \u2713 \u2713 \u2713 \u2713 3.6 \u229b \u229b \u2713 \u2713 \u2713 \u2713 \u2713 \u2713 \u2713 \u2713 3.3 to 3.5 \u229b \u229b \u229b \u2713 \u2713 \u2713 \u2713 \u2713 \u2713 \u2713 3.1 to 3.2 \u229b \u229b \u229b \u229b \u2713 \u2713 \u2713 \u2713 \u2713 \u2713 3.0 \u2713 \u2713 \u2713 \u2713 \u2713 2.2.12 \u2713 \u2713 \u2713 \u2713 2.0.14 \u2713 \u2713 1.4.29 \u2713 \u2713 When using Node.js Driver version 3.7, you must set the useUnifiedTopology flag to true for certain features. The following compatibility table specifies the recommended versions of\nthe MongoDB Node.js driver for use with a specific version of Node.js. The first column lists the driver version. 
Node.js Driver Version Node.js v20.x.x Node.js v18.x.x Node.js v16.x.x Node.js v14.x.x Node.js v12.x.x Node.js v10.x.x Node.js v8.X.X Node.js v6.X.X Node.js v4.X.X Node.js v0.12.X Node.js v0.10.X Node.js v0.8.X 6.X \u2713 \u2713 \u2713 5.6.X to 5.9.X \u2713 \u2713 \u2713 \u2713 5.0.0 to 5.5.X \u2713 \u2713 \u2713 4.X \u2713 \u2713 \u2713 \u2713 3.X \u2713 \u2713 \u2713 \u2713 \u2713 \u2713 2.X \u2713 \u2713 \u2713 \u2713 \u2713 \u2713 >= 1.4.18 \u2713 \u2713 \u2713 1.4.X \u2713 \u2713 Versions 6.0 and later of the Node.js driver require Node.js v16.20.1 or later. The following table describes add-on component version compatibility for\nversions of the MongoDB Node.js driver. Any other combination of packages might be\nunstable. For more information on how to read the compatibility tables, see our guide\nabout MongoDB Compatibility Tables . Component Node.js Driver v6.x Node.js Driver v5.x Node.js Driver v4.x Node.js Driver v3.x bson ^6.0.0 ^5.0.0 ^4.0.0 ^1.0.0 bson-ext ^4.0.0 ^1.0.0 or ^2.0.0 kerberos ^2.0.1 ^1.0.0 or ^2.0.0 ^1.0.0 or ^2.0.0 ^1.0.0 mongodb-client-encryption ^6.0.0 ^2.3.0 ^1.0.0 or ^2.0.0 ^1.0.0 mongodb-legacy ^6.0.0 ^5.0.0 ^4.0.0 @mongodb-js/zstd ^1.1.0 ^1.0.0 ^1.0.0", + "code": [], + "preview": "Find the recommended versions of the Node.js driver that work with your version of MongoDB.", + "tags": "node.js", + "facets": { + "genre": ["reference"], + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "connection-troubleshooting", + "title": "Connection Troubleshooting", + "headings": [ + "Connection Error", + "Check Your Connection String", + "Configure Your Firewall", + "ECONNREFUSED Error", + "Ensure MongoDB and Your Client Use the Same Protocol", + "ECONNRESET Error", + "Control the Number of File Descriptors", + "Authentication Error", + "Check Your Connection String", + "Verify the User Is in the Authentication Database", + "Error Sending Message", + "Check the User Permissions", + "Configure Your Firewall", + "Check the Number of Connections", + "Too Many Open Connections", + "Check the Number of Connections", + "Timeout Error", + "Set connectTimeoutMS", + "Check the Number of Connections" + ], + "paragraphs": "This page offers potential solutions to issues you might encounter when\nusing the MongoDB Node.js driver to connect to a MongoDB deployment. This page addresses only connection issues. If you encounter any other issues\nwith MongoDB or the driver, visit the following resources: The Frequently Asked Questions (FAQ) for the\nNode.js driver The Issues & Help page, which has\ninformation about reporting bugs, contributing to the driver, and\nfinding more resources The MongoDB Community Forums for\nquestions, discussions, or general technical support The following error message indicates that the driver cannot connect to a server\non the specified hostname or port. Multiple situations can generate this error\nmessage. In this sample error message, the hostname is 127.0.0.1 and the\nport is 27017 : The following sections describe actions you can take to potentially resolve the\nissue. Verify that the hostname and port number in the connection string are both\naccurate. The default port value for a MongoDB instance is\n 27017 , but you can configure MongoDB to communicate on another port. Verify that the ports your MongoDB deployment listens on are not blocked by a\nfirewall on the same network. MongoDB uses port 27017 by default. 
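If your deployment communicates on a port other than 27017, that port must appear in the connection string. The following is a minimal sketch; the host and port are placeholder values:

```javascript
// Sketch only: connect to a deployment that listens on a non-default
// port. Replace the placeholder host and port with your own values.
const { MongoClient } = require("mongodb");

const client = new MongoClient("mongodb://203.0.113.10:47017");
```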
To learn\nmore about the default ports MongoDB uses and how to change them, see\n Default MongoDB Port . Do not open a port in your firewall unless you are sure it's the port\nused by your MongoDB deployment. If the connection is refused when the driver attempts to connect to the MongoDB\ninstance, it generates this error message: The following sections describe actions you can take to potentially resolve the\nissue. In Node.js v17 and later, the DNS resolver uses IPv6 by default when both\nthe client and host support both. For example, if MongoDB uses IPv4 and your\nclient uses IPv6, the driver returns the previous error message. You can configure your MongoDB deployment to use IPv6 mode when starting\nwith mongod or mongos . For more information about how to specify\n IPv6 mode, see\n IP Binding in the server\nmanual. As an alternative, you can explicitly use IPv4 with your client by\nspecifying family: 4 as an\n option to your MongoClient . If the connection is reset when the driver calls client.connect() , it\ngenerates this error message: The following section describes a method that may help resolve the issue. A file descriptor is a unique identifier associated with an open process. In most\noperating systems, each open connection from the driver is associated with a\nfile descriptor. Operating systems typically have a limit on the number of file\ndescriptors used by a single process. An ECONNRESET error can occur\nif the number of connections exceeds this limit. You can set the maximum number of connections by setting maxPoolSize . To\nresolve this error, you can decrease the number of maximum allowed connections\nby setting the value of maxPoolSize . Alternatively, you could increase the\nfile descriptor limit in your operating system. Always be cautious when changing the configuration of your operating system. The Node.js driver can fail to connect to a MongoDB instance if\nthe authorization is not configured correctly. If you are using SCRAM-SHA-256 \nfor authentication and the driver fails to connect, the driver might raise an\nerror message similar to one of the following messages: The following sections describe actions you can take to potentially resolve the\nissue. An invalid connection string is the most common cause of authentication\nissues when attempting to connect to MongoDB using SCRAM-SHA-256 . If your connection string contains a username and password, ensure that they\nare in the correct format. If the username or password includes any of the\nfollowing characters, they must be\n percent encoded : The following example shows how to percent encode \"#MyP@assword?\": This results in the following output: For more information about connection strings,\nsee Connection URI in the Connection Guide. To successfully authenticate a connection by using a username and password with\n SCRAM-SHA-256 , the username must be defined in the authentication database.\nThe default authentication database is the admin database. To use a different\ndatabase for authentication, specify the authSource in the connection string.\nThe following example instructs the driver to use users as the authentication\ndatabase: You can check if this is the issue by attempting to connect to a MongoDB\ninstance hosted on the local machine with the same code. A deployment on\nthe same machine doesn't require any authorization to connect. 
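One way to run that check is a short script that connects without credentials and sends a ping command. The following is a minimal sketch, assuming a deployment on the default localhost port with authorization disabled:

```javascript
// Sketch only: confirm connectivity to a local deployment that does
// not require authentication.
const { MongoClient } = require("mongodb");

async function pingLocal() {
  const client = new MongoClient("mongodb://127.0.0.1:27017");
  try {
    await client.db("admin").command({ ping: 1 });
    console.log("Connected without credentials");
  } finally {
    await client.close();
  }
}

pingLocal().catch(console.dir);
```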
When the driver fails to send a command after you make a request,\nit may display the following error message: The following sections describe actions you can take to potentially resolve the\nissue. Verify that you've accessed the MongoDB deployment with the correct user. The\nterm \"message\" in the error can be a command sent by the driver.\nIf you are using a user that doesn't have permissions to send the command, the\ndriver could generate this error. Also ensure that the user has the appropriate permissions for the message you\nare sending. MongoDB uses Role-Based Access Control (RBAC) to control access\nto a MongoDB deployment. For more information about how to configure RBAC in MongoDB,\nsee Role-Based Access Control . The firewall needs to have an open port for communicating with the MongoDB\ninstance. For more information about configuring the firewall, see\n Configure Your Firewall in\nthe Connection Error section. Each MongoClient instance supports a maximum number of concurrent open\nconnections in its connection pool. You can configure the parameter maxPoolSize \nwhich defines this limit. The default value is 100 . If there are already a\nnumber of open connections equal to maxPoolSize , the driver waits until\na connection becomes available. If this wait time exceeds the maxIdleTimeMS \nvalue, the driver responds with an error. For more information about how connection pooling works, see\n How Does Connection Pooling Work in the Node Driver? \nin the FAQ. The driver creates the following error message when it attempts to open a\nconnection, but it's reached the maximum number of connections: The following section describes a method that may help resolve the issue. To create more open connections, increase the value of maxPoolSize . For more\ninformation about checking the number of connections, see\n Check the Number of Connections \nin the Error Sending Message section. When the network is not able to deliver a request from the driver to the server\nquickly enough, it can time out. When this happens, you might receive an error message\nsimilar to the following message: If you receive this error, try the following action to resolve the\nissue. The driver may hang when it's unable to establish a connection because it\ntakes too long attempting to reach unreachable replica set nodes. You can limit the\ntime the driver spends attempting to establish the connection by using the\n connectTimeoutMS setting. To learn more about this setting, see the\n Timeout Options in\nthe Server manual. Ensure the connectTimeoutMS setting is not lower than\nthe highest network latency you have for a member of the set. If one of the\nsecondary members has a latency of 10000 milliseconds, setting the\n connectTimeoutMS to 9000 prevents the driver from ever connecting to that\nmember. The following example sets connectTimeoutMS to 10000 milliseconds. The number of connections to the server may exceed maxPoolSize. 
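One way to observe connection counts from the application side is the driver's connection pool monitoring (CMAP) events. The following sketch listens for check-out and check-in events; the counter logic is illustrative:

```javascript
// Sketch only: track how many connections are currently checked out of
// the pool by listening to connection pool monitoring events.
const { MongoClient } = require("mongodb");

const client = new MongoClient("<connection string>");
let checkedOut = 0;

client.on("connectionCheckedOut", () => {
  checkedOut += 1;
  console.log(`Connections in use: ${checkedOut}`);
});
client.on("connectionCheckedIn", () => {
  checkedOut -= 1;
});
```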
For more\ninformation about checking the number of connections, see\n Check the Number of Connections \nin the Error Sending Message section.", + "code": [ + { + "lang": "none", + "value": "Error: couldn't connect to server 127.0.0.1:27017" + }, + { + "lang": "none", + "value": "MongoServerSelectionError: connect ECONNREFUSED :" + }, + { + "lang": "js", + "value": "const client = new MongoClient(uri, {\n family: 4,\n});" + }, + { + "lang": "none", + "value": "MongoServerSelectionError: connect ECONNRESET :::" + }, + { + "lang": "none", + "value": "Command failed with error 18 (AuthenticationFailed): 'Authentication\nfailed.' on server :." + }, + { + "lang": "none", + "value": "connection() error occurred during connection handshake: auth error:\nsasl conversation error: unable to authenticate using mechanism\n\"SCRAM-SHA-256\": (AuthenticationFailed) Authentication failed." + }, + { + "lang": "none", + "value": ": / ? # [ ] @" + }, + { + "lang": "javascript", + "value": "console.log(encodeURIComponent('#MyP@assword?'));" + }, + { + "lang": "none", + "value": "\"%23MyP%40assword%3F\"" + }, + { + "lang": "javascript", + "value": "const { MongoClient } = require(\"mongodb\");\nconst uri = \"mongodb://:@:/?authSource=users\";\nconst client = new MongoClient(uri);" + }, + { + "lang": "none", + "value": "com.mongodb.MongoSocketWriteException: Exception sending message" + }, + { + "lang": "none", + "value": "connection refused because too many open connections" + }, + { + "lang": "none", + "value": "timed out while checking out a connection from connection pool: context canceled" + }, + { + "lang": "javascript", + "value": "const client = new MongoClient(uri, {\n connectTimeoutMS: 10000,\n});" + } + ], + "preview": "This page offers potential solutions to issues you might encounter when\nusing the MongoDB Node.js driver to connect to a MongoDB deployment.", + "tags": "code example, node.js, disconnected, help", + "facets": { + "genre": ["reference"], + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "faq", + "title": "FAQ", + "headings": [ + "Why Am I Getting Errors While Connecting to MongoDB?", + "How Does Connection Pooling Work in the Node Driver?", + "What Is the Difference Between \"connectTimeoutMS\", \"socketTimeoutMS\" and \"maxTimeMS\"?", + "What Happens to Running Operations if the Client Disconnects?", + "How Can I Confirm That the Driver Closed Unusable Sockets?", + "How Can I Prevent Sockets From Timing Out Before They Become Active?", + "What Does a Value of \"0\" Mean for \"connectTimeoutMS\" and \"socketTimeoutMS\"?", + "How Can I Prevent Long-Running Operations From Slowing Down the Server?", + "What Does the keepAlive Option Do?", + "What Can I Do If I'm Experiencing Unexpected Network Behavior?", + "How Can I Prevent a Slow Operation From Delaying Other Operations?", + "How Can I Ensure my Connection String Is Valid for a Replica Set?" + ], + "paragraphs": "This page contains frequently asked questions and their corresponding answers. If you can't find an answer to your problem on this page,\nsee the Issues & Help page for next steps and more\nresources. If you have trouble connecting to a MongoDB deployment, see\nthe Connection Troubleshooting Guide \nfor possible solutions. Every MongoClient instance has a built-in connection pool for each server\nin your MongoDB topology. Connection pools open sockets on demand to\nsupport concurrent requests to MongoDB in your application. 
The maximum size of each connection pool is set by the maxPoolSize option, which\ndefaults to 100 . If the number of in-use connections to a server reaches\nthe value of maxPoolSize , the next request to that server will wait\nuntil a connection becomes available. In addition to the sockets needed to support your application's requests,\neach MongoClient instance opens two more sockets per server\nin your MongoDB topology for monitoring the server's state.\nFor example, a client connected to a three-node replica set opens six\nmonitoring sockets. If the application uses the default setting for\n maxPoolSize and only queries the primary (default) node, then\nthere can be at most 106 total connections in the connection pool. If the\napplication uses a read preference to query the\nsecondary nodes, those connection pools grow and there can be\n 306 total connections. To support high numbers of concurrent MongoDB requests\nwithin one process, you can increase maxPoolSize . Connection pools are rate-limited. The maxConnecting option\ndetermines the number of connections that the pool can create in\nparallel at any time. For example, if the value of maxConnecting is\n 2 , the third request that attempts to concurrently check out a\nconnection succeeds only when one of the following cases occurs: You can set the minimum number of concurrent connections to\neach server with the minPoolSize option, which defaults to 0 .\nThe driver initializes the connection pool with this number of sockets. If\nsockets are closed, causing the total number\nof sockets (both in use and idle) to drop below the minimum, more\nsockets are opened until the minimum is reached. You can set the maximum number of milliseconds that a connection can\nremain idle in the pool by setting the maxIdleTimeMS option.\nOnce a connection has been idle for maxIdleTimeMS , the connection\npool removes and replaces it. This option defaults to 0 (no limit). The following default configuration for a MongoClient works for most\napplications: MongoClient supports multiple concurrent requests. For each process,\ncreate a client and reuse it for all operations in a process. This\npractice is more efficient than creating a client for each request. The driver does not limit the number of requests that\ncan wait for sockets to become available, and it is the application's\nresponsibility to limit the size of its pool to bound queuing\nduring a load spike. Requests wait for the amount of time specified in\nthe waitQueueTimeoutMS option, which defaults to 0 (no limit). A request that waits more than the length of time defined by\n waitQueueTimeoutMS for a socket raises a connection error. Use this\noption if it is more important to bound the duration of operations\nduring a load spike than it is to complete every operation. When MongoClient.close() is called by any request, the driver\ncloses all idle sockets and closes all sockets that are in\nuse as they are returned to the pool. Calling MongoClient.close() \ncloses only inactive sockets, so you cannot interrupt or terminate\nany ongoing operations by using this method. The driver closes these\nsockets only when the process completes. The connection pool finishes creating a connection and there are fewer\nthan maxPoolSize connections in the pool. An existing connection is checked back into the pool. The driver's ability to reuse existing connections improves due to\nrate-limits on connection creation. 
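Taken together, the pool-related options described above are set on the client constructor. The following sketch uses illustrative values, not recommendations:

```javascript
// Sketch only: pool-related options discussed above, with example values.
const { MongoClient } = require("mongodb");

const client = new MongoClient("<connection string>", {
  maxPoolSize: 100, // maximum concurrent connections per server
  minPoolSize: 0, // connections the pool keeps open even when idle
  maxConnecting: 2, // connections the pool may establish in parallel
  maxIdleTimeMS: 0, // 0 means idle connections are never removed
});
```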
To specify the optional settings for your MongoClient , declare one or\nmore available settings in the options object of the constructor as\nfollows: To see all the available settings, see the\n MongoClientOptions \nAPI Documentation. To specify maxTimeMS , chain the maxTimeMS() method with a\ntimeout specification to an operation that returns a Cursor : Setting Description connectTimeoutMS connectTimeoutMS is a connection option that sets the time, in milliseconds,\nfor an individual connection from your connection pool to\nestablish a TCP connection to the MongoDB Server before\ntiming out. Default: 30000 To modify the allowed time for MongoClient.connect to establish a\nconnection to a MongoDB Server, use the serverSelectionTimeoutMS option instead. socketTimeoutMS socketTimeoutMS specifies the amount of time the driver waits\nfor an inactive socket before closing it. The default value is to\nnever time out the socket. This option applies only to sockets that\nhave already been connected. maxTimeMS maxTimeMS \nspecifies the maximum amount of time that the server\nwaits for an operation to complete after it has reached the\nserver. If an operation runs over the specified time limit, it\nreturns a timeout error. You can pass maxTimeMS only to an\nindividual operation or to a cursor. Starting in MongoDB Server version 4.2, the server terminates\nrunning operations such as aggregations and find operations if the\nclient disconnects. To see a full list of operations affected by this\nbehavior, see the Server version 4.2 release notes in the Server manual. Other operations, such as write operations, continue to run on the\nMongoDB Server even if the client disconnects. This behavior can cause data\ninconsistencies if your application retries the operation after the\nclient disconnects. If you experience unexpected network behavior or if a MongoDB process\nfails with an error, you may not receive confirmation that the\ndriver correctly closed the corresponding socket. To make sure that the driver correctly closes the socket in these cases,\nset the socketTimeoutMS option. When a MongoDB process times out, the driver\nwill close the socket. We recommend that you select a value\nfor socketTimeoutMS that is two to three times longer than the\nexpected duration of the slowest operation that your application executes. Having a large connection pool does not always reduce reconnection\nrequests. Consider the following example: An application has a connection pool size of 5 sockets and has the\n socketTimeoutMS option set to 5000 milliseconds. Operations occur,\non average, every 3000 milliseconds, and reconnection requests are\nfrequent. Each socket times out after 5000 milliseconds, which means\nthat all sockets must do something during those 5000 milliseconds to\navoid closing. One message every 3000 milliseconds is not enough to keep the sockets\nactive, so several of the sockets will time out after 5000 milliseconds.\nTo avoid excessive socket timeouts, reduce the number of connections\nthat the driver can maintain in the connection pool by specifying the\n maxPoolSize option. To specify the optional maxPoolSize setting for your MongoClient , declare\nit in the options object of the constructor as follows: If you set the value of connectTimeoutMS or socketTimeoutMS to\n 0 , your application will use the operating system's default socket\ntimeout value. You can prevent long-running operations from slowing down the server by\nspecifying a timeout value. 
You can chain the maxTimeMS() method to\nan operation that returns a Cursor to set a timeout on a specific action. The following example shows how you can chain the maxTimeMS() method\nto an operation that returns a Cursor : The keepAlive connection option specifies whether to enable\n Transmission Control Protocol (TCP) keepalives on a TCP socket. If you enable keepalives,\nthe driver checks whether the connection is active by sending periodic pings\nto your MongoDB deployment. This functionality only works if your\noperating system supports the SO_KEEPALIVE socket option. The keepAliveInitialDelay option specifies the number of\nmilliseconds that the driver waits before initiating a keepalive. The 5.3 driver version release deprecated these options. Starting in\nversion 6.0 of the driver, the keepAlive option is permanently set\nto true , and the keepAliveInitialDelay is set to 300000\nmilliseconds (300 seconds). If your firewall ignores or drops the keepalive messages, you might\nnot be able to identify dropped connections. You might experience unexpected network behavior if the firewall between\nyour application and MongoDB is misconfigured. These firewalls can be\noverly aggressive in their removal of connections, which can lead to\nunexpected errors. Confirm that your firewall exhibits the following behavior: The firewall sends a FIN packet when closing a connection,\ninforming the driver that the socket is closed. The firewall allows keepalive messages. To learn more about keepalive messages, see the What Does the\nkeepAlive Option Do? FAQ entry. When you use the same MongoClient instance to run multiple MongoDB\noperations concurrently, a slow operation can cause delays to other\noperations. Slow operations keep a connection to MongoDB occupied,\nwhich can cause other operations to wait until an additional connection\nbecomes available. If you suspect that slow MongoDB operations are causing delays, you\ncan check the performance of all in-progress operations by using the\nfollowing methods: After you determine which operations are causing delays, try to improve\nthe performance of these operations. Read the Best Practices\nGuide for MongoDB Performance for possible solutions. If you implement performance best practices but still\nexperience delays, you can modify your connection settings to increase\nthe size of the connection pool. A connection pool is the group of\nconnections to the server that the driver maintains at any time. To specify the maximum size of a\nconnection pool, you can set the maxPoolSize option in the\n connection options for your\n MongoClient instance. The default value\nof maxPoolSize is 100 . If the number of in-use connections to a\nserver reaches maxPoolSize , the next operation sent to the server\npauses until a connection to the driver becomes available. The following\ncode sets maxPoolSize to 150 when creating a new MongoClient : Enable the database profiler on your deployment. To learn more, see\n Database Profiler \nin the Server manual. Run the db.currentOp() MongoDB Shell command. To learn more, see the\n db.currentOp() \ndocumentation in the Server manual. Enable connection pool monitoring. To learn more, see\n Connection Pool Monitoring . To learn more about connection pooling, see the How Does Connection\nPooling Work in the Node Driver? FAQ entry. 
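For the db.currentOp() item in the list above, an equivalent check can be run from the driver by sending the currentOp command to the admin database. The following is a minimal sketch, assuming a connected client and an enclosing async function:

```javascript
// Sketch only: list in-progress operations from the driver. Assumes
// `client` is a connected MongoClient and this runs in an async context.
const result = await client.db("admin").command({ currentOp: true });
console.log(`${result.inprog.length} operations in progress`);
```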
The connection string passed to the driver must use exact hostnames for\nthe servers as set in the Replica Set Config .\nGiven the following configuration settings for your Replica Set, in\norder for the Replica Set discovery and failover to work, the driver must have access\nto server1 , server2 , and server3 . If you are unable to find the answer to your question here, try our forums and\nsupport channels listed in the Issues and Help \nsection.", + "code": [ + { + "lang": "js", + "value": "const client = new MongoClient(\"\");" + }, + { + "lang": "javascript", + "value": "const client = new MongoClient(uri, {\n connectTimeoutMS: ,\n socketTimeoutMS: \n});" + }, + { + "lang": "javascript", + "value": "const cursor = myColl.find({}).maxTimeMS(50);" + }, + { + "lang": "javascript", + "value": "const client = new MongoClient(uri, {\n maxPoolSize: ,\n});" + }, + { + "lang": "javascript", + "value": "// Execute a find command\nawait collection\n .find({ $where: \"sleep(100) || true\" })\n .maxTimeMS(50);\n" + }, + { + "lang": "js", + "value": "const client = new MongoClient(uri, { maxPoolSize: 150 });" + }, + { + "lang": "JSON", + "value": "{\n \"_id\": \"testSet\",\n \"version\": 1,\n \"protocolVersion\": 1,\n \"members\": [\n {\n \"_id\": 1,\n \"host\": \"server1:31000\"\n },\n {\n \"_id\": 2,\n \"host\": \"server2:31001\"\n },\n {\n \"_id\": 3,\n \"host\": \"server3:31002\"\n }\n ]\n}" + } + ], + "preview": "This page contains frequently asked questions and their corresponding answers.", + "tags": null, + "facets": { + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "fundamentals/aggregation", + "title": "Aggregation", + "headings": [ + "Overview", + "Analogy", + "Comparing Aggregation and Query Operations", + "References", + "Runnable Examples", + "Aggregation Example", + "Additional Examples" + ], + "paragraphs": "In this guide, you can learn how to use aggregation operations in\nthe MongoDB Node.js driver. Aggregation operations are expressions you can use to produce reduced\nand summarized results in MongoDB. MongoDB's aggregation framework\nallows you to create a pipeline that consists of one or more stages,\neach of which performs a specific operation on your data. You can think of the aggregation pipeline as similar to an automobile factory.\nAutomobile manufacturing requires the use of assembly stations organized\ninto assembly lines. Each station has specialized tools, such as\ndrills and welders. The factory transforms and\nassembles the initial parts and materials into finished products. The aggregation pipeline is the assembly line, aggregation\nstages are the assembly stations, and expression operators are the\nspecialized tools. Using query operations, such as the find() method, you can perform the following actions: Using aggregation operations, you can perform the following actions: Aggregation operations have some limitations : Select which documents to return Select which fields to return Sort the results Perform all query operations Rename fields Calculate fields Summarize data Group values Returned documents must not violate the BSON-document size limit \nof 16 megabytes. Pipeline stages have a memory limit of 100 megabytes by default. You can exceed this\nlimit by setting the allowDiskUse property of AggregateOptions to true . See\nthe AggregateOptions API documentation \nfor more details. The $graphLookup stage has a strict\nmemory limit of 100 megabytes and will ignore allowDiskUse . 
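As the limitations above note, pipelines that exceed the per-stage memory limit can opt in to temporary disk use. The following is a minimal sketch, assuming coll is an existing collection object:

```javascript
// Sketch only: allow memory-intensive stages to write temporary files
// to disk. `coll` is assumed to be an existing collection object.
const pipeline = [/* aggregation stages */];
const aggCursor = coll.aggregate(pipeline, { allowDiskUse: true });
```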
To view a full list of expression operators, see Aggregation\nOperators in the Server manual. To learn about assembling an aggregation pipeline and view examples, see\n Aggregation Pipeline in the\nServer manual. To learn more about creating pipeline stages, see Aggregation\nStages in the Server manual. The example uses sample data about restaurants. The following code\ninserts data into the restaurants collection of the aggregation \ndatabase: For more information on connecting to your MongoDB deployment, see the Connection Guide . To perform an aggregation, pass a list of aggregation stages to the\n collection.aggregate() method. In the example, the aggregation pipeline uses the following aggregation stages: This example produces the following output: For more information, see the aggregate() API documentation . A $match stage to filter for documents whose\n categories array field contains the element Bakery . A $group stage to group the matching documents by the stars \nfield, accumulating a count of documents for each distinct value of stars . To view step-by-step explanations of common aggregation tasks, see the\n Aggregation Tutorials . You can find another aggregation pipeline example in the Aggregation\nFramework with Node.js Tutorial \nblog post on the MongoDB website.", + "code": [ + { + "lang": "javascript", + "value": "const db = client.db(\"aggregation\");\nconst coll = db.collection(\"restaurants\");\n\n// Create sample documents\nconst docs = [\n { stars: 3, categories: [\"Bakery\", \"Sandwiches\"], name: \"Rising Sun Bakery\" },\n { stars: 4, categories: [\"Bakery\", \"Cafe\", \"Bar\"], name: \"Cafe au Late\" },\n { stars: 5, categories: [\"Coffee\", \"Bakery\"], name: \"Liz's Coffee Bar\" },\n { stars: 3, categories: [\"Steak\", \"Seafood\"], name: \"Oak Steakhouse\" },\n { stars: 4, categories: [\"Bakery\", \"Dessert\"], name: \"Petit Cookie\" },\n];\n\n// Insert documents into the restaurants collection\nconst result = await coll.insertMany(docs);" + }, + { + "lang": "json", + "value": "{ _id: 4, count: 2 }\n{ _id: 3, count: 1 }\n{ _id: 5, count: 1 }" + }, + { + "lang": "javascript", + "value": "// Define an aggregation pipeline with a match stage and a group stage\nconst pipeline = [\n { $match: { categories: \"Bakery\" } },\n { $group: { _id: \"$stars\", count: { $sum: 1 } } }\n];\n\n// Execute the aggregation\nconst aggCursor = coll.aggregate(pipeline);\n\n// Print the aggregated results\nfor await (const doc of aggCursor) {\n console.log(doc);\n}" + } + ], + "preview": "In this guide, you can learn how to use aggregation operations in\nthe MongoDB Node.js driver.", + "tags": null, + "facets": { + "genre": ["tutorial"], + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "fundamentals/authentication/enterprise-mechanisms", + "title": "Enterprise Authentication Mechanisms", + "headings": [ + "Kerberos (GSSAPI/SSPI)", + "LDAP (PLAIN)", + "MONGODB-OIDC", + "Azure IMDS", + "GCP IMDS", + "Custom Callback", + "API Documentation" + ], + "paragraphs": "In this guide, you can find sample code for connection to MongoDB with each\nauthentication mechanism available in the MongoDB Enterprise Edition:\n Kerberos (GSSAPI/SSPI) , LDAP (PLAIN) , and MONGODB-OIDC . The GSSAPI authentication mechanism uses your user principal to\nauthenticate to a Kerberos service. 
You can specify this authentication mechanism by performing the\nfollowing actions while specifying options on your\n connection string : The following code sample authenticates to Kerberos for UNIX using GSSAPI . The Node.js driver supports Kerberos on UNIX using the MIT Kerberos library\nand on Windows using the SSPI API. Set the authMechanism parameter to GSSAPI . Set the SERVICE_NAME value in the authMechanismProperties \nparameter if using a value other than mongodb . Specify a SERVICE_REALM value in the authMechanismProperties \nparameter if a custom service realm is required. Specify a CANONICALIZE_HOST_NAME value in the authMechanismProperties \nparameter if canonicalization of the hostname is required. This property can take\nthe following values: none : (Default) Does not perform hostname canonicalization forward : Performs a forward DNS lookup to canonicalize the hostname forwardAndReverse : Performs a forward DNS lookup and then a\nreverse lookup on that value to canonicalize the hostname The gssapiServiceName parameter is deprecated and may be removed\nin future versions of the driver. Use\n authMechanismProperties=SERVICE_NAME: in the\nconnection URI instead.\nSee the\n authMechanismProperties \nparameter documentation for more information. Always URI encode the principal using the encodeURIComponent method\nto ensure it is correctly parsed. The method refers to the GSSAPI authentication mechanism instead\nof Kerberos because the driver authenticates through\n GSSAPI RFC-4652 , the SASL\nmechanism. The PLAIN authentication mechanism uses your username and password to\nauthenticate to a Lightweight Directory Access Protocol (LDAP) server. You can specify this authentication mechanism by setting the authMechanism \nparameter to PLAIN and including your LDAP username and password in the\n connection string as shown\nin the following sample code. The authentication mechanism is named PLAIN instead of LDAP since it\nauthenticates using the PLAIN Simple Authentication and Security Layer\n(SASL) defined in RFC-4616 . The following sections describe how to use the MONGODB-OIDC authentication mechanism to\nauthenticate from various platforms. For more information about the MONGODB-OIDC authentication mechanism, see\n OpenID Connect Authentication and\n MongoDB Server Parameters \nin the MongoDB Server manual. The MONGODB-OIDC authentication mechanism requires MongoDB Server v7.0 or later running\non a Linux platform. If your application runs on an Azure VM, or otherwise uses the\n Azure Instance Metadata Service \n(IMDS), you can authenticate to MongoDB by using the Node.js driver's built-in Azure\nsupport. To specify Azure IMDS OIDC as the authentication mechanism, set the following options\nin your connection string: The following code example shows how to set the preceding connection options: username : If you're using an Azure managed identity, set this to the client ID\nof the managed identity. If you're using a service principal to represent an\nenterprise application, set this to the application ID of the service principal.\nOtherwise, omit this option. authMechanism : Set to MONGODB-OIDC . authMechanismProperties : Set to\n ENVIRONMENT:azure,TOKEN_RESOURCE: .\nReplace the placeholder with the\nvalue of the audience parameter configured on your MongoDB deployment. If your application runs on a Google Compute Engine VM, or otherwise uses the\n GCP Instance Metadata Service ,\nyou can authenticate to MongoDB by using the Node.js driver's built-in GCP\nsupport. 
To specify GCP IMDS OIDC as the authentication mechanism, set the following options\nin your connection string: The following code example shows how to set the preceding connection options: authMechanism : Set to MONGODB-OIDC . authMechanismProperties : Set to\n ENVIRONMENT:gcp,TOKEN_RESOURCE: .\nReplace the placeholder with the\nvalue of the audience parameter configured on your MongoDB deployment. The Node.js driver doesn't offer built-in support for all platforms, including\nAzure Functions and Azure Kubernetes Service (AKS). Instead, you\nmust define a custom callback to use OIDC to authenticate from these platforms. First, define a function that retrieves the access token to use for OIDC authentication.\nThis function must have the following signature: The OIDCCallbackParams parameter contains the following properties, which you can\naccess inside the function: The callback function must return an OIDCResponse object. This object contains the\nfollowing properties: The following example shows a callback function that retrieves an OIDC access token\nfrom a file named access-token.dat in the local file system: After you define your callback function, pass it to the MongoClient constructor\nas part of the authMechanismProperties parameter. The Node.js driver supports\nthe following authentication patterns: Property Value timeoutContext An AbortSignal that aborts the authentication workflow after 30 seconds version The current OIDC API version idpInfo The identity-provider information returned from the server username The username included in the connection string, if any refreshToken The refresh token to request a new access token from the issuer, if any Property Value accessToken The access token to use for authentication. expiresInSeconds Optional. The number of seconds until the access token expires. refreshToken Optional. The refresh token to request a new access token from the issuer. Machine authentication: Used by web services and other applications that require\nno human interaction. Select the Machine Callback tab to see an example of\nthis syntax. Human authentication: Used by database tools, command-line utilities, and other\napplications that involve direct human interaction. Select the Human Callback \ntab to see an example of this syntax. 
For machine authentication, assign the callback function to the\n authMechanismProperties.OIDC_CALLBACK property, as shown in the following\nexample: For human authentication, assign the callback function to the\n authMechanismProperties.OIDC_HUMAN_CALLBACK property, as shown in the following\nexample: To learn more about the methods and types discussed in this\nguide, see the following API documentation: MongoClient OIDCCallbackParams OIDCResponse", + "code": [ + { + "lang": "js", + "value": "const { MongoClient } = require(\"mongodb\");\n\n// specify the placeholder values for your environment in the following lines\nconst clusterUrl = \"\";\nconst principal = encodeURIComponent(\"\");\nconst serviceRealm = \"\";\nconst canonicalizationSetting = \"\";\nconst authMechanismProperties = `SERVICE_REALM:${serviceRealm},CANONICALIZE_HOST_NAME:${canonicalizationSetting}`;\n\nconst authMechanism = \"GSSAPI\";\n\n// Connection URI\nconst uri = `mongodb+srv://${principal}@${clusterUrl}/?authMechanism=${authMechanism}&authMechanismProperties=${authMechanismProperties}`;\n\nconst client = new MongoClient(uri);\n\n// Function to connect to the server\nasync function run() {\n try {\n // Establish and verify connection\n await client.db(\"admin\").command({ ping: 1 });\n console.log(\"Connected successfully to server\");\n } finally {\n // Ensures that the client will close when you finish/error\n await client.close();\n }\n}\nrun().catch(console.dir);" + }, + { + "lang": "js", + "value": "const { MongoClient } = require(\"mongodb\");\n\n// specify the placeholder values for your environment in the following lines\nconst clusterUrl = \"\";\nconst ldapUsername = \"\";\nconst ldapPassword = \"\";\nconst authMechanism = \"PLAIN\";\n\n// Connection URI\nconst uri = `mongodb+srv://${ldapUsername}:${ldapPassword}@${clusterUrl}/?authMechanism=${authMechanism}`;\n\nconst client = new MongoClient(uri);\n\n// Function to connect to the server\nasync function run() {\n try {\n // Establish and verify connection\n await client.db(\"admin\").command({ ping: 1 });\n console.log(\"Connected successfully to server\");\n } finally {\n // Ensures that the client will close when you finish/error\n await client.close();\n }\n}\nrun().catch(console.dir);" + }, + { + "lang": "js", + "value": "const { MongoClient } = require(\"mongodb\");\n\nconst uri = \"mongodb+srv://@:/?authMechanism=MONGODB-OIDC\"\n + \"&authMechanismProperties=ENVIRONMENT:azure,TOKEN_RESOURCE:\";\nconst client = new MongoClient(uri);" + }, + { + "lang": "js", + "value": "const { MongoClient } = require(\"mongodb\");\n\nconst uri = \"mongodb+srv://:/?authMechanism=MONGODB-OIDC\"\n + \"&authMechanismProperties=ENVIRONMENT:gcp,TOKEN_RESOURCE:\";\nconst client = new MongoClient(uri);" + }, + { + "lang": "js", + "value": "const myCallback = (params: OIDCCallbackParams): Promise => { }" + }, + { + "lang": "js", + "value": "const fs = require(\"node:fs\");\n\nconst myCallback = (params: OIDCCallbackParams): Promise => {\n const token = fs.readFileSync(\"access-token.dat\", \"utf8\");\n\n return {\n accessToken: token,\n expiresInSeconds: 300,\n refreshToken: token\n };\n}" + }, + { + "lang": "js", + "value": "const { MongoClient } = require(\"mongodb\");\n\nconst uri = \"mongodb+srv://:/?authMechanism=MONGODB-OIDC\";\nconst client = new MongoClient(uri, {\n authMechanismProperties: {\n OIDC_CALLBACK: myCallback\n }\n});" + }, + { + "lang": "js", + "value": "const { MongoClient } = require(\"mongodb\");\n\nconst uri = 
\"mongodb+srv://:/?authMechanism=MONGODB-OIDC\";\nconst client = new MongoClient(uri, {\n authMechanismProperties: {\n OIDC_HUMAN_CALLBACK: myCallback\n }\n});" + } + ], + "preview": "In this guide, you can find sample code for connection to MongoDB with each\nauthentication mechanism available in the MongoDB Enterprise Edition:\nKerberos (GSSAPI/SSPI), LDAP (PLAIN), and MONGODB-OIDC.", + "tags": "ldap, encryption, principal, tls", + "facets": { + "genre": ["reference"], + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "fundamentals/authentication/mechanisms", + "title": "Authentication Mechanisms", + "headings": [ + "DEFAULT", + "SCRAM-SHA-256", + "SCRAM-SHA-1", + "MONGODB-CR", + "MONGODB-AWS", + "X.509", + "TLS Options" + ], + "paragraphs": "In this guide, you can find sample code for connection to MongoDB with each\nauthentication mechanism available in the MongoDB Community Edition:\n DEFAULT , SCRAM-SHA-256 , SCRAM-SHA-1 , MONGODB-CR ,\n MONGODB-AWS , and X.509 . The DEFAULT authentication mechanism is a fallback setting that instructs\nthe driver to negotiate the first authentication mechanism supported by the\nserver in the following order of preference: If the DEFAULT option is specified, the driver first attempts to\nauthenticate using SCRAM-SHA-256 . If the version of the MongoDB instance\ndoes not support that mechanism, the driver attempts to authenticate using\n SCRAM-SHA-1 . If the instance does not support that mechanism either,\nthe driver attempts to authenticate using MONGODB-CR . You can specify this authentication mechanism by setting the authMechanism \nparameter to DEFAULT in the\n connection string , or by omitting\nthe parameter since it is the default value. Also include your username and\npassword as shown in the code below. For more information on the challenge-response (CR) and salted\nchallenge-response authentication mechanisms (SCRAM) that MongoDB supports,\nsee the SCRAM section of the manual. SCRAM-SHA-256 SCRAM-SHA-1 MONGODB-CR Always URI encode the username and password using the\n encodeURIComponent method to ensure they are correctly parsed. SCRAM-SHA-256 is a salted challenge-response authentication mechanism\n(SCRAM) that uses your username and password, encrypted with the SHA-256 \nalgorithm to authenticate your user. You can specify this authentication mechanism by setting the authMechanism \nto the value SCRAM-SHA-256 in the\n connection string as shown in the\nfollowing sample code. SCRAM-SHA-256 is the default authentication method for MongoDB starting\nin version 4.0 Always URI encode the username and password using the\n encodeURIComponent method to ensure they are correctly parsed. SCRAM-SHA-1 is a salted challenge-response mechanism (SCRAM) that uses your\nusername and password, encrypted with the SHA-1 algorithm to authenticate\nyour user. You can specify this authentication mechanism by setting the authMechanism \nparameter to the value SCRAM-SHA-1 in the\n connection string as shown\nin the following sample code. SCRAM-SHA-1 is the default authentication method for MongoDB versions\n3.0, 3.2, 3.4, and 3.6. Always URI encode the username and password using the\n encodeURIComponent method to ensure they are correctly parsed. MONGODB-CR is a challenge-response authentication mechanism that uses your\nusername and password to authenticate your user. 
You can specify this option by setting the authMechanism parameter to the value\n MONGODB-CR in the\n connection string as shown\nin the following sample code. Always URI encode the username and password using the\n encodeURIComponent method to ensure they are correctly parsed. If you have upgraded the authentication schema from MONGODB-CR to\nSCRAM , any MONGODB-CR user\nauthentication requests fail. The MONGODB-AWS authentication mechanism uses your Amazon Web Services\nIdentity and Access Management (AWS IAM) credentials to authenticate your\nuser. If you do not already have the AWS signature library , use the following\n npm command to install it: To connect to a MongoDB instance with MONGODB-AWS authentication\nenabled, specify the MONGODB-AWS authentication mechanism. The driver checks for your credentials in the following sources in order: The MONGODB-AWS authentication mechanism is available only in MongoDB\nversions 4.4 and later. Connection string Environment variables Web identity token file AWS ECS endpoint specified in AWS_CONTAINER_CREDENTIALS_RELATIVE_URI AWS EC2 endpoint. For more information, see IAM Roles for Tasks . The driver only reads the credentials from the first method that it detects\nin the order given in the preceding list. For example, if you specify\nyour AWS credentials in the connection string, the driver ignores any\ncredentials that you specified in environment variables. To connect to your MongoDB instance with a connection string, pass\nyour AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY \ncredentials to the driver when you attempt to connect. If your AWS\nlogin requires a session token, include your AWS_SESSION_TOKEN as well. The following code shows an example of specifying the MONGODB-AWS \nauthentication mechanism and credentials with a connection string: Always URI encode the access key ID and secret access key using the\n encodeURIComponent method to ensure they are correctly parsed. To authenticate to your MongoDB instance using AWS credentials stored in\nenvironment variables, set the following variables by using\na shell: After you've set the preceding environment variables, specify the MONGODB-AWS \nauthentication mechanism in your connection string as shown in the following example: Omit the line containing AWS_SESSION_TOKEN if you don't need an AWS\nsession token for that role. You can use the OpenID Connect (OIDC) token obtained from a web identity\nprovider to authenticate to Amazon Elastic Kubernetes Service (EKS) or\nother services. To authenticate with your OIDC token, you must first install\n @aws-sdk/credential-providers . You can\ninstall this dependency using the following npm command: Next, create a file that contains your OIDC token. Then\nset the absolute path to this file in an environment variable by using\na shell as shown in the following example: After you've set the preceding environment variable, specify the MONGODB-AWS \nauthentication mechanism in your connection string as shown in the following example: Starting in version 4.11, when you install the optional\n @aws-sdk/credential-providers dependency, the driver uses the AWS SDK\nto retrieve credentials from the environment. As a result, if you\nhave a shared AWS credentials file or config file, the driver will\nuse those credentials by default. You can override this behavior by performing one of the following\nactions: Set the AWS_SHARED_CREDENTIALS_FILE variable in your shell to point\nto your credentials file. 
Set the equivalent environment variable in your application to point\nto your credentials file. Create an AWS profile for your MongoDB credentials and set the\n AWS_PROFILE environment variable to that profile name. The X.509 authentication mechanism uses\n TLS with X.509 certificates to\nauthenticate by retrieving the distinguished name (DN) from the\nclient certificate. You can specify this authentication mechanism by setting the following\nparameters of your connection string : Pass the location of your client certificate file as the value of\n tlsCertificateKeyFile as a parameter of the connection URI. The X.509 authentication mechanism is only available in MongoDB versions\n2.6 and later. Set the authMechanism parameter to MONGODB-X509 Set the tls parameter to true Always URI encode the certificate file path using the\n encodeURIComponent method to ensure it is parsed correctly. To learn more about enabling TLS on a connection, see\n Enable TLS on a Connection . The following table describes the TLS options that you can set in a\nconnection URI. Parameter Name Type Default Value Description tls boolean false Specifies whether to enable TLS on the connection. tlsInsecure boolean false Specifies whether to allow invalid certificates and mismatched\nhostnames. When set to true , this is equivalent to setting\n tlsAllowInvalidCertificates and tlsAllowInvalidHostnames to\n true . tlsCAFile string Path to file that contains a single or bundle of trusted certificate\nauthorities used in a TLS connection. tlsCertificateKeyFile string Path to the client certificate file or the client private key file. If\nboth are required, the two must be concatenated into a single file. tlsCertificateKeyFilePassword buffer or string String or buffer that contains the password to decrypt the client\nprivate key. tlsAllowInvalidCertificates boolean false Specifies whether the driver permits an invalid certificate to be used\nto connect. 
tlsAllowInvalidHostnames boolean false Specifies whether the driver raises an error when there is a mismatch between the\nserver hostname and TLS certificate hostname.", + "code": [ + { + "lang": "javascript", + "value": "const { MongoClient } = require(\"mongodb\");\n\n// Replace the following with values for your environment.\nconst username = encodeURIComponent(\"\");\nconst password = encodeURIComponent(\"\");\nconst clusterUrl = \"\";\n\nconst authMechanism = \"DEFAULT\";\n\n// Replace the following with your MongoDB deployment's connection string.\nconst uri =\n `mongodb+srv://${username}:${password}@${clusterUrl}/?authMechanism=${authMechanism}`;\n\n// Create a new MongoClient\nconst client = new MongoClient(uri);\n\n// Function to connect to the server\nasync function run() {\n try {\n // Establish and verify connection\n await client.db(\"admin\").command({ ping: 1 });\n console.log(\"Connected successfully to server\");\n } finally {\n // Ensures that the client will close when you finish/error\n await client.close();\n }\n}\nrun().catch(console.dir);\n" + }, + { + "lang": "javascript", + "value": "const { MongoClient } = require(\"mongodb\");\n\n// Replace the following with values for your environment.\nconst username = encodeURIComponent(\"\");\nconst password = encodeURIComponent(\"\");\nconst clusterUrl = \"\";\n\nconst authMechanism = \"SCRAM-SHA-256\";\n\n// Replace the following with your MongoDB deployment's connection string.\nconst uri =\n `mongodb+srv://${username}:${password}@${clusterUrl}/?authMechanism=${authMechanism}`;\n\n// Create a new MongoClient\nconst client = new MongoClient(uri);\n\n// Function to connect to the server\nasync function run() {\n try {\n // Establish and verify connection\n await client.db(\"admin\").command({ ping: 1 });\n console.log(\"Connected successfully to server\");\n } finally {\n // Ensures that the client will close when you finish/error\n await client.close();\n }\n}\nrun().catch(console.dir);\n" + }, + { + "lang": "javascript", + "value": "const { MongoClient } = require(\"mongodb\");\n\n// Replace the following with values for your environment.\nconst username = encodeURIComponent(\"\");\nconst password = encodeURIComponent(\"\");\nconst clusterUrl = \"\";\n\nconst authMechanism = \"SCRAM-SHA-1\";\n\n// Replace the following with your MongoDB deployment's connection string.\nconst uri =\n `mongodb+srv://${username}:${password}@${clusterUrl}/?authMechanism=${authMechanism}`;\n\n// Create a new MongoClient\nconst client = new MongoClient(uri);\n\n// Function to connect to the server\nasync function run() {\n try {\n // Establish and verify connection\n await client.db(\"admin\").command({ ping: 1 });\n console.log(\"Connected successfully to server\");\n } finally {\n // Ensures that the client will close when you finish/error\n await client.close();\n }\n}\nrun().catch(console.dir);\n" + }, + { + "lang": "javascript", + "value": "const { MongoClient } = require(\"mongodb\");\n\n// Replace the following with values for your environment.\nconst username = encodeURIComponent(\"\");\nconst password = encodeURIComponent(\"\");\nconst clusterUrl = \"\";\n\n// Replace the following with your MongoDB deployment's connection string.\nconst uri =\n `mongodb+srv://${username}:${password}@${clusterUrl}/?authMechanism=${authMechanism}&tls=true&tlsCertificateKeyFile=${clientPEMFile}`;\n\n// Create a new MongoClient\nconst client = new MongoClient(uri);\n\n// Function to connect to the server\nasync function run() {\n try {\n // Establish and 
verify connection\n await client.db(\"admin\").command({ ping: 1 });\n console.log(\"Connected successfully to server\");\n } finally {\n // Ensures that the client will close when you finish/error\n await client.close();\n }\n}\nrun().catch(console.dir);\n" + }, + { + "lang": "bash", + "value": "npm install aws4" + }, + { + "lang": "javascript", + "value": "const { MongoClient } = require(\"mongodb\");\n\n// Replace the following with values for your environment.\nconst accessKeyId = encodeURIComponent(\"\");\nconst secretAccessKey = encodeURIComponent(\"\");\nconst clusterUrl = \"\";\n\nconst authMechanism = \"MONGODB-AWS\";\n\nlet uri =\n `mongodb+srv://${accessKeyId}:${secretAccessKey}@${clusterUrl}/?authSource=%24external&authMechanism=${authMechanism}`;\n \n// Uncomment the following lines if your AWS authentication setup requires a session token.\n// const sessionToken = encodeURIComponent(\"\");\n// uri = uri.concat(`&authMechanismProperties=AWS_SESSION_TOKEN:${sessionToken}`);\n\n// Create a new MongoClient.\nconst client = new MongoClient(uri);\n\nasync function run() {\n try {\n // Establish and verify connection.\n await client.db(\"admin\").command({ ping: 1 });\n console.log(\"Connected successfully to server.\");\n } finally {\n // Ensure that the client closes when it finishes/errors.\n await client.close();\n }\n}\nrun().catch(console.dir);\n" + }, + { + "lang": "bash", + "value": "export AWS_ACCESS_KEY_ID=\nexport AWS_SECRET_ACCESS_KEY=\nexport AWS_SESSION_TOKEN=" + }, + { + "lang": "javascript", + "value": "const { MongoClient } = require(\"mongodb\");\n\n// Remember to specify your AWS credentials in environment variables.\nconst clusterUrl = \"\";\nconst authMechanism = \"MONGODB-AWS\";\n\nlet uri =\n `mongodb+srv://${clusterUrl}/?authSource=%24external&authMechanism=${authMechanism}`;\n\n// Create a new MongoClient.\nconst client = new MongoClient(uri);\n\nasync function run() {\n try {\n // Establish and verify connection.\n await client.db(\"admin\").command({ ping: 1 });\n console.log(\"Connected successfully to server.\");\n } finally {\n // Ensure that the client closes when it finishes/errors.\n await client.close();\n }\n}\nrun().catch(console.dir);\n" + }, + { + "lang": "bash", + "value": "npm install @aws-sdk/credential-providers" + }, + { + "lang": "bash", + "value": "export AWS_WEB_IDENTITY_TOKEN_FILE=" + }, + { + "lang": "javascript", + "value": "const { MongoClient } = require(\"mongodb\");\n\n// Remember to specify your AWS credentials in environment variables.\nconst clusterUrl = \"\";\nconst authMechanism = \"MONGODB-AWS\";\n\nlet uri =\n `mongodb+srv://${clusterUrl}/?authSource=%24external&authMechanism=${authMechanism}`;\n\n// Create a new MongoClient.\nconst client = new MongoClient(uri);\n\nasync function run() {\n try {\n // Establish and verify connection.\n await client.db(\"admin\").command({ ping: 1 });\n console.log(\"Connected successfully to server.\");\n } finally {\n // Ensure that the client closes when it finishes/errors.\n await client.close();\n }\n}\nrun().catch(console.dir);\n" + }, + { + "lang": "javascript", + "value": "const { MongoClient } = require(\"mongodb\");\n\n// Replace the following with values for your environment.\nconst clusterUrl = \"\";\nconst clientPEMFile = encodeURIComponent(\"\");\n\nconst authMechanism = \"MONGODB-X509\";\n\n// Replace the following with your MongoDB deployment's connection string.\nconst uri =\n 
`mongodb+srv://${clusterUrl}/?authMechanism=${authMechanism}&tls=true&tlsCertificateKeyFile=${clientPEMFile}`;\n\n// Create a new MongoClient\nconst client = new MongoClient(uri);\n\n// Function to connect to the server\nasync function run() {\n try {\n // Establish and verify connection\n await client.db(\"admin\").command({ ping: 1 });\n console.log(\"Connected successfully to server\");\n } finally {\n // Ensures that the client will close when you finish/error\n await client.close();\n }\n}\nrun().catch(console.dir);\n" + } + ], + "preview": "In this guide, you can find sample code for connection to MongoDB with each\nauthentication mechanism available in the MongoDB Community Edition:\nDEFAULT, SCRAM-SHA-256, SCRAM-SHA-1, MONGODB-CR,\nMONGODB-AWS, and X.509.", + "tags": null, + "facets": { + "genre": ["tutorial"], + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "fundamentals/authentication", + "title": "Authentication", + "headings": ["Overview"], + "paragraphs": "These guides show you how to authenticate to a MongoDB instance using the\nNode.js driver. The Authentication Mechanisms guide contains\nsample connection code using each authentication mechanism supported in the\nMongoDB Community Edition which includes: The Enterprise Authentication Mechanisms guide contains sample\nconnection code using authentication mechanisms available only in MongoDB\nEnterprise Edition which includes: DEFAULT SCRAM-SHA-256 SCRAM-SHA-1 MONGODB-CR MONGODB-AWS X.509 Kerberos (GSSAPI/SSPI) LDAP (PLAIN) MONGODB-OIDC For instructions on MongoDB driver installation and deployment setup, see\nour Connect to MongoDB guide . Select your\nMongoDB deployment type and the Node.js client.", + "code": [], + "preview": "These guides show you how to authenticate to a MongoDB instance using the\nNode.js driver.", + "tags": null, + "facets": { + "genre": ["tutorial"], + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "fundamentals/bson/undefined-values", + "title": "Undefined Values", + "headings": [ + "Overview", + "Ignore Undefined Values", + "Set the Scope for Serializing Undefined Values" + ], + "paragraphs": "In this guide, you can learn to control how the driver serializes\n undefined values. By default, the driver serializes undefined values\nas null values during write operations. To make the driver ignore fields with\n undefined values during serialization, set the\n ignoreUndefined setting to true . When you specify this setting,\nthe driver does not serialize fields with undefined values. The following example inserts two documents. The first insert operation has\nthe ignoreUndefined setting set to true , so the driver does not\nserialize the salesTax field in that operation. The second operation\ninserts a document that has the salesTax field with a null value: The documents appear in the collection as follows: You can specify the ignoreUndefined setting at the following levels: The ignoreUndefined setting automatically applies to the scope of the\nobject instance in which you specified it and any other objects created\nfrom that instance. For example, if you set the ignoreUndefined setting when\ninstantiating a database object, any collection instance created from\nthat object inherits the setting. Furthermore, any operations that you\ncall on that collection instance also inherit the setting. 
The following example performs a find-and-update operation that\ninherits the ignoreUndefined setting from the myDB database\nobject. This operation does not produce any data changes because the\ndriver ignores the gasTax field: You can specify the ignoreUndefined setting again at any level to\noverride any inherited settings. For example, if you set ignoreUndefined to true on your\ncollection object, you can override the setting in individual write\noperations that you execute on that collection. The client level The database level The collection level The operation level", + "code": [ + { + "lang": "javascript", + "value": "await myColl.insertOne(\n {\n state: \"Montana\",\n salesTax: undefined,\n },\n { ignoreUndefined: true }\n);\n\nawait myColl.insertOne({\n state: \"New Hampshire\",\n salesTax: undefined,\n});" + }, + { + "lang": "javascript", + "value": "{\n _id: ...,\n state: \"Montana\",\n},\n{\n _id: ...,\n state: \"New Hampshire\",\n salesTax: null\n}" + }, + { + "lang": "javascript", + "value": "const myDB = client.db(\"test\", { ignoreUndefined: true });\n\n// The collection inherits the ignoreUndefined setting\nconst myColl = myDB.collection(\"states\");\n\n// Any write operation will not serialize undefined values\nawait myColl.findOneAndUpdate(\n { state: \"Georgia\" },\n { $set: { gasTax: undefined } }\n);" + }, + { + "lang": "javascript", + "value": "const myColl = myDB.collection(\"states\", { ignoreUndefined: true });\n\n// The insert operation will not serialize undefined values\nawait myColl.insertOne({\n state: \"South Dakota\",\n capitalGainsTax: undefined,\n});\n\n// The insert operation will serialize undefined values\nawait myColl.insertOne(\n { state: \"Texas\", capitalGainsTax: undefined },\n { ignoreUndefined: false }\n);" + } + ], + "preview": "In this guide, you can learn to control how the driver serializes\nundefined values. By default, the driver serializes undefined values\nas null values during write operations.", + "tags": null, + "facets": { + "genre": ["tutorial"], + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "fundamentals/bson/utf8-validation", + "title": "UTF-8 Validation", + "headings": [ + "Overview", + "Specify the UTF-8 Validation Setting", + "Set the Validation Scope" + ], + "paragraphs": "In this guide, you can learn how to enable or disable the Node.js driver's\n UTF-8 validation feature. UTF-8 is a character encoding specification\nthat ensures compatibility and consistent presentation across most operating\nsystems, applications, and language character sets. If you enable validation, the driver throws an error when it attempts to\nconvert data that contains invalid UTF-8 characters. The validation adds\nprocessing overhead since it needs to check the data. If you disable validation, your application avoids the validation processing\noverhead, but cannot guarantee consistent presentation of invalid UTF-8 data. The driver enables UTF-8 validation by default. It checks documents for any\ncharacters that are not encoded in a valid UTF-8 format when it transfers data\nbetween your application and MongoDB. Read the sections below to learn how to set UTF-8 validation using the\nNode.js driver. The current version of the Node.js driver automatically substitutes\ninvalid UTF-8 characters with alternate valid UTF-8 ones before\nvalidation when you send data to MongoDB. 
Therefore, the validation\nonly throws an error when the setting is enabled and the driver\nreceives invalid UTF-8 document data from MongoDB. You can specify whether the driver performs UTF-8 validation by\ndefining the enableUtf8Validation setting in the options parameter\nwhen you create a client, reference a database or collection, or call a\nCRUD operation. If you omit the setting, the driver enables UTF-8 validation. See the following for code examples that demonstrate how to disable UTF-8\nvalidation on the client, database, collection, or CRUD operation: If your application reads invalid UTF-8 from MongoDB while the\n enableUtf8Validation option is enabled, it throws a BSONError that\ncontains the following message: The enableUtf8Validation setting automatically applies to the scope of the\nobject instance on which you included it, and any other objects created by\ncalls on that instance. For example, if you include the option on the call to instantiate a database\nobject, any collection instance you construct from that object inherits\nthe setting. Any operations you call on that collection instance also\ninherit the setting. You can override the setting at any level of scope by including it when\nconstructing the object instance or when calling an operation. For example, if you disable validation on the collection object, you can\noverride the setting in individual CRUD operation calls on that\ncollection.", + "code": [ + { + "lang": "javascript", + "value": "// disable UTF-8 validation on the client\nnew MongoClient('', { enableUtf8Validation: false });\n\n// disable UTF-8 validation on the database\nclient.db('', { enableUtf8Validation: false });\n\n// disable UTF-8 validation on the collection\ndb.collection('', { enableUtf8Validation: false });\n\n// disable UTF-8 validation on a specific operation call\nawait myColl.findOne({ title: 'Cam Jansen'}, { enableUtf8Validation: false });" + }, + { + "lang": null, + "value": "Invalid UTF-8 string in BSON document" + }, + { + "lang": "javascript", + "value": "const database = client.db('books', { enableUtf8Validation: false });\n\n// The collection inherits the UTF-8 validation disabled setting from the database\nconst myColl = database.collection('mystery');\n\n// CRUD operation runs with UTF-8 validation disabled\nawait myColl.findOne({ title: 'Encyclopedia Brown' });" + }, + { + "lang": "javascript", + "value": "const collection = database.collection('mystery', { enableUtf8Validation: false });\n\n// CRUD operation runs with UTF-8 validation enabled\nawait myColl.findOne({ title: 'Trixie Belden' }, { enableUtf8Validation: true });\n\n// CRUD operation runs with UTF-8 validation disabled\nawait myColl.findOne({ title: 'Enola Holmes' });" + } + ], + "preview": "In this guide, you can learn how to enable or disable the Node.js driver's\nUTF-8 validation feature. 
UTF-8 is a character encoding specification\nthat ensures compatibility and consistent presentation across most operating\nsystems, applications, and language character sets.", + "tags": null, + "facets": { + "genre": ["tutorial"], + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "fundamentals/bson", + "title": "BSON Settings", + "headings": ["Overview"], + "paragraphs": "Learn how to configure your application's BSON serialization settings.\nThe guides in this section describe the following topics: Undefined Values : Control how the\ndriver serializes undefined values UTF-8 Validation : Enable or disable\nthe UTF-8 validation feature", + "code": [], + "preview": "Learn how to configure your application's BSON serialization settings.\nThe guides in this section describe the following topics:", + "tags": null, + "facets": { + "genre": ["tutorial"], + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "fundamentals/collations", + "title": "Collations", + "headings": [ + "Overview", + "Usage", + "Collation Parameters", + "Collation Examples", + "Set a Default Collation on a Collection", + "Assign a Collation to an Index", + "Collation Query Examples", + "find() and sort() Example", + "findOneAndUpdate() Example", + "findOneAndDelete() Example", + "Aggregation Example" + ], + "paragraphs": "Collations are available in MongoDB 3.4 and later. This guide shows you how to use collations , a set of sorting rules, to\nrun operations using string ordering for specific languages and locales (a\ncommunity or region that shares common language idioms). MongoDB sorts strings using binary collation by default. This collation\nmethod uses the ASCII standard \ncharacter values to compare and order strings. Languages and locales\nhave specific character ordering conventions that differ from the ASCII\nstandard. For example, in Canadian French, the right-most accented character determines\nthe ordering for strings when the other characters are the same. Consider the\nfollowing French words: cote , cot\u00e9 , c\u00f4te , and c\u00f4t\u00e9 . MongoDB sorts them in the following order using the default binary collation: MongoDB sorts them in the following order using the Canadian French collation: You can specify a collation when you create a new collection or new index. You\ncan also specify a collation for CRUD operations \nand aggregations. When you create a new collection with a collation, you define the default\ncollation for any of the operations that support collation called on that\ncollection. You can override the collation for an operation by specifying a\ndifferent one. When you create an index with a collation, you specify the sort order for\noperations that use that index. To use the collation in the index, you\nmust provide a matching collation in the operation, and the operation must\nuse the index. While most index types support collation, the following\ntypes support only binary comparison: Currently, you cannot create a collation on an existing collection. To use\ncollations with an existing collection, create an index with the collation\nand specify the same collation in your operations on it. text 2d geoHaystack The collation object contains the following parameters: You must specify the locale field in the collation; all other fields\nare optional. 
For a complete list of supported locales and the default values\nfor the locale fields, see Supported Languages and Locales .\nFor descriptions of each field, see the Collation Document MongoDB\nmanual entry . In the following example, we create a new collection called souvenirs and\nassign a default collation with the \"fr_CA\" locale. The collation applies\nto all operations that support collation performed on that\ncollection. Any of the operations that support collations automatically apply the collation\ndefined on the collection. The query below searches the souvenirs \ncollection and applies the \"fr_CA\" locale collation: You can specify a different collation as a parameter in an operation that\nsupports collations. The following query specifies the \"is\" Iceland locale\nand caseFirst optional parameter with the value \"upper\" : In the following example, we create a new index on the title field of\na collection with a collation set to the \"en_US\" locale. The following query uses the index we created: The following queries do not use the index that we created. The first\nquery does not include a collation and the second contains a different\nstrength value than the collation on the index. Operations that read, update, and delete documents from a collection can use\ncollations. This section includes examples of a selection of these. See the\nMongoDB manual for a full list of operations that support collation . The following example calls both find() and sort() on a collection\nthat uses the default binary collation. We use the German collation by\nsetting the value of the locale parameter to \"de\" . The following example calls the findOneAndUpdate() operation on a\ncollection that uses the default binary collation. The collection contains the\nfollowing documents: Consider the following findOneAndUpdate() operation on this collection\nwhich does not specify a collation: Since \"Gunter\" is the first sorted result when using a binary collation, none\nof the documents come lexically before and match the $lt comparison\noperator in the query document. As a result, the operation does not update any\ndocuments. Consider the same operation with a collation specified with the locale set to\n de@collation=phonebook . This locale specifies the collation=phonebook \noption which contains rules for prioritizing proper nouns, identified by\ncapitalization of the first letter. The de@collation=phonebook locale and\noption sorts characters with umlauts before the same characters without\numlauts. Since \"G\u00fcnter\" lexically comes before \"Gunter\" using the\n de@collation=phonebook collation specified in findOneAndUpdate() ,\nthe operation returns the following updated document: The following example calls the findOneAndDelete() operation on a\ncollection that uses the default binary collation and contains the following\ndocuments: In this example, we set the numericOrdering collation parameter to true \nto sort numeric strings based on their numerical order instead of their\nlexical order. After you run the operation above, the collection contains the following\ndocuments: If you perform the same operation without collation on the original\ncollection of three documents, it matches documents based on the lexical value\nof the strings ( \"16\" , \"84\" , and \"179\" ), and deletes the first\ndocument it finds that matches the query criteria. 
Since all the documents contain lexical values in the a field that\nmatch the criteria (greater than the lexical value of \"100\" ), the operation\nremoves the first result. After you run the operation above, the collection\ncontains the following documents: To use collation with the aggregate \noperation, pass the collation document in the options field, after the\narray of pipeline stages. The following example shows an aggregation pipeline on a collection that uses\nthe default binary collation. The aggregation groups the first_name field,\ncounts the total number of results in each group, and sorts the results by\nthe German phonebook ( \"de@collation=phonebook\" locale) order. You can specify only one collation on an aggregation.", + "code": [ + { + "lang": "none", + "value": "cote\ncot\u00e9\nc\u00f4te\nc\u00f4t\u00e9" + }, + { + "lang": "none", + "value": "cote\nc\u00f4te\ncot\u00e9\nc\u00f4t\u00e9" + }, + { + "lang": "javascript", + "value": "collation: {\n locale: ,\n caseLevel: ,\n caseFirst: ,\n strength: ,\n numericOrdering: ,\n alternate: ,\n maxVariable: ,\n backwards: \n}" + }, + { + "lang": "javascript", + "value": "db.createCollection(\"souvenirs\", {\n collation: { locale: \"fr_CA\" },\n});" + }, + { + "lang": "javascript", + "value": "myColl.find({type: \"photograph\"});" + }, + { + "lang": "javascript", + "value": " myColl.find({type: \"photograph\"},\n { collation: { locale: \"is\", caseFirst: \"upper\" } }\n );" + }, + { + "lang": "javascript", + "value": "myColl.createIndex(\n { 'title' : 1 },\n { 'collation' : { 'locale' : 'en_US' } });" + }, + { + "lang": "javascript", + "value": "myColl.find({\"year\": 1980}, {\"collation\" : {\"locale\" : \"en_US\" }})\n .sort({\"title\": -1});" + }, + { + "lang": "javascript", + "value": "myColl.find({\"year\": 1980}, {\"collation\" : {\"locale\" : \"en_US\", \"strength\": 2 }})\n .sort({\"title\": -1});" + }, + { + "lang": "javascript", + "value": "myColl.find({\"year\": 1980})\n .sort({\"title\": -1});" + }, + { + "lang": "javascript", + "value": "myColl.find({ city: \"New York\" }, { collation: { locale: \"de\" } })\n .sort({ name: 1 });" + }, + { + "lang": "none", + "value": "{ \"_id\" : 1, \"first_name\" : \"Hans\" }\n{ \"_id\" : 2, \"first_name\" : \"Gunter\" }\n{ \"_id\" : 3, \"first_name\" : \"G\u00fcnter\" }\n{ \"_id\" : 4, \"first_name\" : \"J\u00fcrgen\" }" + }, + { + "lang": "none", + "value": "{ lastErrorObject: { updatedExisting: true, n: 1 },\n value: { _id: 3, first_name: 'G\u00fcnter' },\n ok: 1 }" + }, + { + "lang": "javascript", + "value": "myColl.findOneAndUpdate(\n { first_name : { $lt: \"Gunter\" } },\n { $set: { verified: true } }\n);" + }, + { + "lang": "javascript", + "value": "myColl.findOneAndUpdate(\n { first_name: { $lt: \"Gunter\" } },\n { $set: { verified: true } },\n { collation: { locale: \"de@collation=phonebook\" } },\n);" + }, + { + "lang": "none", + "value": "{ \"_id\" : 1, \"a\" : \"16\" }\n{ \"_id\" : 2, \"a\" : \"84\" }\n{ \"_id\" : 3, \"a\" : \"179\" }" + }, + { + "lang": "none", + "value": "{ \"_id\" : 1, \"a\" : \"16\" }\n{ \"_id\" : 2, \"a\" : \"84\" }" + }, + { + "lang": "none", + "value": "{ \"_id\" : 2, \"a\" : \"84\" }\n{ \"_id\" : 3, \"a\" : \"179\" }" + }, + { + "lang": "javascript", + "value": "myColl.findOneAndDelete(\n { a: { $gt: \"100\" } },\n { collation: { locale: \"en\", numericOrdering: true } },\n);" + }, + { + "lang": "javascript", + "value": "await myColl.findOneAndDelete({ a: { $gt: \"100\" } });" + }, + { + "lang": "javascript", + "value": "myColl.aggregate(\n [\n { 
$group: { \"_id\": \"$first_name\", \"nameCount\": { \"$sum\": 1 } } },\n { $sort: { \"_id\": 1 } },\n ],\n { collation: { locale: \"de@collation=phonebook\" } },\n);" + } + ], + "preview": "Collations are available in MongoDB 3.4 and later.", + "tags": null, + "facets": { + "genre": ["tutorial"], + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "fundamentals/connection/connect", + "title": "Connection Guide", + "headings": [ + "Connection URI", + "Atlas Connection Example", + "Other Ways to Connect to MongoDB", + "Connect to a MongoDB Server on Your Local Machine", + "Connect to a Replica Set", + "Direct Connection" + ], + "paragraphs": "This guide shows you how to connect to a\n MongoDB Atlas deployment ,\na MongoDB instance, or a replica set using the Node.js driver. The connection URI is the set of instructions that the driver uses to connect to a\nMongoDB deployment. It tells the driver how to connect to MongoDB and how to behave\nwhile connected. The following example shows each part of the connection URI: In this example, we connect to an Atlas MongoDB deployment that has a\nDNS SRV record. For more details, see the\n DNS Seed List Connection Format \ndocumentation. This format offers flexibility in deployment and the\nability to change the servers in rotation without reconfiguring clients. If you are connecting to an instance or replica set that does not have a\nDNS SRV address, you must use mongodb for the protocol, which specifies\nthe Standard Connection String Format . After the protocol, the next part of the connection string contains credentials\nif you are using password-based authentication. Replace the value of user \nwith your username and pass with your password. If you are using an\nauthentication mechanism that does not require a username and password, omit\nthis part of the connection URI. The next part of the connection string specifies the hostname or IP address of\nyour MongoDB instance, followed by the port number. In the example above, we use\n sample.host as the hostname and 27017 as the port. Replace these values\nto point to your MongoDB instance. The last part of the connection string contains connection and authentication\noptions as parameters. In the example above, we set two connection options:\n maxPoolSize=20 and w=majority . For more information on connection\noptions, skip to the Connection Options section. To learn how to retrieve your connection string in Atlas, see the\n Atlas driver connection guide . You must create a client to connect to a MongoDB deployment on Atlas. To create\na client, construct an instance of MongoClient , passing in your\nURI and a MongoClientOptions object. Use the serverApi option in your MongoClientOptions object to\nenable the Stable API feature, which forces the server to run operations\nwith behavior compatible with the specified API version. The following code shows how you can specify the connection string and the\nStable API client option when connecting to a MongoDB deployment on Atlas and\nverify that the connection is successful: To learn more about the Stable\nAPI feature, see the Stable API page . As each MongoClient represents a pool of connections to the\ndatabase, most applications only require a single instance of a\n MongoClient , even across multiple requests. To learn more about\nhow connection pools work in the driver, see the FAQ page . 
The Node.js driver automatically calls the MongoClient.connect() \nmethod when using the client to perform CRUD operations on your MongoDB deployment.\nCall the MongoClient.connect() method explicitly if you want to verify that the\nconnection is successful. If you are connecting to a single MongoDB Server instance or replica set\nthat is not hosted on Atlas, see the following sections to find out how to\nconnect. To test whether you can connect to your server, replace the connection\nstring in the Connect to MongoDB code\nexample and run it. To connect to a MongoDB deployment on your local machine, complete the following\nsteps: After you successfully start your MongoDB Server, specify your connection\nstring in your driver connection code. If your MongoDB Server is running locally, you can use the following\nconnection string: In this connection string, is the port number on which you\nconfigured your server to listen for incoming connections. If you want to specify a different hostname or IP address, see our Server\nManual entry on Connection Strings . Download the Community \nor Enterprise version\nof MongoDB Server. Install and configure MongoDB Server. Start the server. Always secure your MongoDB Server from malicious attacks. See our\n Security Checklist for a\nlist of security recommendations. A MongoDB replica set deployment is a group of connected instances that\nstore the same set of data. This configuration of instances provides data\nredundancy and high data availability. To connect to a replica set deployment, specify the hostname and port numbers\nof each instance, separated by a comma, and the replica set name as the value\nof the replicaSet parameter in the connection string. When making a connection, the driver takes the following actions by default: Discovers all replica set members when given the address of any one member. Dispatches operations to the appropriate member, such as write against the primary . To ensure connectivity if one host is unavailable, provide the full\nlist of hosts when connecting to a replica set. To force your operations to run on the host specified in your connection\nURI, you can specify the directConnection connection option. If you\nspecify this option, you must use the standard connection URI format. The\ndriver does not accept the DNS seedlist connection format (SRV) when you\nspecify this option. When you specify directConnection and connect to a secondary member of the\nreplica set, your write operations fail because the client isn't\nconnected to the primary member. To perform read operations, you must\nenable secondary reads. See the read preference options \nfor more information.", + "code": [ + { + "lang": "javascript", + "value": "const { MongoClient, ServerApiVersion } = require(\"mongodb\");\n\n// Replace the placeholder with your Atlas connection string\nconst uri = \"\";\n\n// Create a MongoClient with a MongoClientOptions object to set the Stable API version\nconst client = new MongoClient(uri, {\n serverApi: {\n version: ServerApiVersion.v1,\n strict: true,\n deprecationErrors: true,\n }\n }\n);\n\nasync function run() {\n try {\n // Connect the client to the server (optional starting in v4.7)\n await client.connect();\n\n // Send a ping to confirm a successful connection\n await client.db(\"admin\").command({ ping: 1 });\n console.log(\"Pinged your deployment. 
You successfully connected to MongoDB!\");\n } finally {\n // Ensures that the client will close when you finish/error\n await client.close();\n }\n}\nrun().catch(console.dir);\n" + }, + { + "lang": "none", + "value": "mongodb://localhost:" + }, + { + "lang": "none", + "value": "mongodb://host1:27017,host2:27017,host3:27017/?replicaSet=myRs" + } + ], + "preview": "Learn how to connect to a MongoDB Atlas or local MongoDB deployment by using the Node.js driver.", + "tags": "node.js, code example, connection string, local connection, Stable API, Atlas", + "facets": { + "genre": ["reference"], + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "fundamentals/connection/connection-options", + "title": "Connection Options", + "headings": [], + "paragraphs": "This section explains the MongoDB connection and authentication options\nsupported by the driver. You can pass the connection options as\nparameters of the connection URI to specify the behavior of the client. Name Accepted Values Default Value Description appName string null Specifies the app name the driver passes to the server in the client\nmetadata as part of the connection handshake. The driver sends the\n appName value to MongoDB when establishing the connection.\nThis value is recorded in the log file, the slow query logs, and\nprofile collections. authMechanism string null Specifies the authentication mechanism to use for connection to the\nserver. If you do not specify a value, the driver uses the default mechanism,\neither SCRAM-SHA-1 or SCRAM-SHA-256 depending on the server version. See\n authentication mechanism for available\nauthentication mechanisms. authMechanismProperties comma separated key:value pairs, for example, \"opt1:val1,opt2:val2\" null Specifies other options provided for authentication, such as the option to enable\nhostname canonicalization for GSSAPI. authSource string null Specifies the database that connections authenticate against. compressors comma separated list of strings, for example, \"snappy,zlib,zstd\" null Specifies the allowed compression types for wire protocol messages\nsent to or received from the server. See Network Compression \nfor more information. connectTimeoutMS non-negative integer 30000 Specifies the amount of time, in milliseconds, to wait to establish a single TCP\nsocket connection to the server before raising an error. Specifying\n 0 disables the connection timeout. directConnection boolean false Specifies whether to force the dispatch of all operations to the host\nspecified in the connection URI. enableUtf8Validation boolean true Specifying true enables UTF-8 validation for the\nconnection. MongoDB throws an error when\nit attempts to serialize string data that contains invalid\nUTF-8 characters to BSON. This applies to both document keys and\ndocument values; this validation adds processing overhead. Specifying false disables UTF-8 validation for the\nconnection. MongoDB does not throw errors when\ndata contains invalid UTF-8 characters. If you disable validation,\nyour application avoids the validation processing overhead.\n Editing data while validation is disabled\ncan result in loss of data. Disabling UTF-8 validation is a\ntemporary workaround to query or export data only. You can also set UTF-8 validation in your Node.js code . To learn more about UTF-8 characters,\nsee UTF-8 on Wikipedia. 
heartbeatFrequencyMS integer greater than or equal to 500 null Specifies the interval, in milliseconds, between regular server monitoring checks. journal boolean null Specifies the journal write concern for the client. See\n write concern for more information. loadBalanced boolean null Specifies whether the driver is connecting to a load balancer. localThresholdMS non-negative integer 15 Specifies the size of the latency window, in milliseconds, on round trip time for\nselecting between suitable servers. Specifying 0 means no wait,\nmeaning the fastest available server. maxIdleTimeMS non-negative integer 0 Specifies the amount of time, in milliseconds, a connection can be idle before it's closed.\nSpecifying 0 means no minimum. maxPoolSize non-negative integer 100 Specifies the maximum number of clients or connections the driver\ncan create in its connection pool. This count includes connections\nin use. maxConnecting non-negative integer 2 Specifies the maximum number of connections a driver's connection\npool may be establishing concurrently. maxStalenessSeconds -1, or an integer greater than or equal to 90 null Specifies the maximum replication lag, in wall clock time, that\na secondary can experience and still be eligible for server selection.\nSpecifying -1 means no maximum. minPoolSize non-negative integer 0 Specifies the number of connections the driver creates and\nmaintains in the connection pool even when no operations are occurring.\nThis count includes connections in use. proxyHost string null Specifies the SOCKS5 proxy IPv4 address, IPv6 address, or domain\nname. proxyPort non-negative integer null Specifies the TCP port number of the SOCKS5 proxy server. If you\nset the proxyHost option, the value of this option defaults\nto 1080 . proxyUsername string null Specifies the username for authentication to the SOCKS5\nproxy server. If you set\nthis option to a zero-length string, the driver ignores it. proxyPassword string null Specifies the password for authentication to the SOCKS5\nproxy server. If you set\nthis option to a zero-length string, the driver ignores it. readConcernLevel string null Specifies the default read concern for the client. See read concern for more information. readPreference string \"primary\" Specifies the default read preference for the client (excluding tags). See read preference for more information. readPreferenceTags comma-separated key:value pairs, for example, \"dc:ny,rack:1\" ; \"dc:ny\"\ncan be specified multiple times; each instance of this key is a\nseparate tag set null Specifies the default read preference tags for the client. This option is\nvalid only if the read preference mode is not primary. The driver uses the order of the tags in the URI as the order\nfor the read preference. replicaSet string null Specifies the name of the replica set to connect to. retryReads boolean true Enables retryable reads. retryWrites boolean true Enables retryable writes. serverMonitoringMode auto , stream , poll auto Specifies the monitoring mode that the driver monitors use. When\nthis option is set to auto , the monitoring mode is determined\nby the environment in which the driver is running. The driver\nuses polling mode in function-as-a-service (FaaS) environments\nand the streaming mode in other environments. serverSelectionTimeoutMS non-negative integer 30000 Specifies the timeout, in milliseconds, to block for server selection\nbefore raising an error. 
serverSelectionTryOnce boolean true Specifies to scan the topology only once after a server selection\nfailure instead of repeatedly until the server selection times out. socketTimeoutMS non-negative integer 0 Specifies the amount of time, in milliseconds, spent attempting to send or receive on a\nsocket before timing out. Specifying 0 means no timeout. srvMaxHosts non-negative integer 0 Specifies the maximum number of SRV results to randomly select when initially\npopulating the seedlist or, during SRV polling, adding new hosts to the\ntopology. srvServiceName a valid SRV service name according to RFC 6335 \"mongodb\" Specifies the service name to use for SRV lookup in initial DNS seedlist discovery. ssl boolean false The ssl option is an alias for the tls option. tls boolean false Specifies whether TLS is required for connections to the server.\nUsing the \"mongodb+srv\" connection scheme, or specifying other\n tls -prefixed options, implicitly sets the value of tls to\n true . tlsAllowInvalidCertificates boolean false Specifies whether the driver generates an error when the server's\nTLS certificate is invalid. Set this option to true for testing\npurposes only. tlsAllowInvalidHostnames boolean false Specifies whether the driver generates an error when there is a mismatch\nbetween the server's hostname and the hostname specified by the\nTLS certificate. Set this option to true for testing purposes only. tlsCAFile string null Specifies the path to a file with either a single or bundle of certificate\nauthorities to trust when making a TLS connection. To learn more\nabout setting this connection option, see the Provide\nCertificate Filepaths section of the TLS guide. tlsCertificateKeyFile string null Specifies the path to the client certificate file or the client\nprivate key file. If you need both, you must concatenate the\nfiles. To learn more about setting this connection option, see\nthe Provide Certificate Filepaths \nsection of the TLS guide. tlsCertificateKeyFilePassword string null Specifies the password to decrypt the client private key to be used\nfor TLS connections. tlsInsecure boolean false Specifies to relax TLS constraints as much as possible, such as\nallowing invalid certificates or hostname mismatches. Set this option\nto true for testing purposes only. w non-negative integer or string null Specifies the default write concern \"w\" field for the client. waitQueueTimeoutMS non-negative integer 0 Specifies the amount of time, in milliseconds, spent attempting to check out a connection\nfrom a server's connection pool before timing out. wTimeoutMS non-negative integer null Specifies the default write concern timeout field for the client. zlibCompressionLevel integer between -1 and 9 (inclusive) -1 Specifies the level of compression when using zlib to compress wire\nprotocol messages. -1 signifies the default level, 0 signifies\nno compression, 1 signifies the fastest speed, and 9 signifies\nthe best compression. See Network Compression for more information.", + "code": [], + "preview": "This section explains the MongoDB connection and authentication options\nsupported by the driver. 
You can pass the connection options as\nparameters of the connection URI to specify the behavior of the client.", + "tags": "node.js, customize", + "facets": { + "genre": ["reference"], + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "fundamentals/connection/network-compression", + "title": "Network Compression", + "headings": [ + "Specify Compression Algorithms", + "Compression Algorithm Dependencies" + ], + "paragraphs": "You can enable a driver option to compress messages, which reduces the amount\nof data passed over the network between MongoDB and your application. The driver supports the following compression algorithms: If you specify multiple compression algorithms, the driver selects the\nfirst one in the list supported by your MongoDB instance. Snappy : available in MongoDB 3.6 and later. Zlib : available in MongoDB 3.6 and later. Zstandard : available in MongoDB 4.2 and later. When using the Snappy or Zstandard compression algorithm, you must\n add explicit dependencies . You can enable compression for the connection to your MongoDB instance\nby specifying the algorithms in one of two ways: Specify compression algorithms using the following strings: Adding the parameter to your connection string. Specifying the compressors option in your MongoClientOptions . To enable compression using the connection string, add the\n compressors parameter in the connection string. You can\nspecify one or more compression algorithms, separating them with\ncommas: To enable compression using the MongoClientOptions ,\npass the compressors option and the compression\nalgorithm you want to use. You can specify one or more compression\nalgorithms, separating them with commas: \"snappy\" for Snappy compression \"zlib\" for Zlib compression \"zstd\" for Zstandard compression To add the Snappy compression algorithm to your application, run the\nfollowing code: To add the Zstandard compression algorithm to your application, run the\nfollowing code:", + "code": [ + { + "lang": "javascript", + "value": "const uri =\n \"mongodb+srv://:@/?compressors=snappy,zlib\";\n\nconst client = new MongoClient(uri);" + }, + { + "lang": "javascript", + "value": "const uri =\n \"mongodb+srv://:@\";\n\nconst client = new MongoClient(uri,\n {\n compressors: [\"snappy\"]\n });" + }, + { + "lang": "javascript", + "value": "npm install --save snappy" + }, + { + "lang": "javascript", + "value": "npm install --save @mongodb-js/zstd" + } + ], + "preview": "You can enable a driver option to compress messages, which reduces the amount\nof data passed over the network between MongoDB and your application.", + "tags": null, + "facets": { + "genre": ["tutorial"], + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "fundamentals/connection/socks", + "title": "Enable SOCKS5 Proxy Support", + "headings": [ + "Overview", + "Install the socks Package", + "SOCKS5 Client Options", + "Example", + "Additional Information", + "API Documentation" + ], + "paragraphs": "In this guide, you can learn how to connect to MongoDB instances by using\na SOCKS5 proxy. SOCKS5 is a standardized protocol for connecting\nto network services through a proxy server. To learn more about the SOCKS5 protocol, see the Wikipedia entry on\n SOCKS . Starting in version 6.0 of the Node.js driver, you must install\nthe socks package to use SOCKS5 proxy support in your\napplication. 
You can install socks by running the following command\nin your shell: You can set options in your MongoClientOptions instance or\nin your connection URI to configure SOCKS5 proxy support for\nyour connection. The following table describes the client options\nrelated to SOCKS5: Name Accepted Values Default Value Description proxyHost string null Specifies the SOCKS5 proxy IPv4 address, IPv6 address, or domain\nname. proxyPort non-negative integer null Specifies the TCP port number of the SOCKS5 proxy server. If you\nset the proxyHost option, the value of this option defaults\nto 1080 . proxyUsername string null Specifies the username for authentication to the SOCKS5\nproxy server. If you set\nthis option to a zero-length string, the driver ignores it. proxyPassword string null Specifies the password for authentication to the SOCKS5\nproxy server. If you set\nthis option to a zero-length string, the driver ignores it. The driver throws an error if you set the proxyPort ,\n proxyUsername , or proxyPassword options without setting the\n proxyHost option. This example shows how to instantiate a MongoClient that uses SOCKS5\nproxy support. The following example code specifies proxy server options\nand connects to MongoDB: The preceding sample code uses placeholders for the connection URI\nand proxy server details. To run this code, you must replace these\nplaceholders with the information for your deployment and proxy server. For more information about SOCKS5 proxy support, see the\n MongoDB SOCKS5 specification . To learn more about the methods and types discussed in this\nguide, see the following API Documentation: MongoClientOptions MongoClient ProxyOptions", + "code": [ + { + "lang": "bash", + "value": "npm i socks" + }, + { + "lang": "javascript", + "value": "// Replace the placeholder with your connection string\nconst uri = \"\";\n\n// Replace the placeholders with your SOCKS5 proxy server details\nconst socksOptions = {\n proxyHost: \"\",\n proxyPort: 1080,\n proxyUsername: \"\",\n proxyPassword: \"\",\n};\n\n// Create a new client with the proxy server details\nconst client = new MongoClient(uri, socksOptions);" + } + ], + "preview": "In this guide, you can learn how to connect to MongoDB instances by using\na SOCKS5 proxy. SOCKS5 is a standardized protocol for connecting\nto network services through a proxy server.", + "tags": null, + "facets": { + "genre": ["tutorial"], + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "fundamentals/connection/tls", + "title": "Enable TLS on a Connection", + "headings": [ + "Overview", + "Enable TLS", + "Configure Certificates", + "Reference Certificates in a Client", + "Create a SecureContext Object to Store Certificates", + "Provide Certificate Filepaths", + "Create Buffer Objects to Store Certificates", + "SecureContext Example", + "Additional Information", + "API Documentation" + ], + "paragraphs": "In this guide, you can learn how to connect to MongoDB instances with\nthe TLS security protocol. To configure your connection to use TLS, enable\nthe TLS option and provide your certificates for validation. To learn more about TLS, see the Wikipedia entry on\n Transport Layer Security . You can enable TLS on a connection to your MongoDB instance\nin the following ways: In addition to the tls client option, the driver provides more\noptions to configure TLS on your connection. 
For testing purposes ,\nyou can set the tlsAllowInvalidHostnames ,\n tlsAllowInvalidCertificates , and tlsInsecure client options. Setting the tlsAllowInvalidHostnames option to true disables\nhostname verification, and setting the\n tlsAllowInvalidCertificates to true disables certificate\nvalidation. Setting the tlsInsecure option to true disables\nboth certificate and hostname validation. For a full list of client options, see Connection Options . Setting the tls option to true in your MongoClientOptions object Setting the tls option to true in your connection string A MongoClient instance can connect with TLS if you set tls \nto true in your MongoClientOptions object: A MongoClient instance can connect with TLS if you set the\n tls option to true in your connection string: If you use a DNS SRV record when connecting to MongoDB by specifying\nthe +srv modification in your connection string, you enable\nTLS on your connection by default. To disable it, set the tls or ssl parameter\nvalue to false in your connection string or MongoClientOptions object. To learn more about connection behavior when you use a DNS seedlist,\nsee the SRV Connection Format \nsection in the Server manual. Specifying any of these options in a production environment makes\nyour application insecure and potentially\nvulnerable to expired certificates and to foreign processes posing\nas valid client instances. To successfully initiate a TLS request, an application must prove its\nidentity by referencing cryptographic certificates. To connect to\nMongoDB with TLS, your certificates must be stored as PEM\nfiles. The following list describes the components required to establish\na connection with TLS: For production use, we recommend that your MongoDB deployment use valid\ncertificates generated and signed by the same certificate authority.\nFor testing, you can use self-signed certificates. TLS Component Description Certificate Authority (CA) One or more certificate authorities to\ntrust when making a TLS connection. Client Certificate A digital certificate and key that allow the server to verify the identity\nof your application to establish an encrypted network connection. Certificate Key The client certificate private key file. This key is often\nincluded within the certificate file itself. Passphrase The password to decrypt the private client key if it is encrypted. To learn more about the PEM format, see the Wikipedia entry on\n Privacy-Enhanced Mail . You must reference your certificates in your MongoClientOptions \nobject so that the server can validate them before the client connects.\nYou can reference your certificates in the following ways: Create a SecureContext object to store certificates (Recommended) Provide filepath strings that point to your certificates Create Buffer objects to store certificates We recommend that you use the secureContext option to configure\nyour TLS connection. SecureContext objects are native to Node.js\nand allow you to keep all your TLS options in a single reusable object. To create a SecureContext object, import the createSecureContext() \nmethod from the tls module. Next, call the createSecureContext() \nmethod and pass the contents of your certificates in the options parameter.\nThis method returns a SecureContext object that you can use in your\n MongoClientOptions object. 
The following code shows how to create a SecureContext object and\npass it to your client: To learn more about the createSecureContext() method and the\n tls package, see the Node.js TLS API documentation . For a runnable example that uses a SecureContext object, see\nthe SecureContext Example . You can include the filepaths for your certificates as client options to\nretrieve your certificates while connecting with TLS. The driver reads\nthese files when you call the connect() method on your\n MongoClient instance. The following code shows how to provide certificate filepaths as options\nin your MongoClient : Your TLS configuration might require that you present a certificate\nrevocation list (CRL) when connecting to MongoDB. Starting in version\n6.0 of the driver, you can pass the filepath of your CRL file to the\n tlsCRLFile option in your connection string or your\n MongoClientOptions instance. You can pass the contents of your certificate files as Buffer \nobjects in your client options to connect with TLS. The following code shows how to read the contents of your certificate\nfiles and pass the resulting Buffer objects as options in your\n MongoClient : This example shows how to create a SecureContext object and\na MongoClient instance that includes TLS options. The example\nconnects to MongoDB and executes a find query: For more information about enabling TLS on a connection, see the\nfollowing Server manual documentation: TLS/SSL (Transport Encryption) TLS/SSL Configuration for Clients MongoClientOptions MongoClient tlsAllowInvalidHostnames client option tlsAllowInvalidCertificates client option secureContext client option tlsCAFile client option tlsCertificateKeyFile client option ca client option cert client option key client option", + "code": [ + { + "lang": "js", + "value": "const client = new MongoClient(uri, { tls: true });" + }, + { + "lang": "js", + "value": "const uri = \"mongodb://:?tls=true\";\nconst client = new MongoClient(uri, myClientSettings);" + }, + { + "lang": "js", + "value": "// Create a SecureContext object\nconst secureContext = tls.createSecureContext({\n ca: fs.readFileSync(``),\n cert: fs.readFileSync(``),\n key: fs.readFileSync(``),\n});\n\n// Pass the SecureContext as a client option\nconst client = new MongoClient(uri, { tls: true, secureContext });" + }, + { + "lang": "js", + "value": "// Pass filepaths as client options\nconst client = new MongoClient(uri, {\n tls: true,\n tlsCAFile: ``,\n tlsCertificateKeyFile: ``,\n});" + }, + { + "lang": "js", + "value": "// Read file contents\nconst ca = fs.readFileSync(``);\nconst cert = fs.readFileSync(``);\nconst key = fs.readFileSync(``);\n\n// Pass Buffers as client options\nconst client = new MongoClient(uri, { tls: true, ca, cert, key });" + }, + { + "lang": "js", + "value": "import { MongoClient } from \"mongodb\";\nimport * as fs from \"fs\";\nimport * as tls from \"tls\";\n\n// Replace the uri string with your connection string.\nconst uri = \"\";\n\n// Replace the filepaths with your certificate filepaths.\nconst secureContext = tls.createSecureContext({\n ca: fs.readFileSync(``),\n cert: fs.readFileSync(``),\n key: fs.readFileSync(``),\n});\n\n// Create a client with the secureContext option\nconst client = new MongoClient(uri, { tls: true, secureContext });\n\nasync function run() {\n try {\n const db = client.db(\"myDB\");\n const myColl = db.collection(\"myColl\");\n const doc = await myColl.findOne({});\n console.log(doc);\n } finally {\n await client.close();\n }\n}\nrun().catch(console.dir);" + 
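}, + { + "lang": "js", + "value": "// A minimal sketch, not from the original page: the guide mentions the\n// tlsCRLFile option (driver v6.0+) for certificate revocation lists but\n// shows no example. Assuming PEM-encoded files at the placeholder paths:\nconst client = new MongoClient(uri, {\n tls: true,\n tlsCAFile: ``,\n tlsCRLFile: ``,\n});" + 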
} + ], + "preview": "In this guide, you can learn how to connect to MongoDB instances with\nthe TLS security protocol.", + "tags": "code example, node.js, security, encrypt", + "facets": { + "genre": ["reference"], + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "fundamentals/connection", + "title": "Connection", + "headings": ["Overview", "Compatibility"], + "paragraphs": "Learn how to configure your application's connection to a MongoDB\ndeployment using the Node.js driver. In the following sections, you will\nlearn: How to Connect to MongoDB The Available Connection Options How to Enable Network Compression How to Enable TLS on a Connection How to Enable SOCKS5 Proxy Support How to Connect to MongoDB Atlas from AWS Lambda For information about authenticating to MongoDB,\nsee Authentication and\n Enterprise Authentication Mechanisms . You can use the Node.js driver to connect to and work with\ndeployments hosted in the following environments: MongoDB Atlas : The fully\nmanaged service for MongoDB deployments in the cloud MongoDB Enterprise : The\nsubscription-based, self-managed version of MongoDB MongoDB Community : The\nsource-available, free-to-use, and self-managed version of MongoDB To learn more about using drivers to connect for deployments hosted in MongoDB\nAtlas, see Connect Your Application .", + "code": [], + "preview": "Learn how to configure your application's connection to a MongoDB deployment by using the Node.js driver.", + "tags": "node.js", + "facets": { + "genre": ["reference"], + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "fundamentals/crud/compound-operations", + "title": "Compound Operations", + "headings": [ + "Overview", + "Built-in Methods", + "includeResultMetadata Option" + ], + "paragraphs": "Most database requests either read data from a database or write data into\na database. However, there are instances where you may require a single\noperation that reads and writes data. Compound operations combine read and write operations\nin a single atomic statement, so there's no chance of data changing in\nbetween a read and a subsequent write. If you execute each operation separately, another request may alter the\ndata between the read and write operations. These data changes may not\nprevent your operation from succeeding, but they can make error handling\nmore difficult. When your application handles potential errors at\nany stage of the process, it can become brittle and difficult\nto test. The Node.js driver provides the following methods to perform compound\noperations: These methods accept an optional options object with\nconfigurable sort and\n projection options. You can also set the includeResultMetadata \noption to specify the return type of each\nof these methods. To learn more about this option, see the\n includeResultMetadata Option \nsection of this guide. The findOneAndUpdate() and findOneAndDelete() methods take the\n returnDocument setting, which specifies if the method returns the\npre-update or post-update version of the modified document. findOneAndDelete() findOneAndUpdate() findOneAndReplace() The includeResultMetadata option determines the return type of the\ncompound methods. This setting defaults to false , which means that each method returns the matched\ndocument. If no document is matched, each method returns null . 
If you set\n includeResultMetadata to true , the method returns a ModifyResult type that\ncontains the found document and metadata. Suppose a collection contains only the following document: The following table shows how the value of the\n includeResultMetadata option changes the return type of\nthe findOneAndDelete() method: Option Value Syntax and Output Default: false Document matched No document matched true", + "code": [ + { + "lang": "json", + "value": "{ _id: 1, x: \"on\" }" + }, + { + "lang": "js", + "value": "await coll.findOneAndDelete({ x: \"on\" });" + }, + { + "lang": "js", + "value": "{ _id: 1, x: 'on' }" + }, + { + "lang": "js", + "value": "await coll.findOneAndDelete({ x: \"off\" });" + }, + { + "lang": "js", + "value": "null" + }, + { + "lang": "js", + "value": "await coll.findOneAndDelete({ x: \"on\" }, { includeResultMetadata: true });" + }, + { + "lang": "js", + "value": "{ lastErrorObject: { n: 1 }, value: { _id: 1, x: 'on' }, ok: 1, ... }" + 
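}, + { + "lang": "js", + "value": "// A minimal sketch, not from the original page: the returnDocument setting\n// described above selects which version of the modified document is returned.\n// Assumes \"coll\" is the same collection used in the preceding examples.\nconst updated = await coll.findOneAndUpdate(\n { x: \"on\" },\n { $set: { x: \"off\" } },\n { returnDocument: \"after\" } // \"before\" returns the pre-update document\n);" + 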
} + ], + "preview": "Most database requests either read data from a database or write data into\na database. However, there are instances where you may require a single\noperation that reads and writes data.", + "tags": "node.js, atomic operation, read, write", + "facets": { + "genre": ["reference"], + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "fundamentals/crud/query-document", + "title": "Specify a Query", + "headings": [ + "Overview", + "Literal Value Queries", + "Comparison Operators", + "Logical Operators", + "Element Operators", + "Evaluation Operators" + ], + "paragraphs": "Most CRUD operations allow you to narrow the set of matched documents by\nspecifying matching criteria in a query document . Query documents contain\none or more query operators that apply to specific fields which determine which\ndocuments to include in the result set. In a query document, you can match fields against literal values, such as\n { title: 'The Room' } , or you can compose\n query operators to express more\ncomplex matching criteria. In this guide, we cover the following categories\nof query operators in MongoDB and show examples of how to use them: To follow the examples in this guide, use the following code\nsnippet to insert documents that describe fruits into the myDB.fruits collection: Comparison Operators Logical Operators Element Operators Evaluation Operators Your query operation may return a reference to a\ncursor that contains matching documents. To learn how to\nexamine data stored in the cursor, see the\n Cursor Fundamentals page . Literal value queries allow you to query for data that exactly matches\na value you provide in the query document. A literal value query has two\nparts: a field name and a value. Documents returned from such a query\nmust contain a field that has exactly the same name as the provided name\nand a value for that field that is exactly the same as the provided\nvalue. The following operation uses a literal query to search for\ndocuments containing a field called \"name\" that has a value of \"apples\": This code snippet returns the following results: Literal value queries are equivalent to the $eq comparison\noperator. As a result, the following two queries are equivalent: Comparison operators allow you to query for data based on comparisons\nwith values in a collection. Common comparison operators include\n $gt for \"greater than\" comparisons, $lt for \"less than\" comparisons,\nand $ne for \"not equal to\" comparisons. The following operation uses\nthe comparison operator $gt to search for documents in which the qty \nfield value is greater than 5 and prints them out: This code snippet returns the following results: Logical operators allow you to query for data using logic applied to the\nresults of field-level operators. For instance, you can use the $or \noperator to query for documents that match either a $gt comparison\noperator or a literal value query. The following operation uses the\nlogical operator $not to search for documents with a quantity value\nthat is not greater than 5 and prints them out: This code snippet returns the following results: For more information on comparison operators, see the reference manual\nentry for Comparison Query Operators . Whenever a query document contains multiple elements, those elements\nare combined with an implicit $and logical operator to\nfigure out which documents match the query. As a result, the following\ntwo queries are equivalent: Element operators allow you to query based on the presence, absence, or\ntype of a field. The following operation uses the element operator\n $exists to search for documents containing the color \nfield: This code snippet returns the following results: For more information on this operator, see the reference manual entry for\nthe $exists operator . Evaluation operators allow you to execute higher level logic, like\nregex and text searches, when querying for documents in a collection.\nCommon evaluation operators include $regex and $text .\nThe following operation uses the evaluation operator $mod to search\nfor documents in which the qty field value is divisible by 3 with\na remainder of 0: This code snippet returns the following results: For more information on this operator, see the reference manual entry for\nthe $mod operator .", + "code": [ + { + "lang": "javascript", + "value": "const myDB = client.db(\"myDB\");\nconst myColl = myDB.collection(\"fruits\");\n\nawait myColl.insertMany([\n { \"_id\": 1, \"name\": \"apples\", \"qty\": 5, \"rating\": 3 },\n { \"_id\": 2, \"name\": \"bananas\", \"qty\": 7, \"rating\": 1, \"color\": \"yellow\" },\n { \"_id\": 3, \"name\": \"oranges\", \"qty\": 6, \"rating\": 2 },\n { \"_id\": 4, \"name\": \"avocados\", \"qty\": 3, \"rating\": 5 },\n]);" + }, + { + "lang": "javascript", + "value": "const query = { \"name\": \"apples\" };\nconst cursor = myColl.find(query);\nfor await (const doc of cursor) {\n console.dir(doc);\n}" + }, + { + "lang": "javascript", + "value": "{ \"_id\": 1, \"name\": \"apples\", \"qty\": 5, \"rating\": 3 }" + }, + { + "lang": "javascript", + "value": "myColl.find({\n rating: { $eq: 5 }\n});" + }, + { + "lang": "javascript", + "value": "myColl.find({\n rating: 5\n});" + }, + { + "lang": "javascript", + "value": "// $gt means \"greater than\"\nconst query = { qty: { $gt : 5 } };\nconst cursor = myColl.find(query);\nfor await (const doc of cursor) {\n console.dir(doc);\n}" + }, + { + "lang": "javascript", + "value": "{ \"_id\": 2, \"name\": \"bananas\", \"qty\": 7, \"rating\": 1, \"color\": \"yellow\" }\n{ \"_id\": 3, \"name\": \"oranges\", \"qty\": 6, \"rating\": 2 }" + }, + { + "lang": "javascript", + "value": "const query = { qty: { $not: { $gt: 5 }}};\nconst cursor = myColl.find(query);\nfor await (const doc of cursor) {\n console.dir(doc);\n}" + }, + { + "lang": "javascript", + "value": "{ \"_id\": 4, \"name\": \"avocados\", \"qty\": 3, \"rating\": 5 }\n{ \"_id\": 1, \"name\": \"apples\", \"qty\": 5, \"rating\": 3 }" + }, + { + "lang": "javascript", + "value": 
"myColl.find({\n rating: { $eq: 5 },\n qty: { $gt: 4 }\n});" + }, + { + "lang": "javascript", + "value": "myColl.find({\n $and: [\n { rating: { $eq: 5 }},\n { qty: { $gt: 4 }}\n ]\n});" + }, + { + "lang": "javascript", + "value": "const query = { color: { $exists: true } };\nconst cursor = myColl.find(query);\nfor await (const doc of cursor) {\n console.dir(doc);\n}" + }, + { + "lang": "javascript", + "value": "{ \"_id\": 2, \"name\": \"bananas\", \"qty\": 7, \"rating\": 1, \"color\": \"yellow\" }" + }, + { + "lang": "javascript", + "value": "// $mod means \"modulo\" and returns the remainder after division\nconst query = { qty: { $mod: [ 3, 0 ] } };\nconst cursor = myColl.find(query);\nfor await (const doc of cursor) {\n console.dir(doc);\n}" + }, + { + "lang": "javascript", + "value": "{ \"_id\": 3, \"name\": \"oranges\", \"qty\": 6, \"rating\": 2 }\n{ \"_id\": 4, \"name\": \"avocados\", \"qty\": 3, \"rating\": 5 }" + } + ], + "preview": "Most CRUD operations allow you to narrow the set of matched documents by\nspecifying matching criteria in a query document. Query documents contain\none or more query operators that apply to specific fields which determine which\ndocuments to include in the result set.", + "tags": null, + "facets": { + "genre": ["tutorial"], + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "fundamentals/crud/read-operations/cursor", + "title": "Access Data From a Cursor", + "headings": [ + "Overview", + "Cursor Paradigms", + "Asynchronous Iteration", + "Manual Iteration", + "Return an Array of All Documents", + "Stream API", + "Event API", + "Cursor Utility Methods", + "Rewind", + "Close" + ], + "paragraphs": "Read operations that return multiple documents do not immediately return all values\nmatching the query. Because a query can potentially match very large sets of documents,\nthese operations return an object called a cursor, which references documents identified\nby the query. A cursor fetches documents in batches to reduce both memory consumption and\nnetwork bandwidth usage. Cursors are highly configurable and offer multiple interaction\nparadigms for different use cases. The following functions directly return cursors: Other methods such as Collection.findOne() \nand Collection.watch() use\ncursors internally, and return the results of the operations instead of\na cursor. Collection.find() Collection.aggregate() Collection.listIndexes() Collection.listSearchIndexes() Db.aggregate() Db.listCollections() You can use several different cursor paradigms to access data.\nMost cursor paradigms allow you to access query results one document at\na time, abstracting away network and caching logic. However, since use\ncases differ, other paradigms offer different access patterns, like\npulling all matching documents into a collection in process memory. Do not combine different cursor paradigms on a single cursor.\nOperations such as hasNext() and toArray() \neach predictably modify the original cursor. If you mix these calls\non a single cursor, you may receive unexpected results. Because asynchronous calls directly modify the cursor, executing\nasynchronous calls on a single cursor simultaneously can also cause\nundefined behavior. Always wait for the previous\nasynchronous operation to complete before running another. When you reach the last result through iteration or through an at-once\nfetch, the cursor is exhausted which means it ceases to respond to methods\nthat access the results. 
Cursors implement the AsyncIterator interface, which\nallows you to use cursors in for await...of loops: You can use the hasNext() \nmethod to check if a cursor can retrieve more data, and then use\nthe next() \nmethod to retrieve the subsequent element of the cursor: For use cases that require all documents matched by a query to be held\nin memory at the same time, use the toArray() \nmethod. Note that large numbers of matched documents can cause performance issues\nor failures if the operation exceeds memory constraints. Consider using\nthe for await...of syntax to iterate\nthrough results rather than returning all documents at once. Cursors expose the stream() method to convert them to Node Readable Streams. These streams operate in Object\nMode , which passes JavaScript objects rather than Buffers or Strings through the pipeline. As Readable Streams, cursors also support the Event API's\n close , data , end , and readable events: To reset a cursor to its initial position in the set of returned\ndocuments, use rewind() . Cursors consume memory and network resources both in the client\napplication and in the connected instance of MongoDB. Use\n close() \nto free up a cursor's resources in both the client application\nand the MongoDB Server:", + "code": [ + { + "lang": "javascript", + "value": " const cursor = myColl.find({});\n console.log(\"async\");\n for await (const doc of cursor) {\n console.log(doc);\n }" + }, + { + "lang": "javascript", + "value": " const cursor = myColl.find({});\n\n while (await cursor.hasNext()) {\n console.log(await cursor.next());\n }" + }, + { + "lang": "javascript", + "value": " const cursor = myColl.find({});\n const allValues = await cursor.toArray();" + }, + { + "lang": "javascript", + "value": " const cursor = myColl.find({});\n cursor.stream().on(\"data\", doc => console.log(doc));" + }, + { + "lang": "javascript", + "value": " const cursor = myColl.find({});\n // the \"data\" event is fired once per document\n cursor.on(\"data\", data => console.log(data));" + }, + { + "lang": "javascript", + "value": " const cursor = myColl.find({});\n const firstResult = await cursor.toArray();\n console.log(\"First count: \" + firstResult.length);\n await cursor.rewind();\n const secondResult = await cursor.toArray();\n console.log(\"Second count: \" + secondResult.length);" + }, + { + "lang": "javascript", + "value": " await cursor.close();" + } + ], + "preview": "Read operations that return multiple documents do not immediately return all values\nmatching the query. Because a query can potentially match very large sets of documents,\nthese operations return an object called a cursor, which references documents identified\nby the query. A cursor fetches documents in batches to reduce both memory consumption and\nnetwork bandwidth usage. Cursors are highly configurable and offer multiple interaction\nparadigms for different use cases.", + "tags": null, + "facets": { + "genre": ["tutorial"], + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "fundamentals/crud/read-operations/distinct", + "title": "Retrieve Distinct Values", + "headings": [ + "Overview", + "Sample Documents", + "Distinct", + "Document Field Parameter", + "Example", + "Query Parameter", + "Example", + "Options Parameter", + "Example", + "Additional Information", + "API Documentation" + ], + "paragraphs": "Use the distinct() method to retrieve all distinct values for a specified field\nacross a collection. 
To follow the examples in this guide, use the following code snippet to insert documents\nthat describe restaurants into the myDB.restaurants collection: Your query operation may return a reference to a\ncursor that contains matching documents. To learn how to\nexamine data stored in the cursor, see the\n Cursor Fundamentals page . The distinct() method requires a document field as a parameter. You can specify the\nfollowing optional parameters to adjust the method output: A query parameter to refine your results An options parameter to set collation rules Pass the name of the document field to return a list of the field's unique values. The \"Queens\" and \"Manhattan\" borough values each appear more than\nonce in the sample documents. However, the following example retrieves the\nunique values of the borough field: This code outputs the following borough values: You can specify a query parameter to return unique values for documents that match\nyour query. Visit Specify a Query for more information on constructing a\nquery filter. The following example outputs the distinct values of the cuisine field but\nexcludes restaurants in \"Brooklyn\" : In this case, the query filter matches every borough value except for \"Brooklyn\" . This\nprevents distinct() from outputting one cuisine value, \"Middle Eastern\" .\nThe code outputs the following values: You can specify the collation to the distinct() method by defining a\n collation field as an options parameter. This field allows you to set\nregional rules for string ordering and comparisons. See Collations for instructions on applying collations. When using the options parameter, you must also specify a query parameter. If\nyou don't want to use a query filter, define the query as {} . The following example uses a collation field to specify German language ordering\nconventions when outputting the distinct restaurant values: In this case, German string ordering conventions place words beginning with \"\u00c4\" before\nthose beginning with \"B\". The code outputs the following: If you do not specify the collation field, the output order follows default\nbinary collation rules. These rules place words beginning with \"\u00c4\" after those\nwith unaccented first letters: For a runnable example of retrieving distinct values, see Retrieve Distinct Values of a Field . 
To learn more about the distinct() method and its parameters, you can visit the\n API documentation .", + "code": [ + { + "lang": "javascript", + "value": "const myDB = client.db(\"myDB\");\nconst myColl = myDB.collection(\"restaurants\");\n\nawait myColl.insertMany([\n { \"_id\": 1, \"restaurant\": \"White Bear\", \"borough\": \"Queens\", \"cuisine\": \"Chinese\" },\n { \"_id\": 2, \"restaurant\": \"Via Carota\", \"borough\": \"Manhattan\", \"cuisine\": \"Italian\" },\n { \"_id\": 3, \"restaurant\": \"Borgatti's\", \"borough\": \"Bronx\", \"cuisine\": \"Italian\" },\n { \"_id\": 4, \"restaurant\": \"Tanoreen\", \"borough\": \"Brooklyn\", \"cuisine\": \"Middle Eastern\" },\n { \"_id\": 5, \"restaurant\": \"\u00c4pfel\", \"borough\": \"Queens\", \"cuisine\": \"German\" },\n { \"_id\": 6, \"restaurant\": \"Samba Kitchen\", \"borough\": \"Manhattan\", \"cuisine\": \"Brazilian\" },\n]);" + }, + { + "lang": "javascript", + "value": "// specify \"borough\" as the field to return values for\nconst distinctValues = await myColl.distinct(\"borough\");\nconsole.dir(distinctValues);" + }, + { + "lang": "json", + "value": "[ \"Bronx\", \"Brooklyn\", \"Manhattan\", \"Queens\" ]" + }, + { + "lang": "javascript", + "value": "// exclude Brooklyn restaurants from the output\nconst query = { borough: { $ne: \"Brooklyn\" }};\n\n// find the filtered distinct values of \"cuisine\"\nconst distinctValues = await myColl.distinct(\"cuisine\", query);\nconsole.dir(distinctValues);" + }, + { + "lang": "json", + "value": "[ \"Brazilian\", \"Chinese\", \"German\", \"Italian\" ]" + }, + { + "lang": "javascript", + "value": "// define an empty query document\nconst query = {};\n// specify German string ordering conventions\nconst options = { collation: { locale: \"de\" }};\n\nconst distinctValues = await myColl.distinct(\"restaurant\", query, options);\nconsole.dir(distinctValues);" + }, + { + "lang": "json", + "value": "[ \"\u00c4pfel\", \"Borgatti's\", \"Samba Kitchen\", \"Tanoreen\", \"Via Carota\", \"White Bear\" ]" + }, + { + "lang": "json", + "value": "[ \"Borgatti's\", \"Samba Kitchen\", \"Tanoreen\", \"Via Carota\", \"White Bear\", \"\u00c4pfel\" ]" + } + ], + "preview": "Use the distinct() method to retrieve all distinct values for a specified field\nacross a collection.", + "tags": null, + "facets": { + "genre": ["tutorial"], + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "fundamentals/crud/read-operations/geo", + "title": "Search Geospatially", + "headings": [ + "Overview", + "Coordinates on an Earth-like Sphere", + "Coordinates on a 2D Plane", + "Examples", + "Query by Proximity", + "Query Within a Range" + ], + "paragraphs": "You can query data based on geographical location using geospatial query\noperators. You can format geospatial queries using one of the following\ncoordinate systems: This section contains examples of geospatial queries using different\nquery operators that you can run against your Atlas sample dataset. Coordinates on an Earth-like Sphere Coordinates on a 2D Plane For geospatial queries using longitude and latitude coordinates\non an Earth-like sphere, use the GeoJSON \nquery format. While GeoJSON has multiple types , all GeoJSON data\ntypes use some form of the following structure: The object type determines the number of coordinates. 
For instance, a\n Point requires only one coordinate: a longitude and a latitude.\nA Line uses two coordinates: a longitude and a latitude for each end.\nA Polygon consists of a list of coordinates in which the first and last\ncoordinate are the same, effectively closing the polygon. To learn more\nabout the GeoJSON shapes you can use in MongoDB, consult the\n GeoJSON manual entry . To enable querying GeoJSON data, you must add the field to a 2dsphere \nindex. The following snippet creates an index on the location.geo field in\nthe theaters collection using the createIndex() method: You can also express geospatial queries using x and y coordinates in\na two-dimensional Euclidean plane. Until MongoDB introduced the GeoJSON format, this was the only format\ncompatible with geospatial queries; such coordinates are now referred to as\n\"legacy coordinate pairs\". Legacy coordinate pairs use the following structure: The field contains an array of two values in which the first represents\nthe x axis value and the second represents the y axis value. To enable querying using legacy coordinate pairs, create a 2d index on\nthe field in the collection. The following snippet creates an index on the\n coordinates field in the shipwrecks collection using the\n createIndex() method: See the\n MongoDB Server manual page on legacy coordinate pairs \nfor more information. Spherical ( 2dsphere ) and flat ( 2d ) indexes support some, but\nnot all, of the same query operators. For a full list of operators\nand their index compatibility, consult the\n manual entry for geospatial queries . The following examples use the MongoDB Atlas sample dataset. You can learn how to set up your own free-tier Atlas cluster and how to load the sample dataset in our\n quick start guide . The examples use the theaters collection in the sample_mflix database\nfrom the sample dataset. The theaters collection contains a 2dsphere index\non the location.geo field. The $near \noperator accepts a set of longitude-latitude coordinates and returns\ndocuments ordered from nearest to farthest. To limit the results to a\nmaximum distance in meters, use the $maxDistance option. For a\ncomplete list of options, see the reference documentation for $near .\nThe following example queries for theaters within 10,000 meters of\n [ -73.9667, 40.78 ] . The $geoWithin operator\nselects documents with geospatial data that exist within a specified\nshape. 
The following example searches for movie theaters in the New\nEngland area: See the MongoDB Server manual page on geospatial query operators \nfor more information on the operators you can use in your query.", + "code": [ + { + "lang": "javascript", + "value": " : {\n type: ,\n coordinates: [\n [longitude_1, latitude_1],\n ...\n [longitude_n, latitude_n]\n ]\n}" + }, + { + "lang": "javascript", + "value": "db.theaters.createIndex({ \"location.geo\": \"2dsphere\" });" + }, + { + "lang": "javascript", + "value": " : [ x, y ]" + }, + { + "lang": "javascript", + "value": "db.shipwrecks.createIndex({ coordinates: \"2d\" });" + }, + { + "lang": "javascript", + "value": "// Find theaters within a certain proximity\nasync function proximity(theaters) {\n // Define the query to find theaters near a specific location\n const query = {\n \"location.geo\": {\n $near: {\n $geometry: { type: \"Point\", coordinates: [-73.9667, 40.78] },\n $maxDistance: 10000,\n },\n },\n };\n // Find documents based on our query\n const cursor = theaters.find(query);" + }, + { + "lang": "javascript", + "value": "// Find theaters within a specific geographic range\nasync function range(theaters) {\n // Define the query to find theaters within a specified polygon\n const query = {\n \"location.geo\": {\n $geoWithin: {\n $geometry: {\n type: \"Polygon\",\n coordinates: [\n [\n [-72, 40], // Polygon coordinates defining the range\n [-74, 41],\n [-72, 39],\n [-72, 40],\n ],\n ],\n },\n },\n },\n };\n\n // Find documents based on our query\n const cursor = theaters.find(query);" + 
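}, + { + "lang": "javascript", + "value": "// A minimal, self-contained sketch, not from the original page: the same\n// $near query as above, run end to end. Assumes \"client\" is a connected\n// MongoClient and the 2dsphere index on location.geo already exists.\nconst theaters = client.db(\"sample_mflix\").collection(\"theaters\");\nconst query = {\n \"location.geo\": {\n $near: {\n $geometry: { type: \"Point\", coordinates: [-73.9667, 40.78] },\n $maxDistance: 10000, // meters\n },\n },\n};\nconst nearbyTheaters = await theaters.find(query).toArray();\nconsole.log(`Found ${nearbyTheaters.length} theaters within 10,000 meters.`);" + 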
} + ], + "preview": "You can query data based on geographical location using geospatial query\noperators. You can format geospatial queries using one of the following\ncoordinate systems:", + "tags": null, + "facets": { + "genre": ["tutorial"], + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "fundamentals/crud/read-operations/limit", + "title": "Limit the Number of Returned Results", + "headings": ["Overview", "Sample Documents", "Limit", "Skip"], + "paragraphs": "Use limit to cap the number of documents that can be returned from a\nread operation. limit functions as a cap on the maximum number of\ndocuments that the operation can return, but the operation can return\na smaller number of documents if there are not enough documents present\nto reach the limit. If limit is used with the\n skip method, the skip applies\nfirst and the limit only applies to the documents left over after\nthe skip. To follow the examples in this guide, use the following code snippet to insert documents\nthat describe books into the myDB.books collection: Your query operation may return a reference to a\ncursor that contains matching documents. To learn how to\nexamine data stored in the cursor, see the\n Cursor Fundamentals page . The following example queries the collection to return the top three\nlongest books. It matches all documents because the query filter is\nempty. Then, it applies a descending sort on the length field to\nreturn longer books before shorter books and a limit to\nreturn only the first 3 results: The code example above outputs the following three documents, sorted by\nlength: You can also apply sort and limit by specifying them in an\n options object in your call to the find() method. The following two\ncalls are equivalent: For more information on the options settings for the find() \nmethod, see the\n API documentation on find() . The order in which you call limit and sort does not matter\nbecause the driver reorders the calls to apply the sort first and the\nlimit after it. The following two calls are equivalent: To see the next three books in the results, append the skip() method,\npassing the number of documents to bypass as shown below: This operation returns the documents that describe the fourth through sixth\nbooks in order of longest-to-shortest length: You can combine skip and limit in this way to implement paging for your\ncollection, returning only small \"slices\" of the collection at once.", + "code": [ + { + "lang": "javascript", + "value": "const myDB = client.db(\"myDB\");\nconst myColl = myDB.collection(\"books\");\n\nawait myColl.insertMany([\n { \"_id\": 1, \"name\": \"The Brothers Karamazov\", \"author\": \"Dostoyevsky\", \"length\": 824 },\n { \"_id\": 2, \"name\": \"Les Mis\u00e9rables\", \"author\": \"Hugo\", \"length\": 1462 },\n { \"_id\": 3, \"name\": \"Atlas Shrugged\", \"author\": \"Rand\", \"length\": 1088 },\n { \"_id\": 4, \"name\": \"Infinite Jest\", \"author\": \"Wallace\", \"length\": 1104 },\n { \"_id\": 5, \"name\": \"Cryptonomicon\", \"author\": \"Stephenson\", \"length\": 918 },\n { \"_id\": 6, \"name\": \"A Dance With Dragons\", \"author\": \"Martin\", \"length\": 1104 },\n]);" + }, + { + "lang": "javascript", + "value": "// define an empty query document\nconst query = {};\n// sort in descending (-1) order by length\nconst sort = { length: -1 };\nconst limit = 3;\nconst cursor = myColl.find(query).sort(sort).limit(limit);\nfor await (const doc of cursor) {\n console.dir(doc);\n}" + }, + { + "lang": "json", + "value": "{ \"_id\": 2, \"name\": \"Les Mis\u00e9rables\", \"author\": \"Hugo\", \"length\": 1462 }\n{ \"_id\": 6, \"name\": \"A Dance With Dragons\", \"author\": \"Martin\", \"length\": 1104 }\n{ \"_id\": 4, \"name\": \"Infinite Jest\", \"author\": \"Wallace\", \"length\": 1104 }" + }, + { + "lang": "javascript", + "value": "myColl.find(query).sort({ length: -1 }).limit(3);\nmyColl.find(query, { sort: { length: -1 }, limit: 3 });" + }, + { + "lang": "javascript", + "value": "myColl.find(query).sort({ length: -1 }).limit(3);\nmyColl.find(query).limit(3).sort({ length: -1 });" + }, + { + "lang": "javascript", + "value": "// define an empty query document\nconst query = {};\n// sort in descending (-1) order by length\nconst sort = { length: -1 };\nconst limit = 3;\nconst skip = 3;\nconst cursor = myColl.find(query).sort(sort).limit(limit).skip(skip);\nfor await (const doc of cursor) {\n console.dir(doc);\n}" + }, + { + "lang": "json", + "value": "{ \"_id\": 3, \"name\": \"Atlas Shrugged\", \"author\": \"Rand\", \"length\": 1088 }\n{ \"_id\": 5, \"name\": \"Cryptonomicon\", \"author\": \"Stephenson\", \"length\": 918 }\n{ \"_id\": 1, \"name\": \"The Brothers Karamazov\", \"author\": \"Dostoyevsky\", \"length\": 824 }" + } + ], + "preview": "Use limit to cap the number of documents that can be returned from a\nread operation. limit functions as a cap on the maximum number of\ndocuments that the operation can return, but the operation can return\na smaller number of documents if there are not enough documents present\nto reach the limit. 
If limit is used with the\nskip method, the skip applies\nfirst and the limit only applies to the documents left over after\nthe skip.", + "tags": null, + "facets": { + "genre": ["tutorial"], + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "fundamentals/crud/read-operations/project", + "title": "Specify Which Fields to Return", + "headings": [ + "Overview", + "Sample Documents", + "Single Field", + "Multiple Fields" + ], + "paragraphs": "Use a projection to control which fields appear in the documents\nreturned by read operations. Many requests only require certain fields,\nso projections can help you limit unnecessary network bandwidth usage.\nProjections work in two ways: These two methods of projection are mutually exclusive: if you\nexplicitly include fields, you cannot explicitly exclude fields, and\nvice versa. Explicitly include fields with a value of 1 . This has the\nside-effect of implicitly excluding all unspecified fields. Explicitly exclude fields with a value of 0 . This has the\nside-effect of implicitly including all unspecified fields. To follow the examples in this guide, use the following code snippet to insert documents\nthat describe fruits into the myDB.fruits collection: Your query operation may return a reference to a\ncursor that contains matching documents. To learn how to\nexamine data stored in the cursor, see the\n Cursor Fundamentals page . In the following query, pass the projection to only return the name \nfield of each document: The projection document specifies a value of 1 for name . This instructs\nthe operation to include the name field of each returned document in\nthe results and exclude the qty and rating fields. Passing this projection\nto find() with an empty query document and no sort document yields the following\nresults: Although this projection only explicitly included the name field, the query returned\nthe _id field as well. The _id field is a special case because it is always included in every query unless\nexplicitly specified otherwise. This is because _id is a unique identifier for each\ndocument, a property that is often used when constructing queries. The movies \ncollection data demonstrates why this property is necessary: two or more movies can share\nthe same title, such as movie remakes. Because of this, you need a unique _id value to\nreliably reference a specific movie. _id is the only exception to the mutually\nexclusive include-exclude behavior in projections: you can explicitly exclude _id \neven when explicitly including other fields if you do not want _id to be present in\nreturned documents. The projection document specifies a value of 1 for name and 0 for\n _id . This instructs the operation to include the name field of each\nreturned document in the results and exclude the _id , qty , and rating \nfields. Passing this projection to find() with an empty query document and\nno sort document yields the following results: You can also specify multiple fields to include in your projection. Note: the\norder in which you specify the fields in the projection does not alter the\norder in which they are returned. 
The following example identifies two fields to include in the projection and yields\nthese results: For more projection examples, see the\n MongoDB Manual page on Project Fields to Return from Query .", + "code": [ + { + "lang": "javascript", + "value": "const myDB = client.db(\"myDB\");\nconst myColl = myDB.collection(\"fruits\");\n\nawait myColl.insertMany([\n { \"_id\": 1, \"name\": \"apples\", \"qty\": 5, \"rating\": 3 },\n { \"_id\": 2, \"name\": \"bananas\", \"qty\": 7, \"rating\": 1 },\n { \"_id\": 3, \"name\": \"oranges\", \"qty\": 6, \"rating\": 2 },\n { \"_id\": 4, \"name\": \"avocados\", \"qty\": 3, \"rating\": 5 },\n]);" + }, + { + "lang": "javascript", + "value": "// return only the name field\nconst projection = { name: 1 };\nconst cursor = myColl.find().project(projection);\nfor await (const doc of cursor) {\n console.dir(doc);\n}" + }, + { + "lang": "json", + "value": "{ \"_id\": 1, \"name\": \"apples\" }\n{ \"_id\": 2, \"name\": \"bananas\" }\n{ \"_id\": 3, \"name\": \"oranges\" }\n{ \"_id\": 4, \"name\": \"avocados\" }" + }, + { + "lang": "javascript", + "value": "// return only the name field\nconst projection = { _id: 0, name: 1 };\nconst cursor = myColl.find().project(projection);\nfor await (const doc of cursor) {\n console.dir(doc);\n}" + }, + { + "lang": "json", + "value": "{ \"name\": \"apples\" }\n{ \"name\": \"bananas\" }\n{ \"name\": \"oranges\" }\n{ \"name\": \"avocados\" }" + }, + { + "lang": "javascript", + "value": "const projection = { _id: 0, rating: 1, name: 1 };\nconst cursor = myColl.find().project(projection);\nfor await (const doc of cursor) {\n console.dir(doc);\n}" + }, + { + "lang": "json", + "value": " { \"name\": \"apples\", \"rating\": 3 }\n { \"name\": \"bananas\", \"rating\": 1 }\n { \"name\": \"oranges\", \"rating\": 2 }\n { \"name\": \"avocados\", \"rating\": 5 }" + } + ], + "preview": "Use a projection to control which fields appear in the documents\nreturned by read operations. Many requests only require certain fields,\nso projections can help you limit unnecessary network bandwidth usage.\nProjections work in two ways:", + "tags": null, + "facets": { + "genre": ["tutorial"], + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "fundamentals/crud/read-operations/retrieve", + "title": "Retrieve Data", + "headings": [ + "Overview", + "Find Documents", + "Additional Information", + "Aggregate Data from Documents", + "Additional Information", + "Monitor Data Changes", + "Additional Information" + ], + "paragraphs": "You can perform find operations to retrieve data from your MongoDB database.\nYou can perform a find operation to match documents on a set of criteria\nby calling the find() or findOne() method. You can also further specify the information that the find operation\nreturns by specifying optional parameters or by chaining other methods,\nas shown in the following guides: You can also use an aggregation operation to retrieve data. This type of\noperation allows you to apply an ordered pipeline of transformations to the\nmatched data. If you want to monitor the database for incoming data that matches a set of\ncriteria, you can use the watch operation to be notified in real-time when\nmatching data is inserted. This page includes a short interactive lab that demonstrates how to\nretrieve data by using the find() method. You can complete this lab\ndirectly in your browser window without installing MongoDB or a code editor. 
To start the lab, click the Open Interactive Tutorial button at the\ntop of the page. To expand the lab to a full-screen format, click the\nfull-screen button ( \u26f6 ) in the top-right corner of the lab pane. Sort Results Skip Returned Results Limit the Number of Returned Results Specify Which Fields to Return Your query operation may return a reference to a\ncursor that contains matching documents. To learn how to\nexamine data stored in the cursor, see the\n Cursor Fundamentals page . You can use the Node.js driver to connect and perform read operations for\ndeployments hosted in the following environments: MongoDB Atlas : The fully\nmanaged service for MongoDB deployments in the cloud MongoDB Enterprise : The\nsubscription-based, self-managed version of MongoDB MongoDB Community : The\nsource-available, free-to-use, and self-managed version of MongoDB To learn more about performing read operations in the Atlas UI for deployments hosted in MongoDB\nAtlas, see View, Filter, and Sort Documents . You can call the find() method on a Collection object. The\nmethod accepts a query document that describes the documents you want to\nretrieve. For more information on how to specify your query document,\nsee the Specify a Query guide. The find() method returns a Cursor instance from which you can\naccess the matched documents. The findOne() method returns a Promise \ninstance, which you can resolve to access either the matching document or\na null value if there are no matches. To execute a find operation that has no query criteria, you can\npass an empty query or omit the query document in your find\nmethod parameters. The following operations both return all documents in the\n myColl collection: If you don't pass a query or pass an empty query\nto the findOne() method, the operation returns a single\ndocument from a collection. You can specify options in a find operation even when you pass an\nempty query. For example, the following code shows how you can\nspecify a projection as an option while executing a find operation\nthat receives an empty query parameter: For more information about projecting document fields, see the\n Specify Which Fields to Return guide. A pizza restaurant wants to find all pizzas ordered by Lemony Snicket\nyesterday. They run the following find() query on the\n orders collection: Once the operation returns, the findResult variable references a\n Cursor . You can print the documents retrieved using the for await...of \nsyntax as shown below: The output might resemble the following: For runnable code examples that demonstrate find operations, see the following\nusage examples: For more information about the findOne() and find() methods, see the\nfollowing Server manual documentation: Find a Document Find Multiple Documents findOne() find() If you want to run a custom processing pipeline to retrieve data from your\ndatabase, you can use the aggregate() method. This method accepts\naggregation expressions to run in sequence. These expressions let you filter,\ngroup, and arrange the result data from a collection. A pizza restaurant wants to run a status report on-demand to\nsummarize pizza orders over the past week. They run the following\n aggregate() query on the orders collection to fetch the\ntotals for each distinct \"status\" field: Once the operation returns, the aggregateResult variable references a\n Cursor . 
You can print the documents retrieved using the for await...of \nsyntax as shown below: The output might resemble the following: For more information on how to construct an aggregation pipeline, see\nthe Aggregation guide or Aggregation Operations \nin the Server manual. You can use the watch() method to monitor a collection for changes\nthat match certain criteria. These changes include inserted,\nupdated, replaced, and deleted documents. You can pass this method\na pipeline of aggregation commands that sequentially runs on the changed\ndata whenever write operations are executed on the collection. A pizza restaurant wants to receive a notification whenever a new pizza\norder comes in. To accomplish this, they create an aggregation pipeline\nto filter on insert operations and return specific fields. They pass\nthis pipeline to the watch() method called on the orders \ncollection as shown below: For a runnable example of the watch() method, see the\n Watch for Changes usage example.", + "code": [ + { + "lang": "javascript", + "value": "myColl.find(); // no query\nmyColl.find({}); // empty query" + }, + { + "lang": "javascript", + "value": "const options = {\n projection: { _id: 0, field1: 1 },\n};\n\nconst findResult = await myColl.findOne({}, options);" + }, + { + "lang": "javascript", + "value": "for await (const doc of findResult) {\n console.log(doc);\n}" + }, + { + "lang": "javascript", + "value": "[\n { name: \"Lemony Snicket\", type: \"horseradish pizza\", qty: 1, status: \"delivered\", date: ... },\n { name: \"Lemony Snicket\", type: \"coal-fired oven pizza\", qty: 3, status: \"canceled\", date: ...},\n ...\n]" + }, + { + "lang": "javascript", + "value": " // Search for orders by name and within a specific date range\n const findResult = orders.find({\n name: \"Lemony Snicket\",\n date: {\n $gte: new Date(new Date().setHours(0, 0, 0)),\n $lt: new Date(new Date().setHours(23, 59, 59)),\n },\n });" + }, + { + "lang": "javascript", + "value": "for await (const doc of aggregateResult) {\n console.log(doc);\n}" + }, + { + "lang": "javascript", + "value": "[\n { _id: 'delivering', count: 5 },\n { _id: 'delivered', count: 37 },\n { _id: 'created', count: 9 }\n]" + }, + { + "lang": "javascript", + "value": " // Group orders by status within the last week\n const aggregateResult = orders.aggregate([\n {\n $match: {\n date: {\n $gte: new Date(new Date().getTime() - 1000 * 3600 * 24 * 7),\n $lt: new Date(),\n },\n },\n },\n {\n $group: {\n _id: \"$status\",\n count: {\n $sum: 1,\n },\n },\n },\n ]);" + }, + { + "lang": "javascript", + "value": " // Set up a change stream to listen for new order insertions\n const changeStream = orders.watch([\n { $match: { operationType: \"insert\" } },\n {\n $project: {\n \"fullDocument.name\": 1,\n \"fullDocument.address\": 1,\n },\n },\n ]);\n changeStream.on(\"change\", change => {\n const { name, address } = change.fullDocument;\n console.log(`New order for ${name} at ${address}.`);\n });" + } + ], + "preview": "Learn how to retrieve data, aggregate data, and monitor data changes in MongoDB by using the Node.js driver.", + "tags": "node.js, code example, find one, find many", + "facets": { + "genre": ["reference"], + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "fundamentals/crud/read-operations/skip", + "title": "Skip Returned Results", + "headings": ["Overview", "Sample Documents", "Example"], + "paragraphs": "Use skip to omit documents from the beginning of the list 
of\nreturned documents for a read operation. You can combine skip with\n sort to omit the top\n(for descending order) or bottom (for ascending order) results for a\ngiven query. Since the order of documents returned is not guaranteed in\nthe absence of a sort, using skip without using sort omits\narbitrary documents. If the value of skip exceeds the number of matched documents for\na query, then that query returns no documents. To follow the examples in this guide, use the following code snippet to insert documents\nthat describe fruits into the myDB.fruits collection: Your query operation may return a reference to a\ncursor that contains matching documents. To learn how to\nexamine data stored in the cursor, see the\n Cursor Fundamentals page . In the following example, we query the collection with a filter that\nmatches all the documents and pass options that specify sort and\n skip commands as query options. The sort option specifies that fruit\ndocuments that have higher rating values are returned before ones with lower\nratings. The skip option specifies that the first 2 documents are\nomitted from the result: Since we specified that the query skip the first 2 documents, the documents with the third and fourth highest\nratings are printed by the code snippet above: The sort and skip options can also be specified as methods chained to\nthe find method. The following two commands are equivalent:", + "code": [ + { + "lang": "javascript", + "value": "const myDB = client.db(\"myDB\");\nconst myColl = myDB.collection(\"fruits\");\n\nawait myColl.insertMany([\n { \"_id\": 1, \"name\": \"apples\", \"qty\": 5, \"rating\": 3 },\n { \"_id\": 2, \"name\": \"bananas\", \"qty\": 7, \"rating\": 1 },\n { \"_id\": 3, \"name\": \"oranges\", \"qty\": 6, \"rating\": 2 },\n { \"_id\": 4, \"name\": \"avocados\", \"qty\": 3, \"rating\": 5 },\n]);" + }, + { + "lang": "javascript", + "value": "// define an empty query document\nconst query = {};\nconst options = {\n // sort in descending (-1) order by rating\n sort : { rating: -1 },\n // omit the first two documents\n skip : 2,\n}\n\nconst cursor = myColl.find(query, options);\nfor await (const doc of cursor) {\n console.dir(doc);\n}" + }, + { + "lang": "json", + "value": "{ \"_id\": 3, \"name\": \"oranges\", \"qty\": 6, \"rating\": 2 }\n{ \"_id\": 2, \"name\": \"bananas\", \"qty\": 7, \"rating\": 1 }" + }, + { + "lang": "javascript", + "value": "myColl.find(query, { sort: { rating: -1}, skip: 2});\nmyColl.find(query).sort({rating: -1}).skip(2);" + } + ], + "preview": "Use skip to omit documents from the beginning of the list of\nreturned documents for a read operation. You can combine skip with\nsort to omit the top\n(for descending order) or bottom (for ascending order) results for a\ngiven query. Since the order of documents returned is not guaranteed in\nthe absence of a sort, using skip without using sort omits\narbitrary documents.", + "tags": null, + "facets": { + "genre": ["tutorial"], + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "fundamentals/crud/read-operations/sort", + "title": "Sort Results", + "headings": ["Overview", "Sample Documents", "Example"], + "paragraphs": "Use sort to change the order in which read operations return\ndocuments. Sort tells MongoDB to order returned documents by the\nvalues of one or more fields in a certain direction. To sort returned\ndocuments by a field in ascending (lowest first) order, use a value of\n 1 . 
To sort in descending (greatest first) order instead, use -1 .\nIf you do not specify a sort, MongoDB does not guarantee the order of\nquery results. Follow the instructions in the examples below to insert data into\nthe myDB.books collection and perform a sort on the results of a query.\nConsider a collection containing documents that describe books. To\ninsert this data into a collection, run the following operation: Your query operation may return a reference to a\ncursor that contains matching documents. To learn how to\nexamine data stored in the cursor, see the\n Cursor Fundamentals page . Pass the following sort document to a read operation to ensure that the\noperation returns books with longer lengths before books with shorter\nlengths: In this case, the number -1 tells the read operation to sort the\nbooks in descending order by length. find() returns the following\ndocuments when this sort is used with an empty query: Sometimes, the order of two or more documents is ambiguous using a\nspecified sort. In the above case, both \"A Dance with Dragons\" and\n\"Infinite Jest\" have 1104 pages, so the order in which they are\nreturned is not guaranteed. To resolve ties in your sorted results in a\nrepeatable way, add more fields to the sort document: With the addition of the author field to the sort document, the read operation sorts\nmatching documents first by length then, if there is a tie, by author . Matched\ndocument fields are compared in the same order as fields are specified in the sort\ndocument. find() returns the following ordering of documents when this sort is used on\nthe documents matching the query, sorting \"Martin\" before \"Wallace\" for the two books with\nthe same length:", + "code": [ + { + "lang": "javascript", + "value": "const myDB = client.db(\"myDB\");\nconst myColl = myDB.collection(\"books\");\n\nawait myColl.insertMany([\n { \"_id\": 1, \"name\": \"The Brothers Karamazov\", \"author\": \"Dostoyevsky\", \"length\": 824 },\n { \"_id\": 2, \"name\": \"Les Mis\u00e9rables\", \"author\": \"Hugo\", \"length\": 1462 },\n { \"_id\": 3, \"name\": \"Atlas Shrugged\", \"author\": \"Rand\", \"length\": 1088 },\n { \"_id\": 4, \"name\": \"Infinite Jest\", \"author\": \"Wallace\", \"length\": 1104 },\n { \"_id\": 5, \"name\": \"Cryptonomicon\", \"author\": \"Stephenson\", \"length\": 918 },\n { \"_id\": 6, \"name\": \"A Dance with Dragons\", \"author\": \"Martin\", \"length\": 1104 },\n]);" + }, + { + "lang": "javascript", + "value": "// define an empty query document\nconst query = {};\n// sort in descending (-1) order by length\nconst sort = { length: -1 };\nconst cursor = myColl.find(query).sort(sort);\nfor await (const doc of cursor) {\n console.dir(doc);\n}" + }, + { + "lang": "json", + "value": "{ \"_id\": 2, \"name\": \"Les Mis\u00e9rables\", \"author\": \"Hugo\", \"length\": 1462 }\n{ \"_id\": 4, \"name\": \"Infinite Jest\", \"author\": \"Wallace\", \"length\": 1104 }\n{ \"_id\": 6, \"name\": \"A Dance with Dragons\", \"author\": \"Martin\", \"length\": 1104 }\n{ \"_id\": 3, \"name\": \"Atlas Shrugged\", \"author\": \"Rand\", \"length\": 1088 }\n{ \"_id\": 5, \"name\": \"Cryptonomicon\", \"author\": \"Stephenson\", \"length\": 918 }\n{ \"_id\": 1, \"name\": \"The Brothers Karamazov\", \"author\": \"Dostoyevsky\", \"length\": 824 }" + }, + { + "lang": "javascript", + "value": "// define an empty query document\nconst query = {};\n// sort in ascending (1) order by length\nconst sort = { length: 1, author: 1 };\nconst cursor = 
myColl.find(query).sort(sort);\nfor await (const doc of cursor) {\n console.dir(doc);\n}" + }, + { + "lang": "json", + "value": "{ \"_id\": 1, \"name\": \"The Brothers Karamazov\", \"author\": \"Dostoyevsky\", \"length\": 824 }\n{ \"_id\": 5, \"name\": \"Cryptonomicon\", \"author\": \"Stephenson\", \"length\": 918 }\n{ \"_id\": 3, \"name\": \"Atlas Shrugged\", \"author\": \"Rand\", \"length\": 1088 }\n{ \"_id\": 6, \"name\": \"A Dance with Dragons\", \"author\": \"Martin\", \"length\": 1104 }\n{ \"_id\": 4, \"name\": \"Infinite Jest\", \"author\": \"Wallace\", \"length\": 1104 }\n{ \"_id\": 2, \"name\": \"Les Mis\u00e9rables\", \"author\": \"Hugo\", \"length\": 1462 }" + } + ], + "preview": "Use sort to change the order in which read operations return\ndocuments. Sort tells MongoDB to order returned documents by the\nvalues of one or more fields in a certain direction. To sort returned\ndocuments by a field in ascending (lowest first) order, use a value of\n1. To sort in descending (greatest first) order instead, use -1.\nIf you do not specify a sort, MongoDB does not guarantee the order of\nquery results.", + "tags": null, + "facets": { + "genre": ["tutorial"], + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "fundamentals/crud/read-operations/text", + "title": "Search Text", + "headings": [ + "Overview", + "Examples", + "Query for Words", + "Query By Phrase", + "Query with Negations", + "Sort by Relevance" + ], + "paragraphs": "Text searches let you search string type fields in your collection for specified words or\nphrases. You can perform a text search by using the $text operator, which performs a\nlogical OR on each term separated by a space in the search string. You can also\nspecify more options to the operator to handle case sensitivity, stop words, and word\nstemming (such as plural forms or other tenses) for a supported language.\nThis is often used for unstructured text such as transcripts, essays, or web pages. The $text query operator requires that you specify the search field in\na text index on your collection. See the examples below for sample\ncode for creating a text index and using the $text query operator. Atlas Search helps you build fast,\nrelevance-based search capabilities on top of your MongoDB data. Try it today on\n MongoDB Atlas , our\nfully managed database as a service. The following examples use sample data from the movies collection in the\n sample_mflix database. To enable text searches on the title field, create a\n text index by using the following command: We use a single field text index for the examples in this guide, but you can\ncreate a compound text index that broadens your text queries to multiple\nfields. The following command creates a text index on two fields in the\n movies collection: You can only create one text index per collection. Every text search\nqueries all the fields specified in that index for matches. To learn more about text indexes, see Text Indexes in the Server manual. When creating a compound text index, you can specify a weight option to\nprioritize certain text fields in your index. When you execute a text\nsearch, the field weights influence how MongoDB calculates the\n text search score for each matching\ndocument. To learn more about specifying field weights when creating a text\nindex, see the Text Indexes \nsection in the Indexes guide. This example queries for Star Trek movies by searching for titles\ncontaining the word \"trek\". 
If you want to query using multiple words,\nseparate your words with spaces to query for documents that match any of\nthe search terms (logical OR ). This operation returns the following documents: Success! The query found every document in the movies collection\nwith a title including the word \"trek\". Unfortunately, the search included\none unintended item: \"Trek Nation,\" which is a movie about Star Trek and not\npart of the Star Trek movie series. To solve this, we can query with a more\nspecific phrase . To make your query more specific, try using the phrase \"star trek\"\ninstead of just the word \"trek\". To search by phrase, surround your\nmulti-word phrase with escaped quotes ( \\\"\\\" ): Querying by the phrase \"star trek\" instead of just the term \"trek\" \nmatches the following documents: These results include all movies in the database that contain the phrase\n \"star trek\" , which in this case results in only fictional Star Trek\nmovies. Unfortunately, this query returned \"Star Trek Into\nDarkness\" , a movie that was not part of the original series of movies. To\nresolve this issue, we can omit that document with a negation . To use a negated term, place a negative sign, - , in front of the term\nyou want to omit from the result set. The query operation omits any\ndocuments that contain this term from the search result. Since this query\nincludes two distinct terms, separate them with a space. Querying with the negated term yields the following documents: Your query operation may return a reference to a\ncursor that contains matching documents. To learn how to\nexamine data stored in the cursor, see the\n Cursor Fundamentals page . Now that the result set reflects the desired results, you can use the\ntext search textScore , accessed using the $meta operator in the query\nprojection, to order the results by relevance: Querying in this way returns the following documents in the following\norder. In general, text relevance increases as a string matches more\nterms and decreases as the unmatched portion of the string lengthens. 
For more information about the $text operator and its options, see the\n manual entry .", + "code": [ + { + "lang": "javascript", + "value": "db.movies.createIndex({ title: \"text\" });" + }, + { + "lang": "javascript", + "value": "db.movies.createIndex({ title: \"text\", plot: \"text\" });" + }, + { + "lang": "javascript", + "value": "{ title: 'Trek Nation' }\n{ title: 'Star Trek' }\n{ title: 'Star Trek Into Darkness' }\n{ title: 'Star Trek: Nemesis' }\n{ title: 'Star Trek: Insurrection' }\n{ title: 'Star Trek: Generations' }\n{ title: 'Star Trek: First Contact' }\n{ title: 'Star Trek: The Motion Picture' }\n{ title: 'Star Trek VI: The Undiscovered Country' }\n{ title: 'Star Trek V: The Final Frontier' }\n{ title: 'Star Trek IV: The Voyage Home' }\n{ title: 'Star Trek III: The Search for Spock' }\n{ title: 'Star Trek II: The Wrath of Khan' }" + }, + { + "lang": "javascript", + "value": " // Create a query that searches for the string \"trek\"\n const query = { $text: { $search: \"trek\" } };\n\n // Return only the `title` of each matched document\n const projection = {\n _id: 0,\n title: 1,\n };\n\n // Find documents based on our query and projection\n const cursor = movies.find(query).project(projection);" + }, + { + "lang": "javascript", + "value": "{ title: 'Star Trek' }\n{ title: 'Star Trek Into Darkness' }\n{ title: 'Star Trek: Nemesis' }\n{ title: 'Star Trek: Insurrection' }\n{ title: 'Star Trek: Generations' }\n{ title: 'Star Trek: First Contact' }\n{ title: 'Star Trek: The Motion Picture' }\n{ title: 'Star Trek VI: The Undiscovered Country' }\n{ title: 'Star Trek V: The Final Frontier' }\n{ title: 'Star Trek IV: The Voyage Home' }\n{ title: 'Star Trek III: The Search for Spock' }\n{ title: 'Star Trek II: The Wrath of Khan' }" + }, + { + "lang": "javascript", + "value": " // Create a query that searches for the phrase \"star trek\"\n const query = { $text: { $search: \"\\\"star trek\\\"\" } };\n\n // Return only the `title` of each matched document\n const projection = {\n _id: 0,\n title: 1,\n };\n\n // Find documents based on the query and projection\n const cursor = movies.find(query).project(projection);" + }, + { + "lang": "javascript", + "value": "{ title: 'Star Trek' }\n{ title: 'Star Trek: Nemesis' }\n{ title: 'Star Trek: Insurrection' }\n{ title: 'Star Trek: Generations' }\n{ title: 'Star Trek: First Contact' }\n{ title: 'Star Trek: The Motion Picture' }\n{ title: 'Star Trek VI: The Undiscovered Country' }\n{ title: 'Star Trek V: The Final Frontier' }\n{ title: 'Star Trek IV: The Voyage Home' }\n{ title: 'Star Trek III: The Search for Spock' }\n{ title: 'Star Trek II: The Wrath of Khan' }" + }, + { + "lang": "javascript", + "value": " // Create a query that searches for the phrase \"star trek\" while omitting \"into darkness\"\n const query = { $text: { $search: \"\\\"star trek\\\" -\\\"into darkness\\\"\" } };\n\n // Include only the `title` field of each matched document\n const projection = {\n _id: 0,\n title: 1,\n };\n\n // Find documents based on the query and projection\n const cursor = movies.find(query).project(projection);" + }, + { + "lang": "javascript", + "value": "{ title: 'Star Trek', score: 1.5 }\n{ title: 'Star Trek: Generations', score: 1.3333333333333333 }\n{ title: 'Star Trek: Insurrection', score: 1.3333333333333333 }\n{ title: 'Star Trek: Nemesis', score: 1.3333333333333333 }\n{ title: 'Star Trek: The Motion Picture', score: 1.25 }\n{ title: 'Star Trek: First Contact', score: 1.25 }\n{ title: 'Star Trek II: The Wrath of Khan', score: 1.2 }\n{ title: 
'Star Trek III: The Search for Spock', score: 1.2 }\n{ title: 'Star Trek IV: The Voyage Home', score: 1.2 }\n{ title: 'Star Trek V: The Final Frontier', score: 1.2 }\n{ title: 'Star Trek VI: The Undiscovered Country', score: 1.2 }" + }, + { + "lang": "javascript", + "value": " // Create a query that searches for the phrase \"star trek\" while omitting \"into darkness\"\n const query = { $text: { $search: \"\\\"star trek\\\" -\\\"into darkness\\\"\" } };\n\n // Sort returned documents by descending text relevance score\n const sort = { score: { $meta: \"textScore\" } };\n\n // Include only the `title` and `score` fields in each returned document\n const projection = {\n _id: 0,\n title: 1,\n score: { $meta: \"textScore\" },\n };\n\n // Find documents based on the query, sort, and projection\n const cursor = movies\n .find(query)\n .sort(sort)\n .project(projection);" + } + ], + "preview": "Text searches let you search string type fields in your collection for specified words or\nphrases. You can perform a text search by using the $text operator, which performs a\nlogical OR on each term separated by a space in the search string. You can also\nspecify more options to the operator to handle case sensitivity, stop words, and word\nstemming (such as plural forms or other tenses) for a supported language.\nThis is often used for unstructured text such as transcripts, essays, or web pages.", + "tags": null, + "facets": { + "genre": ["tutorial"], + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "fundamentals/crud/read-operations", + "title": "Read Operations", + "headings": [], + "paragraphs": "Retrieve Data Access Data From a Cursor Retrieve Distinct Values Sort Results Skip Returned Results Limit the Number of Returned Results Specify Which Fields to Return Search Geospatially Search Text", + "code": [], + "preview": "Learn about the commands for running MongoDB read operations by using the MongoDB Node.js driver.", + "tags": null, + "facets": { + "genre": ["reference"], + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "fundamentals/crud/read-write-pref", + "title": "Specify How CRUD Operations Run on Replica Sets", + "headings": [ + "Overview", + "Write Concern", + "Example: Set the Write Concern for a Single Write Operation", + "Example: Retrieve and Apply an Existing Write Concern", + "Read Concern", + "Example: Set the Read Concern Level of an Aggregation", + "Example: Change the Read Concern of a Database", + "Read Preference", + "Example: Set Read Preference and Concerns for a Transaction", + "Example: Set the Read Preference of a Cluster in the Connection String", + "API Documentation" + ], + "paragraphs": "In this guide, you can learn how to use the write concern , read concern , and\n read preference configurations to modify the way that MongoDB runs\ncreate, read, update, and delete (CRUD) operations on replica sets. You can set write concern, read concern, and read preference options at the following\nlevels: This list also indicates the increasing order of precedence of the option settings. For\nexample, if you set a read concern level for a transaction, it will override a read\nconcern level set for the client. These options allow you to customize the causal consistency and availability of the data\nin your replica sets. 
Client, which sets the default for all operation executions unless overridden Session Transaction Database Collection The write concern specifies the level of acknowledgement requested from MongoDB for write\noperations, such as an insert or update, before the operation successfully returns.\nOperations that do not specify an explicit write concern inherit the global default write\nconcern settings. For more information, see Write Concern in the\nServer manual. For detailed API documentation, see the WriteConcern API documentation . The following table describes the WriteConcern parameters: Parameter Type Description w (optional) W Requests acknowledgment that the write operation has propagated to a specified\nnumber of mongod instances or to mongod instances that are labelled specified tags wtimeoutMS (optional) number Specifies a time limit to prevent write operations from blocking indefinitely journal (optional) boolean Requests acknowledgment that the write operation has been written to the on-disk journal This code uses custom WriteConcern settings while creating a new document: This code uses the fromOptions() method to construct a WriteConcern from the\noptions of an existing database reference, myDB . Note that myDB could be replaced\nwith a reference to any entity that accepts a write concern option. Then the new write\nconcern is applied to a document, myDoc . The read concern specifies the following behaviors: You can specify the read concern setting by using the level parameter. The default\nread concern level is local . This means that the client returns the data from the\nreplica set member that the client is connected to, with no guarantee that the data has\nbeen written to all replica set members. Note that lower read concern level requirements\nmay reduce latency. For more information about read concerns or read concern levels, see\n Read Concern in the Server manual. For more detail on\nthe ReadConcern type and definitions of the read concern levels, see the ReadConcern in\nthe API documentation. Level of causal consistency across replica sets Isolation guarantees maintained during a query This code sets the read concern level of an aggregation to \"available\" : For more information about aggregates, see the Aggregation page. This code changes the read concern level of a database to \"local\" : The read preference determines which member of a replica set MongoDB reads from when running a\nquery. You can also customize how the server evaluates members. For more detailed API documentation, see the ReadPreference API\ndocumentation . The following table describes the ReadPreference parameters: Parameter Type Description mode ReadPreferenceMode Specifies a requirement or preference for which replica set\nmember the server reads from. The default mode, primary , specifies that\noperations read from the primary member of the replica set. tags (optional) TagSet List Assigns tags to secondary replica set members to customize how the server evaluates\nthem. Tags cannot be used with the primary read preference mode setting. options (optional) ReadPreferenceOptions Sets various options, including hedge \nand maxStalenessSeconds that can be\napplied to your read preference. This code sets the read preference, read concern, and write concern for the operations in\na transaction: For more information about transactions, see Transactions . 
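A small sketch of the precedence order listed above (hypothetical names; uri is assumed to be a valid connection string): a read concern supplied when obtaining a collection handle overrides the default inherited from the client:

// Client-level default: operations use the "local" read concern unless overridden
const client = new MongoClient(uri, { readConcernLevel: "local" });

// Collection-level option takes precedence over the client default
const items = client.db("myDB").collection("items", {
  readConcern: { level: "majority" },
});

// This read uses "majority"; reads on other collections still use "local"
const item = await items.findOne({});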
This code example creates a MongoClient that uses the \"secondary\" read preference mode\nwhen performing queries on a cluster: This example also sets the maxStalenessSeconds option. For more information about connection string options, see the Connection String Options \nsection in the manual. To learn more about the methods and types mentioned in this guide, see the following API\ndocumentation: API WriteConcern API ReadConcern API ReadPreference", + "code": [ + { + "lang": "js", + "value": "myDB.myCollection.insertOne(\n { name: \"anotherDocumentName\" },\n { writeConcern:\n { w: 2, wtimeoutMS: 5000 }\n }\n);" + }, + { + "lang": "js", + "value": "const newWriteConcern = WriteConcern.fromOptions(myDB);\nconst myDoc = { name: \"New Document\" };\nWriteConcern.apply(myDoc, newWriteConcern);" + }, + { + "lang": "js", + "value": "const pipeline = [\n {\"$match\": {\n category: \"KITCHENWARE\",\n }},\n {\"$unset\": [\n \"_id\",\n \"category\",\n ]}\n ];\n\nresult = await myDB.collection(\"mycollection\")\n .aggregate(\n pipeline,\n { readConcern:\n { level: \"available\" }\n }\n );" + }, + { + "lang": "js", + "value": "const options = { readConcern: { level: \"local\" } };\nconst myDB = client.db(\"mydb\", options);" + }, + { + "lang": "js", + "value": "const transactionOptions = {\n readPreference: \"primary\",\n readConcern: { level: \"local\" },\n writeConcern: { w: \"majority\" },\n};\n\nconst session = client.startSession();\nsession.startTransaction(transactionOptions);\n// ...\nawait session.commitTransaction();\nawait session.endSession();" + }, + { + "lang": "js", + "value": "const uri = \"mongodb+srv://<user>:<password>@<cluster-url>?readPreference=secondary&maxStalenessSeconds=120\";\nconst client = new MongoClient(uri);" + } + ], + "preview": "In this guide, you can learn how to use the write concern, read concern, and\nread preference configurations to modify the way that MongoDB runs\ncreate, read, update, and delete (CRUD) operations on replica sets.", + "tags": "node.js, customize, preferences, replica set, consistency", + "facets": { + "genre": ["reference"], + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "fundamentals/crud/write-operations/delete", + "title": "Delete Documents", + "headings": ["Overview", "Delete"], + "paragraphs": "In this section, we show you how to call the write operations to remove \ndocuments from a collection in your MongoDB database. If you want to remove existing documents from a collection, you can\nuse deleteOne() to remove one document or deleteMany() for one or\nmore documents. These methods accept a query document that matches the\ndocuments you want to delete. You can specify the document or documents to be deleted by the\n deleteOne() or deleteMany() write operations in a JSON object as\nfollows: To delete the first matching document using the deleteOne() method or\nto delete all matching documents using the deleteMany() method, pass the\ndocument as the method parameter: You can print the number of documents deleted by the operation by\naccessing the deletedCount field of the result for each of the\nmethod calls above as follows: If the delete operation is successful, these statements print the number of documents\ndeleted by the associated operation. 
To see fully runnable examples and more information on the available options, see the usage\nexamples for deleteOne() and\n deleteMany() .", + "code": [ + { + "lang": "javascript", + "value": "const doc = {\n pageViews: {\n $gt: 10,\n $lt: 32768\n }\n};" + }, + { + "lang": "javascript", + "value": "const deleteResult = await myColl.deleteOne(doc);\nconst deleteManyResult = await myColl.deleteMany(doc);" + }, + { + "lang": "javascript", + "value": "console.dir(deleteResult.deletedCount);\nconsole.dir(deleteManyResult.deletedCount);" + } + ], + "preview": "In this section, we show you how to call the write operations to remove\ndocuments from a collection in your MongoDB database.", + "tags": null, + "facets": { + "genre": ["tutorial"], + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "fundamentals/crud/write-operations/embedded-arrays", + "title": "Update Arrays in a Document", + "headings": [ + "Overview", + "Specifying Array Elements", + "The First Matching Array Element", + "Example", + "Matching All Array Elements", + "Example", + "Matching Multiple Array Elements", + "Usage", + "Example" + ], + "paragraphs": "In this guide, you can learn how to use the following array update\noperators to modify an array embedded within a document: For a list of array update operators, see Update Operators in the Server\nManual documentation. Positional Operator : $ All Positional Operator : $[] Filtered Positional Operator : $[<identifier>] Positional operators specify which array elements to update. You can use these operators to apply updates to the first element, all elements, or\ncertain elements of an array that match specified criteria. To specify elements in an array with positional operators, use dot\nnotation . Dot notation is a property access syntax for navigating BSON\nobjects. To learn more, see dot notation . To update the first array element of each document that matches your\nquery, use the positional operator $ . The positional operator $ references the array matched by the query.\nYou cannot use this operator to reference a nested array. If you want to\naccess a nested array, use the filtered positional operator . Do not use the $ operator in an upsert call because the\ndriver treats $ as a field name in the insert document. This example uses the following sample document to show how to update\nthe first matching array element: The following code shows how to increment a value in the first array\nelement that matches a query. The query matches elements in the entries array where the value of\n x is a string type. The update increases the y value by\n 33 in the first matching element. After you run the update operation, the document resembles the\nfollowing: The example includes the entries.x field in the\nquery to match the array that the $ operator applies an update to. If you\nomit the entries.x field from the query while using the\n $ operator in an update, the driver is unable to identify the\nmatching array and raises the following error: To perform the update on all array elements of each document that\nmatches your query, use the all positional operator $[] . 
This example uses the following sample documents, which describe phone\ncall logs, to show how to update all matching array elements: The following code shows how to remove the duration field from\nall calls array entries in the document whose date is\n \"5/15/2023\" : After you run the update operation, the documents resemble the following: To perform an update on all embedded array elements of each document\nthat matches your query, use the filtered positional operator\n $[<identifier>] . The filtered positional operator $[<identifier>] specifies the\nmatching array elements in the update document. To identify which array\nelements to match, pair this operator with <identifier> in an\n arrayFilters object. The <identifier> placeholder represents an element of the array\nfield. You must select a value for <identifier> that starts with a\nlowercase letter and contains only alphanumeric characters. You can use a filtered positional operator in an update operation.\nAn update operation takes a query, an update document, and\noptionally, an options object as its parameters. The following steps describe how to use a filtered positional operator\nin an update operation: Format your update document as follows: This update document contains the following placeholders: <update operator> : The array update operator <array> : The array in the document to update <identifier> : The identifier for the filtered positional operator <field> : The field in the array element to update <value> : The value that describes the update Add the matching criteria in the arrayFilters object. This object\nis an array of queries that specify which array elements to include\nin the update. Set this object in an options parameter: Pass the query, the update document, and options to an\nupdate method. The following sample code shows how to call the\n updateOne() method with these parameters: This example uses the following sample documents, which describe\nshopping lists for specific recipes, to show how to update certain matching array elements: Suppose you want to increase the quantity of items you purchase for a\nrecipe on your \"11/12/2023\" grocery trip. You want to double the quantity if\nthe item meets all the following criteria: To double the quantity value in the matching array\nentries, use the filtered positional operator as shown in the following\ncode: The update multiplied the quantity value by 2 for\nitems that matched the criteria. The item \"Sesame oil\" did not match\nthe criteria in the arrayFilters object and therefore was excluded\nfrom the update. The following documents reflect these changes: The item is for the \"Fried rice\" recipe. The item name does not include the word \"oil\" .", + "code": [ + { + "lang": "javascript", + "value": "{\n _id: ...,\n entries: [\n { x: false, y: 1 },\n { x: \"hello\", y: 100 },\n { x: \"goodbye\", y: 1000 }\n ]\n}" + }, + { + "lang": "javascript", + "value": "{\n _id: ...,\n entries: [\n { x: false, y: 1 },\n { x: \"hello\", y: 133 },\n { x: \"goodbye\", y: 1000 }\n ]\n}" + }, + { + "lang": "none", + "value": "MongoServerError: The positional operator did not find the match needed from the query." 
+ }, + { + "lang": "javascript", + "value": "// Query for all elements in entries array where the value of x is a string\nconst query = { \"entries.x\": { $type : \"string\" } };\n\n// On first matched element, increase value of y by 33\nconst updateDocument = {\n $inc: { \"entries.$.y\": 33 }\n};\n\n// Execute the update operation\nconst result = await myColl.updateOne(query, updateDocument);" + }, + { + "lang": "javascript", + "value": "{\n _id: ...,\n date: \"5/15/2023\",\n calls: [\n { time: \"10:08 AM\", caller: \"Mom\", duration: 67 },\n { time: \"04:11 PM\", caller: \"Dad\", duration: 121 },\n { time: \"06:36 PM\", caller: \"Grandpa\", duration: 13 }\n ]\n},\n{\n _id: ...,\n date: \"5/16/2023\",\n calls: [\n { time: \"11:47 AM\", caller: \"Mom\", duration: 4 },\n ]\n}" + }, + { + "lang": "javascript", + "value": "{\n _id: ...,\n date: \"5/15/2023\",\n calls: [\n { time: \"10:08 AM\", caller: \"Mom\" },\n { time: \"04:11 PM\", caller: \"Dad\" },\n { time: \"06:36 PM\", caller: \"Grandpa\" }\n ]\n},\n{\n _id: ...,\n date: \"5/16/2023\",\n calls: [\n { time: \"11:47 AM\", caller: \"Mom\", duration: 4 },\n ]\n}" + }, + { + "lang": "javascript", + "value": "// Query for all documents where date is the string \"5/15/2023\"\nconst query = { date: \"5/15/2023\" };\n\n// For each matched document, remove duration field from all entries in calls array \nconst updateDocument = {\n $unset: { \"calls.$[].duration\": \"\" }\n};\n\n// Execute the update operation\nconst result = await myColl.updateOne(query, updateDocument);" + }, + { + "lang": "javascript", + "value": "{ <update operator>: { \"<array>.$[<identifier>].<field>\": <value> } }" + }, + { + "lang": "javascript", + "value": "arrayFilters: [\n { \"<identifier>.<field>\": <filter value> },\n { \"<identifier>.<field>\": <filter value> },\n ...\n]" + }, + { + "lang": "javascript", + "value": "await myColl.updateOne(query, updateDocument, options);" + }, + { + "lang": "javascript", + "value": "{\n _id: ...,\n date: \"11/12/2023\",\n items: [\n { item: \"Scallions\", quantity: 3, recipe: \"Fried rice\" },\n { item: \"Mangos\", quantity: 4, recipe: \"Salsa\" },\n { item: \"Pork shoulder\", quantity: 1, recipe: \"Fried rice\" },\n { item: \"Sesame oil\", quantity: 1, recipe: \"Fried rice\" }\n ]\n},\n{\n _id: ...,\n date: \"11/20/2023\",\n items: [\n { item: \"Coffee beans\", quantity: 1, recipe: \"Coffee\" }\n ]\n}" + }, + { + "lang": "javascript", + "value": "{\n _id: ...,\n date: \"11/12/2023\",\n items: [\n { item: \"Scallions\", quantity: 6, recipe: \"Fried rice\" },\n { item: \"Mangos\", quantity: 4, recipe: \"Salsa\" },\n { item: \"Pork shoulder\", quantity: 2, recipe: \"Fried rice\" },\n { item: \"Sesame oil\", quantity: 1, recipe: \"Fried rice\" }\n ]\n},\n{\n _id: ...,\n date: \"11/20/2023\",\n items: [\n { item: \"Coffee beans\", quantity: 1, recipe: \"Coffee\" }\n ]\n}" + }, + { + "lang": "javascript", + "value": "// Query for all documents where date is the string \"11/12/2023\"\nconst query = { date: \"11/12/2023\" };\n\n// For each matched document, multiply the quantity of matching items by 2 \nconst updateDocument = {\n $mul: { \"items.$[i].quantity\": 2 }\n};\n\n// Update only non-oil items used for fried rice \nconst options = {\n arrayFilters: [\n {\n \"i.recipe\": \"Fried rice\",\n \"i.item\": { $not: { $regex: \"oil\" } },\n }\n ]\n};\n\n// Execute the update operation\nconst result = await myColl.updateOne(query, updateDocument, options);" + } + ], + "preview": "In this guide, you can learn how to use the following array update\noperators to modify an array embedded within a document:", + "tags": null, + "facets": { + "genre": ["tutorial"], + 
"target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "fundamentals/crud/write-operations/insert", + "title": "Insert Documents", + "headings": [ + "Overview", + "A Note About _id", + "Insert a Single Document", + "Example", + "Insert Multiple Documents", + "Example" + ], + "paragraphs": "In this guide, you can learn how to insert documents into MongoDB. You can use MongoDB to retrieve, update, and delete information that is already stored\nin MongoDB. To store information, use an insert operation . An insert operation inserts one or more documents into a MongoDB collection.\nThe Node.js driver provides the following methods to perform insert\noperations: The following sections focus on insertOne() and insertMany() . For an\nexample on how to use the bulkWrite() method, see our runnable Bulk\nOperations Example . insertOne() insertMany() bulkWrite() This page includes a short interactive lab that demonstrates how to\ninsert data by using the insertOne() method. You can complete this lab\ndirectly in your browser window without installing MongoDB or a code editor. To start the lab, click the Open Interactive Tutorial button at the\ntop of the page. To expand the lab to a full-screen format, click the\nfull-screen button ( \u26f6 ) in the top-right corner of the lab pane. When inserting a document, MongoDB enforces one constraint on your\ndocuments by default. Each document must contain a unique _id \nfield. There are two ways to manage this field: Unless you have provided strong guarantees for uniqueness, we recommend\nyou let the driver automatically generate _id values. For more information about _id , see the Server manual entry on\n Unique Indexes . You can manage this field yourself, ensuring each value you use is unique. You can let the driver automatically generate unique ObjectId values\nwith the primary key factory . Duplicate _id values violate unique index constraints, resulting\nin a WriteError . Use the insertOne() method when you want to insert a single\ndocument. On successful insertion, the method returns an\n InsertOneResult instance representing the _id of\nthe new document. The following example uses the insertOne() method to insert a new\ndocument into the myDB.pizzaMenu collection: Your output looks similar to the following text: For more information on the classes and methods mentioned in this\nsection, see the following resources: API Documentation on insertOne() API Documentation on InsertOneResult Server manual entry on insertOne() Runnable Insert a Document Example Use the insertMany() method when you want to insert multiple\ndocuments. This method inserts documents in the order specified until an\nexception occurs, if any. For example, assume you want to insert the following documents: If you attempt to insert these documents, a WriteError occurs when the third\ndocument is processed, but the documents before the error are inserted into your\ncollection. On successful insertion, the method returns an\n InsertManyResult instance representing the number of\ndocuments inserted and the _id of the new document. 
Use a try-catch block to get an acknowledgment for successfully\nprocessed documents before the error occurs: The output consists of documents MongoDB can process and looks similar to the\nfollowing: If you look inside your collection, you see the following documents: The following example uses the insertMany() method to insert three new\ndocuments into the myDB.pizzaMenu collection: Your output looks similar to the following: For more information on the classes and methods mentioned in this\nsection, see the following resources: API Documentation on insertMany() API Documentation on InsertManyResult API Documentation on PkFactory Server manual entry on insertMany() Runnable Insert Multiple Documents Example", + "code": [ + { + "lang": "javascript", + "value": "const myDB = client.db(\"myDB\");\nconst myColl = myDB.collection(\"pizzaMenu\");\n\nconst doc = { name: \"Neapolitan pizza\", shape: \"round\" };\nconst result = await myColl.insertOne(doc);\nconsole.log(\n `A document was inserted with the _id: ${result.insertedId}`,\n);" + }, + { + "lang": null, + "value": "A document was inserted with the _id: 60c79c0f4cc72b6bb31e3836" + }, + { + "lang": "json", + "value": "{ \"_id\": 1, \"color\": \"red\" }\n{ \"_id\": 2, \"color\": \"purple\" }\n{ \"_id\": 1, \"color\": \"yellow\" }\n{ \"_id\": 3, \"color\": \"blue\" }" + }, + { + "lang": "javascript", + "value": "const myDB = client.db(\"myDB\");\nconst myColl = myDB.collection(\"colors\");\n\ntry {\n const docs = [\n { \"_id\": 1, \"color\": \"red\"},\n { \"_id\": 2, \"color\": \"purple\"},\n { \"_id\": 1, \"color\": \"yellow\"},\n { \"_id\": 3, \"color\": \"blue\"}\n ];\n\n const insertManyresult = await myColl.insertMany(docs);\n let ids = insertManyresult.insertedIds;\n\n console.log(`${insertManyresult.insertedCount} documents were inserted.`);\n for (let id of Object.values(ids)) {\n console.log(`Inserted a document with id ${id}`);\n }\n} catch(e) {\n console.log(`A MongoBulkWriteException occurred, but there are successfully processed documents.`);\n let ids = e.result.result.insertedIds;\n for (let id of Object.values(ids)) {\n console.log(`Processed a document with id ${id._id}`);\n }\n console.log(`Number of documents inserted: ${e.result.result.nInserted}`);\n}" + }, + { + "lang": null, + "value": "A MongoBulkWriteException occurred, but there are successfully processed documents.\nProcessed a document with id 1\nProcessed a document with id 2\nProcessed a document with id 1\nProcessed a document with id 3\nNumber of documents inserted: 2" + }, + { + "lang": "json", + "value": "{ \"_id\": 1, \"color\": \"red\" }\n{ \"_id\": 2, \"color\": \"purple\" }" + }, + { + "lang": "javascript", + "value": "const myDB = client.db(\"myDB\");\nconst myColl = myDB.collection(\"pizzaMenu\");\n\nconst docs = [\n { name: \"Sicilian pizza\", shape: \"square\" },\n { name: \"New York pizza\", shape: \"round\" },\n { name: \"Grandma pizza\", shape: \"square\" }\n];\n\nconst insertManyresult = await myColl.insertMany(docs);\nlet ids = insertManyresult.insertedIds;\n\nconsole.log(`${insertManyresult.insertedCount} documents were inserted.`);\n\nfor (let id of Object.values(ids)) {\n console.log(`Inserted a document with id ${id}`);\n}" + }, + { + "lang": null, + "value": "3 documents were inserted.\nInserted a document with id 60ca09f4a40cf1d1afcd93a2\nInserted a document with id 60ca09f4a40cf1d1afcd93a3\nInserted a document with id 60ca09f4a40cf1d1afcd93a4" + } + ], + "preview": "In this guide, you can learn how to insert documents into MongoDB.", + 
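Not shown in the entry above: insertMany() also accepts an ordered option. A hedged sketch, reusing the docs and myColl names from the example above and the same e.result.result shape its error handling relies on; with { ordered: false } the driver attempts every document instead of stopping at the first duplicate-key error:

try {
  // Attempt all four documents; three of them insert even though the
  // duplicate `_id: 1` still raises a MongoBulkWriteException afterward
  const result = await myColl.insertMany(docs, { ordered: false });
  console.log(`${result.insertedCount} documents were inserted.`);
} catch (e) {
  console.log(`Number of documents inserted: ${e.result.result.nInserted}`);
}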
"tags": "code example, node.js, add data", + "facets": { + "genre": ["reference"], + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "fundamentals/crud/write-operations/modify", + "title": "Modify Documents", + "headings": [ + "Overview", + "Update Documents", + "Example", + "Replace a Document", + "Example" + ], + "paragraphs": "You can modify documents in a MongoDB collection by using update \nand replace operations. Update operations modify the fields and\nvalues of a document while keeping other fields and values\nunchanged. Replace operations substitute all fields and values\nin an existing document with specified fields and values while keeping\nthe _id field value unchanged. The Node.js driver provides the following methods to change documents: updateOne() updateMany() replaceOne() This page includes a short interactive lab that demonstrates how to\nmodify data by using the updateMany() method. You can complete this lab\ndirectly in your browser window without installing MongoDB or a code editor. To start the lab, click the Open Interactive Tutorial button at the\ntop of the page. To expand the lab to a full-screen format, click the\nfull-screen button ( \u26f6 ) in the top-right corner of the lab pane. To perform an update to one or more documents, create an update\ndocument that specifies the update operator (the type of update to\nperform) and the fields and values that describe the change. Update\ndocuments use the following format: The top level of an update document contains one or more of the following\nupdate operators: See the MongoDB Server manual for a complete list of update operators\nand their usage . The update operators apply only to the fields associated with them in your\nupdate document. $set : replaces the value of a field with a specified one $inc : increments or decrements field values $rename : renames fields $unset : removes fields $mul : multiplies a field value by a specified number If you are using MongoDB Version 4.2 or later, you can use aggregation\npipelines made up of a subset of aggregation stages in update operations. For\nmore information on the aggregation stages MongoDB supports in\naggregation pipelines used in update operations, see our tutorial on building\n updates with aggregation pipelines . Consider a document in the myDB.items collection with fields\ndescribing an item for sale, its price, and the quantity available: If you apply the $set update operator with a new value for\n quantity , you can use the following update document: The updated document resembles the following, with an updated value in\nthe quantity field and all other values unchanged: If an update operation fails to match any documents in a collection, it\ndoes not make any changes. Update operations can be configured to perform\nan upsert which\nattempts to perform an update, but if no documents are matched, inserts\na new document with the specified fields and values. You cannot modify the _id field of a document nor change a field to\na value that violates a unique index constraint. See the MongoDB Server manual\nfor more information on unique indexes . To perform a replacement operation, create a replacement document that\nconsists of the fields and values that you want to use in your\n replace operation. Replacement documents use the following format: Replacement documents are the documents that you want to take the place of\nexisting documents that match the query filters. 
Consider a document in the myDB.items collection with fields\ndescribing an item for sale, its price, and the quantity available: Suppose you wanted to replace this document with one that contains a\ndescription for an entirely different item. Your replacement operation might\nresemble the following: The replaced document contains the contents of the replacement document\nand the immutable _id field as follows: If a replace operation fails to match any documents in a collection, it\ndoes not make any changes. Replace operations can be configured to perform\nan upsert which\nattempts to perform the replacement, but if no documents are matched, it\ninserts a new document with the specified fields and values. You cannot modify the _id field of a document nor change a field to\na value that violates a unique index constraint. See the MongoDB Server manual\nfor more information on unique indexes .", + "code": [ + { + "lang": "javascript", + "value": "{\n <update operator>: {\n <field>: {\n ...\n },\n <field>: {\n }\n },\n <update operator>: {\n ...\n }\n}" + }, + { + "lang": "javascript", + "value": "{\n _id: 465,\n item: \"Hand-thrown ceramic plate\",\n price: 32.50,\n quantity: 7,\n}" + }, + { + "lang": "javascript", + "value": "const myDB = client.db(\"myDB\");\nconst myColl = myDB.collection(\"items\");\n\nconst filter = { _id: 465 };\n\n// update the value of the 'quantity' field to 5\nconst updateDocument = {\n $set: {\n quantity: 5,\n },\n};\nconst result = await myColl.updateOne(filter, updateDocument);" + }, + { + "lang": "javascript", + "value": "{\n _id: 465,\n item: \"Hand-thrown ceramic plate\",\n price: 32.50,\n quantity: 5,\n}" + }, + { + "lang": "javascript", + "value": "{\n <field>: {\n <value>\n },\n <field>: {\n ...\n }\n}" + }, + { + "lang": "javascript", + "value": "{\n _id: 501,\n item: \"3-wick beeswax candle\",\n price: 18.99,\n quantity: 10,\n}" + }, + { + "lang": "javascript", + "value": "const myDB = client.db(\"myDB\");\nconst myColl = myDB.collection(\"items\");\n\nconst filter = { _id: 501 };\n\n// replace the matched document with the replacement document\nconst replacementDocument = {\n item: \"Vintage silver flatware set\",\n price: 79.15,\n quantity: 1,\n};\nconst result = await myColl.replaceOne(filter, replacementDocument);" + }, + { + "lang": "javascript", + "value": "{\n _id: 501,\n item: \"Vintage silver flatware set\",\n price: 79.15,\n quantity: 1,\n}" + } + ], + "preview": "You can modify documents in a MongoDB collection by using update\nand replace operations. Update operations modify the fields and\nvalues of a document while keeping other fields and values\nunchanged. Replace operations substitute all fields and values\nin an existing document with specified fields and values while keeping\nthe _id field value unchanged.", + "tags": null, + "facets": { + "genre": ["tutorial"], + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "fundamentals/crud/write-operations/pkFactory", + "title": "Generate Custom Values for _id", + "headings": [ + "Overview", + "Specify a Primary Key Factory", + "Additional Information" + ], + "paragraphs": "In this guide, you can learn how to use the MongoDB Node.js driver to generate your\nown _id values using the primary key factory . The primary key factory allows you to create unique identifiers in your\ndocuments when you choose not to specify an _id during an\n insert operation . The\ndefault primary key factory generates ObjectId values. 
The driver doesn't use the primary key factory for\n upsert operations because it's\nunable to determine whether to apply the primary key factory. If you\nspecified the primary key factory in an upsert operation and it\nperforms an insert operation, the server autogenerates an\n ObjectId for that document. If you want to use your specified primary key factory, perform a\n find operation , then an\n update or\n insert operation. To specify a primary key factory, apply the pkFactory option to your\n MongoClient instance. The following code snippet applies the pkFactory option to\ngenerate _id values of type uuid : If you insert a document with an _id field with a different\ntype than the type specified by the primary key factory, then you\nwill have inconsistent data. For example, if you run the following insert operation on a primary\nkey factory that generates uuid types, your _id values will\ncontain both the uuid and string types: To learn more about the types, interfaces, and classes discussed in this\nsection, see the following resources: pkFactory The _id Field Insert or Update in a Single Operation Retrieve Data Modify Documents Insert Documents", + "code": [ + { + "lang": "javascript", + "value": "const { UUID } = require('bson');\n...\nconst client = new MongoClient(uri, {\n pkFactory: { createPk: () => new UUID().toBinary() }\n});" + }, + { + "lang": "javascript", + "value": "myColl.insertOne({ _id: \"user1388\", ... });" + } + ], + "preview": "In this guide, you can learn how to use the MongoDB Node.js driver to generate your\nown _id values using the primary key factory.", + "tags": null, + "facets": { + "genre": ["tutorial"], + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "fundamentals/crud/write-operations/upsert", + "title": "Insert or Update in a Single Operation", + "headings": ["Overview", "Performing an Update", "Performing an Upsert"], + "paragraphs": "If your application stores and modifies data in MongoDB, you probably use\ninsert and update operations. In certain workflows, whether you perform\nan insert or update operation depends on whether the document exists.\nIn these cases, you can streamline your application logic by using the\n upsert option available in the following methods: If the query filter passed to these methods does not find any matches and\nyou set the upsert option to true , MongoDB inserts the update\ndocument. Let's go through an example. updateOne() replaceOne() updateMany() Suppose your application tracks the current location of food trucks,\nstoring the nearest address data in the myDB.foodTrucks collection,\nwhich resembles the following: As an application user, you read about a food truck changing its regular\nlocation and want to apply the update. This update might resemble the\nfollowing: If a food truck named \"Deli Llama\" exists, the method call above updates\nthe document in the collection. However, if there are no food trucks named\n\"Deli Llama\" in your collection, no changes are made. Consider the case in which you want to add information about the food\ntruck even if it does not yet exist in your collection. 
Rather than\nfirst querying whether it exists to determine whether to insert or\nupdate the document, we can set upsert to true in our call to\n updateOne() as follows: After you run the operation above, your collection looks similar to the\nfollowing, even if the \"Deli Llama\" document did not exist in your collection\nbefore the operation:", + "code": [ + { + "lang": "javascript", + "value": "[\n { name: \"Haute Skillet\", address: \"42 Avenue B\" },\n { name: \"Lady of the Latke\", address: \"35 Fulton Rd\" },\n ...\n]" + }, + { + "lang": "javascript", + "value": "const myDB = client.db(\"myDB\");\nconst myColl = myDB.collection(\"foodTrucks\");\n\nconst query = { name: \"Deli Llama\" };\nconst update = { $set: { name: \"Deli Llama\", address: \"3 Nassau St\" }};\nconst options = {};\nmyColl.updateOne(query, update, options);" + }, + { + "lang": "javascript", + "value": "const query = { name: \"Deli Llama\" };\nconst update = { $set: { name: \"Deli Llama\", address: \"3 Nassau St\" }};\nconst options = { upsert: true };\nmyColl.updateOne(query, update, options);" + }, + { + "lang": "javascript", + "value": "[\n { name: \"Haute Skillet\", address: \"42 Avenue B\" },\n { name: \"Lady of the Latke\", address: \"35 Fulton Rd\" },\n { name: \"Deli Llama\", address: \"3 Nassau St\" },\n ...\n]" + } + ], + "preview": "If your application stores and modifies data in MongoDB, you probably use\ninsert and update operations. In certain workflows, whether you perform\nan insert or update operation depends on whether the document exists.\nIn these cases, you can streamline your application logic by using the\nupsert option available in the following methods:", + "tags": "code example, node.js, write, add data", + "facets": { + "genre": ["reference"], + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "fundamentals/crud/write-operations", + "title": "Write Operations", + "headings": [], + "paragraphs": "Insert Documents Generate Custom Values for _id Delete Documents Modify Documents Update Arrays in a Document Insert or Update in a Single Operation", + "code": [], + "preview": "Learn about the commands for running MongoDB write operations by using the MongoDB Node.js driver.", + "tags": null, + "facets": { + "genre": ["tutorial"], + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "fundamentals/crud", + "title": "CRUD Operations", + "headings": ["Compatibility"], + "paragraphs": "CRUD (Create, Read, Update, Delete) operations allow you to work with\nthe data stored in MongoDB. The CRUD operation documentation is categorized in two sections: Some operations combine aspects of read and write operations. See our\nguide on compound operations \nto learn more about these hybrid methods. Read Operations find and return\ndocuments stored within your MongoDB database. Write Operations insert, modify,\nor delete documents in your MongoDB database. You can use the Node.js driver to connect and perform CRUD operations for\ndeployments hosted in the following environments: MongoDB Atlas : The fully\nmanaged service for MongoDB deployments in the cloud MongoDB Enterprise : The\nsubscription-based, self-managed version of MongoDB MongoDB Community : The\nsource-available, free-to-use, and self-managed version of MongoDB To learn more about performing CRUD operations in the Atlas UI for deployments hosted in MongoDB\nAtlas, see Create, View, Update, and Delete Documents . 
To learn more about performing CRUD operations, see the following posts on the MongoDB\nDeveloper Hub : Learn how to apply CRUD Operations \nwith an example scenario. Analyze data in MongoDB Atlas using the Aggregation Pipeline .", + "code": [], + "preview": "Learn how to perform create, read, update, and delete (CRUD) operations to work with the data stored in MongoDB by using the Node.js driver.", + "tags": "node.js", + "facets": { + "genre": ["reference"], + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "fundamentals/encrypt-fields", + "title": "In-Use Encryption", + "headings": [ + "Overview", + "Queryable Encryption", + "Client-side Field Level Encryption" + ], + "paragraphs": "You can use the Node.js driver to encrypt specific document fields by using a\nset of features called in-use encryption . In-use encryption allows\nyour application to encrypt data before sending it to MongoDB\nand query documents with encrypted fields. In-use encryption prevents unauthorized users from viewing plaintext\ndata as it is sent to MongoDB or while it is in an encrypted database. To\nenable in-use encryption in an application and authorize it to decrypt\ndata, you must create encryption keys that only your application can\naccess. Only applications that have access to your encryption\nkeys can access the decrypted, plaintext data. If an attacker gains\naccess to the database, they can only see the encrypted ciphertext data\nbecause they lack access to the encryption keys. You might use in-use encryption to encrypt fields in your MongoDB\ndocuments that contain the following types of sensitive data: MongoDB offers the following features to enable in-use encryption: Credit card numbers Addresses Health information Financial information Any other sensitive or personally identifiable information (PII) Queryable Encryption Client-side Field Level Encryption Queryable Encryption is the next-generation in-use encryption feature,\nfirst introduced as a preview feature in MongoDB Server version 6.0 and\nas a generally available (GA) feature in MongoDB 7.0. Queryable\nEncryption supports searching encrypted fields for equality and encrypts\neach value uniquely. To learn more about Queryable Encryption, see Queryable\nEncryption in the Server manual. The implementation of Queryable Encryption in MongoDB 6.0 is incompatible with the GA version introduced in MongoDB 7.0. The Queryable Encryption preview feature is no longer supported. Client-side Field Level Encryption (CSFLE) was introduced in MongoDB\nServer version 4.2 and supports searching encrypted fields for equality.\nCSFLE differs from Queryable Encryption in that you can select either a\ndeterministic or random encryption algorithm to encrypt fields. You can only\nquery encrypted fields that use a deterministic encryption algorithm when\nusing CSFLE. When you use a random encryption algorithm to encrypt\nfields in CSFLE, they can be decrypted, but you cannot perform equality\nqueries on those fields. When you use Queryable Encryption, you cannot\nspecify the encryption algorithm, but you can query all encrypted\nfields. When you deterministically encrypt a value, the same input value\nproduces the same output value. While deterministic encryption allows\nyou to perform queries on those encrypted fields, encrypted data with\nlow cardinality is susceptible to code breaking by frequency analysis. To learn more about CSFLE, see CSFLE in the\nServer manual. 
To learn more about these concepts, see the following Wikipedia\nentries: Cardinality Frequency Analysis", + "code": [], + "preview": "You can use the Node.js driver to encrypt specific document fields by using a\nset of features called in-use encryption. In-use encryption allows\nyour application to encrypt data before sending it to MongoDB\nand query documents with encrypted fields.", + "tags": "node.js", + "facets": { + "genre": ["reference"], + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "fundamentals/gridfs", + "title": "GridFS", + "headings": [ + "Overview", + "How GridFS Works", + "Create a GridFS Bucket", + "Upload Files", + "Retrieve File Information", + "Download Files", + "Rename Files", + "Delete Files", + "Delete a GridFS Bucket", + "Additional Resources" + ], + "paragraphs": "In this guide, you can learn how to store and retrieve large files in\nMongoDB using GridFS . GridFS is a specification that describes how\nto split files into chunks during storage\nand reassemble them during retrieval. The driver implementation of\nGridFS manages the operations and organization of\nthe file storage. Use GridFS if the size of your file exceeds the BSON-document\nsize limit of 16 megabytes. For more detailed information on whether GridFS is\nsuitable for your use case, see the GridFS Server manual page . Navigate the following sections to learn more about GridFS operations\nand implementation: Create a GridFS Bucket Upload Files Retrieve File Information Download Files Rename Files Delete Files Delete a GridFS Bucket GridFS organizes files in a bucket , a group of MongoDB collections\nthat contain the chunks of files and descriptive information.\nBuckets contain the following collections, named using the convention\ndefined in the GridFS specification: When you create a new GridFS bucket, the driver creates the chunks \nand files collections, prefixed with the default bucket name fs , unless\nyou specify a different name. The driver also creates an index on each\ncollection to ensure efficient retrieval of files and related\nmetadata. The driver only creates the GridFS bucket on the first write\noperation if it does not already exist. The driver only creates indexes if\nthey do not exist and when the bucket is empty. For more information on\nGridFS indexes, see the Server manual page on GridFS Indexes . When storing files with GridFS, the driver splits the files into smaller\npieces, each represented by a separate document in the chunks collection.\nIt also creates a document in the files collection that contains\na unique file id, file name, and other file metadata. You can upload the file from\nmemory or from a stream. The following diagram describes how GridFS splits\nfiles when uploading to a bucket: When retrieving files, GridFS fetches the metadata from the files \ncollection in the specified bucket and uses the information to reconstruct\nthe file from documents in the chunks collection. You can read the file\ninto memory or output it to a stream. The chunks collection stores the binary file chunks. The files collection stores the file metadata. Create a bucket or get a reference to an existing one to begin storing\nor retrieving files from GridFS. Create a GridFSBucket \ninstance, passing a database as the parameter. 
You can then use the\n GridFSBucket instance to call read and write operations on the files\nin your bucket: Pass your bucket name as the second parameter to the create() method\nto create or reference a bucket with a custom name other than the\ndefault name fs , as shown in the following example: For more information, see the GridFSBucket API documentation . Use the openUploadStream() method from GridFSBucket to create an upload\nstream for a given file name. You can use the pipe() method to\nconnect a Node.js read stream to the upload stream. The\n openUploadStream() method allows you to specify configuration information\nsuch as file chunk size and other field/value pairs to store as metadata. The following example shows how to pipe a Node.js read stream, represented by the\nvariable fs , to the openUploadStream() method of a GridFSBucket instance: See the openUploadStream() API documentation for more information. In this section, you can learn how to retrieve file metadata stored in the\n files collection of the GridFS bucket. The metadata contains information\nabout the file it refers to, including: Call the find() method on the GridFSBucket instance to retrieve\nfiles from a GridFS bucket. The method returns a FindCursor instance\nfrom which you can access the results. The following code example shows you how to retrieve and print file metadata\nfrom all your files in a GridFS bucket. Among the different ways that you can\ntraverse the retrieved results from the FindCursor iterable, the\nfollowing example uses the for await...of syntax to display the results: The find() method accepts various query specifications and can be\ncombined with other methods such as sort() , limit() , and project() . For more information on the classes and methods mentioned in this section,\nsee the following resources: The _id of the file The name of the file The length/size of the file The upload date and time A metadata document in which you can store any other information find() API documentation FindCursor API documentation Cursor Fundamentals page Read Operations page You can download files from your MongoDB database by using the\n openDownloadStreamByName() method from GridFSBucket to create a\ndownload stream. The following example shows you how to download a file referenced\nby the file name, stored in the filename field, into your working\ndirectory: Alternatively, you can use the openDownloadStream() \nmethod, which takes the _id field of a file as a parameter: For more information on the openDownloadStreamByName() method, see\nits API documentation . If there are multiple documents with the same filename value,\nGridFS will stream the most recent file with the given name (as\ndetermined by the uploadDate field). The GridFS streaming API cannot load partial chunks. When a download\nstream needs to pull a chunk from MongoDB, it pulls the entire chunk\ninto memory. The 255 kilobyte default chunk size is usually\nsufficient, but you can reduce the chunk size to reduce memory\noverhead. Use the rename() method to update the name of a GridFS file in your\nbucket. You must specify the file to rename by its _id field\nrather than its file name. The following example shows how to update the filename field to\n\"newFileName\" by referencing a document's _id field: For more information on this method, see the rename() \nAPI documentation. The rename() method only supports updating the name of one file at\na time. 
To rename multiple files, retrieve a list of files matching the\nfile name from the bucket, extract the _id field from the files you\nwant to rename, and pass each value in separate calls to the rename() \nmethod. Use the delete() method to remove a file from your bucket. You must\nspecify the file by its _id field rather than its file name. The following example shows you how to delete a file by referencing its _id field: For more information on this method, see the delete() \nAPI documentation. The delete() method only supports deleting one file at a time. To\ndelete multiple files, retrieve the files from the bucket, extract\nthe _id field from the files you want to delete, and pass each value\nin separate calls to the delete() method. Use the drop() method to remove a bucket's files and chunks \ncollections, which effectively deletes the bucket. The following\ncode example shows you how to delete a GridFS bucket: For more information on this method, see the drop() \nAPI documentation. MongoDB GridFS specification", + "code": [ + { + "lang": "javascript", + "value": "const db = client.db(dbName);\nconst bucket = new mongodb.GridFSBucket(db);" + }, + { + "lang": "javascript", + "value": "const bucket = new mongodb.GridFSBucket(db, { bucketName: 'myCustomBucket' });" + }, + { + "lang": "javascript", + "value": "fs.createReadStream('./myFile').\n pipe(bucket.openUploadStream('myFile', {\n chunkSizeBytes: 1048576,\n metadata: { field: 'myField', value: 'myValue' }\n }));" + }, + { + "lang": "javascript", + "value": "const cursor = bucket.find({});\nfor await (const doc of cursor) {\n console.log(doc);\n}" + }, + { + "lang": "javascript", + "value": "bucket.openDownloadStreamByName('myFile').\n pipe(fs.createWriteStream('./outputFile'));" + }, + { + "lang": "javascript", + "value": "bucket.openDownloadStream(ObjectId(\"60edece5e06275bf0463aaf3\")).\n pipe(fs.createWriteStream('./outputFile'));" + }, + { + "lang": "javascript", + "value": "bucket.rename(ObjectId(\"60edece5e06275bf0463aaf3\"), \"newFileName\");" + }, + { + "lang": "javascript", + "value": "bucket.delete(ObjectId(\"60edece5e06275bf0463aaf3\"));" + }, + { + "lang": "javascript", + "value": "bucket.drop();" + } + ], + "preview": "In this guide, you can learn how to store and retrieve large files in\nMongoDB using GridFS. GridFS is a specification that describes how\nto split files into chunks during storage\nand reassemble them during retrieval. The driver implementation of\nGridFS manages the operations and organization of\nthe file storage.", + "tags": "node.js, code example, file storage", + "facets": { + "genre": ["reference"], + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "fundamentals/indexes", + "title": "Indexes", + "headings": [ + "Overview", + "Query Coverage and Performance", + "Operational Considerations", + "List Indexes", + "Index Types", + "Single Field Indexes", + "Compound Indexes", + "Multikey Indexes (Indexes on Array Fields)", + "Clustered Indexes", + "Text Indexes", + "Geospatial Indexes", + "Unique Indexes", + "Search Indexes", + "Create a Search Index", + "List Search Indexes", + "Update a Search Index", + "Drop a Search Index" + ], + "paragraphs": "Indexes are data structures that support the efficient execution of queries in\nMongoDB. They contain copies of parts of the data in documents to make\nqueries more efficient. Without indexes, MongoDB must scan every document in a collection to find\nthe documents that match each query. 
These collection scans are slow and can\nnegatively affect the performance of your application. By using an index to\nlimit the number of documents MongoDB scans, queries can be more efficient\nand therefore return faster. When you execute a query against MongoDB, your query can include three\nparts: When all the fields specified in the query criteria and projection of a\nquery are indexed, MongoDB returns results directly from the index\nwithout scanning any documents in the collection or loading them into\nmemory. For more information on how to ensure your index covers your query\ncriteria and projection, see the MongoDB manual articles on\n query coverage \nand index intersection . Query criteria that specify one or more fields and values that you are looking for Options that affect the query's execution, such as read concern Projection criteria to specify the fields you want MongoDB to return (optional) To improve query performance, build indexes on fields that appear often in your\napplication's queries and operations that return sorted results. Each index that you add\nconsumes disk space and memory when active, so it might be necessary to track index memory\nand disk usage for capacity planning. In addition, when a write operation updates an\nindexed field, MongoDB also updates the related index. For more information on designing your data model and choosing indexes\nappropriate for your application, see the MongoDB Server documentation on\n Indexing Strategies and\n Data Modeling and Indexes . You can use the listIndexes() method to list all the indexes\nfor a collection. The listIndexes() method takes an\noptional ListIndexesOptions parameter. The listIndexes() method returns an\nobject of type ListIndexesCursor . The following code uses the listIndexes() method to list all the\nindexes in a collection: MongoDB supports several different index types to support querying\nyour data. The following sections describe the most common index types\nand provide sample code for creating each index type. Single field indexes are indexes that improve performance for queries\nthat specify ascending or descending sort order on a single field of a\ndocument. The following example uses the createIndex() method to create an\nascending order index on the title field in the movies collection in\nthe sample_mflix database. The following is an example of a query that is covered by the index\ncreated above. To learn more, see Single Field Indexes . Compound indexes are indexes that improve performance for queries that\nspecify ascending or descending sort order for multiple fields of\na document. You must specify the direction (ascending or descending) for\neach field in the index. The following example uses the createIndex() method to create a compound\nindex on the type and genre fields in the movies collection in the\n sample_mflix database. The following is an example of a query that is covered by the index\ncreated above. To learn more, see Compound Indexes . Multikey indexes are indexes that improve the performance of queries on\nfields that contain array values. You can create a multikey index on a field with an array value by\ncalling the createIndex() method. 
The following code creates an ascending\nindex on the cast field in the movies collection of the\n sample_mflix database: The following code queries the multikey index to find\ndocuments in which the cast field value contains \"Viola Davis\" : Multikey indexes behave differently from non-multikey indexes in terms of\nquery coverage, index bound computation, and sort behavior. For a full\nexplanation of multikey indexes, including a discussion of their behavior\nand limitations, see the Multikey Indexes page in the MongoDB Server manual. Clustered indexes are indexes that improve the performance of\ninsert, update, and delete operations on clustered collections .\nClustered collections store documents ordered by the clustered index key\nvalue. To create a clustered index, specify the clusteredIndex option in\nthe CollectionOption . The clusteredIndex option must specify the\n _id field as the key and the unique field as true . The following example uses the createCollection() method to create a\nclustered index on the _id field in the ratings collection of the\n tea database. To learn more, see\n Clustered Indexes and\n Clustered Collections . Text indexes support text search queries on string content. These indexes\ncan include any field whose value is a string or an array of string elements. MongoDB supports text search for various languages, so you can specify the\ndefault language as an option when creating the index. You can also\nspecify a weight option to prioritize certain text fields in your\nindex. These weights denote the significance of fields relative to the\nother indexed fields. To learn more about text searches, see our guide on text search queries . The following example uses the createIndex() method to perform the\nfollowing actions: The following query uses the text index created in the preceding code: To learn more about text indexes, see Text Indexes in the Server manual. Create a text index on the title and body fields in the\n blogPosts collection Specify english as the default language Set the field weight of body to 10 and title to 3 MongoDB supports queries of geospatial coordinate data using 2dsphere\nindexes . With a 2dsphere index, you can query the geospatial data for\ninclusion, intersection, and proximity. For more information on querying\ngeospatial data with the MongoDB Node.js driver, read our\n Search Geospatial guide. To create a 2dsphere index, you must specify a field that contains\nonly GeoJSON objects . For more details on this type, see the MongoDB\nServer manual page on GeoJSON objects . The location.geo field in the following sample document from the\n theaters collection in the sample_mflix database is a GeoJSON Point\nobject that describes the coordinates of the theater: The following example uses the createIndex() method to create a\n 2dsphere index on the location.geo field in the theaters \ncollection in the sample_mflix database to enable geospatial searches. MongoDB also supports 2d indexes for calculating distances on a\nEuclidean plane and for working with the \"legacy coordinate pairs\" syntax\nused in MongoDB 2.2 and earlier. To learn more, see\n Geospatial Queries . Unique indexes ensure that the indexed fields do not store duplicate\nvalues. By default, MongoDB creates a unique index on the _id field\nduring the creation of a collection. To create a unique index, specify the\nfield or combination of fields that you want to prevent duplication on and\nset the unique option to true .
The following example uses the createIndex() method to create a unique\nindex on the theaterId field in the theaters collection of the\n sample_mflix database. If you attempt to perform a write operation that stores a duplicate value\nthat violates the unique index, MongoDB will throw an error that resembles\nthe following: To learn more, see Unique Indexes . Atlas Search is a feature that allows you to perform full-text\nsearches. To learn more, see the Atlas Search \ndocumentation. Before you can perform a search on an Atlas collection, you must first\ncreate an Atlas Search index on the collection. An Atlas Search\nindex is a data structure that categorizes data in a searchable format. You can use the following methods to manage your Search indexes: The following sections provide code samples that use each of the preceding\nmethods to manage Search indexes. createSearchIndex() createSearchIndexes() listSearchIndexes() updateSearchIndex() dropSearchIndex() You can use the createSearchIndex() and\n createSearchIndexes() \nmethods to create new Search indexes. The following code shows how to\nuse the createSearchIndex() method to create an index called\n search1 : When connecting to MongoDB Server v6.0.11 and later v6 versions, or\nv7.0.2 and later v7 versions, you can use the driver to create an Atlas\nVector Search index on a collection. Learn more about this feature in\nthe Atlas Vector Search documentation . The following code shows how to use the createSearchIndex() method\nto create a search index in which the type field is\n vectorSearch : You can use the listSearchIndexes() \nmethod to return a cursor that contains the Search indexes of a given\ncollection. The listSearchIndexes() method takes an optional string\nparameter, name , to return only the indexes with matching names. It\nalso takes an optional aggregateOptions parameter. The following code uses the listSearchIndexes() method to list the\nSearch indexes in a collection: You can use the updateSearchIndex() method to update a Search\nindex. The following code shows how to\nuse the updateSearchIndex() method to update an index called\n search1 to specify a string type for the description field: You can use the dropSearchIndex() method to remove a Search\nindex. 
The following code shows how to\nuse the dropSearchIndex() method to remove an index called\n search1 :", + "code": [ + { + "lang": "javascript", + "value": "// List the indexes on the collection and output them as an array\nconst result = await collection.listIndexes().toArray();\n\n// Print the list of indexes\nconsole.log(\"Existing indexes:\\n\");\nfor (const doc of result) {\n console.log(doc);\n}" + }, + { + "lang": "js", + "value": "const database = client.db(\"sample_mflix\");\nconst movies = database.collection(\"movies\");\n\n// Create an ascending index on the \"title\" field in the\n// \"movies\" collection.\nconst result = await movies.createIndex({ title: 1 });\nconsole.log(`Index created: ${result}`);" + }, + { + "lang": "js", + "value": "// Define the query parameters\nconst query = { title: \"Batman\" }\nconst sort = { title: 1 };\nconst projection = { _id: 0, title: 1 };\n// Execute the query using the defined parameters\nconst cursor = movies\n .find(query)\n .sort(sort)\n .project(projection);" + }, + { + "lang": "js", + "value": "// Connect to the \"sample_mflix\" database\nconst database = client.db(\"sample_mflix\");\n// Access the database's \"movies\" collection\nconst movies = database.collection(\"movies\");\n\n// Create an ascending index on the \"type\" and \"genre\" fields\n// in the \"movies\" collection.\nconst result = await movies.createIndex({ type: 1, genre: 1 });\nconsole.log(`Index created: ${result}`);" + }, + { + "lang": "js", + "value": "// Define a query to find movies in the \"Drama\" genre\nconst query = { type: \"movie\", genre: \"Drama\" };\n// Define sorting criteria for the query results\nconst sort = { type: 1, genre: 1 };\n// Include only the type and genre fields in the query results\nconst projection = { _id: 0, type: 1, genre: 1 };\n\n// Execute the query using the defined criteria and projection\nconst cursor = movies\n .find(query)\n .sort(sort)\n .project(projection);" + }, + { + "lang": "js", + "value": "const database = client.db(\"sample_mflix\");\nconst movies = database.collection(\"movies\");\n\n// Create a multikey index on the \"cast\" field in the \"movies\" collection\nconst result = await movies.createIndex({ cast: 1 });" + }, + { + "lang": "js", + "value": "const query = { cast: \"Viola Davis\" };\nconst projection = { _id: 0, cast: 1 , title: 1 };\n\n// Perform a find operation with the preceding filter and projection\nconst cursor = movies\n .find(query)\n .project(projection);" + }, + { + "lang": "javascript", + "value": "const db = client.db('tea');\nawait db.createCollection('ratings', {\n clusteredIndex: {\n key: { _id: 1 },\n unique: true\n }\n});" + }, + { + "lang": "js", + "value": "// Get the database and collection on which to create the index \nconst myDB = client.db(\"testDB\");\nconst myColl = myDB.collection(\"blogPosts\");\n\n// Create a text index on the \"title\" and \"body\" fields\nconst result = await myColl.createIndex(\n { title: \"text\", body: \"text\" },\n { default_language: \"english\", weights: { body: 10, title: 3 } }\n);" + }, + { + "lang": "js", + "value": "// Query for documents where body or title contain \"life ahead\"\nconst query = { $text: { $search: \"life ahead\" } };\n\n// Show only the title field\nconst projection = { _id: 0, title: 1 };\n\n// Execute the find operation\nconst cursor = myColl.find(query).project(projection);" + }, + { + "lang": "json", + "value": "{\n \"_id\" : ObjectId(\"59a47286cfa9a3a73e51e75c\"),\n \"theaterId\" : 104,\n \"location\" : {\n \"address\" :
{\n \"street1\" : \"5000 W 147th St\",\n \"city\" : \"Hawthorne\",\n \"state\" : \"CA\",\n \"zipcode\" : \"90250\"\n },\n \"geo\" : {\n \"type\" : \"Point\",\n \"coordinates\" : [\n -118.36559,\n 33.897167\n ]\n }\n }\n}" + }, + { + "lang": "js", + "value": "const database = client.db(\"sample_mflix\");\nconst movies = database.collection(\"movies\");\n\n/* Create a 2dsphere index on the \"location.geo\" field in the\n\"movies\" collection */\nconst result = await movies.createIndex({ \"location.geo\": \"2dsphere\" });\n\n// Print the result of the index creation\nconsole.log(`Index created: ${result}`);" + }, + { + "lang": "none", + "value": "E11000 duplicate key error index" + }, + { + "lang": "js", + "value": "const database = client.db(\"sample_mflix\");\nconst movies = database.collection(\"movies\");\n\n// Create a unique index on the \"theaterId\" field in the \"theaters\" collection.\nconst result = await movies.createIndex({ theaterId: 1 }, { unique: true });\nconsole.log(`Index created: ${result}`);" + }, + { + "lang": "javascript", + "value": "// Create a search index\nconst index1 = {\n name: \"search1\",\n definition: {\n \"mappings\": {\n \"dynamic\": true\n }\n }\n}\nawait collection.createSearchIndex(index1);" + }, + { + "lang": "javascript", + "value": "// Create a Vector Search index\nconst vectorSearchIdx = {\n name: \"vsidx1\",\n type: \"vectorSearch\",\n definition: {\n fields: [{\n type: \"vector\",\n numDimensions: 384,\n path: \"summary\",\n similarity: \"dotProduct\"\n }]\n }\n}\n\nawait collection.createSearchIndex(vectorSearchIdx);" + }, + { + "lang": "javascript", + "value": "// List search indexes\nconst result = await collection.listSearchIndexes().toArray();\nconsole.log(\"Existing search indexes:\\n\");\nfor (const doc in result) {\n console.log(doc);\n}" + }, + { + "lang": "javascript", + "value": "// Update a search index\nconst index2 = {\n \"mappings\": {\n \"dynamic\": true,\n \"fields\": {\n \"description\": {\n \"type\": \"string\"\n }\n }\n }\n}\nawait collection.updateSearchIndex(\"search1\", index2);" + }, + { + "lang": "javascript", + "value": "// Dropping (deleting) a search index\nawait collection.dropSearchIndex(\"search1\");" + } + ], + "preview": "Indexes are data structures that support the efficient execution of queries in\nMongoDB. They contain copies of parts of the data in documents to make\nqueries more efficient.", + "tags": "node.js, code example, Atlas search", + "facets": { + "genre": ["reference"], + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "fundamentals/logging", + "title": "Logging", + "headings": ["Temporary Alternative"], + "paragraphs": "The driver doesn't use the logger in versions 4.0 and later.\nAttempting to use prior logger settings in this version won't print\nanything in the log. Instead, see our monitoring guides: Command Monitoring Cluster Monitoring Connection Pool Monitoring We are developing a new logging framework. In the meantime, you can output monitor events\nby using the following snippet:", + "code": [ + { + "lang": "javascript", + "value": "const uri = \"mongodb+srv://:@?writeConcern=majority\";\nconst client = new MongoClient(uri, { monitorCommands:true });\n\nclient.on('commandStarted', (event) => console.debug(event));\nclient.on('commandSucceeded', (event) => console.debug(event));\nclient.on('commandFailed', (event) => console.debug(event));" + } + ], + "preview": "We are developing a new logging framework. 
In the meantime, you can output monitor events\nby using the following snippet:", + "tags": "code example, deprecated, replace", + "facets": { + "genre": ["reference"], + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "fundamentals/monitoring/cluster-monitoring", + "title": "Cluster Monitoring", + "headings": [ + "Overview", + "Event Subscription Example", + "Event Descriptions", + "Example Event Documents", + "serverDescriptionChanged", + "serverHeartbeatStarted", + "serverHeartbeatSucceeded", + "serverHeartbeatFailed", + "serverOpening", + "serverClosed", + "topologyOpening", + "topologyClosed", + "topologyDescriptionChanged" + ], + "paragraphs": "This guide shows you how to monitor topology events in a MongoDB instance,\nreplica set, or sharded cluster. The driver creates topology events, also\nknown as Server Discovery and Monitoring (SDAM) events, when there is\na change in the state of the instance or cluster that you connected to.\nFor example, the driver creates an event when you establish a new connection\nor if the cluster elects a new primary. The following sections demonstrate how to record topology changes in your application\nand explore the information provided in these events. You can access one or more SDAM events using the driver by subscribing to them\nin your application. The following example demonstrates connecting to a\nreplica set and subscribing to one of the SDAM events created by the MongoDB\ndeployment: You can subscribe to any of the following SDAM events: Event Name Description serverOpening Created when a connection to an instance opens. serverClosed Created when a connection to an instance closes. serverDescriptionChanged Created when an instance state changes (such as from secondary to\nprimary). topologyOpening Created before attempting a connection to an instance. topologyClosed Created after all instance connections in the topology close. topologyDescriptionChanged Created when the topology changes, such as an election of a new\nprimary or a mongos proxy disconnecting. serverHeartbeatStarted Created before issuing a hello command to a MongoDB instance. serverHeartbeatSucceeded Created when the hello command returns successfully from a\nMongoDB instance. serverHeartbeatFailed Created when a hello command issued to a specific MongoDB\ninstance fails to return a successful response. The following sections show sample output for each type of SDAM event. The type field of the ServerDescription object in this event contains\none of the following possible values: Type Description Unknown Unknown instance Standalone Standalone instance Mongos Mongos proxy instance PossiblePrimary At least one server recognizes this as the primary, but is not yet\nverified by all instances. 
RSPrimary Primary instance RSSecondary Secondary instance RSArbiter Arbiter instance RSOther See the RSGhost and RSOther specification \nfor more details RSGhost See the RSGhost and RSOther specification \nfor more details The type field of the TopologyDescription object in this event contains\none of the following possible values: Type Description Single Standalone instance ReplicaSetWithPrimary Replica set with a primary ReplicaSetNoPrimary Replica set with no primary Sharded Sharded cluster Unknown Unknown topology", + "code": [ + { + "lang": "javascript", + "value": "/* Subscribe to SDAM event */\n\nconst { MongoClient } = require(\"mongodb\");\n\n// Replace the following with your MongoDB deployment's connection string\nconst uri = \"mongodb+srv:///?replicaSet=rs&writeConcern=majority\";\n\nconst client = new MongoClient(uri);\n\n// Replace with the name of the event you are subscribing to\nconst eventName = \"\";\n\n// Subscribe to a specified event and print a message when the event is received\nclient.on(eventName, event => {\n console.log(`received ${eventName}: ${JSON.stringify(event, null, 2)}`);\n});\n\nasync function run() {\n try {\n // Establish and verify connection to the database\n await client.db(\"admin\").command({ ping: 1 });\n console.log(\"Connected successfully\");\n } finally {\n // Close the database connection on completion or error\n await client.close();\n }\n}\nrun().catch(console.dir);\n" + }, + { + "lang": "javascript", + "value": "ServerDescriptionChangedEvent {\n topologyId: 0,\n address: 'localhost:27017',\n previousDescription: ServerDescription {\n address: 'localhost:27017',\n error: null,\n roundTripTime: 0,\n lastUpdateTime: 1571251089030,\n lastWriteDate: null,\n opTime: null,\n type: 'Unknown',\n minWireVersion: 0,\n maxWireVersion: 0,\n hosts: [],\n passives: [],\n arbiters: [],\n tags: []\n },\n newDescription: ServerDescription {\n address: 'localhost:27017',\n error: null,\n roundTripTime: 0,\n lastUpdateTime: 1571251089051,\n lastWriteDate: 2019-10-16T18:38:07.000Z,\n opTime: { ts: Timestamp, t: 18 },\n type: 'RSPrimary',\n minWireVersion: 0,\n maxWireVersion: 7,\n maxBsonObjectSize: 16777216,\n maxMessageSizeBytes: 48000000,\n maxWriteBatchSize: 100000,\n me: 'localhost:27017',\n hosts: [ 'localhost:27017' ],\n passives: [],\n arbiters: [],\n tags: [],\n setName: 'rs',\n setVersion: 1,\n electionId: ObjectID,\n primary: 'localhost:27017',\n logicalSessionTimeoutMinutes: 30,\n '$clusterTime': ClusterTime\n }\n}" + }, + { + "lang": "javascript", + "value": "ServerHeartbeatStartedEvent {\n connectionId: 'localhost:27017'\n}" + }, + { + "lang": "javascript", + "value": "ServerHeartbeatSucceededEvent {\n duration: 1.939997,\n reply:{\n hosts: [ 'localhost:27017' ],\n setName: 'rs',\n setVersion: 1,\n isWritablePrimary: true,\n secondary: false,\n primary: 'localhost:27017',\n me: 'localhost:27017',\n electionId: ObjectID,\n lastWrite: {\n opTime: { ts: [Timestamp], t: 18 },\n lastWriteDate: 2019-10-16T18:38:17.000Z,\n majorityOpTime: { ts: [Timestamp], t: 18 },\n majorityWriteDate: 2019-10-16T18:38:17.000Z\n },\n maxBsonObjectSize: 16777216,\n maxMessageSizeBytes: 48000000,\n maxWriteBatchSize: 100000,\n localTime: 2019-10-16T18:38:19.589Z,\n logicalSessionTimeoutMinutes: 30,\n minWireVersion: 0,\n maxWireVersion: 7,\n readOnly: false,\n ok: 1,\n operationTime: Timestamp,\n '$clusterTime': ClusterTime\n },\n connectionId: 'localhost:27017'\n}" + }, + { + "lang": "javascript", + "value": "ServerHeartbeatFailed {\n duration: 20,\n failure: 
MongoError('some error'),\n connectionId: 'localhost:27017'\n}" + }, + { + "lang": "javascript", + "value": "ServerOpeningEvent {\n topologyId: 0,\n address: 'localhost:27017'\n}" + }, + { + "lang": "javascript", + "value": "ServerClosedEvent {\n topologyId: 0,\n address: 'localhost:27017'\n}" + }, + { + "lang": "javascript", + "value": "TopologyOpeningEvent {\n topologyId: 0\n}" + }, + { + "lang": "javascript", + "value": "TopologyClosedEvent {\n topologyId: 0\n}" + }, + { + "lang": "javascript", + "value": "TopologyDescriptionChangedEvent {\n topologyId: 0,\n previousDescription: TopologyDescription {\n type: 'ReplicaSetNoPrimary',\n setName: null,\n maxSetVersion: null,\n maxElectionId: null,\n servers: Map {\n 'localhost:27017' => ServerDescription\n },\n stale: false,\n compatible: true,\n compatibilityError: null,\n logicalSessionTimeoutMinutes: null,\n heartbeatFrequencyMS: 10000,\n localThresholdMS: 15,\n options: Object,\n error: undefined,\n commonWireVersion: null\n },\n newDescription: TopologyDescription {\n type: 'ReplicaSetWithPrimary',\n setName: 'rs',\n maxSetVersion: 1,\n maxElectionId: null,\n servers: Map {\n 'localhost:27017' => ServerDescription\n },\n stale: false,\n compatible: true,\n compatibilityError: null,\n logicalSessionTimeoutMinutes: 30,\n heartbeatFrequencyMS: 10000,\n localThresholdMS: 15,\n options: Object,\n error: undefined,\n commonWireVersion: 7\n }\n}" + } + ], + "preview": "This guide shows you how to monitor topology events in a MongoDB instance,\nreplica set, or sharded cluster. The driver creates topology events, also\nknown as Server Discovery and Monitoring (SDAM) events, when there is\na change in the state of the instance or cluster that you connected to.\nFor example, the driver creates an event when you establish a new connection\nor if the cluster elects a new primary.", + "tags": "code example, node.js, watch", + "facets": { + "genre": ["reference"], + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "fundamentals/monitoring/command-monitoring", + "title": "Command Monitoring", + "headings": [ + "Overview", + "Event Subscription Example", + "Event Descriptions", + "Example Event Documents", + "commandStarted", + "commandSucceeded", + "commandFailed" + ], + "paragraphs": "This guide shows you how to monitor the success or failure of commands\nsent by the driver to your MongoDB deployment. The following sections demonstrate how to record command status in your\napplication and explore the information provided in these events. You can access one or more command monitoring events using the driver by\nsubscribing to them in your application. The following example demonstrates\nconnecting to a replica set and subscribing to one of the command monitoring\nevents created by the MongoDB deployment: Command monitoring is disabled by default. To enable command\nmonitoring, pass the monitorCommands option as true to\nyour MongoClient constructor. You can subscribe to any of the following command monitoring events: Event Name Description commandStarted Created when a command is started. commandSucceeded Created when a command succeeded. commandFailed Created when a command failed. 
The following sections show sample output for each type of command monitoring event.", + "code": [ + { + "lang": "javascript", + "value": "/* Subscribe to an event */\n\nconst { MongoClient } = require(\"mongodb\");\n\n// Replace the following with your MongoDB deployment's connection string\nconst uri = \"mongodb+srv:///?replicaSet=rs&writeConcern=majority\";\n\nconst client = new MongoClient(uri, { monitorCommands:true });\n\n// Replace with the name of the event you are subscribing to\nconst eventName = \"\";\n\n// Subscribe to a specified event and print a message when the event is received\nclient.on(eventName, event => {\n console.log(`received ${eventName}: ${JSON.stringify(event, null, 2)}`);\n});\n\nasync function run() {\n try {\n // Establish and verify connection to the \"admin\" database\n await client.db(\"admin\").command({ ping: 1 });\n console.log(\"Connected successfully\");\n } finally {\n // Close the database connection on completion or error\n await client.close();\n }\n}\nrun().catch(console.dir);\n" + }, + { + "lang": "javascript", + "value": "CommandStartedEvent {\n requestId: 1534,\n databaseName: \"app\",\n commandName: \"find\",\n address: 'localhost:27017',\n connectionId: 812613,\n command: {\n find: { firstName: \"Jane\", lastName: \"Doe\" }\n }\n}" + }, + { + "lang": "javascript", + "value": "CommandSucceededEvent {\n requestId: 1534,\n commandName: \"find\",\n address: 'localhost:27017',\n connectionId: 812613,\n duration: 15,\n reply: {\n cursor: {\n firstBatch: [\n {\n _id: ObjectId(\"5e8e2ca217b5324fa9847435\"),\n firstName: \"Jane\",\n lastName: \"Doe\"\n }\n ],\n _id: 0,\n ns: \"app.users\"\n },\n ok: 1,\n operationTime: 1586380205\n }\n}" + }, + { + "lang": "javascript", + "value": "CommandFailedEvent {\n requestId: 1534,\n commandName: \"find\",\n address: 'localhost:27017',\n connectionId: 812613,\n failure: Error(\"something failed\"),\n duration: 11\n}" + } + ], + "preview": "This guide shows you how to monitor the success or failure of commands\nsent by the driver to your MongoDB deployment.", + "tags": "code example, node.js, watch, command status", + "facets": { + "genre": ["reference"], + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "fundamentals/monitoring/connection-monitoring", + "title": "Connection Pool Monitoring", + "headings": [ + "Overview", + "Event Subscription Examples", + "Event Descriptions", + "Example Event Documents", + "connectionPoolCreated", + "connectionPoolReady", + "connectionPoolClosed", + "connectionCreated", + "connectionReady", + "connectionClosed", + "connectionCheckOutStarted", + "connectionCheckOutFailed", + "connectionCheckedOut", + "connectionCheckedIn", + "connectionPoolCleared" + ], + "paragraphs": "This guide shows you how to monitor the driver's connection pool . A\nconnection pool is a set of open TCP connections your driver maintains\nwith a MongoDB instance. Connection pools help reduce the number of\nnetwork handshakes your application needs to perform and can help your\napplication run faster. The following sections demonstrate how to record connection pool events in your\napplication and explore the information provided in these events. You can access one or more connection pool events using the driver by\nsubscribing to them in your application. 
The following example demonstrates\nconnecting to a replica set and subscribing to one of the connection\npool monitoring events created by the MongoDB deployment: Connection pool monitoring events can aid you in debugging and understanding\nthe behavior of your application's connection pool. The following example uses connection\npool monitoring events to return a count of checked-out connections in the pool: You can subscribe to any of the following connection pool monitoring events: Event Name Description connectionPoolCreated Created when a connection pool is created. connectionPoolReady Created when a connection pool is ready. connectionPoolClosed Created when a connection pool is closed before server\ninstance destruction. connectionCreated Created when a connection is created, but not necessarily\nwhen it is used for an operation. connectionReady Created after a connection has successfully completed a\nhandshake and is ready to be used for operations. connectionClosed Created when a connection is closed. connectionCheckOutStarted Created when an operation attempts to acquire a connection for\nexecution. connectionCheckOutFailed Created when an operation fails to acquire a connection for\nexecution. connectionCheckedOut Created when an operation successfully acquires a connection for\nexecution. connectionCheckedIn Created when a connection is checked back into the pool after an operation\nis executed. connectionPoolCleared Created when a connection pool is cleared. The following sections show sample output for each type of connection\npool monitoring event.", + "code": [ + { + "lang": "javascript", + "value": "const { MongoClient } = require(\"mongodb\");\n\n// Replace the following with your MongoDB deployment's connection string\nconst uri =\n \"mongodb+srv:///?replicaSet=rs&writeConcern=majority\";\n\nconst client = new MongoClient(uri);\n\n// Replace with the name of the event you are subscribing to\nconst eventName = \"\";\n\n// Subscribe to the event\nclient.on(eventName, (event) =>\n console.log(\"\\nreceived event:\\n\", event)\n);\n\nasync function run() {\n try {\n // Establish and verify connection\n await client.db(\"admin\").command({ ping: 1 });\n console.log(\"\\nConnected successfully!\\n\");\n } finally {\n // Ensures that the client will close when you finish/error\n await client.close();\n }\n}\nrun().catch(console.dir);\n" + }, + { + "lang": "javascript", + "value": "function connectionPoolStatus(client) {\n let checkedOut = 0;\n\n function onCheckout() {\n checkedOut++;\n }\n\n function onCheckin() {\n checkedOut--;\n }\n\n function onClose() {\n client.removeListener('connectionCheckedOut', onCheckout);\n client.removeListener('connectionCheckedIn', onCheckin);\n\n checkedOut = NaN;\n }\n\n // Decreases count of connections checked out of the pool when connectionCheckedIn event is triggered\n client.on('connectionCheckedIn', onCheckin);\n\n // Increases count of connections checked out of the pool when connectionCheckedOut event is triggered\n client.on('connectionCheckedOut', onCheckout);\n\n // Cleans up event listeners when client is closed\n client.on('close', onClose);\n\n return {\n count: () => checkedOut,\n cleanUp: onClose\n };\n}" + }, + { + "lang": "none", + "value": "ConnectionPoolCreatedEvent {\n time: 2023-02-13T15:54:06.944Z,\n address: '...',\n options: {...}\n}" + }, + { + "lang": "none", + "value": "ConnectionPoolReadyEvent {\n time: 2023-02-13T15:56:38.440Z,\n address: '...'\n}" + }, + { + "lang": "none", + "value": "ConnectionPoolClosedEvent 
{\n time: 2023-02-13T15:56:38.440Z,\n address: '...'\n}" + }, + { + "lang": "none", + "value": "ConnectionCreatedEvent {\n time: 2023-02-13T15:56:38.291Z,\n address: '...',\n connectionId: 1\n}" + }, + { + "lang": "none", + "value": "ConnectionReadyEvent {\n time: 2023-02-13T15:56:38.291Z,\n address: '...',\n connectionId: 1\n}" + }, + { + "lang": "none", + "value": "ConnectionClosedEvent {\n time: 2023-02-13T15:56:38.439Z,\n address: '...',\n connectionId: 1,\n reason: 'poolClosed',\n serviceId: undefined\n}" + }, + { + "lang": "none", + "value": "ConnectionCheckOutStartedEvent {\n time: 2023-02-13T15:56:38.291Z,\n address: '...',\n}" + }, + { + "lang": "none", + "value": "ConnectionCheckOutFailedEvent {\n time: 2023-02-13T15:56:38.291Z,\n address: '...',\n reason: ...\n}" + }, + { + "lang": "none", + "value": "ConnectionCheckedOutEvent {\n time: 2023-02-13T15:54:07.188Z,\n address: '...',\n connectionId: 1\n}" + }, + { + "lang": "none", + "value": "ConnectionCheckedInEvent {\n time: 2023-02-13T15:54:07.189Z,\n address: '...',\n connectionId: 1\n}" + }, + { + "lang": "none", + "value": "ConnectionPoolClearedEvent {\n time: 2023-02-13T15:56:38.439Z,\n address: '...',\n serviceId: undefined,\n interruptInUseConnections: true,\n}" + } + ], + "preview": "This guide shows you how to monitor the driver's connection pool. A\nconnection pool is a set of open TCP connections your driver maintains\nwith a MongoDB instance. Connection pools help reduce the number of\nnetwork handshakes your application needs to perform and can help your\napplication run faster.", + "tags": "code example, node.js, watch, deployment", + "facets": { + "genre": ["reference"], + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "fundamentals/monitoring", + "title": "Monitoring", + "headings": [], + "paragraphs": "Cluster Monitoring : monitoring\nchanges in a cluster Command Monitoring : monitoring\nthe execution status of commands Connection Pool Monitoring : monitoring\nthe driver's connection pool", + "code": [], + "preview": null, + "tags": null, + "facets": { + "genre": ["reference"], + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "fundamentals/promises", + "title": "Promises", + "headings": [ + "Overview", + "Promises", + "Await", + "Operational Considerations" + ], + "paragraphs": "The Node.js driver uses the asynchronous Javascript API to communicate with\nyour MongoDB cluster. Asynchronous Javascript allows you to execute operations without waiting for\nthe processing thread to become free. This helps prevent your application\nfrom becoming unresponsive when\nexecuting long-running operations. For more information about asynchronous\nJavascript, see the MDN web documentation on\n Asynchronous Javascript . This section describes Promises that you can use with the Node.js driver to\naccess the results of your method calls to your MongoDB cluster. A Promise is an object returned by the asynchronous method call that allows\nyou to access information on the eventual success or failure of the operation\nthat they wrap. The Promise is in the Pending state if the operation is\nstill running, Fulfilled if the operation completed successfully, and\n Rejected if the operation threw an exception. For more information on\nPromises and related terminology, see the MDN documentation on\n Promises . 
Most driver methods that communicate with your MongoDB cluster, such as\n findOneAndUpdate() and countDocuments() , return Promise\nobjects and already contain logic to handle the success or failure of the\noperation. You can define your own logic that executes once the Promise reaches the\n Fulfilled or Rejected state by appending the then() method.\nThe first parameter of then() is the method that gets called when the\nPromise reaches the Fulfilled state and the optional second parameter is\nthe method that gets called when it reaches the Rejected state. The\n then() method returns a Promise to which you can append more\n then() methods. When you append one or more then() methods to a Promise, each call passes\nits execution result to the next one. This pattern is called\n Promise chaining . The following code snippet shows an example of Promise\nchaining by appending a single then() method. To handle only Promise transitions to the Rejected state, use the catch() method\nrather than passing a null first parameter to then() . The catch() method\naccepts a single callback that is executed when the Promise transitions to the Rejected \nstate. The catch() method is often appended at the end of a Promise chain to\nhandle any exceptions thrown. The following code snippet demonstrates appending\na catch() method to the end of a Promise chain. Certain methods in the driver such as find() return a Cursor \ninstead of a Promise. To determine what type each method returns, see\nthe Node.js API documentation . If you are using async functions, you can use the await operator on\na Promise to pause further execution until the Promise reaches either the\n Fulfilled or Rejected state and returns. Since the await operator\nwaits for the resolution of the Promise, you can use it in place of\nPromise chaining to sequentially execute your logic. The following code\nsnippet uses await to execute the same logic as the first Promise\nchaining example. For more information, see the MDN documentation on\n await . One common mistake when using async methods is to forget to use await \noperator on Promises to get the value of the result rather than the Promise\nobject. Consider the following example in which we iterate over a cursor\nusing hasNext() , which returns a Promise that resolves to a boolean that\nindicates whether more results exist, and next() which returns a\nPromise that resolves to the next entry the cursor is pointing to. Since the call to hasNext() returns a Promise , the conditional\nstatement returns true regardless of the value that it resolves to. If we alter the code to await the call to next() only, as demonstrated\nin the following code snippet, it throws the following error:\n MongoError: Cursor is closed . While hasNext() is not called until after the result of next() returns,\nthe call to hasNext() returns a Promise which evaluates to true rather\nthan the value it resolves to, similar to the prior example. The code\nattempts to call next() on a Cursor that has already returned its results\nand closed as a result. If we alter the code to only await the call to hasNext() as shown in\nthe following example, the console prints Promise objects rather than the\ndocument objects. 
Use await before both the hasNext() and next() method calls to\nensure that you are operating on the correct return values as demonstrated\nin the following code:", + "code": [ + { + "lang": "js", + "value": "collection\n .updateOne({ name: \"Mount McKinley\" }, { $set: { meters: 6190 } })\n .then(\n res => console.log(`Updated ${res.result.n} documents`),\n err => console.error(`Something went wrong: ${err}`),\n );" + }, + { + "lang": "js", + "value": "deleteOne({ name: \"Mount Doom\" })\n .then(result => {\n if (result.deletedCount !== 1) {\n throw \"Could not find Mount Doom!\";\n }\n return new Promise((resolve, reject) => {\n ...\n });\n })\n .then(result => console.log(`Vanquished ${result.quantity} Nazgul`))\n .catch(err => console.error(`Fatal error occurred: ${err}`));" + }, + { + "lang": "js", + "value": "async function run() {\n ...\n try {\n res = await myColl.updateOne(\n { name: \"Mount McKinley\" },\n { $set: { meters: 6190 } },\n );\n console.log(`Updated ${res.result.n} documents`);\n } catch (err) {\n console.error(`Something went wrong: ${err}`);\n }\n}" + }, + { + "lang": "js", + "value": "async function run() {\n ...\n // WARNING: this snippet may cause an infinite loop\n const cursor = myColl.find();\n\n while (cursor.hasNext()) {\n console.log(cursor.next());\n }\n}" + }, + { + "lang": "js", + "value": "async function run() {\n ...\n // WARNING: this snippet throws a MongoError\n const cursor = myColl.find();\n\n while (cursor.hasNext()) {\n console.log(await cursor.next());\n }\n}" + }, + { + "lang": "js", + "value": "async function run() {\n ...\n // WARNING: this snippet prints Promises instead of the objects they resolve to\n const cursor = myColl.find();\n\n while (await cursor.hasNext()) {\n console.log(cursor.next());\n }\n}" + }, + { + "lang": "js", + "value": "async function run() {\n ...\n const cursor = myColl.find();\n\n while (await cursor.hasNext()) {\n console.log(await cursor.next());\n }\n}" + } + ], + "preview": "The Node.js driver uses the asynchronous Javascript API to communicate with\nyour MongoDB cluster.", + "tags": "code example, node.js, operation status, chain", + "facets": { + "genre": ["reference"], + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "fundamentals/run-command", + "title": "Run a Command", + "headings": [ + "Overview", + "Execute a Command", + "Command Options", + "Response", + "Example", + "Output", + "Additional Information" + ], + "paragraphs": "In this guide, you can learn how to run a database command with the\nNode.js driver. You can use database commands to perform a variety of\nadministrative and diagnostic tasks, such as fetching server statistics,\ninitializing a replica set, or running an aggregation pipeline. The driver provides wrapper methods for many database commands.\nWe recommend using driver methods instead of executing database\ncommands when possible. To perform administrative tasks, use the MongoDB Shell \ninstead of the Node.js driver. Calling the db.runCommand() \nmethod inside the shell is the preferred method to issue database\ncommands, as it provides a consistent interface between the shell and\ndrivers. To run a database command, you must specify the command and any relevant\nparameters in a document, then pass this document to a\ncommand execution method. 
The Node.js driver provides the following methods\nto run database commands: The following code shows how you can use the command() \nmethod to run the hello command, which returns information about\nthe current member's role in the replica set, on a database: For a full list of database commands and corresponding parameters, see\nthe Additional Information section . command() , which returns the command response as a\n Document type. You can use this method with any database command. runCursorCommand() , which returns the command response as an iterable\n RunCommandCursor type. You can use this method only if your database command\nreturns multiple result documents. You can specify optional command behavior for the command() \nand runCursorCommand() methods. The command() method accepts a RunCommandOptions object. To learn\nmore about the RunCommandOptions type, see the API documentation . The runCursorCommand() method accepts a RunCursorCommandOptions \nobject. To learn more about the RunCursorCommandOptions type, see\nthe API documentation . Starting in version 6.0 of the Node.js driver, you can pass only the\nfollowing options to the command() method: You can set more options in the document that you pass to the command() method. To\nlearn more about a command and the options that it accepts, locate the command and follow\nthe link on the Database Commands section of the Server\nmanual. The following code shows how to specify a grantRolesToUser command\nthat executes with a majority write concern: comment enableUtf8Validation raw readPreference session The command() and runCursorCommand() methods ignore\nthe read preference setting you may have set on your Db object.\nBy default, these methods use the primary read preference. The following code shows how to specify a read preference and pass it\nas an option to the command() method: For more information on read preference options, see Read\nPreference in the Server manual. Each method returns a Document object or a cursor that contains\nthe response from the database after the command has been executed. Each\ndatabase command performs a different function, so the response content\ncan vary across commands. However, every response contains documents\nwith the following fields: Field Description Provides fields specific to the database command. For example,\n count returns the n field and explain returns the\n queryPlanner field. ok Indicates whether the command has succeeded ( 1 )\nor failed ( 0 ). operationTime Indicates the logical time of the operation. MongoDB uses the\nlogical time to order operations. To learn more about logical time, see our blog post about\nthe Global Logical Clock . $clusterTime Provides a document that returns the signed cluster time. Cluster time is a\nlogical time used for ordering of operations. The document contains the following fields: clusterTime , which is the timestamp of the highest known cluster time for the member. signature , which is a document that contains the hash of the cluster time and the ID\nof the key used to sign the cluster time. The following code shows how you can use the runCursorCommand() method to\nrun the checkMetadataConsistency command on the testDB database\nand iterate through the results: The output contains the contents of the cursor object. The documents\ndescribe any metadata inconsistencies in the database: If you store the command response in a cursor, you see only the\ncommand result documents when you access the contents of the cursor. 
You won't\nsee the ok , operationTime , and $clusterTime fields. For more information about the concepts in this guide, see the following documentation: To learn how to retrieve data from a cursor, see the\n Access Data From a Cursor fundamentals page. db.runCommand() Database Commands hello Command find Command", + "code": [ + { + "lang": "javascript", + "value": "const result = await myDB.command({ hello: 1 });" + }, + { + "lang": "javascript", + "value": "const commandDoc = {\n grantRolesToUser: \"user011\",\n roles: [ \"readWrite\" ],\n writeConcern: { w: \"majority\" }\n};\nconst result = await myDB.command(commandDoc);" + }, + { + "lang": "javascript", + "value": "const commandOptions = { readPreference: \"nearest\" };\nconst result = await myDB.command(commandDoc, commandOptions);" + }, + { + "lang": "javascript", + "value": "// Connect to the \"testDB\" database\nconst db = client.db(\"testDB\");\n\n// Run a cursor command to check metadata consistency within the database\nconst cursor = await db.runCursorCommand({\n checkMetadataConsistency: 1,\n});\n// Iterate through the cursor's results and print the contents\nfor await (const doc of cursor) {\n console.log(doc);\n}" + }, + { + "lang": "javascript", + "value": "{\n type: ...,\n description: ...,\n details: {\n namespace: ...,\n info: ...\n }\n}\n{\n type: ...,\n description: ...,\n details: {\n namespace: ...,\n collectionUUID: ...,\n maxKeyObj: ...,\n ...\n }\n}" + } + ], + "preview": "In this guide, you can learn how to run a database command with the\nNode.js driver. You can use database commands to perform a variety of\nadministrative and diagnostic tasks, such as fetching server statistics,\ninitializing a replica set, or running an aggregation pipeline.", + "tags": null, + "facets": { + "genre": ["tutorial"], + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "fundamentals/stable-api", + "title": "Stable API", + "headings": [ + "Overview", + "Enable the Stable API on a MongoDB Client", + "Stable API Options" + ], + "paragraphs": "The Stable API feature requires MongoDB Server 5.0 or later. Use the Stable API feature only if all the MongoDB\nservers you are connecting to support this feature. In this guide, you can learn how to specify the Stable API when\nconnecting to a MongoDB instance or replica set. You can use the\nStable API feature to force the server to run operations with behavior\ncompatible with the specified API version . An API version defines the\nexpected behavior of the operations it covers and the format of server\nresponses. If you change to a different API version, the operations are not\nguaranteed to be compatible and the server responses are not guaranteed to\nbe similar. When you use the Stable API feature with an official MongoDB driver, you\ncan update your driver or server without worrying about backward compatibility\nissues of the commands covered by the Stable API. See the MongoDB reference page on the Stable API \nfor more information including a list of commands it covers. The following sections describe how you can enable the Stable API for\nyour MongoDB client and the options that you can specify. To enable the Stable API, you must specify an API version in the MongoClientOptions \npassed to your MongoClient . Once you instantiate a MongoClient instance with\na specified API version, all commands you run with that client use that\nversion of the Stable API. 
The example below shows how you can instantiate a MongoClient that\nsets the Stable API version and connects to a server by performing the\nfollowing operations: For more information on the methods and classes referenced in this\nsection, see the following API Documentation: To run commands that are not covered by the Stable API, make sure the\n\"strict\" option is disabled. See the section on\n Stable API Options for more\ninformation. which you want to run a command. Specify a server URI to connect to. Specify a Stable API version in the MongoClientOptions object, using a\nconstant from the ServerApiVersion object. Instantiate a MongoClient , passing the URI and the MongoClientOptions \nto the constructor. If you specify an API version and connect to a MongoDB Server that does\nnot support the Stable API, your application may throw an error when\nconnecting to your MongoDB Server with the following text: ServerApiVersion MongoClientOptions MongoClient You can enable or disable optional behavior related to the Stable API as\ndescribed in the following table. The following example shows how you can set the options of the ServerApi \ninterface. For more information on the options in this section, see the following\nAPI Documentation: Option Name Description version strict deprecationErrors ServerApi", + "code": [ + { + "lang": "javascript", + "value": "const { MongoClient, ServerApiVersion } = require(\"mongodb\");\n\n// Replace the placeholders in the connection string uri with your credentials\nconst uri = \"mongodb+srv://:@?retryWrites=true&w=majority\";\n\n// Create a client with options to specify Stable API Version 1\nconst client = new MongoClient(uri, { serverApi: ServerApiVersion.v1 });" + }, + { + "lang": "none", + "value": "MongoParseError: Invalid server API version=..." + }, + { + "lang": "javascript", + "value": "const { MongoClient, ServerApiVersion } = require(\"mongodb\");\n\n// Replace the placeholders in the connection string uri with your credentials\nconst uri = \"mongodb+srv://:@?retryWrites=true&w=majority\";\n\n/* Create a client with options to specify Stable API Version 1, return\nerrors for commands outside of the API version, and raise exceptions\nfor deprecated commands */\nconst client = new MongoClient(uri,\n {\n serverApi: {\n version: ServerApiVersion.v1,\n strict: true,\n deprecationErrors: true,\n }\n });" + } + ], + "preview": "In this guide, you can learn how to specify the Stable API when\nconnecting to a MongoDB instance or replica set. You can use the\nStable API feature to force the server to run operations with behavior\ncompatible with the specified API version. An API version defines the\nexpected behavior of the operations it covers and the format of server\nresponses. If you change to a different API version, the operations are not\nguaranteed to be compatible and the server responses are not guaranteed to\nbe similar.", + "tags": "code example, node.js, safe, breaking change", + "facets": { + "genre": ["reference"], + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "fundamentals/time-series", + "title": "Time Series", + "headings": [ + "Overview", + "Create a Time Series Collection", + "Query a Time Series Collection" + ], + "paragraphs": "In this guide, you can learn about time series collections in the MongoDB\nNode.js driver. 
We recommend that you create a time series collection using the MongoDB Shell.\nLearn more about how to install and run the MongoDB Shell in the MongoDB Shell documentation .\nFor detailed instructions on creating a time series collection\nusing the MongoDB Shell, see our\n MongoDB Manual entry on time series collections . Since you query a time series collection in the same way you query other\ncollection types in MongoDB, the Node.js driver has no features specifically for\nquerying time series data. For more information on querying data in the MongoDB Node.js driver, see the\nfollowing resources: Guide On Read Operations Guide On Aggregation MongoDB version 5.0 introduces window functions into the MongoDB aggregation\npipeline. You can use window functions to perform operations on a\ncontiguous span of time series data. For more information, see\n the reference documentation for the $setWindowFields aggregation stage .", + "code": [], + "preview": "In this guide, you can learn about time series collections in the MongoDB\nNode.js driver.", + "tags": null, + "facets": { + "genre": ["tutorial"], + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "fundamentals/transactions", + "title": "Transactions", + "headings": [ + "Overview", + "Transaction APIs", + "Core API", + "Convenient Transaction API", + "Transaction Options", + "Transaction Errors" + ], + "paragraphs": "In this guide, you can learn how to use the\nNode.js driver to perform transactions . Transactions allow you\nto run a series of operations that do not change any data until the\nentire transaction is committed. If any operation in the transaction fails, the\ndriver ends the transaction and discards all data changes before they\never become visible. This feature is called atomicity . Since all write operations on a single document in MongoDB are atomic, you\nmight want to use transactions to make an atomic change that\nmodifies multiple documents. This situation requires a multi-document transaction.\nMulti-document transactions are ACID compliant because MongoDB\nguarantees that the data involved in your transaction operations remains\nconsistent, even if the driver encounters unexpected errors. To learn more about ACID compliance and transactions, see our article on\nACID transactions . In MongoDB, multi-document transactions run within a client session .\nA client session is a grouping of related read or write operations that\nyou want to execute sequentially. We recommend you reuse\nyour client for multiple sessions and transactions instead of\ninstantiating a new client each time. When combined with majority read and\nwrite concerns, the driver guarantees causal consistency between the\noperations. To learn more, see Client Sessions and Causal Consistency Guarantees in the\nServer manual. Learn more about how to use the driver to perform multi-document\ntransactions in the following sections of this guide: To execute a multi-document transaction, you must be connected to a\ndeployment running MongoDB Server version 4.0 or later. For a detailed list of limitations, see the Transactions and\nOperations section in\nthe Server manual. Transaction APIs Transaction Options Transaction Errors The driver provides two APIs for performing transactions, the Core\nAPI and the Convenient Transaction API . The Core API is a framework that enables\nyou to create, commit, and end transactions. 
When using this API,\nyou must explicitly perform the following actions: The Convenient Transaction API is a\nframework that enables you to perform transactions without being\nresponsible for committing or ending them. This API automatically\nincorporates error-handling logic to retry operations when the server\nraises certain error types. To learn more about this behavior, see the\n Transaction Errors section of this guide. Create, commit, and end the transaction. Create and end the session in which you run the transaction. Implement error-handling logic. When you connect to MongoDB Server version 4.2 or\nearlier, you can perform write operations in a transaction only on\ncollections that already exist. When you connect to MongoDB Server\nversion 4.4 and later, the server automatically creates collections\nas necessary when you perform write operations in a transaction. To\nlearn more about this behavior, see Create Collections and\nIndexes in a Transaction \nin the Server manual. The Core API provides the following methods to implement transactions: You must perform the following steps when using this API: The following code demonstrates how to perform a transaction by using\nthe Core API: To see a fully runnable example that uses this API, see the\n Use the Core API usage example. startSession() :\ncreates a new ClientSession instance startTransaction() : starts a new\ntransaction commitTransaction() : commits the\nactive transaction in the session that it was created in abortTransaction() : ends the\nactive transaction in the session that it was created in endSession() : ends the\nactive session Pass the session instance to each operation that\nyou want to run in that session. Implement a catch block in which you identify\nserver transaction errors and implement error-handling logic. The driver throws an error if you provide a session from one MongoClient \ninstance to a different client instance. For example, the following code generates a\n MongoInvalidArgumentError error because it creates\na ClientSession instance from the client1 client, but provides\nthis session to the client2 client for a write operation: The Convenient Transaction API provides the following methods to\nimplement transactions: These methods return the value that the callback returns. For example,\nif a callback you pass to the withTransaction() method returns the\ndocument { hello: \"world\" } , then the withTransaction() method\nalso returns that document. When you use the Convenient Transaction API, you\ncan propagate return values from the callback as the return values of\nthe withTransaction() and withSession() methods to\nwork with them elsewhere in your code. You must perform the following steps when using this API: The following code demonstrates how to perform a transaction by using\nthe Convenient Transaction API: To see a fully runnable example that uses this API, see the\n Use the Convenient Transaction API usage example. withSession() : Runs\nthe callback passed to it within a session. The API handles the creation and\ntermination of the session automatically. withTransaction() :\nRuns the callback passed to it within a transaction and calls the\n commitTransaction() method when the callback returns. Pass the session instance to each operation that\nyou want to run in that session. Implement the async/await syntax for each operation in the\nsession. Avoid parallelism, such as calling the Promise.all() method.\nUsing sessions in parallel usually leads to server errors. 
You can pass a TransactionOptions instance to the\n startTransaction() and withTransaction() methods to configure\nhow the driver performs a transaction. When you specify an option,\nit overrides the value of the option that you might have set on your\n MongoClient instance. The following table includes options that you can specify\nin a TransactionOptions instance: For a full list of options, see the API documentation for\n TransactionOptions . The following code shows how to define and pass transaction options to\nthe startTransaction() method: Setting Description readConcern writeConcern readPreference maxCommitTimeMS Specifies the length of time that a commit action on a\ntransaction can run, in milliseconds. The transaction inherits settings from your MongoClient instance unless you\nspecify them in your transaction options. If you are using the Core API to perform a transaction, you must incorporate\nerror-handling logic into your application for the following errors: The Convenient Transaction API incorporates retry logic for these error\ntypes, so the driver retries the transaction until there is a successful commit. TransientTransactionError : Raised if a write operation errors\nbefore the driver commits the transaction. To learn more about this error, see the\n TransientTransactionError description on\nthe Driver API page in the Server manual. UnknownTransactionCommitResult : Raised if the commit operation\nencounters an error. To learn more about this error, see the\n UnknownTransactionCommitResult description on\nthe Driver API page in the Server manual.", + "code": [ + { + "lang": "javascript", + "value": "async function coreTest(client) {\n const session = client.startSession();\n try {\n session.startTransaction();\n\n const savingsColl = client.db(\"bank\").collection(\"savings_accounts\");\n await savingsColl.findOneAndUpdate(\n {account_id: \"9876\"}, \n {$inc: {amount: -100 }}, \n { session });\n\n const checkingColl = client.db(\"bank\").collection(\"checking_accounts\");\n await checkingColl.findOneAndUpdate(\n {account_id: \"9876\"}, \n {$inc: {amount: 100 }}, \n { session });\n\n // ... perform other operations\n\n await session.commitTransaction();\n console.log(\"Transaction committed.\");\n } catch (error) {\n console.log(\"An error occurred during the transaction:\" + error);\n await session.abortTransaction();\n } finally {\n await session.endSession();\n }\n}" + }, + { + "lang": "js", + "value": "const session = client1.startSession();\nclient2.db('myDB').collection('myColl').insertOne({ name: 'Jane Eyre' }, { session });" + }, + { + "lang": "javascript", + "value": "async function convTest(client) {\n let txnRes = await client.withSession(async (session) =>\n session.withTransaction(async (session) => {\n const savingsColl = client.db(\"bank\").collection(\"savings_accounts\");\n await savingsColl.findOneAndUpdate(\n {account_id: \"9876\"}, \n {$inc: {amount: -100 }}, \n { session });\n \n const checkingColl = client.db(\"bank\").collection(\"checking_accounts\");\n await checkingColl.findOneAndUpdate(\n {account_id: \"9876\"}, \n {$inc: {amount: 100 }}, \n { session });\n\n // ... 
perform other operations\n\n return \"Transaction committed.\";\n }, null)\n );\n console.log(txnRes);\n}" + }, + { + "lang": "javascript", + "value": "const txnOpts = {\n readPreference: 'primary',\n readConcern: { level: 'local' },\n writeConcern: { w: 'majority' },\n maxCommitTimeMS: 1000\n};\nsession.startTransaction(txnOpts);" + } + ], + "preview": "In this guide, you can learn how to use the\nNode.js driver to perform transactions. Transactions allow you\nto run a series of operations that do not change any data until the\nentire transaction is committed. If any operation in the transaction fails, the\ndriver ends the transaction and discards all data changes before they\never become visible. This feature is called atomicity.", + "tags": "modify, customize", + "facets": { + "genre": ["reference"], + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "fundamentals/typescript", + "title": "TypeScript", + "headings": [ + "Overview", + "Features", + "Type Parameters that Extend Document", + "Type Parameters of Any Type", + "Type Safety and Dot Notation", + "Referencing Keys that Incorporate Variables", + "Working with the _id Field", + "Insert Operations and the _id Field", + "Find Methods and the _id Field", + "Known Limitations", + "Recursive Types and Dot Notation", + "Mutual Recursion" + ], + "paragraphs": "In this guide, you can learn about the TypeScript features and limitations\nof the MongoDB Node.js driver. TypeScript is a strongly typed programming\nlanguage that compiles to JavaScript. The TypeScript compiler offers type checking in real time. Code editors that\nsupport TypeScript can provide autocomplete suggestions, display documentation\ninline, and identify type-related errors. All TypeScript features of the driver are optional. All valid JavaScript\ncode written with the driver is also valid TypeScript code. For more information, see the\n TypeScript website . If you use TypeScript, you can specify a type for some classes in the driver.\nAll classes that accept a type parameter in the driver have the default type\n Document . The Document interface has the following definition: All object types extend the Document interface. For more information on object types, see the\n TypeScript handbook . The following classes accept all types that extend the Document interface: You can pass a type parameter that extends the Document interface like this: Collection ChangeStream Keys not listed in your specified type parameter receive the any type.\nThe following code snippet demonstrates this behavior: The following classes accept all type parameters: You can find a code snippet that shows how to specify a type for the FindCursor \nclass in the\n Find Multiple Documents Usage Example . FindCursor AggregationCursor Starting in version 5.0, by default, the Node.js driver does not provide type\nsafety for operations that search on fields expressed in dot notation . Dot\nnotation is a syntax you can use to navigate nested JSON objects. When\nyou construct a filter to pass to a query, the driver will not raise a\ntype error even if you specify an incorrectly typed value for a field expressed\nin dot notation. 
The following code snippet defines the ClassificationPet interface,\nwhich includes a classification field that enables you to specify the\ngenus and color of dogs and cats: The driver does not raise a type error for the following code sample,\neven though the value of classification.color is a boolean\ninstead of a string: You can enable type-checking by constructing filters as StrictFilter or\n StrictUpdateFilter types. In the following code sample, the filter is assigned a\n StrictFilter type. Given this filter type, the Node.js driver\nreports a type error because the value of classification.color is a\nboolean instead of a string. The following example assigns a StrictUpdateFilter type to an update\nfilter. The Node.js driver reports a type error because the value of\n classification.color is a boolean instead of a string. The StrictFilter and StrictUpdateFilter types are experimental and\nmight incorrectly show type errors in valid queries. To query a collection or perform another operation with a key that incorporates\nvariables, you must use an as const assertion when specifying the key. This\nmechanism allows your code to compile successfully if the input types are\ncorrect. The following code snippet defines the ClassificationPet interface\nand the Mealtime interface. ClassificationPet includes a\n mealtimes field that contains an array of Mealtime interfaces,\neach of which includes a time field: The following code snippet performs a find-and-update operation on a\ncollection of ClassificationPet documents. The operation\nupdates the nested time field of the Mealtime instance at index\n 1 . The index position is specified by the variable mealCounter : To learn more about dot notation, see\n Dot Notation \nin the MongoDB manual. To learn more about the limitations of dot notation in the\nNode.js driver, see the\n Recursive Types and Dot Notation \nsection. MongoDB does not recommend specifying the _id as a part of your model.\nOmitting the _id field makes the model more generic and reusable and more accurately\nmodels the data important to an application. The Node driver\u2019s TypeScript integration\ntakes care of adding the _id field to the return types for relevant methods. The following sections provide information about write and read operations that\nuse the _id field. How you specify the _id field in type parameters passed to your\n Collection instance affects the behavior\nof insert operations. The following table describes how different\n _id field specifications affect insert operations: If you must specify the _id field as required in the type you define to represent\ndocuments in your collection but you do not want to specify values for the\n _id field in insert operations, use the OptionalId helper type when you\ncreate your collection. The OptionalId type accepts a type parameter as an\nargument and returns that type with an optional _id field. The following code snippet defines the IdPet interface, which\nincludes a type for the _id field: The following code uses the preceding interface and the\n OptionalId type to insert a document without specifying a value for the\n _id field: To learn more about the _id field, see\n The _id Field in the MongoDB\nmanual. 
To learn more about the types, interfaces, and classes discussed in this section, see the\nfollowing resources: _id field type Example Type Required on insert Behavior on insert OptionalId API documentation PkFactory API documentation ObjectId source code The find and findOne methods of the Collection class include\nthe _id field in their return type. The driver infers the type of the\nreturned _id field based on the type parameter you passed to your\n Collection instance. If the type parameter you passed to your Collection instance includes the\n _id field in its schema, the driver infers that the _id field returned\nfrom the method is of the type specified in the schema. However, if the type parameter you passed to your Collection instance does not\ninclude the _id field in its schema, the driver infers that the type of the\n _id field returned from the method is ObjectId . The following code uses the Pet \ninterface to return a document with an _id inferred to be of type ObjectId : The following code uses the IdNumberPet interface to return a\ndocument with an _id inferred to be of type number : To learn more about the classes and methods discussed in this section, see the following\nAPI documentation: The type parameter passed to your Collection influences only the type\ninference of the fields returned from the method. The driver does not convert\nthe field to the specified type. The type of each field in your type\nparameter's schema must match the type of the corresponding field in the\ncollection. If you specify a projection in a find\nmethod, you must pass a type parameter to your find method that reflects\nthe structure of your projected documents.\nWithout a type parameter, TypeScript cannot check at compile time that you\nare using your projected documents safely. To show this behavior, the following code snippet passes type checking but\nraises an error at runtime: To catch this error at compile time, pass a type parameter that does not include\nthe _id field to your find method: To view a runnable TypeScript example that includes a find method applying a\nprojection, see the\n Find a Document page. Collection find findOne Learn about the following TypeScript specific limitations of the Node.js driver: No type safety for dot notation references to nested instances of recursive types Depth limitations on type safety for mutually recursive types The Node.js driver cannot provide type safety within nested instances of\n recursive types referenced through dot notation. A recursive type is a type that references itself. You can update\nthe Pet interface\nto be recursive by allowing a pet to have its own pet. The following is the\nrecursive Pet interface: The following code snippet references a nested instance of the\n RecursivePet interface\nwith an incorrect type using dot notation, but the TypeScript compiler\ndoes not raise a type error: The following code snippet references a top-level instance of the\n RecursivePet interface with an incorrect type and raises a type error: The error raised by the preceding code snippet is as follows: If you must have type safety within nested instances of recursive types,\nyou must write your query or update without dot notation. To learn more about dot notation, see\n Dot Notation \nin the MongoDB manual. The Node.js driver does not traverse nested recursive types when\ntype checking dot notation keys to avoid hitting\nTypeScript's recursive depth limit. 
A mutually recursive type exists when two types contain a property that is of\nthe other's type. You can update the Pet \ninterface to be mutually recursive by allowing a pet to have a handler, and\ndefining a handler to have a pet. The following examples reference the mutually\nrecursive Pet and Handler interfaces: The Node.js driver provides type safety for mutually recursive types\nreferenced through dot notation up to a depth of eight. The following code\nsnippet assigns a string to a number and raises a type error because\nthe referenced property is at a depth of four: The error raised by the preceding code snippet is as follows: At a depth greater than or equal to eight, TypeScript compiles your code but no\nlonger type checks it. The following code assigns a string to a number \nproperty but does not cause a compilation error because the referenced property\nis at a depth of 10:", + "code": [ + { + "lang": "typescript", + "value": "interface Document {\n [key: string]: any;\n}" + }, + { + "lang": "typescript", + "value": "interface Pet {\n name: string;\n age: number;\n}\n\nconst database = client.db(\"\");\nconst collection = database.collection(\"\");\n" + }, + { + "lang": "typescript", + "value": "interface User {\n email: string;\n}\n\nconst database = client.db(\"\");\nconst myColl = db.collection(\"\");\nmyColl.find({ age: \"Accepts any type!\" });" + }, + { + "lang": "typescript", + "value": "interface ClassificationPet {\n name: string;\n age: number;\n classification: { genus: \"Canis\" | \"Felis\"; color: string };\n}" + }, + { + "lang": "typescript", + "value": "await myColl.findOneAndDelete({ \"classification.color\": false });" + }, + { + "lang": "typescript", + "value": "const filterPredicate: StrictFilter = { \"classification.color\": false };\nawait myColl.findOneAndDelete(filterPredicate);" + }, + { + "lang": "typescript", + "value": "const updateFilter: StrictUpdateFilter = { $set: { \"classification.color\": false } }\nawait pets.updateOne({}, updateFilter);" + }, + { + "lang": "typescript", + "value": "interface ClassificationPet {\n name: string;\n mealtimes: Mealtime[];\n}\n\ninterface Mealtime{\n time: string;\n amount: number;\n}" + }, + { + "lang": "typescript", + "value": "const mealCounter = 1;\n\nawait myColl.findOneAndUpdate(\n { name: \"Lassie\" },\n { $set: { [`mealtimes.${mealCounter}.time` as const]: '04:00 PM' } },\n);" + }, + { + "lang": "typescript", + "value": "interface IdPet {\n _id: ObjectId;\n name: string;\n age: number;\n}" + }, + { + "lang": "typescript", + "value": "const database = client.db(\"\");\nconst collection = db.collection>(\"\");\n\nmyColl.insertOne({\n name: \"Spot\",\n age: 2\n});" + }, + { + "lang": "typescript", + "value": "const database = client.db(\"\");\nconst collection = db.collection(\"\");\n\nconst document = await myColl.findOne({\n name: \"Spot\",\n});\nconst id : ObjectId = document._id;" + }, + { + "lang": "typescript", + "value": "interface IdNumberPet {\n _id: number;\n name: string;\n age: number;\n}\n\nconst database = client.db(\"\");\nconst collection = db.collection(\"\");\n\nconst document = await myColl.findOne({\n name: \"Spot\",\n});\nconst id : number = document._id;" + }, + { + "lang": "typescript", + "value": "const doc = await myColl.findOne(\n {},\n { projection: { _id: 0, name: 1 } }\n);\nconsole.log(doc._id.generationTime);" + }, + { + "lang": "typescript", + "value": "interface ProjectedDocument {\n name: string\n}\n\nconst doc = await myColl.findOne(\n {},\n { projection: { _id: 0, name: 1 } 
}\n);\n// Compile time error: Property '_id' does not exist on type 'ProjectedDocument'.\nconsole.log(doc._id.generationTime);" + }, + { + "lang": "typescript", + "value": "interface RecursivePet {\n pet?: RecursivePet;\n name: string;\n age: number;\n}" + }, + { + "lang": "typescript", + "value": "database\n .collection(\"\")\n .findOne({ \"pet.age\": \"Spot\" });" + }, + { + "lang": "typescript", + "value": "database\n .collection(\"\")\n .findOne({ pet: \"Spot\" });" + }, + { + "lang": "none", + "value": "index.ts(19,59): error TS2769: No overload matches this call.\nThe last overload gave the following error.\nType 'string' is not assignable to type 'Condition'." + }, + { + "lang": "typescript", + "value": "interface Pet {\n handler?: Handler;\n name: string;\n age: number;\n}\n\ninterface Handler {\n pet: Pet;\n name: string;\n}" + }, + { + "lang": "typescript", + "value": "database\n .collection(\"\")\n .findOne({'handler.pet.handler.pet.age': \"four\"});" + }, + { + "lang": "none", + "value": "index.ts(19,59): error TS2769: No overload matches this call.\nThe last overload gave the following error.\nType 'string' is not assignable to type 'Condition | undefined'." + }, + { + "lang": "typescript", + "value": "database\n .collection(\"\")\n .findOne({'handler.pet.handler.pet.handler.pet.handler.pet.handler.pet.age': \"four\"});" + } + ], + "preview": "In this guide, you can learn about the TypeScript features and limitations\nof the MongoDB Node.js driver. TypeScript is a strongly typed programming\nlanguage that compiles to JavaScript.", + "tags": "code example, node.js, static typing", + "facets": { + "genre": ["reference"], + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "fundamentals", + "title": "Fundamentals", + "headings": [], + "paragraphs": "Learn how to perform the following tasks using the Node.js driver in the\nFundamentals section: Connect to MongoDB Use the Stable API Authenticate with MongoDB Read from and Write to MongoDB Access Return Values Transform your Data Create and Manage Transactions Run a Database Command Create Indexes to Speed Up Queries Sort Using Collations Log Events in the Driver Monitor Driver Events Store and Retrieve Large Files in MongoDB Encrypt Fields from the Client Create and Query Time Series Collection Specify Type Parameters with TypeScript Specify BSON Serialization Settings", + "code": [], + "preview": null, + "tags": null, + "facets": { + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "", + "title": "MongoDB Node Driver", + "headings": [ + "Introduction", + "Quick Start", + "Quick Reference", + "What's New", + "Usage Examples", + "Fundamentals", + "Aggregation Tutorials", + "API", + "FAQ", + "Connection Troubleshooting", + "Issues & Help", + "Compatibility", + "Upgrade Driver Versions", + "Related Tools and Libraries", + "Object Document Mappers", + "Packages", + "Learn", + "Developer Hub", + "MongoDB University", + "Take the Following Free Online Courses Taught by MongoDB Instructors" + ], + "paragraphs": "Welcome to the documentation site for the official MongoDB Node.js driver.\nYou can add the driver to your application to work with MongoDB\nin JavaScript or TypeScript. For more information about downloading and\ninstalling the Node.js driver, see\n Download and Install in the\nQuick Start guide. 
You can connect using the Node.js driver for\ndeployments hosted in the following environments: MongoDB Atlas : The fully\nmanaged service for MongoDB deployments in the cloud MongoDB Enterprise : The\nsubscription-based, self-managed version of MongoDB MongoDB Community : The\nsource-available, free-to-use, and self-managed version of MongoDB Learn how to establish a connection to MongoDB Atlas and begin\nworking with data in the step-by-step Quick Start . See driver syntax examples for common MongoDB commands in the\n Quick Reference section. For a list of new features and changes in each version, see the\n What's New section. For fully runnable code snippets and explanations for common\nmethods, see the Usage Examples section. Learn how to perform the following tasks using the Node.js driver in the\nFundamentals section: Connect to MongoDB Use the Stable API Authenticate with MongoDB Read from and Write to MongoDB Access Return Values Transform your Data Create and Manage Transactions Run a Database Command Create Indexes to Speed Up Queries Sort Using Collations Log Events in the Driver Monitor Driver Events Store and Retrieve Large Files in MongoDB Encrypt Fields from the Client Create and Query Time Series Collection Specify Type Parameters with TypeScript Specify BSON Serialization Settings For step-by-step explanations of common\naggregation tasks, see the Aggregation Tutorials \nsection. For detailed information about classes and methods in the MongoDB\nNode.js driver, see the MongoDB Node.js driver API documentation . For answers to commonly asked questions about the MongoDB\nNode.js Driver, see the Frequently Asked Questions (FAQ) \nsection. For solutions to issues you might encounter when using the driver to connect to\na MongoDB deployment, see the Connection Troubleshooting section. Learn how to report bugs, contribute to the driver, and find help in the\n Issues & Help section. For the compatibility tables that show the recommended Node.js driver\nversion for each MongoDB Server version, see the\n Compatibility section. Learn what changes you must make to your application to upgrade\ndriver versions in the Upgrade Driver Versions section. MongoDB and our partners provide several object-document mappers (ODMs) for Node.js that\nlet developers work with MongoDB data as objects. One popular ODM is Mongoose ,\nwhich helps enforce a semi-rigid schema at the application level and provides features\nto assist with data modeling and manipulation. Prisma , another ODM, helps\nensure data consistency by offering a type-safe database client and an intuitive schema. For more information about using ODMs with MongoDB, see the following resources: MongoDB ORMs, ODMs, and Libraries Mongoose official documentation Prisma official documentation You can install the following packages to expand the functionality of the Node.js driver: For information about each package's version compatibility, see the Component Support Matrix in the Node.js driver GitHub\nrepository. Package Description kerberos C++ extension for Node.js that provides support for Kerberos authentication mongodb-legacy Legacy Node.js driver with optional callback support Visit the Developer Hub and MongoDB University to learn more about the\nNode.js driver. The Developer Hub provides tutorials and social engagement for developers. To learn how to use MongoDB features with the Node.js driver, see the\n How To's and Articles page . 
To ask questions and engage in discussions with fellow developers using\nthe Node.js driver, see the Developer Community forums . MongoDB University provides free courses to teach everyone how to use MongoDB. Using MongoDB with Node.js Learn the essentials of Node.js application development with MongoDB. MongoDB Node.js Developer Path Gain a comprehensive understanding of Node.js application development, complex operations, interactions\nwith MongoDB Atlas datasets, and more.", + "code": [], + "preview": "Learn how to connect to and interact with data stored in MongoDB by using JavaScript or TypeScript with the Node.js driver.", + "tags": "node.js, object-relational, object-document", + "facets": { + "genre": ["reference"], + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "issues-and-help", + "title": "Issues & Help", + "headings": ["Bugs / Feature Requests", "Pull Requests"], + "paragraphs": "Our developer community is vibrant and highly engaged, with extensive experience using Node.js with MongoDB. Often, the quickest way to get support for general questions is through the\n MongoDB Community Forums . Refer to our support channels documentation for more information. To report a bug or to request a new feature in the Node.js driver,\nplease open a case in our issue management tool, JIRA: Bug reports in JIRA for the Node.js driver and the Core Server (SERVER) project are public . If you\u2019ve identified a security vulnerability in a driver or any other\nMongoDB project, please report it according to the instructions found in\nthe Create a Vulnerability Report . Create an account and login . Navigate to the NODE project . Click Create Issue . Please provide as much information as possible about the\nissue and the steps to reproduce it. We are happy to accept contributions to help improve the driver. We will review user\ncontributions to ensure they meet the standards of the codebase. Pull requests must pass\nthe travis.ci checks, include documentation, and include tests. To get started check out the source and work on a branch: To run the test suite, you must have a server topology running and provide the URI to the command.\nFor example, if you have a single server running at \"mongodb://localhost:27017\" , you can run the following: Note that the tests run on your feature are different depending on the type of topology\nthat you are running, such as for a standalone instance or replica set. There are many tools that can help you with setting up different topologies for local testing.\nSome examples are mtools and mongo-orchestration .", + "code": [ + { + "lang": "bash", + "value": "git clone https://github.com/mongodb/node-mongodb-native.git\ncd node-mongodb-native\nnpm install\ngit checkout -b myNewFeature" + }, + { + "lang": "bash", + "value": "MONGODB_URI=\"mongodb://localhost:27017\" npm test" + } + ], + "preview": "Our developer community is vibrant and highly engaged, with extensive experience using Node.js with MongoDB.", + "tags": null, + "facets": { + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "quick-reference", + "title": "Quick Reference", + "headings": ["Compatibility"], + "paragraphs": "This page shows the driver syntax for several MongoDB commands and links to\ntheir related reference and API documentation. 
You can use the Node.js driver to connect and execute commands for\ndeployments hosted in the following environments: MongoDB Atlas : The fully\nmanaged service for MongoDB deployments in the cloud MongoDB Enterprise : The\nsubscription-based, self-managed version of MongoDB MongoDB Community : The\nsource-available, free-to-use, and self-managed version of MongoDB To learn more about performing common CRUD operations in the Atlas UI for deployments hosted in MongoDB\nAtlas, see Create, View, Update, and Delete Documents . Command Syntax", + "code": [ + { + "lang": "js", + "value": "await coll.findOne({ title: 'Hamlet' });" + }, + { + "lang": "js", + "value": "{ title: 'Hamlet', type: 'movie', ... }" + }, + { + "lang": "js", + "value": "coll.find({ year: 2005 });" + }, + { + "lang": "js", + "value": "[\n { title: 'Christmas in Boston', year: 2005, ... },\n { title: 'Chicken Little', year: 2005, ... },\n ...\n]" + }, + { + "lang": "javascript", + "value": "await coll.insertOne({ title: 'Jackie Robinson' });" + }, + { + "lang": "javascript", + "value": "await coll.insertMany([\n { title: 'Dangal', rating: 'Not Rated' },\n { title: 'The Boss Baby', rating: 'PG' }\n ]);" + }, + { + "lang": "js", + "value": "await coll.updateOne(\n { title: 'Amadeus' },\n { $set: { 'imdb.rating': 9.5 } }\n);" + }, + { + "lang": "js", + "value": "{ title: 'Amadeus', imdb: { rating: 9.5, ... } }" + }, + { + "lang": "js", + "value": "await coll.updateMany(\n { year: 2001 },\n { $inc: { 'imdb.votes': 100 } }\n);" + }, + { + "lang": "js", + "value": "[\n { title: 'A Beautiful Mind', year: 2001, imdb: { votes: 826257, ... },\n { title: 'Shaolin Soccer', year: 2001, imdb: { votes: 65442, ... },\n ...\n]" + }, + { + "lang": "js", + "value": "await coll.updateOne(\n { title: 'Cosmos' },\n { $push: { genres: 'Educational' } }\n):" + }, + { + "lang": "js", + "value": "{ title: 'Cosmos', genres: [ 'Documentary', 'Educational' ] }" + }, + { + "lang": "js", + "value": "await coll.replaceOne(\n { name: 'Deli Llama', address: '2 Nassau St' },\n { name: 'Lord of the Wings', zipcode: 10001 }\n);" + }, + { + "lang": "js", + "value": "{ name: 'Lord of the Wings', zipcode: 10001 }" + }, + { + "lang": "javascript", + "value": "await coll.deleteOne({ title: 'Congo' });" + }, + { + "lang": "javascript", + "value": "await coll.deleteMany({ title: { $regex: /^Shark.*/ } });" + }, + { + "lang": "js", + "value": "await coll.bulkWrite([\n {\n insertOne: {\n document: {\n title: 'A New Movie',\n year: 2022\n }\n }\n },\n {\n deleteMany: {\n filter: { year: { $lt: 1970 } }\n }\n }\n]);" + }, + { + "lang": "js", + "value": "BulkWriteResult {\n result: {\n ...\n },\n ...\n}" + }, + { + "lang": "javascript", + "value": "coll.watch([ { $match: { year: { $gte: 2022 } } } ]);" + }, + { + "lang": "js", + "value": "const cursor = coll.find();\nfor await (const doc of cursor) {\n console.dir(doc);\n}" + }, + { + "lang": "js", + "value": "[\n { title: '2001: A Space Odyssey', ... },\n { title: 'The Sound of Music', ... },\n ...\n]" + }, + { + "lang": "js", + "value": "const cursor = coll.find();\nconst results = await cursor.toArray();" + }, + { + "lang": "js", + "value": "[\n { title: '2001: A Space Odyssey', ... },\n { title: 'The Sound of Music', ... },\n ...\n]" + }, + { + "lang": "js", + "value": "await coll.countDocuments({ year: 2000 });" + }, + { + "lang": "js", + "value": "618" + }, + { + "lang": "js", + "value": "await coll.distinct('year');" + }, + { + "lang": "js", + "value": "[ 1891, 1893, 1894, 1896, 1903, ... 
]" + }, + { + "lang": "js", + "value": "coll.find().limit(2);" + }, + { + "lang": "js", + "value": "[\n { title: 'My Neighbor Totoro', ... },\n { title: 'Am\u00e9lie', ... }\n]" + }, + { + "lang": "js", + "value": "coll.find({ title: { $regex: /^Rocky/} }, { skip: 2 });" + }, + { + "lang": "js", + "value": "[\n { title: 'Rocky III', ... },\n { title: 'Rocky IV', ... },\n { title: 'Rocky V'}, ... }\n]" + }, + { + "lang": "js", + "value": "coll.find().sort({ year: 1});" + }, + { + "lang": "js", + "value": "[\n { title: 'Newark Athlete', year: 1891, ... },\n { title: 'Blacksmith Scene', year: 1893, ...},\n { title: 'Dickson Experimental Sound Film', year: 1894},\n ...\n]" + }, + { + "lang": "js", + "value": "coll.find().project({ _id: 0, year: 1, imdb: 1 });" + }, + { + "lang": "js", + "value": "[\n { year: 2012, imdb: { rating: 5.8, votes: 230, id: 8256 }},\n { year: 1985, imdb: { rating: 7.0, votes: 447, id: 1654 }},\n ...\n]" + }, + { + "lang": "javascript", + "value": "await coll.createIndex({ title: 1, year: -1 });" + }, + { + "lang": "js", + "value": "// only searches fields with text indexes\ncoll.find({ $text: { $search: 'zissou' } });" + }, + { + "lang": "js", + "value": "[\n { title: 'The Life Aquatic with Steve Zissou', ... }\n]" + }, + { + "lang": "javascript", + "value": "\"dependencies\": {\n \"mongodb\": \"^6.8\",\n ...\n}" + } + ], + "preview": "See Node.js driver code examples of frequently-used MongoDB commands and links to their related reference and API documentation.", + "tags": "node.js, code example", + "facets": { + "genre": ["reference"], + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "quick-start/connect-to-mongodb", + "title": "Connect to MongoDB", + "headings": [ + "Create your Node.js Application", + "Assign the Connection String", + "Run your Node.js Application" + ], + "paragraphs": "After you complete these steps, you have a working application that\nuses the driver to connect to your MongoDB deployment, runs a query on\nthe sample data, and prints out the result. Create a file to contain your application called index.js in your\n node_quickstart project directory. Copy and paste the following code into the index.js file: Replace the placeholder with the\nconnection string that you copied from the Create a Connection String \nstep of this guide. In your shell, run the following command to start this application: The output includes details of the retrieved movie document: If you encounter an error or see no output, check whether you specified the\nproper connection string in the index.js file, and that you loaded the\nsample data. 
If you run into issues on this step, ask for help in the\n MongoDB Community Forums \nor submit feedback by using the Rate this page \ntab on the right or bottom right side of this page.", + "code": [ + { + "lang": "js", + "value": "const { MongoClient } = require(\"mongodb\");\n\n// Replace the uri string with your connection string.\nconst uri = \"\";\n\nconst client = new MongoClient(uri);\n\nasync function run() {\n try {\n const database = client.db('sample_mflix');\n const movies = database.collection('movies');\n\n // Query for a movie that has the title 'Back to the Future'\n const query = { title: 'Back to the Future' };\n const movie = await movies.findOne(query);\n\n console.log(movie);\n } finally {\n // Ensures that the client will close when you finish/error\n await client.close();\n }\n}\nrun().catch(console.dir);" + }, + { + "lang": "none", + "value": "node index.js" + }, + { + "lang": "none", + "value": "{\n _id: ...,\n plot: 'A young man is accidentally sent 30 years into the past...',\n genres: [ 'Adventure', 'Comedy', 'Sci-Fi' ],\n ...\n title: 'Back to the Future',\n ...\n}" + } + ], + "preview": "After you complete these steps, you have a working application that\nuses the driver to connect to your MongoDB deployment, runs a query on\nthe sample data, and prints out the result.", + "tags": null, + "facets": { + "genre": ["tutorial"], + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "quick-start/create-a-connection-string", + "title": "Create a Connection String", + "headings": [ + "Find your MongoDB Atlas Connection String", + "Copy your Connection String", + "Update the Placeholders" + ], + "paragraphs": "You can connect to your MongoDB deployment by providing a\n connection URI , also called a connection string , which\ninstructs the driver on how to connect to a MongoDB deployment\nand how to behave while connected. The connection string includes the hostname or IP address and\nport of your deployment, the authentication mechanism, user credentials\nwhen applicable, and connection options. To connect to an instance or deployment not hosted on Atlas, see\n Other Ways to Connect to MongoDB . After completing these steps, you have a connection string that\ncontains your database username and password. To retrieve your connection string for the deployment that\nyou created in the previous step ,\nlog into your Atlas account and navigate to the\n Database section and click the Connect button\nfor your new deployment. Proceed to the Connect your application section and select\n\"Node.js\" from the Driver selection menu and the version\nthat best matches the version you installed from the Version \nselection menu. Select the Password (SCRAM) authentication mechanism. Deselect the Include full driver code example to view\nthe connection string. Click the button on the right of the connection string to copy it to\nyour clipboard as shown in the following screenshot: Paste this connection string into a file in your preferred text editor\nand replace the \"\" and \"\" placeholders with\nyour database user's username and password. Save this file to a safe location for use in the next step. 
If you run into issues on this step, ask for help in the\n MongoDB Community Forums \nor submit feedback by using the Rate this page \ntab on the right or bottom right side of this page.", + "code": [], + "preview": "You can connect to your MongoDB deployment by providing a\nconnection URI, also called a connection string, which\ninstructs the driver on how to connect to a MongoDB deployment\nand how to behave while connected.", + "tags": null, + "facets": { + "genre": ["tutorial"], + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "quick-start/create-a-deployment", + "title": "Create a MongoDB Deployment", + "headings": [ + "Create a Free MongoDB deployment on Atlas", + "Save your Credentials" + ], + "paragraphs": "You can create a free tier MongoDB deployment on MongoDB Atlas\nto store and manage your data. MongoDB Atlas hosts and manages\nyour MongoDB database in the cloud. After you complete these steps, you have a new free tier MongoDB\ndeployment on Atlas, database user credentials, and sample data loaded\nin your database. Complete the Get Started with Atlas \nguide to set up a new Atlas account and load sample data into a new free\ntier MongoDB deployment. After you create your database user, save that user's\nusername and password to a safe location for use in an upcoming step. If you run into issues on this step, ask for help in the\n MongoDB Community Forums \nor submit feedback by using the Rate this page \ntab on the right or bottom right side of this page.", + "code": [], + "preview": "You can create a free tier MongoDB deployment on MongoDB Atlas\nto store and manage your data. MongoDB Atlas hosts and manages\nyour MongoDB database in the cloud.", + "tags": null, + "facets": { + "genre": ["tutorial"], + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "quick-start/download-and-install", + "title": "Download and Install", + "headings": [ + "Install Node and npm", + "Create a Project Directory", + "Install the Node.js Driver" + ], + "paragraphs": "After you complete these steps, you have Node.js and npm installed\nand a new project directory with the driver dependencies installed. Ensure you have Node.js v16 or later and\nnpm (Node Package Manager) installed in your development environment. For information on how to install Node.js and npm, see\n downloading and installing Node.js and npm . In your shell, run the following command to create a\ndirectory called node_quickstart for this project: Run the following command to navigate into the project\ndirectory: Run the following command to initialize your Node.js project: When this command successfully completes, you have a package.json \nfile in your node_quickstart directory. 
Run the following command in your shell to install\nthe driver in your project directory: This command performs the following actions: Downloads the mongodb package and the dependencies it requires Saves the package in the node_modules directory Records the dependency information in the package.json file If you run into issues on this step, ask for help in the\n MongoDB Community Forums \nor submit feedback by using the Rate this page \ntab on the right or bottom right side of this page.", + "code": [ + { + "lang": "bash", + "value": "mkdir node_quickstart" + }, + { + "lang": "bash", + "value": "cd node_quickstart" + }, + { + "lang": "bash", + "value": "npm init -y" + }, + { + "lang": "bash", + "value": "npm install mongodb@6.8" + } + ], + "preview": "After you complete these steps, you have Node.js and npm installed\nand a new project directory with the driver dependencies installed.", + "tags": null, + "facets": { + "genre": ["tutorial"], + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "quick-start/next-steps", + "title": "Next Steps", + "headings": [], + "paragraphs": "Congratulations on completing the quick start tutorial! In this tutorial, you created a Node.js application that\nconnects to a MongoDB deployment hosted on MongoDB Atlas\nand retrieves a document that matches a query. Learn more about the MongoDB Node.js driver from the following resources: Discover how to perform read and write operations in the\n CRUD Operations section. See examples of frequently-used operations in the\n Usage Examples section.", + "code": [], + "preview": "Congratulations on completing the quick start tutorial!", + "tags": null, + "facets": { + "genre": ["tutorial"], + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "quick-start", + "title": "Node Driver Quick Start", + "headings": ["Overview"], + "paragraphs": "This guide shows you how to create an application that uses the\nMongoDB Node.js driver to connect to a MongoDB cluster hosted on MongoDB Atlas. If\nyou prefer to connect to MongoDB using a different driver or programming\nlanguage, see our list of official drivers . The Node.js driver is a library of functions that you can use to connect\nto and communicate with MongoDB. MongoDB Atlas is a fully managed cloud database service that hosts your\nMongoDB deployments. You can create your own free (no credit card\nrequired) MongoDB Atlas deployment by following the steps in this guide. Follow the steps in this guide to connect a sample Node.js application to\na MongoDB Atlas deployment.", + "code": [], + "preview": "Learn how to create an app to connect to MongoDB deployment by using the Node.js driver.", + "tags": "node.js", + "facets": { + "genre": ["tutorial"], + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "upgrade", + "title": "Upgrade Driver Versions", + "headings": [ + "Overview", + "How to Upgrade", + "Breaking Changes", + "Version 6.0 Breaking Changes", + "Version 5.0 Breaking Changes", + "Version 4.0 Breaking Changes", + "Server Release Compatibility Changes", + "Version 4.2 Server Release Support Changes" + ], + "paragraphs": "On this page, you can learn how to upgrade your driver to a new version. This page also\nincludes the changes you must make to your application to upgrade your driver\nwithout losing functionality, if applicable. 
Before you upgrade, perform the following actions: To upgrade your driver version, run the following command in your application's\ndirectory: To upgrade to a different version of the driver, replace the information after the\n @ symbol with your preferred version number. For more information about the\n npm install command, see the npm-install \nnpm documentation. Ensure the new driver version is compatible with the MongoDB Server version\nyour application connects to and the version of Node.js that your\napplication runs on. See the Compatibility \npage for this information. Address any breaking changes between the version of the driver\nyour application uses now and your planned upgrade version in the\n Breaking Changes section of this guide. To learn\nmore about the MongoDB Server release compatibility changes, see the\n Server Release Compatibility Changes section. You can minimize the amount of changes that you must make to your\napplication when upgrading driver versions by using the\n Stable API . A breaking change is a modification in a convention or behavior in\na specific version of the driver that may prevent your application from\nworking as expected. The breaking changes in this section are categorized by the major\nversion releases that introduced them. When upgrading driver versions,\naddress all the breaking changes between your current version and the\nplanned upgrade version. For example, if you are upgrading the driver\nfrom v3.x to v5.x, address all breaking changes listed under v4.0 and\nv5.0. Version 6.0 of the Node.js driver requires Node.js v16.20.1 or later. The driver removes support for the addUser() helper command. Use the\n createUser MongoDB Shell command instead. The driver removes support for the collStats operation. Use the\n $collStats aggregation operator\ninstead. The driver removes all the deprecated ssl -prefixed options and the\n tlsCertificateFile option in the MongoClientOptions type.\nCreate a SecureContext object or set the tls -prefixed options\nin your MongoClientOptions instance instead. The driver reads files set in the tlsCAFile and\n tlsCertificateKeyFile connection options when you call the\n MongoClient.connect() method, not when you create the\n MongoClient instance. The driver removes the keepAlive and keepAliveInitialDelay connection\noptions. The value of keepAlive is permanently set to true and the\nvalue of keepAliveInitialDelay is set to 300000 milliseconds (300\nseconds). The Db.command() method accepts only options that are not related\nto a specific command. To learn more about these options, see the\n Command Options section of the Run a\nCommand guide. If you add mongodb-client-encryption as a dependency,\nthe major version number must match that of the Node.js driver. For example,\nNode.js driver v6.x.x requires mongodb-client-encryption v6.x.x. Automatic Encryption methods are now in the Node.js driver. You must\nimport these methods from the driver instead of from\n mongodb-client-encryption . Removed the ObjectId constructor that accepted a 12-character string. Modified abortTransaction() and commitTransaction() methods to return\n null instead of the raw command results. Removed connection option helpers that accepted values other than true \nor false as booleans. You must provide either true or false values in\nthe connection string or to the MongoClient constructor. Removed the Binary BSON type constructor that accepted a string. The Binary.write() method no longer accepts a string to write to the binary\nBSON object. 
The ClientEncryption API returns promises instead of callbacks. The socks package, which enables SOCKS5 proxy support, is a\npeer-optional dependency. You must install the package to enable\nSOCKS5 in your application. To learn more, see Enable SOCKS5 Proxy Support . If you start a session on a client, then pass that session to a\ndifferent client, the driver throws an error when you\nperform any operations in the session. The includeResultMetadata option for compound operation methods is\n false by default. See the Built-in Methods \nsection of the Compound Operations guide for more information. The withSession() method returns the value that the provided\nfunction returns. In previous driver versions, this method returns\n undefined . The withTransaction() method returns the value that the\ncallback returns. In previous driver versions, this method\nreturns the server command response, which varies depending on the MongoDB\nServer version or type that the driver connects to. To learn more\nabout transactions, see the Perform a Transaction usage\nexamples and the Transactions guide. Raised the optional kerberos dependency minimum version to 2.0.1 and\nremoved support for version 1.x. Raised the optional zstd dependency minimum version to 1.1.0. The driver is no longer compatible with Node.js v12 or earlier. If you want to use\nthis version of the driver, you must use Node.js v14.20.1 or greater. The driver removes support for callbacks in favor of a promise-based API.\nThe following list provides some strategies for callback users to adopt this\nversion: For more information about these strategies, see\n the v5.0 changelog . Migrate to the promise-based API (recommended) Use the promise-based API and util.callbackify Add mongodb-legacy to continue using callbacks The driver removes support for the Collection.insert() ,\n Collection.update() , and Collection.remove() helper methods.\nThe following list provides instructions on how to replace the\nfunctionality of the removed methods: Migrate from Collection.insert() to insertOne() or insertMany() Migrate from Collection.update() to updateOne() or updateMany() Migrate from Collection.remove() to deleteOne() or deleteMany() The driver no longer includes AWS SDK modules by default. The driver no longer automatically imports the bson-ext package. The driver removes support for custom Promise libraries. The driver no\nlonger supports the promiseLibrary option of MongoClient and the Promise.set \nexport that allows specifying a custom Promise library. The driver removes support for the Collection.mapReduce() helper. The BulkWriteResult type no longer has the publicly enumerable\n result property. The following types, options, and methods have been removed: BulkResult.lastOp() method opTime property of BulkResult BulkWriteOptions.keepGoing option WriteConcernError.err() method AddUserOptions.digestPassword option Kerberos gssapiCanonicalizeHostName option slaveOk options and method removed in favor of secondaryOk ObjectID type removed in favor of ObjectId AsyncIterator interface removed in favor of AsyncGenerator For more information about these changes, see\n the v4.0 changelog . The driver is no longer compatible with Node.js v12.8 or earlier. If you\nwant to use this version of the driver, you must use Node.js v12.9 or greater. Cursor types no longer extend Readable directly. You cannot use a ChangeStream instance as an iterator after using\nit as an EventEmitter . 
You also cannot do the reverse\u2014using an\n EventEmitter instance as an iterator after using it as a ChangeStream . The following methods no longer accept a callback parameter: Collection.find() Collection.aggregate() Db.aggregate() The default value of the maxPoolSize connection option is now\n 100 . The driver no longer supports the gssapiServiceName Kerberos\noption. Use authMechanismProperties.SERVICE_NAME instead. The driver no longer accepts non-boolean types, such as 0 or\n 1 , for boolean options. The db.collection type no longer accepts a callback. The Db type is no longer an EventEmitter . You can listen to\nany events directly from the MongoClient instance. The driver removes support for the Collection.group() helper. The driver no longer includes the deprecated GridStore API. A server release compatibility change is a modification\nto the driver that discontinues support for a set of\nMongoDB Server versions. The driver discontinues support for a MongoDB Server version after it reaches\nend-of-life (EOL). To learn more about the MongoDB support for EOL products,\nsee the Legacy Support Policy . The v4.2 driver drops support for MongoDB Server v3.4 and earlier.\nTo use the v4.2 driver, your MongoDB Server must be v3.6 or later. To learn\nhow to upgrade your MongoDB Server deployment, see Release\nNotes in the MongoDB Server manual.", + "code": [ + { + "lang": "bash", + "value": "npm install mongodb@6.8" + } + ], + "preview": "On this page, you can learn how to upgrade your driver to a new version. This page also\nincludes the changes you must make to your application to upgrade your driver\nwithout losing functionality, if applicable.", + "tags": "backwards compatibility, update", + "facets": { + "genre": ["reference"], + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "usage-examples/bulkWrite", + "title": "Perform Bulk Operations", + "headings": ["Example"], + "paragraphs": "The bulkWrite() method performs batch write operations against a\n single collection. This method reduces the number of network round trips from\nyour application to the server which therefore increases the throughput and\nperformance. Bulk writes return a collection of results for all operations\nonly after all operations passed to the method complete. You can specify one or more of the following write operations in\n bulkWrite() : The bulkWrite() method accepts the following parameters: If you create an index with a unique index \nconstraint, you might encounter a duplicate key write error during an\noperation in the following format: Similarly, if you attempt to perform a bulk write against a collection\nthat uses schema validation , you may\nencounter warnings or errors related to the formatting of inserted or\nmodified documents. insertOne updateOne updateMany deleteOne deleteMany replaceOne operations : specifies the bulk write operations to\nperform. Pass each operation to bulkWrite() as an object in\nan array. For examples that show the syntax for each write operation, see\nthe bulkWrite API documentation . options : optional settings that affect the execution\nof the operation, such as whether the write operations executes in\nsequential order and the write concern. By default, MongoDB executes bulk write operations one-by-one in the specified order\n(serially). During an ordered bulk write, if an error occurs during the processing of an\noperation, MongoDB returns without processing the remaining operations in the list. 
In\ncontrast, when ordered is false , MongoDB continues to process remaining write\noperations in the list. Unordered operations are theoretically faster since MongoDB can\nexecute them in parallel, but only use them if the writes do not depend on order. The following code sample performs a bulk write operation on the\n theaters collection in the sample_mflix database. The example call\nto bulkWrite() includes examples of insertOne , updateMany , and\n deleteOne write operations: Running the preceding example results in the following output: You can use this example to connect to an instance of MongoDB\nand interact with a database that contains sample data. To learn more about connecting to your MongoDB\ninstance and loading a sample dataset, see the Usage Examples\nguide .", + "code": [ + { + "lang": "sh", + "value": "Error during bulkWrite, BulkWriteError: E11000 duplicate key error collection: ..." + }, + { + "lang": "javascript", + "value": "BulkWriteResult {\n insertedCount: 2,\n matchedCount: 1,\n modifiedCount: 1,\n deletedCount: 0,\n upsertedCount: 0,\n upsertedIds: {},\n insertedIds: {\n '0': new ObjectId(\"...\"),\n '1': new ObjectId(\"...\")\n }\n}" + }, + { + "lang": "javascript", + "value": "// Bulk write operation\n\n// Import MongoClient from the MongoDB node driver package\nconst { MongoClient } = require(\"mongodb\");\n\n// Replace the uri string with your MongoDB deployment's connection string\nconst uri = \"\";\n\nconst client = new MongoClient(uri);\n\nasync function run() {\n try {\n const database = client.db(\"sample_mflix\");\n const theaters = database.collection(\"theaters\");\n\n // Insert a new document into the \"theaters\" collection\n const result = await theaters.bulkWrite([\n {\n insertOne: {\n document: {\n location: {\n address: {\n street1: \"3 Main St.\",\n city: \"Anchorage\",\n state: \"AK\",\n zipcode: \"99501\",\n },\n },\n },\n },\n },\n {\n insertOne: {\n document: {\n location: {\n address: {\n street1: \"75 Penn Plaza\",\n city: \"New York\",\n state: \"NY\",\n zipcode: \"10001\",\n },\n },\n },\n },\n },\n {\n // Update documents that match the specified filter\n updateMany: {\n filter: { \"location.address.zipcode\": \"44011\" },\n update: { $set: { is_in_ohio: true } },\n upsert: true,\n },\n },\n {\n // Delete a document that matches the specified filter\n deleteOne: { filter: { \"location.address.street1\": \"221b Baker St\" } },\n },\n ]);\n // Log the result of the bulk write operation \n console.log(result);\n } finally {\n // Close the database connection when the operations are completed or if an error occurs\n await client.close();\n }\n}\nrun().catch(console.dir);\n" + }, + { + "lang": "typescript", + "value": "import { MongoClient } from \"mongodb\";\n\n// Replace the uri string with your MongoDB deployment's connection string.\nconst uri = \"\";\n\nconst client = new MongoClient(uri);\n\ninterface Address {\n street1: string;\n city: string;\n state: string;\n zipcode: string;\n}\n\ninterface Theater {\n location: { address: Address };\n is_in_ohio?: boolean;\n}\n\nasync function run() {\n try {\n const database = client.db(\"sample_mflix\");\n const theaters = database.collection<Theater>(\"theaters\");\n\n const result = await theaters.bulkWrite([\n {\n insertOne: {\n document: {\n location: {\n address: {\n street1: \"3 Main St.\",\n city: \"Anchorage\",\n state: \"AK\",\n zipcode: \"99501\",\n },\n },\n },\n },\n },\n {\n insertOne: {\n document: {\n location: {\n address: {\n street1: \"75 Penn Plaza\",\n city: \"New York\",\n state: 
\"NY\",\n zipcode: \"10001\",\n },\n },\n },\n },\n },\n {\n updateMany: {\n // Important: You lose type safety when you use dot notation in queries\n filter: { \"location.address.zipcode\": \"44011\" },\n update: { $set: { is_in_ohio: true } },\n upsert: true,\n },\n },\n {\n deleteOne: {\n filter: { \"location.address.street1\": \"221b Baker St\" },\n },\n },\n ]);\n\n console.log(result);\n } finally {\n await client.close();\n }\n}\nrun().catch(console.dir);\n" + } + ], + "preview": "The bulkWrite() method performs batch write operations against a\nsingle collection. This method reduces the number of network round trips from\nyour application to the server which therefore increases the throughput and\nperformance. Bulk writes return a collection of results for all operations\nonly after all operations passed to the method complete.", + "tags": null, + "facets": { + "genre": ["tutorial"], + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "usage-examples/changeStream", + "title": "Watch for Changes", + "headings": [ + "Open a Change Stream", + "Examples", + "Iteration", + "Listener Function" + ], + "paragraphs": "You can watch for changes in MongoDB using the watch() method on the\nfollowing objects: For each object, the watch() method opens a change stream to\nemit change event documents when they occur. The watch() method optionally takes an aggregation pipeline which consists of an array of aggregation stages \nas the first parameter. The aggregation stages filter and transform the change events. In the following snippet, the $match stage matches all change event documents with a runtime value of less than\n15, filtering all others out. The watch() method accepts an options object as the second parameter. Refer to the links at the end of this\nsection for more information on the settings you can configure with this object. The watch() method returns an instance of a ChangeStream . You can read events from\nchange streams by iterating over them or listening for events. Select the tab that corresponds to the way you want to\nread events from the change stream: Collection Database MongoClient Starting in version 4.12, ChangeStream objects are async\niterables. With this change, you can use for-await loops to\nretrieve events from an open change stream: You can call methods on the ChangeStream object such as: hasNext() to check for remaining documents in the stream next() to request the next document in the stream close() to close the ChangeStream You can attach listener functions to the ChangeStream object\nby calling the on() method. This method is inherited from the\nJavascript EventEmitter class. Pass the string \"change\" as\nthe first parameter and your listener function as the second parameter as shown below: The listener function triggers when a change event is emitted. You\ncan specify logic in the listener to process the change event document\nwhen it is received. You can control the change stream by calling pause() to stop emitting events or resume() to continue to emit events. To stop processing change events, call the close() method on the\n ChangeStream instance. This closes the change stream and frees resources. Using a ChangeStream in EventEmitter and Iterator mode\nconcurrently is not supported by the driver and causes an error. This\nis to prevent undefined behavior, where the driver cannot guarantee\nwhich consumer receives documents first. 
The following example opens a change stream on the haikus collection in\nthe insertDB database and prints change events as they occur: When you run this code and then make a change to the haikus \ncollection, such as performing an insert or delete operation, you can\nsee the change event document printed in your terminal. For example, if you insert a document to the collection, the code prints\nthe following output: You can use this example to connect to an instance of MongoDB\nand interact with a database that contains sample data. To learn more about connecting to your MongoDB\ninstance and loading a sample dataset, see the Usage Examples\nguide . The JavaScript and TypeScript code snippets above are identical. There are no\nTypeScript specific features of the driver relevant to this use case. Change events that contain information on update operations only return the modified\nfields by default rather than the full updated document. You can configure\nyour change stream to also return the most current version of the document\nby setting the fullDocument field of the options object to\n \"updateLookup\" as follows: The following example opens a change stream on the haikus collection in\nthe insertDB database. Let's create a listener function to receive and\nprint change events that occur on the collection. First, open the change stream on the collection and then define a listener\non the change stream using the on() method. Once you set the\nlistener, generate a change event by performing a change to the collection. To generate the change event on the collection, let's use the insertOne() \nmethod to add a new document. Since insertOne() may run before the\nlistener function can register, we use a timer, defined as\n simulateAsyncPause to wait 1 second before executing the insert. We also use simulateAsyncPause after the insertion of the document.\nThis provides ample time for the listener function to receive the change\nevent and for the listener to complete its execution before\nclosing the ChangeStream instance using the close() method. Visit the following resources for more material on the classes and\nmethods mentioned on this page: The timers used in this example are only for demonstration\npurposes. They make sure that there is enough time to register\nthe listener and have the listener process the change event before\nexiting. The JavaScript and TypeScript code snippets above are identical. There are no\nTypeScript specific features of the driver relevant to this use case. 
Change streams Change events Aggregation pipeline Aggregation stages ChangeStream class API documentation Collection.watch() , Db.watch() , MongoClient.watch() API documentation", + "code": [ + { + "lang": "javascript", + "value": "const pipeline = [ { $match: { runtime: { $lt: 15 } } } ];\nconst changeStream = myColl.watch(pipeline);" + }, + { + "lang": "js", + "value": "for await (const change of changeStream) {\n console.log(\"Received change: \", change);\n}" + }, + { + "lang": "javascript", + "value": "changeStream.on(\"change\", (changeEvent) => { /* your listener function */ });" + }, + { + "lang": "javascript", + "value": "changeStream.close();" + }, + { + "lang": "none", + "value": "Received change:\n{\n _id: {\n _data: '...'\n },\n operationType: 'insert',\n clusterTime: new Timestamp({ t: 1675800603, i: 31 }),\n fullDocument: {\n _id: new ObjectId(\"...\"),\n ...\n },\n ns: { db: 'insertDB', coll: 'haikus' },\n documentKey: { _id: new ObjectId(\"...\") }\n}" + }, + { + "lang": "javascript", + "value": "// Watch for changes in a collection by using a change stream\nimport { MongoClient } from \"mongodb\";\n\n// Replace the uri string with your MongoDB deployment's connection string.\nconst uri = \"\";\n\nconst client = new MongoClient(uri);\n\n// Declare a variable to hold the change stream\nlet changeStream;\n\n// Define an asynchronous function to manage the change stream\nasync function run() {\n try {\n const database = client.db(\"insertDB\");\n const haikus = database.collection(\"haikus\");\n\n // Open a Change Stream on the \"haikus\" collection\n changeStream = haikus.watch();\n\n // Print change events as they occur\n for await (const change of changeStream) {\n console.log(\"Received change:\\n\", change);\n }\n // Close the change stream when done\n await changeStream.close();\n \n } finally {\n // Close the MongoDB client connection\n await client.close();\n }\n}\nrun().catch(console.dir);\n" + }, + { + "lang": "javascript", + "value": "// Watch for changes in a collection by using a change stream\nimport { MongoClient } from \"mongodb\";\n\n// Replace the uri string with your MongoDB deployment's connection string.\nconst uri = \"\";\n\nconst client = new MongoClient(uri);\n\n// Declare a variable to hold the change stream\nlet changeStream;\n\n// Define an asynchronous function to manage the change stream\nasync function run() {\n try {\n const database = client.db(\"insertDB\");\n const haikus = database.collection(\"haikus\");\n\n // Open a Change Stream on the \"haikus\" collection\n changeStream = haikus.watch();\n\n // Print change events as they occur\n for await (const change of changeStream) {\n console.log(\"Received change:\\n\", change);\n }\n // Close the change stream when done\n await changeStream.close();\n \n } finally {\n // Close the MongoDB client connection\n await client.close();\n }\n}\nrun().catch(console.dir);\n" + }, + { + "lang": "javascript", + "value": "const options = { fullDocument: \"updateLookup\" };\n// This could be any pipeline.\nconst pipeline = [];\n\nconst changeStream = myColl.watch(pipeline, options);" + }, + { + "lang": "javascript", + "value": "/* Change stream listener */\n\nimport { MongoClient } from \"mongodb\";\n\n// Replace the uri string with your MongoDB deployment's connection string\nconst uri = \"\";\n\nconst client = new MongoClient(uri);\n\nconst simulateAsyncPause = () =>\n new Promise(resolve => {\n setTimeout(() => resolve(), 1000);\n });\n\nlet changeStream;\nasync function run() {\n try {\n const database 
= client.db(\"insertDB\");\n const haikus = database.collection(\"haikus\");\n\n // Open a Change Stream on the \"haikus\" collection\n changeStream = haikus.watch();\n\n // Set up a change stream listener when change events are emitted\n changeStream.on(\"change\", next => {\n // Print any change event\n console.log(\"received a change to the collection: \\t\", next);\n });\n\n // Pause before inserting a document\n await simulateAsyncPause();\n\n // Insert a new document into the collection\n await myColl.insertOne({\n title: \"Record of a Shriveled Datum\",\n content: \"No bytes, no problem. Just insert a document, in MongoDB\",\n });\n\n // Pause before closing the change stream\n await simulateAsyncPause();\n\n // Close the change stream and print a message to the console when it is closed\n await changeStream.close(); \n console.log(\"closed the change stream\");\n } finally {\n // Close the database connection on completion or error\n await client.close();\n }\n}\nrun().catch(console.dir);" + }, + { + "lang": "javascript", + "value": "/* Change stream listener */\n\nimport { MongoClient } from \"mongodb\";\n\n// Replace the uri string with your MongoDB deployment's connection string\nconst uri = \"\";\n\nconst client = new MongoClient(uri);\n\nconst simulateAsyncPause = () =>\n new Promise(resolve => {\n setTimeout(() => resolve(), 1000);\n });\n\nlet changeStream;\nasync function run() {\n try {\n const database = client.db(\"insertDB\");\n const haikus = database.collection(\"haikus\");\n\n // Open a Change Stream on the \"haikus\" collection\n changeStream = haikus.watch();\n\n // Set up a change stream listener when change events are emitted\n changeStream.on(\"change\", next => {\n // Print any change event\n console.log(\"received a change to the collection: \\t\", next);\n });\n\n // Pause before inserting a document\n await simulateAsyncPause();\n\n // Insert a new document into the collection\n await myColl.insertOne({\n title: \"Record of a Shriveled Datum\",\n content: \"No bytes, no problem. Just insert a document, in MongoDB\",\n });\n\n // Pause before closing the change stream\n await simulateAsyncPause();\n\n // Close the change stream and print a message to the console when it is closed\n await changeStream.close(); \n console.log(\"closed the change stream\");\n } finally {\n // Close the database connection on completion or error\n await client.close();\n }\n}\nrun().catch(console.dir);" + } + ], + "preview": "You can watch for changes in MongoDB using the watch() method on the\nfollowing objects:", + "tags": null, + "facets": { + "genre": ["tutorial"], + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "usage-examples/command", + "title": "Run a Command", + "headings": ["Example"], + "paragraphs": "You can execute database commands by using the\n command() method on a Db \ninstance. You can specify a command and options in a document. To run the\ncommand, pass this document to the command() method. To see a full\nlist of database commands, see Database Commands in the Server manual. You can specify optional command behavior by passing a\n RunCommandOptions object to the command() method. To learn more\nabout the supported options, see the\n Db.command() API documentation . Use the MongoDB Shell for administrative tasks instead of\nthe Node.js driver whenever possible. 
Running the preceding command, you see the following output: You can use this example to connect to an instance of MongoDB\nand interact with a database that contains sample data. To learn more about connecting to your MongoDB\ninstance and loading a sample dataset, see the Usage Examples\nguide . The JavaScript and TypeScript code snippets above are identical. There are no\nTypeScript specific features of the driver relevant to this use case.", + "code": [ + { + "lang": "javascript", + "value": "{\n db: 'sample_mflix',\n collections: 6,\n views: 0,\n objects: 75620,\n ...\n}" + }, + { + "lang": "javascript", + "value": "/* Run a database command */\n\nimport { MongoClient } from \"mongodb\";\n\n// Replace the uri string with your MongoDB deployment's connection string\nconst uri = \"\";\n\nconst client = new MongoClient(uri);\n\nasync function run() {\n try {\n // Get the \"sample_mflix\" database\n const db = client.db(\"sample_mflix\");\n\n // Find and print the storage statistics for the \"sample_mflix\" database using the 'dbStats' command\n const result = await db.command({\n dbStats: 1,\n });\n console.log(result);\n } finally {\n // Close the database connection on completion or error\n await client.close();\n }\n}\nrun().catch(console.dir);\n" + }, + { + "lang": "javascript", + "value": "/* Run a database command */\n\nimport { MongoClient } from \"mongodb\";\n\n// Replace the uri string with your MongoDB deployment's connection string\nconst uri = \"\";\n\nconst client = new MongoClient(uri);\n\nasync function run() {\n try {\n // Get the \"sample_mflix\" database\n const db = client.db(\"sample_mflix\");\n\n // Find and print the storage statistics for the \"sample_mflix\" database using the 'dbStats' command\n const result = await db.command({\n dbStats: 1,\n });\n console.log(result);\n } finally {\n // Close the database connection on completion or error\n await client.close();\n }\n}\nrun().catch(console.dir);\n" + } + ], + "preview": "You can execute database commands by using the\ncommand() method on a Db\ninstance.", + "tags": "code example, multiple, modify, customize, debug", + "facets": { + "genre": ["tutorial"], + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "usage-examples/count", + "title": "Count Documents", + "headings": ["Example"], + "paragraphs": "The Node.js driver provides two methods for counting documents in a\ncollection: estimatedDocumentCount() is faster than countDocuments() because\nthe estimation uses the collection's metadata rather than scanning the\ncollection. In contrast, countDocuments() takes longer to return, but\nprovides an accurate count of the number of documents and supports\nspecifying a filter. Choose the appropriate method for your workload. To specify which documents you wish to count, countDocuments() \naccepts a query parameter.\n countDocuments() counts the documents that match the specified query. countDocuments() and estimatedDocumentCount() support optional\nsettings that affect the method's execution. Refer to the reference\ndocumentation for each method for more information. collection.countDocuments() returns the number of documents in\nthe collection that match the specified query. If you specify an empty\nquery document, countDocuments() returns the total number of\ndocuments in the collection. collection.estimatedDocumentCount() returns an\n estimation of the number of documents in the collection based on\ncollection metadata. 
You can improve performance when using countDocuments() to return the\ntotal number of documents in a collection by avoiding a collection scan. To\ndo this, use a hint to take\nadvantage of the built-in index on the _id field. Use this technique only\nwhen calling countDocuments() with an empty query parameter. The following example estimates the number of documents in the\n movies collection in the sample_mflix database, and then returns\nan accurate count of the number of documents in the movies \ncollection with Canada in the countries field. Running the preceding sample code, you see the following output: You can use this example to connect to an instance of MongoDB\nand interact with a database that contains sample data. To learn more about connecting to your MongoDB\ninstance and loading a sample dataset, see the Usage Examples\nguide . The JavaScript and TypeScript code snippets above are identical. There are no\nTypeScript specific features of the driver relevant to this use case.", + "code": [ + { + "lang": "javascript", + "value": "collection.countDocuments({}, { hint: \"_id_\" });" + }, + { + "lang": "none", + "value": "Estimated number of documents in the movies collection: 23541\nNumber of movies from Canada: 1349" + }, + { + "lang": "javascript", + "value": "// Count documents in a collection\n\nimport { MongoClient } from \"mongodb\";\n\n// Replace the uri string with your MongoDB deployment's connection string\nconst uri = \"\";\n\nconst client = new MongoClient(uri);\n\nasync function run() {\n try {\n const database = client.db(\"sample_mflix\");\n const movies = database.collection(\"movies\");\n\n /* Print the estimate of the number of documents in the\n \"movies\" collection */\n const estimate = await movies.estimatedDocumentCount();\n console.log(`Estimated number of documents in the movies collection: ${estimate}`);\n\n /* Print the number of documents in the \"movies\" collection that\n match the specified query */\n const query = { countries: \"Canada\" };\n const countCanada = await movies.countDocuments(query);\n console.log(`Number of movies from Canada: ${countCanada}`);\n } finally {\n // Close the connection after the operations complete\n await client.close();\n }\n}\n// Run the program and print any thrown exceptions\nrun().catch(console.dir);\n" + }, + { + "lang": "javascript", + "value": "// Count documents in a collection\n\nimport { MongoClient } from \"mongodb\";\n\n// Replace the uri string with your MongoDB deployment's connection string\nconst uri = \"\";\n\nconst client = new MongoClient(uri);\n\nasync function run() {\n try {\n const database = client.db(\"sample_mflix\");\n const movies = database.collection(\"movies\");\n\n /* Print the estimate of the number of documents in the\n \"movies\" collection */\n const estimate = await movies.estimatedDocumentCount();\n console.log(`Estimated number of documents in the movies collection: ${estimate}`);\n\n /* Print the number of documents in the \"movies\" collection that\n match the specified query */\n const query = { countries: \"Canada\" };\n const countCanada = await movies.countDocuments(query);\n console.log(`Number of movies from Canada: ${countCanada}`);\n } finally {\n // Close the connection after the operations complete\n await client.close();\n }\n}\n// Run the program and print any thrown exceptions\nrun().catch(console.dir);\n" + } + ], + "preview": "The Node.js driver provides two methods for counting documents in a\ncollection:", + "tags": null, + "facets": { + "genre": ["tutorial"], 
+ "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "usage-examples/delete-operations", + "title": "Delete Operations", + "headings": [], + "paragraphs": "Delete a Document Delete Multiple Documents", + "code": [], + "preview": null, + "tags": null, + "facets": { + "genre": ["tutorial"], + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "usage-examples/deleteMany", + "title": "Delete Multiple Documents", + "headings": ["Example"], + "paragraphs": "You can delete multiple documents in a collection at once using the\n collection.deleteMany() method.\nPass a query document to the deleteMany() method to specify a subset\nof documents in the collection to delete. If you do not provide a query\ndocument (or if you provide an empty document), MongoDB matches all documents\nin the collection and deletes them. While you can use deleteMany() \nto delete all documents in a collection, consider using\n drop() instead for better performance\nand clearer code. You can specify more options in the options object passed in\nthe second parameter of the deleteMany() method. For more detailed\ninformation, see the\n deleteMany() API documentation . The following snippet deletes multiple documents from the movies \ncollection. It uses a query document that configures the query to\nmatch and delete movies with the title \"Santa Claus\". Running the preceding example for the first time, you see the following output: If you run the example more than once, you see the following output because\nyou deleted the matching documents in the first run: You can use this example to connect to an instance of MongoDB\nand interact with a database that contains sample data. 
To learn more about connecting to your MongoDB\ninstance and loading a sample dataset, see the Usage Examples\nguide .", + "code": [ + { + "lang": "none", + "value": "Deleted 19 documents" + }, + { + "lang": "none", + "value": "Deleted 0 documents" + }, + { + "lang": "javascript", + "value": "// Delete multiple documents\n\nimport { MongoClient } from \"mongodb\";\n\n// Replace the uri string with your MongoDB deployment's connection string.\nconst uri = \"\";\n\nconst client = new MongoClient(uri);\n\nasync function run() {\n try {\n const database = client.db(\"sample_mflix\");\n const movies = database.collection(\"movies\");\n\n /* Delete all documents that match the specified regular\n expression in the title field from the \"movies\" collection */\n const query = { title: { $regex: \"Santa\" } };\n const result = await movies.deleteMany(query);\n\n // Print the number of deleted documents\n console.log(\"Deleted \" + result.deletedCount + \" documents\");\n } finally {\n // Close the connection after the operation completes\n await client.close();\n }\n}\n// Run the program and print any thrown exceptions\nrun().catch(console.dir);\n" + }, + { + "lang": "typescript", + "value": "// Delete multiple documents\n\nimport { MongoClient } from \"mongodb\";\n\n// Replace the uri string with your MongoDB deployment's connection string\nconst uri = \"\";\n\nconst client = new MongoClient(uri);\n\nasync function run() {\n try {\n const database = client.db(\"sample_mflix\");\n const movies = database.collection(\"movies\");\n\n /* Delete all documents that match the specified regular\n expression in the title field from the \"movies\" collection */\n const result = await movies.deleteMany({ title: { $regex: \"Santa\" } });\n \n // Print the number of deleted documents\n console.log(\"Deleted \" + result.deletedCount + \" documents\");\n } finally {\n // Close the connection after the operation completes\n await client.close();\n }\n}\n// Run the program and print any thrown exceptions\nrun().catch(console.dir);\n" + } + ], + "preview": "You can delete multiple documents in a collection at once using the\ncollection.deleteMany() method.\nPass a query document to the deleteMany() method to specify a subset\nof documents in the collection to delete. If you do not provide a query\ndocument (or if you provide an empty document), MongoDB matches all documents\nin the collection and deletes them. While you can use deleteMany()\nto delete all documents in a collection, consider using\ndrop() instead for better performance\nand clearer code.", + "tags": null, + "facets": { + "genre": ["tutorial"], + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "usage-examples/deleteOne", + "title": "Delete a Document", + "headings": ["Example"], + "paragraphs": "You can delete a single document in a collection with\n collection.deleteOne() .\nThe deleteOne() method uses a query document that you provide\nto match the subset of the documents in the collection that match\nthe query. If you do not provide a query document (or if you provide an\nempty document), MongoDB matches all documents in the collection and\ndeletes the first match. You can specify more query options using the\n options object passed as the second parameter of the\n deleteOne method. For more information on this method,\nsee the\n deleteOne() API documentation . 
If your application requires the deleted document after deletion,\nconsider using the\n collection.findOneAndDelete() \nmethod, which has a similar interface to deleteOne() but also\nreturns the deleted document. The following snippet deletes a single document from the movies \ncollection. It uses a query document that configures the query\nto match movies with a title value of \"Annie Hall\". Running the preceding example, you see the following output: If you run the example more than once, you see the following output because\nyou deleted the matching document in the first run: You can use this example to connect to an instance of MongoDB\nand interact with a database that contains sample data. To learn more about connecting to your MongoDB\ninstance and loading a sample dataset, see the Usage Examples\nguide . The JavaScript and TypeScript code snippets above are identical. There are no\nTypeScript specific features of the driver relevant to this use case.", + "code": [ + { + "lang": "none", + "value": "Successfully deleted one document." + }, + { + "lang": "none", + "value": "No documents matched the query. Deleted 0 documents." + }, + { + "lang": "javascript", + "value": "// Delete a document\n\nimport { MongoClient } from \"mongodb\";\n\n// Replace the uri string with your MongoDB deployment's connection string\nconst uri = \"\";\n\nconst client = new MongoClient(uri);\n\nasync function run() {\n try {\n const database = client.db(\"sample_mflix\");\n const movies = database.collection(\"movies\");\n\n /* Delete the first document in the \"movies\" collection that matches\n the specified query document */\n const query = { title: \"Annie Hall\" };\n const result = await movies.deleteOne(query);\n\n /* Print a message that indicates whether the operation deleted a\n document */\n if (result.deletedCount === 1) {\n console.log(\"Successfully deleted one document.\");\n } else {\n console.log(\"No documents matched the query. Deleted 0 documents.\");\n }\n } finally {\n // Close the connection after the operation completes\n await client.close();\n }\n}\n// Run the program and print any thrown exceptions\nrun().catch(console.dir);\n" + }, + { + "lang": "javascript", + "value": "// Delete a document\n\nimport { MongoClient } from \"mongodb\";\n\n// Replace the uri string with your MongoDB deployment's connection string\nconst uri = \"\";\n\nconst client = new MongoClient(uri);\n\nasync function run() {\n try {\n const database = client.db(\"sample_mflix\");\n const movies = database.collection(\"movies\");\n\n /* Delete the first document in the \"movies\" collection that matches\n the specified query document */\n const query = { title: \"Annie Hall\" };\n const result = await movies.deleteOne(query);\n\n /* Print a message that indicates whether the operation deleted a\n document */\n if (result.deletedCount === 1) {\n console.log(\"Successfully deleted one document.\");\n } else {\n console.log(\"No documents matched the query. Deleted 0 documents.\");\n }\n } finally {\n // Close the connection after the operation completes\n await client.close();\n }\n}\n// Run the program and print any thrown exceptions\nrun().catch(console.dir);\n" + } + ], + "preview": "You can delete a single document in a collection with\ncollection.deleteOne().\nThe deleteOne() method uses a query document that you provide\nto match the subset of the documents in the collection that match\nthe query. 
If you do not provide a query document (or if you provide an\nempty document), MongoDB matches all documents in the collection and\ndeletes the first match.", + "tags": null, + "facets": { + "genre": ["tutorial"], + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "usage-examples/distinct", + "title": "Retrieve Distinct Values of a Field", + "headings": ["Example"], + "paragraphs": "You can retrieve a list of distinct values for a field across a collection by using\nthe collection.distinct() \nmethod. Call the distinct() method on a Collection object with a document\nfield name parameter as a String to produce a list that contains one of each\nof the different values found in the specified document field as shown below: You can specify a document field within an embedded document using\n dot notation . If you call\n distinct() on a document field that contains an array, the method\ntreats each element as a separate value. See the following example of\na method call to the wins field in the awards subdocument: You can specify more query options using the options object passed\nas the third parameter to the distinct() method. For details on the\nquery parameters, see the\n distinct() method in the API documentation . If you specify a value for the document field name that is not of type\n String such as a Document , Array , Number , or null ,\nthe method does not execute and returns a TypeMismatch error with a\nmessage that resembles the following: Visit Retrieve Distinct Values for more information about the distinct() \nmethod. \"key\" had the wrong type. Expected string, found <non-string type> The following snippet retrieves a list of distinct values for the year \ndocument field from the movies collection. It uses a query document to\nmatch movies that include \"Barbra Streisand\" as a director . Running the preceding example, you see the following output: You can use this example to connect to an instance of MongoDB\nand interact with a database that contains sample data. 
To learn more about connecting to your MongoDB\ninstance and loading a sample dataset, see the Usage Examples\nguide .", + "code": [ + { + "lang": "javascript", + "value": "const distinctValues = myColl.distinct(\"countries\", query);" + }, + { + "lang": "javascript", + "value": "const distinctValues = myColl.distinct(\"awards.wins\", query);" + }, + { + "lang": "json", + "value": "[ 1983, 1991, 1996 ]" + }, + { + "lang": "javascript", + "value": "import { MongoClient } from \"mongodb\";\n\n// Replace the uri string with your MongoDB deployment's connection string.\nconst uri = \"\";\n\nconst client = new MongoClient(uri);\n\nasync function run() {\n try {\n \n // Get the database and collection on which to run the operation\n const database = client.db(\"sample_mflix\");\n const movies = database.collection(\"movies\");\n\n // Specify the document field to find distinct values for\n const fieldName = \"year\";\n\n // Specify an optional query document to narrow results\n const query = { directors: \"Barbra Streisand\" };\n\n // Execute the distinct operation\n const distinctValues = await movies.distinct(fieldName, query);\n\n // Print the result\n console.log(distinctValues);\n } finally {\n await client.close();\n }\n}\nrun().catch(console.dir);\n" + }, + { + "lang": "typescript", + "value": "import { MongoClient } from \"mongodb\";\n\n// Replace the uri string with your MongoDB deployment's connection string.\nconst uri = \"\";\n\nconst client = new MongoClient(uri);\n\ninterface Movie {\n directors: string;\n year: number;\n}\n\nasync function run() {\n try {\n // define a database and collection on which to run the method\n const database = client.db(\"sample_mflix\");\n const movies = database.collection<Movie>(\"movies\");\n\n const distinctValues = await movies.distinct(\"year\", {\n directors: \"Barbra Streisand\",\n });\n\n console.log(distinctValues);\n } finally {\n await client.close();\n }\n}\nrun().catch(console.dir);\n" + } + ], + "preview": "You can retrieve a list of distinct values for a field across a collection by using\nthe collection.distinct()\nmethod. Call the distinct() method on a Collection object with a document\nfield name parameter as a String to produce a list that contains one of each\nof the different values found in the specified document field as shown below:", + "tags": null, + "facets": { + "genre": ["tutorial"], + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "usage-examples/find-operations", + "title": "Find Operations", + "headings": [], + "paragraphs": "Find a Document Find Multiple Documents", + "code": [], + "preview": "Learn by example: how to create queries and retrieve data from MongoDB by using the MongoDB Node.js driver.", + "tags": null, + "facets": { + "genre": ["tutorial"], + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "usage-examples/find", + "title": "Find Multiple Documents", + "headings": ["Compatibility", "Example"], + "paragraphs": "You can query for multiple documents in a collection with\n collection.find() . The find() method uses a query document that you\nprovide to match the subset of the documents in the collection that match the\nquery. If you don't provide a query document (or if you provide an empty\ndocument), MongoDB returns all documents in the collection. For more\ninformation on querying MongoDB, see our\n documentation on query documents . 
You can also define more query options such as\n sort \nand\n projection \nto configure the result set. You can specify these in the options\nparameter in your find() method call in sort and projection \nobjects. See collection.find() for more\ninformation on the parameters you can pass to the method. The find() method returns a FindCursor that\nmanages the results of your query. You can iterate through the matching\ndocuments using the for await...of syntax, or one of the following\n cursor methods : If no documents match the query, find() returns an empty cursor. next() toArray() You can use the Node.js driver to connect and use the find() method for\ndeployments hosted in the following environments: MongoDB Atlas : The fully\nmanaged service for MongoDB deployments in the cloud MongoDB Enterprise : The\nsubscription-based, self-managed version of MongoDB MongoDB Community : The\nsource-available, free-to-use, and self-managed version of MongoDB To learn more about finding documents in the Atlas UI for deployments hosted in MongoDB\nAtlas, see Create, View, Update, and Delete Documents . The following snippet finds documents from the movies collection. It\nuses the following parameters: Running the preceding example, you see the following output: The sort and projection options can also be specified as methods\n( sort() and project() ) chained to the find() method.\nThe following two commands are equivalent: A query document that configures the query to return only\nmovies with a runtime of less than 15 minutes. A sort that organizes returned documents in ascending order by\ntitle (alphabetical order in which \"A\" comes before \"Z\" and \"1\" before\n\"9\"). A projection that explicitly excludes the _id field from\nreturned documents and explicitly includes only the title and\n imdb object (and its embedded fields). You can use this example to connect to an instance of MongoDB\nand interact with a database that contains sample data. To learn more about connecting to your MongoDB\ninstance and loading a sample dataset, see the Usage Examples\nguide .", + "code": [ + { + "lang": "javascript", + "value": "{ title: '10 Minutes', imdb: { rating: 7.9, votes: 743, id: 339976 } }\n{ title: '3x3', imdb: { rating: 6.9, votes: 206, id: 1654725 } }\n{ title: '7:35 in the Morning', imdb: { rating: 7.3, votes: 1555, id: 406501 } }\n{ title: '8', imdb: { rating: 7.8, votes: 883, id: 1592502 } }\n..." 
+ }, + { + "lang": "javascript", + "value": "movies.find({ runtime: { $lt: 15 } }, { sort: { title: 1 }, projection: { _id: 0, title: 1, imdb: 1 }});\nmovies.find({ runtime: { $lt: 15 } }).sort({ title: 1 }).project({ _id: 0, title: 1, imdb: 1 });" + }, + { + "lang": "javascript", + "value": "import { MongoClient } from \"mongodb\";\n\n// Replace the uri string with your MongoDB deployment's connection string.\nconst uri = \"\";\n\nconst client = new MongoClient(uri);\n\nasync function run() {\n try {\n \n // Get the database and collection on which to run the operation\n const database = client.db(\"sample_mflix\");\n const movies = database.collection(\"movies\");\n\n // Query for movies that have a runtime less than 15 minutes\n const query = { runtime: { $lt: 15 } };\n\n const options = {\n // Sort returned documents in ascending order by title (A->Z)\n sort: { title: 1 },\n // Include only the `title` and `imdb` fields in each returned document\n projection: { _id: 0, title: 1, imdb: 1 },\n };\n\n // Execute query \n const cursor = movies.find(query, options);\n\n // Print a message if no documents were found\n if ((await movies.countDocuments(query)) === 0) {\n console.log(\"No documents found!\");\n }\n\n // Print returned documents\n for await (const doc of cursor) {\n console.dir(doc);\n }\n\n } finally {\n await client.close();\n }\n}\nrun().catch(console.dir);\n" + }, + { + "lang": "typescript", + "value": "import { MongoClient } from \"mongodb\";\n\n// Replace the uri string with your MongoDB deployment's connection string.\nconst uri = \"\";\n\nconst client = new MongoClient(uri);\n\ntype Minutes = number;\n\ninterface IMDB {\n rating: number;\n votes: number;\n id: number;\n}\n\ninterface Movie {\n title: string;\n imdb: IMDB;\n runtime: Minutes;\n}\n\nasync function run() {\n try {\n const database = client.db(\"sample_mflix\");\n const movies = database.collection<Movie>(\"movies\");\n\n const query = { runtime: { $lt: 15 } };\n const cursor = movies.find(\n query,\n {\n sort: { title: 1 },\n projection: { _id: 0, title: 1, imdb: 1 },\n }\n );\n\n if ((await movies.countDocuments(query)) === 0) {\n console.warn(\"No documents found!\");\n }\n\n for await (const doc of cursor) {\n console.dir(doc);\n } \n } finally {\n await client.close();\n }\n}\nrun().catch(console.dir);\n" + } + ], + "preview": "Learn how to retrieve multiple documents from MongoDB by using the Node.js driver.", + "tags": "code example, node.js, sample dataset", + "facets": { + "genre": ["reference"], + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "usage-examples/findOne", + "title": "Find a Document", + "headings": ["Compatibility", "Example"], + "paragraphs": "You can query for a single document in a collection with the\n collection.findOne() method. The findOne() method uses a query\ndocument that you provide to match only the subset of the documents in the\ncollection that match the query. If you don't provide a query document or if\nyou provide an empty document, MongoDB matches all documents in the\ncollection. The findOne() operation only returns the first matched\ndocument. For more information on querying MongoDB, see our\n documentation on query documents . You can also define more query options such as\n sort \nand projection \nto configure the returned document. You can specify more options\nin the options object passed as the second parameter of the\n findOne method. For detailed reference documentation, see\n collection.findOne() . 
You can use the Node.js driver to connect and use the findOne() method for\ndeployments hosted in the following environments: MongoDB Atlas : The fully\nmanaged service for MongoDB deployments in the cloud MongoDB Enterprise : The\nsubscription-based, self-managed version of MongoDB MongoDB Community : The\nsource-available, free-to-use, and self-managed version of MongoDB To learn more about finding documents in the Atlas UI for deployments hosted in MongoDB\nAtlas, see Create, View, Update, and Delete Documents . The following snippet finds a single document from the movies \ncollection. It uses the following parameters: Running the preceding example, you see the following output: A query document that configures the query to return only\nmovies with the title of exactly the text 'The Room' . A sort that organizes matched documents in descending order by\nrating, so if our query matches multiple documents the returned\ndocument will be the document with the highest rating. A projection that explicitly excludes the _id field from\nreturned documents and explicitly includes only the title and\n imdb object (and its embedded fields). You can use this example to connect to an instance of MongoDB\nand interact with a database that contains sample data. To learn more about connecting to your MongoDB\ninstance and loading a sample dataset, see the Usage Examples\nguide .", + "code": [ + { + "lang": "javascript", + "value": "{ title: 'The Room', imdb: { rating: 3.5, votes: 25673, id: 368226 } }" + }, + { + "lang": "javascript", + "value": "import { MongoClient } from \"mongodb\";\n\n// Replace the uri string with your MongoDB deployment's connection string.\nconst uri = \"\";\n\nconst client = new MongoClient(uri);\n\nasync function run() {\n try {\n \n // Get the database and collection on which to run the operation\n const database = client.db(\"sample_mflix\");\n const movies = database.collection(\"movies\");\n\n // Query for a movie that has the title 'The Room'\n const query = { title: \"The Room\" };\n\n const options = {\n // Sort matched documents in descending order by rating\n sort: { \"imdb.rating\": -1 },\n // Include only the `title` and `imdb` fields in the returned document\n projection: { _id: 0, title: 1, imdb: 1 },\n };\n\n // Execute query\n const movie = await movies.findOne(query, options);\n\n // Print the document returned by findOne()\n console.log(movie);\n } finally {\n await client.close();\n }\n}\nrun().catch(console.dir);\n" + }, + { + "lang": "typescript", + "value": "import { MongoClient } from \"mongodb\";\n\n// Replace the uri string with your MongoDB deployment's connection string.\nconst uri = \"\";\n\nconst client = new MongoClient(uri);\n\ninterface IMDB {\n rating: number;\n votes: number;\n id: number;\n}\n\nexport interface Movie {\n title: string;\n year: number;\n released: Date;\n plot: string;\n type: \"movie\" | \"series\";\n imdb: IMDB;\n}\n\ntype MovieSummary = Pick<Movie, \"title\" | \"imdb\">;\n\nasync function run(): Promise<void> {\n try {\n const database = client.db(\"sample_mflix\");\n // Specifying a Schema is always optional, but it enables type hinting on\n // finds and inserts\n const movies = database.collection<Movie>(\"movies\");\n\n const movie = await movies.findOne<MovieSummary>(\n { title: \"The Room\" },\n {\n sort: { rating: -1 },\n projection: { _id: 0, title: 1, imdb: 1 },\n }\n );\n console.log(movie);\n } finally {\n await client.close();\n }\n}\nrun().catch(console.dir);\n" + } + ], + "preview": "Learn how to retrieve one document from MongoDB by using the Node.js driver.",
"tags": "code example, node.js, sample dataset", + "facets": { + "genre": ["tutorial"], + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "usage-examples/insert-operations", + "title": "Insert Operations", + "headings": [], + "paragraphs": "Insert a Document Insert Multiple Documents", + "code": [], + "preview": "Learn by example: how to insert data into MongoDB by using the MongoDB Node.js driver.", + "tags": null, + "facets": { + "genre": ["tutorial"], + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "usage-examples/insertMany", + "title": "Insert Multiple Documents", + "headings": ["Example"], + "paragraphs": "You can insert multiple documents using the\n collection.insertMany() method. The insertMany() takes an array\nof documents to insert into the specified collection. You can specify more options in the options object passed as the\nsecond parameter of the insertMany() method. Specify ordered:true \nto prevent inserting the remaining documents if the insertion failed for a\nprevious document in the array. Specifying incorrect parameters for your insertMany() operation can\ncause problems. Attempting to insert a field with a value that violates\nunique index rules results in a duplicate key error . Running the preceding example, you see the following output: You can use this example to connect to an instance of MongoDB\nand interact with a database that contains sample data. To learn more about connecting to your MongoDB\ninstance and loading a sample dataset, see the Usage Examples\nguide .", + "code": [ + { + "lang": "none", + "value": "3 documents were inserted" + }, + { + "lang": "javascript", + "value": "import { MongoClient } from \"mongodb\";\n\n// Replace the uri string with your MongoDB deployment's connection string.\nconst uri = \"\";\n\nconst client = new MongoClient(uri);\n\nasync function run() {\n try {\n\n // Get the database and collection on which to run the operation\n const database = client.db(\"insertDB\");\n const foods = database.collection(\"foods\");\n\n // Create an array of documents to insert\n const docs = [\n { name: \"cake\", healthy: false },\n { name: \"lettuce\", healthy: true },\n { name: \"donut\", healthy: false }\n ];\n\n // Prevent additional documents from being inserted if one fails\n const options = { ordered: true };\n\n // Execute insert operation\n const result = await foods.insertMany(docs, options);\n \n // Print result\n console.log(`${result.insertedCount} documents were inserted`);\n } finally {\n await client.close();\n }\n}\nrun().catch(console.dir);\n" + }, + { + "lang": "typescript", + "value": "import { MongoClient } from \"mongodb\";\n\n// Replace the uri string with your MongoDB deployment's connection string.\nconst uri = \"\";\n\nconst client = new MongoClient(uri);\n\ninterface Food {\n name: string;\n healthy: boolean;\n}\n\nasync function run() {\n try {\n const database = client.db(\"insertDB\");\n // Specifying a schema is optional, but it enables type hints on\n // finds and inserts\n const foods = database.collection(\"foods\");\n\n const result = await foods.insertMany(\n [\n { name: \"cake\", healthy: false },\n { name: \"lettuce\", healthy: true },\n { name: \"donut\", healthy: false },\n ],\n { ordered: true }\n );\n console.log(`${result.insertedCount} documents were inserted`);\n } finally {\n await client.close();\n }\n}\nrun().catch(console.dir);\n" + } + ], + "preview": "You can insert multiple 
documents using the\ncollection.insertMany() method. The insertMany() method takes an array\nof documents to insert into the specified collection.", + "tags": null, + "facets": { + "genre": ["tutorial"], + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "usage-examples/insertOne", + "title": "Insert a Document", + "headings": ["Compatibility", "Example"], + "paragraphs": "You can insert a document into a collection using the\n collection.insertOne() method. To\ninsert a document, define an object that contains the fields and values that\nyou want to store. If the specified collection does not exist, the\n insertOne() method creates the collection. You can specify more query options using the options parameter.\nFor more information on the method parameters, see the\n insertOne() API documentation . If the operation successfully inserts a document, it appends an\n insertedId field to the object passed in the method call, and sets the\nvalue of the field to the _id of the inserted document. You can use the Node.js driver to connect and use the insertOne() method for\ndeployments hosted in the following environments: MongoDB Atlas : The fully\nmanaged service for MongoDB deployments in the cloud MongoDB Enterprise : The\nsubscription-based, self-managed version of MongoDB MongoDB Community : The\nsource-available, free-to-use, and self-managed version of MongoDB To learn more about inserting documents in the Atlas UI for deployments hosted in MongoDB\nAtlas, see Create, View, Update, and Delete Documents . Running the preceding example, you see the following output: You can use this example to connect to an instance of MongoDB\nand interact with a database that contains sample data. To learn more about connecting to your MongoDB\ninstance and loading a sample dataset, see the Usage Examples\nguide .", + "code": [ + { + "lang": "none", + "value": "A document was inserted with the _id: " + }, + { + "lang": "javascript", + "value": "import { MongoClient } from \"mongodb\";\n\n// Replace the uri string with your MongoDB deployment's connection string.\nconst uri = \"\";\n\n// Create a new client and connect to MongoDB\nconst client = new MongoClient(uri);\n\nasync function run() {\n try {\n // Connect to the \"insertDB\" database and access its \"haiku\" collection\n const database = client.db(\"insertDB\");\n const haiku = database.collection(\"haiku\");\n \n // Create a document to insert\n const doc = {\n title: \"Record of a Shriveled Datum\",\n content: \"No bytes, no problem. 
Just insert a document, in MongoDB\",\n }\n // Insert the defined document into the \"haiku\" collection\n const result = await haiku.insertOne(doc);\n\n // Print the ID of the inserted document\n console.log(`A document was inserted with the _id: ${result.insertedId}`);\n } finally {\n // Close the MongoDB client connection\n await client.close();\n }\n}\n// Run the function and handle any errors\nrun().catch(console.dir);\n" + }, + { + "lang": "typescript", + "value": "import { MongoClient } from \"mongodb\";\n\n// Replace the uri string with your MongoDB deployment's connection string.\nconst uri = \"\";\n\nconst client = new MongoClient(uri);\n\ninterface Haiku {\n title: string;\n content: string;\n}\n\nasync function run() {\n try {\n const database = client.db(\"insertDB\");\n // Specifying a Schema is optional, but it enables type hints on\n // finds and inserts\n const haiku = database.collection<Haiku>(\"haiku\");\n const result = await haiku.insertOne({\n title: \"Record of a Shriveled Datum\",\n content: \"No bytes, no problem. Just insert a document, in MongoDB\",\n });\n console.log(`A document was inserted with the _id: ${result.insertedId}`);\n } finally {\n await client.close();\n }\n}\nrun().catch(console.dir);\n" + } + ], + "preview": "Learn how to insert a document into MongoDB by using the Node.js driver.", + "tags": "code example, node.js, sample dataset", + "facets": { + "genre": ["tutorial"], + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "usage-examples/replaceOne", + "title": "Replace a Document", + "headings": ["Example"], + "paragraphs": "You can replace a single document using the\n collection.replaceOne() method.\n replaceOne() accepts a query document and a replacement document. If\nthe query matches a document in the collection, it replaces the first\ndocument that matches the query with the provided replacement document.\nThis operation removes all fields and values in the original document and\nreplaces them with the fields and values in the replacement document. The\nvalue of the _id field remains the same unless you explicitly specify\na new value for _id in the replacement document. You can specify more options, such as upsert , using the\noptional options parameter. If you set the upsert option field to\n true , the method inserts a new document if no document matches the query. The replaceOne() method throws an exception if an error occurs\nduring execution. For example, if you specify a value that violates a\nunique index rule, replaceOne() throws a duplicate key error . If your application requires the document after updating,\nuse the collection.findOneAndReplace() \nmethod which has a similar interface to replaceOne() .\nYou can configure findOneAndReplace() to return either the\noriginal matched document or the replacement document. Running the preceding example, you see the following output: You can use this example to connect to an instance of MongoDB\nand interact with a database that contains sample data. 
To learn more about connecting to your MongoDB\ninstance and loading a sample dataset, see the Usage Examples\nguide .", + "code": [ + { + "lang": "none", + "value": "Modified 1 document(s)" + }, + { + "lang": "javascript", + "value": "import { MongoClient } from \"mongodb\";\n\n// Replace the uri string with your MongoDB deployment's connection string.\nconst uri = \"\";\n\nconst client = new MongoClient(uri);\n\nasync function run() {\n try {\n \n // Get the database and collection on which to run the operation\n const database = client.db(\"sample_mflix\");\n const movies = database.collection(\"movies\");\n\n // Create a query for documents where the title contains \"The Cat from\"\n const query = { title: { $regex: \"The Cat from\" } };\n \n // Create the document that will replace the existing document\n const replacement = {\n title: `The Cat from Sector ${Math.floor(Math.random() * 1000) + 1}`,\n };\n\n // Execute the replace operation\n const result = await movies.replaceOne(query, replacement);\n \n // Print the result \n console.log(`Modified ${result.modifiedCount} document(s)`);\n } finally {\n await client.close();\n }\n}\nrun().catch(console.dir);\n" + }, + { + "lang": "typescript", + "value": "import { MongoClient } from \"mongodb\";\n\n// Replace the uri string with your MongoDB deployment's connection string.\nconst uri = \"\";\n\nconst client = new MongoClient(uri);\n\ninterface Movie {\n title: string;\n}\n\nasync function run() {\n try {\n const database = client.db(\"sample_mflix\");\n const movies = database.collection<Movie>(\"movies\");\n\n const result = await movies.replaceOne(\n { title: { $regex: \"The Cat from\" } },\n {\n title: `The Cat from Sector ${Math.floor(Math.random() * 1000) + 1}`,\n }\n );\n console.log(`Modified ${result.modifiedCount} document(s)`);\n } finally {\n await client.close();\n }\n}\nrun().catch(console.dir);\n" + } + ], + "preview": "You can replace a single document using the\ncollection.replaceOne() method.\nreplaceOne() accepts a query document and a replacement document. If\nthe query matches a document in the collection, it replaces the first\ndocument that matches the query with the provided replacement document.\nThis operation removes all fields and values in the original document and\nreplaces them with the fields and values in the replacement document. The\nvalue of the _id field remains the same unless you explicitly specify\na new value for _id in the replacement document.", + "tags": null, + "facets": { + "genre": ["tutorial"], + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "usage-examples/transaction-conv", + "title": "Use the Convenient Transaction API", + "headings": [ + "Example", + "Sample Data", + "Implementation", + "Sample Orders and Transaction Results", + "API Documentation" + ], + "paragraphs": "You can perform a transaction to run a series of operations that do not\nchange any data until the entire transaction is committed. This usage\nexample uses the Convenient Transaction API to perform a transaction. To learn more about performing transactions in the\nNode.js driver, see the Transactions guide. The Node.js driver also provides the Core API to perform\ntransactions. To learn more about the Core API, see the\n Use the Core API usage example. Consider a situation in which a customer purchases items from your shop.\nTo record the purchase, your application must update\nyour inventory and record the order information. 
The following table describes the collections that store purchase data\nand how a purchase changes the data in each collection. Collection Operation Description of the Change orders insert Inserts a document that describes the order inventory update Updates the quantities of items available after a purchase The inventory collection contains the\nfollowing documents: You store purchase records in the orders collection of the\n testdb database. This collection is empty, as there have been no\npurchases. The code example in this section demonstrates how to use the Convenient\nTransaction API to perform a multi-document transaction in a session. In\nthis example, the transaction makes the changes needed when a\ncustomer purchases items from your shop. This example code performs a transaction through the following actions: Calls the withSession() method on the client to implicitly create\nthe session and run the callback passed to it within the session. Calls the withTransaction() method on the session to create a\ntransaction, run the callback passed to it, and commit the\ntransaction. If the transaction fails, this method ends the\ntransaction and returns an error message. Performs the following operations within the transaction: Updates the inventory and orders collections if there is\nsufficient inventory to fulfill the purchase Ends the transaction and throws an exception if there isn't\nsufficient inventory for any item in the order Returns a message acknowledging that the transaction\ncommitted successfully with a copy of the purchase record Prints the return type of withSession() , which is either the\nerror message or the acknowledgment that the transaction completed. This section describes the results of the transactions performed for two\nsample orders. Sufficient inventory exists for the following order, so the transaction\nsuccessfully completes: After passing this order to the example transaction code, the code\noutputs the following result: In the inventory collection, the quantity of\n \"sunblock\" is now 82 and the quantity of \"beach chair\" \nis 29 . The orders collection contains the record of the\npurchase. There is not sufficient inventory for the following order, so the\ndriver ends the transaction: After passing this order to the example transaction code, the code\noutputs the following result: Since the driver ends the transaction, there are no changes to\nthe inventory and orders collections. To learn more about any of the methods or types discussed in this\nusage example, see the following API Documentation: withSession() withTransaction() abortTransaction()", + "code": [ + { + "lang": "javascript", + "value": "{ item: \"sunblock\", qty: 85, price: 6.0 },\n{ item: \"beach chair\", qty: 30, price: 25.0 }" + }, + { + "lang": "javascript", + "value": "const txnResult = await client.withSession(async (session) =>\n session\n .withTransaction(async (session) => {\n const invColl = client.db(\"testdb\").collection(\"inventory\");\n const recColl = client.db(\"testdb\").collection(\"orders\");\n\n let total = 0;\n for (const item of order) {\n /* Update the inventory for the purchased items. End the\n transaction if the quantity of an item in the inventory is\n insufficient to complete the purchase. 
*/\n const inStock = await invColl.findOneAndUpdate(\n {\n item: item.item,\n qty: { $gte: item.qty },\n },\n { $inc: { qty: -item.qty } },\n { session }\n );\n if (inStock === null) {\n await session.abortTransaction();\n return \"Item not found or insufficient quantity.\";\n }\n const subTotal = item.qty * inStock.price;\n total = total + subTotal;\n }\n\n // Create a record of the purchase\n const receipt = {\n date: new Date(),\n items: order,\n total: total,\n };\n await recColl.insertOne(receipt, { session });\n return (\n \"Order successfully completed and recorded!\\nReceipt:\\n\" +\n JSON.stringify(receipt, null, 1)\n );\n }, null)\n .finally(async () => await client.close())\n);\n\nconsole.log(txnResult);" + }, + { + "lang": "none", + "value": "Order successfully completed and recorded!\nReceipt:\n{\n \"date\": \"2023-08-25T20:06:52.564Z\",\n \"items\": [\n { \"item\": \"sunblock\", \"qty\": 3 },\n { \"item\": \"beach chair\", \"qty\": 1 }\n ],\n \"total\": 43,\n \"_id\": \"...\"\n}" + }, + { + "lang": "none", + "value": "Item not found or insufficient quantity." + }, + { + "lang": "javascript", + "value": "{ item: \"sunblock\", qty: 3 },\n{ item: \"beach chair\", qty: 1 }" + }, + { + "lang": "javascript", + "value": "{ item: \"volleyball\", qty: 1 }" + } + ], + "preview": "You can perform a transaction to run a series of operations that do not\nchange any data until the entire transaction is committed. This usage\nexample uses the Convenient Transaction API to perform a transaction.", + "tags": null, + "facets": { + "genre": ["tutorial"], + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "usage-examples/transaction-core", + "title": "Use the Core API", + "headings": [ + "Example", + "Sample Data", + "Implementation", + "Transaction Results", + "API Documentation" + ], + "paragraphs": "You can perform a transaction to run a series of operations that do not\nchange any data until the entire transaction is committed. This usage\nexample uses the Core API to perform a transaction. To learn more about performing transactions in the\nNode.js driver, see the Transactions guide. The Node.js driver also provides the Convenient Transaction API to\nperform transactions. To learn more about the Convenient Transaction\nAPI, see the Use the Convenient Transaction API usage example. Consider a situation in which a customer purchases items from your online\nshop. To record the purchase, your application must update\nyour inventory and the customer's orders. Your\napplication also needs to save the order details. The following table describes the collections that store purchase data\nand how a purchase changes the data in each collection. Collection Operation Description of the Change orders insert Inserts a document that describes the order customers update or upsert Appends the _id from the order document to the order history\nin the customer document inventory update Updates the quantities of items available after a purchase The code examples use the following sample data in the testdb \ndatabase: The following document is in the customers collection: The inventory collection contains the following documents: You store purchase records in the orders collection of the\n testdb database. This collection is empty, as there have been no\npurchases. The code examples use the cart and payment variables to represent\na sample list of items purchased and the order payment details. 
The\nfollowing code describes the contents of the cart and payment variables: Documents in the customers collection that describe customers and\ntheir past orders Documents in the inventory collection that include quantities and\ndescriptions of all items The code example in this section demonstrates how to use the Core API to\nperform a multi-document transaction in a session. In this example, the\ntransaction makes the changes needed when a customer purchases items from\nyour shop. This example code performs a transaction through the following actions: Calls the startSession() method to create a new session Calls the startTransaction() method with an options parameter to\ncreate a new transaction Performs the following operations within the transaction: Inserts a document to the orders collection that contains\ninformation about the purchase and customer Updates the inventory collection if there is\nsufficient inventory to fulfill the purchase Ends the transaction and throws an exception if there isn't\nsufficient inventory for any item in the order Adds the ID of the order to the list of past orders for the customer Returns a message acknowledging that the transaction\ncommitted successfully with a copy of the purchase record Calls the commitTransaction() method to commit the transaction if\nall operations complete successfully Implements a catch block that contains error-handling logic Calls the abortTransaction() method to end the transaction Calls the endSession() method to end the session This section describes the data changes created by the transaction. The customers collection contains the customer document with an\norder _id appended to the orders field: The inventory collection contains updated quantities for the\nitems \"sunblock\" and \"beach towel\" : The orders collection contains the order and payment\ninformation: To learn more about any of the methods or types discussed in this\nusage example, see the following API Documentation: TransactionOptions ClientSession startSession() startTransaction() commitTransaction() abortTransaction() endSession()", + "code": [ + { + "lang": "json", + "value": "{ _id: 98765, orders: [] }" + }, + { + "lang": "json", + "value": "{ item: \"sunblock\", item_id: 5432, qty: 85 },\n{ item: \"beach towel\", item_id: 7865, qty: 41 }" + }, + { + "lang": "javascript", + "value": "const cart = [\n { item: 'sunblock', item_id: 5432, qty: 1, price: 5.19 },\n { item: 'beach towel', item_id: 7865, qty: 2, price: 15.99 }\n];\nconst payment = { customer: 98765, total: 37.17 };" + }, + { + "lang": "javascript", + "value": "async function placeOrder(client, cart, payment) {\n const transactionOptions = {\n readConcern: { level: 'snapshot' },\n writeConcern: { w: 'majority' },\n readPreference: 'primary'\n };\n\n // Start the session\n const session = client.startSession();\n try {\n // Start the transaction in the session, specifying the transaction options\n session.startTransaction(transactionOptions);\n\n const ordersCollection = client.db('testdb').collection('orders');\n /* Within the session, insert an order that contains information about the\n customer, items purchased, and the total payment */\n const orderResult = await ordersCollection.insertOne(\n {\n customer: payment.customer,\n items: cart,\n total: payment.total,\n },\n { session }\n );\n\n const inventoryCollection = client.db('testdb').collection('inventory');\n \n for (const item of cart) { \n /* Update the inventory for the purchased items. 
End the\n transaction if the quantity of an item in the inventory is\n insufficient to complete the purchase. */\n const inStock = await inventoryCollection.findOneAndUpdate(\n {\n item_id: item.item_id,\n qty: { $gte: item.qty }\n },\n { $inc: { 'qty': -item.qty }},\n { session }\n )\n if (inStock === null) {\n throw new Error('Insufficient quantity or item ID not found.');\n }\n }\n\n const customerCollection = client.db('testdb').collection('customers');\n\n // Within the session, add the order details to the \"orders\" array of the customer document\n await customerCollection.updateOne(\n { _id: payment.customer },\n { $push: { orders: orderResult.insertedId }},\n { session }\n );\n\n // Commit the transaction to apply all updates performed within it\n await session.commitTransaction();\n console.log('Transaction successfully committed.');\n\n } catch (error) {\n /*\n Handle any exceptions thrown during the transaction and end the\n transaction. Roll back all the updates performed in the transaction.\n */\n if (error instanceof MongoError && error.hasErrorLabel('UnknownTransactionCommitResult')) {\n // Add your logic to retry or handle the error\n }\n else if (error instanceof MongoError && error.hasErrorLabel('TransientTransactionError')) {\n // Add your logic to retry or handle the error\n } else {\n console.log('An error occurred in the transaction, performing a data rollback:' + error);\n }\n await session.abortTransaction();\n } finally {\n // End the session\n await session.endSession();\n }\n}" + }, + { + "lang": "json", + "value": "{\n \"_id\": 98765,\n \"orders\": [\n \"61dc...\"\n ]\n}" + }, + { + "lang": "json", + "value": "[\n {\n \"_id\": ...,\n \"item\": \"sunblock\",\n \"item_id\": 5432,\n \"qty\": 84\n },\n {\n \"_id\": ...,\n \"item\": \"beach towel\",\n \"item_id\": 7865,\n \"qty\": 39\n }\n]" + }, + { + "lang": "json", + "value": "[\n {\n \"_id\": \"...\",\n \"customer\": 98765,\n \"items\": [\n {\n \"item\": \"sunblock\",\n \"item_id\": 5432,\n \"qty\": 1,\n \"price\": 5.19\n },\n {\n \"item\": \"beach towel\",\n \"item_id\": 7865,\n \"qty\": 2,\n \"price\": 15.99\n }\n ],\n \"total\": 37.17\n }\n]" + } + ], + "preview": "You can perform a transaction to run a series of operations that do not\nchange any data until the entire transaction is committed. 
This usage\nexample uses the Core API to perform a transaction.", + "tags": null, + "facets": { + "genre": ["tutorial"], + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "usage-examples/transactions", + "title": "Perform a Transaction", + "headings": [], + "paragraphs": "The following usage examples demonstrate how to perform transactions by\nusing the transaction APIs in the Node.js driver: Use the Convenient Transaction API Use the Core API", + "code": [], + "preview": "The following usage examples demonstrate how to perform transactions by\nusing the transaction APIs in the Node.js driver:", + "tags": null, + "facets": { + "genre": ["tutorial"], + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "usage-examples/update-and-replace-operations", + "title": "Update & Replace Operations", + "headings": [], + "paragraphs": "Update a Document Update Multiple Documents Replace a Document", + "code": [], + "preview": "Learn by example: how to update and replace data in MongoDB by using the MongoDB Node.js driver.", + "tags": null, + "facets": { + "genre": ["tutorial"], + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "usage-examples/updateMany", + "title": "Update Multiple Documents", + "headings": ["Example"], + "paragraphs": "You can update multiple documents using the\n collection.updateMany() method.\nThe updateMany() method accepts a filter document and an update document. If the query matches documents in the\ncollection, the method applies the updates from the update document to fields\nand values of the matching documents. The update document requires an update operator to modify a field in a document. You can specify more options in the options object passed in\nthe third parameter of the updateMany() method. For more detailed\ninformation, see\n the updateMany() API documentation . Running the preceding example, you see the following output: You can use this example to connect to an instance of MongoDB\nand interact with a database that contains sample data. 
To learn more about connecting to your MongoDB\ninstance and loading a sample dataset, see the Usage Examples\nguide .", + "code": [ + { + "lang": "none", + "value": "Updated 477 documents" + }, + { + "lang": "javascript", + "value": "/* Update multiple documents */\n\nimport { MongoClient } from \"mongodb\";\n\n// Replace the uri string with your MongoDB deployment's connection string\nconst uri = \"\";\n\nconst client = new MongoClient(uri);\n\nasync function run() {\n try {\n // Get the \"movies\" collection in the \"sample_mflix\" database\n const database = client.db(\"sample_mflix\");\n const movies = database.collection(\"movies\");\n\n // Create a filter to update all movies with a 'G' rating\n const filter = { rated: \"G\" };\n\n // Create an update document specifying the change to make\n const updateDoc = {\n $set: {\n random_review: `After viewing I am ${\n 100 * Math.random()\n }% more satisfied with life.`,\n },\n };\n // Update the documents that match the specified filter\n const result = await movies.updateMany(filter, updateDoc);\n console.log(`Updated ${result.modifiedCount} documents`);\n } finally {\n // Close the database connection on completion or error\n await client.close();\n }\n}\nrun().catch(console.dir);\n" + }, + { + "lang": "typescript", + "value": "/* Update multiple documents */\n\nimport { MongoClient } from \"mongodb\";\n\n// Replace the uri string with your MongoDB deployment's connection string.\nconst uri = \"\";\n\nconst client = new MongoClient(uri);\n\nenum Rating {\n G = \"G\",\n PG = \"PG\",\n PG_13 = \"PG-13\",\n R = \"R\",\n NR = \"NOT RATED\",\n}\n\n// Create a Movie interface\ninterface Movie {\n rated: Rating;\n random_review?: string;\n}\n\nasync function run() {\n try {\n // Get the \"movies\" collection in the \"sample_mflix\" database\n const database = client.db(\"sample_mflix\");\n const movies = database.collection<Movie>(\"movies\");\n\n // Update all documents that match the specified filter\n const result = await movies.updateMany(\n { rated: Rating.G },\n {\n $set: {\n random_review: `After viewing I am ${\n 100 * Math.random()\n }% more satisfied with life.`,\n },\n }\n );\n console.log(`Updated ${result.modifiedCount} documents`);\n } finally {\n // Close the database connection on completion or error\n await client.close();\n }\n}\nrun().catch(console.dir);\n" + } + ], + "preview": "You can update multiple documents using the\ncollection.updateMany() method.\nThe updateMany() method accepts a filter document and an update document. If the query matches documents in the\ncollection, the method applies the updates from the update document to fields\nand values of the matching documents. The update document requires an update operator to modify a field in a document.", + "tags": null, + "facets": { + "genre": ["tutorial"], + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "usage-examples/updateOne", + "title": "Update a Document", + "headings": ["Example"], + "paragraphs": "You can update a single document using the\n collection.updateOne() \nmethod. The updateOne() method accepts a filter\ndocument and an update document. If the query matches documents in the\ncollection, the method applies the updates from the update document to fields\nand values of the matching documents. The update document contains update operators that instruct the method\non the changes to make to the matches. 
You can specify more query options using the options object\npassed as the second parameter of the updateOne() method.\nSet the upsert option to true to create a new document\nif no documents match the filter. For more information, see the\n updateOne() API documentation . updateOne() throws an exception if an error occurs during execution.\nIf you specify a value in your update document for the immutable field\n _id , the method throws an exception. If your update document contains\na value that violates unique index rules, the method throws a duplicate\nkey error exception. If your application requires the document after updating,\nconsider using the\n collection.findOneAndUpdate() \nmethod, which has a similar\ninterface to updateOne() but also returns the original or updated\ndocument. The following example uses the $set update operator which specifies\nupdate values for document fields. For more information on update operators,\nsee the MongoDB update operator reference documentation . If you run the example above, you see the following output: You can use this example to connect to an instance of MongoDB\nand interact with a database that contains sample data. To learn more about connecting to your MongoDB\ninstance and loading a sample dataset, see the Usage Examples\nguide .", + "code": [ + { + "lang": "none", + "value": "1 document(s) matched the filter, updated 1 document(s)" + }, + { + "lang": "javascript", + "value": "// Update a document\n\nimport { MongoClient } from \"mongodb\";\n\n// Replace the uri string with your MongoDB deployment's connection string\nconst uri = \"\";\n\nconst client = new MongoClient(uri);\n\nasync function run() {\n try {\n const database = client.db(\"sample_mflix\");\n const movies = database.collection(\"movies\");\n\n // Create a filter for movies with the title \"Random Harvest\"\n const filter = { title: \"Random Harvest\" };\n\n /* Set the upsert option to insert a document if no documents match\n the filter */\n const options = { upsert: true };\n\n // Specify the update to set a value for the plot field\n const updateDoc = {\n $set: {\n plot: `A harvest of random numbers, such as: ${Math.random()}`\n },\n };\n\n // Update the first document that matches the filter\n const result = await movies.updateOne(filter, updateDoc, options);\n \n // Print the number of matching and modified documents\n console.log(\n `${result.matchedCount} document(s) matched the filter, updated ${result.modifiedCount} document(s)`,\n );\n } finally {\n // Close the connection after the operation completes\n await client.close();\n }\n}\n// Run the program and print any thrown errors\nrun().catch(console.dir);\n" + }, + { + "lang": "typescript", + "value": "// Update a document\n\nimport { MongoClient } from \"mongodb\";\n\n// Replace the uri string with your MongoDB deployment's connection string\nconst uri = \"\";\n\nconst client = new MongoClient(uri);\n\n// Define the Movie interface\ninterface Movie {\n plot: string;\n title: string;\n}\n\nasync function run() {\n try {\n const database = client.db(\"sample_mflix\");\n const movies = database.collection<Movie>(\"movies\");\n\n /* Update a document that has the title \"Random Harvest\" to have a\n plot field with the specified value */\n const result = await movies.updateOne(\n { title: \"Random Harvest\" },\n {\n $set: {\n plot: `A harvest of random numbers, such as: ${Math.random()}`,\n },\n },\n /* Set the upsert option to insert a document if no documents\n match the filter */\n { upsert: true }\n );\n\n // Print 
the number of matching and modified documents\n console.log(\n `${result.matchedCount} document(s) matched the filter, updated ${result.modifiedCount} document(s)`\n );\n } finally {\n // Close the connection after the operation completes\n await client.close();\n }\n}\n// Run the program and print any thrown errors\nrun().catch(console.dir);\n" + } + ], + "preview": "You can update a single document using the\ncollection.updateOne()\nmethod. The updateOne() method accepts a filter\ndocument and an update document. If the query matches documents in the\ncollection, the method applies the updates from the update document to fields\nand values of the matching documents. The update document contains update operators that instruct the method\non the changes to make to the matches.", + "tags": null, + "facets": { + "genre": ["tutorial"], + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "usage-examples", + "title": "Usage Examples", + "headings": [ + "Overview", + "How to Use the Usage Examples", + "Available Usage Examples" + ], + "paragraphs": "Usage examples provide convenient starting points for popular MongoDB\noperations. Each example provides the following information: Explanation of the operation in the example, including the\npurpose and a sample use case for the method Explanation of how to use the operation, including parameters,\nreturn values, and common exceptions you might encounter Full Node.js program that you can copy and paste to run the example\nin your own environment These examples use the\n MongoDB Atlas sample data \ndatabase. You can use this sample data on the free tier\nof MongoDB Atlas by following the Get Started with Atlas guide or you\ncan import the sample dataset into a local MongoDB instance . Once you have imported the dataset, you can copy and paste a usage\nexample into your development environment of choice. You can follow the\n quick start guide to learn more about getting\nstarted with Node.js, npm, and the Node.js driver. Once you've copied\na usage example, you must edit one line to get the example running\nwith your instance of MongoDB: All examples use ES module imports. You can\n enable ES module imports \nby adding the following key-value pair to your package.json file: You can use the Atlas Connectivity Guide to enable connectivity to your instance of\nAtlas and find the connection string to replace the uri variable in the\nusage example. If your instance uses SCRAM authentication , you can replace with your username,\n with your password, and with the IP\naddress or URL of your instance. Consult the\n Connection Guide for more information\nabout getting connected to your MongoDB instance. You can use any usage example with CommonJS require . To use CommonJS require , you\nmust swap out the ES module import statement for your CommonJS require \nstatement. 
Click on the tabs to see the syntax for importing the driver with ES module\n import and CommonJS require : Find Operations Insert Operations Update Operations Delete Operations Count Documents Retrieve Distinct Values of a Field Run a Command Watch for Changes Perform Bulk Operations Perform a Transaction", + "code": [ + { + "lang": "javascript", + "value": "// Replace the following with your MongoDB deployment's connection string.\nconst uri =\n \"mongodb+srv://:@?retryWrites=true&writeConcern=majority\";" + }, + { + "lang": "json", + "value": "\"type\": \"module\"" + }, + { + "lang": "javascript", + "value": "import { MongoClient } from 'mongodb'" + }, + { + "lang": "javascript", + "value": "const { MongoClient } = require('mongodb')" + } + ], + "preview": "Learn how to load sample data into a MongoDB Atlas deployment and run Node.js driver usage examples.", + "tags": "node.js", + "facets": { + "genre": ["reference"], + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + }, + { + "slug": "whats-new", + "title": "What's New", + "headings": [ + "What's New in 6.8", + "What's New in 6.7", + "What's New in 6.6", + "What's New in 6.5", + "What's New in 6.4", + "What's New in 6.3", + "What's New in 6.2", + "What's New in 6.1", + "What's New in 6.0", + "What's New in 5.9", + "What's New in 5.8", + "What's New in 5.7", + "What's New in 5.6", + "What's New in 5.5", + "What's New in 5.4", + "What's New in 5.3", + "What's New in 5.2", + "What's New in 5.1", + "What's New in 5.0", + "What's New in 4.17", + "What's New in 4.16", + "What's New in 4.15", + "What's New in 4.14", + "What's New in 4.13", + "What's New in 4.12", + "What's New in 4.11", + "Prioritization Order in Monitoring", + "Changes to AWS Authentication", + "Mutually Recursive Schema Type Checking", + "Example", + "What's New in 4.10", + "What's New in 4.9", + "What's New in 4.8", + "What's New in 4.7", + "What's New in 4.6", + "What's New in 4.5", + "What's New in 4.4", + "What's New in 4.3", + "What's New in 4.2", + "What's New in 4.1", + "What's New in 4.0", + "TypeScript", + "Key Changes", + "Node.js Version", + "Cursor Improvements", + "Cursor Stream API", + "MongoClientOptions Interface", + "createCollection()", + "BulkWriteError \u2192 MongoBulkWriteError", + "DB", + "Collection.group()", + "Authentication", + "GridStore Removal", + "Construction", + "File Seeking", + "File Upload & Download", + "File Deletion", + "Finding File Metadata", + "Unified Topology", + "Explain", + "Command Monitoring", + "What's New in 3.7", + "What's New in 3.6" + ], + "paragraphs": "Learn what's new in: Version 6.8 Version 6.7 Version 6.6 Version 6.5 Version 6.4 Version 6.3 Version 6.2 Version 6.1 Version 6.0 Version 5.9 Version 5.8 Version 5.7 Version 5.6 Version 5.5 Version 5.4 Version 5.3 Version 5.2 Version 5.1 Version 5.0 Version 4.17 Version 4.16 Version 4.15 Version 4.14 Version 4.13 Version 4.12 Version 4.11 Version 4.10 Version 4.9 Version 4.8 Version 4.7 Version 4.6 Version 4.5 Version 4.4 Version 4.3 Version 4.2 Version 4.1 Version 4.0 Version 3.7 Version 3.6 The Node.js driver v6.8 release includes the following features: To learn more about this release, see the\n v6.8.0 Release Notes on\nGitHub. Fixes a bug where a local KMS provider accepted a BSON Binary instance at\nruntime, but the TypeScript compiler allowed only values of type Buffer and\n string . The ReadConcernMajorityNotAvailableYet error is now a retryable read error. 
You can now associate a name with, and provide multiple keys for, KMS providers.\nThis feature requires mongodb-client-encryption v6.0.1 or later.\nYou can't use named KMS providers if your application uses the automatic\nKMS provider refresh capability. The following code example shows how to configure a ClientEncryption object with\nmultiple AWS keys: When you create a KMIP data key, you can now specify the delegated option. If this\noption is set to true , the KMIP provider performs encryption and decryption of\nthe data key locally, ensuring that the encryption key never leaves the KMIP server.\nThis feature requires mongodb-client-encryption v6.0.1 or later. The following code example shows how to specify this option: The driver now decodes BSON responses as the cursor iterates over them,\nrather than decoding the entire BSON response when it is received. The GitHub release for the mongodb package now contains a detached signature file,\n mongodb-X.Y.Z.tgz.sig , for the NPM package. This change applies to every major\nand patch release for versions 5.x and 6.x of the driver. To verify the package signature,\nfollow the instructions in the Release Integrity section of the\n README.md \nfile in the driver's GitHub repository. The Node.js driver v6.7 release includes the following features: To learn more about this release, see the\n v6.7.0 Release Notes on\nGitHub. Adds support for the MONGODB-OIDC authentication mechanism when connected to\nMongoDB Server v7.0 and later. The driver supports authentication with Azure\nmachine authentication, GCP machine authentication, callback authentication,\nand human interaction callback authentication facets. Fixes an issue where setting the useBigInt64 flag to true caused the internal\n compareTopologyVersion function to generate an error. The Node.js driver v6.6 release includes the following features: To learn more about this release, see the\n v6.6.0 Release Highlights on\nGitHub. Upgrades to using BSON 6.7.0. For details about the new BSON features, see the\nrelease notes for BSON 6.5.0 ,\n BSON 6.6.0 , and\n BSON 6.7.0 . Adds the addStage() method to the fluent aggregation API. You can use this method to\nadd aggregation pipeline stages individually, as shown in the following\nexample: Adds the cause and dependencyName fields to the MongoMissingDependencyError \nclass. You can use these fields to programmatically determine if a package is missing\nor why a package didn't load. Adds the minRoundTripTime property to the ServerDescription class. This\nproperty contains the minimum round-trip time over the last 10 heartbeats. Adds the toJSON() method to the TopologyDescription class. Although you can use\nthis method to stringify TopologyDescription objects to JSON, we\nrecommend using Node's util.inspect() method instead, because it properly handles\nall types used in JavaScript and the driver. Adds cursor options support for the Collection.indexExists() ,\n Collection.indexes() , and Collection.indexInformation() methods in TypeScript. Removes support for the readConcern and writeConcern options from the\n Collection.listSearchIndexes() method. listSearchIndexes() is an Atlas-specific method, and Atlas\nsearch indexes don't support these options. Redefines the ServerDescription.roundTripTime property as a moving average. Previously,\nit was a weighted average of the most recently observed heartbeat duration and the\nprevious duration. 
You can specify the type of a search index when creating the index, as shown\nin the following example: The UpdateFilter.$currentDate property no longer throws an error when you pass\nit to a compound method, such as findOneAndUpdate() , on a collection with a limited schema. The driver throws a MongoTransactionError only if you provide a\n ReadPreferenceMode other than primary and then try to perform a command that\ninvolves a read operation. The data type of the TopologyDescription.error property is MongoError . The Collection.indexExists() method no longer supports the full option. The Collection.indexInformation() , Collection.indexes() , and\n Db.indexInformation() methods have a return type of\n IndexDescriptionCompact | IndexDescriptionInfo[] in TypeScript. When retrieving AWS KMS (Key Management System) credentials, the driver no longer\nthrows an error when it receives an access key that includes\nan expiration timestamp. The ClusterTime interface no longer defines the signature field as required in\nTypeScript. The Node.js driver v6.5 release includes the following features: To learn more about this release, see the\n v6.5.0 Release Highlights on\nGitHub. Updates bulk write operations to use the pkFactory class for document\nID generation. If you previously specified an instance of a pkFactory to handle\nbulk writes, the _id fields of the documents inserted by using bulk\nwrites may be inconsistent with the behavior in this version. Fixes the read preference that is sent with read operations to\n primaryPreferred when the driver is connected to a secondary node in\nthe replica set. Fixes a memory leak in promise creation for socket operations. Reduces first-time connection latency when connecting to a DNS seedlist by\nquerying the SRV and TXT records in parallel. Adds tracking to container metadata when running a client in Kubernetes\nor a container environment in the client.env.container field of the\nhandshake document. Adds the original error document returned by the server to the\n errorResponse field of the MongoServerError document. Deprecates the CloseOptions interface which is unused by the driver. The Node.js driver v6.4 release includes the following features: To learn more about this release, see the\n v6.4.0 Release Highlights on\nGitHub. When multiple mongos instances are available, different servers are used\nfor read and write retry attempts. Caches AWS credentials at the client level, rather than for each\nauthentication. Upgrades to using BSON 6.4.0. For details about the new BSON features, see the\nrelease notes for BSON 6.3.0 and BSON 6.4.0 . Read operations that result in an ExceededTimeLimit error are retried. Fixes a request issue related to TLS sockets and\n KMS Providers . Fixes the base64 padding on the saslContinue command to allow for mongosh\nauthentication. Types countDocuments using Filter rather than Document ,\nwhich enables autocompletion and helps prevent downstream typing issues. Fixes a type error in the $addToSet option of the bulkWrite command.\nThe driver skips $addToSet validation if you extend your types from\n Document or any , or use properties of any type. Fixes the ServerHeartbeatSucceeded and ServerHeartbeatFailed event\nheartbeat duration so that it does not include the time to create the socket. Appropriately emits errors from cursor transform streams, rather than\nabsorbing them. Makes AWS session tokens optional when a username and password are provided,\nand allows AWS SDK to handle the authentication requests. 
The Node.js driver v6.3 release includes the following features: To learn more about this release, see the\n v6.3.0 Release Highlights . Adds the serverMonitoringMode client option to control the\nbehavior of the monitoring connection among the nodes in a topology.\nThis option takes a value of auto (default), poll , or\n stream . To learn more, see the entry for this option in the\n Connection Options guide. You can set the serverMonitoringMode option in a\n MongoClientOptions instance or as a connection string option. The\nfollowing example shows how to create a client with the option set to\n stream : Fixes a connection leak when the serverApi client option is set. Deprecates the contentType and aliases GridFS options. To\nstore the content type and aliases of a file, add contentType and aliases \nfields to the metadata document. The Node.js driver v6.2 release includes the following features: To learn more about this release, see the\n v6.2.0 Release Highlights . Updates the bson package version to 6.2.0 to include\ncolor visualization of types, as shown in the following image: To learn more, see the bson v6.2.0 release notes . Ensures that the result.insertedIds property of a bulk write error type\ncontains the _id values of successfully inserted documents. In\nprevious versions, when a bulk write operation rejected an insert\noperation, the result.insertedIds property contained the\n _id values for all attempted inserts. Closes the implicit session created when running the findOne() \nmethod on a time series collection regardless of the outcome of the\noperation. Allows the creation of collections that have names that start or end with the\n . character. This change aligns the driver's database and\ncollection name-checking behavior with the server's. The Node.js driver v6.1 release includes the following features: To learn more about this release, see the\n v6.1.0 Release Highlights . Updates the bson package version to 6.1.0 to expose the\n Decimal128.fromStringWithRounding() method. To learn more, see the\n v6.1.0 bson release notes . Detects environment variables for region settings when\nauthenticating by using the MONGODB-AWS authentication mechanism.\nTo instruct the driver to use your region options, you must set both\nof the following environment variables: AWS_STS_REGIONAL_ENDPOINTS AWS_REGION Fixes a memory leak issue caused by recursive calls to the next() \nmethod of the ChangeStream type. The Node.js driver v6.0 release includes the following features: To learn more about this release, see the\n v6.0.0 Release Highlights . This driver version introduces breaking changes. For a list of these changes, see\nthe Version 6.0 Breaking Changes section in the\nUpgrade guide. All of the ssl -prefixed options in the MongoClientOptions \ntype are deprecated. In addition, the tlsCertificateFile option\nis deprecated. Instead, you should store your certificates in a SecureContext \nobject or set the tls -prefixed options in your\n MongoClientOptions instance. To learn more, see Enable TLS on a Connection . Removal of support for the addUser() helper command. Use the\n createUser MongoDB Shell command instead. Removal of support for the collStats operation. Use the\n $collStats aggregation operator\ninstead. 
The options field of the ConnectionPoolCreatedEvent type\ncontains only the following fields, which are the non-default pool\noptions: maxConnecting maxPoolSize minPoolSize maxIdleTimeMS waitQueueTimeoutMS The driver asynchronously reads files set in the tlsCAFile and\n tlsCertificateKeyFile connection options when you call\nthe MongoClient.connect() method, not when you create a\n MongoClient instance. Removal of the keepAlive and keepAliveInitialDelay connection\noptions. The value of keepAlive is permanently set to true and the\nvalue of keepAliveInitialDelay is set to 300000 milliseconds (300\nseconds). To learn how to set keepalive settings at a system level,\nsee the Does TCP keepalive time affect MongoDB Deployments? \nFAQ entry in the Server manual. Removes the following options for the Db.command() method: Although you cannot pass these options to the\n Db.command() method, you can still set them in the command\ndocument. To learn more, see the Command Options section of the Run a Command guide. willRetryWrite omitReadPreference writeConcern explain readConcern collation maxTimeMS comment retryWrites dbName authdb noResponse The Node.js driver v5.9 release includes the following features: To learn more about this release, see the\n v5.9.0 Release Highlights . This version includes a fix to a memory leak introduced in v5.7.\nWe recommend upgrading to v5.9. Fixed a memory leak introduced in v5.7. The Decimal128 constructor and fromString() methods now throw an exception\nwhen they detect a loss of precision of more than 34 significant digits.\nThe Decimal128 class exposes a new fromStringWithRounding() static method that\nuses the rounding behavior from previous versions of the driver. For more information,\nsee the Release Notes for v5.5 of the js-bson package \non GitHub. Added support for detecting the AWS_STS_REGIONAL_ENDPOINTS and AWS_REGION \nenvironment variables and setting the appropriate options when calling the\n fromNodeProviderChain() method in the AWS SDK. The Node.js driver v5.8 release includes the following features: To learn more about this release, see the\n v5.8.0 Release Highlights . This version includes a fix to a memory leak introduced in v5.7.\nWe recommend upgrading to v5.9. The AutoEncrypter interface is deprecated. Support for Kerberos versions 1.x and 2.x. Deprecation errors are not emitted for the\n tlsCertificateFile property when you set the\n tlsCertificateKeyFile property. Removes credential availability in the\n ConnectionPoolCreatedEvent type. You can still access credentials\nthrough the credentials property of a MongoOptions instance. Lowers the @aws-sdk/credential-providers version to 3.188.0\nand zstd to ^1.0.0. The Node.js driver v5.7 release includes the following features: To learn more about this release, see the\n v5.7.0 Release Highlights . The following Write Concern options are deprecated: To specify the write concern behavior, use the wtimeoutMS and\n journal options instead. To learn more about these options, see the\n Connection Options page. wtimeout j fsync SSL options and other transport encryption options are deprecated.\nTo learn more about the deprecated options and which options to use\ninstead, see the Legacy SSL options deprecated section in the\nv5.7.0 Release Highlights linked at the end of this section. A new option for compound operation methods. The\n includeResultMetaData \noption allows you to specify whether to include information about the\noperation result. 
See the Built-in Methods section of the Compound Operations\nguide for more information. Support for change stream split events which enables processing change\nstream documents that exceed the 16MB maximum BSON size limit. An API to manage Search indexes from within your application. To\nlearn more, see Search Indexes . The Node.js driver v5.6 release includes the following features: To learn more about this release, see the v5.6.0 Release Highlights . The driver now supports Node.js v20. The driver can return a cursor as the response to a server command when you\ncall the runCursorCommand() method. To learn more about this feature,\nsee the runCursorCommand API documentation . Support for specifying time series collection creation options\n bucketMaxSpanSeconds \nand\n bucketRoundingSeconds .\nTo learn more about these time series collection options, see\n Set Granularity for Time Series Data \nin the Server manual. New features of the 5.5 Node.js driver release include: To learn more about this release, see the v5.5.0 Release Highlights . The driver now accurately detects Function-as-a-Service (FaaS)\nenvironments in AWS by considering AWS environment variables only if\nthey begin with AWS_Lambda_ . You must upgrade mongodb-client-encryption to version 2.8.0 or\nlater if you want to create an encrypted collection by using the\nQueryable Encryption feature. New features of the 5.4 Node.js driver release include: To learn more, see the v5.4.0 Release Highlights . The collStats operation is deprecated. Use the $collStats aggregation operator instead. The TypeScript interface passed to the db.command() method incorrectly\nincludes certain options. These options have been deprecated. The ChangeStream.tryNext method now uses the schema-specific\n TChange generic type instead of the Document interface. New features of the 5.3 Node.js driver release include: To learn more, see the v5.3.0 Release Highlights . The forEach() cursor method, which allows you to iteratively access\nresults from queries and aggregations, is deprecated. Use the\n for await...of syntax instead, as shown\n here. The addUser() method is deprecated. Use createUser() instead. The keepAlive and keepAliveInitialDelay connection options are\ndeprecated. Methods that contain duplicated functionality in the BulkWriteResult class are deprecated.\nSee the\n API documentation \nfor a full list of deprecated methods and the preferred alternatives. Client metadata now includes function as a service (FaaS) environment information\nand alternative runtime detection. The driver now allows SRV record addresses that contain a trailing dot. UpdateResult.upsertedId now returns null when no documents are updated. New features of the 5.2 Node.js driver release include: To learn more, see the v5.2.0 Release Highlights . The driver now supports automatically obtaining Azure credentials when using\nautomatic Queryable Encryption. New features of the 5.1 Node.js driver release include: To learn more, see the v5.1.0 Release Highlights . The driver now supports automatic serialization of JavaScript bigint to\n BSON.Long . It also supports the deserialization of BSON.Long values returned\nfrom the server to bigint values when the useBigInt64 flag is passed\nas true. New features of the 5.0 Node.js driver release include: This driver version introduces breaking changes. For a list of these changes, see\nthe Version 5.0 Breaking Changes section in the\nUpgrade guide. 
By default, the driver no longer checks types referenced in dot notation unless the StrictFilter type annotation is explicitly used. To learn more about this change, see the TypeScript fundamentals page. This change is for TypeScript only, and does not affect queries or operations at runtime. Optional installation of @aws-sdk/credential-providers as a peer dependency. The driver no longer includes AWS SDK modules by default. Use the following npm command to install the SDK: If you install the SDK, npm notifies you if the version of the SDK you installed is incompatible with the driver. Once you install the dependency successfully, the driver uses the AWS SDK itself to manage credentials from the environment.

New features of the 4.17 Node.js driver release include: To learn more, see the v4.17.0 Release Highlights. Adds the mongodb-js/saslprep package as a driver dependency. Improves compatibility with the Queryable Encryption feature.

New features of the 4.16 Node.js driver release include: To learn more, see the v4.16.0 Release Highlights. Includes Function-as-a-Service (FaaS) platform information in the driver handshake metadata. Identifies Deno runtime usage in the client metadata.

New features of the 4.15 Node.js driver release include: To learn more, see the v4.15.0 Release Highlights. Support for AWS IAM roles for service accounts.

New features of the 4.14 Node.js driver release include: This version includes a fix to a memory leak introduced in v4.13. We recommend upgrading to v4.14. Fixed a memory leak introduced in v4.13. Deprecated methods and options that reference the legacy Logger.

New features of the 4.13 Node.js driver release include: Automatic cancellation of in-flight operations in the connection pool when the driver encounters network timeout errors. Disabled causal consistency in implicit sessions to prevent conflicting with the linearizable and available read concern settings. Fixed a potential memory leak by ensuring that the driver destroys MessageStream instances whenever their connections are destroyed.

New features of the 4.12 Node.js driver release include: To learn more, see the v4.12.0 Release Highlights. The 4.12.1 Node.js driver includes a fix to a regression in monitoring logic that could cause processes to crash. Redefinition of the ChangeStream class as an async iterable. You can use ChangeStream instances in any context that expects an AsyncIterator. Notably, change streams can now be used in JavaScript for-await loops: Fix to server monitoring when the driver skips monitoring events. In this release, the driver always updates its view of the topology when processing monitoring events. Performance improvements with buffering as a result of modifications to data structures used internally in the driver. When connecting to MongoDB Server version 6.0 or later, the driver prioritizes electionId settings before setVersion settings during Server Discovery and Monitoring events. In previous versions, the prioritization order was reversed. When you install the optional @aws-sdk/credential-providers dependency, the driver uses the AWS SDK to retrieve AWS credentials from the environment. To learn more about this behavior, see the MONGODB-AWS section of the Authentication Mechanisms guide. This release includes added support for mutually recursive collection schema types. The driver also provides type safety for dot-notation queries up to a depth of eight in this release.
At a depth greater than or equal to eight, TypeScript successfully compiles your code but does not provide type safety. This depth limit on recursive types is a current limitation of TypeScript. Suppose we have a collection of type Collection<Author> that contains the following mutually recursive types: TypeScript enforces type checking up to a depth of eight. The following code causes a TypeScript compilation error because the name property value must be a string type: At a depth greater than or equal to eight, TypeScript compiles your code but no longer type checks it. For example, the following code assigns a number to a string property but does not cause a compilation error because the referenced property is at a depth of 10: To learn more, see the v4.11.0 Release Highlights.

New features of the 4.10 Node.js driver release include: To learn more, see the v4.10.0 Release Highlights. Callback deprecation: callbacks are now deprecated in favor of Promises. Callbacks will be removed in the next major release. The Node driver team recommends migrating to promises where possible: use async/await syntax, use the Node.js callbackify utility, or use then syntax. If you are unable to migrate to Promises in a large codebase, you can use the legacy Node.js driver with optional callback support.

New features of the 4.9 Node.js driver release include: To learn more, see the v4.9.0 Release Highlights. Fixed an inconsistency with writeConcern options in the type definitions. Included the latest BSON release, which adds automatic UUID support. See the BSON release notes here.

New features of the 4.8 Node.js driver release include: To learn more, see the v4.8.0 Release Highlights. Version 4.8.1 fixes a type regression issue introduced in v4.8.0. By upgrading to v4.8.1, you can specify _id values and sub-documents when performing updates with the $set or $setOnInsert operators. Added auto-completion and type safety for nested keys in an update filter. client.startSession() can now be called before connecting to MongoDB. The estimatedDocumentCount() method can now accept a comment.

New features of the 4.7 Node.js driver release include: The MongoClient.connect() method is now optional when connecting to your MongoDB instance. Ability to compress messages with the Zstandard compression algorithm. Added support for the maxConnecting connection option. Ability for change stream documents to show your documents before and after an update. Added support for new change stream fields related to Cluster-to-Cluster Replication. The estimatedDocumentCount() method now uses the $count database command. Improved connecting to MongoDB in the AWS Lambda Init phase. The ResumeOptions interface is deprecated. Use the ChangeStreamCursorOptions interface instead.

New features of the 4.6 Node.js driver release include: To learn more, see the v4.6.0 Release Highlights. Improved the ChangeStreamDocument in TypeScript. Even distribution of server selection based on load across servers.

See the v4.5.0 Release Highlights on GitHub.

New features of the 4.4 Node.js driver release include: KMIP provider support when using CSFLE. TLS support when using CSFLE. Hostname canonicalization now accepts "none", "forward", and "forwardAndReverse" as authMechanismProperties when using GSSAPI.
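For illustration only, hostname canonicalization can be requested through authMechanismProperties in the connection string; the host, user, and realm in this sketch are placeholders, not values from these release notes:

// Hypothetical GSSAPI connection requesting forward hostname canonicalization.
const { MongoClient } = require('mongodb');
const client = new MongoClient(
  'mongodb://user%40EXAMPLE.COM@db.example.com/?authMechanism=GSSAPI' +
    '&authMechanismProperties=CANONICALIZE_HOST_NAME:forward',
);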
In the 4.0.0 release of the driver, the deprecated collection.count() method was inadvertently changed to behave like collection.countDocuments(). In this release, the collection.count() method is updated to match legacy behavior: If a query is provided, collection.count() behaves the same as collection.countDocuments() and performs a collection scan. If no query is provided, collection.count() behaves the same as collection.estimatedDocumentCount() and relies on collection metadata. The cursor.count() method is deprecated and will be removed in the next major version, along with collection.count(). Use the collection.estimatedDocumentCount() or collection.countDocuments() methods instead.

New features of the 4.3 Node.js driver release include: SOCKS5 support. Option to disable UTF-8 validation. Type inference for nested documents.

New features of the 4.2 Node.js driver release include: srvMaxHosts and srvServiceName DNS seedlist connection options.

New features of the 4.1 Node.js driver release include: Added load balanced connection support for all cluster types, including the beta Serverless platform. Added support for the advanceClusterTime() method to determine if the ClientSession should update its cluster time.

New features of the 4.0 Node.js driver release include: This driver version introduces breaking changes. For a list of these changes, see the Version 4.0 Breaking Changes section in the Upgrade guide. In this release of the driver, the deprecated collection.count() method was inadvertently changed to behave like collection.countDocuments(). This behavior is corrected in version 4.4. We'd love to hear your TypeScript-related feature requests. Please submit ideas on our JIRA project here. We've migrated the driver to TypeScript. You can now harness the type hinting and intellisense features in editors that support it to develop your MongoDB applications. Enjoy the benefits of this work in pure JavaScript projects as well. The underlying BSON library used by this version is now migrated to TypeScript. Inline documentation is now consistently formatted to improve display in editors. If you are a user of the community types @types/mongodb, there will likely be issues adopting the types from our codebase. We could not achieve a one-to-one match in types due to the details of writing the codebase in TypeScript. The minimum supported version of Node.js is now v12.9 or greater for version 4 of the driver. Support for our 3.x branches will continue until mid-year 2022 to allow time for users to upgrade. 3.x supports back to Node.js v4.

Our Cursor implementation is now updated to make it clear what is possible before and after execution of an operation. There was inconsistency surrounding how the cursor would error if a setting was applied after cursor execution began. Now, the cursor will throw an error when attempting to apply operations in an invalid state, similar to the following: MongoError: Cursor is already initialized. Affected classes: AbstractCursor, FindCursor, AggregationCursor, ChangeStreamCursor (this is the underlying cursor for ChangeStream), and ListCollectionsCursor. Our Cursor types no longer extend Readable directly. They must be transformed into a stream by calling cursor.stream(). Use hasNext() and next() for manual iteration. Use for await...of syntax or any Promise helpers for asynchronous iteration. With type hinting, you should find that options passed to a MongoClient are enumerated and discoverable.
We've made a large effort to process all options in the driver to give early warnings about incompatible settings, so you can get your app up and running in a correct state quickly. checkServerIdentity is no longer checked before being passed to the underlying Node API. Previously, accepted values were false or a function. Now, the argument must be a function. Specifying a boolean will result in an error being thrown. It is no longer required to specify useUnifiedTopology or useNewUrlParser. This method no longer supports a strict option, which returned an error if the collection did not exist. To assert the existence of a collection, use the listCollections() method instead. BulkWriteError is now renamed to MongoBulkWriteError. When running bulk operations that make writes, you can encounter errors depending on your settings. Import the new class name MongoBulkWriteError when testing for errors in bulk operations. DB is no longer an EventEmitter. Listen for events directly from your MongoClient instance. The Collection.group() helper, deprecated since MongoDB 3.4, is now removed. Use the aggregation pipeline $group operator instead. gssapiServiceName is now removed. Use authMechanismProperties.SERVICE_NAME in the URI or as an option on MongoClientOptions. Specifying username and password as options is only supported in the URI or as an option on MongoClientOptions. The GridStore API (already deprecated in 3.x) is now replaced with GridFSBucket. For more information on GridFS, see the MongoDB manual. Below are some snippets that represent equivalent operations. GridFSBucket uses the Node.js Stream API. You can replicate file seeking by using the start and end options, creating a download stream from your GridFSBucket. GridFSBucket does not need to be closed like GridStore. File metadata that used to be accessible on the GridStore instance can be found by querying the bucket. We internally now only manage a unifiedTopology when you connect to a mongod. The differences between this and previous versions are detailed here. It is no longer required to specify useUnifiedTopology or useNewUrlParser. You must use the new directConnection option to connect to uninitialized replica set members. Support is now added for fine-grained verbosity modes. You can learn more about each mode here. The instrument() method is now removed. Use command monitoring instead. See our guide on command monitoring for more information.
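A minimal sketch of that migration, assuming a client created with the monitorCommands option enabled (the uri value is a placeholder):

// Instead of instrument(), enable monitorCommands and subscribe to the
// command monitoring events emitted by the MongoClient.
const client = new MongoClient(uri, { monitorCommands: true });
client.on('commandStarted', (event) => console.log(event.commandName));
client.on('commandSucceeded', (event) => console.log(event.duration));
client.on('commandFailed', (event) => console.error(event.failure));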
New features of the 3.7 Node.js driver release include: Added support for load balancer mode while enabling the useUnifiedTopology option. Added support for Stable API while enabling the useUnifiedTopology option.

New features of the 3.6 Node.js driver release include: Added support for the MONGODB-AWS authentication mechanism using Amazon Web Services (AWS) Identity and Access Management (IAM) credentials. The find() method supports allowDiskUse() for sorts that require too much memory to execute in RAM. The update() and replaceOne() methods support index hints. A reduction in recovery time for topology changes and failover events. Improvements in validation testing for the default writeConcern. Authentication requires fewer round trips to the server, resulting in faster connection setup. Shorter Salted Challenge Response Authentication Mechanism (SCRAM) conversations. Ability to create collections and indexes for multiple document transactions. Running validation for a collection in the background.", + "code": [ + { + "lang": "javascript", + "value": "const clientEncryption = new ClientEncryption(keyVaultClient, {\n 'aws:key1': {\n accessKeyId: ...,\n secretAccessKey: ...\n },\n 'aws:key2': {\n accessKeyId: ...,\n secretAccessKey: ...\n },\n});\n\nclientEncryption.createDataKey('aws:key1', { ... });" + }, + { + "lang": "javascript", + "value": "clientEncryption.createDataKey('kmip', { masterKey: { delegated: true } } );" + }, + { + "lang": "javascript", + "value": "const documents = await users.aggregate().addStage({ $project: { name: true } }).toArray();" + }, + { + "lang": "js", + "value": "const indexName = await collection.createSearchIndex({\n name: 'my-vector-search-index',\n type: 'vectorSearch',\n definition: {\n mappings: { dynamic: false }\n }\n});" + }, + { + "lang": "js", + "value": "new MongoClient('', { serverMonitoringMode: 'stream' });" + }, + { + "lang": "bash", + "value": "npm install --save \"@aws-sdk/credential-providers@^3.201.0\"" + }, + { + "lang": "js", + "value": "const changeStream = myColl.watch();\nfor await (const change of changeStream) {\n console.log(\"Received change: \", change);\n}" + }, + { + "lang": "js", + "value": "interface Author {\n name: string;\n bestBook: Book;\n}\n\ninterface Book {\n title: string;\n author: Author;\n}" + }, + { + "lang": "js", + "value": "myColl.findOne({ 'bestBook.author.bestBook.title': 25 });" + }, + { + "lang": "js", + "value": "myColl.findOne({\n 'bestBook.author.bestBook.author.bestBook.author.bestBook.author.bestBook.author.name': 25\n});" + }, + { + "lang": "js", + "value": "require('util').callbackify(() => myColl.findOne())(callback)" + }, + { + "lang": "js", + "value": "myColl.findOne().then(res => callback(null, res), err => callback(err))" + }, + { + "lang": "js", + "value": "const fc = myColl.find({a: 2.3}).skip(1)\nfor await (const doc of fc) {\n console.log(doc)\n fc.limit(1) // incorrect usage, cursor already executing\n}" + }, + { + "lang": "js", + "value": "const cursor = myColl.find({});\nconst stream = cursor.stream();\nstream.on(\"data\", (data) => console.log(data));\nstream.on(\"error\", () => client.close());" + }, + { + "lang": "js", + "value": "const collections = (await db.listCollections({}, { nameOnly: true })\n .toArray()).map(\n ({name}) => name\n );\nif (!collections.includes(myNewCollectionName)) {\n throw new Error(`${myNewCollectionName} doesn't exist`);\n}" + }, + { + "lang": "js", + "value": "?authMechanismProperties=SERVICE_NAME:alternateServiceName\n// or\nnew MongoClient(url, { authMechanismProperties: { SERVICE_NAME: \"alternateServiceName\" } })" + 
}, + { + "lang": "js", + "value": "new MongoClient(\"mongodb://username:password@\")\n// or\nnew MongoClient(url, { auth: { username: \"<>\", password: \"<>\" } })" + }, + { + "lang": "javascript", + "value": "// old way\nconst gs = new GridStore(db, filename, mode[, options])\n// new way\nconst bucket = new GridFSBucket(client.db('test')[,options])" + }, + { + "lang": "js", + "value": "bucket.openDownloadStreamByName(filename, { start: 0, end: 100 })" + }, + { + "lang": "javascript", + "value": "await client.connect();\nconst filename = 'test.txt'; // whatever local file name you want\nconst db = client.db();\nconst bucket = new GridFSBucket(db);\n\nfs.createReadStream(filename)\n .pipe(bucket.openUploadStream(filename))\n .on('error', console.error)\n .on('finish', () => {\n console.log('done writing to db!');\n\n bucket\n .find()\n .toArray()\n .then(files => {\n console.log(files);\n\n bucket\n .openDownloadStreamByName(filename)\n .pipe(fs.createWriteStream('downloaded_' + filename))\n .on('error', console.error)\n .on('finish', () => {\n console.log('done downloading!');\n client.close();\n });\n });\n });" + }, + { + "lang": "js", + "value": "// old way\nGridStore.unlink(db, name, callback);\n// new way\nbucket.delete(file_id);" + }, + { + "lang": "js", + "value": "const fileMetaDataList: GridFSFile[] = await bucket.find({}).toArray();" + } + ], + "preview": "Learn what's new in:", + "tags": "version, update, upgrade, backwards compatibility", + "facets": { + "genre": ["reference"], + "target_product": ["drivers"], + "programming_language": ["javascript/typescript"] + } + } + ] } diff --git a/search-manifest/tests/snapshots/index.test.ts b/search-manifest/tests/snapshots/index.test.ts index 755fed77d..0f164b4fd 100644 --- a/search-manifest/tests/snapshots/index.test.ts +++ b/search-manifest/tests/snapshots/index.test.ts @@ -2,10 +2,10 @@ import { describe, expect, test } from 'vitest'; function sum(a: number, b: number) { - return a + b; + return a + b; } test('adds 1 + 2 to equal 3', () => { - expect(sum(1, 2)).toBe(3); + expect(sum(1, 2)).toBe(3); }); //write output of generateManifest and then compose upserts to files, check if they're the same diff --git a/search-manifest/tests/unit/getProperties.test.ts b/search-manifest/tests/unit/getProperties.test.ts index 672c5e2f0..ad053f6c9 100644 --- a/search-manifest/tests/unit/getProperties.test.ts +++ b/search-manifest/tests/unit/getProperties.test.ts @@ -1,20 +1,20 @@ import { - describe, - beforeEach, - expect, - test, - vi, - beforeAll, - afterAll, + describe, + beforeEach, + expect, + test, + vi, + beforeAll, + afterAll, } from 'vitest'; import getProperties, { - getBranch, + getBranch, } from '../../src/uploadToAtlas/getProperties'; import { - mockDb, - teardownMockDbClient, - insert, - removeDocuments, + mockDb, + teardownMockDbClient, + insert, + removeDocuments, } from '../utils/mockDB'; // simulate the repos_branches collection in an object import repos_branches from '../resources/mockCollections/repos-branches.json'; @@ -38,200 +38,200 @@ const DOCS_APP_SERVICES_NAME = 'docs-app-services'; const DOCS_MONGODB_INTERNAL_NAME = 'docs-mongodb-internal'; beforeAll(async () => { - db = await mockDb(); - await insert(db, 'repos_branches', repos_branches); - await insert(db, 'docsets', docsets); + db = await mockDb(); + await insert(db, 'repos_branches', repos_branches); + await insert(db, 'docsets', docsets); }); //mock repos_branches database beforeEach(async () => { - vi.mock('../../src/uploadToAtlas/searchConnector', async () => { - 
const { mockDb, teardownMockDbClient } = await import('../utils/mockDB'); - return { - teardown: teardownMockDbClient, - db: async () => { - //mock db of repos_branches - db = await mockDb(); - return db; - }, - }; - }); + vi.mock('../../src/uploadToAtlas/searchConnector', async () => { + const { mockDb, teardownMockDbClient } = await import('../utils/mockDB'); + return { + teardown: teardownMockDbClient, + db: async () => { + //mock db of repos_branches + db = await mockDb(); + return db; + }, + }; + }); }); afterAll(async () => { - //teardown db instance - await removeDocuments('repos_branches'); - await teardownMockDbClient(); + //teardown db instance + await removeDocuments('repos_branches'); + await teardownMockDbClient(); }); describe('given an array of branches and a branch name, the correct output is returned', () => { - //mock branches object - const branches: Array = repos_branches[1].branches; - test('given a branch name that exists in the branches array, the correct branch object is returned', () => { - expect(getBranch(branches, BRANCH_NAME_MASTER)).toEqual({ - gitBranchName: 'master', - isStableBranch: true, - urlSlug: 'current', - active: true, - }); - }); - - test('given a branch name that exists with different capitalization than in the branches array, the correct branch object is still returned', () => { - expect(getBranch(branches, 'MASTER')).toEqual({ - gitBranchName: 'master', - isStableBranch: true, - urlSlug: 'current', - active: true, - }); - }); - - test("given a branch name that doesn't exist in the branches array, an error is thrown", () => { - expect(() => getBranch(branches, BRANCH_NAME_GIBBERISH)).toThrowError( - new Error(`Branch ${BRANCH_NAME_GIBBERISH} not found in branches object`), - ); - }); - test('given a branch name and an empty branches array, an error is thrown', () => { - expect(() => getBranch([], BRANCH_NAME_MASTER)).toThrowError( - `Branch ${BRANCH_NAME_MASTER} not found in branches object`, - ); - }); + //mock branches object + const branches: Array = repos_branches[1].branches; + test('given a branch name that exists in the branches array, the correct branch object is returned', () => { + expect(getBranch(branches, BRANCH_NAME_MASTER)).toEqual({ + gitBranchName: 'master', + isStableBranch: true, + urlSlug: 'current', + active: true, + }); + }); + + test('given a branch name that exists with different capitalization than in the branches array, the correct branch object is still returned', () => { + expect(getBranch(branches, 'MASTER')).toEqual({ + gitBranchName: 'master', + isStableBranch: true, + urlSlug: 'current', + active: true, + }); + }); + + test("given a branch name that doesn't exist in the branches array, an error is thrown", () => { + expect(() => getBranch(branches, BRANCH_NAME_GIBBERISH)).toThrowError( + new Error(`Branch ${BRANCH_NAME_GIBBERISH} not found in branches object`), + ); + }); + test('given a branch name and an empty branches array, an error is thrown', () => { + expect(() => getBranch([], BRANCH_NAME_MASTER)).toThrowError( + `Branch ${BRANCH_NAME_MASTER} not found in branches object`, + ); + }); }); //two tests for a repo with multiple branches, one test for a repo with only one branch describe('Given a branch name, get the properties associated with it from repos_branches', () => { - //mock repo name - test(`correct properties are retrieved for branch ${BRANCH_NAME_MASTER} of repoName ${DOCS_COMPASS_NAME}`, async () => { - //define expected properties object for master branch of Compass repo - 
process.env.REPO_NAME = DOCS_COMPASS_NAME; - const compassMasterProperties = { - searchProperty: 'compass-current', - projectName: 'compass', - url: 'http://mongodb.com/docs/compass/', - includeInGlobalSearch: true, - }; - expect(await getProperties(BRANCH_NAME_MASTER)).toEqual( - compassMasterProperties, - ); - }); - - test(`correct properties are retrieved for branch ${BRANCH_NAME_MASTER} of repoName ${DOCS_CLOUD_NAME}`, async () => { - //define expected properties object for master branch of cloud-docs repo - process.env.REPO_NAME = DOCS_CLOUD_NAME; - const cloudDocsMasterProperties = { - searchProperty: 'atlas-master', - projectName: 'cloud-docs', - url: 'http://mongodb.com/docs/atlas/', - includeInGlobalSearch: true, - }; - - expect(await getProperties(BRANCH_NAME_MASTER)).toEqual( - cloudDocsMasterProperties, - ); - }); + //mock repo name + test(`correct properties are retrieved for branch ${BRANCH_NAME_MASTER} of repoName ${DOCS_COMPASS_NAME}`, async () => { + //define expected properties object for master branch of Compass repo + process.env.REPO_NAME = DOCS_COMPASS_NAME; + const compassMasterProperties = { + searchProperty: 'compass-current', + projectName: 'compass', + url: 'http://mongodb.com/docs/compass/', + includeInGlobalSearch: true, + }; + expect(await getProperties(BRANCH_NAME_MASTER)).toEqual( + compassMasterProperties, + ); + }); + + test(`correct properties are retrieved for branch ${BRANCH_NAME_MASTER} of repoName ${DOCS_CLOUD_NAME}`, async () => { + //define expected properties object for master branch of cloud-docs repo + process.env.REPO_NAME = DOCS_CLOUD_NAME; + const cloudDocsMasterProperties = { + searchProperty: 'atlas-master', + projectName: 'cloud-docs', + url: 'http://mongodb.com/docs/atlas/', + includeInGlobalSearch: true, + }; + + expect(await getProperties(BRANCH_NAME_MASTER)).toEqual( + cloudDocsMasterProperties, + ); + }); }); describe( - 'GetProperties behaves as expected for stale properties', - () => { - afterEach(async () => { - console.log(await removeDocuments('documents')); - }); - - test('getting properties for an inactive branch with no existing documents executes correctly and does not change db document count', async () => { - //populate db with manifests - db = await mockDb(); - const manifest1 = await getManifest('mms-master'); - await uploadManifest(manifest1, 'mms-docs-stable'); - //reopen connection to db - await mockDb(); - //check number of documents initially in db - const documentCount = await db - .collection('documents') - .countDocuments(); - - //getProperties for beta doesn't change number of documents in collection - process.env.repo_name = 'docs-compass'; - await expect(getProperties(BRANCH_NAME_BETA)).rejects.toThrow(); - expect( - await db.collection('documents').countDocuments(), - ).toEqual(documentCount); - }); - - test("non prod-deployable repo throws and doesn't return properties", async () => { - process.env.REPO_NAME = DOCS_MONGODB_INTERNAL_NAME; - await expect(getProperties('v5.0')).rejects.toThrow( - `Search manifest should not be generated for repo ${process.env.REPO_NAME}. Removing all associated manifests`, - ); - }); - - test(`no properties are retrieved for branch on repo ${DOCS_APP_SERVICES_NAME} without a "search" field. 
`, async () => { - process.env.REPO_NAME = DOCS_MONGODB_INTERNAL_NAME; - await expect(getProperties(BRANCH_NAME_MASTER)).rejects.toThrow(); - }); - - test('repo with no search categoryTitle removes all old documents with search properties beginning with that project name', async () => { - db = await mockDb(); - - //add documents for project from two diff branches to search DB - const manifest1 = await getManifest('mms-master'); - - await uploadManifest(manifest1, 'mms-docs-stable'); - await mockDb(); - - const manifest2 = await getManifest('mms-v1.3'); - await uploadManifest(manifest2, 'mms-docs-v1.3'); - - await mockDb(); - - //trying to get properties for repo removes those older documents - process.env.REPO_NAME = 'mms-docs'; - const documentCount = await db - .collection('documents') - .countDocuments(); - await expect(getProperties(BRANCH_NAME_MASTER)).rejects.toThrow(); - //throws - //no return type - - await mockDb(); - const documentCount2 = await db - - .collection('documents') - .countDocuments(); - expect(documentCount2).toEqual( - documentCount - manifest1.documents.length - manifest2.documents.length, - ); - }); - - test('getting properties for an inactive branch removes all old documents with that exact project-version searchProperty', async () => { - //add documents for project from two diff branches to DB-- docs-compass master and beta - db = await mockDb(); - //add documents for project from two diff branches to search DB - const manifest1 = await getManifest('compass-master'); - - await uploadManifest(manifest1, 'compass-current'); - await mockDb(); - - const manifest2 = await getManifest('compass-beta'); - await uploadManifest(manifest2, 'compass-upcoming'); - await mockDb(); - - //trying to get properties for repo removes only the older documents from that specific branch, beta - - //trying to get properties for repo removes those older documents - - process.env.REPO_NAME = 'docs-compass'; - const documentCount = await db - .collection('documents') - .countDocuments(); - await expect(getProperties(BRANCH_NAME_BETA)).rejects.toThrow(); - await mockDb(); - const documentCount2 = await db - .collection('documents') - .countDocuments(); - expect(documentCount2).toEqual( - documentCount - manifest2.documents.length, - ); - }); - }, - { timeout: 10000 }, + 'GetProperties behaves as expected for stale properties', + () => { + afterEach(async () => { + console.log(await removeDocuments('documents')); + }); + + test('getting properties for an inactive branch with no existing documents executes correctly and does not change db document count', async () => { + //populate db with manifests + db = await mockDb(); + const manifest1 = await getManifest('mms-master'); + await uploadManifest(manifest1, 'mms-docs-stable'); + //reopen connection to db + await mockDb(); + //check number of documents initially in db + const documentCount = await db + .collection('documents') + .countDocuments(); + + //getProperties for beta doesn't change number of documents in collection + process.env.repo_name = 'docs-compass'; + await expect(getProperties(BRANCH_NAME_BETA)).rejects.toThrow(); + expect( + await db.collection('documents').countDocuments(), + ).toEqual(documentCount); + }); + + test("non prod-deployable repo throws and doesn't return properties", async () => { + process.env.REPO_NAME = DOCS_MONGODB_INTERNAL_NAME; + await expect(getProperties('v5.0')).rejects.toThrow( + `Search manifest should not be generated for repo ${process.env.REPO_NAME}. 
Removing all associated manifests`, + ); + }); + + test(`no properties are retrieved for branch on repo ${DOCS_APP_SERVICES_NAME} without a "search" field. `, async () => { + process.env.REPO_NAME = DOCS_MONGODB_INTERNAL_NAME; + await expect(getProperties(BRANCH_NAME_MASTER)).rejects.toThrow(); + }); + + test('repo with no search categoryTitle removes all old documents with search properties beginning with that project name', async () => { + db = await mockDb(); + + //add documents for project from two diff branches to search DB + const manifest1 = await getManifest('mms-master'); + + await uploadManifest(manifest1, 'mms-docs-stable'); + await mockDb(); + + const manifest2 = await getManifest('mms-v1.3'); + await uploadManifest(manifest2, 'mms-docs-v1.3'); + + await mockDb(); + + //trying to get properties for repo removes those older documents + process.env.REPO_NAME = 'mms-docs'; + const documentCount = await db + .collection('documents') + .countDocuments(); + await expect(getProperties(BRANCH_NAME_MASTER)).rejects.toThrow(); + //throws + //no return type + + await mockDb(); + const documentCount2 = await db + + .collection('documents') + .countDocuments(); + expect(documentCount2).toEqual( + documentCount - manifest1.documents.length - manifest2.documents.length, + ); + }); + + test('getting properties for an inactive branch removes all old documents with that exact project-version searchProperty', async () => { + //add documents for project from two diff branches to DB-- docs-compass master and beta + db = await mockDb(); + //add documents for project from two diff branches to search DB + const manifest1 = await getManifest('compass-master'); + + await uploadManifest(manifest1, 'compass-current'); + await mockDb(); + + const manifest2 = await getManifest('compass-beta'); + await uploadManifest(manifest2, 'compass-upcoming'); + await mockDb(); + + //trying to get properties for repo removes only the older documents from that specific branch, beta + + //trying to get properties for repo removes those older documents + + process.env.REPO_NAME = 'docs-compass'; + const documentCount = await db + .collection('documents') + .countDocuments(); + await expect(getProperties(BRANCH_NAME_BETA)).rejects.toThrow(); + await mockDb(); + const documentCount2 = await db + .collection('documents') + .countDocuments(); + expect(documentCount2).toEqual( + documentCount - manifest2.documents.length, + ); + }); + }, + { timeout: 10000 }, ); diff --git a/search-manifest/tests/unit/index.test.ts b/search-manifest/tests/unit/index.test.ts index 9243cc49b..e8fa255c3 100644 --- a/search-manifest/tests/unit/index.test.ts +++ b/search-manifest/tests/unit/index.test.ts @@ -5,99 +5,99 @@ import type { ManifestEntry } from '../../src/generateManifest/manifestEntry'; import { getManifest } from '../utils/getManifest'; describe.each([ - { manifestName: 'node', s3Manifest: nodeManifest }, - { manifestName: 'kotlin', s3Manifest: kotlinManifest }, + { manifestName: 'node', s3Manifest: nodeManifest }, + { manifestName: 'kotlin', s3Manifest: kotlinManifest }, ])('Generate manifests from ast', async ({ manifestName, s3Manifest }) => { - //generate new manifest - const manifest = await getManifest(manifestName); + //generate new manifest + const manifest = await getManifest(manifestName); - it('has generated the manifest', async () => { - expect(manifest).toBeTruthy(); - }); + it('has generated the manifest', async () => { + expect(manifest).toBeTruthy(); + }); - it('has the correct document length', () => { - 
expect(manifest.documents).toHaveLength(s3Manifest.documents.length); - }); + it('has the correct document length', () => { + expect(manifest.documents).toHaveLength(s3Manifest.documents.length); + }); }); describe.each([ - { - manifestName: 'node', - s3Manifest: nodeManifest, - }, - { manifestName: 'kotlin', s3Manifest: kotlinManifest }, + { + manifestName: 'node', + s3Manifest: nodeManifest, + }, + { manifestName: 'kotlin', s3Manifest: kotlinManifest }, ])( - 'has the correct document properties', - async ({ manifestName, s3Manifest }) => { - const manifest = await getManifest(manifestName); - const title = manifest.documents[0].title; + 'has the correct document properties', + async ({ manifestName, s3Manifest }) => { + const manifest = await getManifest(manifestName); + const title = manifest.documents[0].title; - //TODO: put in a loop to check multiple manifestEntries against each other - let equivDoc: ManifestEntry; - for (const document of s3Manifest.documents) { - if (document.title == manifest.documents[0].title) equivDoc = document; - continue; - } + //TODO: put in a loop to check multiple manifestEntries against each other + let equivDoc: ManifestEntry; + for (const document of s3Manifest.documents) { + if (document.title == manifest.documents[0].title) equivDoc = document; + continue; + } - it('is of type string', () => { - expect(title).toBeTypeOf('string'); - }); + it('is of type string', () => { + expect(title).toBeTypeOf('string'); + }); - it('matches the slug', () => { - //slug - expect(manifest.documents[0].slug).toEqual(equivDoc.slug); - }); + it('matches the slug', () => { + //slug + expect(manifest.documents[0].slug).toEqual(equivDoc.slug); + }); - it('matches the heading', () => { - //headings - expect(manifest.documents[0].headings).toEqual(equivDoc.headings); - }); + it('matches the heading', () => { + //headings + expect(manifest.documents[0].headings).toEqual(equivDoc.headings); + }); - it('matches the paragraphs', () => { - //paragraphs - expect(manifest.documents[0].paragraphs).toEqual(equivDoc.paragraphs); - }); + it('matches the paragraphs', () => { + //paragraphs + expect(manifest.documents[0].paragraphs).toEqual(equivDoc.paragraphs); + }); - it('matches the code', () => { - //code - expect(manifest.documents[0].code).toEqual(equivDoc.code); - }); - //preview - it('matches preview', () => { - expect(manifest.documents[0].preview).toEqual(equivDoc.preview); - }); + it('matches the code', () => { + //code + expect(manifest.documents[0].code).toEqual(equivDoc.code); + }); + //preview + it('matches preview', () => { + expect(manifest.documents[0].preview).toEqual(equivDoc.preview); + }); - //tags - it('matches tags', () => { - expect(manifest.documents[0].tags).toEqual(equivDoc.tags); - }); + //tags + it('matches tags', () => { + expect(manifest.documents[0].tags).toEqual(equivDoc.tags); + }); - //facets - it('matches facets', () => { - expect(manifest.documents[0].facets).toEqual(equivDoc.facets); - }); - }, + //facets + it('matches facets', () => { + expect(manifest.documents[0].facets).toEqual(equivDoc.facets); + }); + }, ); //TODO: test Document creation describe.each([ - { - manifestName: 'node', - }, - { manifestName: 'kotlin' }, + { + manifestName: 'node', + }, + { manifestName: 'kotlin' }, ])( - 'given a decoded document generate all of the correct properties', - async ({ manifestName }) => { - //declare decoded documents here + 'given a decoded document generate all of the correct properties', + async ({ manifestName }) => { + //declare decoded documents 
here - it('should return the proper metadata', () => {}); + it('should return the proper metadata', () => {}); - it('should return the proper paragraphs', () => {}); - it('should return the proper headings and titles', () => {}); - it('should return the proper slug', () => {}); - it('should return the proper preview', () => {}); - it('should return the proper facets', () => {}); - it('should correctly return whether the document is indexable', () => {}); - }, + it('should return the proper paragraphs', () => {}); + it('should return the proper headings and titles', () => {}); + it('should return the proper slug', () => {}); + it('should return the proper preview', () => {}); + it('should return the proper facets', () => {}); + it('should correctly return whether the document is indexable', () => {}); + }, ); //TODO: given a single decoded entry, use Document function on it diff --git a/search-manifest/tests/unit/utils.test.ts b/search-manifest/tests/unit/utils.test.ts index ffca4d296..ce41fd973 100644 --- a/search-manifest/tests/unit/utils.test.ts +++ b/search-manifest/tests/unit/utils.test.ts @@ -1,12 +1,12 @@ -import { joinUrl } from "../../src/utils"; -import { expect, it } from "vitest"; +import { joinUrl } from '../../src/utils'; +import { expect, it } from 'vitest'; //test joinUrl util -it("correctly joins base URLs with slugs", () => { - expect(joinUrl({ base: "https://example.com//", path: "//foo/" })).toEqual( - "https://example.com/foo/" +it('correctly joins base URLs with slugs', () => { + expect(joinUrl({ base: 'https://example.com//', path: '//foo/' })).toEqual( + 'https://example.com/foo/', ); - expect(joinUrl({ base: "https://example.com", path: "foo" })).toEqual( - "https://example.com/foo" + expect(joinUrl({ base: 'https://example.com', path: 'foo' })).toEqual( + 'https://example.com/foo', ); }); diff --git a/search-manifest/tests/utils/mockDB.ts b/search-manifest/tests/utils/mockDB.ts index 61479e415..5c0649e1d 100644 --- a/search-manifest/tests/utils/mockDB.ts +++ b/search-manifest/tests/utils/mockDB.ts @@ -1,6 +1,6 @@ -import { MongoMemoryServer } from "mongodb-memory-server"; -import * as mongodb from "mongodb"; -import type { DatabaseDocument } from "../../src/types"; +import { MongoMemoryServer } from 'mongodb-memory-server'; +import * as mongodb from 'mongodb'; +import type { DatabaseDocument } from '../../src/types'; let client: mongodb.MongoClient; @@ -12,20 +12,20 @@ export async function teardownMockDbClient() { export async function mockDb(): Promise { if (client) { await client.connect(); - return client.db("dummy_db"); + return client.db('dummy_db'); } const mongod = await MongoMemoryServer.create(); const uri = mongod.getUri(); client = new mongodb.MongoClient(uri); await client.connect(); - const dbInstance = client.db("dummy_db"); + const dbInstance = client.db('dummy_db'); return dbInstance; } export const insert = async ( dbName: mongodb.Db, collectionName: string, - docs: any[] + docs: any[], ) => { const coll = dbName.collection(collectionName); const result = await coll.insertMany(docs); @@ -37,7 +37,7 @@ export const removeDocuments = async (collectionName: string) => { const db = await mockDb(); await db.collection(collectionName).deleteMany({}); const documentCount = await db - .collection("documents") + .collection('documents') .countDocuments(); return documentCount; }; diff --git a/search-manifest/tsconfig.json b/search-manifest/tsconfig.json index 5a3eebc48..cfd7d3953 100644 --- a/search-manifest/tsconfig.json +++ 
b/search-manifest/tsconfig.json @@ -1,13 +1,13 @@ { - "compilerOptions": { - "target": "ES2022", - "module": "ES2022", - "moduleResolution": "bundler", - "strict": true, - "rootDir": ".", - "paths": { - "bson": ["./node_modules/bson/src/"] - } - }, - "exclude": ["node_modules", "dist"] + "compilerOptions": { + "target": "ES2022", + "module": "ES2022", + "moduleResolution": "bundler", + "strict": true, + "rootDir": ".", + "paths": { + "bson": ["./node_modules/bson/src/"] + } + }, + "exclude": ["node_modules", "dist"] } diff --git a/search-manifest/vitest.config.ts b/search-manifest/vitest.config.ts index 7fff64e77..60de9efb2 100644 --- a/search-manifest/vitest.config.ts +++ b/search-manifest/vitest.config.ts @@ -1,8 +1,8 @@ import { defineConfig } from 'vitest/config'; export default defineConfig({ - test: { - name: 'test-suite', - root: './tests', - }, + test: { + name: 'test-suite', + root: './tests', + }, }); diff --git a/snooty-cache/package.json b/snooty-cache/package.json index 20f6ed9cf..380ca6c55 100644 --- a/snooty-cache/package.json +++ b/snooty-cache/package.json @@ -1,23 +1,23 @@ { - "name": "snooty-cache-plugin", - "version": "0.0.1", - "main": "src/index.ts", - "type": "module", - "scripts": { - "build": "netlify-integration build -a", - "dev": "netlify-integration dev -a", - "preview": "netlify-integration preview", - "test": "vitest" - }, - "dependencies": { - "@netlify/sdk": "^1.60.2-pr-1468.3", - "axios": "^1.7.7", - "typescript": "^5.4.5" - }, - "devDependencies": { - "@netlify/build": "^29.50.2", - "@types/node": "^20.14.9", - "execa": "^6.1.0", - "vitest": "^2.0.5" - } + "name": "snooty-cache-plugin", + "version": "0.0.1", + "main": "src/index.ts", + "type": "module", + "scripts": { + "build": "netlify-integration build -a", + "dev": "netlify-integration dev -a", + "preview": "netlify-integration preview", + "test": "vitest" + }, + "dependencies": { + "@netlify/sdk": "^1.60.2-pr-1468.3", + "axios": "^1.7.7", + "typescript": "^5.4.5" + }, + "devDependencies": { + "@netlify/build": "^29.50.2", + "@types/node": "^20.14.9", + "execa": "^6.1.0", + "vitest": "^2.0.5" + } } diff --git a/snooty-cache/src/index.ts b/snooty-cache/src/index.ts index 5695d7e6c..ade0281fc 100644 --- a/snooty-cache/src/index.ts +++ b/snooty-cache/src/index.ts @@ -10,85 +10,85 @@ import { downloadPersistenceModule } from './persistence'; const readdirAsync = promisify(readdir); const getCacheFilePaths = (filesPaths: string[]): string[] => - filesPaths.filter((filePath) => filePath.endsWith('.cache.gz')); + filesPaths.filter((filePath) => filePath.endsWith('.cache.gz')); const integration = new NetlifyIntegration(); integration.addBuildEventHandler( - 'onPreBuild', - async ({ utils: { cache, run } }) => { - const files: string[] = await cache.list(); + 'onPreBuild', + async ({ utils: { cache, run } }) => { + const files: string[] = await cache.list(); - const cacheFiles = getCacheFilePaths(files); + const cacheFiles = getCacheFilePaths(files); - if (!cacheFiles.length) { - console.log('No snooty cache files found'); + if (!cacheFiles.length) { + console.log('No snooty cache files found'); - return; - } - // Don't want to restore duplicates, only restore snooty cache files - console.log('restoring snooty cache files'); + return; + } + // Don't want to restore duplicates, only restore snooty cache files + console.log('restoring snooty cache files'); - await Promise.all(cacheFiles.map((cacheFile) => cache.restore(cacheFile))); + await Promise.all(cacheFiles.map((cacheFile) => cache.restore(cacheFile))); - 
await checkForNewSnootyVersion(run); + await checkForNewSnootyVersion(run); - await downloadPersistenceModule(run); - }, + await downloadPersistenceModule(run); + }, ); integration.addBuildEventHandler( - 'onSuccess', - async ({ utils: { run, cache } }) => { - console.log('Creating cache files...'); - await run.command('./snooty-parser/snooty/snooty create-cache .'); - console.log('Cache files created'); - const filesPaths = await readdirAsync(process.cwd()); - - const cacheFiles = getCacheFilePaths(filesPaths); - - await Promise.all( - cacheFiles.map(async (filePath) => { - console.log(`Adding cache file: ${filePath}`); - await cache.save(filePath); - }), - ); - }, + 'onSuccess', + async ({ utils: { run, cache } }) => { + console.log('Creating cache files...'); + await run.command('./snooty-parser/snooty/snooty create-cache .'); + console.log('Cache files created'); + const filesPaths = await readdirAsync(process.cwd()); + + const cacheFiles = getCacheFilePaths(filesPaths); + + await Promise.all( + cacheFiles.map(async (filePath) => { + console.log(`Adding cache file: ${filePath}`); + await cache.save(filePath); + }), + ); + }, ); integration.addBuildEventHandler( - 'onEnd', - async ({ utils: { run, status } }) => { - console.log('Creating cache files...'); - const { all, stderr, stdout } = await run.command( - './snooty-parser/snooty/snooty create-cache .', - { all: true }, - ); - - const logs = all ?? stdout + stderr; - - const logsSplit = - logs - .split('\n') - .filter( - (row) => - !row.includes('INFO:snooty.gizaparser.domain') && - !row.includes('INFO:snooty.parser:cache'), - ) || []; - - let errorCount = 0; - let warningCount = 0; - - for (const row of logsSplit) { - if (row.includes('ERROR')) errorCount += 1; - if (row.includes('WARNING')) warningCount += 1; - } - - status.show({ - title: `Snooty Parser Logs - Errors: ${errorCount} | Warnings: ${warningCount}`, - summary: logsSplit.join('\n'), - }); - }, + 'onEnd', + async ({ utils: { run, status } }) => { + console.log('Creating cache files...'); + const { all, stderr, stdout } = await run.command( + './snooty-parser/snooty/snooty create-cache .', + { all: true }, + ); + + const logs = all ?? 
stdout + stderr; + + const logsSplit = + logs + .split('\n') + .filter( + (row) => + !row.includes('INFO:snooty.gizaparser.domain') && + !row.includes('INFO:snooty.parser:cache'), + ) || []; + + let errorCount = 0; + let warningCount = 0; + + for (const row of logsSplit) { + if (row.includes('ERROR')) errorCount += 1; + if (row.includes('WARNING')) warningCount += 1; + } + + status.show({ + title: `Snooty Parser Logs - Errors: ${errorCount} | Warnings: ${warningCount}`, + summary: logsSplit.join('\n'), + }); + }, ); export { integration }; diff --git a/snooty-cache/src/persistence.ts b/snooty-cache/src/persistence.ts index 8a83ae004..f90289476 100644 --- a/snooty-cache/src/persistence.ts +++ b/snooty-cache/src/persistence.ts @@ -5,25 +5,25 @@ const WORKER_POOL_PATH = `${process.cwd()}/docs-worker-pool`; const PERSISTENCE_PATH = `${WORKER_POOL_PATH}/modules/persistence`; export async function downloadPersistenceModule( - run: NetlifyPluginUtils['run'], + run: NetlifyPluginUtils['run'], ): Promise<void> { - const isModuleDownloaded = existsSync(WORKER_POOL_PATH); + const isModuleDownloaded = existsSync(WORKER_POOL_PATH); - if (isModuleDownloaded) return; + if (isModuleDownloaded) return; - await run.command( - 'git clone --depth 1 --filter=tree:0 https://github.com/mongodb/docs-worker-pool.git --sparse', - ); + await run.command( + 'git clone --depth 1 --filter=tree:0 https://github.com/mongodb/docs-worker-pool.git --sparse', + ); - await run.command('git sparse-checkout set --no-cone modules/persistence', { - cwd: WORKER_POOL_PATH, - }); + await run.command('git sparse-checkout set --no-cone modules/persistence', { + cwd: WORKER_POOL_PATH, + }); - await run.command('npm ci', { - cwd: PERSISTENCE_PATH, - }); + await run.command('npm ci', { + cwd: PERSISTENCE_PATH, + }); - await run.command('npm run build', { - cwd: PERSISTENCE_PATH, - }); + await run.command('npm run build', { + cwd: PERSISTENCE_PATH, + }); } diff --git a/snooty-cache/src/snooty-frontend-version-check.ts b/snooty-cache/src/snooty-frontend-version-check.ts index aa137efc9..479b23f44 100644 --- a/snooty-cache/src/snooty-frontend-version-check.ts +++ b/snooty-cache/src/snooty-frontend-version-check.ts @@ -9,9 +9,9 @@ import { promisify } from 'node:util'; const readFileAsync = promisify(readFile); interface GitHubCommitResponse { - commit: { - sha: string; - }; + commit: { + sha: string; + }; } /** @@ -21,34 +21,34 @@ interface GitHubCommitResponse { * @returns latest commit hash of the netlify-poc branch */ async function getLatestSnootyCommit(): Promise<string | undefined> { - try { - const response = await axios.get<GitHubCommitResponse>( - 'https://api.github.com/repos/mongodb/snooty/branches/netlify-poc', - { - headers: { - Accept: 'application/vnd.github+json', - 'X-GitHub-Api-Version': '2022-11-28', - }, - }, - ); + try { + const response = await axios.get<GitHubCommitResponse>( + 'https://api.github.com/repos/mongodb/snooty/branches/netlify-poc', + { + headers: { + Accept: 'application/vnd.github+json', + 'X-GitHub-Api-Version': '2022-11-28', + }, + }, + ); - const latestSha = response.data.commit.sha; + const latestSha = response.data.commit.sha; - return latestSha; - } catch (e) { - console.error('Could not retrieve latest SHA', e); - } + return latestSha; + } catch (e) { + console.error('Could not retrieve latest SHA', e); + } } async function getPackageLockHash(): Promise<string> { - const packageLock = await readFileAsync( - `${process.cwd()}/snooty/package-lock.json`, - ); + const packageLock = await readFileAsync( + `${process.cwd()}/snooty/package-lock.json`, + ); - const hashSum = 
createHash('sha256'); - hashSum.update(packageLock); + const hashSum = createHash('sha256'); + hashSum.update(packageLock); - return hashSum.digest('hex'); + return hashSum.digest('hex'); } /** @@ -57,36 +57,36 @@ async function getPackageLockHash(): Promise { * @param run the exec util provided by Netlify */ export async function checkForNewSnootyVersion(run: NetlifyPluginUtils['run']) { - console.log('Checking Snooty frontend version'); - const snootyDirExists = existsSync(`${process.cwd()}/snooty`); + console.log('Checking Snooty frontend version'); + const snootyDirExists = existsSync(`${process.cwd()}/snooty`); - if (snootyDirExists) { - const latestSha = await getLatestSnootyCommit(); + if (snootyDirExists) { + const latestSha = await getLatestSnootyCommit(); - const { stdout: currentSha } = await run.command('git rev-parse HEAD', { - cwd: `${process.cwd()}/snooty`, - }); + const { stdout: currentSha } = await run.command('git rev-parse HEAD', { + cwd: `${process.cwd()}/snooty`, + }); - if (currentSha === latestSha) { - console.log('No changes to the frontend. No update needed.'); - return; - } - console.log( - 'Current commit does not match the latest commit. Updating the snooty frontend repo', - ); - const prevPackageLockHash = await getPackageLockHash(); - await run.command('git pull --rebase', { cwd: `${process.cwd()}/snooty` }); + if (currentSha === latestSha) { + console.log('No changes to the frontend. No update needed.'); + return; + } + console.log( + 'Current commit does not match the latest commit. Updating the snooty frontend repo', + ); + const prevPackageLockHash = await getPackageLockHash(); + await run.command('git pull --rebase', { cwd: `${process.cwd()}/snooty` }); - const updatedPackageLockHash = await getPackageLockHash(); + const updatedPackageLockHash = await getPackageLockHash(); - if (prevPackageLockHash === updatedPackageLockHash) { - console.log( - 'Package-lock.json is unchanged. Not installing any additional dependencies', - ); - return; - } - console.log('Dependencies updating. Installing updates.'); - await run.command('npm ci', { cwd: `${process.cwd()}/snooty` }); - console.log('Updates for the frontend completed!'); - } + if (prevPackageLockHash === updatedPackageLockHash) { + console.log( + 'Package-lock.json is unchanged. Not installing any additional dependencies', + ); + return; + } + console.log('Dependencies updating. 
Installing updates.'); + await run.command('npm ci', { cwd: `${process.cwd()}/snooty` }); + console.log('Updates for the frontend completed!'); + } } diff --git a/snooty-cache/tsconfig.json b/snooty-cache/tsconfig.json index 5a3eebc48..cfd7d3953 100644 --- a/snooty-cache/tsconfig.json +++ b/snooty-cache/tsconfig.json @@ -1,13 +1,13 @@ { - "compilerOptions": { - "target": "ES2022", - "module": "ES2022", - "moduleResolution": "bundler", - "strict": true, - "rootDir": ".", - "paths": { - "bson": ["./node_modules/bson/src/"] - } - }, - "exclude": ["node_modules", "dist"] + "compilerOptions": { + "target": "ES2022", + "module": "ES2022", + "moduleResolution": "bundler", + "strict": true, + "rootDir": ".", + "paths": { + "bson": ["./node_modules/bson/src/"] + } + }, + "exclude": ["node_modules", "dist"] } From 7c94a4b3c117ce843e423601c59ab3566957ac0f Mon Sep 17 00:00:00 2001 From: branberry Date: Fri, 27 Sep 2024 14:32:46 -0500 Subject: [PATCH 7/7] Add error handling --- snooty-cache/src/index.ts | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/snooty-cache/src/index.ts b/snooty-cache/src/index.ts index ade0281fc..589d5a0e2 100644 --- a/snooty-cache/src/index.ts +++ b/snooty-cache/src/index.ts @@ -33,7 +33,11 @@ integration.addBuildEventHandler( await checkForNewSnootyVersion(run); - await downloadPersistenceModule(run); + try { + await downloadPersistenceModule(run); + } catch (e) { + console.error('Unable to run the persistence module', e); + } }, );