From 0a023cbe8633880018f82e9719010a9def51f75a Mon Sep 17 00:00:00 2001 From: anabellabuckvar <41971124+anabellabuckvar@users.noreply.github.com> Date: Fri, 27 Sep 2024 16:22:33 -0400 Subject: [PATCH 01/40] DOP-5036 clean up some comments --- .../src/generateManifest/manifest.ts | 17 +++++++------- search-manifest/src/index.ts | 22 ++++++------------- search-manifest/tests/utils/getManifest.ts | 7 +++--- 3 files changed, 19 insertions(+), 27 deletions(-) diff --git a/search-manifest/src/generateManifest/manifest.ts b/search-manifest/src/generateManifest/manifest.ts index 7476f6e1a..1d5690956 100644 --- a/search-manifest/src/generateManifest/manifest.ts +++ b/search-manifest/src/generateManifest/manifest.ts @@ -1,30 +1,29 @@ -import type { ManifestEntry } from './manifestEntry'; +import type { ManifestEntry } from "./manifestEntry"; export class Manifest { url: string; global: boolean; documents: ManifestEntry[]; - constructor(url = '', includeInGlobalSearch = false) { - this.url = url; - this.documents = []; - this.global = includeInGlobalSearch; - } + constructor(url = "", includeInGlobalSearch = false) { + this.url = url; + this.documents = []; + this.global = includeInGlobalSearch; + } + // Adds a document to a manifest addDocument(document: ManifestEntry) { - //Add a document to the manifest this.documents.push(document); } + // Returns the manifest as JSON formatted string export() { - //return the manifest as JSON formatted string const manifest = { url: this.url, includeInGlobalSearch: this.global, documents: this.documents, }; - //TODO: check that .stringify has exactly the same functionality + output as python "dumps" as was used in Mut return JSON.stringify(manifest); } } diff --git a/search-manifest/src/index.ts b/search-manifest/src/index.ts index 94189fb91..1c246b066 100644 --- a/search-manifest/src/index.ts +++ b/search-manifest/src/index.ts @@ -17,13 +17,11 @@ const readdirAsync = promisify(readdir); const integration = new NetlifyIntegration(); export const generateManifest = async () => { - // create Manifest object const manifest = new Manifest(); console.log("=========== generating manifests ================"); - //go into documents directory and get list of file entries + // Get list of file entries in documents dir const entries = await readdirAsync("documents", { recursive: true }); - const mappedEntries = entries.filter((fileName) => { return ( fileName.includes(".bson") && @@ -33,14 +31,12 @@ export const generateManifest = async () => { ); }); - process.chdir("documents"); for (const entry of mappedEntries) { - //each file is read and decoded - const decoded = BSON.deserialize(readFileSync(`${entry}`)); - //put file into Document object - //export Document object + // Read and decode each entry + const decoded = BSON.deserialize(readFileSync(`documents/${entry}`)); + + // Parse data into a document and format it as a Manifest document const processedDoc = new Document(decoded).exportAsManifestDocument(); - //add document to manifest object if it was able to be indexed if (processedDoc) manifest.addDocument(processedDoc); } return manifest; @@ -50,12 +46,11 @@ export const generateManifest = async () => { integration.addBuildEventHandler( "onSuccess", async ({ utils: { run }, netlifyConfig }) => { - // Get content repo zipfile in AST representation. 
+ // Get content repo zipfile as AST representation await run.command("unzip -o bundle.zip"); const branch = netlifyConfig.build?.environment["BRANCH"]; - //use export function for uploading to S3 const manifest = await generateManifest(); console.log("=========== finished generating manifests ================"); @@ -71,9 +66,7 @@ integration.addBuildEventHandler( includeInGlobalSearch: boolean; } = await getProperties(branch); - //uploads manifests to S3 console.log("=========== Uploading Manifests to S3================="); - //upload manifests to S3 const uploadParams: s3UploadParams = { bucket: "docs-search-indexes-test", //TODO: change this values based on environments @@ -91,8 +84,7 @@ integration.addBuildEventHandler( manifest.url = url; manifest.global = includeInGlobalSearch; - //uploads manifests to atlas - console.log("=========== Uploading Manifests ================="); + console.log("=========== Uploading Manifests to Atlas ================="); await uploadManifest(manifest, searchProperty); console.log("=========== Manifests uploaded to Atlas ================="); } catch (e) { diff --git a/search-manifest/tests/utils/getManifest.ts b/search-manifest/tests/utils/getManifest.ts index ad1909c2d..792218e7f 100644 --- a/search-manifest/tests/utils/getManifest.ts +++ b/search-manifest/tests/utils/getManifest.ts @@ -1,8 +1,9 @@ -import { generateManifest } from '../../src'; +import { generateManifest } from "../../src"; export const getManifest = async (manifestName: string) => { - process.chdir(`./documents/docs-${manifestName}`); + process.chdir(`documents/docs-${manifestName}`); const manifest = await generateManifest(); - process.chdir(`../../../`); + // Restore cwd + process.chdir(`../../`); return manifest; }; From fe44e16bbf42b593f04afa3612a3a8618a1bac8a Mon Sep 17 00:00:00 2001 From: anabellabuckvar <41971124+anabellabuckvar@users.noreply.github.com> Date: Fri, 27 Sep 2024 17:35:03 -0400 Subject: [PATCH 02/40] DOP-5036 refactor delete stale --- .../src/uploadToAtlas/deleteStale.ts | 71 +++++++++---------- .../uploadToAtlas/deleteStaleProperties.ts | 16 ----- 2 files changed, 33 insertions(+), 54 deletions(-) delete mode 100644 search-manifest/src/uploadToAtlas/deleteStaleProperties.ts diff --git a/search-manifest/src/uploadToAtlas/deleteStale.ts b/search-manifest/src/uploadToAtlas/deleteStale.ts index 85ad833ad..dcd009c71 100644 --- a/search-manifest/src/uploadToAtlas/deleteStale.ts +++ b/search-manifest/src/uploadToAtlas/deleteStale.ts @@ -1,41 +1,36 @@ -export const deleteStaleDocuments = async ( - searchProperty: string, - manifestRevisionId: string, -) => { - console.debug(`Removing old documents`); - return { - deleteMany: { - filter: { - searchProperty: searchProperty, - manifestRevisionId: { $ne: manifestRevisionId }, - }, - }, - }; - // const deleteResult = await collection.deleteMany( - // { - // searchProperty: searchProperty, - // manifestRevisionId: { $ne: manifestRevisionId }, - // }, - // { session } - // ); - // status.deleted += - // deleteResult.deletedCount === undefined ? 
0 : deleteResult.deletedCount; - // console.debug( - // `Removed ${deleteResult.deletedCount} entries from ${collection.collectionName}` - // ); +export const deleteStaleDocuments = async ({ + searchProperty, + manifestRevisionId, +}: { + searchProperty: string; + manifestRevisionId: string; +}) => { + console.log( + `Removing stale documents with search property ${searchProperty} ` + ); + return { + deleteMany: { + filter: { + searchProperty: searchProperty, + manifestRevisionId: { $ne: manifestRevisionId }, + }, + }, + }; }; -export const deleteStaleProperties = async ( - searchProperty: string, - manifestRevisionId: string, -) => { - console.debug(`Removing old documents`); - return { - deleteMany: { - filter: { - searchProperty: searchProperty, - manifestRevisionId: { $ne: manifestRevisionId }, - }, - }, - }; +import { db } from "./searchConnector"; +import { DatabaseDocument } from "../types"; + +const ATLAS_SEARCH_URI = `mongodb+srv://${process.env.MONGO_ATLAS_USERNAME}:${process.env.MONGO_ATLAS_PASSWORD}@${process.env.MONGO_ATLAS_SEARCH_HOST}/?retryWrites=true&w=majority`; + +//TODO: change these teamwide env vars in Netlify UI when ready to move to prod +const SEARCH_DB_NAME = `${process.env.MONGO_ATLAS_SEARCH_DB_NAME}`; + +export const deleteStaleProperties = async (searchProperty: string) => { + const dbSession = await db({ uri: ATLAS_SEARCH_URI, dbName: SEARCH_DB_NAME }); + const documentsColl = dbSession.collection("documents"); + console.debug(`Removing all documents with stale property ${searchProperty}`); + const query = { searchProperty: { $regex: searchProperty } }; + const status = await documentsColl?.deleteMany(query); + return status; }; diff --git a/search-manifest/src/uploadToAtlas/deleteStaleProperties.ts b/search-manifest/src/uploadToAtlas/deleteStaleProperties.ts deleted file mode 100644 index 94fe76d2e..000000000 --- a/search-manifest/src/uploadToAtlas/deleteStaleProperties.ts +++ /dev/null @@ -1,16 +0,0 @@ -import { db, teardown } from "./searchConnector"; -import { DatabaseDocument } from "../types"; - -const ATLAS_SEARCH_URI = `mongodb+srv://${process.env.MONGO_ATLAS_USERNAME}:${process.env.MONGO_ATLAS_PASSWORD}@${process.env.MONGO_ATLAS_SEARCH_HOST}/?retryWrites=true&w=majority`; - -//TODO: change these teamwide env vars in Netlify UI when ready to move to prod -const SEARCH_DB_NAME = `${process.env.MONGO_ATLAS_SEARCH_DB_NAME}`; - -export const deleteStaleProperties = async (searchProperty: string) => { - const dbSession = await db({ uri: ATLAS_SEARCH_URI, dbName: SEARCH_DB_NAME }); - const documentsColl = dbSession.collection("documents"); - console.debug(`Removing old documents`); - const query = { searchProperty: { $regex: searchProperty } }; - const status = await documentsColl?.deleteMany(query); - return status; -}; From 18c4b40d4448258c1833c3952788859a147aa0d8 Mon Sep 17 00:00:00 2001 From: anabellabuckvar <41971124+anabellabuckvar@users.noreply.github.com> Date: Fri, 27 Sep 2024 17:36:46 -0400 Subject: [PATCH 03/40] DOP-5036 refactor delete stale --- search-manifest/src/uploadToAtlas/getProperties.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/search-manifest/src/uploadToAtlas/getProperties.ts b/search-manifest/src/uploadToAtlas/getProperties.ts index d08d4939b..644946ec8 100644 --- a/search-manifest/src/uploadToAtlas/getProperties.ts +++ b/search-manifest/src/uploadToAtlas/getProperties.ts @@ -7,7 +7,7 @@ import { ReposBranchesDocument, } from "../types"; import { assertTrailingSlash } from "../utils"; -import { 
deleteStaleProperties } from "./deleteStaleProperties"; +import { deleteStaleProperties } from "./deleteStale"; // helper function to find the associated branch export const getBranch = (branches: Array, branchName: string) => { From 35fafb37fd7af5ac0fdd16ab0dfd1e425df8ea67 Mon Sep 17 00:00:00 2001 From: anabellabuckvar <41971124+anabellabuckvar@users.noreply.github.com> Date: Fri, 27 Sep 2024 17:44:19 -0400 Subject: [PATCH 04/40] DOP-5036 improve how to get collections --- .../src/uploadToAtlas/getProperties.ts | 34 ++++++++++--------- .../src/uploadToAtlas/searchConnector.ts | 17 ++++++++-- .../tests/integration/deleteStale.test.ts | 9 ----- .../tests/unit/getProperties.test.ts | 5 +++ 4 files changed, 37 insertions(+), 28 deletions(-) delete mode 100644 search-manifest/tests/integration/deleteStale.test.ts diff --git a/search-manifest/src/uploadToAtlas/getProperties.ts b/search-manifest/src/uploadToAtlas/getProperties.ts index 644946ec8..75670a05a 100644 --- a/search-manifest/src/uploadToAtlas/getProperties.ts +++ b/search-manifest/src/uploadToAtlas/getProperties.ts @@ -1,5 +1,5 @@ -import { Collection, Db, Document, WithId } from "mongodb"; -import { db, teardown } from "./searchConnector"; +import { Collection, Db } from "mongodb"; +import { db, getCollection, teardown } from "./searchConnector"; import { BranchEntry, DatabaseDocument, @@ -9,6 +9,9 @@ import { import { assertTrailingSlash } from "../utils"; import { deleteStaleProperties } from "./deleteStale"; +const ATLAS_CLUSTER0_URI = `mongodb+srv://${process.env.MONGO_ATLAS_USERNAME}:${process.env.MONGO_ATLAS_PASSWORD}@${process.env.MONGO_ATLAS_CLUSTER0_HOST}/?retryWrites=true&w=majority`; +const SNOOTY_DB_NAME = `${process.env.MONGO_ATLAS_POOL_DB_NAME}`; + // helper function to find the associated branch export const getBranch = (branches: Array, branchName: string) => { for (const branchObj of branches) { @@ -20,8 +23,6 @@ export const getBranch = (branches: Array, branchName: string) => { }; const getProperties = async (branchName: string) => { - const ATLAS_CLUSTER0_URI = `mongodb+srv://${process.env.MONGO_ATLAS_USERNAME}:${process.env.MONGO_ATLAS_PASSWORD}@${process.env.MONGO_ATLAS_CLUSTER0_HOST}/?retryWrites=true&w=majority`; - const SNOOTY_DB_NAME = `${process.env.MONGO_ATLAS_POOL_DB_NAME}`; const REPO_NAME = process.env.REPO_NAME; //check that an environment variable for repo name was set @@ -31,9 +32,6 @@ const getProperties = async (branchName: string) => { ); } - let dbSession: Db; - let repos_branches: Collection; - let docsets: Collection; let url: string = ""; let searchProperty: string = ""; let includeInGlobalSearch: boolean = false; @@ -41,14 +39,13 @@ const getProperties = async (branchName: string) => { let docsetRepo: DocsetsDocument | null; let version: string; - try { - //connect to database and get repos_branches, docsets collections - dbSession = await db({ uri: ATLAS_CLUSTER0_URI, dbName: SNOOTY_DB_NAME }); - repos_branches = dbSession.collection("repos_branches"); - docsets = dbSession.collection("docsets"); - } catch (e) { - throw new Error(`issue starting session for Snooty Pool Database ${e}`); - } + //connect to database and get repos_branches, docsets collections + const dbSession = await db({ + uri: ATLAS_CLUSTER0_URI, + dbName: SNOOTY_DB_NAME, + }); + const repos_branches = getCollection(dbSession, "repos_branches"); + const docsets = getCollection(dbSession, "docsets"); const query = { repoName: REPO_NAME, @@ -119,11 +116,16 @@ const getProperties = async (branchName: string) => { `Search 
manifest should not be generated for inactive version ${version} of repo ${REPO_NAME}. Removing all associated manifests` ); } + return { + searchProperty, + projectName: project, + url, + includeInGlobalSearch, + }; } catch (e) { console.error(`Error`, e); throw e; } - return { searchProperty, projectName: project, url, includeInGlobalSearch }; }; export default getProperties; diff --git a/search-manifest/src/uploadToAtlas/searchConnector.ts b/search-manifest/src/uploadToAtlas/searchConnector.ts index 00d9beb7e..5e10c59ab 100644 --- a/search-manifest/src/uploadToAtlas/searchConnector.ts +++ b/search-manifest/src/uploadToAtlas/searchConnector.ts @@ -1,5 +1,6 @@ -import type { Db } from 'mongodb'; -import * as mongodb from 'mongodb'; +import type { Db } from "mongodb"; +import * as mongodb from "mongodb"; +import { DatabaseDocument } from "../types"; // We should only ever have one client active at a time. @@ -8,7 +9,7 @@ let dbInstance: Db; let client: mongodb.MongoClient; export const teardown = async () => { - await client.close(); + await client.close(); }; // Handles memoization of db object, and initial connection logic if needs to be initialized @@ -24,3 +25,13 @@ export const db = async ({ uri, dbName }: { uri: string; dbName: string }) => { } return dbInstance; }; + +export const getCollection = (dbSession: Db, collection: string) => { + try { + return dbSession.collection(collection); + } catch (e) { + throw new Error( + `Error getting ${collection} collection from client: ${dbSession}` + ); + } +}; diff --git a/search-manifest/tests/integration/deleteStale.test.ts b/search-manifest/tests/integration/deleteStale.test.ts deleted file mode 100644 index 7bca6afcf..000000000 --- a/search-manifest/tests/integration/deleteStale.test.ts +++ /dev/null @@ -1,9 +0,0 @@ -//TODO: test that it removes old search properties, documents -import { describe, expect, test, it, vi } from 'vitest'; - -function sum(a: number, b: number) { - return a + b; -} -test('dummy test', () => { - expect(sum(1, 2)).toBe(3); -}); diff --git a/search-manifest/tests/unit/getProperties.test.ts b/search-manifest/tests/unit/getProperties.test.ts index c112f0a7c..92f547278 100644 --- a/search-manifest/tests/unit/getProperties.test.ts +++ b/search-manifest/tests/unit/getProperties.test.ts @@ -47,8 +47,12 @@ beforeAll(async () => { beforeEach(async () => { vi.mock("../../src/uploadToAtlas/searchConnector", async () => { const { mockDb, teardownMockDbClient } = await import("../utils/mockDB"); + const { getCollection } = await import( + "../../src/uploadToAtlas/searchConnector" + ); return { teardown: teardownMockDbClient, + getCollection: getCollection, db: async () => { //mock db of repos_branches db = await mockDb(); @@ -152,6 +156,7 @@ describe( //getProperties for beta doens't change number of documents in collection process.env.repo_name = "docs-compass"; await expect(getProperties(BRANCH_NAME_BETA)).rejects.toThrow(); + await mockDb(); expect( await db.collection("documents").countDocuments() ).toEqual(documentCount); From 6625f517474dce61ac650e7f4edd2536e4984476 Mon Sep 17 00:00:00 2001 From: anabellabuckvar <41971124+anabellabuckvar@users.noreply.github.com> Date: Fri, 27 Sep 2024 18:04:14 -0400 Subject: [PATCH 05/40] DOP-5036 create getRepo function --- .../src/uploadToAtlas/getProperties.ts | 83 ++++++++++--------- .../src/uploadToAtlas/uploadManifest.ts | 20 ++--- .../tests/integration/uploadToAtlas.test.ts | 6 +- .../tests/integration/uploadToS3.test.ts | 3 +- 4 files changed, 61 insertions(+), 51 deletions(-) 
diff --git a/search-manifest/src/uploadToAtlas/getProperties.ts b/search-manifest/src/uploadToAtlas/getProperties.ts index 75670a05a..a45459d26 100644 --- a/search-manifest/src/uploadToAtlas/getProperties.ts +++ b/search-manifest/src/uploadToAtlas/getProperties.ts @@ -12,6 +12,49 @@ import { deleteStaleProperties } from "./deleteStale"; const ATLAS_CLUSTER0_URI = `mongodb+srv://${process.env.MONGO_ATLAS_USERNAME}:${process.env.MONGO_ATLAS_PASSWORD}@${process.env.MONGO_ATLAS_CLUSTER0_HOST}/?retryWrites=true&w=majority`; const SNOOTY_DB_NAME = `${process.env.MONGO_ATLAS_POOL_DB_NAME}`; +export const getRepo = async ({ + repoName, + repos_branches, +}: { + repoName: string; + repos_branches: Collection; +}) => { + const query = { + repoName: repoName, + }; + + const repo = await repos_branches.findOne(query, { + projection: { + _id: 0, + project: 1, + search: 1, + branches: 1, + prodDeployable: 1, + internalOnly: 1, + }, + }); + if (!repo) { + throw new Error( + `Could not get repos_branches entry for repo ${repoName}, ${repo}, ${JSON.stringify( + query + )}` + ); + } + if ( + repo.internalOnly || + !repo.prodDeployable || + !repo.search?.categoryTitle + ) { + // deletestaleproperties here for ALL manifests beginning with this repo? or just for this project-version searchproperty + await deleteStaleProperties(repo.project); + throw new Error( + `Search manifest should not be generated for repo ${repoName}. Removing all associated manifests` + ); + } + + return repo; +}; + // helper function to find the associated branch export const getBranch = (branches: Array, branchName: string) => { for (const branchObj of branches) { @@ -35,7 +78,6 @@ const getProperties = async (branchName: string) => { let url: string = ""; let searchProperty: string = ""; let includeInGlobalSearch: boolean = false; - let repo: ReposBranchesDocument | null; let docsetRepo: DocsetsDocument | null; let version: string; @@ -47,32 +89,10 @@ const getProperties = async (branchName: string) => { const repos_branches = getCollection(dbSession, "repos_branches"); const docsets = getCollection(dbSession, "docsets"); - const query = { + const repo: ReposBranchesDocument = await getRepo({ repoName: REPO_NAME, - }; - - try { - repo = await repos_branches.findOne(query, { - projection: { - _id: 0, - project: 1, - search: 1, - branches: 1, - prodDeployable: 1, - internalOnly: 1, - }, - }); - if (!repo) { - throw new Error( - `Could not get repos_branches entry for repo ${REPO_NAME}, ${repo}, ${JSON.stringify( - query - )}` - ); - } - } catch (e) { - console.error(`Error while getting repos_branches entry in Atlas: ${e}`); - throw e; - } + repos_branches, + }); const { project } = repo; @@ -99,17 +119,6 @@ const getProperties = async (branchName: string) => { version = urlSlug || gitBranchName; searchProperty = `${repo.search?.categoryName ?? project}-${version}`; - if ( - repo.internalOnly || - !repo.prodDeployable || - !repo.search?.categoryTitle - ) { - // deletestaleproperties here for ALL manifests beginning with this repo? or just for this project-version searchproperty - await deleteStaleProperties(project); - throw new Error( - `Search manifest should not be generated for repo ${REPO_NAME}. 
Removing all associated manifests` - ); - } if (!active) { deleteStaleProperties(searchProperty); throw new Error( diff --git a/search-manifest/src/uploadToAtlas/uploadManifest.ts b/search-manifest/src/uploadToAtlas/uploadManifest.ts index a0bd36783..8904850e1 100644 --- a/search-manifest/src/uploadToAtlas/uploadManifest.ts +++ b/search-manifest/src/uploadToAtlas/uploadManifest.ts @@ -1,5 +1,5 @@ import type { Manifest } from "../generateManifest/manifest"; -import { db, teardown } from "./searchConnector"; +import { db, getCollection, teardown } from "./searchConnector"; import assert from "assert"; import type { RefreshInfo, DatabaseDocument } from "../types"; import { generateHash, joinUrl } from "../utils"; @@ -52,17 +52,13 @@ export const uploadManifest = async ( if (!manifest?.documents?.length) { return Promise.reject(new Error("Invalid manifest")); } - //start a session - let documentsColl; - try { - const dbSession = await db({ - uri: ATLAS_SEARCH_URI, - dbName: SEARCH_DB_NAME, - }); - documentsColl = dbSession.collection("documents"); - } catch (e) { - console.error("issue starting session for Search Database", e); - } + + const dbSession = await db({ + uri: ATLAS_SEARCH_URI, + dbName: SEARCH_DB_NAME, + }); + const documentsColl = getCollection(dbSession, "documents"); + const status: RefreshInfo = { deleted: 0, upserted: 0, diff --git a/search-manifest/tests/integration/uploadToAtlas.test.ts b/search-manifest/tests/integration/uploadToAtlas.test.ts index 794785d56..8277ab8cd 100644 --- a/search-manifest/tests/integration/uploadToAtlas.test.ts +++ b/search-manifest/tests/integration/uploadToAtlas.test.ts @@ -19,9 +19,13 @@ const PROPERTY_NAME = "dummyName"; //teardown connections beforeEach(async () => { - vi.mock("../../src/uploadToAtlas/searchConnector", async () => { + vi.mock("../../src/uploadToAtlas/searchConnector", async (importOriginal) => { const { mockDb, teardownMockDbClient } = await import("../utils/mockDB"); + const { getCollection } = await import( + "../../src/uploadToAtlas/searchConnector" + ); return { + getCollection: getCollection, teardown: teardownMockDbClient, db: async () => { const db = await mockDb(); diff --git a/search-manifest/tests/integration/uploadToS3.test.ts b/search-manifest/tests/integration/uploadToS3.test.ts index b569f847c..b8de24b93 100644 --- a/search-manifest/tests/integration/uploadToS3.test.ts +++ b/search-manifest/tests/integration/uploadToS3.test.ts @@ -7,6 +7,7 @@ import { import { mockClient } from "aws-sdk-client-mock"; import { getManifest } from "../utils/getManifest"; import { uploadManifestToS3 } from "../../src/uploadToS3/uploadManifest"; +import { s3UploadParams } from "../../src/types"; const MANIFEST = await getManifest("node"); const PROJECT_NAME = `node`; @@ -38,7 +39,7 @@ beforeEach(async () => { }); describe("upload manifest to S3 behaves as expected", () => { - const uploadParams = { + const uploadParams: s3UploadParams = { bucket: "docs-search-indexes-test", prefix: "search-indexes/ab-testing", fileName: `${PROJECT_NAME}-${BRANCH}.json`, From a2fd57ead93ab1d067150013e816a2099b1726e4 Mon Sep 17 00:00:00 2001 From: anabellabuckvar <41971124+anabellabuckvar@users.noreply.github.com> Date: Fri, 27 Sep 2024 18:15:15 -0400 Subject: [PATCH 06/40] DOP-5036 create getDocsetEntry function --- .../src/uploadToAtlas/getProperties.ts | 83 +++++++++---------- 1 file changed, 39 insertions(+), 44 deletions(-) diff --git a/search-manifest/src/uploadToAtlas/getProperties.ts b/search-manifest/src/uploadToAtlas/getProperties.ts index 
a45459d26..9ba632532 100644 --- a/search-manifest/src/uploadToAtlas/getProperties.ts +++ b/search-manifest/src/uploadToAtlas/getProperties.ts @@ -12,7 +12,19 @@ import { deleteStaleProperties } from "./deleteStale"; const ATLAS_CLUSTER0_URI = `mongodb+srv://${process.env.MONGO_ATLAS_USERNAME}:${process.env.MONGO_ATLAS_PASSWORD}@${process.env.MONGO_ATLAS_CLUSTER0_HOST}/?retryWrites=true&w=majority`; const SNOOTY_DB_NAME = `${process.env.MONGO_ATLAS_POOL_DB_NAME}`; -export const getRepo = async ({ +export const getDocsetEntry = async ( + docsets: Collection, + project: string +) => { + const docsetsQuery = { project: { $eq: project } }; + const docset = await docsets.findOne(docsetsQuery); + if (!docset) { + throw new Error(`Error while getting docsets entry in Atlas ${e}`); + } + return docset; +}; + +export const getRepoEntry = async ({ repoName, repos_branches, }: { @@ -75,12 +87,6 @@ const getProperties = async (branchName: string) => { ); } - let url: string = ""; - let searchProperty: string = ""; - let includeInGlobalSearch: boolean = false; - let docsetRepo: DocsetsDocument | null; - let version: string; - //connect to database and get repos_branches, docsets collections const dbSession = await db({ uri: ATLAS_CLUSTER0_URI, @@ -89,52 +95,41 @@ const getProperties = async (branchName: string) => { const repos_branches = getCollection(dbSession, "repos_branches"); const docsets = getCollection(dbSession, "docsets"); - const repo: ReposBranchesDocument = await getRepo({ + const repo: ReposBranchesDocument = await getRepoEntry({ repoName: REPO_NAME, repos_branches, }); const { project } = repo; - try { - const docsetsQuery = { project: { $eq: project } }; - docsetRepo = await docsets.findOne(docsetsQuery); - if (docsetRepo) { - //TODO: change based on environment - url = assertTrailingSlash( - docsetRepo.url?.dotcomprd + docsetRepo.prefix.dotcomprd - ); - } - } catch (e) { - console.error(`Error while getting docsets entry in Atlas ${e}`); - throw e; - } + const docsetEntry = await getDocsetEntry(docsets, project); + //TODO: change based on environment + const url = assertTrailingSlash( + docsetEntry.url?.dotcomprd + docsetEntry.prefix.dotcomprd + ); - try { - const { isStableBranch, gitBranchName, active, urlSlug } = getBranch( - repo.branches, - branchName + const { isStableBranch, gitBranchName, active, urlSlug } = getBranch( + repo.branches, + branchName + ); + + const includeInGlobalSearch = isStableBranch; + const version = urlSlug || gitBranchName; + const searchProperty = `${repo.search?.categoryName ?? project}-${version}`; + + if (!active) { + await deleteStaleProperties(searchProperty); + throw new Error( + `Search manifest should not be generated for inactive version ${version} of repo ${REPO_NAME}. Removing all associated manifests` ); - includeInGlobalSearch = isStableBranch; - version = urlSlug || gitBranchName; - searchProperty = `${repo.search?.categoryName ?? project}-${version}`; - - if (!active) { - deleteStaleProperties(searchProperty); - throw new Error( - `Search manifest should not be generated for inactive version ${version} of repo ${REPO_NAME}. 
Removing all associated manifests` - ); - } - return { - searchProperty, - projectName: project, - url, - includeInGlobalSearch, - }; - } catch (e) { - console.error(`Error`, e); - throw e; } + await teardown(); + return { + searchProperty, + projectName: project, + url, + includeInGlobalSearch, + }; }; export default getProperties; From c126e41ec1c61351aca86f0d31a1eda70002def4 Mon Sep 17 00:00:00 2001 From: anabellabuckvar <41971124+anabellabuckvar@users.noreply.github.com> Date: Fri, 27 Sep 2024 18:16:31 -0400 Subject: [PATCH 07/40] DOP-5036 create getDocsetEntry function --- search-manifest/src/uploadToAtlas/getProperties.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/search-manifest/src/uploadToAtlas/getProperties.ts b/search-manifest/src/uploadToAtlas/getProperties.ts index 9ba632532..a3606efa5 100644 --- a/search-manifest/src/uploadToAtlas/getProperties.ts +++ b/search-manifest/src/uploadToAtlas/getProperties.ts @@ -19,7 +19,7 @@ export const getDocsetEntry = async ( const docsetsQuery = { project: { $eq: project } }; const docset = await docsets.findOne(docsetsQuery); if (!docset) { - throw new Error(`Error while getting docsets entry in Atlas ${e}`); + throw new Error(`Error while getting docsets entry in Atlas`); } return docset; }; From eaa2126933701a7fa29ee7430c81164e9bbfaf45 Mon Sep 17 00:00:00 2001 From: anabellabuckvar <41971124+anabellabuckvar@users.noreply.github.com> Date: Mon, 30 Sep 2024 11:07:28 -0400 Subject: [PATCH 08/40] DOP-5036 log client --- search-manifest/src/uploadToAtlas/searchConnector.ts | 1 + 1 file changed, 1 insertion(+) diff --git a/search-manifest/src/uploadToAtlas/searchConnector.ts b/search-manifest/src/uploadToAtlas/searchConnector.ts index 5e10c59ab..b0d72e521 100644 --- a/search-manifest/src/uploadToAtlas/searchConnector.ts +++ b/search-manifest/src/uploadToAtlas/searchConnector.ts @@ -17,6 +17,7 @@ export const db = async ({ uri, dbName }: { uri: string; dbName: string }) => { client = new mongodb.MongoClient(uri); try { await client.connect(); + console.log(JSON.stringify(client)); dbInstance = client.db(dbName); } catch (error) { const err = `Error at db client connection: ${error} for uri ${uri} and db name ${dbName}`; From 1e7587458982f48433784d588e678f8d85f1443a Mon Sep 17 00:00:00 2001 From: anabellabuckvar <41971124+anabellabuckvar@users.noreply.github.com> Date: Mon, 30 Sep 2024 11:32:49 -0400 Subject: [PATCH 09/40] DOP-5036 unlog client --- search-manifest/src/uploadToAtlas/searchConnector.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/search-manifest/src/uploadToAtlas/searchConnector.ts b/search-manifest/src/uploadToAtlas/searchConnector.ts index b0d72e521..31123a8c1 100644 --- a/search-manifest/src/uploadToAtlas/searchConnector.ts +++ b/search-manifest/src/uploadToAtlas/searchConnector.ts @@ -17,7 +17,7 @@ export const db = async ({ uri, dbName }: { uri: string; dbName: string }) => { client = new mongodb.MongoClient(uri); try { await client.connect(); - console.log(JSON.stringify(client)); + // console.log(JSON.stringify(client)); dbInstance = client.db(dbName); } catch (error) { const err = `Error at db client connection: ${error} for uri ${uri} and db name ${dbName}`; From 10ace664267d56bb3cc3cb2235b9f928f1f798e0 Mon Sep 17 00:00:00 2001 From: anabellabuckvar <41971124+anabellabuckvar@users.noreply.github.com> Date: Mon, 30 Sep 2024 11:40:49 -0400 Subject: [PATCH 10/40] DOP-5036 log client --- search-manifest/src/uploadToAtlas/searchConnector.ts | 2 +- 1 file changed, 1 insertion(+), 1 
deletion(-) diff --git a/search-manifest/src/uploadToAtlas/searchConnector.ts b/search-manifest/src/uploadToAtlas/searchConnector.ts index 31123a8c1..c9c1ef837 100644 --- a/search-manifest/src/uploadToAtlas/searchConnector.ts +++ b/search-manifest/src/uploadToAtlas/searchConnector.ts @@ -17,7 +17,7 @@ export const db = async ({ uri, dbName }: { uri: string; dbName: string }) => { client = new mongodb.MongoClient(uri); try { await client.connect(); - // console.log(JSON.stringify(client)); + console.log(client); dbInstance = client.db(dbName); } catch (error) { const err = `Error at db client connection: ${error} for uri ${uri} and db name ${dbName}`; From 99567dce4a9f377d77e7e6a9d589f59f0cfb896b Mon Sep 17 00:00:00 2001 From: anabellabuckvar <41971124+anabellabuckvar@users.noreply.github.com> Date: Mon, 30 Sep 2024 12:55:10 -0400 Subject: [PATCH 11/40] DOP-5036 test new teardown methods --- search-manifest/src/index.ts | 4 +- .../src/uploadToAtlas/getProperties.ts | 4 +- .../src/uploadToAtlas/searchConnector.ts | 48 ++++++++++++++++--- .../src/uploadToAtlas/uploadManifest.ts | 4 +- 4 files changed, 48 insertions(+), 12 deletions(-) diff --git a/search-manifest/src/index.ts b/search-manifest/src/index.ts index 1c246b066..1071e9f26 100644 --- a/search-manifest/src/index.ts +++ b/search-manifest/src/index.ts @@ -9,7 +9,7 @@ import { uploadManifest } from "./uploadToAtlas/uploadManifest"; import { readdir, readFileSync } from "fs"; import getProperties from "./uploadToAtlas/getProperties"; import { uploadManifestToS3 } from "./uploadToS3/uploadManifest"; -import { teardown } from "./uploadToAtlas/searchConnector"; +import { closeSearchDb, teardown } from "./uploadToAtlas/searchConnector"; import { s3UploadParams } from "./types"; const readdirAsync = promisify(readdir); @@ -90,7 +90,7 @@ integration.addBuildEventHandler( } catch (e) { console.log("Manifest could not be uploaded", e); } finally { - teardown(); + closeSearchDb(); } } ); diff --git a/search-manifest/src/uploadToAtlas/getProperties.ts b/search-manifest/src/uploadToAtlas/getProperties.ts index a3606efa5..d979507fe 100644 --- a/search-manifest/src/uploadToAtlas/getProperties.ts +++ b/search-manifest/src/uploadToAtlas/getProperties.ts @@ -1,5 +1,5 @@ import { Collection, Db } from "mongodb"; -import { db, getCollection, teardown } from "./searchConnector"; +import { closeSnootyDb, db, getCollection, teardown } from "./searchConnector"; import { BranchEntry, DatabaseDocument, @@ -123,7 +123,7 @@ const getProperties = async (branchName: string) => { `Search manifest should not be generated for inactive version ${version} of repo ${REPO_NAME}. Removing all associated manifests` ); } - await teardown(); + await closeSnootyDb(); return { searchProperty, projectName: project, diff --git a/search-manifest/src/uploadToAtlas/searchConnector.ts b/search-manifest/src/uploadToAtlas/searchConnector.ts index c9c1ef837..b382e0807 100644 --- a/search-manifest/src/uploadToAtlas/searchConnector.ts +++ b/search-manifest/src/uploadToAtlas/searchConnector.ts @@ -5,26 +5,62 @@ import { DatabaseDocument } from "../types"; // We should only ever have one client active at a time. 
// cached db object, so we can handle initial connection process once if unitialized -let dbInstance: Db; -let client: mongodb.MongoClient; -export const teardown = async () => { +const ATLAS_CLUSTER0_URI = `mongodb+srv://${process.env.MONGO_ATLAS_USERNAME}:${process.env.MONGO_ATLAS_PASSWORD}@${process.env.MONGO_ATLAS_CLUSTER0_HOST}/?retryWrites=true&w=majority`; +const SNOOTY_DB_NAME = `${process.env.MONGO_ATLAS_POOL_DB_NAME}`; + +const ATLAS_SEARCH_URI = `mongodb+srv://${process.env.MONGO_ATLAS_USERNAME}:${process.env.MONGO_ATLAS_PASSWORD}@${process.env.MONGO_ATLAS_SEARCH_HOST}/?retryWrites=true&w=majority`; +//TODO: change these teamwide env vars in Netlify UI when ready to move to prod +const SEARCH_DB_NAME = `${process.env.MONGO_ATLAS_SEARCH_DB_NAME}`; + +let searchDbClient: mongodb.MongoClient; +let snootyDbClient: mongodb.MongoClient; + +export const teardown = async (client: mongodb.MongoClient) => { await client.close(); }; +export const closeSnootyDb = async () => { + if (snootyDbClient) teardown(snootyDbClient); + else { + console.log("No client connection open to Snooty Db"); + } +}; + +export const closeSearchDb = async () => { + if (searchDbClient) teardown(searchDbClient); + else { + console.log("No client connection open to Snooty Db"); + } +}; + // Handles memoization of db object, and initial connection logic if needs to be initialized export const db = async ({ uri, dbName }: { uri: string; dbName: string }) => { - client = new mongodb.MongoClient(uri); + const client = new mongodb.MongoClient(uri); try { await client.connect(); console.log(client); - dbInstance = client.db(dbName); + const dbInstance = client.db(dbName); + return dbInstance; } catch (error) { const err = `Error at db client connection: ${error} for uri ${uri} and db name ${dbName}`; console.error(err); throw err; } - return dbInstance; +}; + +export const getSearchDb = async () => { + const uri = ATLAS_SEARCH_URI; + const dbName = SEARCH_DB_NAME; + const searchDbClient = db({ uri, dbName }); + return searchDbClient; +}; + +export const getSnootyDb = async () => { + const uri = ATLAS_CLUSTER0_URI; + const dbName = SNOOTY_DB_NAME; + const snootyDbClient = db({ uri, dbName }); + return snootyDbClient; }; export const getCollection = (dbSession: Db, collection: string) => { diff --git a/search-manifest/src/uploadToAtlas/uploadManifest.ts b/search-manifest/src/uploadToAtlas/uploadManifest.ts index 8904850e1..569cab50f 100644 --- a/search-manifest/src/uploadToAtlas/uploadManifest.ts +++ b/search-manifest/src/uploadToAtlas/uploadManifest.ts @@ -1,5 +1,5 @@ import type { Manifest } from "../generateManifest/manifest"; -import { db, getCollection, teardown } from "./searchConnector"; +import { closeSearchDb, db, getCollection, teardown } from "./searchConnector"; import assert from "assert"; import type { RefreshInfo, DatabaseDocument } from "../types"; import { generateHash, joinUrl } from "../utils"; @@ -107,6 +107,6 @@ export const uploadManifest = async ( `Error writing upserts to Search.documents collection with error ${e}` ); } finally { - await teardown(); + await closeSearchDb(); } }; From 752f76790ca4416543b1368b0443d4ef5596cfb2 Mon Sep 17 00:00:00 2001 From: anabellabuckvar <41971124+anabellabuckvar@users.noreply.github.com> Date: Mon, 30 Sep 2024 13:07:40 -0400 Subject: [PATCH 12/40] DOP-5036 test new connection method --- search-manifest/src/uploadToAtlas/getProperties.ts | 13 ++++++++----- .../src/uploadToAtlas/searchConnector.ts | 2 +- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git 
a/search-manifest/src/uploadToAtlas/getProperties.ts b/search-manifest/src/uploadToAtlas/getProperties.ts index d979507fe..f1258e401 100644 --- a/search-manifest/src/uploadToAtlas/getProperties.ts +++ b/search-manifest/src/uploadToAtlas/getProperties.ts @@ -1,5 +1,11 @@ import { Collection, Db } from "mongodb"; -import { closeSnootyDb, db, getCollection, teardown } from "./searchConnector"; +import { + closeSnootyDb, + db, + getCollection, + getSnootyDb, + teardown, +} from "./searchConnector"; import { BranchEntry, DatabaseDocument, @@ -88,10 +94,7 @@ const getProperties = async (branchName: string) => { } //connect to database and get repos_branches, docsets collections - const dbSession = await db({ - uri: ATLAS_CLUSTER0_URI, - dbName: SNOOTY_DB_NAME, - }); + const dbSession = await getSnootyDb(); const repos_branches = getCollection(dbSession, "repos_branches"); const docsets = getCollection(dbSession, "docsets"); diff --git a/search-manifest/src/uploadToAtlas/searchConnector.ts b/search-manifest/src/uploadToAtlas/searchConnector.ts index b382e0807..408ee5cdc 100644 --- a/search-manifest/src/uploadToAtlas/searchConnector.ts +++ b/search-manifest/src/uploadToAtlas/searchConnector.ts @@ -30,7 +30,7 @@ export const closeSnootyDb = async () => { export const closeSearchDb = async () => { if (searchDbClient) teardown(searchDbClient); else { - console.log("No client connection open to Snooty Db"); + console.log("No client connection open to Search Db"); } }; From 185882075f6a247fa3c464944e5fea9f5bb04d43 Mon Sep 17 00:00:00 2001 From: anabellabuckvar <41971124+anabellabuckvar@users.noreply.github.com> Date: Mon, 30 Sep 2024 13:49:34 -0400 Subject: [PATCH 13/40] DOP-5036 use new connection methods --- search-manifest/biome.json | 31 +++++++++++++++++++ .../src/uploadToAtlas/deleteStale.ts | 4 +-- .../src/uploadToAtlas/searchConnector.ts | 11 ++++--- .../src/uploadToAtlas/uploadManifest.ts | 13 +++++--- 4 files changed, 47 insertions(+), 12 deletions(-) create mode 100644 search-manifest/biome.json diff --git a/search-manifest/biome.json b/search-manifest/biome.json new file mode 100644 index 000000000..d6f187d7d --- /dev/null +++ b/search-manifest/biome.json @@ -0,0 +1,31 @@ +{ + "$schema": "https://biomejs.dev/schemas/1.9.1/schema.json", + "vcs": { + "enabled": false, + "clientKind": "git", + "useIgnoreFile": true + }, + "files": { + "ignoreUnknown": false, + "ignore": ["*/.ntli/*"] + }, + "formatter": { + "enabled": true, + "indentStyle": "tab" + }, + "organizeImports": { + "enabled": true + }, + "linter": { + "enabled": true, + "rules": { + "recommended": true + } + }, + "javascript": { + "formatter": { + "quoteStyle": "single", + "trailingCommas": "all" + } + } +} diff --git a/search-manifest/src/uploadToAtlas/deleteStale.ts b/search-manifest/src/uploadToAtlas/deleteStale.ts index dcd009c71..7220edb94 100644 --- a/search-manifest/src/uploadToAtlas/deleteStale.ts +++ b/search-manifest/src/uploadToAtlas/deleteStale.ts @@ -18,7 +18,7 @@ export const deleteStaleDocuments = async ({ }; }; -import { db } from "./searchConnector"; +import { db, getSearchDb } from "./searchConnector"; import { DatabaseDocument } from "../types"; const ATLAS_SEARCH_URI = `mongodb+srv://${process.env.MONGO_ATLAS_USERNAME}:${process.env.MONGO_ATLAS_PASSWORD}@${process.env.MONGO_ATLAS_SEARCH_HOST}/?retryWrites=true&w=majority`; @@ -27,7 +27,7 @@ const ATLAS_SEARCH_URI = `mongodb+srv://${process.env.MONGO_ATLAS_USERNAME}:${pr const SEARCH_DB_NAME = `${process.env.MONGO_ATLAS_SEARCH_DB_NAME}`; export const 
deleteStaleProperties = async (searchProperty: string) => { - const dbSession = await db({ uri: ATLAS_SEARCH_URI, dbName: SEARCH_DB_NAME }); + const dbSession = await getSearchDb(); const documentsColl = dbSession.collection("documents"); console.debug(`Removing all documents with stale property ${searchProperty}`); const query = { searchProperty: { $regex: searchProperty } }; diff --git a/search-manifest/src/uploadToAtlas/searchConnector.ts b/search-manifest/src/uploadToAtlas/searchConnector.ts index 408ee5cdc..6990a6e8f 100644 --- a/search-manifest/src/uploadToAtlas/searchConnector.ts +++ b/search-manifest/src/uploadToAtlas/searchConnector.ts @@ -21,14 +21,14 @@ export const teardown = async (client: mongodb.MongoClient) => { }; export const closeSnootyDb = async () => { - if (snootyDbClient) teardown(snootyDbClient); + if (snootyDbClient) await teardown(snootyDbClient); else { console.log("No client connection open to Snooty Db"); } }; export const closeSearchDb = async () => { - if (searchDbClient) teardown(searchDbClient); + if (searchDbClient) await teardown(searchDbClient); else { console.log("No client connection open to Search Db"); } @@ -39,7 +39,6 @@ export const db = async ({ uri, dbName }: { uri: string; dbName: string }) => { const client = new mongodb.MongoClient(uri); try { await client.connect(); - console.log(client); const dbInstance = client.db(dbName); return dbInstance; } catch (error) { @@ -50,16 +49,18 @@ export const db = async ({ uri, dbName }: { uri: string; dbName: string }) => { }; export const getSearchDb = async () => { + console.log("getting search db"); const uri = ATLAS_SEARCH_URI; const dbName = SEARCH_DB_NAME; - const searchDbClient = db({ uri, dbName }); + const searchDbClient = await db({ uri, dbName }); return searchDbClient; }; export const getSnootyDb = async () => { + console.log("getting snooty db"); const uri = ATLAS_CLUSTER0_URI; const dbName = SNOOTY_DB_NAME; - const snootyDbClient = db({ uri, dbName }); + const snootyDbClient = await db({ uri, dbName }); return snootyDbClient; }; diff --git a/search-manifest/src/uploadToAtlas/uploadManifest.ts b/search-manifest/src/uploadToAtlas/uploadManifest.ts index 569cab50f..bc398f898 100644 --- a/search-manifest/src/uploadToAtlas/uploadManifest.ts +++ b/search-manifest/src/uploadToAtlas/uploadManifest.ts @@ -1,5 +1,11 @@ import type { Manifest } from "../generateManifest/manifest"; -import { closeSearchDb, db, getCollection, teardown } from "./searchConnector"; +import { + closeSearchDb, + db, + getCollection, + getSearchDb, + teardown, +} from "./searchConnector"; import assert from "assert"; import type { RefreshInfo, DatabaseDocument } from "../types"; import { generateHash, joinUrl } from "../utils"; @@ -53,10 +59,7 @@ export const uploadManifest = async ( return Promise.reject(new Error("Invalid manifest")); } - const dbSession = await db({ - uri: ATLAS_SEARCH_URI, - dbName: SEARCH_DB_NAME, - }); + const dbSession = await getSearchDb(); const documentsColl = getCollection(dbSession, "documents"); const status: RefreshInfo = { From 7a67d3e863ffc74aadf3efdec5ceb67d8fc8ff1a Mon Sep 17 00:00:00 2001 From: anabellabuckvar <41971124+anabellabuckvar@users.noreply.github.com> Date: Mon, 30 Sep 2024 14:05:23 -0400 Subject: [PATCH 14/40] DOP-5036 use new connection methods --- search-manifest/src/index.ts | 10 ++++++++-- search-manifest/src/uploadToAtlas/getProperties.ts | 2 +- search-manifest/src/uploadToAtlas/uploadManifest.ts | 2 +- 3 files changed, 10 insertions(+), 4 deletions(-) diff --git 
a/search-manifest/src/index.ts b/search-manifest/src/index.ts index 1071e9f26..95de15b01 100644 --- a/search-manifest/src/index.ts +++ b/search-manifest/src/index.ts @@ -9,7 +9,11 @@ import { uploadManifest } from "./uploadToAtlas/uploadManifest"; import { readdir, readFileSync } from "fs"; import getProperties from "./uploadToAtlas/getProperties"; import { uploadManifestToS3 } from "./uploadToS3/uploadManifest"; -import { closeSearchDb, teardown } from "./uploadToAtlas/searchConnector"; +import { + closeSearchDb, + closeSnootyDb, + teardown, +} from "./uploadToAtlas/searchConnector"; import { s3UploadParams } from "./types"; const readdirAsync = promisify(readdir); @@ -85,12 +89,14 @@ integration.addBuildEventHandler( manifest.global = includeInGlobalSearch; console.log("=========== Uploading Manifests to Atlas ================="); - await uploadManifest(manifest, searchProperty); + const status = await uploadManifest(manifest, searchProperty); + console.log(status); console.log("=========== Manifests uploaded to Atlas ================="); } catch (e) { console.log("Manifest could not be uploaded", e); } finally { closeSearchDb(); + closeSnootyDb(); } } ); diff --git a/search-manifest/src/uploadToAtlas/getProperties.ts b/search-manifest/src/uploadToAtlas/getProperties.ts index f1258e401..2842cad40 100644 --- a/search-manifest/src/uploadToAtlas/getProperties.ts +++ b/search-manifest/src/uploadToAtlas/getProperties.ts @@ -126,7 +126,7 @@ const getProperties = async (branchName: string) => { `Search manifest should not be generated for inactive version ${version} of repo ${REPO_NAME}. Removing all associated manifests` ); } - await closeSnootyDb(); + // await closeSnootyDb(); return { searchProperty, projectName: project, diff --git a/search-manifest/src/uploadToAtlas/uploadManifest.ts b/search-manifest/src/uploadToAtlas/uploadManifest.ts index bc398f898..5644b5bf6 100644 --- a/search-manifest/src/uploadToAtlas/uploadManifest.ts +++ b/search-manifest/src/uploadToAtlas/uploadManifest.ts @@ -110,6 +110,6 @@ export const uploadManifest = async ( `Error writing upserts to Search.documents collection with error ${e}` ); } finally { - await closeSearchDb(); + // await closeSearchDb(); } }; From c0b08473382b893046b267d61c15fba331bb6825 Mon Sep 17 00:00:00 2001 From: anabellabuckvar <41971124+anabellabuckvar@users.noreply.github.com> Date: Mon, 30 Sep 2024 14:30:42 -0400 Subject: [PATCH 15/40] DOP-5036 fix connection methods --- search-manifest/src/index.ts | 4 +- .../src/uploadToAtlas/deleteStale.ts | 2 +- .../src/uploadToAtlas/getProperties.ts | 5 +- .../src/uploadToAtlas/searchConnector.ts | 62 ++++++++++++------- .../src/uploadToAtlas/uploadManifest.ts | 1 - 5 files changed, 43 insertions(+), 31 deletions(-) diff --git a/search-manifest/src/index.ts b/search-manifest/src/index.ts index 95de15b01..4d8aaf725 100644 --- a/search-manifest/src/index.ts +++ b/search-manifest/src/index.ts @@ -95,8 +95,8 @@ integration.addBuildEventHandler( } catch (e) { console.log("Manifest could not be uploaded", e); } finally { - closeSearchDb(); - closeSnootyDb(); + await closeSearchDb(); + await closeSnootyDb(); } } ); diff --git a/search-manifest/src/uploadToAtlas/deleteStale.ts b/search-manifest/src/uploadToAtlas/deleteStale.ts index 7220edb94..48fb74a67 100644 --- a/search-manifest/src/uploadToAtlas/deleteStale.ts +++ b/search-manifest/src/uploadToAtlas/deleteStale.ts @@ -18,7 +18,7 @@ export const deleteStaleDocuments = async ({ }; }; -import { db, getSearchDb } from "./searchConnector"; +import { getSearchDb 
} from "./searchConnector"; import { DatabaseDocument } from "../types"; const ATLAS_SEARCH_URI = `mongodb+srv://${process.env.MONGO_ATLAS_USERNAME}:${process.env.MONGO_ATLAS_PASSWORD}@${process.env.MONGO_ATLAS_SEARCH_HOST}/?retryWrites=true&w=majority`; diff --git a/search-manifest/src/uploadToAtlas/getProperties.ts b/search-manifest/src/uploadToAtlas/getProperties.ts index 2842cad40..724292755 100644 --- a/search-manifest/src/uploadToAtlas/getProperties.ts +++ b/search-manifest/src/uploadToAtlas/getProperties.ts @@ -1,7 +1,6 @@ -import { Collection, Db } from "mongodb"; +import { Collection, Db, DbOptions } from "mongodb"; import { closeSnootyDb, - db, getCollection, getSnootyDb, teardown, @@ -94,7 +93,7 @@ const getProperties = async (branchName: string) => { } //connect to database and get repos_branches, docsets collections - const dbSession = await getSnootyDb(); + const dbSession: Db = await getSnootyDb(); const repos_branches = getCollection(dbSession, "repos_branches"); const docsets = getCollection(dbSession, "docsets"); diff --git a/search-manifest/src/uploadToAtlas/searchConnector.ts b/search-manifest/src/uploadToAtlas/searchConnector.ts index 6990a6e8f..2d9605842 100644 --- a/search-manifest/src/uploadToAtlas/searchConnector.ts +++ b/search-manifest/src/uploadToAtlas/searchConnector.ts @@ -13,36 +13,27 @@ const ATLAS_SEARCH_URI = `mongodb+srv://${process.env.MONGO_ATLAS_USERNAME}:${pr //TODO: change these teamwide env vars in Netlify UI when ready to move to prod const SEARCH_DB_NAME = `${process.env.MONGO_ATLAS_SEARCH_DB_NAME}`; -let searchDbClient: mongodb.MongoClient; -let snootyDbClient: mongodb.MongoClient; +let searchDb: mongodb.MongoClient; +let snootyDb: mongodb.MongoClient; export const teardown = async (client: mongodb.MongoClient) => { await client.close(); }; -export const closeSnootyDb = async () => { - if (snootyDbClient) await teardown(snootyDbClient); - else { - console.log("No client connection open to Snooty Db"); - } -}; - -export const closeSearchDb = async () => { - if (searchDbClient) await teardown(searchDbClient); - else { - console.log("No client connection open to Search Db"); - } -}; - // Handles memoization of db object, and initial connection logic if needs to be initialized -export const db = async ({ uri, dbName }: { uri: string; dbName: string }) => { +export const dbClient = async ({ + uri, + dbName, +}: { + uri: string; + dbName: string; +}) => { const client = new mongodb.MongoClient(uri); try { await client.connect(); - const dbInstance = client.db(dbName); - return dbInstance; + return client; } catch (error) { - const err = `Error at db client connection: ${error} for uri ${uri} and db name ${dbName}`; + const err = `Error at client connection: ${error} for uri ${uri} `; console.error(err); throw err; } @@ -52,16 +43,25 @@ export const getSearchDb = async () => { console.log("getting search db"); const uri = ATLAS_SEARCH_URI; const dbName = SEARCH_DB_NAME; - const searchDbClient = await db({ uri, dbName }); - return searchDbClient; + if (searchDb) { + console.log("search db client already exists, using existing instance"); + } else { + searchDb = await dbClient({ uri, dbName }); + } + return searchDb.db(dbName); }; export const getSnootyDb = async () => { console.log("getting snooty db"); const uri = ATLAS_CLUSTER0_URI; const dbName = SNOOTY_DB_NAME; - const snootyDbClient = await db({ uri, dbName }); - return snootyDbClient; + + if (snootyDb) { + console.log("snooty db client already exists, using existing instance"); + } else { + 
snootyDb = await dbClient({ uri, dbName }); + } + return snootyDb.db(dbName); }; export const getCollection = (dbSession: Db, collection: string) => { @@ -73,3 +73,17 @@ export const getCollection = (dbSession: Db, collection: string) => { ); } }; + +export const closeSnootyDb = async () => { + if (snootyDb) await teardown(snootyDb); + else { + console.log("No client connection open to Snooty Db"); + } +}; + +export const closeSearchDb = async () => { + if (searchDb) await teardown(searchDb); + else { + console.log("No client connection open to Search Db"); + } +}; diff --git a/search-manifest/src/uploadToAtlas/uploadManifest.ts b/search-manifest/src/uploadToAtlas/uploadManifest.ts index 5644b5bf6..63bdd6fb4 100644 --- a/search-manifest/src/uploadToAtlas/uploadManifest.ts +++ b/search-manifest/src/uploadToAtlas/uploadManifest.ts @@ -1,7 +1,6 @@ import type { Manifest } from "../generateManifest/manifest"; import { closeSearchDb, - db, getCollection, getSearchDb, teardown, From f21306700aac32cbd4e5ea5c95f9d5757be21118 Mon Sep 17 00:00:00 2001 From: anabellabuckvar <41971124+anabellabuckvar@users.noreply.github.com> Date: Mon, 30 Sep 2024 16:55:03 -0400 Subject: [PATCH 16/40] DOP-5036 linting --- .../src/generateManifest/document.ts | 464 +++++++++--------- .../src/generateManifest/manifestEntry.ts | 2 +- search-manifest/src/index.ts | 14 +- .../src/uploadToAtlas/deleteStale.ts | 2 +- .../src/uploadToAtlas/getProperties.ts | 15 +- .../src/uploadToAtlas/searchConnector.ts | 2 +- .../src/uploadToAtlas/uploadManifest.ts | 4 +- .../src/uploadToS3/uploadManifest.ts | 12 +- search-manifest/src/utils.ts | 12 +- .../tests/integration/uploadToAtlas.test.ts | 4 +- .../tests/integration/uploadToS3.test.ts | 8 +- .../tests/unit/getProperties.test.ts | 5 +- search-manifest/tests/unit/index.test.ts | 155 +++--- search-manifest/tests/unit/utils.test.ts | 2 +- search-manifest/tests/utils/getManifest.ts | 2 +- 15 files changed, 343 insertions(+), 360 deletions(-) diff --git a/search-manifest/src/generateManifest/document.ts b/search-manifest/src/generateManifest/document.ts index f0c61607b..612a1d353 100644 --- a/search-manifest/src/generateManifest/document.ts +++ b/search-manifest/src/generateManifest/document.ts @@ -1,256 +1,256 @@ import { JSONPath } from "jsonpath-plus"; import { Facet } from "./createFacets"; import { ManifestEntry } from "./manifestEntry"; -import { BSON } from "bson"; +import type { BSON } from "bson"; export class Document { - //Return indexing data from a page's JSON-formatted AST for search purposes - tree: any; - robots: any; - keywords: any; - description: any; - paragraphs: string; - code: { lang: string; value: any }[]; - title: any; - headings: any; - slug: string; - preview?: string; - facets: any; - noIndex: any; - reasons: any; + //Return indexing data from a page's JSON-formatted AST for search purposes + tree: any; + robots: any; + keywords: any; + description: any; + paragraphs: string; + code: { lang: string; value: any }[]; + title: any; + headings: any; + slug: string; + preview?: string; + facets: any; + noIndex: any; + reasons: any; constructor(doc: BSON.Document) { this.tree = doc; - //find metadata - [this.robots, this.keywords, this.description] = this.findMetadata(); - //find paragraphs - this.paragraphs = this.findParagraphs(); - //find code - this.code = this.findCode(); - - //find title, headings - [this.title, this.headings] = this.findHeadings(); - - //derive slug - this.slug = this.deriveSlug(); - - //derive preview - this.preview = this.derivePreview(); 
- - //derive facets - this.facets = deriveFacets(this.tree); - - //noindex, reasons - [this.noIndex, this.reasons] = this.getNoIndex(); - } - - findMetadata() { - let robots = true; //can be set in the rst if the page is supposed to be crawled - let keywords: string | null = null; //keywords is an optional list of strings - let description: string | null = null; //this can be optional?? - - const results = JSONPath({ - path: "$..children[?(@.name=='meta')]..options", - json: this.tree, - }); - if (results.length) { - if (results.length > 1) - console.log( - "length of results is greater than one, it's: " + results.length, - ); - const val = results[0]; - //check if robots, set to false if no robots - if ('robots' in val && (val.robots == 'None' || val.robots == 'noindex')) - robots = false; + //find metadata + [this.robots, this.keywords, this.description] = this.findMetadata(); + //find paragraphs + this.paragraphs = this.findParagraphs(); + //find code + this.code = this.findCode(); + + //find title, headings + [this.title, this.headings] = this.findHeadings(); + + //derive slug + this.slug = this.deriveSlug(); + + //derive preview + this.preview = this.derivePreview(); + + //derive facets + this.facets = deriveFacets(this.tree); + + //noindex, reasons + [this.noIndex, this.reasons] = this.getNoIndex(); + } + + findMetadata() { + let robots = true; //can be set in the rst if the page is supposed to be crawled + let keywords: string | null = null; //keywords is an optional list of strings + let description: string | null = null; //this can be optional?? + + const results = JSONPath({ + path: "$..children[?(@.name=='meta')]..options", + json: this.tree, + }); + if (results.length) { + if (results.length > 1) + console.log( + `length of results is greater than one, length = ${results.length}` + ); + const val = results[0]; + //check if robots, set to false if no robots + if ("robots" in val && (val.robots == "None" || val.robots === "noindex")) + robots = false; keywords = val?.keywords; description = val?.description; } - return [robots, keywords, description]; - } - - findParagraphs() { - let paragraphs = ''; - - const results = JSONPath({ - path: "$..children[?(@.type=='paragraph')]..value", - json: this.tree, - }); - - for (const r of results) { - paragraphs += ' ' + r; - } - return paragraphs.trim(); - } - - findCode() { - const results = JSONPath({ - path: "$..children[?(@.type=='code')]", - json: this.tree, - }); - - const codeContents = []; - for (const r of results) { - const lang = r.lang ?? null; - codeContents.push({ lang: lang, value: r.value }); - } - return codeContents; - } - - findHeadings() { - const headings: string[] = []; - let title = ''; - // Get the children of headings nodes - - const results = JSONPath({ - path: "$..children[?(@.type=='heading')].children", - json: this.tree, - }); - - //no heading nodes found?? page doesn't have title, or headings - if (!results.length) return [title, headings]; - - for (const r of results) { - const heading = []; - const parts = JSONPath({ - path: '$..value', - json: r, - }); - - //add a check in case there is no parts found - for (const part of parts) { - // add a check in case there is no value field found - heading.push(part); - } - headings.push(heading.join()); - } - - title = headings.shift() ?? 
''; - return [title, headings]; - } - - deriveSlug() { - let pageId = this.tree['filename']?.split('.')[0]; - if (pageId == 'index') pageId = ''; - return pageId; - } - - derivePreview() { - //set preview to the meta description if one is specified - - if (this.description) return this.description; - - // Set preview to the paragraph value that's a child of a 'target' element - // (for reference pages that lead with a target definition) - - let results = JSONPath({ - path: "$..children[?(@.type=='target')].children[?(@.type=='paragraph')]", - json: this.tree, - }); - - if (!results.length) { - // Otherwise attempt to set preview to the first content paragraph on the page, - // excluding admonitions. - results = JSONPath({ - path: "$..children[?(@.type=='section')].children[?(@.type=='paragraph')]", - json: this.tree, - }); - } - - if (results.length) { - const strList = []; - - //get value in results - const first = JSONPath({ - path: '$..value', - json: results[0], - }); - - for (const f of first) { - strList.push(f); - } - return strList.join(''); - } - - //else, give up and don't provide a preview - return null; - } - - getNoIndex() { - //determining indexability - - let noIndex = false; - const reasons: string[] = []; - - //if :robots: None in metadata, do not index - if (!this.robots) { - noIndex = true; - reasons.push('robots=None or robots=noindex in meta directive'); - } - - //if page has no title, do not index - if (!this.title) { - noIndex = true; - reasons.push('This page has no headings'); - } - - return [noIndex, reasons]; - } - - exportAsManifestDocument = () => { - // Generate the manifest dictionary entry from the AST source + return [robots, keywords, description]; + } + + findParagraphs() { + let paragraphs = ""; + + const results = JSONPath({ + path: "$..children[?(@.type=='paragraph')]..value", + json: this.tree, + }); + + for (const r of results) { + paragraphs += ` ${r}`; + } + return paragraphs.trim(); + } + + findCode() { + const results = JSONPath({ + path: "$..children[?(@.type=='code')]", + json: this.tree, + }); + + const codeContents = []; + for (const r of results) { + const lang = r.lang ?? null; + codeContents.push({ lang: lang, value: r.value }); + } + return codeContents; + } + + findHeadings() { + const headings: string[] = []; + let title = ""; + // Get the children of headings nodes + + const results = JSONPath({ + path: "$..children[?(@.type=='heading')].children", + json: this.tree, + }); + + //no heading nodes found?? page doesn't have title, or headings + if (!results.length) return [title, headings]; + + for (const r of results) { + const heading = []; + const parts = JSONPath({ + path: "$..value", + json: r, + }); + + //add a check in case there is no parts found + for (const part of parts) { + // add a check in case there is no value field found + heading.push(part); + } + headings.push(heading.join()); + } + + title = headings.shift() ?? 
""; + return [title, headings]; + } + + deriveSlug() { + let pageId = this.tree.filename?.split(".")[0]; + if (pageId === "index") pageId = ""; + return pageId; + } + + derivePreview() { + //set preview to the meta description if one is specified + + if (this.description) return this.description; + + // Set preview to the paragraph value that's a child of a 'target' element + // (for reference pages that lead with a target definition) + + let results = JSONPath({ + path: "$..children[?(@.type=='target')].children[?(@.type=='paragraph')]", + json: this.tree, + }); + + if (!results.length) { + // Otherwise attempt to set preview to the first content paragraph on the page, + // excluding admonitions. + results = JSONPath({ + path: "$..children[?(@.type=='section')].children[?(@.type=='paragraph')]", + json: this.tree, + }); + } + + if (results.length) { + const strList = []; + + //get value in results + const first = JSONPath({ + path: "$..value", + json: results[0], + }); + + for (const f of first) { + strList.push(f); + } + return strList.join(""); + } + + //else, give up and don't provide a preview + return null; + } + + getNoIndex() { + //determining indexability + + let noIndex = false; + const reasons: string[] = []; + + //if :robots: None in metadata, do not index + if (!this.robots) { + noIndex = true; + reasons.push("robots=None or robots=noindex in meta directive"); + } + + //if page has no title, do not index + if (!this.title) { + noIndex = true; + reasons.push("This page has no headings"); + } + + return [noIndex, reasons]; + } + + exportAsManifestDocument = () => { + // Generate the manifest dictionary entry from the AST source if (this.noIndex) { console.info("Refusing to index"); return; } - const document = new ManifestEntry({ - slug: this.slug, - title: this.title, - headings: this.headings, - paragraphs: this.paragraphs, - code: this.code, - preview: this.preview, - keywords: this.keywords, - facets: this.facets, - }); - - return document; - }; + const document = new ManifestEntry({ + slug: this.slug, + title: this.title, + headings: this.headings, + paragraphs: this.paragraphs, + code: this.code, + preview: this.preview, + keywords: this.keywords, + facets: this.facets, + }); + + return document; + }; } const deriveFacets = (tree: any) => { - //Format facets for ManifestEntry from bson entry tree['facets'] if it exists - - const insertKeyVals = (facet: any, prefix = '') => { - const key = prefix + facet.category; - documentFacets[key] = documentFacets[key] ?? []; - documentFacets[key].push(facet.value); - - if (!facet.subFacets) return; - - for (const subFacet of facet.subFacets) { - insertKeyVals(subFacet, key + '>' + facet.value + '>'); - } - }; - - const createFacet = (facetEntry: any) => { - const facet = new Facet( - facetEntry.category, - facetEntry.value, - facetEntry.sub_facets, - ); - insertKeyVals(facet); - }; - - const documentFacets: any = {}; - if (tree['facets']) { - for (const facetEntry of tree['facets']) { - createFacet(facetEntry); - } - } - return documentFacets; + //Format facets for ManifestEntry from bson entry tree['facets'] if it exists + + const insertKeyVals = (facet: any, prefix = "") => { + const key = prefix + facet.category; + documentFacets[key] = documentFacets[key] ?? 
[]; + documentFacets[key].push(facet.value); + + if (!facet.subFacets) return; + + for (const subFacet of facet.subFacets) { + insertKeyVals(subFacet, `${key}>${facet.value}>`); + } + }; + + const createFacet = (facetEntry: any) => { + const facet = new Facet( + facetEntry.category, + facetEntry.value, + facetEntry.sub_facets + ); + insertKeyVals(facet); + }; + + const documentFacets: any = {}; + if (tree.facets) { + for (const facetEntry of tree.facets) { + createFacet(facetEntry); + } + } + return documentFacets; }; diff --git a/search-manifest/src/generateManifest/manifestEntry.ts b/search-manifest/src/generateManifest/manifestEntry.ts index 6e4243e31..7254aee7b 100644 --- a/search-manifest/src/generateManifest/manifestEntry.ts +++ b/search-manifest/src/generateManifest/manifestEntry.ts @@ -1,4 +1,4 @@ -import { Facet } from "./createFacets"; +import type { Facet } from "./createFacets"; //change this to an interface export class ManifestEntry { diff --git a/search-manifest/src/index.ts b/search-manifest/src/index.ts index 4d8aaf725..084a74168 100644 --- a/search-manifest/src/index.ts +++ b/search-manifest/src/index.ts @@ -1,20 +1,16 @@ // Documentation: https://sdk.netlify.com import { NetlifyIntegration } from "@netlify/sdk"; import { Manifest } from "./generateManifest/manifest"; -import { promisify } from "util"; +import { promisify } from "node:util"; import { BSON } from "bson"; import { Document } from "./generateManifest/document"; import { uploadManifest } from "./uploadToAtlas/uploadManifest"; -import { readdir, readFileSync } from "fs"; +import { readdir, readFileSync } from "node:fs"; import getProperties from "./uploadToAtlas/getProperties"; import { uploadManifestToS3 } from "./uploadToS3/uploadManifest"; -import { - closeSearchDb, - closeSnootyDb, - teardown, -} from "./uploadToAtlas/searchConnector"; -import { s3UploadParams } from "./types"; +import { closeSearchDb, closeSnootyDb } from "./uploadToAtlas/searchConnector"; +import type { s3UploadParams } from "./types"; const readdirAsync = promisify(readdir); @@ -53,7 +49,7 @@ integration.addBuildEventHandler( // Get content repo zipfile as AST representation await run.command("unzip -o bundle.zip"); - const branch = netlifyConfig.build?.environment["BRANCH"]; + const branch = netlifyConfig.build?.environment.BRANCH; const manifest = await generateManifest(); diff --git a/search-manifest/src/uploadToAtlas/deleteStale.ts b/search-manifest/src/uploadToAtlas/deleteStale.ts index 48fb74a67..662888da6 100644 --- a/search-manifest/src/uploadToAtlas/deleteStale.ts +++ b/search-manifest/src/uploadToAtlas/deleteStale.ts @@ -19,7 +19,7 @@ export const deleteStaleDocuments = async ({ }; import { getSearchDb } from "./searchConnector"; -import { DatabaseDocument } from "../types"; +import type { DatabaseDocument } from "../types"; const ATLAS_SEARCH_URI = `mongodb+srv://${process.env.MONGO_ATLAS_USERNAME}:${process.env.MONGO_ATLAS_PASSWORD}@${process.env.MONGO_ATLAS_SEARCH_HOST}/?retryWrites=true&w=majority`; diff --git a/search-manifest/src/uploadToAtlas/getProperties.ts b/search-manifest/src/uploadToAtlas/getProperties.ts index 724292755..9c880a741 100644 --- a/search-manifest/src/uploadToAtlas/getProperties.ts +++ b/search-manifest/src/uploadToAtlas/getProperties.ts @@ -1,11 +1,6 @@ -import { Collection, Db, DbOptions } from "mongodb"; -import { - closeSnootyDb, - getCollection, - getSnootyDb, - teardown, -} from "./searchConnector"; -import { +import { type Collection, type Db, DbOptions } from "mongodb"; +import { 
getCollection, getSnootyDb } from "./searchConnector"; +import type { BranchEntry, DatabaseDocument, DocsetsDocument, @@ -24,7 +19,7 @@ export const getDocsetEntry = async ( const docsetsQuery = { project: { $eq: project } }; const docset = await docsets.findOne(docsetsQuery); if (!docset) { - throw new Error(`Error while getting docsets entry in Atlas`); + throw new Error("Error while getting docsets entry in Atlas"); } return docset; }; @@ -75,7 +70,7 @@ export const getRepoEntry = async ({ // helper function to find the associated branch export const getBranch = (branches: Array, branchName: string) => { for (const branchObj of branches) { - if (branchObj.gitBranchName.toLowerCase() == branchName.toLowerCase()) { + if (branchObj.gitBranchName.toLowerCase() === branchName.toLowerCase()) { return { ...branchObj }; } } diff --git a/search-manifest/src/uploadToAtlas/searchConnector.ts b/search-manifest/src/uploadToAtlas/searchConnector.ts index 2d9605842..32d476fc2 100644 --- a/search-manifest/src/uploadToAtlas/searchConnector.ts +++ b/search-manifest/src/uploadToAtlas/searchConnector.ts @@ -1,6 +1,6 @@ import type { Db } from "mongodb"; import * as mongodb from "mongodb"; -import { DatabaseDocument } from "../types"; +import type { DatabaseDocument } from "../types"; // We should only ever have one client active at a time. diff --git a/search-manifest/src/uploadToAtlas/uploadManifest.ts b/search-manifest/src/uploadToAtlas/uploadManifest.ts index 63bdd6fb4..74800a697 100644 --- a/search-manifest/src/uploadToAtlas/uploadManifest.ts +++ b/search-manifest/src/uploadToAtlas/uploadManifest.ts @@ -5,7 +5,7 @@ import { getSearchDb, teardown, } from "./searchConnector"; -import assert from "assert"; +import assert from "node:assert"; import type { RefreshInfo, DatabaseDocument } from "../types"; import { generateHash, joinUrl } from "../utils"; @@ -85,7 +85,7 @@ export const uploadManifest = async ( //TODO: make sure url of manifest doesn't have excess leading slashes(as done in getManifests) //check property types - console.info(`Starting transaction`); + console.info("Starting transaction"); assert.strictEqual(typeof manifest.global, "boolean"); assert.strictEqual(typeof hash, "string"); assert.ok(hash); diff --git a/search-manifest/src/uploadToS3/uploadManifest.ts b/search-manifest/src/uploadToS3/uploadManifest.ts index 0921b4f79..bf3e89e54 100644 --- a/search-manifest/src/uploadToS3/uploadManifest.ts +++ b/search-manifest/src/uploadToS3/uploadManifest.ts @@ -1,7 +1,7 @@ -import { PutObjectCommand, S3Client } from "@aws-sdk/client-s3"; +import { PutObjectCommand, type S3Client } from "@aws-sdk/client-s3"; import { assertTrailingSlash } from "../utils"; import { connectToS3 } from "./connectToS3"; -import { s3UploadParams } from "../types"; +import type { s3UploadParams } from "../types"; const upload = async ( client: S3Client, @@ -22,16 +22,10 @@ export const uploadManifestToS3 = async ({ fileName, manifest, }: s3UploadParams) => { - let client: S3Client; //TODO: maybe also ensure there isn't a double trailing slash here to begin with ?? 
(although it's unclear why there would be)
   prefix = assertTrailingSlash(prefix);
   const key = prefix + fileName;
-  try {
-    client = connectToS3();
-  } catch (e) {
-    throw e;
-  }
-
+  const client = connectToS3();
   const uploadStatus = await upload(client, {
     Bucket: bucket,
     Key: key,
diff --git a/search-manifest/src/utils.ts b/search-manifest/src/utils.ts
index c494157b7..8169f6558 100644
--- a/search-manifest/src/utils.ts
+++ b/search-manifest/src/utils.ts
@@ -1,7 +1,7 @@
-import crypto from 'crypto';
+import crypto from "node:crypto";
 
 export function generateHash(data: string): Promise {
-  const hash = crypto.createHash('sha256');
+  const hash = crypto.createHash("sha256");
 
   return new Promise((resolve) => {
     hash.on("readable", () => {
@@ -11,9 +11,9 @@ export function generateHash(data: string): Promise {
       }
     });
 
-    hash.write(data);
-    hash.end();
-  });
+    hash.write(data);
+    hash.end();
+  });
 }
 
 export function joinUrl({
@@ -27,5 +27,5 @@ export function joinUrl({
 }
 
 export function assertTrailingSlash(path: string): string {
-  return path.endsWith('/') ? path : `${path}/`;
+  return path.endsWith("/") ? path : `${path}/`;
 }
diff --git a/search-manifest/tests/integration/uploadToAtlas.test.ts b/search-manifest/tests/integration/uploadToAtlas.test.ts
index 8277ab8cd..333be494a 100644
--- a/search-manifest/tests/integration/uploadToAtlas.test.ts
+++ b/search-manifest/tests/integration/uploadToAtlas.test.ts
@@ -11,7 +11,7 @@ import { uploadManifest } from "../../src/uploadToAtlas/uploadManifest";
 import { Manifest } from "../../src/generateManifest/manifest";
 import nodeManifest from "../resources/s3Manifests/node-current.json";
 import { mockDb, insert, removeDocuments } from "../utils/mockDB";
-import { DatabaseDocument } from "../../src/types";
+import type { DatabaseDocument } from "../../src/types";
 import { getManifest } from "../utils/getManifest";
 import { generateHash } from "../../src/utils";
@@ -111,7 +111,7 @@ afterEach(async () => {
   await removeDocuments("documents");
 });
-  let manifest1: Manifest = new Manifest(
+  const manifest1: Manifest = new Manifest(
     nodeManifest.url,
     nodeManifest.includeInGlobalSearch
   );
diff --git a/search-manifest/tests/integration/uploadToS3.test.ts b/search-manifest/tests/integration/uploadToS3.test.ts
index b8de24b93..cad20a6eb 100644
--- a/search-manifest/tests/integration/uploadToS3.test.ts
+++ b/search-manifest/tests/integration/uploadToS3.test.ts
@@ -1,5 +1,5 @@
 import { beforeEach, describe, expect, test, vi } from "vitest";
-import {
+import type {
   PutObjectCommand,
   PutObjectCommandOutput,
   S3Client,
@@ -7,11 +7,11 @@ import { mockClient } from "aws-sdk-client-mock";
 import { getManifest } from "../utils/getManifest";
 import { uploadManifestToS3 } from "../../src/uploadToS3/uploadManifest";
-import { s3UploadParams } from "../../src/types";
+import type { s3UploadParams } from "../../src/types";
 
 const MANIFEST = await getManifest("node");
-const PROJECT_NAME = `node`;
-const BRANCH = `master`;
+const PROJECT_NAME = "node";
+const BRANCH = "master";
 
 const output: PutObjectCommandOutput = {
   $metadata: {
diff --git a/search-manifest/tests/unit/getProperties.test.ts b/search-manifest/tests/unit/getProperties.test.ts
index 92f547278..cf7585997 100644
--- a/search-manifest/tests/unit/getProperties.test.ts
+++ b/search-manifest/tests/unit/getProperties.test.ts
@@ -20,9 +20,8 @@ import repos_branches from "../resources/mockCollections/repos-branches.json";
 //simulate the docsets collection in an object
 import docsets from
"../resources/mockCollections/docsets.json"; -import * as mongodb from "mongodb"; -import { BranchEntry, DatabaseDocument } from "../../src/types"; -import { Manifest } from "../../src/generateManifest/manifest"; +import type * as mongodb from "mongodb"; +import type { BranchEntry, DatabaseDocument } from "../../src/types"; import { getManifest } from "../utils/getManifest"; import { uploadManifest } from "../../src/uploadToAtlas/uploadManifest"; import { afterEach } from "node:test"; diff --git a/search-manifest/tests/unit/index.test.ts b/search-manifest/tests/unit/index.test.ts index 9243cc49b..5890e7c3e 100644 --- a/search-manifest/tests/unit/index.test.ts +++ b/search-manifest/tests/unit/index.test.ts @@ -1,103 +1,102 @@ -import { describe, expect, afterEach, test, it, vi, beforeAll } from 'vitest'; -import nodeManifest from '../resources/s3Manifests/node-current.json'; -import kotlinManifest from '../resources/s3Manifests/kotlin-upcoming.json'; -import type { ManifestEntry } from '../../src/generateManifest/manifestEntry'; -import { getManifest } from '../utils/getManifest'; +import { describe, expect, afterEach, test, it, vi, beforeAll } from "vitest"; +import nodeManifest from "../resources/s3Manifests/node-current.json"; +import kotlinManifest from "../resources/s3Manifests/kotlin-upcoming.json"; +import type { ManifestEntry } from "../../src/generateManifest/manifestEntry"; +import { getManifest } from "../utils/getManifest"; describe.each([ - { manifestName: 'node', s3Manifest: nodeManifest }, - { manifestName: 'kotlin', s3Manifest: kotlinManifest }, -])('Generate manifests from ast', async ({ manifestName, s3Manifest }) => { - //generate new manifest - const manifest = await getManifest(manifestName); + { manifestName: "node", s3Manifest: nodeManifest }, + { manifestName: "kotlin", s3Manifest: kotlinManifest }, +])("Generate manifests from ast", async ({ manifestName, s3Manifest }) => { + //generate new manifest + const manifest = await getManifest(manifestName); - it('has generated the manifest', async () => { - expect(manifest).toBeTruthy(); - }); + it("has generated the manifest", async () => { + expect(manifest).toBeTruthy(); + }); - it('has the correct document length', () => { - expect(manifest.documents).toHaveLength(s3Manifest.documents.length); - }); + it("has the correct document length", () => { + expect(manifest.documents).toHaveLength(s3Manifest.documents.length); + }); }); describe.each([ - { - manifestName: 'node', - s3Manifest: nodeManifest, - }, - { manifestName: 'kotlin', s3Manifest: kotlinManifest }, + { + manifestName: "node", + s3Manifest: nodeManifest, + }, + { manifestName: "kotlin", s3Manifest: kotlinManifest }, ])( - 'has the correct document properties', - async ({ manifestName, s3Manifest }) => { - const manifest = await getManifest(manifestName); - const title = manifest.documents[0].title; + "has the correct document properties", + async ({ manifestName, s3Manifest }) => { + const manifest = await getManifest(manifestName); + const title = manifest.documents[0].title; - //TODO: put in a loop to check multiple manifestEntries against each other - let equivDoc: ManifestEntry; - for (const document of s3Manifest.documents) { - if (document.title == manifest.documents[0].title) equivDoc = document; - continue; - } + //TODO: put in a loop to check multiple manifestEntries against each other + let equivDoc: ManifestEntry; + for (const document of s3Manifest.documents) { + if (document.title === manifest.documents[0].title) equivDoc = document; + } - it('is 
of type string', () => { - expect(title).toBeTypeOf('string'); - }); + it("is of type string", () => { + expect(title).toBeTypeOf("string"); + }); - it('matches the slug', () => { - //slug - expect(manifest.documents[0].slug).toEqual(equivDoc.slug); - }); + it("matches the slug", () => { + //slug + expect(manifest.documents[0].slug).toEqual(equivDoc.slug); + }); - it('matches the heading', () => { - //headings - expect(manifest.documents[0].headings).toEqual(equivDoc.headings); - }); + it("matches the heading", () => { + //headings + expect(manifest.documents[0].headings).toEqual(equivDoc.headings); + }); - it('matches the paragraphs', () => { - //paragraphs - expect(manifest.documents[0].paragraphs).toEqual(equivDoc.paragraphs); - }); + it("matches the paragraphs", () => { + //paragraphs + expect(manifest.documents[0].paragraphs).toEqual(equivDoc.paragraphs); + }); - it('matches the code', () => { - //code - expect(manifest.documents[0].code).toEqual(equivDoc.code); - }); - //preview - it('matches preview', () => { - expect(manifest.documents[0].preview).toEqual(equivDoc.preview); - }); + it("matches the code", () => { + //code + expect(manifest.documents[0].code).toEqual(equivDoc.code); + }); + //preview + it("matches preview", () => { + expect(manifest.documents[0].preview).toEqual(equivDoc.preview); + }); - //tags - it('matches tags', () => { - expect(manifest.documents[0].tags).toEqual(equivDoc.tags); - }); + //tags + it("matches tags", () => { + expect(manifest.documents[0].tags).toEqual(equivDoc.tags); + }); - //facets - it('matches facets', () => { - expect(manifest.documents[0].facets).toEqual(equivDoc.facets); - }); - }, + //facets + it("matches facets", () => { + expect(manifest.documents[0].facets).toEqual(equivDoc.facets); + }); + } ); //TODO: test Document creation describe.each([ - { - manifestName: 'node', - }, - { manifestName: 'kotlin' }, + { + manifestName: "node", + }, + { manifestName: "kotlin" }, ])( - 'given a decoded document generate all of the correct properties', - async ({ manifestName }) => { - //declare decoded documents here + "given a decoded document generate all of the correct properties", + async ({ manifestName }) => { + //declare decoded documents here - it('should return the proper metadata', () => {}); + it("should return the proper metadata", () => {}); - it('should return the proper paragraphs', () => {}); - it('should return the proper headings and titles', () => {}); - it('should return the proper slug', () => {}); - it('should return the proper preview', () => {}); - it('should return the proper facets', () => {}); - it('should correctly return whether the document is indexable', () => {}); - }, + it("should return the proper paragraphs", () => {}); + it("should return the proper headings and titles", () => {}); + it("should return the proper slug", () => {}); + it("should return the proper preview", () => {}); + it("should return the proper facets", () => {}); + it("should correctly return whether the document is indexable", () => {}); + } ); //TODO: given a single decoded entry, use Document function on it diff --git a/search-manifest/tests/unit/utils.test.ts b/search-manifest/tests/unit/utils.test.ts index bebf092f9..ffca4d296 100644 --- a/search-manifest/tests/unit/utils.test.ts +++ b/search-manifest/tests/unit/utils.test.ts @@ -2,7 +2,7 @@ import { joinUrl } from "../../src/utils"; import { expect, it } from "vitest"; //test joinUrl util -it("correctly joins base URLs with slugs", function () { +it("correctly joins base URLs with slugs", () 
=> { expect(joinUrl({ base: "https://example.com//", path: "//foo/" })).toEqual( "https://example.com/foo/" ); diff --git a/search-manifest/tests/utils/getManifest.ts b/search-manifest/tests/utils/getManifest.ts index 792218e7f..cd4ce5f16 100644 --- a/search-manifest/tests/utils/getManifest.ts +++ b/search-manifest/tests/utils/getManifest.ts @@ -4,6 +4,6 @@ export const getManifest = async (manifestName: string) => { process.chdir(`documents/docs-${manifestName}`); const manifest = await generateManifest(); // Restore cwd - process.chdir(`../../`); + process.chdir("../../"); return manifest; }; From 2efa9f4e57a997498005447cbfb36a3af6153e15 Mon Sep 17 00:00:00 2001 From: anabellabuckvar <41971124+anabellabuckvar@users.noreply.github.com> Date: Mon, 30 Sep 2024 17:19:24 -0400 Subject: [PATCH 17/40] DOP-5036 use functions specific to each collection --- .../src/uploadToAtlas/deleteStale.ts | 13 ++---- .../src/uploadToAtlas/getProperties.ts | 13 +++--- .../src/uploadToAtlas/searchConnector.ts | 42 ++++++++++--------- .../src/uploadToAtlas/uploadManifest.ts | 11 +---- 4 files changed, 33 insertions(+), 46 deletions(-) diff --git a/search-manifest/src/uploadToAtlas/deleteStale.ts b/search-manifest/src/uploadToAtlas/deleteStale.ts index 662888da6..81eff18d2 100644 --- a/search-manifest/src/uploadToAtlas/deleteStale.ts +++ b/search-manifest/src/uploadToAtlas/deleteStale.ts @@ -1,3 +1,5 @@ +import { getDocumentsCollection } from "./searchConnector"; + export const deleteStaleDocuments = async ({ searchProperty, manifestRevisionId, @@ -18,17 +20,8 @@ export const deleteStaleDocuments = async ({ }; }; -import { getSearchDb } from "./searchConnector"; -import type { DatabaseDocument } from "../types"; - -const ATLAS_SEARCH_URI = `mongodb+srv://${process.env.MONGO_ATLAS_USERNAME}:${process.env.MONGO_ATLAS_PASSWORD}@${process.env.MONGO_ATLAS_SEARCH_HOST}/?retryWrites=true&w=majority`; - -//TODO: change these teamwide env vars in Netlify UI when ready to move to prod -const SEARCH_DB_NAME = `${process.env.MONGO_ATLAS_SEARCH_DB_NAME}`; - export const deleteStaleProperties = async (searchProperty: string) => { - const dbSession = await getSearchDb(); - const documentsColl = dbSession.collection("documents"); + const documentsColl = await getDocumentsCollection(); console.debug(`Removing all documents with stale property ${searchProperty}`); const query = { searchProperty: { $regex: searchProperty } }; const status = await documentsColl?.deleteMany(query); diff --git a/search-manifest/src/uploadToAtlas/getProperties.ts b/search-manifest/src/uploadToAtlas/getProperties.ts index 9c880a741..8e0c328ff 100644 --- a/search-manifest/src/uploadToAtlas/getProperties.ts +++ b/search-manifest/src/uploadToAtlas/getProperties.ts @@ -1,5 +1,8 @@ import { type Collection, type Db, DbOptions } from "mongodb"; -import { getCollection, getSnootyDb } from "./searchConnector"; +import { + getDocsetsCollection, + getReposBranchesCollection, +} from "./searchConnector"; import type { BranchEntry, DatabaseDocument, @@ -9,9 +12,6 @@ import type { import { assertTrailingSlash } from "../utils"; import { deleteStaleProperties } from "./deleteStale"; -const ATLAS_CLUSTER0_URI = `mongodb+srv://${process.env.MONGO_ATLAS_USERNAME}:${process.env.MONGO_ATLAS_PASSWORD}@${process.env.MONGO_ATLAS_CLUSTER0_HOST}/?retryWrites=true&w=majority`; -const SNOOTY_DB_NAME = `${process.env.MONGO_ATLAS_POOL_DB_NAME}`; - export const getDocsetEntry = async ( docsets: Collection, project: string @@ -88,9 +88,8 @@ const getProperties = async (branchName: 
string) => { } //connect to database and get repos_branches, docsets collections - const dbSession: Db = await getSnootyDb(); - const repos_branches = getCollection(dbSession, "repos_branches"); - const docsets = getCollection(dbSession, "docsets"); + const repos_branches = await getReposBranchesCollection(); + const docsets = await getDocsetsCollection(); const repo: ReposBranchesDocument = await getRepoEntry({ repoName: REPO_NAME, diff --git a/search-manifest/src/uploadToAtlas/searchConnector.ts b/search-manifest/src/uploadToAtlas/searchConnector.ts index 32d476fc2..0b0b84cd7 100644 --- a/search-manifest/src/uploadToAtlas/searchConnector.ts +++ b/search-manifest/src/uploadToAtlas/searchConnector.ts @@ -10,9 +10,12 @@ const ATLAS_CLUSTER0_URI = `mongodb+srv://${process.env.MONGO_ATLAS_USERNAME}:${ const SNOOTY_DB_NAME = `${process.env.MONGO_ATLAS_POOL_DB_NAME}`; const ATLAS_SEARCH_URI = `mongodb+srv://${process.env.MONGO_ATLAS_USERNAME}:${process.env.MONGO_ATLAS_PASSWORD}@${process.env.MONGO_ATLAS_SEARCH_HOST}/?retryWrites=true&w=majority`; -//TODO: change these teamwide env vars in Netlify UI when ready to move to prod const SEARCH_DB_NAME = `${process.env.MONGO_ATLAS_SEARCH_DB_NAME}`; +const REPOS_BRANCHES_COLLECTION = "repos_branches"; +const DOCSETS_COLLECTION = "docsets"; +const DOCUMENTS_COLLECTION = "documents"; + let searchDb: mongodb.MongoClient; let snootyDb: mongodb.MongoClient; @@ -21,13 +24,7 @@ export const teardown = async (client: mongodb.MongoClient) => { }; // Handles memoization of db object, and initial connection logic if needs to be initialized -export const dbClient = async ({ - uri, - dbName, -}: { - uri: string; - dbName: string; -}) => { +export const dbClient = async (uri: string) => { const client = new mongodb.MongoClient(uri); try { await client.connect(); @@ -46,7 +43,7 @@ export const getSearchDb = async () => { if (searchDb) { console.log("search db client already exists, using existing instance"); } else { - searchDb = await dbClient({ uri, dbName }); + searchDb = await dbClient(uri); } return searchDb.db(dbName); }; @@ -59,21 +56,11 @@ export const getSnootyDb = async () => { if (snootyDb) { console.log("snooty db client already exists, using existing instance"); } else { - snootyDb = await dbClient({ uri, dbName }); + snootyDb = await dbClient(uri); } return snootyDb.db(dbName); }; -export const getCollection = (dbSession: Db, collection: string) => { - try { - return dbSession.collection(collection); - } catch (e) { - throw new Error( - `Error getting ${collection} collection from client: ${dbSession}` - ); - } -}; - export const closeSnootyDb = async () => { if (snootyDb) await teardown(snootyDb); else { @@ -87,3 +74,18 @@ export const closeSearchDb = async () => { console.log("No client connection open to Search Db"); } }; + +export const getDocsetsCollection = async () => { + const dbSession = await getSnootyDb(); + return dbSession.collection(DOCSETS_COLLECTION); +}; + +export const getReposBranchesCollection = async () => { + const dbSession = await getSnootyDb(); + return dbSession.collection(REPOS_BRANCHES_COLLECTION); +}; + +export const getDocumentsCollection = async () => { + const dbSession = await getSearchDb(); + return dbSession.collection(DOCUMENTS_COLLECTION); +}; diff --git a/search-manifest/src/uploadToAtlas/uploadManifest.ts b/search-manifest/src/uploadToAtlas/uploadManifest.ts index 74800a697..59f0c7f18 100644 --- a/search-manifest/src/uploadToAtlas/uploadManifest.ts +++ b/search-manifest/src/uploadToAtlas/uploadManifest.ts @@ -1,10 
+1,5 @@ import type { Manifest } from "../generateManifest/manifest"; -import { - closeSearchDb, - getCollection, - getSearchDb, - teardown, -} from "./searchConnector"; +import { getDocumentsCollection } from "./searchConnector"; import assert from "node:assert"; import type { RefreshInfo, DatabaseDocument } from "../types"; import { generateHash, joinUrl } from "../utils"; @@ -57,9 +52,7 @@ export const uploadManifest = async ( if (!manifest?.documents?.length) { return Promise.reject(new Error("Invalid manifest")); } - - const dbSession = await getSearchDb(); - const documentsColl = getCollection(dbSession, "documents"); + const documentsColl = await getDocumentsCollection(); const status: RefreshInfo = { deleted: 0, From a0609b577ebd18f13b8b44d5b68631ac9d90e30f Mon Sep 17 00:00:00 2001 From: anabellabuckvar <41971124+anabellabuckvar@users.noreply.github.com> Date: Mon, 30 Sep 2024 18:19:24 -0400 Subject: [PATCH 18/40] DOP-5036 check for env vars --- search-manifest/src/assertEnvVars.ts | 22 +++++++++++++++++++ search-manifest/src/index.ts | 16 ++++++++++---- .../src/uploadToAtlas/getProperties.ts | 21 ++++++++---------- .../tests/integration/uploadToS3.test.ts | 4 ++-- 4 files changed, 45 insertions(+), 18 deletions(-) create mode 100644 search-manifest/src/assertEnvVars.ts diff --git a/search-manifest/src/assertEnvVars.ts b/search-manifest/src/assertEnvVars.ts new file mode 100644 index 000000000..4c7b7ac6b --- /dev/null +++ b/search-manifest/src/assertEnvVars.ts @@ -0,0 +1,22 @@ +export const assertEnvVars = (vars: any) => { + const missingVars = Object.entries(vars) + .filter(([, value]) => !value) + .map(([key]) => `- ${key}`) + .join("\n"); + if (missingVars) + throw new Error(`Missing env var(s) ${JSON.stringify(missingVars)}`); + return true; +}; + +export const getEnvVars = () => { + const ENV_VARS = { + ATLAS_CLUSTER0_URI: `mongodb+srv://${process.env.MONGO_ATLAS_USERNAME}:${process.env.MONGO_ATLAS_PASSWORD}@${process.env.MONGO_ATLAS_CLUSTER0_HOST}/?retryWrites=true&w=majority`, + SNOOTY_DB_NAME: `${process.env.MONGO_ATLAS_POOL_DB_NAME}`, + ATLAS_SEARCH_URI: `mongodb+srv://${process.env.MONGO_ATLAS_USERNAME}:${process.env.MONGO_ATLAS_PASSWORD}@${process.env.MONGO_ATLAS_SEARCH_HOST}/?retryWrites=true&w=majority`, + SEARCH_DB_NAME: `${process.env.MONGO_ATLAS_SEARCH_DB_NAME}`, + REPOS_BRANCHES_COLLECTION: "repos_branches", + DOCSETS_COLLECTION: "docsets", + DOCUMENTS_COLLECTION: "documents", + }; + if (assertEnvVars(ENV_VARS)) return ENV_VARS; +}; diff --git a/search-manifest/src/index.ts b/search-manifest/src/index.ts index 084a74168..28be1c228 100644 --- a/search-manifest/src/index.ts +++ b/search-manifest/src/index.ts @@ -41,7 +41,6 @@ export const generateManifest = async () => { } return manifest; }; - //Return indexing data from a page's AST for search purposes. integration.addBuildEventHandler( "onSuccess", @@ -49,7 +48,16 @@ integration.addBuildEventHandler( // Get content repo zipfile as AST representation await run.command("unzip -o bundle.zip"); - const branch = netlifyConfig.build?.environment.BRANCH; + + const branchName = netlifyConfig.build?.environment.BRANCH; + const repoName = + process.env.REPO_NAME ?? 
netlifyConfig.build?.environment.SITE_NAME; + //check that an environment variable for repo name was set + if (!repoName || !branchName) { + throw new Error( + "Repo or branch name was not found, manifest cannot be uploaded to Atlas or S3 " + ); + } const manifest = await generateManifest(); @@ -64,14 +72,14 @@ integration.addBuildEventHandler( projectName: string; url: string; includeInGlobalSearch: boolean; - } = await getProperties(branch); + } = await getProperties({ branchName, repoName }); console.log("=========== Uploading Manifests to S3================="); const uploadParams: s3UploadParams = { bucket: "docs-search-indexes-test", //TODO: change this values based on environments prefix: "search-indexes/ab-testing", - fileName: `${projectName}-${branch}.json`, + fileName: `${projectName}-${branchName}.json`, manifest: manifest.export(), }; diff --git a/search-manifest/src/uploadToAtlas/getProperties.ts b/search-manifest/src/uploadToAtlas/getProperties.ts index 8e0c328ff..5fa0db366 100644 --- a/search-manifest/src/uploadToAtlas/getProperties.ts +++ b/search-manifest/src/uploadToAtlas/getProperties.ts @@ -77,22 +77,19 @@ export const getBranch = (branches: Array, branchName: string) => { throw new Error(`Branch ${branchName} not found in branches object`); }; -const getProperties = async (branchName: string) => { - const REPO_NAME = process.env.REPO_NAME; - - //check that an environment variable for repo name was set - if (!REPO_NAME) { - throw new Error( - "No repo name supplied as environment variable, manifest cannot be uploaded to Atlas Search.Documents collection " - ); - } - +const getProperties = async ({ + branchName, + repoName, +}: { + branchName: string; + repoName: string; +}) => { //connect to database and get repos_branches, docsets collections const repos_branches = await getReposBranchesCollection(); const docsets = await getDocsetsCollection(); const repo: ReposBranchesDocument = await getRepoEntry({ - repoName: REPO_NAME, + repoName: repoName, repos_branches, }); @@ -116,7 +113,7 @@ const getProperties = async (branchName: string) => { if (!active) { await deleteStaleProperties(searchProperty); throw new Error( - `Search manifest should not be generated for inactive version ${version} of repo ${REPO_NAME}. Removing all associated manifests` + `Search manifest should not be generated for inactive version ${version} of repo ${repoName}. 
Removing all associated manifests` ); } // await closeSnootyDb(); diff --git a/search-manifest/tests/integration/uploadToS3.test.ts b/search-manifest/tests/integration/uploadToS3.test.ts index cad20a6eb..5f635a093 100644 --- a/search-manifest/tests/integration/uploadToS3.test.ts +++ b/search-manifest/tests/integration/uploadToS3.test.ts @@ -1,7 +1,7 @@ import { beforeEach, describe, expect, test, vi } from "vitest"; -import type { +import { PutObjectCommand, - PutObjectCommandOutput, + type PutObjectCommandOutput, S3Client, } from "@aws-sdk/client-s3"; import { mockClient } from "aws-sdk-client-mock"; From 0557a192084a4d7aef6fca4cd709f7cbc6f6add3 Mon Sep 17 00:00:00 2001 From: anabellabuckvar <41971124+anabellabuckvar@users.noreply.github.com> Date: Mon, 30 Sep 2024 18:27:31 -0400 Subject: [PATCH 19/40] DOP-5036 casing --- search-manifest/src/assertEnvVars.ts | 5 ++- .../src/uploadToAtlas/searchConnector.ts | 39 +++++++------------ 2 files changed, 18 insertions(+), 26 deletions(-) diff --git a/search-manifest/src/assertEnvVars.ts b/search-manifest/src/assertEnvVars.ts index 4c7b7ac6b..5b171171c 100644 --- a/search-manifest/src/assertEnvVars.ts +++ b/search-manifest/src/assertEnvVars.ts @@ -1,4 +1,4 @@ -export const assertEnvVars = (vars: any) => { +const assertEnvVars = (vars: any) => { const missingVars = Object.entries(vars) .filter(([, value]) => !value) .map(([key]) => `- ${key}`) @@ -18,5 +18,6 @@ export const getEnvVars = () => { DOCSETS_COLLECTION: "docsets", DOCUMENTS_COLLECTION: "documents", }; - if (assertEnvVars(ENV_VARS)) return ENV_VARS; + assertEnvVars(ENV_VARS); + return ENV_VARS; }; diff --git a/search-manifest/src/uploadToAtlas/searchConnector.ts b/search-manifest/src/uploadToAtlas/searchConnector.ts index 0b0b84cd7..efbda208f 100644 --- a/search-manifest/src/uploadToAtlas/searchConnector.ts +++ b/search-manifest/src/uploadToAtlas/searchConnector.ts @@ -1,20 +1,9 @@ import type { Db } from "mongodb"; import * as mongodb from "mongodb"; import type { DatabaseDocument } from "../types"; +import { getEnvVars } from "../assertEnvVars"; -// We should only ever have one client active at a time. 
- -// cached db object, so we can handle initial connection process once if unitialized - -const ATLAS_CLUSTER0_URI = `mongodb+srv://${process.env.MONGO_ATLAS_USERNAME}:${process.env.MONGO_ATLAS_PASSWORD}@${process.env.MONGO_ATLAS_CLUSTER0_HOST}/?retryWrites=true&w=majority`; -const SNOOTY_DB_NAME = `${process.env.MONGO_ATLAS_POOL_DB_NAME}`; - -const ATLAS_SEARCH_URI = `mongodb+srv://${process.env.MONGO_ATLAS_USERNAME}:${process.env.MONGO_ATLAS_PASSWORD}@${process.env.MONGO_ATLAS_SEARCH_HOST}/?retryWrites=true&w=majority`; -const SEARCH_DB_NAME = `${process.env.MONGO_ATLAS_SEARCH_DB_NAME}`; - -const REPOS_BRANCHES_COLLECTION = "repos_branches"; -const DOCSETS_COLLECTION = "docsets"; -const DOCUMENTS_COLLECTION = "documents"; +const ENV_VARS = getEnvVars(); let searchDb: mongodb.MongoClient; let snootyDb: mongodb.MongoClient; @@ -37,11 +26,11 @@ export const dbClient = async (uri: string) => { }; export const getSearchDb = async () => { - console.log("getting search db"); - const uri = ATLAS_SEARCH_URI; - const dbName = SEARCH_DB_NAME; + console.log("Getting search Db"); + const uri = ENV_VARS.ATLAS_SEARCH_URI; + const dbName = ENV_VARS.SEARCH_DB_NAME; if (searchDb) { - console.log("search db client already exists, using existing instance"); + console.log("search Db client already exists, using existing instance"); } else { searchDb = await dbClient(uri); } @@ -49,12 +38,12 @@ export const getSearchDb = async () => { }; export const getSnootyDb = async () => { - console.log("getting snooty db"); - const uri = ATLAS_CLUSTER0_URI; - const dbName = SNOOTY_DB_NAME; + console.log("Getting snooty Db"); + const uri = ENV_VARS.ATLAS_CLUSTER0_URI; + const dbName = ENV_VARS.SNOOTY_DB_NAME; if (snootyDb) { - console.log("snooty db client already exists, using existing instance"); + console.log("Snooty Db client already exists, using existing instance"); } else { snootyDb = await dbClient(uri); } @@ -77,15 +66,17 @@ export const closeSearchDb = async () => { export const getDocsetsCollection = async () => { const dbSession = await getSnootyDb(); - return dbSession.collection(DOCSETS_COLLECTION); + return dbSession.collection(ENV_VARS.DOCSETS_COLLECTION); }; export const getReposBranchesCollection = async () => { const dbSession = await getSnootyDb(); - return dbSession.collection(REPOS_BRANCHES_COLLECTION); + return dbSession.collection( + ENV_VARS.REPOS_BRANCHES_COLLECTION + ); }; export const getDocumentsCollection = async () => { const dbSession = await getSearchDb(); - return dbSession.collection(DOCUMENTS_COLLECTION); + return dbSession.collection(ENV_VARS.DOCUMENTS_COLLECTION); }; From b6f3b0aa0d02669247c999bf9b4d627d6b042cf8 Mon Sep 17 00:00:00 2001 From: anabellabuckvar <41971124+anabellabuckvar@users.noreply.github.com> Date: Tue, 1 Oct 2024 16:03:21 -0400 Subject: [PATCH 20/40] DOP-5036 fix some casing --- search-manifest/src/uploadToAtlas/getProperties.ts | 1 - search-manifest/src/uploadToAtlas/searchConnector.ts | 8 ++++---- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/search-manifest/src/uploadToAtlas/getProperties.ts b/search-manifest/src/uploadToAtlas/getProperties.ts index 5fa0db366..b61107e1d 100644 --- a/search-manifest/src/uploadToAtlas/getProperties.ts +++ b/search-manifest/src/uploadToAtlas/getProperties.ts @@ -116,7 +116,6 @@ const getProperties = async ({ `Search manifest should not be generated for inactive version ${version} of repo ${repoName}. 
Removing all associated manifests` ); } - // await closeSnootyDb(); return { searchProperty, projectName: project, diff --git a/search-manifest/src/uploadToAtlas/searchConnector.ts b/search-manifest/src/uploadToAtlas/searchConnector.ts index efbda208f..e5259c16b 100644 --- a/search-manifest/src/uploadToAtlas/searchConnector.ts +++ b/search-manifest/src/uploadToAtlas/searchConnector.ts @@ -19,18 +19,18 @@ export const dbClient = async (uri: string) => { await client.connect(); return client; } catch (error) { - const err = `Error at client connection: ${error} for uri ${uri} `; + const err = `Error at client connection: ${error} `; console.error(err); throw err; } }; export const getSearchDb = async () => { - console.log("Getting search Db"); + console.log("Getting Search Db"); const uri = ENV_VARS.ATLAS_SEARCH_URI; const dbName = ENV_VARS.SEARCH_DB_NAME; if (searchDb) { - console.log("search Db client already exists, using existing instance"); + console.log("Search Db client already exists, using existing instance"); } else { searchDb = await dbClient(uri); } @@ -38,7 +38,7 @@ export const getSearchDb = async () => { }; export const getSnootyDb = async () => { - console.log("Getting snooty Db"); + console.log("Getting Snooty Db"); const uri = ENV_VARS.ATLAS_CLUSTER0_URI; const dbName = ENV_VARS.SNOOTY_DB_NAME; From 95e14f738033b1977501e5a49c277b3ddaf61e77 Mon Sep 17 00:00:00 2001 From: anabellabuckvar <41971124+anabellabuckvar@users.noreply.github.com> Date: Tue, 1 Oct 2024 17:53:09 -0400 Subject: [PATCH 21/40] DOP-5036 fix tests --- .../src/generateManifest/manifestEntry.ts | 2 +- .../tests/integration/uploadToAtlas.test.ts | 56 ++---- .../tests/unit/getProperties.test.ts | 174 +++++++++--------- search-manifest/tests/utils/mockDB.ts | 23 +++ 4 files changed, 130 insertions(+), 125 deletions(-) diff --git a/search-manifest/src/generateManifest/manifestEntry.ts b/search-manifest/src/generateManifest/manifestEntry.ts index 7254aee7b..c536b76a1 100644 --- a/search-manifest/src/generateManifest/manifestEntry.ts +++ b/search-manifest/src/generateManifest/manifestEntry.ts @@ -11,7 +11,7 @@ export class ManifestEntry { preview?: string | null; tags: string | null; //TODO: add type - facets: Facet; + facets: any; // TODO: add type for entry constructor(entry: any) { diff --git a/search-manifest/tests/integration/uploadToAtlas.test.ts b/search-manifest/tests/integration/uploadToAtlas.test.ts index 333be494a..86924682e 100644 --- a/search-manifest/tests/integration/uploadToAtlas.test.ts +++ b/search-manifest/tests/integration/uploadToAtlas.test.ts @@ -1,46 +1,41 @@ import { afterAll, - beforeEach, afterEach, describe, expect, test, vi, + beforeAll, } from "vitest"; import { uploadManifest } from "../../src/uploadToAtlas/uploadManifest"; import { Manifest } from "../../src/generateManifest/manifest"; import nodeManifest from "../resources/s3Manifests/node-current.json"; import { mockDb, insert, removeDocuments } from "../utils/mockDB"; -import type { DatabaseDocument } from "../../src/types"; import { getManifest } from "../utils/getManifest"; import { generateHash } from "../../src/utils"; +import { getDocumentsCollection } from "../../src/uploadToAtlas/searchConnector"; const PROPERTY_NAME = "dummyName"; //teardown connections -beforeEach(async () => { - vi.mock("../../src/uploadToAtlas/searchConnector", async (importOriginal) => { - const { mockDb, teardownMockDbClient } = await import("../utils/mockDB"); - const { getCollection } = await import( - "../../src/uploadToAtlas/searchConnector" - ); 
+beforeAll(async () => {
+  vi.mock("../../src/uploadToAtlas/searchConnector", async () => {
+    const { getSearchDb, teardownMockDbClient, getDocumentsCollection } =
+      await import("../utils/mockDB");
+
     return {
-      getCollection: getCollection,
+      getSearchDb: getSearchDb,
+      getDocumentsCollection: getDocumentsCollection,
       teardown: teardownMockDbClient,
-      db: async () => {
-        const db = await mockDb();
-        return db;
-      },
     };
   });
 });
 
 const checkCollection = async () => {
-  const db = await mockDb();
-  const documentCount = await db
-    .collection("documents")
-    .estimatedDocumentCount();
-  expect(documentCount).toEqual(0);
+  const docCount = await (
+    await getDocumentsCollection()
+  ).estimatedDocumentCount();
+  expect(docCount).toEqual(0);
 };
 
 afterAll(async () => {
@@ -84,8 +79,7 @@ describe("Upload manifest uploads to Atlas db", () => {
   await uploadManifest(manifest, PROPERTY_NAME);
 
   //check that manifests have been uploaded
-  const db = await mockDb();
-  const documents = db.collection("documents");
+  const documents = await getDocumentsCollection();
   //count number of documents in collection
   expect(await documents.countDocuments()).toEqual(manifest.documents.length);
 });
@@ -99,8 +93,7 @@ describe("Upload manifest uploads to Atlas db", () => {
   expect(status.upserted).toEqual(manifest.documents.length);
 
   //check that manifests have been uploaded
-  const db = await mockDb();
-  const documents = db.collection("documents");
+  const documents = await getDocumentsCollection();
   expect(await documents.countDocuments()).toEqual(manifest.documents.length);
 });
 });
@@ -116,8 +109,7 @@ describe(
     nodeManifest.includeInGlobalSearch
   );
   manifest1.documents = nodeManifest.documents;
-  const db = await mockDb();
-  const documents = db.collection("documents");
+  const documents = await getDocumentsCollection();
   const kotlinManifest = await getManifest("kotlin");
 
   test("nodeManifest uploads all documents", async () => {
@@ -125,8 +117,6 @@ describe(
     const status1 = await uploadManifest(manifest1, PROPERTY_NAME);
     expect(status1.upserted).toEqual(manifest1.documents.length);
     //reopen connection to count current num of documents in collection
-    await mockDb();
-
     expect(await documents.countDocuments()).toEqual(
       manifest1.documents.length
     );
@@ -138,9 +128,7 @@ describe(
   test("two separate manifests uploaded uploads correct number of entries", async () => {
     //find a way to check that there are no documents in the collection yet
-    await mockDb();
     const status = await uploadManifest(manifest1, PROPERTY_NAME);
-    await mockDb();
     expect(await documents.countDocuments()).toEqual(
       manifest1.documents.length
     );
@@ -156,18 +144,14 @@ describe(
   test("stale documents from same search property are removed", async () => {
     //upload documents
-    const db = await mockDb();
     const status = await uploadManifest(manifest1, PROPERTY_NAME);
-    await mockDb();
     const status1 = await uploadManifest(kotlinManifest, "docs-kotlin");
     //reopen connection to count current num of documents in collection
-    await mockDb();
     expect(await documents.countDocuments()).toEqual(
       kotlinManifest.documents.length + manifest1.documents.length
     );
 
     //insert entries with random slugs
-    await mockDb();
     const dummyHash = generateHash("dummyManifest");
     const dummyDate = new Date();
     const dummyDocs = [
@@ -184,17 +168,17 @@ describe(
       slug: "dummySlug2",
     },
   ];
-
+  const db = await mockDb();
   insert(db, "documents", dummyDocs);
 
   //upload node documents again
-  await mockDb();
   const status3 = await uploadManifest(manifest1, PROPERTY_NAME);
expect(status3.deleted).toEqual(dummyDocs.length); expect(status3.modified).toEqual(manifest1.documents.length); //check all documents have current hash, time - await mockDb(); - const empty = await db.collection("documents").findOne({ + const empty = await ( + await getDocumentsCollection() + ).findOne({ searchProperty: PROPERTY_NAME, manifestRevisionId: dummyHash, }); diff --git a/search-manifest/tests/unit/getProperties.test.ts b/search-manifest/tests/unit/getProperties.test.ts index cf7585997..dceec9af4 100644 --- a/search-manifest/tests/unit/getProperties.test.ts +++ b/search-manifest/tests/unit/getProperties.test.ts @@ -1,62 +1,57 @@ +import { describe, expect, test, vi, beforeAll, afterAll } from "vitest"; import { - describe, - beforeEach, - expect, - test, - vi, - beforeAll, - afterAll, -} from "vitest"; -import getProperties, { - getBranch, -} from "../../src/uploadToAtlas/getProperties"; -import { - mockDb, - teardownMockDbClient, insert, removeDocuments, + teardownMockDbClient, + mockDb, } from "../utils/mockDB"; + +import getProperties, { + getBranch, +} from "../../src/uploadToAtlas/getProperties"; + // simulate the repos_branches collection in an object import repos_branches from "../resources/mockCollections/repos-branches.json"; //simulate the docsests collection in an object import docsets from "../resources/mockCollections/docsets.json"; -import type * as mongodb from "mongodb"; import type { BranchEntry, DatabaseDocument } from "../../src/types"; import { getManifest } from "../utils/getManifest"; import { uploadManifest } from "../../src/uploadToAtlas/uploadManifest"; import { afterEach } from "node:test"; +import { getDocumentsCollection } from "../../src/uploadToAtlas/searchConnector"; const BRANCH_NAME_MASTER = "master"; const BRANCH_NAME_BETA = "beta"; const BRANCH_NAME_GIBBERISH = "gibberish"; -let db: mongodb.Db; const DOCS_COMPASS_NAME = "docs-compass"; const DOCS_CLOUD_NAME = "cloud-docs"; const DOCS_APP_SERVICES_NAME = "docs-app-services"; const DOCS_MONGODB_INTERNAL_NAME = "docs-mongodb-internal"; +const DOCS_MMS_NAME = "mms-docs"; beforeAll(async () => { - db = await mockDb(); + //insert repo metadata into dummy repos_branches and docsets collections + const db = await mockDb(); await insert(db, "repos_branches", repos_branches); await insert(db, "docsets", docsets); -}); -//mock repos_branches database -beforeEach(async () => { vi.mock("../../src/uploadToAtlas/searchConnector", async () => { - const { mockDb, teardownMockDbClient } = await import("../utils/mockDB"); - const { getCollection } = await import( - "../../src/uploadToAtlas/searchConnector" - ); + const { + teardownMockDbClient, + getReposBranchesCollection, + getDocsetsCollection, + getDocumentsCollection, + getSearchDb, + getSnootyDb, + } = await import("../utils/mockDB"); return { teardown: teardownMockDbClient, - getCollection: getCollection, - db: async () => { - //mock db of repos_branches - db = await mockDb(); - return db; - }, + getSearchDb: getSearchDb, + getSnootyDb: getSnootyDb, + getDocumentsCollection: getDocumentsCollection, + getReposBranchesCollection: getReposBranchesCollection, + getDocsetsCollection: getDocsetsCollection, }; }); }); @@ -105,21 +100,22 @@ describe("Given a branchname, get the properties associated with it from repos_b //mock repo name test(`correct properties are retrieved for branch ${BRANCH_NAME_MASTER} of repoName ${DOCS_COMPASS_NAME}`, async () => { //define expected properties object for master branch of Compass repo - process.env.REPO_NAME = 
DOCS_COMPASS_NAME; const compassMasterProperties = { searchProperty: "compass-current", projectName: "compass", url: "http://mongodb.com/docs/compass/", includeInGlobalSearch: true, }; - expect(await getProperties(BRANCH_NAME_MASTER)).toEqual( - compassMasterProperties - ); + expect( + await getProperties({ + branchName: BRANCH_NAME_MASTER, + repoName: DOCS_COMPASS_NAME, + }) + ).toEqual(compassMasterProperties); }); test(`correct properties are retrieved for branch ${BRANCH_NAME_MASTER} of repoName ${DOCS_CLOUD_NAME}`, async () => { //define expected properties object for master branch of cloud-docs repo - process.env.REPO_NAME = DOCS_CLOUD_NAME; const cloudDocsMasterProperties = { searchProperty: "atlas-master", projectName: "cloud-docs", @@ -127,9 +123,12 @@ describe("Given a branchname, get the properties associated with it from repos_b includeInGlobalSearch: true, }; - expect(await getProperties(BRANCH_NAME_MASTER)).toEqual( - cloudDocsMasterProperties - ); + expect( + await getProperties({ + branchName: BRANCH_NAME_MASTER, + repoName: DOCS_CLOUD_NAME, + }) + ).toEqual(cloudDocsMasterProperties); }); }); @@ -142,65 +141,70 @@ describe( test("getting properties for an inactive branch with no existing documents executes correctly and does not change db document count", async () => { //populate db with manifests - db = await mockDb(); const manifest1 = await getManifest("mms-master"); await uploadManifest(manifest1, "mms-docs-stable"); - //reopen connection to db - await mockDb(); //check number of documents initially in db - const documentCount = await db - .collection("documents") - .countDocuments(); + const documentCount = await ( + await getDocumentsCollection() + ).countDocuments(); //getProperties for beta doens't change number of documents in collection - process.env.repo_name = "docs-compass"; - await expect(getProperties(BRANCH_NAME_BETA)).rejects.toThrow(); - await mockDb(); - expect( - await db.collection("documents").countDocuments() - ).toEqual(documentCount); + await expect( + getProperties({ + branchName: BRANCH_NAME_BETA, + repoName: DOCS_COMPASS_NAME, + }) + ).rejects.toThrow(); + expect(await (await getDocumentsCollection()).countDocuments()).toEqual( + documentCount + ); }); test("non prod-deployable repo throws and doesn't return properties", async () => { - process.env.REPO_NAME = DOCS_MONGODB_INTERNAL_NAME; - await expect(getProperties("v5.0")).rejects.toThrow( - `Search manifest should not be generated for repo ${process.env.REPO_NAME}. Removing all associated manifests` + await expect( + getProperties({ + branchName: "v5.0", + repoName: DOCS_MONGODB_INTERNAL_NAME, + }) + ).rejects.toThrow( + `Search manifest should not be generated for repo ${DOCS_MONGODB_INTERNAL_NAME}. Removing all associated manifests` ); }); test(`no properties are retrieved for branch on repo ${DOCS_APP_SERVICES_NAME} without a "search" field. 
`, async () => { - process.env.REPO_NAME = DOCS_MONGODB_INTERNAL_NAME; - await expect(getProperties(BRANCH_NAME_MASTER)).rejects.toThrow(); + await expect( + getProperties({ + branchName: BRANCH_NAME_MASTER, + repoName: DOCS_MONGODB_INTERNAL_NAME, + }) + ).rejects.toThrow(); }); test("repo with no search categoryTitle removes all old documents with search properties beginning with that project name", async () => { - db = await mockDb(); - //add documents for project from two diff branches to search DB const manifest1 = await getManifest("mms-master"); await uploadManifest(manifest1, "mms-docs-stable"); - await mockDb(); const manifest2 = await getManifest("mms-v1.3"); await uploadManifest(manifest2, "mms-docs-v1.3"); - await mockDb(); - //trying to get properties for repo removes those older documents - process.env.REPO_NAME = "mms-docs"; - const documentCount = await db - .collection("documents") - .countDocuments(); - await expect(getProperties(BRANCH_NAME_MASTER)).rejects.toThrow(); + const documentCount = await ( + await getDocumentsCollection() + ).countDocuments(); + await expect( + getProperties({ + branchName: BRANCH_NAME_MASTER, + repoName: DOCS_MMS_NAME, + }) + ).rejects.toThrow(); //throws //no return type - await mockDb(); - const documentCount2 = await db - - .collection("documents") - .countDocuments(); + const documentCount2 = await ( + await getDocumentsCollection() + ).countDocuments(); expect(documentCount2).toEqual( documentCount - manifest1.documents.length - manifest2.documents.length ); @@ -208,31 +212,25 @@ describe( test("getting properties for an inactive branch removes all old documents with that exact project-version searchProperty", async () => { //add documents for project from two diff branches to DB-- docs-compass master and beta - db = await mockDb(); - //add documents for project from two diff branches to search DB const manifest1 = await getManifest("compass-master"); - await uploadManifest(manifest1, "compass-current"); - await mockDb(); - const manifest2 = await getManifest("compass-beta"); await uploadManifest(manifest2, "compass-upcoming"); - await mockDb(); //trying to get properties for repo removes only the older documents from that specific branch, beta - let documentCount; - let documentCount2; - //trying to get properties for repo removes those older documents - process.env.REPO_NAME = "docs-compass"; - documentCount = await db - .collection("documents") - .countDocuments(); - await expect(getProperties(BRANCH_NAME_BETA)).rejects.toThrow(); - await mockDb(); - documentCount2 = await db - .collection("documents") - .countDocuments(); + const documentCount = await ( + await getDocumentsCollection() + ).countDocuments(); + await expect( + getProperties({ + branchName: BRANCH_NAME_BETA, + repoName: DOCS_COMPASS_NAME, + }) + ).rejects.toThrow(); + const documentCount2 = await ( + await getDocumentsCollection() + ).countDocuments(); expect(documentCount2).toEqual( documentCount - manifest2.documents.length ); diff --git a/search-manifest/tests/utils/mockDB.ts b/search-manifest/tests/utils/mockDB.ts index 61479e415..846dbcdc4 100644 --- a/search-manifest/tests/utils/mockDB.ts +++ b/search-manifest/tests/utils/mockDB.ts @@ -21,6 +21,29 @@ export async function mockDb(): Promise { const dbInstance = client.db("dummy_db"); return dbInstance; } +export const getSearchDb = async () => { + const db = await mockDb(); + return db; +}; +export const getSnootyDb = async () => { + const db = await mockDb(); + return db; +}; + +export const getDocumentsCollection = 
async () => { + const dbSession = await getSearchDb(); + return dbSession.collection("documents"); +}; + +export const getReposBranchesCollection = async () => { + const dbSession = await getSnootyDb(); + return dbSession.collection("repos_branches"); +}; + +export const getDocsetsCollection = async () => { + const dbSession = await getSnootyDb(); + return dbSession.collection("docsets"); +}; export const insert = async ( dbName: mongodb.Db, From c3658d3adc7b3c0c9034063ddb72662977a6974b Mon Sep 17 00:00:00 2001 From: anabellabuckvar <41971124+anabellabuckvar@users.noreply.github.com> Date: Tue, 1 Oct 2024 18:07:20 -0400 Subject: [PATCH 22/40] DOP-5036 add metadata type --- search-manifest/src/types.ts | 6 ++++++ search-manifest/tests/unit/getProperties.test.ts | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/search-manifest/src/types.ts b/search-manifest/src/types.ts index 73bdb896e..30cbe482e 100644 --- a/search-manifest/src/types.ts +++ b/search-manifest/src/types.ts @@ -55,3 +55,9 @@ export interface BranchEntry { isStableBranch: boolean; active: boolean; } + +export type metadata = { + robots: boolean; + keywords: string; + description?: string; +}; diff --git a/search-manifest/tests/unit/getProperties.test.ts b/search-manifest/tests/unit/getProperties.test.ts index dceec9af4..7f1e5f442 100644 --- a/search-manifest/tests/unit/getProperties.test.ts +++ b/search-manifest/tests/unit/getProperties.test.ts @@ -14,7 +14,7 @@ import getProperties, { import repos_branches from "../resources/mockCollections/repos-branches.json"; //simulate the docsests collection in an object import docsets from "../resources/mockCollections/docsets.json"; -import type { BranchEntry, DatabaseDocument } from "../../src/types"; +import type { BranchEntry } from "../../src/types"; import { getManifest } from "../utils/getManifest"; import { uploadManifest } from "../../src/uploadToAtlas/uploadManifest"; import { afterEach } from "node:test"; From 613462646d4cba595a899faa109b6b0c8a9d8018 Mon Sep 17 00:00:00 2001 From: anabellabuckvar <41971124+anabellabuckvar@users.noreply.github.com> Date: Tue, 1 Oct 2024 18:09:58 -0400 Subject: [PATCH 23/40] DOP-5036 fix manifest typing --- .../src/generateManifest/document.ts | 59 +++++++++++-------- 1 file changed, 36 insertions(+), 23 deletions(-) diff --git a/search-manifest/src/generateManifest/document.ts b/search-manifest/src/generateManifest/document.ts index 612a1d353..91483bf34 100644 --- a/search-manifest/src/generateManifest/document.ts +++ b/search-manifest/src/generateManifest/document.ts @@ -2,35 +2,44 @@ import { JSONPath } from "jsonpath-plus"; import { Facet } from "./createFacets"; import { ManifestEntry } from "./manifestEntry"; import type { BSON } from "bson"; +import { metadata } from "../types"; export class Document { //Return indexing data from a page's JSON-formatted AST for search purposes - tree: any; - robots: any; - keywords: any; - description: any; + + tree: BSON.Document; + robots: boolean; + keywords: string | null; + description?: string; paragraphs: string; - code: { lang: string; value: any }[]; - title: any; - headings: any; + code: Array<{ lang: string; value: string }>; + title: string; + headings: Array; slug: string; - preview?: string; + preview: string | null; facets: any; - noIndex: any; - reasons: any; + noIndex: boolean; + reasons: Array; constructor(doc: BSON.Document) { this.tree = doc; //find metadata - [this.robots, this.keywords, this.description] = this.findMetadata(); + let { robots, keywords, description } 
= this.findMetadata(); + + this.robots = robots; + this.keywords = keywords; + this.description = description; //find paragraphs this.paragraphs = this.findParagraphs(); + //find code this.code = this.findCode(); //find title, headings - [this.title, this.headings] = this.findHeadings(); + let { title, headings } = this.findHeadings(); + this.title = title; + this.headings = headings; //derive slug this.slug = this.deriveSlug(); @@ -42,13 +51,15 @@ export class Document { this.facets = deriveFacets(this.tree); //noindex, reasons - [this.noIndex, this.reasons] = this.getNoIndex(); + let { noIndex, reasons } = this.getNoIndex(); + this.noIndex = noIndex; + this.reasons = reasons; } - findMetadata() { + findMetadata = () => { let robots = true; //can be set in the rst if the page is supposed to be crawled - let keywords: string | null = null; //keywords is an optional list of strings - let description: string | null = null; //this can be optional?? + let keywords: string = ""; //keywords is an optional list of strings + let description: string | undefined; //this can be optional?? const results = JSONPath({ path: "$..children[?(@.name=='meta')]..options", @@ -68,8 +79,8 @@ export class Document { description = val?.description; } - return [robots, keywords, description]; - } + return { robots, keywords, description }; + }; findParagraphs() { let paragraphs = ""; @@ -93,15 +104,17 @@ export class Document { const codeContents = []; for (const r of results) { + // when will there be no value for language?? do we want to set to null if that happens?? const lang = r.lang ?? null; + //TODO: maybe need r.value["value"] here instead codeContents.push({ lang: lang, value: r.value }); } return codeContents; } findHeadings() { - const headings: string[] = []; - let title = ""; + const headings: Array = []; + let title: string = ""; // Get the children of headings nodes const results = JSONPath({ @@ -110,7 +123,7 @@ export class Document { }); //no heading nodes found?? page doesn't have title, or headings - if (!results.length) return [title, headings]; + if (!results.length) return { title, headings }; for (const r of results) { const heading = []; @@ -128,7 +141,7 @@ export class Document { } title = headings.shift() ?? 
""; - return [title, headings]; + return { title, headings }; } deriveSlug() { @@ -196,7 +209,7 @@ export class Document { reasons.push("This page has no headings"); } - return [noIndex, reasons]; + return { noIndex, reasons }; } exportAsManifestDocument = () => { From bf1dc812b7d1e181daa2a6cf122149fe71fe13ef Mon Sep 17 00:00:00 2001 From: anabellabuckvar <41971124+anabellabuckvar@users.noreply.github.com> Date: Tue, 1 Oct 2024 18:13:21 -0400 Subject: [PATCH 24/40] DOP-5036 fix manifest typing --- search-manifest/src/generateManifest/document.ts | 4 ++-- search-manifest/src/types.ts | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/search-manifest/src/generateManifest/document.ts b/search-manifest/src/generateManifest/document.ts index 91483bf34..713fcd88a 100644 --- a/search-manifest/src/generateManifest/document.ts +++ b/search-manifest/src/generateManifest/document.ts @@ -56,9 +56,9 @@ export class Document { this.reasons = reasons; } - findMetadata = () => { + findMetadata = (): metadata => { let robots = true; //can be set in the rst if the page is supposed to be crawled - let keywords: string = ""; //keywords is an optional list of strings + let keywords: string | null = null; //keywords is an optional list of strings let description: string | undefined; //this can be optional?? const results = JSONPath({ diff --git a/search-manifest/src/types.ts b/search-manifest/src/types.ts index 30cbe482e..e8737bbd5 100644 --- a/search-manifest/src/types.ts +++ b/search-manifest/src/types.ts @@ -58,6 +58,6 @@ export interface BranchEntry { export type metadata = { robots: boolean; - keywords: string; + keywords: string | null; description?: string; }; From bb05afe0f447f69b348e3bac5214945cba13adfd Mon Sep 17 00:00:00 2001 From: anabellabuckvar <41971124+anabellabuckvar@users.noreply.github.com> Date: Tue, 1 Oct 2024 18:24:07 -0400 Subject: [PATCH 25/40] DOP-5036 fix facet formatting --- .../src/generateManifest/createFacets.ts | 40 +++++++++---------- .../src/generateManifest/document.ts | 2 +- 2 files changed, 21 insertions(+), 21 deletions(-) diff --git a/search-manifest/src/generateManifest/createFacets.ts b/search-manifest/src/generateManifest/createFacets.ts index 0777da562..4fb0fa06b 100644 --- a/search-manifest/src/generateManifest/createFacets.ts +++ b/search-manifest/src/generateManifest/createFacets.ts @@ -1,25 +1,25 @@ -import { NetlifyIntegration } from '@netlify/sdk'; +import { NetlifyIntegration } from "@netlify/sdk"; export class Facet { - category: any; - value: any; - subFacets: any; + category: any; + value: any; + subFacets: any; - constructor(category: string, value: string, subFacets: []) { - this.category = category; - this.value = value; - this.subFacets = []; + constructor(category: string, value: string, subFacets: []) { + this.category = category; + this.value = value; + this.subFacets = []; - if (subFacets) { - for (const subFacet of subFacets) { - this.subFacets.push( - new Facet( - subFacet['category'], - subFacet['value'], - subFacet['sub_facets'] ?? [], - ), - ); - } - } - } + if (subFacets) { + for (const subFacet of subFacets) { + this.subFacets.push( + new Facet( + subFacet["category"], + subFacet["value"], + subFacet["subFacets"] ?? 
[] + ) + ); + } + } + } } diff --git a/search-manifest/src/generateManifest/document.ts b/search-manifest/src/generateManifest/document.ts index 713fcd88a..3cc5b9cd9 100644 --- a/search-manifest/src/generateManifest/document.ts +++ b/search-manifest/src/generateManifest/document.ts @@ -254,7 +254,7 @@ const deriveFacets = (tree: any) => { const facet = new Facet( facetEntry.category, facetEntry.value, - facetEntry.sub_facets + facetEntry.subFacets ); insertKeyVals(facet); }; From 6697b8591d0a69d78ec4dd3aa2c25c77d2f259c7 Mon Sep 17 00:00:00 2001 From: anabellabuckvar <41971124+anabellabuckvar@users.noreply.github.com> Date: Tue, 1 Oct 2024 18:28:01 -0400 Subject: [PATCH 26/40] DOP-5036 fix facets --- search-manifest/src/generateManifest/createFacets.ts | 5 ++--- search-manifest/src/generateManifest/document.ts | 4 ++-- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/search-manifest/src/generateManifest/createFacets.ts b/search-manifest/src/generateManifest/createFacets.ts index 4fb0fa06b..90824efbb 100644 --- a/search-manifest/src/generateManifest/createFacets.ts +++ b/search-manifest/src/generateManifest/createFacets.ts @@ -1,15 +1,14 @@ import { NetlifyIntegration } from "@netlify/sdk"; export class Facet { - category: any; - value: any; + category: string; + value: string; subFacets: any; constructor(category: string, value: string, subFacets: []) { this.category = category; this.value = value; this.subFacets = []; - if (subFacets) { for (const subFacet of subFacets) { this.subFacets.push( diff --git a/search-manifest/src/generateManifest/document.ts b/search-manifest/src/generateManifest/document.ts index 3cc5b9cd9..88b10904d 100644 --- a/search-manifest/src/generateManifest/document.ts +++ b/search-manifest/src/generateManifest/document.ts @@ -17,7 +17,7 @@ export class Document { headings: Array; slug: string; preview: string | null; - facets: any; + facets: Facet; noIndex: boolean; reasons: Array; @@ -250,7 +250,7 @@ const deriveFacets = (tree: any) => { } }; - const createFacet = (facetEntry: any) => { + const createFacet = (facetEntry: Facet) => { const facet = new Facet( facetEntry.category, facetEntry.value, From 9c1300099fa94dadfb3fbbb15820538e5317657a Mon Sep 17 00:00:00 2001 From: anabellabuckvar <41971124+anabellabuckvar@users.noreply.github.com> Date: Tue, 1 Oct 2024 18:48:34 -0400 Subject: [PATCH 27/40] DOP-5036 some formatting --- .../src/generateManifest/createFacets.ts | 10 +++------- search-manifest/src/generateManifest/document.ts | 14 +++++++------- search-manifest/tests/utils/mockDB.ts | 2 +- 3 files changed, 11 insertions(+), 15 deletions(-) diff --git a/search-manifest/src/generateManifest/createFacets.ts b/search-manifest/src/generateManifest/createFacets.ts index 90824efbb..9850cea6b 100644 --- a/search-manifest/src/generateManifest/createFacets.ts +++ b/search-manifest/src/generateManifest/createFacets.ts @@ -3,20 +3,16 @@ import { NetlifyIntegration } from "@netlify/sdk"; export class Facet { category: string; value: string; - subFacets: any; + subFacets: Array; - constructor(category: string, value: string, subFacets: []) { + constructor(category: string, value: string, subFacets: Array) { this.category = category; this.value = value; this.subFacets = []; if (subFacets) { for (const subFacet of subFacets) { this.subFacets.push( - new Facet( - subFacet["category"], - subFacet["value"], - subFacet["subFacets"] ?? [] - ) + new Facet(subFacet.category, subFacet.value, subFacet.subFacets ?? 
[]) ); } } diff --git a/search-manifest/src/generateManifest/document.ts b/search-manifest/src/generateManifest/document.ts index 88b10904d..88a07d209 100644 --- a/search-manifest/src/generateManifest/document.ts +++ b/search-manifest/src/generateManifest/document.ts @@ -2,7 +2,7 @@ import { JSONPath } from "jsonpath-plus"; import { Facet } from "./createFacets"; import { ManifestEntry } from "./manifestEntry"; import type { BSON } from "bson"; -import { metadata } from "../types"; +import type { metadata } from "../types"; export class Document { //Return indexing data from a page's JSON-formatted AST for search purposes @@ -25,7 +25,7 @@ export class Document { this.tree = doc; //find metadata - let { robots, keywords, description } = this.findMetadata(); + const { robots, keywords, description } = this.findMetadata(); this.robots = robots; this.keywords = keywords; @@ -37,7 +37,7 @@ export class Document { this.code = this.findCode(); //find title, headings - let { title, headings } = this.findHeadings(); + const { title, headings } = this.findHeadings(); this.title = title; this.headings = headings; @@ -51,7 +51,7 @@ export class Document { this.facets = deriveFacets(this.tree); //noindex, reasons - let { noIndex, reasons } = this.getNoIndex(); + const { noIndex, reasons } = this.getNoIndex(); this.noIndex = noIndex; this.reasons = reasons; } @@ -114,7 +114,7 @@ export class Document { findHeadings() { const headings: Array = []; - let title: string = ""; + let title = ""; // Get the children of headings nodes const results = JSONPath({ @@ -235,7 +235,7 @@ export class Document { }; } -const deriveFacets = (tree: any) => { +const deriveFacets = (tree: BSON.Document) => { //Format facets for ManifestEntry from bson entry tree['facets'] if it exists const insertKeyVals = (facet: any, prefix = "") => { @@ -250,7 +250,7 @@ const deriveFacets = (tree: any) => { } }; - const createFacet = (facetEntry: Facet) => { + const createFacet = (facetEntry: any) => { const facet = new Facet( facetEntry.category, facetEntry.value, diff --git a/search-manifest/tests/utils/mockDB.ts b/search-manifest/tests/utils/mockDB.ts index 846dbcdc4..08f707e08 100644 --- a/search-manifest/tests/utils/mockDB.ts +++ b/search-manifest/tests/utils/mockDB.ts @@ -48,7 +48,7 @@ export const getDocsetsCollection = async () => { export const insert = async ( dbName: mongodb.Db, collectionName: string, - docs: any[] + docs: any ) => { const coll = dbName.collection(collectionName); const result = await coll.insertMany(docs); From ce383b4b639d692b166b4f5866cb8c20199cf580 Mon Sep 17 00:00:00 2001 From: anabellabuckvar <41971124+anabellabuckvar@users.noreply.github.com> Date: Wed, 2 Oct 2024 11:34:27 -0400 Subject: [PATCH 28/40] DOP-5036 merge createFacets paths --- search-manifest/src/generateManifest/document.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/search-manifest/src/generateManifest/document.ts b/search-manifest/src/generateManifest/document.ts index 88a07d209..25909621e 100644 --- a/search-manifest/src/generateManifest/document.ts +++ b/search-manifest/src/generateManifest/document.ts @@ -250,7 +250,7 @@ const deriveFacets = (tree: BSON.Document) => { } }; - const createFacet = (facetEntry: any) => { + const createFacet = (facetEntry: Facet) => { const facet = new Facet( facetEntry.category, facetEntry.value, From fc3abd7e6d0deda5b5e98671c6698addfe0108c5 Mon Sep 17 00:00:00 2001 From: anabellabuckvar <41971124+anabellabuckvar@users.noreply.github.com> Date: Fri, 4 Oct 2024 11:09:56 -0400 
Subject: [PATCH 29/40] DOP-5036 cleaning a lot --- search-manifest/src/assertEnvVars.ts | 13 ++-- .../src/generateManifest/createFacets.ts | 2 +- .../src/generateManifest/document.ts | 39 +++++----- .../src/generateManifest/manifestEntry.ts | 27 ------- search-manifest/src/index.ts | 2 +- search-manifest/src/types.ts | 74 +++++++++++++------ .../src/uploadToAtlas/getProperties.ts | 6 +- .../src/uploadToAtlas/searchConnector.ts | 8 +- .../src/uploadToAtlas/uploadManifest.ts | 6 +- 9 files changed, 86 insertions(+), 91 deletions(-) delete mode 100644 search-manifest/src/generateManifest/manifestEntry.ts diff --git a/search-manifest/src/assertEnvVars.ts b/search-manifest/src/assertEnvVars.ts index 5b171171c..eb1458220 100644 --- a/search-manifest/src/assertEnvVars.ts +++ b/search-manifest/src/assertEnvVars.ts @@ -1,15 +1,17 @@ -const assertEnvVars = (vars: any) => { +import type { envVars } from "./types"; + +const assertEnvVars = (vars: envVars) => { const missingVars = Object.entries(vars) .filter(([, value]) => !value) .map(([key]) => `- ${key}`) .join("\n"); if (missingVars) throw new Error(`Missing env var(s) ${JSON.stringify(missingVars)}`); - return true; + return vars; }; export const getEnvVars = () => { - const ENV_VARS = { + const environmentVariables = assertEnvVars({ ATLAS_CLUSTER0_URI: `mongodb+srv://${process.env.MONGO_ATLAS_USERNAME}:${process.env.MONGO_ATLAS_PASSWORD}@${process.env.MONGO_ATLAS_CLUSTER0_HOST}/?retryWrites=true&w=majority`, SNOOTY_DB_NAME: `${process.env.MONGO_ATLAS_POOL_DB_NAME}`, ATLAS_SEARCH_URI: `mongodb+srv://${process.env.MONGO_ATLAS_USERNAME}:${process.env.MONGO_ATLAS_PASSWORD}@${process.env.MONGO_ATLAS_SEARCH_HOST}/?retryWrites=true&w=majority`, @@ -17,7 +19,6 @@ export const getEnvVars = () => { REPOS_BRANCHES_COLLECTION: "repos_branches", DOCSETS_COLLECTION: "docsets", DOCUMENTS_COLLECTION: "documents", - }; - assertEnvVars(ENV_VARS); - return ENV_VARS; + }); + return environmentVariables; }; diff --git a/search-manifest/src/generateManifest/createFacets.ts b/search-manifest/src/generateManifest/createFacets.ts index 9850cea6b..3c05e8d9e 100644 --- a/search-manifest/src/generateManifest/createFacets.ts +++ b/search-manifest/src/generateManifest/createFacets.ts @@ -8,7 +8,7 @@ export class Facet { constructor(category: string, value: string, subFacets: Array) { this.category = category; this.value = value; - this.subFacets = []; + this.subFacets = subFacets; if (subFacets) { for (const subFacet of subFacets) { this.subFacets.push( diff --git a/search-manifest/src/generateManifest/document.ts b/search-manifest/src/generateManifest/document.ts index 25909621e..ee462eee4 100644 --- a/search-manifest/src/generateManifest/document.ts +++ b/search-manifest/src/generateManifest/document.ts @@ -1,8 +1,7 @@ import { JSONPath } from "jsonpath-plus"; import { Facet } from "./createFacets"; -import { ManifestEntry } from "./manifestEntry"; import type { BSON } from "bson"; -import type { metadata } from "../types"; +import type { manifestFacets, metadata, manifestEntry } from "../types"; export class Document { //Return indexing data from a page's JSON-formatted AST for search purposes @@ -17,7 +16,7 @@ export class Document { headings: Array; slug: string; preview: string | null; - facets: Facet; + facets: manifestFacets; noIndex: boolean; reasons: Array; @@ -212,33 +211,33 @@ export class Document { return { noIndex, reasons }; } - exportAsManifestDocument = () => { - // Generate the manifest dictionary entry from the AST source + exportAsManifestEntry = (): 
manifestEntry | "" => { + // Generate a manifest entry from a document if (this.noIndex) { console.info("Refusing to index"); - return; + return ""; } - const document = new ManifestEntry({ + const manifestEntry = { slug: this.slug, title: this.title, headings: this.headings, paragraphs: this.paragraphs, code: this.code, preview: this.preview, - keywords: this.keywords, + tags: this.keywords, facets: this.facets, - }); + }; - return document; + return manifestEntry; }; } const deriveFacets = (tree: BSON.Document) => { //Format facets for ManifestEntry from bson entry tree['facets'] if it exists - const insertKeyVals = (facet: any, prefix = "") => { + const insertKeyVals = (facet: Facet, prefix = "") => { const key = prefix + facet.category; documentFacets[key] = documentFacets[key] ?? []; documentFacets[key].push(facet.value); @@ -250,19 +249,15 @@ const deriveFacets = (tree: BSON.Document) => { } }; - const createFacet = (facetEntry: Facet) => { - const facet = new Facet( - facetEntry.category, - facetEntry.value, - facetEntry.subFacets - ); - insertKeyVals(facet); - }; - - const documentFacets: any = {}; + const documentFacets: Record> = {}; if (tree.facets) { for (const facetEntry of tree.facets) { - createFacet(facetEntry); + const facet = new Facet( + facetEntry.category, + facetEntry.value, + facetEntry.subFacets + ); + insertKeyVals(facet); } } return documentFacets; diff --git a/search-manifest/src/generateManifest/manifestEntry.ts b/search-manifest/src/generateManifest/manifestEntry.ts deleted file mode 100644 index c536b76a1..000000000 --- a/search-manifest/src/generateManifest/manifestEntry.ts +++ /dev/null @@ -1,27 +0,0 @@ -import type { Facet } from "./createFacets"; - -//change this to an interface -export class ManifestEntry { - slug: string; - strippedSlug?: string; - title: string; - headings?: string[]; - paragraphs: string; - code: { lang: string | null; value: string }[]; - preview?: string | null; - tags: string | null; - //TODO: add type - facets: any; - - // TODO: add type for entry - constructor(entry: any) { - this.slug = entry.slug; - this.title = entry.title; - this.headings = entry.headings; - this.paragraphs = entry.paragraphs; - this.code = entry.code; - this.preview = entry.preview; - this.tags = entry.keywords; - this.facets = entry.facets; - } -} diff --git a/search-manifest/src/index.ts b/search-manifest/src/index.ts index 28be1c228..bee984191 100644 --- a/search-manifest/src/index.ts +++ b/search-manifest/src/index.ts @@ -36,7 +36,7 @@ export const generateManifest = async () => { const decoded = BSON.deserialize(readFileSync(`documents/${entry}`)); // Parse data into a document and format it as a Manifest document - const processedDoc = new Document(decoded).exportAsManifestDocument(); + const processedDoc = new Document(decoded).exportAsManifestEntry(); if (processedDoc) manifest.addDocument(processedDoc); } return manifest; diff --git a/search-manifest/src/types.ts b/search-manifest/src/types.ts index e8737bbd5..60d8c876d 100644 --- a/search-manifest/src/types.ts +++ b/search-manifest/src/types.ts @@ -1,5 +1,4 @@ -import type { WithId } from "mongodb"; -import type { ManifestEntry } from "./generateManifest/manifestEntry"; +import type { Document, WithId } from "mongodb"; export type RefreshInfo = { deleted: number; @@ -9,6 +8,12 @@ export type RefreshInfo = { elapsedMS: number; }; +export type metadata = { + robots: boolean; + keywords: string | null; + description?: string; +}; + export type s3UploadParams = { bucket: string; prefix: string; @@ 
-17,29 +22,31 @@ export type s3UploadParams = { }; type EnvironmentConfig = { - dev: string; + dev?: string; stg: string; dotcomstg: string; dotcomprd: string; prd: string; }; -export interface DocsetsDocument extends WithId { - url: EnvironmentConfig; - prefix: EnvironmentConfig; +export interface BranchEntry { + name?: string; + gitBranchName: string; + urlSlug: string; + isStableBranch: boolean; + active: boolean; } -export interface DatabaseDocument extends ManifestEntry { - url: string; - lastModified: Date; - manifestRevisionId: string; - searchProperty: string[]; - includeInGlobalSearch: boolean; +export interface DocsetsDocument { + project: string; + url: EnvironmentConfig; + prefix: EnvironmentConfig; } -export interface ReposBranchesDocument extends WithId { +export interface ReposBranchesDocument { + repoName: string; project: string; - search: { + search?: { categoryTitle: string; categoryName?: string; }; @@ -48,16 +55,35 @@ export interface ReposBranchesDocument extends WithId { internalOnly: boolean; } -export interface BranchEntry { - name?: string; - gitBranchName: string; - urlSlug: string; - isStableBranch: boolean; - active: boolean; +export interface SearchDocument { + url: string; + slug: string; + lastModified: Date; + manifestRevisionId: string; + searchProperty: Array; + includeInGlobalSearch: boolean; } -export type metadata = { - robots: boolean; - keywords: string | null; - description?: string; +export type manifestFacets = Record | undefined>; + +export type manifestEntry = { + slug: string; + strippedSlug?: string; + title: string; + headings?: Array; + paragraphs: string; + code: Array<{ lang: string | null; value: string }>; + preview?: string | null; + tags: string | null; + facets: manifestFacets; +}; + +export type envVars = { + ATLAS_CLUSTER0_URI: string; + SNOOTY_DB_NAME: string; + ATLAS_SEARCH_URI: string; + SEARCH_DB_NAME: string; + REPOS_BRANCHES_COLLECTION: string; + DOCSETS_COLLECTION: string; + DOCUMENTS_COLLECTION: string; }; diff --git a/search-manifest/src/uploadToAtlas/getProperties.ts b/search-manifest/src/uploadToAtlas/getProperties.ts index b61107e1d..885133483 100644 --- a/search-manifest/src/uploadToAtlas/getProperties.ts +++ b/search-manifest/src/uploadToAtlas/getProperties.ts @@ -5,7 +5,7 @@ import { } from "./searchConnector"; import type { BranchEntry, - DatabaseDocument, + SearchDocument, DocsetsDocument, ReposBranchesDocument, } from "../types"; @@ -13,7 +13,7 @@ import { assertTrailingSlash } from "../utils"; import { deleteStaleProperties } from "./deleteStale"; export const getDocsetEntry = async ( - docsets: Collection, + docsets: Collection, project: string ) => { const docsetsQuery = { project: { $eq: project } }; @@ -29,7 +29,7 @@ export const getRepoEntry = async ({ repos_branches, }: { repoName: string; - repos_branches: Collection; + repos_branches: Collection; }) => { const query = { repoName: repoName, diff --git a/search-manifest/src/uploadToAtlas/searchConnector.ts b/search-manifest/src/uploadToAtlas/searchConnector.ts index e5259c16b..41c1eccbf 100644 --- a/search-manifest/src/uploadToAtlas/searchConnector.ts +++ b/search-manifest/src/uploadToAtlas/searchConnector.ts @@ -1,6 +1,6 @@ import type { Db } from "mongodb"; import * as mongodb from "mongodb"; -import type { DatabaseDocument } from "../types"; +import type { SearchDocument } from "../types"; import { getEnvVars } from "../assertEnvVars"; const ENV_VARS = getEnvVars(); @@ -66,17 +66,17 @@ export const closeSearchDb = async () => { export const 
getDocsetsCollection = async () => { const dbSession = await getSnootyDb(); - return dbSession.collection(ENV_VARS.DOCSETS_COLLECTION); + return dbSession.collection(ENV_VARS.DOCSETS_COLLECTION); }; export const getReposBranchesCollection = async () => { const dbSession = await getSnootyDb(); - return dbSession.collection( + return dbSession.collection( ENV_VARS.REPOS_BRANCHES_COLLECTION ); }; export const getDocumentsCollection = async () => { const dbSession = await getSearchDb(); - return dbSession.collection(ENV_VARS.DOCUMENTS_COLLECTION); + return dbSession.collection(ENV_VARS.DOCUMENTS_COLLECTION); }; diff --git a/search-manifest/src/uploadToAtlas/uploadManifest.ts b/search-manifest/src/uploadToAtlas/uploadManifest.ts index 59f0c7f18..f20cd72fe 100644 --- a/search-manifest/src/uploadToAtlas/uploadManifest.ts +++ b/search-manifest/src/uploadToAtlas/uploadManifest.ts @@ -1,7 +1,7 @@ import type { Manifest } from "../generateManifest/manifest"; import { getDocumentsCollection } from "./searchConnector"; import assert from "node:assert"; -import type { RefreshInfo, DatabaseDocument } from "../types"; +import type { RefreshInfo, SearchDocument } from "../types"; import { generateHash, joinUrl } from "../utils"; const ATLAS_SEARCH_URI = `mongodb+srv://${process.env.MONGO_ATLAS_USERNAME}:${process.env.MONGO_ATLAS_PASSWORD}@${process.env.MONGO_ATLAS_SEARCH_HOST}/?retryWrites=true&w=majority`; @@ -22,7 +22,7 @@ const composeUpserts = async ( document.strippedSlug = document.slug.replaceAll("/", ""); - const newDocument: DatabaseDocument = { + const newDocument: SearchDocument = { ...document, lastModified: lastModified, url: joinUrl({ base: manifest.url, path: document.slug }), @@ -59,7 +59,7 @@ export const uploadManifest = async ( upserted: 0, modified: 0, dateStarted: new Date(), - //TODO: set elapsed ms + //TODO: set elapsed ms ? 
elapsedMS: 0, }; From 3018c2645ddee9d376e857f6326ef4dfeab10a1b Mon Sep 17 00:00:00 2001 From: anabellabuckvar <41971124+anabellabuckvar@users.noreply.github.com> Date: Fri, 4 Oct 2024 11:11:23 -0400 Subject: [PATCH 30/40] DOP-5036 updating tests --- .../tests/integration/uploadToAtlas.test.ts | 10 ++++++++++ search-manifest/tests/unit/index.test.ts | 4 ++-- search-manifest/tests/unit/utils.test.ts | 2 ++ search-manifest/tests/utils/mockDB.ts | 18 +++++++++++------- 4 files changed, 25 insertions(+), 9 deletions(-) diff --git a/search-manifest/tests/integration/uploadToAtlas.test.ts b/search-manifest/tests/integration/uploadToAtlas.test.ts index 86924682e..1ca9b3dfd 100644 --- a/search-manifest/tests/integration/uploadToAtlas.test.ts +++ b/search-manifest/tests/integration/uploadToAtlas.test.ts @@ -156,12 +156,22 @@ describe( const dummyDate = new Date(); const dummyDocs = [ { + repoName: "", + project: "", + branches: [], + prodDeployable: true, + internalOnly: true, manifestRevisionId: dummyHash, lastModified: dummyDate, searchProperty: PROPERTY_NAME, slug: "dummySlug1", }, { + repoName: "", + project: "", + branches: [], + prodDeployable: true, + internalOnly: true, manifestRevisionId: dummyHash, lastModified: dummyDate, searchProperty: PROPERTY_NAME, diff --git a/search-manifest/tests/unit/index.test.ts b/search-manifest/tests/unit/index.test.ts index 5890e7c3e..81bb531f2 100644 --- a/search-manifest/tests/unit/index.test.ts +++ b/search-manifest/tests/unit/index.test.ts @@ -1,7 +1,7 @@ import { describe, expect, afterEach, test, it, vi, beforeAll } from "vitest"; import nodeManifest from "../resources/s3Manifests/node-current.json"; import kotlinManifest from "../resources/s3Manifests/kotlin-upcoming.json"; -import type { ManifestEntry } from "../../src/generateManifest/manifestEntry"; +import type { manifestEntry } from "../../src/types"; import { getManifest } from "../utils/getManifest"; describe.each([ @@ -33,7 +33,7 @@ describe.each([ const title = manifest.documents[0].title; //TODO: put in a loop to check multiple manifestEntries against each other - let equivDoc: ManifestEntry; + let equivDoc: manifestEntry; for (const document of s3Manifest.documents) { if (document.title === manifest.documents[0].title) equivDoc = document; } diff --git a/search-manifest/tests/unit/utils.test.ts b/search-manifest/tests/unit/utils.test.ts index ffca4d296..24d38a874 100644 --- a/search-manifest/tests/unit/utils.test.ts +++ b/search-manifest/tests/unit/utils.test.ts @@ -10,3 +10,5 @@ it("correctly joins base URLs with slugs", () => { "https://example.com/foo" ); }); + +//TODO: test assertTrailingSlash, generateHash diff --git a/search-manifest/tests/utils/mockDB.ts b/search-manifest/tests/utils/mockDB.ts index 08f707e08..ef03abfd6 100644 --- a/search-manifest/tests/utils/mockDB.ts +++ b/search-manifest/tests/utils/mockDB.ts @@ -1,6 +1,10 @@ import { MongoMemoryServer } from "mongodb-memory-server"; import * as mongodb from "mongodb"; -import type { DatabaseDocument } from "../../src/types"; +import type { + DocsetsDocument, + ReposBranchesDocument, + SearchDocument, +} from "../../src/types"; let client: mongodb.MongoClient; @@ -32,23 +36,23 @@ export const getSnootyDb = async () => { export const getDocumentsCollection = async () => { const dbSession = await getSearchDb(); - return dbSession.collection("documents"); + return dbSession.collection("documents"); }; export const getReposBranchesCollection = async () => { const dbSession = await getSnootyDb(); - return 
dbSession.collection("repos_branches"); + return dbSession.collection("repos_branches"); }; export const getDocsetsCollection = async () => { const dbSession = await getSnootyDb(); - return dbSession.collection("docsets"); + return dbSession.collection("docsets"); }; export const insert = async ( dbName: mongodb.Db, collectionName: string, - docs: any + docs: Array | Array ) => { const coll = dbName.collection(collectionName); const result = await coll.insertMany(docs); @@ -58,9 +62,9 @@ export const insert = async ( export const removeDocuments = async (collectionName: string) => { //delete all documents in repo const db = await mockDb(); - await db.collection(collectionName).deleteMany({}); + await db.collection(collectionName).deleteMany({}); const documentCount = await db - .collection("documents") + .collection("documents") .countDocuments(); return documentCount; }; From e6f8e5feafeff11d0bdf88394a6dc8acf86a0081 Mon Sep 17 00:00:00 2001 From: anabellabuckvar <41971124+anabellabuckvar@users.noreply.github.com> Date: Fri, 4 Oct 2024 11:11:56 -0400 Subject: [PATCH 31/40] DOP-5036 add app services s3 manifest --- .../atlas-app-services-master.json | 410 ++++++++++++++++++ 1 file changed, 410 insertions(+) create mode 100644 search-manifest/tests/resources/s3Manifests/atlas-app-services-master.json diff --git a/search-manifest/tests/resources/s3Manifests/atlas-app-services-master.json b/search-manifest/tests/resources/s3Manifests/atlas-app-services-master.json new file mode 100644 index 000000000..8c388ccb4 --- /dev/null +++ b/search-manifest/tests/resources/s3Manifests/atlas-app-services-master.json @@ -0,0 +1,410 @@ +{ + "url": "http://mongodb.com/docs/atlas/app-services", + "includeInGlobalSearch": true, + "documents": [ + { + "slug": "graphql/migrate-neurelo", + "title": "Migrate GraphQL to Neurelo", + "headings": [], + "paragraphs": "Neurelo is a platform for developers designed to simplify the process of working\nwith databases. It provides a database abstraction with API-first\napproach, instantly transforming databases into REST and GraphQL APIs. Neurelo offers features such as building and managing schemas with\nText-to-Schema support, fully-documented REST and GraphQL APIs (with SDKs)\ngenerated from your schema with an API playground, custom API endpoints for\ncomplex queries with Text-to-MQL support, multiple CI/CD environments,\nschema-aware mock data generation, and more. This abstraction layer enables developers to program with databases through\nAPIs, simplifying communication between the application and the database, and\nmaking it easier and faster to integrate databases into their applications. Refer to Neurelo GraphQL API MongoDB Atlas Migration Guide to\nlearn more.", + "code": [], + "preview": "Learn how to migrate your GraphQL host from Atlas App Services to Neurelo.", + "tags": null, + "facets": { + "genre": [ + "tutorial" + ], + "target_product": [ + "atlas" + ], + "target_product>atlas>sub_product": [ + "atlas-app-services" + ] + } + }, + { + "slug": "triggers/authentication-triggers", + "title": "Authentication Triggers", + "headings": [ + "Create an Authentication Trigger", + "Configuration", + "Authentication Events", + "Example", + "Additional Examples" + ], + "paragraphs": "An authentication trigger fires when a user interacts with an\n authentication provider . You can\nuse authentication triggers to implement advanced user management. 
Some uses include: Storing new user data in your linked cluster Maintaining data integrity upon user deletion Calling a service with a user's information when they log in. To open the authentication trigger configuration screen in the Atlas App Services UI,\nclick Triggers in the left navigation menu, select the\n Authentication Triggers tab, and then click Add a\nTrigger . Configure the trigger and then click Save at the bottom of the\npage to add it to your current deployment draft. To create an authentication trigger with App Services CLI : Add an authentication trigger configuration file to the triggers subdirectory of a\nlocal application directory. App Services does not enforce specific filenames for Atlas Trigger\nconfiguration files. However, once imported, App Services will\nrename each configuration file to match the name of the\ntrigger it defines, e.g. mytrigger.json . Deploy the trigger: Authentication Triggers have the following configuration options: Field Description Trigger Type The type of the trigger. For authentication triggers,\nset this value to AUTHENTICATION . Trigger Name The name of the trigger. Linked Function The name of the function that the trigger\nexecutes when it fires. An authentication\nevent object causes the trigger to fire.\nThis object is the only argument the trigger passes to the function. Operation Type The authentication operation\ntype that causes the trigger to\nfire. Providers A list of one or more authentication provider types. The trigger only listens for\n authentication events produced by these\nproviders. Authentication events represent user interactions with an authentication\nprovider. Each event corresponds to a single user action with one of the\nfollowing operation types: Authentication event objects have the following form: Operation Type Description LOGIN Represents a single instance of a user logging in. CREATE Represents the creation of a new user. DELETE Represents the deletion of a user. Field Description operationType The operation type \nof the authentication event. providers The authentication providers \nthat emitted the event. One of the following names represents each authentication provider: \"anon-user\" \"local-userpass\" \"api-key\" \"custom-token\" \"custom-function\" \"oauth2-facebook\" \"oauth2-google\" \"oauth2-apple\" Generally, only one authentication provider emits each event.\nHowever, you may need to delete a user linked to multiple providers.\nIn this case, the DELETE event for that user includes all linked providers. user The user object of the user that interacted with\nthe authentication provider. time The time at which the event occurred. An online store wants to store custom metadata for each of its customers\nin Atlas .\nEach customer needs a document in the store.customers collection.\nThen, the store can record and query metadata in the customer's document. The collection must represent each customer. To guarantee this, the store\ncreates an Authentication Trigger. This Trigger listens for newly created users\nin the email/password authentication\nprovider. Then, it passes the\n authentication event object to its linked\nfunction, createNewUserDocument . The function creates a new document\nwhich describes the user and their activity. The function then inserts the document\ninto the store.customers collection. 
For additional examples of Triggers integrated into an App Services App,\ncheckout the example Triggers on Github .", + "code": [ + { + "lang": "shell", + "value": "appservices push" + }, + { + "lang": "json", + "value": "{\n \"operationType\": ,\n \"providers\": ,\n \"user\": ,\n \"time\": \n}" + }, + { + "lang": "javascript", + "value": "exports = async function(authEvent) {\n const mongodb = context.services.get(\"mongodb-atlas\");\n const customers = mongodb.db(\"store\").collection(\"customers\");\n\n const { user, time } = authEvent;\n const isLinkedUser = user.identities.length > 1;\n\n if(isLinkedUser) {\n const { identities } = user;\n return users.updateOne(\n { id: user.id },\n { $set: { identities } }\n )\n\n } else {\n return users.insertOne({ _id: user.id, ...user })\n .catch(console.error)\n }\n await customers.insertOne(newUser);\n}" + }, + { + "lang": "json", + "value": "{\n \"type\": \"AUTHENTICATION\",\n \"name\": \"newUserHandler\",\n \"function_name\": \"createNewUserDocument\",\n \"config\": {\n \"providers\": [\"local-userpass\"],\n \"operation_type\": \"CREATE\"\n },\n \"disabled\": false\n}" + } + ], + "preview": "An authentication trigger fires when a user interacts with an\nauthentication provider. You can\nuse authentication triggers to implement advanced user management. Some uses include:", + "tags": null, + "facets": { + "target_product": [ + "atlas" + ], + "target_product>atlas>sub_product": [ + "atlas-app-services" + ] + } + }, + { + "slug": "triggers/aws-eventbridge", + "title": "Send Trigger Events to AWS EventBridge", + "headings": [ + "Overview", + "Procedure", + "Set Up the MongoDB Partner Event Source", + "Configure the Trigger", + "Associate the Trigger Event Source with an Event Bus", + "Custom Error Handling", + "Create a New Custom Error Handler", + "Create a New Error Handler", + "Name the New Function", + "Write the Function Code", + "Test the Function", + "Save the Function", + "Write the Error Handler", + "Add an Error Handler to Your Trigger Configuration", + "Authenticate a MongoDB Atlas User", + "Create a Deployment Draft (Optional)", + "Create the Error Handler Function", + "Create the AWS EventBridge Trigger", + "Deploy the Draft", + "Error Handler Parameters", + "error", + "changeEvent", + "Error Codes", + "DOCUMENT_TOO_LARGE", + "OTHER", + "Error Handler Logs", + "Example Event", + "Performance Optimization" + ], + "paragraphs": "MongoDB offers an AWS Eventbridge partner event source that lets\nyou send Atlas Trigger events to an event bus instead of\ncalling an Atlas Function. You can configure any Trigger type to send events to\nEventBridge. Database Triggers also support custom error handling,\nto reduce trigger suspensions due to non-critical errors. All you need to send Trigger events to EventBridge is an AWS account ID.\nThis guide walks through finding your account ID, configuring the\nTrigger, associating the Trigger event source with an event bus, and setting\nup custom error handling. This guide is based on Amazon's Receiving Events from a\nSaaS Partner \ndocumentation. The AWS put entry for an EventBridge trigger event must be smaller than 256 KB. Learn how to reduce the size of your PutEvents entry in the Performance Optimization section. To send trigger events to AWS EventBridge, you need the AWS\naccount ID of the account that should receive the events.\nOpen the Amazon EventBridge console and click\n Partner event sources in the navigation menu. Search for\nthe MongoDB partner event source and then click\n Set up . 
On the MongoDB partner event source page, click\n Copy to copy your AWS account ID to the clipboard. Once you have the AWS account ID , you can configure a\ntrigger to send events to EventBridge. In the App Services UI, create and configure a new database\ntrigger , authentication\ntrigger , or scheduled\ntrigger and select the\n EventBridge event type. Paste in the AWS Account ID that you copied from\nEventBridge and select an AWS Region to send the trigger events\nto. Optionally, you can configure a function for handling trigger errors.\nCustom error handling is only valid for database triggers.\nFor more details, refer to the Custom Error Handling \nsection on this page. By default, triggers convert the BSON types in event objects into\nstandard JSON types. To preserve BSON type information, you can\nserialize event objects into Extended JSON format instead. Extended JSON preserves type\ninformation at the expense of readability and interoperability. To enable Extended JSON,\nclick the Enable Extended JSON toggle in the\n Advanced (Optional) section. Create a trigger configuration file \nin the /triggers directory. Omit the function_name field\nand define an AWS_EVENTBRIDGE event processor. Set the account_id field to the AWS Account ID \nthat you copied from EventBridge and set the region field to\nan AWS Region. By default, triggers convert the BSON types in event objects into\nstandard JSON types. To preserve BSON type information, you can\nserialize event objects into Extended JSON format instead. Extended JSON preserves type\ninformation at the expense of readability and interoperability. To enable Extended JSON, set the extended_json_enabled field to true . Optionally, you can configure a function for handling trigger errors.\nCustom error handling is only valid for database triggers.\nFor more details, refer to the Custom Error Handling \nsection on this page. The trigger configuration file should resemble the following: For a full list of supported AWS regions, refer to Amazon's\n Receiving Events from a SaaS Partner \nguide. Go back to the EventBridge console and choose Partner event sources in\nthe navigation pane. In the Partner event sources table,\nfind and select the Pending trigger source and then click\n Associate with event bus . On the Associate with event bus screen, define any\nrequired access permissions for other accounts and organizations and\nthen click Associate . Once confirmed, the status of the trigger event source changes from\n Pending to Active , and the name of the event\nbus updates to match the event source name. You can now start creating\nrules that trigger on events from that partner event source. For more\ninformation, see Creating a Rule That Triggers on a SaaS Partner Event . You can create an error handler to be executed on a trigger failure,\nwhen retry does not succeed. Custom error handling allows you to determine\nwhether an error from AWS EventBridge is critical enough to suspend the Trigger,\nor if it is acceptable to ignore the error and continue processing other events.\nFor more information on suspended database triggers, refer to\n Suspended Triggers . Currently, only database triggers support custom error handling.\nAuthentication triggers and scheduled triggers do not support\ncustom error handling at this time. You can create the new function directly in the Create a Trigger page, as below,\nor from the Functions tab. For more information on how to define functions in\nApp Services, refer to Define a Function . 
In the Configure Error Function section, select\n + New Function . You can also select an existing Function, if one is already defined,\nfrom the dropdown. Enter a unique, identifying name for the function in the Name field.\nThis name must be distinct from all other functions in the application. In the Function section, write the JavaScript code directly in\nthe function editor. The function editor contains a default function that\nyou can edit as needed. For more information on creating functions, refer\nto the Functions documentation. In the Testing Console tab beneath the function editor, you can\ntest the function by passing in example values to the error and\n changeEvent parameters, as shown in the comments of the testing console. For more information on these paramaters, refer to the\n Error Handler Parameters \nsection on this page. Click Run to run the test. Once you are satisfied with the custom error handler, click\n Save . In order to update your trigger's configuration with an error handler,\nfollow these steps to Update an App . When you\nupdate your configuration files in Step 3, do the following: Follow the steps in Define a Function \nto write your error handler source code and configuration file. For the error handler source code, see the following template error handler: Add an error_handler attribute to your trigger configuration file\nin the Triggers folder. The trigger configuration file should\nresemble the following: For more information on trigger configuration files, see\n Trigger Configuration Files . Call the admin user authentication endpoint with your MongoDB Atlas API\nkey pair: If authentication succeeds, the response body contains a JSON object\nwith an access_token value: The access_token grants access to the App Services Admin API. You\nmust include it as a Bearer token in the Authorization header for\nall Admin API requests. API Authentication Documentation A draft represents a group of application changes that you\ncan deploy or discard as a single unit. If you don't create\na draft, updates automatically deploy individually. To create a draft, send a POST request with no body to\nthe Create a Deployment Draft endpoint: Create the function to handle errors for a failed AWS\nEventBridge trigger via a POST request to the\n Create a new\nFunction endpoint. Create the AWS EventBridge Trigger with error handling\nenabled via a POST request to the\n Create a Trigger endpoint. If you created a draft, you can deploy all changes in\nthe draft by sending a POST request with no body to the\n Deploy a deployment draft endpoint.\nIf you did not create a draft as a first step, the\nindividual function and trigger requests deployed automatically. The default error handler has two parameters: error and changeEvent . Has the following two attributes: code : The code for the errored EventBridge put request. For a list of\nerror codes used by the error handler, see the below section. message : The unfiltered error message from an errored EventBridge\nput request. The requested change to your data made by EventBridge. For more information\non types of change events and their configurations, see\n Change Event Types . If an error was recevied from EventBridge, the event processor will parse the\nerror as either DOCUMENT_TOO_LARGE or OTHER . This parsed error is passed\nto the error handler function through the error parameter. If the put entry for an EventBridge trigger event is larger\nthan 256 KB, EventBridge will throw an error. 
The error will contain either: For more information on reducing put entry size, see the below Performance\nOptimization section. status code: 400 and\n total size of the entries in the request is over the limit . status code: 413 ,\nwhich indicates a too large payload. The default bucket for all other errors. You can make special error handling cases for\nyour most common error messages to optimize your error handling for\nerrors with an OTHER code. To determine which errors need\nspecial cases, we recommended keeping track of\nthe most common error messages you receive in error.message . You can view Trigger Error Handler logs for\nyour EventBridge Trigger error handler in the application logs. To learn more about viewing application logs, see View Application Logs . Click Logs in the left navigation of the App Services UI. Click the Filter by Type dropdown and select\n Triggers Error Handlers to view all error handler\nlogs for the App. Pass the trigger_error_handler value to the --type flag to\nview all error handler logs for the App. Retrieve TRIGGER_ERROR_HANDLER type logs via a GET request to\nthe Retreive App Services Logs endpoint: The following object configures a trigger to send events to AWS\nEventbridge and handle errors: The AWS put entry for an EventBridge trigger event must be smaller than 256 KB. For more information, see the AWS Documentation to calculate Amazon\nPutEvents event entry size . When using Database Triggers, the Project Expression can be useful reduce the document size\nbefore sending messages to EventBridge.\nThis expression lets you include only specified fields, reducing document size. Learn more in the Database Trigger Project Expression documentation.", + "code": [ + { + "lang": "json", + "value": "{\n \"name\": \"...\",\n \"type\": \"...\",\n \"event_processors\": {\n \"AWS_EVENTBRIDGE\": {\n \"config\": {\n \"account_id\": \"\",\n \"region\": \"\",\n \"extended_json_enabled\": \n }\n }\n }\n}" + }, + { + "lang": "js", + "value": "exports = async function(error, changeEvent) {\n // This sample function will log additional details if the error is not\n // a DOCUMENT_TOO_LARGE error\n if (error.code === 'DOCUMENT_TOO_LARGE') {\n console.log('Document too large error');\n\n // Comment out the line below in order to skip this event and not suspend the Trigger\n throw new Error(`Encountered error: ${error.code}`);\n }\n\n console.log('Error sending event to EventBridge');\n console.log(`DB: ${changeEvent.ns.db}`);\n console.log(`Collection: ${changeEvent.ns.coll}`);\n console.log(`Operation type: ${changeEvent.operationType}`);\n\n // Throw an error in your function to suspend the trigger and stop processing additional events\n throw new Error(`Encountered error: ${error.message}`);\n};" + }, + { + "lang": "json", + "value": " {\n \"name\": \"...\",\n \"type\": \"DATABASE\",\n \"event_processors\": {\n \"AWS_EVENTBRIDGE\": {\n \"config\": {\n \"account_id\": \"\",\n \"region\": \"\",\n \"extended_json_enabled\": \n }\n }\n },\n \"error_handler\": {\n \"config\": {\n \"enabled\": ,\n \"function_name\": \"\"\n }\n }\n }" + }, + { + "lang": "shell", + "value": "curl -X POST \\\n https://services.cloud.mongodb.com/api/admin/v3.0/auth/providers/mongodb-cloud/login \\\n -H 'Content-Type: application/json' \\\n -H 'Accept: application/json' \\\n -d '{\n \"username\": \"\",\n \"apiKey\": \"\"\n }'" + }, + { + "lang": "json", + "value": "{\n \"access_token\": \"\",\n \"refresh_token\": \"\",\n \"user_id\": \"\",\n \"device_id\": \"\"\n}" + }, + { + "lang": "bash", + 
"value": "curl -X POST 'https://services.cloud.mongodb.com/api/admin/v3.0/groups/{groupId}/apps/{appId}/drafts' \\\n-H 'Content-Type: application/json' \\\n-H 'Authorization: Bearer '" + }, + { + "lang": "bash", + "value": "curl -X POST \\\n https://services.cloud.mongodb.com/api/admin/v3.0/groups/{groupId}/apps/{appId}/functions \\\n -H 'Authorization: Bearer ' \\\n -d '{\n \"name\": \"string\",\n \"private\": true,\n \"source\": \"string\",\n \"run_as_system\": true\n }'" + }, + { + "lang": "bash", + "value": "curl -X POST \\\n https://services.cloud.mongodb.com/api/admin/v3.0/groups/{groupId}/apps/{appId}/triggers \\\n -H 'Authorization: Bearer ' \\\n -d '{\n \"name\": \"string\",\n \"type\": \"DATABASE\",\n \"config\": {\n \"service_id\": \"string\",\n \"database\": \"string\",\n \"collection\": \"string\",\n \"operation_types\": {\n \"string\"\n },\n \"match\": ,\n \"full_document\": false,\n \"full_document_before_change\": false,\n \"unordered\": true\n },\n \"event_processors\": {\n \"AWS_EVENTBRIDGE\": {\n \"account_id\": \"string\",\n \"region\": \"string\",\n \"extended_json_enabled\": false\n },\n },\n \"error_handler\": {\n \"enabled\": true,\n \"function_id\": \"string\"\n }\n }'" + }, + { + "lang": "shell", + "value": "curl -X POST \\\n'https://services.cloud.mongodb.com/api/admin/v3.0/groups/{groupId}/apps/{appId}/drafts/{draftId}/deployment' \\\n--header 'Content-Type: application/json' \\\n--header 'Authorization: Bearer ' \\" + }, + { + "lang": "shell", + "value": "appservices logs list --type=trigger_error_handler" + }, + { + "lang": "shell", + "value": "curl -X GET 'https://services.cloud.mongodb.com/api/admin/v3.0/groups/{groupId}/apps/{appId}/logs' \\\n -H 'Content-Type: application/json' \\\n -H 'Authorization: Bearer '\n -d '{\n \"type\": \"TRIGGER_ERROR_HANDLER\"\n }'" + }, + { + "lang": "json", + "value": "\"event_processors\": {\n \"AWS_EVENTBRIDGE\": {\n \"config\": {\n \"account_id\": \"012345678901\",\n \"region\": \"us-east-1\"\n }\n }\n},\n \"error_handler\": {\n \"config\": {\n \"enabled\": true,\n \"function_name\": \"myErrorHandler.js\"\n }\n}" + } + ], + "preview": "Learn how to set up AWS EventBridge to handle Atlas Trigger events.", + "tags": null, + "facets": { + "genre": [ + "tutorial" + ], + "target_product": [ + "atlas" + ], + "target_product>atlas>sub_product": [ + "atlas-app-services" + ] + } + }, + { + "slug": "triggers/database-triggers", + "title": "Database Triggers", + "headings": [ + "Create a Database Trigger", + "Configuration", + "Trigger Details", + "Trigger Source Details", + "Function", + "Advanced", + "Change Event Types", + "Database Trigger Example", + "Suspended Triggers", + "Automatically Resume a Suspended Trigger", + "Manually Resume a Suspended Trigger", + "Find the Suspended Trigger", + "Restart the Trigger", + "Pull Your App's Latest Configuration Files", + "Verify that the Trigger Configuration File Exists", + "Redeploy the Trigger", + "Trigger Time Reporting", + "Performance Optimization", + "Disable Event Ordering for Burst Operations", + "Disable Collection-Level Preimages", + "Use Match Expressions to Limit Trigger Invocations", + "Testing Match Expressions", + "Use Project Expressions to Reduce Input Data Size", + "Additional Examples" + ], + "paragraphs": "Database Triggers allow you to execute server-side logic whenever a database\nchange occurs on a linked MongoDB Atlas cluster. You can configure triggers on\nindividual collections, entire databases, and on an entire cluster. 
Unlike SQL data triggers, which run on the database server, triggers run\non a serverless compute layer that scales independently of the database\nserver. Triggers automatically call Atlas Functions \nand can forward events to external handlers through AWS EventBridge. Use database triggers to implement event-driven data interactions. For\nexample, you can automatically update information in one document when a\nrelated document changes or send a request to an external service\nwhenever a new document is inserted. Database triggers use MongoDB change streams \nto watch for real-time changes in a collection. A change stream is a\nseries of database events that each\ndescribe an operation on a document in the collection. Your app opens a\nsingle change stream for each collection with at least one enabled\ntrigger. If multiple triggers are enabled for a collection they all\nshare the same change stream. You control which operations cause a trigger to fire as well as what\nhappens when it does. For example, you can run a function whenever a\nspecific field of a document is updated. The function can access the\nentire change event, so you always know what changed. You can also pass\nthe change event to AWS EventBridge to handle\nthe event outside of Atlas. Triggers support $match \nexpressions to filter change events and $project \nexpressions to limit the data included in each event. There are limits on the total number of change streams you can open\non a cluster, depending on the cluster's size. Refer to change\nstream limitations for\nmore information. You cannot define a database trigger on a serverless instance or Federated database instance because they do not support change streams. In deployment and database level triggers, it is possible to configure triggers\nin a way that causes other triggers to fire, resulting in recursion.\nExamples include a database-level trigger writing to a collection within the\nsame database, or a cluster-level logger or log forwarder writing logs to\nanother database in the same cluster. To open the database trigger configuration screen in the App Services UI, click\n Triggers in the left navigation menu, select the\n Database Triggers tab, and then click Add a\nTrigger . Configure the trigger and then click Save at the bottom of\nthe page to add it to your current deployment draft. To create a database trigger with the App Services CLI : Add a database trigger configuration file to the triggers subdirectory of a\nlocal application directory. Deploy the trigger: Atlas App Services does not enforce specific filenames for Trigger\nconfiguration files. However, once imported, Atlas App Services will rename\neach configuration file to match the name of the Trigger it defines,\ne.g. mytrigger.json . Database Triggers have the following configuration options: Field Description Trigger Type The type of the Trigger. Set this value to Database for\ndatabase Triggers Name The name of the Trigger. Enabled by default. Used to enable or disable the trigger. Skip Events On Re-Enable Disabled by default. If enabled, any change events that occurred while this\ntrigger was disabled will not be processed. Event Ordering If enabled, trigger events are processed in the order in which they occur.\nIf disabled, events can be processed in parallel, which is faster when\nmany events occur at the same time. If event ordering is enabled, multiple executions of this Trigger will occur\nsequentially based on the timestamps of the change events. 
If event ordering is\ndisabled, multiple executions of this Trigger will occur independently. Improve performance for Triggers that respond to bulk database operations\nby disabling event ordering.\n Learn more. Within the Trigger Source Details section, you first select the\n Watch Against , based on the level of granularity you want. Your\noptions are: Depending on which source type you are using, the additional options differ. The\nfollowing table describes these options. Collection , when a change occurs on a specified collection Database , when a change occurs on any collection in a\nspecified database Deployment , when deployment changes occur on a specified\ncluster. If you select the Deployment source type, the following\ndatabases are not watched for changes: The admin databases admin , local , and config The sync databases __realm_sync and __realm_sync_ The deployment-level source type is only available on dedicated tiers. Source Type Options Collection Cluster Name . The name of the MongoDB cluster that the\nTrigger is associated with. Database Name . The MongoDB database that contains the watched\ncollection. Collection Name . The MongoDB collection to watch. Optional.\nIf you leave this option blank, the Source Type changes to \"Database.\" Operation Type . The operation types that cause the Trigger to fire.\nSelect the operation types you want the trigger to respond to. Options\ninclude: Insert Update Replace Delete Update operations executed from MongoDB Compass or the MongoDB Atlas\nData Explorer fully replace the previous document. As a result,\nupdate operations from these clients will generate Replace \nchange events rather than Update events. Full Document . If enabled, Update change events include\nthe latest majority-committed \nversion of the modified document after the change was applied in\nthe fullDocument field. Regardless of this setting, Insert and Replace events always\ninclude the fullDocument field. Delete events never include\nthe fullDocument field. Document Preimage . When enabled, change events include a\ncopy of the modified document from immediately before the change was\napplied in the fullDocumentBeforeChange field. This has\n performance considerations . All change events\nexcept for Insert events include the document preimage. Database Cluster Name . The name of the MongoDB cluster that the\nTrigger is associated with. Database Name . The MongoDB database to watch. Optional.\nIf you leave this option blank, the Source Type changes to \"Deployment,\"\nunless you are on a shared tier, in which case App Services will not\nlet you save the trigger. Operation Type . The operation types that cause the Trigger to fire.\nSelect the operation types you want the trigger to respond to.\nOptions include: Create Collection Modify Collection Rename Collection Drop Collection Shard Collection Reshard Collection Refine Collection Shard Key Update operations executed from MongoDB Compass or the MongoDB Atlas\nData Explorer fully replace the previous document. As a result,\nupdate operations from these clients will generate Replace \nchange events rather than Update events. Full Document . If enabled, Update change events include\nthe latest majority-committed \nversion of the modified document after the change was applied in\nthe fullDocument field. Regardless of this setting, Insert and Replace events always\ninclude the fullDocument field. Delete events never include\nthe fullDocument field. Document Preimage . 
When enabled, change events include a\ncopy of the modified document from immediately before the change was\napplied in the fullDocumentBeforeChange field. This has\n performance considerations . All change events\nexcept for Insert events include the document preimage. Disabled\nfor Database and Deployment sources to limit unnecessary watches on the\ncluster for a new collection being created. Deployment Cluster Name . The name of the MongoDB cluster that the\nTrigger is associated with. Operation Type . The operation types that occur in the cluster that cause\nthe Trigger to fire. Select the operation types you want the trigger\nto respond to. Options include: Drop Database Full Document . If enabled, Update change events include\nthe latest majority-committed \nversion of the modified document after the change was applied in\nthe fullDocument field. Regardless of this setting, Insert and Replace events always\ninclude the fullDocument field. Delete events never include\nthe fullDocument field. Document Preimage . When enabled, change events include a\ncopy of the modified document from immediately before the change was\napplied in the fullDocumentBeforeChange field. This has\n performance considerations . All change events\nexcept for Insert events include the document preimage. Disabled\nfor Database and Deployment sources to limit unnecessary watches on the\ncluster for a new collection being created. Preimages require additional storage overhead that may affect\nperformance. If you're not using preimages on a collection,\nyou should disable preimages. To learn more, see Disable\nCollection-Level Preimages . Document preimages are supported on non-sharded Atlas clusters running\nMongoDB 4.4+, and on sharded Atlas clusters running MongoDB 5.3 and later.\nYou can upgrade a non-sharded cluster (with preimages) to a\nsharded cluster, as long as the cluster is running 5.3 or later. Within the Function section, you choose what action is taken when\nthe trigger fires. You can choose to run a function or use\n AWS EventBridge . Within the Advanced section, the following optional configuration\noptions are available: Field Description Match Expression A $match expression document\nthat App Services uses to filter which change events cause the Trigger to\nfire. The Trigger evaluates all change event objects that it receives against\nthis match expression and only executes if the expression evaluates to true \nfor a given change event. MongoDB performs a full equality match for embedded documents in a match\nexpression. If you want to match a specific field in an embedded document,\nrefer to the field directly using dot-notation . For more information, see\n Query on Embedded Documents in\nthe MongoDB server manual. Limit the number of fields that the Trigger processes by using a\n $match expression.\n Learn more. Project Expression A $project \nexpression that selects a subset of fields from each event in the change\nstream. You can use this to optimize the trigger's execution . The expression is an object that maps the name of fields in the change\nevent to either a 0 , which excludes the field, or a 1 , which\nincludes it. An expression can have values of either 0 or 1 but\nnot both together. This splits projections into two categories,\ninclusive and exclusive: An inclusive project expression specifies fields to include in each\nchange event document. The expression is an object that maps the name\nof fields to include to a 1 . 
If you don't include a field, it is\nnot included in the projected change event. The following projection includes only the _id and\n fullDocument fields: An exclusive project expression specifies fields to exclude from\neach change event document. The expression is an object that maps the\nname of fields to include to a 0 . If you don't exclude a field, it\nis included in the projected change event. The following projection excludes the _id and\n fullDocument fields: You cannot exclude the operation_type field with a projection.\nThis ensures that the trigger can always check if it should run for\na given event's operation type. Auto-Resume Triggers If enabled, when this Trigger's resume token\ncannot be found in the cluster's oplog, the Trigger automatically resumes\nprocessing events at the next relevant change stream event.\nAll change stream events from when the Trigger was suspended until the Trigger\nresumes execution do not have the Trigger fire for them. Maximum Throughput Triggers If the linked data source is a dedicated server (M10+ Tier),\nyou can increase the maximum throughput \nbeyond the default 10,000 concurrent processes. Before increasing the maximum throughput, consider whether one or more of\nyour triggers are calling a rate-limited external API. Increasing the\ntrigger rate might result in exceeding those limits. Increasing the throughput may also add a larger workload, affecting\noverall cluster performance. To enable maximum throughput, you must disable Event Ordering. Database change events represent individual changes in a specific\ncollection of your linked MongoDB Atlas cluster. Every database event has the same operation type and structure as the\n change event object that was\nemitted by the underlying change stream. Change events have the\nfollowing operation types: Database change event objects have the following general form: Operation Type Description Insert Document (All trigger types) Represents a new document added to the collection. Update Document (All trigger types) Represents a change to an existing document in the collection. Delete Document (All trigger types) Represents a document deleted from the collection. Replace Document (All trigger types) Represents a new document that replaced a document in the collection. Create Collection (Database and Deployment trigger types only) Represents the creation of a new collection. Modify Collection (Database and Deployment trigger types only) Represents the modification collection. Rename Collection (Database and Deployment trigger types only) Represents collection being renamed. Drop Collection (Database and Deployment trigger types only) Represents a collection being dropped. Shard Collection (Database and Deployment trigger types only) Represents a collection changing from unsharded to sharded. Reshard Collection (Database and Deployment trigger types only) Represents a change to a collection's sharding. Refine Collection Shard Key (Database and Deployment trigger types only) Represents a change in the shard key of a collection. Create Indexes (Database and Deployment trigger types only) Represents the creation of a new index. Drop Indexes (Database and Deployment trigger types only) Represents an index being dropped. Drop Database (Deployment trigger type only) Represents a database being dropped. An online store wants to notify its customers whenever one of their\norders changes location. 
They record each order in the store.orders \ncollection as a document that resembles the following: To automate this process, the store creates a Database Trigger that\nlistens for Update change events in the store.orders collection.\nWhen the trigger observes an Update event, it passes the\n change event object to its associated Function,\n textShippingUpdate . The Function checks the change event for any\nchanges to the shippingLocation field and, if it was updated, sends\na text message to the customer with the new location of the order. Database Triggers may enter a suspended state in response to an event\nthat prevents the Trigger's change stream from continuing. Events that\ncan suspend a Trigger include: In the event of a suspended or failed trigger, Atlas App Services sends the\nproject owner an email alerting them of the issue. invalidate events \nsuch as dropDatabase , renameCollection , or those caused by\na network disruption. the resume token required to resume the change stream is no longer in the\ncluster oplog . The App logs\nrefer to this as a ChangeStreamHistoryLost error. You can configure a Trigger to automatically resume if the Trigger was suspended\nbecause the resume token is no longer in the oplog.\nThe Trigger does not process any missed change stream events between\nwhen the resume token is lost and when the resume process completes. When creating or updating a Database Trigger \nin the App Services UI, navigate to the configuration page of the Trigger\nyou want to automatically resume if suspended. In the Advanced (Optional) section, select Auto-Resume Triggers . Save and deploy the changes. When creating or updating a Database Trigger \nwith the Realm CLI, create or navigate to the configuration file for the Trigger\nyou want to automatically resume if suspended. In the Trigger's configuration file ,\ninclude the following: Deploy the changes with the following command: When you manually resume a suspended Trigger, your App attempts to resume the Trigger\nat the next change stream event after the change stream stopped.\nIf the resume token is no longer in the cluster oplog, the Trigger\nmust be started without a resume token. This means the Trigger begins\nlistening to new events but does not process any missed past events. You can adjust the oplog size to keep the resume token for more time after\na suspension by scaling your Atlas cluster .\nMaintain an oplog size a few times greater than\nyour cluster's peak oplog throughput (GB/hour) to reduce the risk of a\nsuspended trigger's resume token dropping off the oplog\nbefore the trigger executes.\nView your cluster's oplog throughput in the Oplog GB/Hour graph in the\n Atlas cluster metrics . You can attempt to restart a suspended Trigger from the App Services UI or by\nimporting an application directory with the App Services CLI . On the Database Triggers tab of the Triggers \npage, find the trigger that you want to resume in the list of\ntriggers. App Services marks suspended triggers\nwith a Status of Suspended . Click Restart in the trigger's Actions column.\nYou can choose to restart the trigger with a change stream\n resume token or\nopen a new change stream. Indicate whether or not to use a resume\ntoken and then click Resume Database Trigger . If you use a resume token , App Services\nattempts to resume the trigger's underlying change\nstream at the event immediately following the last\nchange event it processed. If successful, the trigger\nprocesses any events that occurred while it was\nsuspended. 
If you do not use a resume token, the\ntrigger begins listening for new events but will not\nfire for any events that occurred while it was\nsuspended. If you exported a new copy of your application, it should already\ninclude an up-to-date configuration file for the suspended trigger.\nYou can confirm that the configuration file exists by looking\nin the /triggers directory for a trigger configuration file with the same name as the trigger. After you have verified that the trigger configuration file exists,\npush the configuration back to your app. App Services\nautomatically attempts to resume any suspended triggers included\nin the deployment. The list of Triggers in the Atlas App Services UI shows three timestamps: Last Modified This is the time the Trigger was created or most recently changed. Latest Heartbeat Atlas App Services keeps track of the last time a trigger was run. If the trigger\nis not sending any events, the server sends a heartbeat to ensure the trigger's\nresume token stays fresh. Whichever event is most recent is shown as the\n Latest Heartbeat . Last Cluster Time Processed Atlas App Services also keeps track of the Last Cluster Time Processed ,\nwhich is the last time the change stream backing a Trigger emitted an event. It\nwill be older than the Latest Heartbeat if there have been no events\nsince the most recent heartbeat. Consider disabling event ordering if your trigger fires on a collection that\nreceives short bursts of events (e.g. inserting data as part of a daily batch\njob). Ordered Triggers wait to execute a Function for a particular event until\nthe Functions of previous events have finished executing. As a\nconsequence, ordered Triggers are effectively rate-limited by the run\ntime of each sequential Trigger function. This may cause a significant\ndelay between the database event appearing on the change stream and the\nTrigger firing. In certain extreme cases, database events might fall off\nthe oplog before a long-running ordered trigger processes them. Unordered Triggers execute functions in parallel if possible, which can be\nsignificantly faster (depending on your use case) but does not guarantee that\nmultiple executions of a Trigger Function occur in event order. Document preimages require your cluster to record additional data about\neach operation on a collection. Once you enable preimages for any\ntrigger on a collection, your cluster stores preimages for every\noperation on the collection. The additional storage space and compute overhead may degrade trigger\nperformance depending on your cluster configuration. To avoid the storage and compute overhead of preimages, you must disable\npreimages for the entire underlying MongoDB collection. This is a\nseparate setting from any individual trigger's preimage setting. If you disable collection-level preimages, then no active trigger on\nthat collection can use preimages. However, if you delete or disable all\npreimage triggers on a collection, then you can also disable\ncollection-level preimages. To learn how, see Disable Preimages for a Collection . You can limit the number of Trigger invocations by specifying a $match expression in the Match\nExpression field. App Services evaluates the match expression against the\nchange event document and invokes the Trigger only if the expression evaluates\nto true for the given change event. The match expression is a JSON document that specifies the query conditions\nusing the MongoDB read query syntax . 
We recommend only using match expressions when the volume of Trigger events\nmeasurably becomes a performance issue. Until then, receive all events and\nhandle them individually in the Trigger function code. The exact shape of the change event document depends on the event that caused\nthe trigger to fire. For details, see the reference for each event type: insert update replace delete create modify rename drop shardCollection reshardCollection refineCollectionShardKey dropDatabase The following match expression allows the Trigger to fire\nonly if the change event object specifies that the status field in\na document changed. updateDescription is a field of the update Event object . The following match expression allows the Trigger to fire only when a\ndocument's needsTriggerResponse field is true . The fullDocument \nfield of the insert ,\n update , and replace events represents a document after the\ngiven operation. To receive the fullDocument field, you must enable\n Full Document in your Trigger configuration. The following procedure shows one way to test whether your match expression\nworks as expected: Download the MongoDB Shell (mongosh) and use it to\n connect to your cluster . Replacing DB_NAME with your database name, COLLECTION_NAME with your\ncollection name, and YOUR_MATCH_EXPRESSION with the match expression you\nwant to test, paste the following into mongosh to open a change stream on an\nexisting collection: In another terminal window, use mongosh to make changes to some test\ndocuments in the collection. Observe what the change stream filters in and out. In the Project Expression field,\nlimit the number of fields that the Trigger processes by using a\n $project expression. When using Triggers, a projection expression is inclusive only .\nProject does not support mixing inclusions and exclusions.\nThe project expression must be inclusive because Triggers require you\nto include operationType . If you want to exclude a single field, the projection expression must\ninclude every field except the one you want to exclude.\nYou can only explicitly exclude _id , which is included by default. 
A trigger is configured with the following Project Expression : The change event object that App Services passes to the trigger function\nonly includes the fields specifed in the projection, as in the following\nexample: For additional examples of Triggers integrated into an App Services App,\ncheckout the example Triggers on Github .", + "code": [ + { + "lang": "shell", + "value": "appservices push" + }, + { + "lang": "javascript", + "value": "{\n _id: 1,\n fullDocument: 1\n}" + }, + { + "lang": "javascript", + "value": "{\n _id: 0,\n fullDocument: 0\n}" + }, + { + "lang": "json", + "value": "{\n _id : ,\n \"operationType\": ,\n \"fullDocument\": ,\n \"fullDocumentBeforeChange\": ,\n \"ns\": {\n \"db\" : ,\n \"coll\" : \n },\n \"documentKey\": {\n \"_id\": \n },\n \"updateDescription\": ,\n \"clusterTime\": \n}" + }, + { + "lang": "json", + "value": "{\n _id: ObjectId(\"59cf1860a95168b8f685e378\"),\n customerId: ObjectId(\"59cf17e1a95168b8f685e377\"),\n orderDate: ISODate(\"2018-06-26T16:20:42.313Z\"),\n shipDate: ISODate(\"2018-06-27T08:20:23.311Z\"),\n orderContents: [\n { qty: 1, name: \"Earl Grey Tea Bags - 100ct\", price: NumberDecimal(\"10.99\") }\n ],\n shippingLocation: [\n { location: \"Memphis\", time: ISODate(\"2018-06-27T18:22:33.243Z\") },\n ]\n}" + }, + { + "lang": "javascript", + "value": "exports = async function (changeEvent) {\n // Destructure out fields from the change stream event object\n const { updateDescription, fullDocument } = changeEvent;\n\n // Check if the shippingLocation field was updated\n const updatedFields = Object.keys(updateDescription.updatedFields);\n const isNewLocation = updatedFields.some(field =>\n field.match(/shippingLocation/)\n );\n\n // If the location changed, text the customer the updated location.\n if (isNewLocation) {\n const { customerId, shippingLocation } = fullDocument;\n const mongodb = context.services.get(\"mongodb-atlas\");\n const customers = mongodb.db(\"store\").collection(\"customers\");\n const { location } = shippingLocation.pop();\n const customer = await customers.findOne({ _id: customerId });\n\n const twilio = require('twilio')(\n // Your Account SID and Auth Token from the Twilio console:\n context.values.get(\"TwilioAccountSID\"),\n context.values.get(\"TwilioAuthToken\"),\n );\n\n await twilio.messages.create({\n To: customer.phoneNumber,\n From: context.values.get(\"ourPhoneNumber\"),\n Body: `Your order has moved! 
The new location is ${location}.`\n })\n }\n};" + }, + { + "lang": "json", + "value": "{\n \"type\": \"DATABASE\",\n \"name\": \"shippingLocationUpdater\",\n \"function_name\": \"textShippingUpdate\",\n \"config\": {\n \"service_name\": \"mongodb-atlas\",\n \"database\": \"store\",\n \"collection\": \"orders\",\n \"operation_types\": [\"UPDATE\"],\n \"unordered\": false,\n \"full_document\": true,\n \"match\": {}\n },\n \"disabled\": false\n}" + }, + { + "lang": "js", + "value": "{\n \"name\": \"\",\n \"type\": \"DATABASE\",\n \"config\": {\n \"tolerate_resume_errors\": true,\n // ...rest of Database Trigger configuration\n },\n // ...rest of Trigger general configuration\n}" + }, + { + "lang": "shell", + "value": "appservices push --remote=" + }, + { + "lang": "shell", + "value": "appservices pull --remote=" + }, + { + "lang": "shell", + "value": "appservices push" + }, + { + "lang": "javascript", + "value": "{\n \"updateDescription.updatedFields.status\": {\n \"$exists\": true\n }\n}" + }, + { + "lang": "javascript", + "value": "{\n \"fullDocument.needsTriggerResponse\": true\n}" + }, + { + "lang": "js", + "value": "db.getSiblingDB(DB_NAME).COLLECTION_NAME.watch([{$match: YOUR_MATCH_EXPRESSION}])\nwhile (!watchCursor.isClosed()) {\n if (watchCursor.hasNext()) {\n print(tojson(watchCursor.next()));\n }\n}" + }, + { + "lang": "json", + "value": "{\n \"_id\": 0,\n \"operationType\": 1,\n \"updateDescription.updatedFields.status\": 1\n}" + }, + { + "lang": "json", + "value": "{\n \"operationType\": \"update\",\n \"updateDescription\": {\n \"updatedFields\": {\n \"status\": \"InProgress\"\n }\n }\n}" + } + ], + "preview": "Use Database Triggers to execute server-side logic when database changes occur", + "tags": null, + "facets": { + "genre": [ + "reference" + ], + "target_product": [ + "atlas" + ], + "target_product>atlas>sub_product": [ + "atlas-app-services" + ] + } + }, + { + "slug": "triggers/disable", + "title": "Disable a Trigger", + "headings": [ + "Overview", + "Find the Trigger", + "Disable the Trigger", + "Deploy Your Changes", + "Pull Your App's Latest Configuration Files", + "Verify that the Trigger Configuration File Exists", + "Disable the Trigger", + "Deploy Your Changes", + "Restoring from a Snapshot" + ], + "paragraphs": "Triggers may enter a suspended state in response to\nan event that prevents the Trigger's change stream from continuing, such\nas a network disruption or change to the underlying cluster. When a\nTrigger enters a suspended state, it does not receive change events and will not\nfire. You can suspend a Trigger from the Atlas App Services UI or by\nimporting an application directory with the App Services CLI . In the event of a suspended or failed trigger, Atlas App Services sends the\nproject owner an email alerting them of the issue. On the Database Triggers tab of the Triggers \npage, find the trigger that you want to disable in the list of\nTriggers. Switch the Enabled toggle to the \"off\" setting. If Development Mode is not enabled, press the\n review draft & deploy button to release your changes. If you exported a new copy of your application, it should already include an\nup-to-date configuration file for the suspended trigger. You can confirm that\nthe configuration file exists by looking in the /triggers directory for a\n trigger configuration file with the same name\nas the trigger. 
After you have verified that the trigger configuration file exists, add\na field named \"disabled\" with the value true to the top level\nof the trigger json definition: Finally, push the configuration back to your app: Consider the following scenario: In this case, the trigger picks up all of the newly-added documents and fires\nfor each document. It will not fire again for events that have already been\nprocessed. A database trigger is disabled or suspended. New documents are added while the trigger is disabled. The database is restored from a snapshot to a time prior to the new documents\nbeing added. The database trigger is restarted. If a previously-enabled database trigger is running during snapshot restoration,\nyou will see an error in the Edit Trigger section of the Atlas UI because the\ntrigger cannot connect to the Atlas cluster during the restore process. Once\nsnapshot restoration completes, the error disappears and the trigger continues\nto execute normally.", + "code": [ + { + "lang": "shell", + "value": "appservices pull --remote=" + }, + { + "lang": "json", + "value": "{\n \"id\": \"6142146e2f052a39d38e1605\",\n \"name\": \"steve\",\n \"type\": \"SCHEDULED\",\n \"config\": {\n \"schedule\": \"*/1 * * * *\"\n },\n \"function_name\": \"myFunc\",\n \"disabled\": true\n}" + }, + { + "lang": "shell", + "value": "appservices push" + } + ], + "preview": "Triggers may enter a suspended state in response to\nan event that prevents the Trigger's change stream from continuing, such\nas a network disruption or change to the underlying cluster. When a\nTrigger enters a suspended state, it does not receive change events and will not\nfire.", + "tags": null, + "facets": { + "target_product": [ + "atlas" + ], + "target_product>atlas>sub_product": [ + "atlas-app-services" + ] + } + }, + { + "slug": "triggers/scheduled-triggers", + "title": "Scheduled Triggers", + "headings": [ + "Create a Scheduled Trigger", + "Configuration", + "CRON Expressions", + "Expression Syntax", + "Format", + "Field Values", + "Example", + "Performance Optimization", + "Additional Examples" + ], + "paragraphs": "Scheduled triggers allow you to execute server-side logic on a\n regular schedule that you define .\nYou can use scheduled triggers to do work that happens on a periodic\nbasis, such as updating a document every minute, generating a nightly\nreport, or sending an automated weekly email newsletter. To create a scheduled Trigger in the Atlas App Services UI: Click Triggers under Build in the\nleft navigation menu. Click Add a Trigger to open the Trigger configuration page. Select Scheduled for the Trigger Type . To create a scheduled Trigger with the App Services CLI : Add a scheduled Trigger configuration file to the triggers subdirectory of a local\napplication directory. Scheduled Trigger configuration files have the following form: You cannot create a Trigger that runs on a Basic \nschedule using App Services CLI. All imported scheduled Trigger\nconfigurations must specify a CRON expression . Deploy the trigger: Scheduled Triggers have the following configuration options: Field Description Select Scheduled . The name of the trigger. Enabled by default. Used to enable or disable the trigger. Disabled by default. If enabled, any change events that occurred while\nthis trigger was disabled will not be processed. Required. You can select Basic or Advanced . A Basic\nschedule executes the Trigger periodically based on the interval you set,\nsuch as \"every five minutes\" or \"every Monday\". 
An Advanced schedule runs the Trigger based on the custom\n CRON expression that you define. Within the Function section, you choose what action is taken when\nthe trigger fires. You can choose to run a function or use\n AWS EventBridge . A Scheduled Trigger does not pass any arguments to its linked\nFunction. CRON expressions are user-defined strings that use standard\n cron job syntax to define when a scheduled\ntrigger should execute.\nApp Services executes Trigger CRON expressions based on UTC time .\nWhenever all of the fields in a CRON expression match the current date and time,\nApp Services fires the trigger associated with the expression. CRON expressions are strings composed of five space-delimited fields.\nEach field defines a granular portion of the schedule on which its\nassociated trigger executes: Field Valid Values Description minute [0 - 59] Represents one or more minutes within an hour. If the minute field of a CRON expression has a value of\n 10 , the field matches any time ten minutes after the hour\n(e.g. 9:10 AM ). hour [0 - 23] Represents one or more hours within a day on a 24-hour clock. If the hour field of a CRON expression has a value of\n 15 , the field matches any time between 3:00 PM and\n 3:59 PM . dayOfMonth [1 - 31] Represents one or more days within a month. If the dayOfMonth field of a CRON expression has a value\nof 3 , the field matches any time on the third day of the\nmonth. month Represents one or more months within a year. A month can be represented by either a number (e.g. 2 for\nFebruary) or a three-letter string (e.g. APR for April). If the month field of a CRON expression has a value of\n 9 , the field matches any time in the month of September. weekday Represents one or more days within a week. A weekday can be represented by either a number (e.g. 2 for a\nTuesday) or a three-letter string (e.g. THU for a Thursday). If the weekday field of a CRON expression has a value of\n 3 , the field matches any time on a Wednesday. Each field in a CRON expression can contain either a specific value or\nan expression that evaluates to a set of values. The following table\ndescribes valid field values and expressions: Expression Type Description Matches all possible field values. Available in all expression fields. The following CRON expression schedules a trigger to execute\nonce every minute of every day: Matches a specific field value. For fields other than weekday \nand month this value will always be an integer. A weekday \nor month field can be either an integer or a three-letter\nstring (e.g. TUE or AUG ). Available in all expression fields. The following CRON expression schedules a trigger to execute\nonce every day at 11:00 AM UTC: Matches a list of two or more field expressions or specific\nvalues. Available in all expression fields. The following CRON expression schedules a trigger to execute\nonce every day in January, March, and July at 11:00 AM UTC: Matches a continuous range of field values between and including\ntwo specific field values. Available in all expression fields. The following CRON expression schedules a trigger to execute\nonce every day from January 1st through the end of April at\n11:00 AM UTC: Matches any time where the step value evenly divides the\nfield value with no remainder (i.e. when Value % Step == 0 ). Available in the minute and hour expression fields. 
The following CRON expression schedules a trigger to execute\non the 0th, 25th, and 50th minutes of every hour: An online store wants to generate a daily report of all sales from the\nprevious day. They record all orders in the store.orders collection\nas documents that resemble the following: To generate the daily report, the store creates a scheduled Trigger\nthat fires every day at 7:00 AM UTC . When the\nTrigger fires, it calls its linked Atlas Function,\n generateDailyReport , which runs an aggregation\nquery on the store.orders collection to generate the report. The\nFunction then stores the result of the aggregation in the\n store.reports collection. Use the Query API with a a $match \nexpression to reduce the number of documents your Function looks at.\nThis helps your Function improve performance and not reach\n Function memory limits . Refer the Example section for a Scheduled Trigger using a $match expression. For additional examples of Triggers integrated into an App Services App,\ncheckout the example Triggers on Github .", + "code": [ + { + "lang": "none", + "value": "{\n \"type\": \"SCHEDULED\",\n \"name\": \"\",\n \"function_name\": \"\",\n \"config\": {\n \"schedule\": \"\"\n },\n \"disabled\": \n}" + }, + { + "lang": "shell", + "value": "appservices push" + }, + { + "lang": "text", + "value": "* * * * *\n\u2502 \u2502 \u2502 \u2502 \u2514\u2500\u2500 weekday...........[0 (SUN) - 6 (SAT)]\n\u2502 \u2502 \u2502 \u2514\u2500\u2500\u2500\u2500 month.............[1 (JAN) - 12 (DEC)]\n\u2502 \u2502 \u2514\u2500\u2500\u2500\u2500\u2500\u2500 dayOfMonth........[1 - 31]\n\u2502 \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500 hour..............[0 - 23]\n\u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500 minute............[0 - 59]" + }, + { + "lang": "text", + "value": "* * * * *" + }, + { + "lang": "text", + "value": "0 11 * * *" + }, + { + "lang": "text", + "value": "0 11 * 1,3,7 *" + }, + { + "lang": "text", + "value": "0 11 * 1-4 *" + }, + { + "lang": "text", + "value": "*/25 * * * *" + }, + { + "lang": "json", + "value": "{\n _id: ObjectId(\"59cf1860a95168b8f685e378\"),\n customerId: ObjectId(\"59cf17e1a95168b8f685e377\"),\n orderDate: ISODate(\"2018-06-26T16:20:42.313Z\"),\n shipDate: ISODate(\"2018-06-27T08:20:23.311Z\"),\n orderContents: [\n { qty: 1, name: \"Earl Grey Tea Bags - 100ct\", price: Decimal128(\"10.99\") }\n ],\n shippingLocation: [\n { location: \"Memphis\", time: ISODate(\"2018-06-27T18:22:33.243Z\") },\n ]\n}" + }, + { + "lang": "javascript", + "value": "exports = function() {\n // Instantiate MongoDB collection handles\n const mongodb = context.services.get(\"mongodb-atlas\");\n const orders = mongodb.db(\"store\").collection(\"orders\");\n const reports = mongodb.db(\"store\").collection(\"reports\");\n\n // Generate the daily report\n return orders.aggregate([\n // Only report on orders placed since yesterday morning\n { $match: {\n orderDate: {\n $gte: makeYesterdayMorningDate(),\n $lt: makeThisMorningDate()\n }\n } },\n // Add a boolean field that indicates if the order has already shipped\n { $addFields: {\n orderHasShipped: {\n $cond: {\n if: \"$shipDate\", // if shipDate field exists\n then: 1,\n else: 0\n }\n }\n } },\n // Unwind individual items within each order\n { $unwind: {\n path: \"$orderContents\"\n } },\n // Calculate summary metrics for yesterday's orders\n { $group: {\n _id: \"$orderDate\",\n orderIds: { $addToSet: \"$_id\" },\n numSKUsOrdered: { $sum: 1 },\n numItemsOrdered: { $sum: \"$orderContents.qty\" },\n 
totalSales: { $sum: \"$orderContents.price\" },\n averageOrderSales: { $avg: \"$orderContents.price\" },\n numItemsShipped: { $sum: \"$orderHasShipped\" },\n } },\n // Add the total number of orders placed\n { $addFields: {\n numOrders: { $size: \"$orderIds\" }\n } }\n ]).next()\n .then(dailyReport => {\n reports.insertOne(dailyReport);\n })\n .catch(err => console.error(\"Failed to generate report:\", err));\n};\n\nfunction makeThisMorningDate() {\n return setTimeToMorning(new Date());\n}\n\nfunction makeYesterdayMorningDate() {\n const thisMorning = makeThisMorningDate();\n const yesterdayMorning = new Date(thisMorning);\n yesterdayMorning.setDate(thisMorning.getDate() - 1);\n return yesterdayMorning;\n}\n\nfunction setTimeToMorning(date) {\n date.setHours(7);\n date.setMinutes(0);\n date.setSeconds(0);\n date.setMilliseconds(0);\n return date;\n}" + }, + { + "lang": "json", + "value": "{\n \"type\": \"SCHEDULED\",\n \"name\": \"reportDailyOrders\",\n \"function_name\": \"generateDailyReport\",\n \"config\": {\n \"schedule\": \"0 7 * * *\"\n },\n \"disabled\": false\n}" + } + ], + "preview": "Scheduled triggers allow you to execute server-side logic on a\nregular schedule that you define.\nYou can use scheduled triggers to do work that happens on a periodic\nbasis, such as updating a document every minute, generating a nightly\nreport, or sending an automated weekly email newsletter.", + "tags": null, + "facets": { + "target_product": [ + "atlas" + ], + "target_product>atlas>sub_product": [ + "atlas-app-services" + ] + } + }, + { + "slug": "triggers", + "title": "Atlas Triggers", + "headings": [ + "Trigger Types", + "Limitations", + "Atlas Function Constraints Apply", + "Event Processing Throughput", + "Number of Triggers Cannot Exceed Available Change Streams", + "Diagnose Duplicate Events" + ], + "paragraphs": "Atlas Triggers execute application and database logic. Triggers\ncan respond to events or use pre-defined schedules. Triggers listen for events of a configured type. Each Trigger links to a\nspecific Atlas Function .\nWhen a Trigger observes an event that matches your\nconfiguration, it \"fires\" . The Trigger passes this event object as the\nargument to its linked Function. A Trigger might fire on: App Services keeps track of the latest execution time for each\nTrigger and guarantees that each event is processed at least once. A specific operation type in a given Collection. An authentication event, such as user creation or deletion. A scheduled time. App Services supports three types of triggers: Database triggers \nrespond to document insert, changes, or deletion. You can configure\nDatabase Triggers for each linked MongoDB collection. Authentication triggers \nrespond to user creation, login, or deletion. Scheduled triggers \nexecute functions according to a pre-defined schedule. Triggers invoke Atlas Functions. This means they have the same\nconstraints as all Atlas Functions. Learn more about Atlas Function constraints. Triggers process events when capacity becomes available. A Trigger's\ncapacity is determined by its event ordering configuration: Trigger capacity is not a direct measure of throughput or a guaranteed\nexecution rate. Instead, it is a threshold for the maximum number of\nevents that a Trigger can process at one time. In practice, the rate at\nwhich a Trigger can process events depends on the Trigger function's run\ntime logic and the number of events that it receives in a given\ntimeframe. 
To increase the throughput of a Trigger, you can try to: Ordered triggers process events from the change stream one at a time\nin sequence. The next event begins processing only after the previous\nevent finishes processing. Unordered triggers can process multiple events concurrently, up to\n10,000 at once by default. If your Trigger data source is an M10+\nAtlas cluster, you can configure individual unordered triggers to\nexceed the 10,000 concurrent event threshold. To learn more, see\n Maximum Throughput Triggers . Optimize the Trigger function's run time behavior. For example, you\nmight reduce the number of network calls that you make. Reduce the size of each event object with the Trigger's\n projection filter . For the best\nperformance, limit the size of each change event to 2KB or less. Use a match filter to reduce the number of events that the Trigger\nprocesses. For example, you might want to do something only if a\nspecific field changed. Instead of matching every update event and\nchecking if the field changed in your Function code, you can use the\nTrigger's match filter to fire only if the field is included in the\nevent's updateDescription.updatedFields object. App Services limits the total number of Database Triggers. The size of your\nAtlas cluster drives this limit. Each Atlas cluster tier has a maximum number of supported change\nstreams. A Database Trigger requires its own change stream. Other App Services\nalso use change streams, such as Atlas Device Sync. Database Triggers\nmay not exceed the number of available change streams. Learn more about the number of supported change streams for Atlas tiers. During normal Trigger operation, Triggers do not send duplicate events.\nHowever, when some failure or error conditions occur, Triggers may deliver\nduplicate events. You may see a duplicate Trigger event when: If you notice duplicate Trigger events, check the App Logs for suspended\nTriggers or server failures. A server responsible for processing and tracking events experiences a\nfailure. This failure prevents the server from recording its progress in a\ndurable or long-term storage system, making it \"forget\" it has processed\nsome of the latest events. Using unordered processing where events 1 through 10 are sent simultaneously.\nIf event 9 fails and leads to Trigger suspension, events like event 10 might\nget processed again when the system resumes from event 9. 
This can lead to\nduplicates, as the system doesn't strictly follow the sequence of events and\nmay reprocess already-handled events.", + "code": [], + "preview": "Use Atlas Triggers to execute application and database logic in response to events or schedules.", + "tags": null, + "facets": { + "target_product": [ + "atlas" + ], + "target_product>atlas>sub_product": [ + "atlas-app-services" + ] + } + } + ] +} \ No newline at end of file From 8afdc8c77664dd584d98d987ac5ff0b50742aa2d Mon Sep 17 00:00:00 2001 From: anabellabuckvar <41971124+anabellabuckvar@users.noreply.github.com> Date: Fri, 4 Oct 2024 11:13:24 -0400 Subject: [PATCH 32/40] DOP-5036 add compass current s3 manifest --- .../s3Manifests/compass-current.json | 3082 +++++++++++++++++ 1 file changed, 3082 insertions(+) create mode 100644 search-manifest/tests/resources/s3Manifests/compass-current.json diff --git a/search-manifest/tests/resources/s3Manifests/compass-current.json b/search-manifest/tests/resources/s3Manifests/compass-current.json new file mode 100644 index 000000000..cd2f8d02c --- /dev/null +++ b/search-manifest/tests/resources/s3Manifests/compass-current.json @@ -0,0 +1,3082 @@ +{ + "url": "http://mongodb.com/docs/compass/current", + "includeInGlobalSearch": true, + "documents": [ + { + "slug": "agg-pipeline-builder/aggregation-pipeline-builder-settings", + "title": "Aggregation Pipeline Builder Settings", + "headings": [ + "Settings", + "Learn More" + ], + "paragraphs": "You can adjust your MongoDB Compass Aggregation Pipeline Builder settings to customize\nyour aggregation experience and improve pipeline performance. To view and change your aggregation pipeline settings, click the gear icon at\nthe upper right of the pipeline builder to open the Settings panel. Option Description Default Value Comment Mode When enabled, adds helper comments to each stage. Enabled Number of Preview Documents Sets number of documents to show in the preview. 10 Limit Specifies the number of documents passed to $group ,\n $bucket , and $bucketAuto pipeline stages. Lower\nlimits improve pipeline run time but might result in missing documents. This setting is only applied to document previews. It is not applied\nwhen the pipeline is run. 100000 Create an Aggregation Pipeline Specify Custom Collation For Your Pipeline Set Max Time MS for Aggregation Queries", + "code": [], + "preview": "You can adjust your MongoDB Compass Aggregation Pipeline Builder settings to customize\nyour aggregation experience and improve pipeline performance.", + "tags": null, + "facets": { + "target_product": [ + "compass" + ] + } + }, + { + "slug": "agg-pipeline-builder/count-pipeline-results", + "title": "Count Pipeline Results Documents", + "headings": [ + "About this Task", + "Before You Begin", + "Steps", + "Click the count results button", + "(Optional) Click the refresh icon", + "Learn More" + ], + "paragraphs": "You can view the number of documents outputted by your pipeline with the\n count results button. When you delete or add a document, you must manually refresh the\n count results value on the Aggregations tab to reflect\nthe new document count. To count result documents, you must first create and run your aggregation pipeline. You can't count result\ndocuments while editing your pipeline. After you run your pipeline, click count results , which\nappears under the Run button. The count results \nbutton will update with the count of resulting documents. If you edit your pipeline, you must press the button\nto update your document count. 
Create an Aggregation Pipeline Manage Documents", + "code": [], + "preview": "You can view the number of documents outputted by your pipeline with the\ncount results button.", + "tags": null, + "facets": { + "target_product": [ + "compass" + ] + } + }, + { + "slug": "agg-pipeline-builder/create-a-view", + "title": "Create a View from Pipeline Results", + "headings": [ + "About this Task", + "Steps", + "Click the Save drop-down button", + "Enter a name for your view", + "Create your view", + "Learn More" + ], + "paragraphs": "To quickly access the results of an aggregation pipeline without having to\nrun it, you can create a view on MongoDB Compass . Views are\nread-only, so they can help keep your data secure by limiting user access to a\npredefined set of results. Creating a view does not save the aggregation pipeline itself. In the aggregation pipeline pane, click the Save drop-down\nbutton and select Create view . The view name must be between 6 and 1024 characters long. Click the Create button to create your view. Compass \ncreates a view from your pipeline results in the same database where the\npipeline was created and displays saved views with the \nicon. Views", + "code": [], + "preview": "To quickly access the results of an aggregation pipeline without having to\nrun it, you can create a view on MongoDB Compass. Views are\nread-only, so they can help keep your data secure by limiting user access to a\npredefined set of results.", + "tags": null, + "facets": { + "target_product": [ + "compass" + ] + } + }, + { + "slug": "agg-pipeline-builder/export-pipeline-results", + "title": "Export Aggregation Pipeline Results", + "headings": [ + "About this Task", + "Steps", + "Click the Export button", + "Choose the file type for your export", + "(Optional) Choose an advanced JSON format option", + "Click Export", + "Specify export destination", + "Learn More" + ], + "paragraphs": "To use your aggregated data in other external tools, you can export your\npipeline results from MongoDB Compass as a JSON or CSV file. Avoid using CSV files exports to back up your data. CSV files may lose type\ninformation and are not suitable for backing up your data. In the top right of the aggregation pipeline builder, click the\n Export button to open the modal. Under Export File Type , select either JSON or\n CSV . If you select JSON , your data is exported\nto the target file as an array of JSON objects. If choose to export your data as a JSON file, you can expand\nthe Advanced JSON Format drop-down menu and select from the\nfollowing extended JSON formats: JSON Format Description Sample Document Default Extended JSON A string format that avoids any loss of BSON type information. This\nis the default Compass setting. Relaxed Extended JSON A string format that emphasizes readability and interoperability at\nthe expense of type preservation. That is, conversion from relaxed\nformat to BSON can lose type information. WARNING: This format is not recommended for data integrity. Canonical Extended JSON A string format that emphasizes type preservation at the expense of\nreadability and interoperability. That is, conversion from canonical\nto BSON will generally preserve type information except in certain\nspecific cases. In the new pop-up modal, enter a name for your export file, specify the file\ndestination, and click Select to export your pipeline results. 
Export Data from a Collection Export Pipeline to Specific Language", + "code": [ + { + "lang": "javascript", + "value": "{\n \"fortyTwo\" : 42,\n \"oneHalf\" : 0.5,\n \"bignumber\" : {\n \"$numberLong\" : \"5000000000\"\n }\n}" + }, + { + "lang": "javascript", + "value": "{\n \"fortyTwo\" : 42,\n \"oneHalf\": 0.5,\n \"bignumber\" : 5000000000\n}" + }, + { + "lang": "javascript", + "value": "{\n \"fortyTwo\" : {\n \"$numberInt\" : \"42\"\n },\n \"oneHalf\" : {\n \"$numberDouble\" : \"0.5\"\n },\n \"bignumber\" : {\n \"$numberLong\" : \"5000000000\"\n }\n}" + } + ], + "preview": "To use your aggregated data in other external tools, you can export your\npipeline results from MongoDB Compass as a JSON or CSV file.", + "tags": null, + "facets": { + "target_product": [ + "compass" + ] + } + }, + { + "slug": "agg-pipeline-builder/export-pipeline-to-language", + "title": "Export Pipeline to Specific Language", + "headings": [ + "About this Task", + "Steps", + "Click the Export to Language button", + "Select your export language", + "(Optional) Include import statements", + "(Optional) Include driver syntax", + "Click ", + "Learn More" + ], + "paragraphs": "You can use the Aggregation Pipeline Builder to format and export finished pipelines. You can export\npipelines to a chosen language to use in your application. You can export your pipeline to the following languages: C# Go Java Node PHP Python Ruby Rust In the aggregation pipeline pane, click the Export to Language \nbutton to open the pipeline export card. On the right side of the card, click the drop-down menu under\n Exported Pipeline and select your desired programming\nlanguage. The My Pipeline pane on the left of the export card displays\nyour pipeline in mongosh syntax. The\n Exported Pipeline pane to the right displays your pipeline in\nthe selected programming language. Click the Include Import Statements checkbox to include the\nrequired import statements for the selected programming language. Click the Include Driver Syntax checkbox to include\napplication code for the selected programming language. If you include\ndriver syntax, the copyable code reflects project , sort , maxtimems , collation ,\n skip and limit options. Click the icon at the top-right of the pipeline to\ncopy your pipeline for the selected programming language. You can now\nintegrate and execute your created pipeline in your application. Aggregation Pipeline Builder MongoDB Driver Documentation", + "code": [], + "preview": "You can use the Aggregation Pipeline Builder to format and export finished pipelines. You can export\npipelines to a chosen language to use in your application.", + "tags": null, + "facets": { + "target_product": [ + "compass" + ] + } + }, + { + "slug": "agg-pipeline-builder/maxtime-ms-pipeline", + "title": "Set Max Time MS for Aggregation Queries", + "headings": [ + "About this Task", + "Steps", + "Click More Options", + "Specify a Max Time MS value", + "Learn More" + ], + "paragraphs": "Use the Max Time MS option on the Aggregations tab to\nspecify an upper time limit in milliseconds for aggregation pipelines\nthat run in MongoDB Compass . By default, Max Time MS is set to 60000 milliseconds, or 60 seconds.\nConsider raising this value if you have a large collection or your operations\nfrequently time out. Alternatively, consider lowering the\n Max Time MS value to quickly identify inefficient or\nresource-intensive pipeline operations. 
If you already specified a maxTimeMS value in the Compass Settings\npanel , the command line , or\na configuration file , the Max Time MS \nthat you specify for your pipeline must be lower than the limit you previously\nspecified. If your aggregation operation goes over the time limit, Compass raises\na timeout error. On the Aggregations tab, click More Options Next to the Max Time MS field, enter a numeric value to set as\nthe maximum amount of time in milliseconds that an aggregation\npipeline can run. For example, to set a 5 second limit, enter 5000 . Command Line Options Configuration File Settings Interface Settings Adjust Maximum Time for Query Operations", + "code": [], + "preview": "Use the Max Time MS option on the Aggregations tab to\nspecify an upper time limit in milliseconds for aggregation pipelines\nthat run in MongoDB Compass.", + "tags": null, + "facets": { + "target_product": [ + "compass" + ] + } + }, + { + "slug": "agg-pipeline-builder/open-saved-pipeline", + "title": "Open a Saved Pipeline", + "headings": [ + "Before You Begin", + "Steps", + "Click the folder icon", + "Select an aggregation pipeline", + "Confirm your selection", + "Learn More" + ], + "paragraphs": "Instead of creating a new pipeline from the beginning, you can load and edit\nsaved aggregation pipelines. If you don't already have a saved pipeline, you must first: Create an aggregation pipeline . Save your aggregation pipeline . In the top-left corner of the pipeline builder, click the folder icon to\nopen the drop-down menu of saved pipelines. In the drop-down menu, hover over the pipeline you want to open and\nclick Open . In the pop-up modal, click Open Pipeline . Starting in MongoDB Compass 1.31, you can view your saved queries\nand aggregation pipelines on the My Queries tab\nafter you connect to your cluster. To learn more, see\n viewing saved aggregations for details. Save a Pipeline Using the My Queries Tab", + "code": [], + "preview": "Instead of creating a new pipeline from the beginning, you can load and edit\nsaved aggregation pipelines.", + "tags": null, + "facets": { + "target_product": [ + "compass" + ] + } + }, + { + "slug": "agg-pipeline-builder/pipeline-custom-collation", + "title": "Specify Custom Collation For Your Pipeline", + "headings": [ + "About this Task", + "Steps", + "Click More Options", + "Enter your collation document", + "Example", + "Learn More" + ], + "paragraphs": "Use custom collation to specify language-specific rules for string comparison,\nsuch as rules for letter case and accent marks, within your aggregation pipeline. When entering a collation document, the locale field is mandatory. Default\ncollation field values vary depending on which locale you specify. To learn\nmore about supported languages and locales, see\n Collation Locales and Default Parameters . In the top-right corner of the pipeline builder, click\n More Options . Next to the Collation field, enter your\n collation document . After you enter your collation document, the aggregation pipeline builder\nconsiders the language-specific rules that you specified in your document. 
The following sample collation document specifies French as the chosen\n locale and sorts uppercase letters before lowercase letters with the\n caseFirst field: Collation Collation Locales and Default Parameters", + "code": [ + { + "lang": "javascript", + "value": "{\n locale: \"fr\",\n caseFirst: \"upper\"\n}" + } + ], + "preview": "Use custom collation to specify language-specific rules for string comparison,\nsuch as rules for letter case and accent marks, within your aggregation pipeline.", + "tags": null, + "facets": { + "target_product": [ + "compass" + ] + } + }, + { + "slug": "agg-pipeline-builder/save-agg-pipeline", + "title": "Save a Pipeline", + "headings": [ + "Steps", + "Click the save dropdown button", + "Enter a name for your pipeline", + "Save the pipeline", + "Learn More" + ], + "paragraphs": "You can save your aggregation pipelines to find and use them again later. If\nyou load a saved pipeline, you can make edits to the pipeline stages without\nchanging the original saved pipeline. You can also create a view from your pipeline results. In the aggregation pipeline pane, click the Save drop-down\nmenu and select Save as . Click the Save button to save your pipeline. Your pipeline\nwill be saved under the folder icon at the top left of the\npipeline builder. Open a Saved Pipeline Create a View from Pipeline Results Managing Saved Queries and Aggregations", + "code": [], + "preview": "You can save your aggregation pipelines to find and use them again later. If\nyou load a saved pipeline, you can make edits to the pipeline stages without\nchanging the original saved pipeline. You can also create a view from your pipeline results.", + "tags": null, + "facets": { + "target_product": [ + "compass" + ] + } + }, + { + "slug": "agg-pipeline-builder/view-pipeline-explain-plan", + "title": "View Explain Plans for a Pipeline", + "headings": [ + "About this Task", + "Steps", + "Click the Explain button", + "Select an aggregation pipeline", + "(Optional) Select the Raw Output view", + "Learn More" + ], + "paragraphs": "To help you better understand the performance of your pipeline, you can view\nyour pipeline's explain plan. You can view the explain plan at any point while\ncreating or editing your pipeline. On the Explain modal, you can view the explain stages as a\n Visual Tree , where each stage of the pipeline appears as a node on\nthe tree. Alternatively, you can view the explain details in raw JSON format by selecting\nthe Raw Output view. The explain plan includes a Query Performance Summary with\ninformation on the execution of your aggregation pipeline such as: Execution time The number of returned documents The number of examined documents The number of examined index keys In the top right of the aggregation pipeline builder, click the\n Explain button to open the Explain Plan modal. By default, the explain stages are are shown as a Visual Tree .\nEach stage of the pipeline appears as a node on the tree. You can click on each stage for more detailed execution information about\nthe stage. To view your full explain plan as raw JSON, select the\n Raw Output view. Analyze Query Performance View Query Performance", + "code": [], + "preview": "To help you better understand the performance of your pipeline, you can view\nyour pipeline's explain plan. 
You can view the explain plan at any point while\ncreating or editing your pipeline.", + "tags": null, + "facets": { + "target_product": [ + "compass" + ] + } + }, + { + "slug": "collections/capped-collection", + "title": "Create a Capped Collection", + "headings": [ + "Steps", + "Click the Create Collection button", + "Enter the collection name", + "Click the Additional preferences dropdown", + "Enter the size of the capped collection", + "Click Create Collection to create the collection", + "Restrictions and Limitations" + ], + "paragraphs": "Capped Collections are fixed-sized\ncollections that support high-throughput operations that insert and retrieve\ndocuments based on insertion order. From the Collections screen, click the\n Create Collection button. Check the Capped Collection option. Enter the maximum number of bytes that the collection can hold. The following restrictions and limitations apply when creating a\ncapped collection: Custom collation is the only Advanced Collection Option \nthat can be used alongside your capped collection. If you import a data set larger than the maximum size of the capped collection,\n Compass only loads the last documents of the data set and drops the oldest\ndocuments. Once the collection is created, you cannot adjust the maximum number of bytes.", + "code": [], + "preview": "Capped Collections are fixed-sized\ncollections that support high-throughput operations that insert and retrieve\ndocuments based on insertion order.", + "tags": null, + "facets": { + "target_product": [ + "compass" + ] + } + }, + { + "slug": "collections/clustered-collection", + "title": "Create a Clustered Collection", + "headings": [ + "Restrictions", + "Steps", + "Open the Create Collection dialog box.", + "Enter the collection name.", + "Select the type of collection you want to create.", + "(Optional) Name your clustered index.", + "(Optional) Enter the number of seconds for the expireAfterSeconds field.", + "Click Create Collection to create your new collection.", + "Next Steps" + ], + "paragraphs": "Clustered collections are collections\nwith a clustered index. Clustered collections store documents ordered by clustered index \nkey value. You can use clustered collections when only one clustered index is\nnecessary. Clustered collection limitations: The clustered index key must be on the _id field. Clustered collections may not be capped collections . Select a database and from the Collections screen, click the\n Create Collection button. You can also click the + next to the name of the database you select\nto open the Create Collection dialog box. From the Additional preferences drop-down, select\n Clustered Collections . You can enter a name for the clustered index or use the automatically\ngenerated name. The expireAfterSeconds field is a TTL index \nthat enables automatic deletion of documents older than the specified\nnumber of seconds. The expireAfterSeconds field must be a positive,\nnon-zero value. In the Collections screen, your new collection is marked by a\n Clustered badge next to the collection name. Manage Documents Query Your Data Analyze Your Data Scheme", + "code": [], + "preview": "Clustered collections are collections\nwith a clustered index. Clustered collections store documents ordered by clustered index\nkey value. 
You can use clustered collections when only one clustered index is\nnecessary.", + "tags": null, + "facets": { + "target_product": [ + "compass" + ] + } + }, + { + "slug": "collections/collation-collection", + "title": "Create a Collection with Collation", + "headings": [ + "Procedure", + "Click the Create Collection button.", + "Enter the collection name.", + "Click the Additional preferences dropdown.", + "Select a value for locale.", + "Click Create Collection to create the collection.", + "Restrictions and Limitations", + "Example" + ], + "paragraphs": "Collation allows you to specify\nlanguage-specific rules for string comparison, such as rules for lettercase\nand accent marks. From the Collections screen, click the\n Create Collection button. Check the Use Custom Collaton option. You are required to select a locale from the MongoDB\nsupported languages . All other collation options parameters are optional. For descriptions of\nthe fields, see Collation . The following restrictions apply when the parameter numericOrdering \nis set to true : Only contiguous non-negative integer substrings of digits are\nconsidered in the comparisons. numericOrdering does not support: + - exponents Only Unicode code points in the Number or Decimal Digit (Nd) category\nare treated as digits. If the number length exceeds 254 characters, the excess characters are\ntreated as a separate number. Consider a collection with the following string number and decimal values: The following find query uses a collation document containing the\n numericOrdering parameter: For more information on querying documents in Compass , see\n Query Your Data . The operations returns the following results: numericOrdering: true sorts the string values in ascending order as if\nthey were numeric values. The two negative values -2.1 and -10 are not sorted in the\nexpected sort order because they have unsupported - characters.", + "code": [ + { + "lang": "javascript", + "value": "[\n { \"n\": \"1\" },\n { \"n\": \"2\" },\n { \"n\": \"-2.1\" },\n { \"n\": \"2.0\" },\n { \"n\": \"2.20\" },\n { \"n\": \"10\"},\n { \"n\": \"20\" },\n { \"n\": \"20.1\" },\n { \"n\": \"-10\" },\n { \"n\": \"3\" }\n]" + }, + { + "lang": "javascript", + "value": " db.c.find(\n { }, { _id: 0 }\n ).sort(\n { n: 1 }\n ).collation( {\n locale: 'en_US',\n numericOrdering: true\n} )" + }, + { + "lang": "javascript", + "value": "[\n { \"n\": \"-2.1\" },\n { \"n\": \"-10\" },\n { \"n\": \"1\" },\n { \"n\": \"2\" },\n { \"n\": \"2.0\" }\n { \"n\": \"2.20\" },\n { \"n\": \"3\" },\n { \"n\": \"10\" },\n { \"n\": \"20\" },\n {\"n\": \"20.1\" }\n]" + } + ], + "preview": "Collation allows you to specify\nlanguage-specific rules for string comparison, such as rules for lettercase\nand accent marks.", + "tags": null, + "facets": { + "target_product": [ + "compass" + ] + } + }, + { + "slug": "collections/encrypted-collection", + "title": "Create a Collection with Queryable Encryption", + "headings": [ + "Procedure", + "Click the Create Collection button.", + "Enter the collection name.", + "Click the Additional preferences dropdown.", + "Specify an Encrypted Field.", + "Click Create Collection to create the collection.", + "Restrictions and Limitations" + ], + "paragraphs": "Queryable Encryption allows you to\nencrypt a subset of fields in your collection. From the Collections screen, click the\n Create Collection button. Check the Queryable Encryption option. Specify which fields should be encrypted and whether they should\nbe queryable. 
The following fields must be used if no existing keyId was specified for\nat least one encrypted field, and have no effect otherwise: KMS Provider . Key Encryption Key . Your collection will be marked by a Queryable Encryption \nbadge. Your deployment must be connected using In-Use Encryption to encrypt your\ncollection using Queryable Encryption. Custom collation is the only Advanced Collection Option \nthat can be used alongside Queryable Encryption.", + "code": [], + "preview": "Queryable Encryption allows you to\nencrypt a subset of fields in your collection.", + "tags": null, + "facets": { + "target_product": [ + "compass" + ] + } + }, + { + "slug": "collections/time-series-collection", + "title": "Create a Time Series Collection", + "headings": [ + "Procedure", + "Click the Create Collection button.", + "Enter the collection name.", + "Check the Time Series Collection option.", + "Specify a timeField.", + "Optional. Specify a metaField.", + "Optional. Select a granularity from the dropdown.", + "Optional. Specify a numeric value for the following fields.", + "Click Create Collection to create the collection.", + "Restrictions and Limitations" + ], + "paragraphs": "Time series collections efficiently\nstore sequences of measurements over a period of time. For more information on time series fields, see Time Series\nObject Fields . From the Collections screen, click the\n Create Collection button. Specify which field should be used as the timeField for the time-series\ncollection. This field must have a BSON type date . Specify the name of the field that contains metadata in each time\nseries document. The metadata in the specified field should be\ndata that is used to label a unique series of documents. Specify a coarser granularity so measurements over a longer time\nspan can be more efficiently stored and queried. The default value\nis \"seconds\" . If you set the granularity parameter, you can't set the\n bucketMaxSpanSeconds and bucketRoundingSeconds parameters. Field Type Description bucketMaxSpanSeconds number Specifies the maximum time span between measurements in a bucket. The value of bucketMaxSpanSeconds must be the same as\n bucketRoundingSeconds . If you set the bucketMaxSpanSeconds ,\nparameter, you can't set the granularity parameter. bucketRoundingSeconds number Specifies the time interval that determines the starting timestamp\nfor a new bucket. The value of bucketRoundingSeconds must be the same as\n bucketMaxSpanSeconds . If you set the bucketRoundingSeconds ,\nparameter, you can't set the granularity parameter. expireAfterSeconds number Enables the automatic deletion of documents that are older than\nthe specified number of seconds. Your collection will be marked by a time series badge. The following restrictions and limitations apply when creating a time series\ncollection: Custom collation is the only Advanced Collection Option \nthat can be used alongside your time series collation. 
See Time Series Collection Limitations \nfor all time series collection limitations.", + "code": [], + "preview": "Time series collections efficiently\nstore sequences of measurements over a period of time.", + "tags": null, + "facets": { + "target_product": [ + "compass" + ] + } + }, + { + "slug": "collections", + "title": "Collections", + "headings": [ + "Collections Screen", + "Collection Information", + "Create a Collection", + "Click the Create Collection button.", + "Enter the collection information.", + "Click Create Collection to create the collection.", + "Drop a Collection", + "Click the trash can icon to delete a collection.", + "Confirm the collection to delete.", + "Click Drop Collection to drop the collection.", + "Collection Details", + "Tabbed View", + "Click the caret icon next to the database which contains the collection you wish to view.", + "Hover over the desired collection.", + "Click the appearing ellipses (...) button.", + "Click Open in New Tab.", + "Limitations" + ], + "paragraphs": "A collection is a grouping of MongoDB\n documents . Documents within a collection can\nhave different fields. A collection is the equivalent of a\ntable in a relational database system. A collection exists within a\nsingle database The Collections screen lists the existing\n collections and\n views in the selected database. Each\nlist item includes the name and other general information for\nthe collection or view. To access the Collections screen for a database, from the\n Databases screen either: Click a Database Name in the main Databases \nview, or Click a database in the left navigation. The Collections screen displays the following\ninformation for each collection in the selected database: Collection name Number of documents in the collection Compass bases the document count that appears on the\n Collections screen on cached metadata using\n collStats . This count\nmight differ from the actual document count in the collection. For\nexample, an unexpected shutdown can throw off the count.\nUse the db.collection.countDocuments() method for the\nmost accurate document count. Average size of documents in the collection Total size of all documents in the collection Number of indexes on the collection Total size of all indexes on the collection Collation properties for the\ncollection. Hover over a Collation banner to view\nthe properties for that collection. You can create new collections in an existing database. From the Collections screen, click the\n Create Collection button. In the Create Collection dialog, enter the name of the\ncollection to create. Compass also provides you with Additional preferences .\nYou can select from the following: Create a Capped Collection Create a Clustered Collection Create a Collection with Collation Create a Collection with Encrypted Fields Create a Time Series Collection From the Collections screen, click on the trash can for\nthe collection to delete. A confirmation dialog appears. In the dialog, enter the name of the collection to delete. The Collection Detail screen shows detailed information for a\ncollection, including the documents the collection contains. To see\ncollection details, either: After you select a collection, Compass shows you that\ncollection's Documents tab . Compass provides\nthe following collection information and functionality in the detailed\nview: Click a Collection Name in the main\n Collections screen, or Click a collection in the left navigation. Open the collection in a new tab . 
Manage Documents Import and Export Data to / from the collection Create an Aggregation Pipeline Schema Analysis View Query Performance Manage Indexes Set Validation Rules for Your Schema The following functionality is not available if you are connected to\na Data Lake : Import data into a collection Schema Analysis View Query Performance Manage Indexes Set Validation Rules for Your Schema You can open multiple Collection Detail screens in separate tabs. To\nopen a new tab to view collection details: Creating and dropping collections is not permitted in MongoDB Compass Readonly Edition . The Create Collection button is not available if you are\nconnected to a Data Lake .", + "code": [], + "preview": "How to create or drop a collection, which is a grouping of MongoDB documents equivalent to a table in a relational database system.", + "tags": null, + "facets": { + "genre": [ + "tutorial" + ], + "target_product": [ + "compass" + ] + } + }, + { + "slug": "connect/advanced-connection-options/advanced-connection", + "title": "Advanced Connection Tab", + "headings": [ + "Procedure", + "Open the New Connection modal.", + "Click Advanced Connection Options.", + "Click the Advanced tab.", + "Additional Optional Fields", + "Click Connect." + ], + "paragraphs": "The Advanced connection tab provides additional connection options\nfor your deployment. These options can be used with the\n General , Authentication , TLS / SSL , and\n Proxy / SSH Tunnel connection options. In the bottom panel of the Connections Sidebar , click\n Add New Connection to open the New Connection \nmodal. If you already have connections listed in the Connections\nSidebar , click the icon on the top right of the sidebar\nto open the New Connection modal. (Optional) Select a Read Preference from the following\noptions: Read Preference Description Primary Default mode. All operations read from the current replica set primary.\nIf the primary is unavailable, the operation fails. Primary Preferred Operations read from the primary unless the primary is unavailable.\nIf the primary is unavailable, operations read from secondary members. Secondary All operations read from the secondary members of the replica set.\nIf the secondary is unavailable, the operation fails. Secondary Preferred Operations read from the secondary unless the secondary is unavailable.\nIf the secondary is unavailable, operations read from the primary\non sharded clusters. Nearest Operations read from a random eligible replica set member,\nirrespective of whether that member is a primary or secondary, based\non a specified latency threshold. The operation considers the following\nwhen calculating latency: The localThresholdMS \nconnection string option. The maxStalenessSeconds \nread preference option. Any specified tag sets. Read Preference . Read Preference Use Cases . Field Description Replica Set Name (Optional) Name of replica set. Default Authentication Database (Optional) Authentication database used when authSource is not specified.\nFor more information, see Authentication Options . URI Options Additional options to customize your connection. You\nspecify these options as key-value pairs, and Compass \nautomatically adds the key-value pairs to the connection string.\nFor more information, see Connection String Options . To disconnect from your deployment, see Disconnect from MongoDB .", + "code": [], + "preview": "The Advanced connection tab provides additional connection options\nfor your deployment.
These options can be used with the\nGeneral, Authentication, TLS / SSL, and\nProxy / SSH Tunnel connection options.", + "tags": null, + "facets": { + "target_product": [ + "compass" + ] + } + }, + { + "slug": "connect/advanced-connection-options/authentication-connection", + "title": "Authentication Connection Tab", + "headings": [ + "Procedure", + "Open the New Connection modal.", + "Click Advanced Connection Options.", + "Click the Authentication tab.", + "Username / Password", + "OIDC", + "X.509", + "Kerberos", + "Authenticate as a Different Kerberos User on Windows", + "LDAP", + "AWS IAM", + "(Optional) For advanced connection configuration options, click the Advanced tab.", + "Click Connect." + ], + "paragraphs": "The Authentication tab allows you to connect to deployments that\nrequire authentication. To learn about authentication mechanisms within MongoDB,\nsee Authentication Mechanisms . In the bottom panel of the Connections Sidebar , click\n Add New Connection to open the New Connection \nmodal. If you already have connections listed in the Connections\nSidebar , click the icon on the top right of the sidebar\nto open the New Connection modal. Select your authentication method from the following options: Username / Password OIDC X.509 Kerberos LDAP AWS IAM Provide the following information: Username Password (Optional) Authentication Database Authentication Mechancism: Default The Default setting selects the first authentication mechanism\nsupported by the server according to an order of preference. With the Default setting, MongoDB tries to authenticate using\nthe following mechanisms in the order they are listed: SCRAM-SHA-256 SCRAM-SHA-1 MONGODB-CR SCRAM-SHA-1 SCRAM-SHA-256 Select OIDC if the deployment uses OpenID Connect \nas its authentication mechanism. Provide the following information: Field Description Username Optional. OpenID Connect username. Auth Code Flow Redirect URI Optional. Specify a URI where the identity provider redirects you after authentication.\nThe URI must match the configuration of the Identity Provider.\nThe default is http://localhost:27097/redirect . Consider Target Endpoint Trusted Optional. Allows connecting to a target endpoint that is not in the\nlist of endpoints that are considered trusted by default. Only use\nthis option when connecting to servers that you trust. Enable Device Authentication Flow Optional. When the Show Device Auth Flow Checkbox \nsetting is enabled, Compass can provide you with a URL and code\nto finish authentication. This is a less secure authentication flow that can be used as a\nfallback when browser-based authentication is unavailable. Select X.509 if the deployment uses X.509 as its authentication mechanism. X.509 Authentication requires a client certificate. To enable\nTLS and add a client certificate, see the TLS / SSL tab \nin Compass . Select Kerberos if the deployment uses Kerberos as its authentication mechanism. Provide the following information: Field Description Principal Every participant in the authenticated communication is known as a\n\"principal\", and every principal must have a unique name. (Optional) Service Name Every MongoDB mongod and mongos instance\n(or mongod.exe and mongos.exe on Windows) must have an associated service name. The\ndefault is mongodb . (Optional) Canonicalize Host Name Kerberos uses the canonicalized form of the host name (cname) \nwhen constructing the principal for MongoDB Compass . 
(Optional) Service Realm The service realm is the domain over which a Kerberos authentication\nserver has the authority to authenticate a user. If you choose to Canonicalize Host Name , you can specify\none of the following options: For more information on principal name canonicalization in Kerberos, see\nthis RFC document . Option Description Forward The driver does a cname lookup on the kerberos hostname. Forward and Reverse Performs a forward DNS lookup and then a reverse lookup on that\nvalue to canonicalize the hostname. (Optional) Provide password directly Used to verify your identity. To show the Kerberos password field,\nyou must enable the showKerberosPasswordField option. When you authenticate with Kerberos on Windows, the\n Principal you specify must match the principal of the\nsecurity context that Compass is running. Normally, this\nis the logged-in user who is running Compass . To authenticate as a different Kerberos user, run MongoDB Compass as the\nchosen user and specify the Principal for that user.\nTo run MongoDB Compass as a different user, either: After you start MongoDB Compass as the chosen user, to authenticate\nagainst your Kerberos-enabled MongoDB deployment, specify the\n Principal for the corresponding user. Hold Shift and right-click the MongoDB Compass program\nicon to select Run as a different user . Use the runas command-line tool. For example, the following\ncommand runs MongoDB Compass as a user named admin : Select LDAP if the deployment uses LDAP as its authentication mechanism. Provide the following information: Username Password Select AWS IAM if the deployment uses AWS IAM as\nits authentication mechanism. The following fields are optional as they can be defined on your platform\nusing their respective AWS IAM environment variables. MongoDB Compass will\nuse these environment variable values to authenticate; you do not\nneed to specify them in the connection string. (Optional) AWS Access Key Id (Optional) AWS Secret Access Key (Optional) AWS Session Token To disconnect from your deployment, see Disconnect from MongoDB .", + "code": [ + { + "lang": "sh", + "value": "runas /profile /user:mymachine\\admin " + } + ], + "preview": "The Authentication tab allows you to connect to deployments that\nrequire authentication. To learn about authentication mechanisms within MongoDB,\nsee Authentication Mechanisms.", + "tags": null, + "facets": { + "target_product": [ + "compass" + ] + } + }, + { + "slug": "connect/advanced-connection-options/general-connection", + "title": "General Connection Tab", + "headings": [ + "Procedure", + "Open the New Connection modal.", + "Click Advanced Connection Options.", + "Click the General tab.", + "(Optional) For advanced connection configuration options, click the Advanced tab.", + "Click Connect." + ], + "paragraphs": "The General connection tab allows you to select a\n Connection String Scheme and configure the hostname or hosts\nused to connect to your MongoDB deployment. Starting in version 1.44.0, you can connect to multiple MongoDB\ninstances at once through Compass. In the bottom panel of the Connections Sidebar , click\n Add New Connection to open the New Connection \nmodal. If you already have connections listed in the Connections\nSidebar , click the icon on the top right of the sidebar\nto open the New Connection modal. Select a connection string scheme. 
You can select one of the following options: Connection String Scheme Definition mongodb Standard Connection String Format .\nThe standard format of the MongoDB connection URI is used to\nconnect to a MongoDB deployment by specifying its hosts directly. mongodb+srv DNS Seed List Connection Format .\nThe +srv indicates to MongoDB Compass that the hostname that follows\ncorresponds to a DNS SRV record. Enter a Hostname. In the Host field, enter the hosts or hostname of the server\nwhere the deployment is running. If you are running your deployment locally, this value is localhost .\nIf you are connecting to an Atlas cluster, see\n Connect to Database Deployment \nfor Atlas-specific guidance on how to connect with Compass . If you are not sure of your hostname, contact your Database\nAdministrator for information. (Optional) Enable Direct Connection. When you use the mongodb Connection String Scheme, you have the\noption to enable a Direct Connection . When you\nenable this setting, Compass runs all operations on the specified\nhost. When you specify directConnection and connect to a secondary member\nof a replica set, your write operations fail because it is not the\nprimary member. To disconnect from your deployment, see Disconnect from MongoDB .", + "code": [], + "preview": "The General connection tab allows you to select a\nConnection String Scheme and configure the hostname or hosts\nused to connect to your MongoDB deployment.", + "tags": null, + "facets": { + "target_product": [ + "compass" + ] + } + }, + { + "slug": "connect/advanced-connection-options/in-use-encryption", + "title": "In-Use Encryption Connection Tab", + "headings": [ + "Procedure", + "Open the New Connection modal.", + "Click Advanced Connection Options.", + "Click the In-Use Encryption tab.", + "Click Connect.", + "KMS Providers", + "Local KMS", + "AWS", + "GCP", + "Azure", + "KMIP", + "(Optional) Specify an EncryptedFieldsMap:" + ], + "paragraphs": "In-Use Encryption is an Enterprise/Atlas only feature. You need a\nreplica set or sharded cluster to use this connection option. Your replica set\ncan be a single node or larger. The In-Use Encryption connection tab allows you to connect your\ndeployments with Queryable Encryption . In the bottom panel of the Connections Sidebar , click\n Add New Connection to open the New Connection \nmodal. If you already have connections listed in the Connections\nSidebar , click the icon on the top right of the sidebar\nto open the New Connection modal. Provide a Key Vault Namespace . A Key Vault Namespace refers to a collection that\ncontains all the data keys used for encryption and decryption. Specify a collection in which data encryption keys are stored in\nthe format . . The non-official default\ndatabase/collection for keyVault is encryption.__keyVault . Select a KMS Provider . You can select from the following Key Management Systems: Local KMS AWS GCP Azure KMIP You can locally manage your key as a KMS using the\n Local KMS \noption. Click Generate Random Key to generate a 96-byte long\nbase64-encoded string. You need this key to access encrypted and\necrypted data. Compass does not save KMS credentials by default. Copy\nand save the key in an external location. You can use AWS \nto manage your keys. Specify the following fields: Field Required Description Access Key Id Yes Value of your AWS access key Id. Secret Access Key Yes Value of your AWS secret key. Session Token No Value of your AWS session token. 
Certificate Authority No One or more certificate files from trusted Certificate\nAuthorities to validate the certificate provided by the deployment. Client Certificate and Key No Specifies the location of a local .pem file that contains\neither the client's TLS/SSL X.509 certificate or the client's TLS/SSL\ncertificate and key. Client Key Password No If the Client Private Key is protected with a password,\nyou must provide the password. You can use Google Cloud Services to manage your keys. Specify the following fields: Field Required Description Service Account Email Yes The service account email to authenticate. Private Key Yes A base64-encoded private key. Endpoint No A host with an optional port. Certificate Authority No One or more certificate files from trusted Certificate\nAuthorities to validate the certificate provided by the deployment. Client Certificate and Key No Specifies the location of a local .pem file that contains\neither the client's TLS/SSL X.509 certificate or the client's TLS/SSL\ncertificate and key. Client Key Password No If the Client Private Key is protected with a password,\nyou must provide the password. You can use Azure Key Vault \nto manage your keys. Specify the following fields: Field Required Description Tenant Id Yes Identifies the organization for the account. Client Id Yes Authenticates a registered application. Client Secret Yes The client secret to authenticate a registered application. Identity Platform Endpoint Yes A host with an optional port. Certificate Authority No One or more certificate files from trusted Certificate\nAuthorities to validate the certificate provided by the deployment. Client Certificate and Key No Specifies the location of a local .pem file that contains\neither the client's TLS/SSL X.509 certificate or the client's TLS/SSL\ncertificate and key. Client Key Password No If the Client Private Key is protected with a password,\nyou must provide the password. You can use KMIP \nto manage your keys. Field Required Description Endpoint Yes The endpoint consists of a hostname and port separated by a colon. Certificate Authority No One or more certificate files from trusted Certificate\nAuthorities to validate the certificate provided by the deployment. Client Certificate and Key No Specifies the location of a local .pem file that contains\neither the client's TLS/SSL X.509 certificate or the client's TLS/SSL\ncertificate and key. Client Key Password No If the Client Private Key is protected with a password,\nyou must provide the password. Add an optional client-side EncryptedFieldsMap for enhanced security.\nFor more information, see Fields for Encryption .", + "code": [], + "preview": "In-Use Encryption is an Enterprise/Atlas only feature. You need a\nreplica set or sharded cluster to use this connection option. Your replica set\ncan be a single node or larger.", + "tags": null, + "facets": { + "target_product": [ + "compass" + ] + } + }, + { + "slug": "connect/advanced-connection-options/ssh-connection", + "title": "Proxy / SSH Tunnel Connection Tab", + "headings": [ + "Procedure", + "Open the New Connection modal.", + "Click Advanced Connection Options.", + "Click the Proxy / SSH Tunnel tab.", + "SSH Connection", + "Behavior", + "Socks5", + "Behavior", + "(Optional) For advanced connection configuration options, click the Advanced tab.", + "Click Connect." + ], + "paragraphs": "The Proxy / SSH Tunnel tab allows you to connect to your deployment\nwith a Proxy method or SSH tunnel. 
In the bottom panel of the Connections Sidebar , click\n Add New Connection to open the New Connection \nmodal. If you already have connections listed in the Connections\nSidebar , click the icon on the top right of the sidebar\nto open the New Connection modal. You have the following connection options: Option Description SSH with Password Connects to a deployment using a SSH Tunnel and has the option to secure\nthe connection with a password. For information on SSH connections and\nthe fields for this connection option, see SSH Connection . SSH with Identity File Connects to a deployment using a SSH Tunnel and uses an Identity File\nto secure the connection. For information on SSH connections and\nthe fields for this connection option, see SSH Connection . Socks5 Connects to a deployment using a seperate proxy server. For more\ninformation on the fields for this connection option, see\n Socks5 . To connect to a deployment using SSH with Password or\n SSH with Identity File , use the following fields: Field Description SSH Hostname Bastion (jumpbox) hostname. This is the unique identifier\n(Fully Qualified Domain Name, or FQDN) for the computer to access. SSH Port Port used for the SSH connection. This defaults to 22,\nthe standard port for SSH. SSH Username This is the user for which the profile to log into on the remote\nsystem. This is the user for which you want to establish the\nSSH connection. SSH Password (Optional) Password used to secure the SSH connection. This is\nrequired if you are not using an identity file. SSH Identity File File from which the identity (private key) for SSH public\nkey authentication is read. Unix or OS X: If using OpenSSH, identity files are found in the ~/.ssh \ndirectory. By default, the private key files have one of the following\nfile names: On Windows, the location of the identity files depends on your choice\nof SSH client, such as PuTTY. id_dsa id_ecdsa id_ed25519 id_rsa SSH Passphrase (Optional) If your private key is encrypted, provide the passphrase\nused to decrypt your private key. A passphrase provides an extra\nlayer of security for an SSH connection. In MongoDB Compass , SSH Tunnel Connections allow users to connect\nto replica sets. Users can connect to replica sets using SSH Tunnels and\n TLS / SSL connections . To connect to a deployment using Socks5 , use the following\nfields: Field Description Proxy Hostname Domain name, IPv4, or IPv6 address on which a socks5 proxy is listening. Proxy Tunnel Port (Optional) TCP port number. Default is 1080. Proxy Username (Optional) Username used to authenticate the connection with the proxy\nserver. Proxy Password (Optional) Password used to authenticate the connection with the\nproxy server. If you specify a non-local Socks5 proxy host and provide a\nusername and password authentication, you receive a warning\nstating that the credentials will be sent in plaintext. If you specify a remote Socks5 proxy host and do not enable TLS,\nyou receive a warning stating that the proxy administrator will\nbe able to intercept data sent and received by Compass . 
To disconnect from your deployment, see Disconnect from MongoDB .", + "code": [], + "preview": "The Proxy / SSH Tunnel tab allows you to connect to your deployment\nwith a Proxy method or SSH tunnel.", + "tags": null, + "facets": { + "target_product": [ + "compass" + ] + } + }, + { + "slug": "connect/advanced-connection-options/tls-ssl-connection", + "title": "TLS / SSL Connection Tab", + "headings": [ + "Procedure", + "Open the New Connection modal.", + "Click Advanced Connection Options.", + "Click the TLS / SSL tab.", + "Additional TLS / SSL Options", + "(Optional) For advanced connection configuration options, click the Advanced tab.", + "Click Connect." + ], + "paragraphs": "The TLS / SSL tab allows you to connect deployments with TLS / SSL.\nFor more information on TLS / SSL , see TLS Options In the bottom panel of the Connections Sidebar , click\n Add New Connection to open the New Connection \nmodal. If you already have connections listed in the Connections\nSidebar , click the icon on the top right of the sidebar\nto open the New Connection modal. You can leave TLS unset with the Default option or set the TLS / SSL\nconnection On or Off . Option Description Default The Default option leaves the TLS option unset . The\n Default / unset TLS /SSL option is enabled when using a\n DNS seedlist\n(SRV) \nin the connection string. To learn more about the additional options\navailable, see Additional TLS / SSL Options . On Select the On option when using a DNS seedlist (SRV) in the\nconnection string. When TLS / SSL Connection is On , you can\nspecify additional certificate options for your connection string.\nTo see more on the additional certificate options available, see\n Additional TLS / SSL Options . Off The Off option initiates a connection without \nTLS / SSL. Enable TLS / SSL to avoid security vulnerabilities. When TLS is On you can specify the following: Option Description Certificate Authority One or more certificate files from trusted Certificate Authorities\nto validate the certificate provided by the deployment. Client Certificate Specifies the location of a local .pem file that contains either\nthe client's TLS/SSL X.509 certificate or the client's TLS/SSL\ncertificate and key. Client Key Password If the Client Private Key is protected with a password,\nyou must provide the password. tlsInsecure Disables various certificate validations. tlsAllowInvalidHostnames Disables hostname validation of the certificate presented by\nthe deployment. tlsAllowInvalidCertificates Disable the validation of the server certificates. Enabling tlsInsecure , tlsAllowInvalidHostnames , and\n tlsAllowInvalidCertificates may cause a security vulnerability. To disconnect from your deployment, see Disconnect from MongoDB .", + "code": [], + "preview": "The TLS / SSL tab allows you to connect deployments with TLS / SSL.\nFor more information on TLS / SSL, see TLS Options", + "tags": null, + "facets": { + "target_product": [ + "compass" + ] + } + }, + { + "slug": "connect/advanced-connection-options", + "title": "Advanced Connection Options", + "headings": [ + "Options" + ], + "paragraphs": "The advanced connection options provide additional ways to connect Compass\nto MongoDB. Advanced connection options allow you to specify authentication,\nTLS/SSL, and SSH to securely connect to your deployment. Learn how to select a connection string scheme for your deployment. Learn how to connect your deployments that require authentication. Learn how to connect your deployments with TLS/SSL.
Learn how to connect your deployments with a proxy method or SSH tunnel. Learn how to connect your deployments with Queryable Encryption. Learn about additional advanced connection options for your deployments.", + "code": [], + "preview": null, + "tags": null, + "facets": { + "target_product": [ + "compass" + ] + } + }, + { + "slug": "connect/connect-from-the-command-line", + "title": "Start Compass from the Command Line", + "headings": [ + "Compass Executable Location", + "Command Line Connection Specification", + "Basic Connection String", + "Username and Password Parameters", + "Configuration File Connection Specification", + "Learn More" + ], + "paragraphs": "There are two ways to start Compass from the command line: If your connection string contains sensitive information, consider\nusing a configuration file to avoid exposing that information on\nthe command line. You can start a Compass session from the command line. In enterprise environments, a scripted start can make it easier to\ndeploy Compass . For example, to limit access to sensitive\nsystems, you can configure a command line start so that Compass \ncan run on a jump host. Specify a connection string on the command line Specify connection details in a file The name and location of the Compass executable varies by\noperating system. Operating System Executable Name Location Linux mongodb-compass The installer installs it in the /usr/bin directory. Windows MongoDBCompass.exe The installer installs it in a folder you pick during the installation\nprocess. MacOS MongoDB Compass The installer installs it under the Applications folder: The command line invocation for Compass has two components, the\npath to the Compass executable and a connection string. You can\noptionally provide the username and password on the command line or\nthe configuration file. The format is: If the username and password arguments are not provided, Compass uses\nthe credentials in the connection string. The following example uses a basic connection string for a MongoDB\nUniversity training cluster. Modify the\nconnection details to connect to your MongoDB installation: This example uses the username and password parameters to\nauthenticate Compass to the MongoDB deployment provided in the\nconnection string: The command line invocation for Compass can specify a\nconfiguration file. The format is: The components of the command invocation are: To create the connection configuration file, follow the steps to\n export the connection details \nfrom your Compass instance. The export process creates a file that\nincludes all of your favorite connections. To open Compass and connect to your MongoDB instance, use a\ncommand line like: If you have multiple favorites, include the connection id from the\nconfiguration file to specify which connection to use: The path to the Compass executable A connection configuration file An optional passphrase for the connection configuration file An optional connection id If you export your saved connections without using a passphrase, the\nconfiguration file contains the plaintext version of your username\nand password. Use a passphrase to encrypt the password. 
Command Line Options", + "code": [ + { + "lang": "shell", + "value": "/Applications/MongoDB\\ Compass.app/Contents/MacOS/MongoDB\\ Compass" + }, + { + "lang": "shell", + "value": " \n \n --username --password " + }, + { + "lang": "shell", + "value": "mongodb-compass mongodb+srv://cluster0.xxxxxx.mongodb.net/library" + }, + { + "lang": "shell", + "value": "mongodb-compass mongodb+srv://cluster0.xxxxxx.mongodb.net/library\n--username user1 --password password1" + }, + { + "lang": "shell", + "value": " \\\n --file= \\\n [--passphrase=] \\\n []" + }, + { + "lang": null, + "value": "mongodb-compass --file=learningConnectionFile \\\n --passphrase=superSecret" + }, + { + "lang": null, + "value": "mongodb-compass --file=multipleConnectionFile \\\n --passphrase=superSecret \\\n 27ba0eda-c27e-46f5-a74a-2c041b1b58c4" + } + ], + "preview": "There are two ways to start Compass from the command line:", + "tags": null, + "facets": { + "target_product": [ + "compass" + ] + } + }, + { + "slug": "connect/connections", + "title": "Connections Sidebar", + "headings": [ + "Connections List", + "Connect to MongoDB", + "Connection Options" + ], + "paragraphs": "The Connections Sidebar on the left of the Compass window\ncontains the following options: Starting in version 1.44.0, you can connect to multiple MongoDB\ninstances at once through Compass. icon to add new connections icon to import and export connections Search bar to search active connections Connections list The Connections list in the sidebar contains your\n favorite and saved connections. Favorite\nconnections have a next to the connection name and\nalways appear at the top of the connections list. To connect to a saved or favorite deployment, click the name of the\nconnection in the Connections list. Once you connect,\nCompass shows a success toast and displays a green dot next to the\nconnection name. If a connection error occurs, Compass shows a toast with error\ninformation and displays a red triangle next to the connection name. To\nsee additional error information, click Review in the error\ntoast. To add a new connection to Compass, click the button at\nthe top of the Connections Sidebar to open the\n New Connection modal. For more information, see\n Connect to MongoDB . To switch between connections, click the name of the connection you want\nto interact with to open the Databases tab. When you open a\ndatabase, MongoDB opens a new tab and labels it with the name of the\nMongoDB instance that contains that database. For more information about\nthe Databases tab and instructions on viewing a database,\nsee Databases . To see connection options, hover over the name of the connection in the\n Connections list. Then, click on the \nicon to open a menu with the following items: To open the MongoDB Shell, click the icon on the right\nof the connection name. 
View performance metrics Show connection information Refresh databases Disconnect Copy connection string Favorite (or Unfavorite) Duplicate Remove", + "code": [], + "preview": "The Connections Sidebar on the left of the Compass window\ncontains the following options:", + "tags": null, + "facets": { + "target_product": [ + "compass" + ] + } + }, + { + "slug": "connect/disconnect", + "title": "Disconnect from MongoDB", + "headings": [ + "Steps", + "Click Connect in the menu bar.", + "Select Disconnect from the dropdown menu.", + "Learn more" + ], + "paragraphs": "Disconnecting from a MongoDB deployment closes the Compass connection to\nthe active deployment and returns the Compass view to the initial\nconnection dialog. To create a new connection, see Connect to MongoDB .", + "code": [], + "preview": "Disconnecting from a MongoDB deployment closes the Compass connection to\nthe active deployment and returns the Compass view to the initial\nconnection dialog.", + "tags": null, + "facets": { + "target_product": [ + "compass" + ] + } + }, + { + "slug": "connect/favorite-connections/import-export-cli/export", + "title": "Export Saved Connections with the CLI", + "headings": [ + "About This Task", + "Export Encrypted Saved Connections", + "Procedure", + "Example", + "Results", + "Export Unencrypted Saved Connections", + "Procedure", + "Example", + "Results", + "Next Steps" + ], + "paragraphs": "You can use the Compass CLI (Command-Line Interface) to\nexport saved connections. Colleagues can import your saved connections, or\nyou can use them yourself in your other workspaces. When you export saved connections, Compass exports the list\nof connections as a JSON file. By default, when you export saved connections, passwords are included in\nplaintext. To protect sensitive connection information, encrypt the exported\nfile with a passphrase. This section shows how to export encrypted saved connections. When\nyou encrypt the export file, users must specify the matching passphrase\nto import the connections. To export encrypted saved connections with the Compass \n CLI (Command-Line Interface) , specify: Your operation should resemble the following prototype: The path to the MongoDB Compass executable. The name and file path of the\nexecutable depend on your operating system. The --export-connections option set to the destination of the\noutput file. The --passphrase option set to a passphrase used to encrypt the\noutput file. This example exports saved Compass connections to a file with\nthe path /tmp/compass-connections/favorites-encrypted.json . The exported file\nis encrypted with the passphrase abc123 . Run the following command in the folder containing your MongoDB Compass\nexecutable: You will see this output: The name and file path of the executable depend on your operating\nsystem. The preceding command is for macOS. After the export completes, the\n /tmp/compass-connections/favorites-encrypted.json file resembles the\nfollowing: Sensitive connection information is encrypted in the\n connectionSecrets field. This section shows how to export unencrypted saved connections. If\nyou do not encrypt the export file, database usernames and passwords are\nexported in plaintext. Only export unencrypted connections if no other\nusers will have access to the exported file. To export encrypted saved connections with the Compass \n CLI (Command-Line Interface) , specify: Your operation should resemble the following prototype: The path to the MongoDB Compass executable. 
The name and file path of the\nexecutable depend on your operating system. The --export-connections option set to the destination of the\noutput file. This example exports saved Compass connections to a file with\nthe path /tmp/compass-connections/favorites.json . Run the following command in the folder containing your MongoDB Compass\nexecutable: You will see this output: The name and file path of the executable depend on your operating\nsystem. The preceding command is for macOS. After the export completes, the\n /tmp/compass-connections/favorites.json file resembles the\nfollowing: To learn how to import exported connections, see\n Import Saved Connections with the CLI .", + "code": [ + { + "lang": "sh", + "value": " \\\n--export-connections= \\\n--passphrase=" + }, + { + "lang": "sh", + "value": "./MongoDB\\ Compass \\\n--export-connections=/tmp/compass-connections/favorites-encrypted.json \\\n--passphrase=abc123" + }, + { + "lang": "sh", + "value": "Exporting connections to \"/tmp/compass-connections/favorites-encrypted.json\" (with passphrase)" + }, + { + "lang": "json", + "value": "{\n \"type\": \"Compass Connections\",\n \"version\": {\n \"$numberInt\": \"1\"\n },\n \"connections\": [\n {\n \"id\": \"5a92e195-3ef5-49ae-aff6-720af362770d\",\n \"connectionOptions\": {\n \"connectionString\": \"mongodb+srv://jallen@cluster0.ylwlz.mongodb.net/\"\n },\n \"favorite\": {\n \"name\": \"Dochub\",\n \"color\": \"color7\"\n },\n \"lastUsed\": {\n \"$date\": {\n \"$numberLong\": \"1663785601002\"\n }\n },\n \"connectionSecrets\": \"AAGRWyDUI+Jbc9GkvSpEZeFtbvSzqtcOpA+1zLi5fma3AISOOVVBJBPqqh/a6VeNyEcf9TdX6aCqSpagXgMAOmmN0XgkJ4wxwBuSZwZH/h1dlgEFYqEG9Oh88e5z\"\n },\n {\n \"id\": \"655f3e6e-b13b-4813-8578-50d896bd9240\",\n \"connectionOptions\": {\n \"connectionString\": \"mongodb://localhost:27017/\"\n },\n \"favorite\": {\n \"name\": \"Local Host\",\n \"color\": \"color7\"\n },\n \"lastUsed\": {\n \"$date\": {\n \"$numberLong\": \"1663790327679\"\n }\n },\n \"connectionSecrets\": \"AAG63lys6oVtPmCGVs7wYkTCjFU0yXi9rYUYCKuWGNMSNBy4rAZlu06b/qDblON4OBXDJzhPNQ/WKs79veewNw==\"\n }\n ]\n}" + }, + { + "lang": "sh", + "value": " \\\n--export-connections=" + }, + { + "lang": "sh", + "value": "./MongoDB\\ Compass \\\n--export-connections=/tmp/compass-connections/favorites.json" + }, + { + "lang": "sh", + "value": "Exporting connections to \"/tmp/compass-connections/favorites.json\" (without passphrase)" + }, + { + "lang": "json", + "value": "{\n \"type\": \"Compass Connections\",\n \"version\": {\n \"$numberInt\": \"1\"\n },\n \"connections\": [\n {\n \"id\": \"5a92e195-3ef5-49ae-aff6-720af362770d\",\n \"connectionOptions\": {\n \"connectionString\": \"\"\n },\n \"favorite\": {\n \"name\": \"QA Cluster\",\n \"color\": \"color7\"\n },\n \"lastUsed\": {\n \"$date\": {\n \"$numberLong\": \"1663785601002\"\n }\n }\n },\n {\n \"id\": \"655f3e6e-b13b-4813-8578-50d896bd9240\",\n \"connectionOptions\": {\n \"connectionString\": \"mongodb://localhost:27017/\"\n },\n \"favorite\": {\n \"name\": \"Local Host\",\n \"color\": \"color7\"\n },\n \"lastUsed\": {\n \"$date\": {\n \"$numberLong\": \"1663790327679\"\n }\n }\n }\n ]\n}" + } + ], + "preview": "You can use the Compass CLI (Command-Line Interface) to\nexport saved connections. 
Colleagues can import your saved connections, or\nyou can use them yourself in your other workspaces.", + "tags": null, + "facets": { + "target_product": [ + "compass" + ] + } + }, + { + "slug": "connect/favorite-connections/import-export-cli/import", + "title": "Import Saved Connections with the CLI", + "headings": [ + "Prerequisite", + "Import Encrypted Saved Connections", + "Procedure", + "Example", + "Results", + "Errors", + "Import Unencrypted Saved Connections", + "Procedure", + "Example", + "Results" + ], + "paragraphs": "You can use the Compass CLI (Command-Line Interface) to\nimport saved connections. This lets you use saved connections from\nother workspaces or connections used by other team members. To import saved connections, you must first export the\nconnections . Use this procedure to import a list of saved connections that is\n encrypted with a passphrase . To import unencrypted saved connections with the Compass \n CLI (Command-Line Interface) , specify: Your operation should resemble the following prototype: The path to the MongoDB Compass executable. The name and filepath of the\nexecutable depend on your operating system. The --import-connections option set to the destination of the file\ncontaining saved connections. The --passphrase option set to the passphrase used to encrypt the\nexported file. The following example imports saved Compass connections from\na file with the path\n /tmp/compass-connections/favorites-encrypted.json that is encrypted\nwith the passphrase abc123 . Run the following command in the folder containing your MongoDB Compass\nexecutable: The name and file path of the executable depend on your operating\nsystem. The preceding command is for macOS. After you run the import command, you will see this output: MongoDB Compass starts and you will see the imported connections under\n Saved Connections on the Connect screen. When you try to import encrypted saved connections, you may\nsee these errors: If do not specify a passphrase, you will see this error: If you specify an incorrect passphrase, you will see this error: Use this procedure to import a list of saved connections that is not\nencrypted with a passphrase. To import unencrypted saved connections with the Compass \n CLI (Command-Line Interface) , specify: Your operation should resemble the following prototype: The path to the MongoDB Compass executable. The name and file path of the\nexecutable depend on your operating system. The --import-connections option set to the destination of the file\ncontaining saved connections. The following example imports saved Compass connections from\na file with the path /tmp/compass-connections/favorites.json . Run the following command in the folder containing your MongoDB Compass\nexecutable: The name and file path of the executable depend on your operating\nsystem. The preceding command is for macOS. 
After you run the import command, you will see this output: MongoDB Compass starts and you will see the imported connections under\n Saved Connections on the Connect screen.", + "code": [ + { + "lang": "sh", + "value": " \\\n--import-connections= \\\n--passphrase=" + }, + { + "lang": "sh", + "value": "./MongoDB\\ Compass \\\n--import-connections=/tmp/compass-connections/favorites-encrypted.json \\\n--passphrase=abc123" + }, + { + "lang": "sh", + "value": "Importing connections from \"/tmp/compass-connections/favorites-encrypted.json\" (with passphrase)" + }, + { + "lang": "sh", + "value": "Failed to perform operation Be [Error]: Input file contains encrypted\nsecrets but no passphrase was provided" + }, + { + "lang": "sh", + "value": "Failed to perform operation Be [Error]: Cannot decrypt due to corrupt\ndata or wrong passphrase" + }, + { + "lang": "sh", + "value": " --import-connections=" + }, + { + "lang": "sh", + "value": "./MongoDB\\ Compass --import-connections=/tmp/compass-connections/favorites.json" + }, + { + "lang": "sh", + "value": "Importing connections from \"/tmp/compass-connections/favorites.json\" (without passphrase)" + } + ], + "preview": "You can use the Compass CLI (Command-Line Interface) to\nimport saved connections. This lets you use saved connections from\nother workspaces or connections used by other team members.", + "tags": null, + "facets": { + "target_product": [ + "compass" + ] + } + }, + { + "slug": "connect/favorite-connections/import-export-ui/export", + "title": "Export Saved Connections in Compass", + "headings": [ + "About This Task", + "Procedure", + "Select Export saved connections.", + "Next Steps" + ], + "paragraphs": "You can export saved connections in MongoDB Compass . Colleagues can import your\nsaved connections, or you can use them in your other workspaces. When you export saved connections, Compass exports the list\nof connections as a JSON file. By default, when you export saved connections, passwords are included in\nplaintext. To protect sensitive connection information, encrypt the exported\nfile with a passphrase. To export encrypted saved connections with Compass : On the Connect screen, hover over Saved Connections in\nthe left-side navigation and click the Ellipses (...) button. In the Export saved connections dialog box, select the\nconnection names you want to export. Under Target File , click Select a file... \nto specify the output file destination. Optional . Toggle Remove secrets to omit passwords,\naccess tokens, and other sensitive information from the exported file. If you select Remove secrets , you cannot specify an encryption\npassword. Optional . Specify an Encryption Password to encrypt\npasswords, access tokens, and other sensitive information. If you do not encrypt the export file, passwords are exported in\nplaintext. Only export unencrypted connections if no other users will have\naccess to the exported file. Click Export to export your saved connections to\na JSON file. To learn how to import saved connections, see\n Import Saved Connections in Compass .", + "code": [], + "preview": "You can export saved connections in MongoDB Compass. 
Colleagues can import your\nsaved connections, or you can use them in your other workspaces.", + "tags": null, + "facets": { + "target_product": [ + "compass" + ] + } + }, + { + "slug": "connect/favorite-connections/import-export-ui/import", + "title": "Import Saved Connections in Compass", + "headings": [ + "Prerequisite", + "Procedure", + "Select Import saved connections.", + "Under Source File, click Select a file.", + "Select the exported JSON file you want to import.", + "Select the connection names you want to import.", + "Click Import.", + "Results" + ], + "paragraphs": "You can import saved connections in MongoDB Compass . This lets you use saved\nconnections from other workspaces or connections used by other team members. Before you import saved connections, you must export the\nconnections . To import saved connections from a JSON file into Compass : On the Connect screen, hover over Saved Connections \nin the left-side navigation and click the Ellipses (...) \nbutton. If your file is encrypted with a password, enter the passphrase\nunder Decryption Password . If the connection name already exists under your saved connections,\n Compass overwrites the existing connection with the imported\nconnection. After the connections are imported, they appear on the left-side\nnavigation of the Connect screen under Saved Connections .", + "code": [], + "preview": "You can import saved connections in MongoDB Compass. This lets you use saved\nconnections from other workspaces or connections used by other team members.", + "tags": null, + "facets": { + "target_product": [ + "compass" + ] + } + }, + { + "slug": "connect/favorite-connections/import-export", + "title": "Export and Import Saved Connections", + "headings": [ + "Tasks" + ], + "paragraphs": "To quickly connect to a shared deployment, export and share your\nsaved connections. Colleagues can import your saved connections, or you can\nuse them yourself in your other workspaces. You can manage saved connections\nin the Compass UI (User Interface) or with the Compass \n CLI (Command-Line Interface) . By default, when you export saved connections, passwords are included in\nplaintext. To protect sensitive connection information, encrypt the exported\nfile with a passphrase. When you do, users must specify the matching\npassphrase to import the connections. To learn how to export and import saved connections in the Compass \n UI (User Interface) , see: To learn how to export and import saved connections with the\n CLI (Command-Line Interface) , see: Export Saved Connections in Compass Import Saved Connections in Compass Export Saved Connections with the CLI Import Saved Connections with the CLI", + "code": [], + "preview": "To quickly connect to a shared deployment, export and share your\nsaved connections. Colleagues can import your saved connections, or you can\nuse them yourself in your other workspaces. You can manage saved connections\nin the Compass UI (User Interface) or with the Compass\nCLI (Command-Line Interface).", + "tags": null, + "facets": { + "target_product": [ + "compass" + ] + } + }, + { + "slug": "connect/favorite-connections", + "title": "Favorite Connections", + "headings": [ + "Save a Favorite Connection", + "Save from New Connection Modal", + "Configure your connection form with a connection you would like to save.", + "Click the Favorite this connection box at the bottom of the form.", + "(Optional). 
Enter a name for this favorite connection.", + "(Optional) Select a color for your favorite to more easily identify the favorite connection.", + "Click Save or Connect.", + "Save after Connecting to a Deployment", + "Click the next to the connection name.", + "Click the Favorite option in the menu.", + "Considerations" + ], + "paragraphs": "Compass allows you to save MongoDB connection configurations to\neasily reconnect to the same MongoDB deployment using the same\nspecifications. Starting in version 1.39.2, MongoDB Compass no longer supports migrating from legacy\nconnection files that pre-date version 1.31.0. Legacy connections refer to an\ninternal Compass connection-options format that is stored on disk and no\nlonger supported after version 1.39.0. If you have legacy connections saved in your favorites, export the\nconnections on version 1.39.0 to convert them\ninto the new format before updating to version 1.39.2 or later. You can save a favorite connection from the New Connection\nModal or after you successfully connect to a MongoDB deployment. On macOS systems, the first time that you update MongoDB Compass to version\n1.20 or later, you will need to allow access to your system storage\n for each saved connection in Recents and\n Favorites . To learn more, see Allow Keychain Access for Recent and Favorite Connections . If you run MongoDB Compass on KDE Linux, you must have GNOME Keyring\ninstalled to successfully load saved connections. The Electron module\n Keytar uses GNOME Keyring\nto securely store the database credentials for your connections.", + "code": [], + "preview": "Compass allows you to save MongoDB connection configurations to\neasily reconnect to the same MongoDB deployment using the same\nspecifications.", + "tags": null, + "facets": { + "target_product": [ + "compass" + ] + } + }, + { + "slug": "connect/required-access", + "title": "Required Access", + "headings": [], + "paragraphs": "MongoDB Compass users require specific privileges to access various\n Compass features if authentication/authorization is enforced on\nthe connected MongoDB instance. The following table lists the privileges required to access the\nfeatures as well as the built-in roles that can provide these\nprivileges: Compass View Capability Privilege(s) MongoDB Built-In Role Home/MongoDB Instance View performance clusterMonitor Database Create a Database createCollection readWrite Database Drop a Database dropDatabase dbAdmin Collection Create a Collection createCollection readWrite Collection Drop a Collection dropCollection readWrite Schema Query/View a Schema find read Documents Query/View a Document find read Documents Clone, insert, update, delete a document readWrite Indexes View an index listIndexes read Indexes Create, drop an index readWrite Explain Plan Query/View a query plan find read Validation View rules listCollections read Validation Update rules collMod dbAdmin The built-in roles may provide more access than required. 
You can\nalso create a User-Defined Roles on Self-Managed Deployments to grant specific privileges.", + "code": [], + "preview": "MongoDB Compass users require specific privileges to access various\nCompass features if authentication/authorization is enforced on\nthe connected MongoDB instance.", + "tags": null, + "facets": { + "target_product": [ + "compass" + ] + } + }, + { + "slug": "connect", + "title": "Connect to MongoDB", + "headings": [ + "Considerations", + "Connect", + "Provide your Connection String", + "Open the New Connection modal.", + "Paste your connection string.", + "(Optional). Name your connection.", + "(Optional). Choose a color for your connection.", + "(Optional). Favorite your connection.", + "Connect to your cluster.", + "Use Advanced Connection Options", + "Connect to Compass from the Command Line" + ], + "paragraphs": "This page outlines how to use MongoDB Compass to connect to a\nMongoDB host. You can connect to a standalone, replica set,\nor sharded cluster host. If you need to install Compass , see\n Download and Install Compass for instructions. If you need to create a MongoDB host, consider using\n MongoDB Atlas .\nAtlas is a cloud-hosted database-as-a-service which requires no\ninstallation, offers a free tier to get started, and provides a\ncopyable URI to easily connect Compass to your deployment. When connecting Compass to a replica set, it is not\nrecommended to connect directly to an individual replica\nset member. If the member to which you are connected switches from a\n primary member to a\n secondary or vice versa as\nthe result of an election, Compass may either forcibly close\nthe connection or display stale data. Instead, to connect to a replica set, use either the replica set\n SRV record or Replica Set Name when\nfilling in your connection information. Starting in MongoDB Compass 1.19, Compass displays a\nwarning message when connected to non-genuine MongoDB instances as\nthese instances may behave differently from the official MongoDB\ninstances; e.g. missing or incomplete features, different feature\nbehaviors, etc. Starting in version 1.39.2, MongoDB Compass no longer supports migrating from legacy\nconnection files that pre-date version 1.31.0. Legacy connections refer to an\ninternal Compass connection-options format that is stored on disk and no\nlonger supported after version 1.39.0. If you have legacy connections saved in your favorites, export the\nconnections on version 1.39.0 to convert them\ninto the new format before updating to version 1.39.2 or later. If your connection name is already in the Connections\nSidebar of your Compass window, click on the connection name to\nconnect. Otherwise, you can connect your deployments by either: Providing your connection string. Specifying Advanced Connection Options .\nAdvanced connection options allow you to specify authentication,\nTLS/SSL, and SSH connection options. To learn more, see\n Advanced Connection Options . Starting in version 1.44.0, you can connect to multiple MongoDB\ninstances at once through Compass. When you provide a connection string, Compass supports\nmost Connection String Options \nsupported by MongoDB. By default, Compass 's default\n socketTimeoutMS \nvalue is 60000, or 60 seconds. If you are frequently experiencing\ntimeouts in Compass , consider setting this option to a\nhigher value in your connection string. For a complete list of the connection string options that\n Compass supports, see the\n Compass Connection README \non GitHub. 
In the bottom panel of the Connections Sidebar , click\n Add New Connection to open the New Connection \nmodal. If you already have connections listed in the Connections\nSidebar , click the icon on the top right of the sidebar\nto open the New Connection modal. If you have the connection string for your deployment available, you can\npaste the string directly into the dialog box. You can use either the\n Standard Connection String Format \nor the DNS Seedlist Connection Format . To obtain the connection string for an Atlas cluster: To learn how to format the connection string for a deployment that is not\nhosted on Atlas , see Connection String URI Format . Navigate to your Atlas Clusters view. Click Connect for your desired cluster. Click Connect with MongoDB Compass . Copy the provided connection string. If you click into the MongoDB Compass connection form to edit your connection\nstring, Compass shows credentials in plaintext by default. To edit your connection string credentials without exposing your password,\nuse the Advanced Connection Options > Authentication tab on the\n Compass connection form. Use the name field to enter a name for your connection. If you do\nnot specify a name, Compass uses the cluster's hostname as the\nconnection name. Use the color drop-down menu to select a label color for your\nconnection. When you connect to a connection, the label color is\nthe background color of tabs that reference your connection. If you want to save the connection as a favorite, check the\n Favorite this connection option in the modal. Click Save or Save & Connect to navigate to the\n Compass Home Page . Once you are connected to your MongoDB deployment, you may require\nspecific user roles to access\nvarious Compass features. For more information on the required\nroles for Compass features, see Required Access . For details, see Advanced Connection Options . The advanced connection options provide additional ways to connect Compass\nto MongoDB. Advanced connection options allow you to specify authentication,\nTLS/SSL, and SSH to securely connect to your deployment. For details, see Start Compass from the Command Line . You can start a Compass session from the command line. In enterprise environments, a scripted start can make it easier to\ndeploy Compass . For example, to limit access to sensitive\nsystems, you can configure a command line start so that Compass \ncan run on a jump host. 
To learn how to disconnect your deployment, see Disconnect from MongoDB", + "code": [], + "preview": "How to use MongoDB Compass to connect to a MongoDB standalone, replica set, or sharded cluster host.", + "tags": "atlas, server", + "facets": { + "genre": [ + "tutorial" + ], + "target_product": [ + "compass" + ] + } + }, + { + "slug": "create-agg-pipeline", + "title": "Create an Aggregation Pipeline", + "headings": [ + "About this Task", + "Before You Begin", + "Steps", + "Select the Stages view", + "Add an aggregation stage", + "Select an aggregation pipeline stage", + "Fill in your pipeline stage", + "Add additional pipeline stages", + "Run the pipeline", + "Select the Stages view", + "Open the Stage Wizard card", + "(Optional) Search for an aggregation use case", + "Select an aggregation pipeline stage use case", + "Fill in your pipeline stage", + "Add additional pipeline stages", + "Run the pipeline", + "Select the Stages view", + "Add an aggregation stage", + "Open Focus Mode", + "Select an aggregation pipeline stage", + "Fill in your pipeline stage", + "Add additional pipeline stages", + "Navigate between stages", + "Run the pipeline", + "Select the Text view", + "Enter your aggregation pipeline", + "Run the pipeline", + "Learn More" + ], + "paragraphs": "The Aggregation Pipeline Builder in MongoDB Compass helps you create\n aggregation pipelines to process\ndocuments from a collection or view and return computed results. MongoDB Compass provides different modes to create aggregation pipelines: Stage View Mode, a visual pipeline editor that preloads pipeline syntax based\non your selected stages. Stage Wizard, a feature of Stage View Mode that provides a set of templates\nfor simple aggregation stage use cases. The Stage Wizard only includes\nsimple use cases to help you get started with your aggregation pipeline. Focus Mode, a feature of Stage View Mode where you edit one pipeline stage\nat a time. Focus Mode helps you manage complex or deeply nested aggregation\npipeline stages. Text View Mode, a text-based pipeline editor that accepts raw\npipeline syntax. To build an aggregation pipeline, choose a collection and click the\n Aggregations tab. Compass displays a blank\naggregation pipeline. The Preview of Documents in the\nCollection section shows 10 documents randomly sampled from the chosen\ncollection. When you connect Compass to a MongoDB deployment hosted on\n Atlas ,\nadditional Atlas-only stages\n $search and\n $searchMeta \nare available in the Aggregation Pipeline Builder. Use these stages\nto perform\n full-text search \non Atlas collections. To see how to create an aggregation pipeline, select the tab corresponding to\nyour chosen view mode: In the aggregation pipeline pane, ensure the\n {} Stages toggle switch is selected. At the bottom of the aggregation pipeline pane, click the\n + Add Stage button. On the upper-left corner of the aggregation stage card,\nclick the Select drop-down menu and select the\n aggregation pipeline stage to use for the first\nstage of the pipeline. Fill in your selected stage. You can adjust the width of the\npipeline stage by dragging its border to the right. For example, the following pipeline excludes the first\n $match stage\nand only includes the\n $project \nstage: The toggle to the right of each pipeline stage name dictates whether that\nstage is included in the pipeline. Toggling a pipeline stage also updates the\npipeline preview, which updates based on whether or not that stage is included. 
To add an additional pipeline stage after your last aggregation\nstage, click Add Stage . To add an aggregation stage\nbefore your most recently added stage, click the + icon\nabove the stage card. Repeat steps 3 and 4 for each additional stage. You can change the order of pipeline stages by dragging\nthe header of each stage card. At the top-right corner of the pipeline builder, click\n Run . Compass returns your results in the\ndocument view. Some aggregation operators, like $merge and\n $out , can modify your collection's data. If your aggregation pipeline contains operators that can modify\nyour collection's data, you are prompted for confirmation before\nthe pipeline is executed. In the aggregation pipeline pane, ensure the\n {} Stages toggle switch is selected. To the right of the view mode toggle, click the wand icon to open\nthe Stage Wizard card. On the Stage Wizard card, you can filter the\nuse cases by searching for keywords associated with the use case or\naggregation stage. On the Stage Wizard card, select a stage use\ncase for the first stage of your pipeline. You can click the stage\ncard to add it to the end of your pipeline or drag it to your\npreferred position. After you select a use case, Compass populates the stage\ncard with a form that corresponds to the selected aggregation\npipeline stage. The Stage Wizard use cases include the following aggregation stages: $group $lookup $match $project $sort Fill in the form for your selected stage and click\n Apply . After you click Apply , the form will\nturn into a stage card that you can edit in Stage View Mode, Focus\nMode, or Text View Mode. Compass populates the Stage Output with up to ten sample\noutput documents. You cannot edit an existing stage through the Stage Wizard. The\nStage Wizard can only add new stages. To edit an existing stage,\nuse Stage View Mode, Focus Mode, or Text View Mode. To add more aggregation stages to your pipeline, repeat steps 3\nand 4 for each additional stage. You can change the order of pipeline stages by dragging the\nheader of each stage card. At the top-right corner of the pipeline builder, click\n Run . Compass returns your results in the\ndocument view. In the aggregation pipeline pane, ensure the\n {} Stages toggle switch is selected. If you have not already created an aggregation stage, click the\n + Add Stage button at the bottom of the aggregation\npipeline pane. On the upper-right corner of the stage card, click the\nFocus Mode icon. Click the Select drop-down menu and select the\n aggregation pipeline stage to use for the first\nstage of the pipeline. Fill in your selected stage. Compass populates the\n Stage Output with up to ten sample output documents. You can adjust the width of the Stage Input , stage\neditor, and the Stage Output by dragging their border to\nthe desired size. The toggle to the right of each pipeline stage name dictates whether that\nstage is included in the pipeline. Toggling a pipeline stage also updates the\npipeline preview, which updates based on whether or not that stage is included. Click the Add Stage dropdown to add additional\naggregation stages before or after your last aggregation stage.\nRepeat steps 4 and 5 for each additional stage. 
You can add stages with the following keyboard shortcuts: To add a stage after the current stage: Windows / Linux: Ctrl + Shift + A Mac: \u2318 + Shift + A To add a stage before the current stage: Windows / Linux: Ctrl + Shift + B Mac: \u2318 + Shift + B To navigate between different stages, select the stage you\nwant to edit from the Stage dropdown in the upper-left\ncorner of the Focus Mode modal. You can navigate between stages with the following keyboard\nshortcuts: To go to the stage before the current stage: Windows / Linux: Ctrl + Shift + 9 Mac: \u2318 + Shift + 9 To add a stage before the current stage: Windows / Linux: Ctrl + Shift + 0 Mac: \u2318 + Shift + 0 Click x to exit Focus Mode and select\n Run at the top right of the pipeline builder.\n Compass returns your results in the document view. In the aggregation pipeline pane, click the\n Text toggle switch to enable text mode for pipeline\nediting. Enter valid aggregation syntax into the text editor. The text\neditor provides real-time linting for correct syntax and debugging\ninformation. You can also use Text View Mode to import aggregation pipelines from\nplain text by typing or pasting your pipeline into the text editor. For example, following pipeline limits the query results to 4 \ndocuments. To expand all embedded fields and documents within the preview\nresults, click Output Options and select\n Expand all fields . Click Run at the top right of the pipeline\nbuilder. Compass returns your results in the document view. Aggregation Pipeline Aggregation Pipeline Stages Aggregation Pipeline Builder Settings Export Pipeline to Specific Language", + "code": [ + { + "lang": "javascript", + "value": "[ { \"$limit\" : 4 } ]" + } + ], + "preview": "The Aggregation Pipeline Builder in MongoDB Compass helps you create\naggregation pipelines to process\ndocuments from a collection or view and return computed results.", + "tags": null, + "facets": { + "target_product": [ + "compass" + ] + } + }, + { + "slug": "databases", + "title": "Databases", + "headings": [ + "Databases Tab", + "Create a Database", + "Open the Create Database dialog.", + "Enter database and first collection information.", + "Click Create Database to create the database and its first collection.", + "Drop a Database", + "Click the trash can icon for the database.", + "Confirm the database to delete.", + "Click Drop Database to delete the database.", + "Limitations" + ], + "paragraphs": "A database is a container for collections .\nEach database gets its own set of files on the host file system.\nA single MongoDB server typically has multiple databases. The Databases tab lists the existing databases for your\nMongoDB deployment. To access the Databases tab, click the\ndeployment name in the Connections Sidebar . From this view, you can click a database name in the sidebar to view its\n collections . Alternatively, you can view\ndatabase collections by clicking the desired database in the left-hand navigation. You can also create or\n drop databases from this view. In the Connections Sidebar , click the\n icon to the right of the connection name to bring up the\n Create Database dialog. In the dialog, enter the name of the database to create and its\nfirst collection. Both the database name and the collection name are\nrequired. If you want to create a capped collection ,\nselect the Capped Collection checkbox and enter the maximum bytes. 
If you want to use custom collation on the collection,\nselect the Use Custom Collation checkbox and select the\ndesired collation settings. If your deployment is connected using In-Use Encryption , you can\nuse Queryable Encryption on the newly\ncreated collection. Check the Queryable Encryption option\nand indicate the following encryption properties: Encrypted Fields . (Optional) KMS Provider . (Optional) Key Encryption Key . From the Databases tab, to delete a\ndatabase, click on the trash can icon for that database. A\nconfirmation dialog appears. In the dialog, enter the name of the database to delete. Creating and dropping databases is not permitted in MongoDB Compass Readonly Edition . Creating databases is not permitted if you are connected to a\n Data Lake .", + "code": [], + "preview": "A database is a container for collections.\nEach database gets its own set of files on the host file system.\nA single MongoDB server typically has multiple databases.", + "tags": null, + "facets": { + "target_product": [ + "compass" + ] + } + }, + { + "slug": "documents/clone", + "title": "Clone Documents", + "headings": [], + "paragraphs": "You can insert new documents by cloning the schema and values of\nan existing document in a collection. Select the appropriate tab based on whether you are viewing your\ndocuments in List, JSON, or Table view: When you click the Clone button, Compass opens the\ndocument insertion dialog with the same schema and values as the cloned\ndocument. You can edit any of these fields and values before you insert\nthe new document. To learn more about inserting documents, see\n Insert Documents . Cloning documents is not permitted in\n MongoDB Compass Readonly Edition . To clone a document, hover over the desired document\nand click the Clone button. To clone a document, hover over the desired document\nand click the Clone button. JSON View is available starting in Compass 1.20. To clone a document, hover over the desired document\nand click the Clone button.", + "code": [], + "preview": "You can insert new documents by cloning the schema and values of\nan existing document in a collection.", + "tags": null, + "facets": { + "target_product": [ + "compass" + ] + } + }, + { + "slug": "documents/delete-multiple", + "title": "Delete Multiple Documents", + "headings": [ + "About this Task", + "Before you Begin", + "Steps", + "Apply a query filter", + "Open the Delete Documents modal", + "(Optional) Export the Delete", + "Delete your documents", + "Example", + "Learn More" + ], + "paragraphs": "You can perform bulk delete operations on multiple documents in Compass\nby using the Delete Documents modal. This helps you visualize\ndeletes before applying them. Deleting documents is a permanent action and cannot not be undone.\nValidate documents in the Preview of the\n Delete Documents modal before confirming the\ndelete operation. The Delete Documents modal is available starting\nin Compass version 1.42.0 . For instructions on updating or installing\nthe latest Compass version, see Download and Install Compass . From the Documents tab, input a query into the\n Query Bar to filter deleted documents. To delete\nall documents in the collection, leave the Query Bar blank. On the Documents tab, click the \n Delete button to display the\n Delete Documents modal. 
The following\ntable summarizes the UI (User Interface) of the modal: UI Element Description Query Any filter criteria specified on the\n Query Bar applies to the\n Delete Documents modal.\nTo update the Query , exit the\n Delete Documents modal and modify the\nquery in the Query Bar . Export Opens the Export Delete Query To Language \nmodal, where you can convert the query to a supported\ndriver language. Preview A preview of the documents that will be deleted. You can export the Delete query to a\nsupported driver language using the Export button on\nthe Delete Documents modal. On the Delete Documents modal, click\n Export . The\n Export Delete Query To Language modal displays with\nthe delete syntax populated under My Delete Query . Select a programming language from the drop-down under\n Exported Delete Query . You can convert the command\nto C#, Go, Java, Node, PHP, Python, Ruby, or Rust. The field below\ndisplays the converted syntax. (Optional) Click the Include Import Statements \ncheckbox to include the required import statements for\nthe selected programming language. Click the icon to copy the converted syntax. Click Close . Compass deletes the documents that match the filter\nexpression. On the Delete Documents modal, click\n Delete Documents . Click the red Delete Documents button to confirm\nthe operation. The following example deletes two documents from\nthe movies collection in the sample_mflix dataset . In the Query Bar , enter a filter for movies\nwith a year of 1919 . Click the Delete button, the\n Delete Documents modal displays. The Preview pane shows the documents\nincluded in the delete operation. Click Delete Documents . A confirmation modal\ndisplays. Click the red Delete Documents button to confirm the\noperation. Modify Multiple Documents", + "code": [ + { + "lang": "javascript", + "value": "{ 'year' : 1919 }" + } + ], + "preview": "You can perform bulk delete operations on multiple documents in Compass\nby using the Delete Documents modal. This helps you visualize\ndeletes before applying them.", + "tags": null, + "facets": { + "genre": [ + "tutorial" + ], + "target_product": [ + "compass" + ] + } + }, + { + "slug": "documents/delete", + "title": "Delete Single Document", + "headings": [ + "Delete Multiple Documents" + ], + "paragraphs": "Select the appropriate tab based on whether you are viewing your\ndocuments in List, JSON, or Table view: Once you confirm, Compass deletes the document from the collection. Deleting documents is not permitted in\n MongoDB Compass Readonly Edition . To delete a document, hover over the document and click the\n icon. After you click the delete button, the document is flagged for deletion.\n Compass asks for confirmation that you want to remove the\ndocument. To delete a document, hover over the document and click the\n icon. After you click the delete button, the document is flagged for deletion.\n Compass asks for confirmation that you want to remove the\ndocument. JSON View is available starting in Compass 1.20. To delete a document, hover over the document and click the\n icon. After you click the delete button, the document is flagged for deletion.\n Compass asks for confirmation that you want to remove the\ndocument. You can use the bulk delete operations workflow to delete multiple\ndocuments in Compass. For details, see Delete Multiple Documents . 
You can also use the db.collection.deleteMany() method in the\n embedded MongoDB Shell to delete\nmultiple documents in a single operation.", + "code": [], + "preview": "Select the appropriate tab based on whether you are viewing your\ndocuments in List, JSON, or Table view:", + "tags": null, + "facets": { + "target_product": [ + "compass" + ] + } + }, + { + "slug": "documents/insert", + "title": "Insert Documents", + "headings": [ + "Procedure", + "Add New Fields", + "Change Field Type", + "Limitation" + ], + "paragraphs": "Compass provides two ways to insert documents into your\ncollections: JSON Mode and a Field-by-Field Editor. Inserting documents is not permitted in\n MongoDB Compass Readonly Edition . Allows you to write or paste JSON documents in the editor. Use\nthis mode to insert multiple documents at once as an array. Provides a more interactive experience to create documents, allowing\nyou to select individual field values and types. This mode only\nsupports inserting one document at a time. To insert documents into your collection: Click the Add Data dropdown and select\n Insert Document . Select the appropriate view based on how you would like to\ninsert documents. Click the { } brackets for JSON view. This is the default\nview. Click the list icon for Field-by-Field mode. In JSON format, type or paste the document(s) you want to\ninsert into the collection. To insert multiple documents,\nenter a comma-separated array of JSON documents. The following array inserts 5 documents into\nthe collection: If you do not provide an\n ObjectId in your document,\n Compass automatically generates an ObjectId. Click Insert . For each field in the document, select field type and fill\nin the field name and value. If you do not provide an\n ObjectId in your document,\n Compass automatically generates an ObjectId. To add a new field in the document, hover over the row number\nin the dialog (the row number is not part of the document but\nthe dialog display) and click the icon to\nadd a new field after the selected row. You can also add a new field at the end of the document by\npressing the tab key when your text cursor is in the value of\nthe last document field. You can change the data type of a field by using the data\ntype selectors on the right of the field. To change the _id field to use a custom value, change the\ndata type from ObjectID to string and then overwrite\nthe _id value: Click Insert . 
The Insert Document button is not available if you are connected\nto a Data Lake .", + "code": [ + { + "lang": "json", + "value": "[\n { \"_id\" : 8752, \"title\" : \"Divine Comedy\", \"author\" : \"Dante\", \"copies\" : 1 },\n { \"_id\" : 7000, \"title\" : \"The Odyssey\", \"author\" : \"Homer\", \"copies\" : 10 },\n { \"_id\" : 7020, \"title\" : \"Iliad\", \"author\" : \"Homer\", \"copies\" : 10 },\n { \"_id\" : 8645, \"title\" : \"Eclogues\", \"author\" : \"Dante\", \"copies\" : 2 },\n { \"_id\" : 8751, \"title\" : \"The Banquet\", \"author\" : \"Dante\", \"copies\" : 2 }\n]" + } + ], + "preview": "Compass provides two ways to insert documents into your\ncollections: JSON Mode and a Field-by-Field Editor.", + "tags": null, + "facets": { + "target_product": [ + "compass" + ] + } + }, + { + "slug": "documents/modify-multiple", + "title": "Modify Multiple Documents", + "headings": [ + "About this Task", + "Before you Begin", + "Steps", + "Apply a query filter", + "Open the bulk update modal", + "Enter the update syntax", + "(Optional) Name and save your Update", + "Update your documents", + "Example", + "Learn More" + ], + "paragraphs": "You can perform bulk update operations on multiple documents in Compass\nby using the Update Documents modal. Performing updates with\nthe Update Documents modal helps you visualize updates\nto your data before you apply them. You can use any syntax that works with the update parameter of\n db.collection.updateMany() . The Update Documents modal does not support any options \nparameters such as upsert, writeConcern, or collation. Previews of the documents affected by bulk update operations are\nonly visible if your database is configured to support transactions.\nFor details, see Transactions . The Update Documents modal is available starting\nin Compass version 1.42.0 . For instructions on updating or installing\nthe latest Compass version, see Download and Install Compass . From the Documents tab, input a query into the\n Query bar . The filter criteria of the query specified\napplies to the documents in the Bulk Update modal. If\nyou need to apply an update to all documents in a collection,\nleave the Query bar blank. On the Documents tab, click the \n Update button to display the\n Update Documents modal. The following\ntable summarizes the UI (User Interface) of the modal: UI Element Description Filter Any filter criteria specified on the\n Query Bar applies to the\n Update Documents modal.\nTo update the filter query, exit the\n Update Documents modal and modify the\nquery in the Query Bar . Update The update syntax that is applied to the\ndocuments specified in the filter criteria. You can use\nany syntax that works with the update parameter of the\n db.collection.updateMany() . Preview A preview of documents with the update syntax applied. In the Update text field, provide the update\nsyntax. The number of documents affected by the update displays at\nthe top of the Update Documents modal. The documents under the Preview header show how the\n Update syntax affects documents in your collection. You can name and save the update query in the\n Update Documents modal. Saving your query adds it\nto your favorite queries for\nthat collection and allows you to load and copy the query after\nyou close the modal. Click the Save button on the bottom left of the\nmodal. Enter a name for the update syntax. Click the green Save button. Click Update Documents . Compass applies the Update to the documents\nwithin the Filter expression. 
The following example uses the\n sample_mflix dataset . This example updates the tomatoes.viewer.numReviews and\n tomatoes.viewer.meter fields with the Compass\n Update Documents modal. Apply a filter in the Query Bar to filter movies which\nhave a year of 1917 . Click the Update button, the Update Documents \nmodal displays. In the Update text box, paste the following syntax: This syntax: The Preview section populates with sample documents\naffected by the update query. To view the updates to the numReviews \nand meter fields: Click the Update Documents button to update the collection's\ndata. Increments the\n tomatoes.viewer.numReviews field by 1 . Sets the\n tomatoes.viewer.meter field to 99 . Click the arrow icon next to\n tomatoes . Click the arrow icon next to\n viewer . Delete Multiple Documents Using the Favorites Tab", + "code": [ + { + "lang": "javascript", + "value": "{ 'year' : 1917 }" + }, + { + "lang": "javascript", + "value": "{\n $inc: { \"tomatoes.viewer.numReviews\" : 1},\n $set: { \"tomatoes.viewer.meter\" : 99 }\n}" + } + ], + "preview": "You can perform bulk update operations on multiple documents in Compass\nby using the Update Documents modal. Performing updates with\nthe Update Documents modal helps you visualize updates\nto your data before you apply them.", + "tags": null, + "facets": { + "genre": [ + "tutorial" + ], + "target_product": [ + "compass" + ] + } + }, + { + "slug": "documents/modify", + "title": "Modify Single Document", + "headings": [ + "Limitations", + "Procedure", + "Delete Fields", + "Add New Fields", + "Modify an Existing Field", + "Save Changes", + "Revert a Change", + "Delete Fields", + "Add New Fields", + "Revert a Change", + "Cancel Changes", + "Modify Multiple Documents" + ], + "paragraphs": "You can edit existing documents in your collection. When you edit a document in List or Table view, Compass performs a\n findOneAndUpdate \noperation and updates only those fields that you have\nchanged. When you edit a document in JSON view, Compass performs a\n findOneAndReplace \noperation and replaces the document. Modifying documents is not permitted in\n MongoDB Compass Readonly Edition . Select the appropriate tab based on whether you are viewing your\ndocuments in List, JSON, or Table view: To modify a document, hover over the document and click the pencil\nicon: After you click the pencil icon, the document enters edit mode.\nYou can now make changes to the fields, values, or data types\nof values. To delete a field from a document, click the icon to\nthe left of the field: Once selected, the field is marked for removal and appears\nhighlighted in red. Compass asks for confirmation that you want to\nupdate the document by removing the field. To add a new field in the document after an existing field, hover\nover the row number in the dialog and click on the plus sign. The\nrow number is not part of the document but is part of the dialog display. You can also add a new field at the end of the document by\npressing the tab key when your text cursor is in the value of the\nlast document field. To modify documents, click on existing field names or\nvalues and make changes. In this example, the airline was\nchanged from 4 to 2 . Changed fields appear highlighted in\nyellow: When you edit a document in List or Table view, Compass performs a\n findOneAndUpdate \noperation and updates only those fields that you have\nchanged. 
If Compass detects that you have changed fields\nthat were modified outside of Compass , it notifies you, preventing\nyou from accidentally overwriting the changes made outside of Compass .\nYou can choose to proceed and replace the document by clicking Update ,\nor cancel your changes. When you are finished editing the document, click the Update \nbutton to commit your changes. To revert changes to a document, hover over the edited field\nand click the revert icon which appears to the left\nof the field's line number. To modify a document, hover over the document and click the pencil\nicon: After you click the pencil icon, the document enters edit mode.\nYou can now add, remove, and edit field values by modifying\nthe JSON document. By default, this view hides embedded objects and arrays. To expand\nembedded objects and array elements, hover over the target\ndocument and click the top arrow on the left side of the document. To expand individual objects and arrays, click the arrow to\nthe left of the desired field. JSON View is available starting in Compass 1.20. When you edit a document in JSON view, Compass performs a\n findOneAndReplace \noperation and replaces the document. If Compass detects that you have changed fields\nthat were modified outside of Compass , it notifies you, preventing\nyou from accidentally overwriting the changes made outside of Compass .\nYou can choose to proceed and replace the document by clicking Update ,\nor cancel your changes. To modify a document, hover over the document and click the pencil\nicon: After you click the pencil icon, the document enters edit mode. When you edit a document in List or Table view, Compass performs a\n findOneAndUpdate \noperation and updates only those fields that you have\nchanged. If Compass detects that you have changed fields\nthat were modified outside of Compass , it notifies you, preventing\nyou from accidentally overwriting the changes made outside of Compass .\nYou can choose to proceed and replace the document by clicking Update ,\nor cancel your changes. To delete a field from a document: Click the value of the field you want to delete. Click the icon. Click Update to confirm your changes. To add a new field to the document: Click the field after which you wish to add the new field. Click the icon. Click Add Field after . Populate your newly created field. Click Update to confirm your changes. While modifying a document, you have the option to revert changes\nmade to a field prior to saving the modified document. Click the revert icon which appears on the\nright side of the edited table element. To exit the edit mode and cancel all pending changes to the document,\nclick the Cancel button. You can use the bulk update operations workflow to update multiple\ndocuments in Compass. For details, see Modify Multiple Documents . You can also use the db.collection.updateMany() method in the\n embedded MongoDB Shell to update\nmultiple documents in a single operation.", + "code": [], + "preview": "You can edit existing documents in your collection.", + "tags": null, + "facets": { + "target_product": [ + "compass" + ] + } + }, + { + "slug": "documents/view", + "title": "View Documents", + "headings": [ + "Expand Embedded Objects and Arrays", + "Copy Documents to Clipboard", + "Encrypted Fields" + ], + "paragraphs": "The Documents tab provides three ways to access documents: Use the View buttons to select which view you would like\nto use: The default document view. Documents are shown as individual members\nof a list. 
In this view you can easily expand embedded objects\nand arrays. Documents are shown as properly-formatted JSON objects. In this view\n Compass uses extended JSON to display the data types of\nfields where appropriate. Shows documents as rows of a table, with document fields\nshown in the table columns. With this view, you can more easily\nsee which documents contain specific field values. JSON View is available starting in Compass 1.20. You can reorder the table columns by clicking and dragging the\ncolumn headings. Reordering the columns is a strictly cosmetic\nchange and does not change the underlying data itself. To toggle the expansion of embedded objects and array elements, hover over the target document and click the arrow in\nthe top-left corner. To toggle the expansion of embedded objects and array elements,\nhover over the target document and click the top arrow on the\nleft side of the document. To expand individual objects and arrays, click the arrow to\nthe left of the desired field. To view nested object fields and array elements, hover your cursor\nover a field with a value type of Object or Array and\nclick the button with outward-pointing arrows which appears on the\nright side of the field. Compass opens a new tab in the Table View corresponding\nto the nested element. The following example displays the delays \nobject nested within the airlines collection documents. The\nnew tab displays the two properties of the delays object:\n international and domestic . To return to the original table display, click on the first tab\nin the Table View which displays the name of your collection. To copy a document to your clipboard, hover over the document\nand click the Copy icon: To copy a document to your clipboard, hover over the document\nand click the Copy icon: To copy a document to your clipboard, hover over the document\nand click the Copy icon: Compass obscures document fields encrypted with Field-Level\nEncryption (FLE). Compass displays the values of these fields\nas a series of asterisks. If your deployment is connected using In-Use Encryption and your\ncollection is configured with Queryable Encryption ,\nyou can toggle the In-Use Encryption connection\noption. When In-Use Encryption is enabled, you can modify and view\nthe encrypted values and fields. When In-Use Encryption is disabled, Compass displays the\nvalues of these fields as a series of asterisks. Compass obscures document fields encrypted with Field-Level\nEncryption (FLE). Compass displays the values of these fields\nas a series of asterisks. If your deployment is connected using In-Use Encryption and your\ncollection is configured with Queryable Encryption ,\nyou can toggle the In-Use Encryption connection\noption. When In-Use Encryption is enabled, you can modify and view\nthe encrypted values and fields. When In-Use Encryption is disabled, Compass displays the\nvalues of these fields as a series of asterisks. Compass obscures document fields encrypted with Field-Level\nEncryption (FLE). Compass displays the values of these fields\nas a series of asterisks. If your deployment is connected using In-Use Encryption and your\ncollection is configured with Queryable Encryption ,\nyou can toggle the In-Use Encryption connection\noption. When In-Use Encryption is enabled, you can modify and view\nthe encrypted values and fields. 
When In-Use Encryption is disabled, Compass displays the\nvalues of these fields as a series of asterisks.", + "code": [], + "preview": "The Documents tab provides three ways to access documents:", + "tags": null, + "facets": { + "target_product": [ + "compass" + ] + } + }, + { + "slug": "documents", + "title": "Manage Documents", + "headings": [], + "paragraphs": "Documents are individual records in a MongoDB collection and are the basic unit of data in MongoDB. From the Documents tab, you can\n view , insert ,\n modify ,\n modify multiple ,\n clone ,\n delete , and\n delete multiple \ndocuments in your selected collection or view. From the query bar , you can specify a query to\nfilter the displayed documents. Click Options to\nspecify query options. For query result sets larger than 20 documents, Compass shows\npaginated results. By default, pages display 20 documents at a time. View Documents Insert Documents Modify Single Document Modify Multiple Documents Clone Documents Delete Single Document Delete Multiple Documents", + "code": [], + "preview": "Documents are individual records in a MongoDB collection and are the basic unit of data in MongoDB.", + "tags": null, + "facets": { + "target_product": [ + "compass" + ] + } + }, + { + "slug": "editions", + "title": "Capabilities of Compass Editions", + "headings": [], + "paragraphs": "The following feature chart can help you choose the edition to best\nsuit your needs. Compass Compass Readonly Compass Isolated Interact with documents ,\n collections , and\n databases with full CRUD functionality \u221a \u221a Create and execute queries and\n aggregation pipelines \u221a \u221a \u221a Create and delete indexes \u221a \u221a View and optimize query performance with visual\n explain plans \u221a \u221a \u221a Kerberos, LDAP, and x.509 authentication \u221a \u221a \u221a Schema Analysis \u221a \u221a \u221a Real Time Server Stats \u221a \u221a \u221a Create, delete and edit document validation \nrules \u221a \u221a Error reporting and data usage collection \u221a \u221a Automatic updates \u221a \u221a Embedded shell support \u221a \u221a", + "code": [], + "preview": "The following feature chart can help you choose the edition to best\nsuit your needs.", + "tags": null, + "facets": { + "target_product": [ + "compass" + ] + } + }, + { + "slug": "embedded-shell", + "title": "Embedded MongoDB Shell", + "headings": [ + "Open the Embedded MongoDB Shell", + "Use the Embedded MongoDB Shell", + "Multi-Line Operations in the Embedded MongoDB Shell", + "Disable the Embedded MongoDB Shell", + "In the Compass top menu bar, click MongoDB Compass.", + "In the MongoDB Compass menu, click Settings.", + "Toggle Enable MongoDB Shell.", + "Click Save.", + "Learn More" + ], + "paragraphs": "Starting in version 1.22, MongoDB Compass contains an embedded shell,\n mongosh . mongosh is a JavaScript environment for\ninteracting with MongoDB deployments. You can use mongosh \nto test queries and operations in your database. To open the embedded mongosh , click _MONGOSH \nat the bottom of the MongoDB Compass window. By default, mongosh , connects to the test database.\nTo use a different database, run the following command in\n mongosh : To run an operation in the embedded MongoDB Shell, type the operation\ninto the shell and press Enter . The following example runs a db.collection.find() operation: To write an operation that spans multiple lines in the embedded\n mongosh , begin with the first line, then press\n Shift + Enter to move to the next line of code. 
When you are finished writing your operation, press Enter \nto run it. The following multi-line example runs the $match stage in\nan aggregation pipeline : You can disable the embedded MongoDB shell in Compass to avoid running\nunauthorized commands on mongosh . To disable the embedded MongoDB shell: Compass opens a dialog box where you configure your MongoDB Compass \nsettings. If you select Set Read-Only Mode , Compass \nautomatically unchecks the Enable MongoDB Shell setting. The following links direct to the\n mongosh documentation , which contains\nmore a complete reference for mongosh , including syntax and\nbehaviors. Learn how to perform CRUD operations in\n mongosh . Learn how to run aggregation pipelines \nin mongosh . See a complete list of mongosh methods .", + "code": [ + { + "lang": "sh", + "value": "use " + }, + { + "lang": "sh", + "value": "db.employees.find( { \"last_name\": \"Smith\" } )" + }, + { + "lang": "javascript", + "value": "db.employees.aggregate( [ // press Shift + Enter\n { $match: { \"last_name\": \"Smith\" } } // press Shift + Enter\n] ) // Press Enter" + } + ], + "preview": "Starting in version 1.22, MongoDB Compass contains an embedded shell,\nmongosh. mongosh is a JavaScript environment for\ninteracting with MongoDB deployments. You can use mongosh\nto test queries and operations in your database.", + "tags": null, + "facets": { + "target_product": [ + "compass" + ] + } + }, + { + "slug": "export-query-to-language", + "title": "Export Query to Specific Language", + "headings": [ + "Procedure" + ], + "paragraphs": "New in version 1.15.0 You can export queries created in the query bar to one of the supported languages; Java, Node,\nC#, Python 3, Ruby, Go, Rust, and PHP. This feature allows you to reformat\nand use MongoDB Compass queries in your application. After constructing a query in the\n query bar , click to\nthe right of the Reset button. Click the Export to Language button : In the Export Query To: dropdown, select your desired\nlanguage: The My Query pane on the left displays your query in\n mongo shell syntax. The pane to the right displays your query in the language selected. (Optional) : Check the Include Import Statements option\nto include the required import statements for the language selected. (Optional) : Check the Include Driver Syntax option\nto include application code for the language selected. If you\ninclude driver syntax, the copyable code reflects\n project , sort ,\n maxtimems ,\n collation , skip \nand limit options. Click at the top-right of the formatted\nquery to copy the query for the selected language to your clipboard.\nYou can now easily integrate and execute your created query in\nyour application. Click Close to return to the\n Documents Tab .", + "code": [], + "preview": "New in version 1.15.0", + "tags": null, + "facets": { + "target_product": [ + "compass" + ] + } + }, + { + "slug": "faq", + "title": "FAQ", + "headings": [ + "What is the Compass Isolated Edition?", + "What is the performance impact of running this tool?", + "How does Compass handle confidential data?", + "Does Compass Maintain Logs?", + "Why am I seeing a warning about a non-genuine MongoDB server?", + "What happens to long running queries?", + "Slow Sampling", + "Slow Schema Analysis", + "Why are some features of MongoDB Compass not working?", + "How do I view and modify my Privacy Settings?", + "How do I enable geographic visualizations?" 
+ ], + "paragraphs": "Compass Isolated Edition restricts network\nrequests to only the MongoDB server chosen on the\n Connect screen. All other\noutbound connections are not permitted in this edition, meaning no\nadditional firewall configuration is required when running Compass\nIsolated Edition. Testing has shown that MongoDB Compass has minimal impact in prototype\ndeployments, though additional performance testing and monitoring is in\nprogress. You should only execute queries that are indexed appropriately in the\ndatabase to avoid scanning the entire collection. MongoDB Compass stores and retrieves sensitive data such as passwords\nusing a credentials API specific to the operating system running\n Compass . The credentials API securely handles sensitive\ninformation by encrypting and protecting access to your data. MongoDB Compass stores the following data using the credentials API: MongoDB Compass utilizes a node.js native add-on, Keytar , to communicate with and store\ndata in the operating system's credentials API. For more information on\nhow Keytar operates and the specific APIs it accesses, refer to the\n Keytar Github documentation . MongoDB server passwords, SSH passwords for tunneling, TLS (Transport Layer Security) / SSL (Secure Sockets Layer) passphrases As part of normal operation, MongoDB Compass maintains a running log of\nevents. Compass logs provide a history of operations and can\nhelp diagnose errors. For more information on Compass logs,\nincluding their format and location, see Retrieve Compass Logs . Starting in MongoDB Compass 1.19, Compass displays a\nwarning message when connected to non-genuine MongoDB instances as\nthese instances may behave differently from the official MongoDB\ninstances; e.g. missing or incomplete features, different feature\nbehaviors, etc. As a precaution, Compass aborts long running queries to prevent\nexcessive querying on your database. All queries that Compass sends to your MongoDB instance have a timeout\nflag set which automatically aborts a request if it takes longer than\nthe specified timeout. This timeout is currently set to 10 seconds. If\n sampling on the database takes longer, Compass will\nnotify you about the timeout and give you the options of (a) retrying\nwith a longer timeout (60 seconds) or (b) running a different query. It is recommended that you only increase the sampling timeout if you\nare not connected to a production instance, as this may negatively\naffect the performance and response time of your database. Sampling time may be affected by a number of factors, like load on\nthe server, number of documents and existence of a suitable index\nfor your query. If the database returns documents faster than the specified timeout (10\nor 60 seconds), but the schema analysis of the documents takes longer\nthan expected (due to complex, large documents), Compass gives you the\noption to abort the analysis step and show the partial results. If you can connect to your MongoDB instance using MongoDB Compass but some\nfeatures do not work as expected, your system firewall may be blocking\nnetwork requests required by MongoDB Compass . MongoDB Compass must connect to\nexternal services to enable features such as: To fix this issue, configure your system firewall to allow incoming\nconnections for MongoDB Compass . Ensure ports 80 and 443 are open\nbetween MongoDB Compass and the host for the MongoDB deployment to which\nCompass connects. 
Third party mapping services Intercom Bugsnag for error reporting Compass Isolated Edition restricts network\nrequests to only the MongoDB server chosen on the\n Connect screen. All other\noutbound connections are not permitted in this edition, meaning no\nadditional firewall configuration is required when running Compass\nIsolated Edition. To view and modify your MongoDB Compass privacy settings, from the top-level\nmenu: The privacy settings dialog allows you to toggle various\n MongoDB Compass settings such as enabling automatic updates.\nSee the following screenshot for all available privacy settings options: Click MongoDB Compass . Click Settings . Under Settings , click Privacy . Automatic updates are not available in Compass Isolated Edition . You can enable geographic visualizations in MongoDB Compass in your\nprivacy settings. When Enable Geographic Visualizations is\nselected, MongoDB Compass is allowed to make requests to a third-party\nmapping service. Third party mapping services are not available in\n Compass Isolated Edition .", + "code": [], + "preview": "Testing has shown that MongoDB Compass has minimal impact in prototype\ndeployments, though additional performance testing and monitoring is in\nprogress.", + "tags": null, + "facets": { + "target_product": [ + "compass" + ] + } + }, + { + "slug": "import-export", + "title": "Import and Export Data", + "headings": [ + "Import Data into a Collection", + "Limitations", + "Format Your Data", + "Procedure", + "Connect to the deployment containing the collection you wish to import data into.", + "Navigate to your target collection.", + "Click the Add Data dropdown and select Import JSON or CSV file.", + "Select the appropriate file type.", + "Configure import options.", + "Click Import.", + "Export Data from a Collection", + "Behavior", + "Procedure", + "Connect to the deployment containing the collection you wish to export data from.", + "Navigate to your desired collection.", + "Click the Export Data dropdown and select Export the full collection.", + "Select your file type.", + "Click Export.", + "Connect to the deployment containing the collection you wish to export data from.", + "Navigate to your desired collection.", + "Specify a filter in the query bar.", + "Click the Export Data dropdown and select Export query results.", + "Select document fields to include in your exported file.", + "Select the appropriate file type.", + "Click Export.", + "Connect to the deployment containing the collection you wish to export data from.", + "Navigate to your target collection.", + "Create an aggregation pipeline and run for results.", + "Click Export.", + "Select the appropriate file type.", + "Click Export.", + "Import and Export Data from the Command Line" + ], + "paragraphs": "You can use MongoDB Compass to import and export data to and from\n collections . Compass supports import and\nexport for both JSON and CSV files. To import or export data to\nor from a collection, navigate to the detailed collection view by either\nselecting the collection from the Databases tab or\nclicking the collection in the left-side navigation. Compass is not a tool for backing up data. For information on backup\nsolutions, see Backup Methods for a Self-Managed Deployment . MongoDB Compass can import data into a collection from either a JSON or\n CSV file. Importing data into a collection is not permitted in MongoDB Compass Readonly Edition . Importing data is not available if you are connected to a\n Data Lake . 
Before you can import your data into MongoDB Compass you must first ensure\nthat it is formatted correctly. When importing data from a JSON file, you can format your\ndata as: Newline-delimited documents, or Comma-separated documents in an array The following newline-delimited .json file is formatted\ncorrectly: The following comma-separated .json array file is also\nformatted correctly: Compass ignores line breaks in JSON arrays. Compass automatically generates ObjectIDs for these objects on import\nsince no ObjectIDs were specified in the initial JSON. When importing data from a CSV file, the first line of the\nfile must be a comma-separated list of your document field\nnames. Subsequent lines in the file must be comma-separated\nfield values in the order corresponding with the field order\nin the first line. The following .csv file imports three documents: MongoDB Compass automatically generates ObjectIDs for these objects on import\nsince no ObjectIDs were specified in the initial CSV file. To import your formatted data into a collection: A progress bar displays the status of the import. If an error occurs\nduring import, the progress bar turns red and an error message\nappears in the dialog. To see all errors, click View Log . After successful import, the dialog closes and\nCompass displays the collection page containing the newly imported\ndocuments. To learn how to connect to a deployment, see\n Connect to MongoDB . You can either select the collection from the\n Collections tab or click the collection in the\nleft-hand pane. Select either a JSON or CSV file to import and click Select . If you are importing a CSV file, you may specify fields to import and\nthe types of those fields under Specify Fields and Types . To exclude a field from a CSV file you are importing, uncheck the\ncheckbox next to that field name. To select a type for a field, use\nthe dropdown menu below that field name. Under Options , configure the import options for your use\ncase. If you are importing a CSV file, you may select how your data\nis delimited. For both JSON and CSV file imports, you can toggle\n Ignore empty strings and Stop on errors : If checked, Ignore empty strings drops fields with\nempty string values from your imported documents. The document is\nstill imported with all other fields. If checked, Stop on errors prevents any data from being\nimported in the event of an error. If unchecked, data is inserted\nuntil an error is encountered and successful inserts are not rolled\nback. The import operation will not continue after encountering an\nerror in either case. MongoDB Compass can export data from a collection as either a\n JSON or CSV file. If you specify a\n filter or aggregation pipeline for your collection, Compass only exports\ndocuments which match the specified query or\npipeline results. Avoid exporting to CSV files when possible. CSV files\nmay lose type information and are not suitable for backing up your data. You can use a query filter to export only the documents that match the filter. You can use the Project field in the query bar to\nspecify the fields to return or export. To export an entire collection to a file: To learn how to connect to a deployment, see\n Connect to MongoDB . You can either select the collection from the\n Collections tab or click the collection in the\nleft-hand pane. You can select either JSON or CSV . 
If you select\n JSON , you can expand the Advanced JSON Format dropdown\nand select from the following extended JSON formats: JSON Format Description Default Extended JSON A string format that avoids any loss of BSON type information. This\nis the default Compass setting. Relaxed Extended JSON A string format that emphasizes readability and interoperability at\nthe expense of type preservation. That is, conversion from relaxed\nformat to BSON can lose type information. WARNING: This format is not recommended for data integrity. Canonical Extended JSON A string format that emphasizes type preservation at the expense of\nreadability and interoperability. That is, conversion from canonical\nto BSON will generally preserve type information except in certain\nspecific cases. Choose where to export the file and click Select . A progress bar displays the status of the export. If an error occurs\nduring export, the progress bar turns red and an error message appears\nin the dialog. After successful export, the dialog closes. To export a subset of documents from a collection: To learn how to connect to a deployment, see\n Connect to MongoDB . You can either select the collection from the\n Collections tab or click the collection in the\nleft-hand pane. Specify a filter in the\n query bar to export only the documents\nwhich match the filter. The top section of the export dialog displays the query\nentered in the query bar. You can choose to export All Fields or Select fields in table .\nIn the query bar, you can also use the Project field to specify\nthe fields to return or export. Only fields that are checked are included in the exported file. You can add document fields to include with the Add Field \nbutton if the field you want to include is not automatically detected. Compass samples your collection to pre-populate a list of\nfields. Fields which only appear in a small percentage of\ndocuments may not be automatically detected. For details on sampling, see Sampling . Under Export File Type , select either JSON or CSV .\nIf you select JSON , you can expand the Advanced JSON Format dropdown\nand select from the following extended JSON formats: JSON Format Description Default Extended JSON A string format that avoids any loss of BSON type information. This\nis the default Compass setting. Relaxed Extended JSON A string format that emphasizes readability and interoperability at\nthe expense of type preservation. That is, conversion from relaxed\nformat to BSON can lose type information. WARNING: This format is not recommended for data integrity. Canonical Extended JSON A string format that emphasizes type preservation at the expense of\nreadability and interoperability. That is, conversion from canonical\nto BSON will generally preserve type information except in certain\nspecific cases. Choose where to export the file and click Select . A progress bar displays the status of the export. If an error occurs\nduring export, the progress bar turns red and an error message appears\nin the dialog. After successful export, the dialog closes. To export results from your aggregation pipeline: To learn how to connect to a deployment, see\n Connect to MongoDB . You can either select the collection from the\n Collections tab or click the collection\nin the left-hand pane. To learn how to create an aggregation pipeline, see Create an\nAggregation Pipeline . Under Export File Type , select either JSON \nor CSV . 
If you select JSON , you can expand\nthe Advanced JSON Format dropdown and select from the\nfollowing extended JSON formats: JSON Format Description Sample Document Default Extended JSON A string format that avoids any loss of BSON type information. This\nis the default Compass setting. Relaxed Extended JSON A string format that emphasizes readability and interoperability at\nthe expense of type preservation. That is, conversion from relaxed\nformat to BSON can lose type information. WARNING: This format is not recommended for data integrity. Canonical Extended JSON A string format that emphasizes type preservation at the expense of\nreadability and interoperability. That is, conversion from canonical\nto BSON will generally preserve type information except in certain\nspecific cases. To import and export data from the command line, you can use MongoDB's\n Database Tools . See\n mongoimport and\n mongoexport .", + "code": [ + { + "lang": "javascript", + "value": "{ \"type\": \"home\", \"number\": \"212-555-1234\" }\n{ \"type\": \"cell\", \"number\": \"646-555-4567\" }\n{ \"type\": \"office\", \"number\": \"202-555-0182\"}" + }, + { + "lang": "javascript", + "value": "[{ \"type\": \"home\", \"number\": \"212-555-1234\" }, { \"type\": \"cell\", \"number\": \"646-555-4567\" }, { \"type\": \"office\", \"number\": \"202-555-0182\"}]" + }, + { + "lang": "none", + "value": "name,age,fav_color,pet\nJeff,25,green,Bongo\nAlice,20,purple,Hazel\nTim,32,red,Lassie" + }, + { + "lang": "javascript", + "value": "{\n \"fortyTwo\" : 42,\n \"oneHalf\" : 0.5,\n \"bignumber\" : {\n \"$numberLong\" : \"5000000000\"\n }\n}" + }, + { + "lang": "javascript", + "value": "{\n \"fortyTwo\" : 42,\n \"oneHalf\": 0.5,\n \"bignumber\" : 5000000000\n}" + }, + { + "lang": "javascript", + "value": "{\n \"fortyTwo\" : {\n \"$numberInt\" : \"42\"\n },\n \"oneHalf\" : {\n \"$numberDouble\" : \"0.5\"\n },\n \"bignumber\" : {\n \"$numberLong\" : \"5000000000\"\n }\n}" + } + ], + "preview": "Import and export data with MongoDB Compass.", + "tags": "atlas", + "facets": { + "programming_language": [ + "json" + ], + "genre": [ + "tutorial" + ], + "target_product": [ + "compass" + ] + } + }, + { + "slug": "in-use-encryption-tutorial", + "title": "In-Use Encryption Tutorial", + "headings": [ + "Overview", + "Requirements and Limitations", + "Create Your Encrypted Collection", + "Procedure", + "Click the Additional preferences drop down.", + "Check the Queryable Encryption box.", + "Specify your Encrypted Fields.", + "(Optional) Specify KMS Provider.", + "(Optional) Specify Key Encryption Key.", + "Click Create Database or Create Collection.", + "Import Your Data", + "Click on your collection on the left-hand navigation banner.", + "Click Add Data.", + "Click Import File.", + "Select File and Input File Type.", + "Click Import.", + "Enable and Disable In-Use Encryption", + "Disable In-Use Encryption", + "Click on In-Use Encryption on left-hand navigation bar." + ], + "paragraphs": "In-Use Encryption allows you to connect to your deployments using\n Queryable Encryption . This connection method\nallows you to encrypt a subset of fields in your collections. You can also use CSFLE to encrypt a subset of fields\nin your collection. CSFLE encryption is enabled through the schema editor. This guide shows you how to connect to your deployment and collections using\nQueryable Encryption. This guide uses the air_airlines.json \ndata set in the guided examples. The guide covers the process of importing\nyour data set. 
In-Use Encryption is an Enterprise/Atlas only feature. You need a replica set to use this connection option. Your replica set can be\na single node or larger. You need to connect to your deployment on Compass using In-Use Encryption. For\nmore information on how to connect to your deployment, see In-Use Encryption\nConnection tab . Once your deployment is connected using In-Use Encryption, create your collection\nusing Queryable Encryption. You can create a new database and collection or you\ncan create a new collection in an existing database. Queryable Encryption supports new collections only. You can't enable Queryable Encryption\non existing collections. Click the Create a Database button or the\n Create a Collection button. Enter the name of the database and/or collection. Change the path field value from encryptedField to the\nname of the field you want encrypted. Here, the encrypted field is the base field of the air_airlines \ndata set. For more information, see Encrypted Fields . The collection has a Queryable Encryption badge next to\nits name to indicate that fields in that collection are encrypted. Your imported collection is displayed in the document view. The specified\nencrypted field is marked by a key symbol next to the value. Here, the base field is marked with the key symbol. You can enable and disable In-Use Encryption in your deployment. When In-Use Encryption is enabled : When In-Use Encryption is disabled : You can modify encrypted values. You can insert documents and specified fields will be encrypted. You cannot modify encrypted values. Compass displays the values\nof these fields as a series of asterisks. Inserted documents can not encrypt fields. To disable In-Use Encryption: Click the Enable In-Use Encryption for this connection \ntoggle. Disabling In-Use Encryption only affects how Compass accesses your\ndata.", + "code": [], + "preview": "In-Use Encryption allows you to connect to your deployments using\nQueryable Encryption. This connection method\nallows you to encrypt a subset of fields in your collections.", + "tags": null, + "facets": { + "target_product": [ + "compass" + ] + } + }, + { + "slug": "", + "title": "What is MongoDB Compass?", + "headings": [ + "Visually Explore Your Data", + "Connect to your deployment", + "Import your data", + "Insert documents into your collections", + "Query your data", + "Insert documents into your collections", + "Create aggregation pipelines", + "Connect to your deployment", + "Work with your data in the MongoDB Shell", + "Go Further with Compass" + ], + "paragraphs": "MongoDB Compass is a powerful GUI for querying, aggregating, and analyzing your MongoDB data in a visual environment. Compass is free to use and source available, and can be run on macOS, Windows, and Linux. View installation instructions Explore some of the tasks Compass can help you accomplish, such as importing and managing data from an easy-to-navigate interface. Connect to a MongoDB deployment hosted on MongoDB Atlas, or a deployment hosted locally on your own machine. To learn more, see Connect to MongoDB Import data from CSV or JSON files into your MongoDB database. To learn more, see Import and Export Data Paste documents into the JSON view, or manually insert documents using a field-by-field editor. To learn more, see Insert Documents Write ad-hoc queries or generate queries with the help of AI\nto filter your data. Explore trends and commonalities in\nyour collections. 
To learn more, see: Query Your Data Query with Natural Language Insert documents into your collections in two ways, JSON Mode and a Field-by-Field Editor. To learn more, see Insert Documents Write aggregation pipelines that allow documents in a collection or view to pass through multiple stages where they are processed into a set of aggregated results. To learn more, see Aggregation Pipeline Builder Connect to a MongoDB deployment hosted on MongoDB Atlas, or a deployment hosted locally on your own machine. To learn more, see Connect to MongoDB Use the embedded MongoDB Shell in Compass to control your data in an interactive JavaScript environment. To learn more, see Embedded MongoDB Shell Expand your knowledge of MongoDB by using Compass with other MongoDB products. Use Compass to connect to your Atlas cluster Learn MongoDB Basics with MongoDB University Access more in-depth examples of querying data", + "code": [], + "preview": "MongoDB Compass is a GUI for querying, aggregating, and analyzing your data in a visual environment run on macOS, Windows, and Linux.", + "tags": null, + "facets": { + "target_product": [ + "compass" + ] + } + }, + { + "slug": "indexes/create-search-index", + "title": "Create and Manage an Atlas Search Index", + "headings": [ + "About this Task", + "Procedures", + "Create an Index", + "Open the index creation dialog", + "Specify a name for the index", + "Specify the search index definition", + "Click Create Search Index", + "Example", + "Results", + "Edit an Index", + "From the Indexes tab, click Search Indexes.", + "Hover over the index you want to edit.", + "Click the pencil icon to open the Edit dialog.", + "Make the changes and click Save.", + "Delete an Index", + "From the Indexes tab, click Search Indexes.", + "Hover over the index you want to delete.", + "Click the trash icon to open the confirmation dialog.", + "Type the name of the index you want to delete and click Save.", + "Learn More" + ], + "paragraphs": "You can create Atlas Search indexes in MongoDB Compass . Atlas Search indexes\nlet you query data in Atlas Search . Atlas Search\nindexes enable performant text search queries by mapping search terms to\nthe documents that contain those terms. To create an Atlas Search index, your deployment must be either: Additionally, your deployment must run MongoDB version 7.0 or later. Hosted on MongoDB Atlas and\nhave an Atlas cluster tier of M10 or higher. A local deployment that is set up using the Atlas CLI . From the Indexes tab, click the\n Create button, then click Search Index . Compass provides templates for different kinds of search\nindexes. To learn more, see\n Search Index Definition Syntax . The following example definition creates a search index that indexes all\nfields: To view the status of your created index, go to the Indexes \ntab and set the toggle at the top-right to Search Indexes . The Status column indicates the status of the index. When\nthe status is Ready , your index is ready to be used. For more information on search index statuses, see\n Atlas Search Index Statuses . Manage Indexes Define Atlas Search Field Mappings Define Synonym Mappings in Your Atlas Search Index", + "code": [ + { + "lang": "javascript", + "value": "{\n mappings: { dynamic: true }\n}" + } + ], + "preview": "You can create Atlas Search indexes in MongoDB Compass. Atlas Search indexes\nlet you query data in Atlas Search. 
Atlas Search\nindexes enable performant text search queries by mapping search terms to\nthe documents that contain those terms.", + "tags": null, + "facets": { + "target_product": [ + "compass" + ] + } + }, + { + "slug": "indexes/create-vector-search-index", + "title": "Create an Atlas Vector Search Index", + "headings": [ + "About this Task", + "Steps", + "Open the index creation dialog", + "Specify a name for the index", + "Select the Atlas Search Index type", + "Provide the Atlas Vector Search Index configurations", + "Click Create Search Index", + "Example", + "Results", + "Learn More" + ], + "paragraphs": "You can create Atlas Vector Search indexes using MongoDB Compass . These indexes\nenable you to index vector data and other data types, which facilitates\nsemantic searches on the indexed fields. Atlas Vector Search indexes support\nindexing vector data by identifying the most similar vectors. The index\ndetermines similarity by calculating the distance between the query vector\nand the vectors stored in the index. To create an Atlas Search index, your deployment must align with one of the\nfollowing cases: Additionally, your deployment must run MongoDB version 7.0 or later. A deployment hosted on MongoDB Atlas and\nhave an Atlas cluster tier of M10 or higher. A local deployment that is set up using the Atlas CLI . From the Indexes tab, click the\n Create button and then click Search Index . Select Vector Search . You must initially provide the following default vector search index\nconfigurations. You can modify the configurations later. Field Type Description type string Human-readable label that identifies the type of index. The value must\nbe vector to perform a vector search against the indexed fields. If\nomitted, it defaults to search , which only supports full-text search. path string The field name to index. numDimensions int The number of vector dimensions, which Atlas Search enforces at index- and\nquery-time. This value can't be greater than 4096. similarity string The vector similarity function used to search for the top K-nearest neighbors.\nSelect from the following functions: Function Description euclidean A function that measures the distance between ends of vectors. This function\nallows you to measure similarity based on varying dimensions. cosine A function that measures similarity based on the angle between vectors. This\nfunction allows you to measure similarity that isn't scaled by magnitude. You can't use zero magnitude vectors with cosine. To measure cosine similarity,\nwe recommend that you normalize your vectors and use dotProduct instead. dotProduct A function that measures similarly to cosine, but takes into account the\nmagnitude of the vector. This function allows you to efficiently measure\nsimilarity based on both angle and magnitude. To use dotProduct, you must\nnormalize the vector to unit length at index- and query-time. The following example definition uses the sample_mflix.embedded_movies collection\nand indexes the plot_embedding field to create an Atlas Vector Search index: To view the status of your created index, go to the Indexes \ntab and set the toggle at the top-right to Search Indexes . The Status column indicates the status of the index. When\nthe status is Ready , your index is ready to use. For more information on search index statuses, see\n Atlas Search Index Statuses . 
Manage Indexes How to Index Fields for Vector Search How to Perform Semantic Search Against Data in Your Atlas Cluster", + "code": [ + { + "lang": "javascript", + "value": "{\n \"fields\": [ {\n \"type\": \"vector\",\n \"path\": \"plot_embedding\",\n \"numDimensions\": 1536,\n \"similarity\": \"euclidean\"\n } ]\n}" + } + ], + "preview": "You can create Atlas Vector Search indexes using MongoDB Compass. These indexes\nenable you to index vector data and other data types, which facilitates\nsemantic searches on the indexed fields. Atlas Vector Search indexes support\nindexing vector data by identifying the most similar vectors. The index\ndetermines similarity by calculating the distance between the query vector\nand the vectors stored in the index.", + "tags": null, + "facets": { + "target_product": [ + "compass" + ] + } + }, + { + "slug": "indexes", + "title": "Manage Indexes", + "headings": [ + "Indexes Tab", + "Create an Index", + "Open the index creation dialog", + "Add fields to the index", + "Optional. Specify index options", + "Click Create Index.", + "Create an Atlas Search Index", + "Create an Atlas Vector Search Index", + "Create a Wildcard Index", + "Hide or Unhide an Index", + "Hover over the index.", + "Click the Hide Index button.", + "Click Confirm.", + "Drop an Index", + "Click the trash can icon for the index to drop.", + "Confirm the index to delete.", + "Click Drop to drop the index.", + "Limitations" + ], + "paragraphs": "Indexes are special data structures that improve query performance.\nIndexes store a portion of a collection's data in an easy-to-traverse\nform. The index stores the value of a specific field or set of fields,\nordered by the value of the field. To improve query performance, build indexes on fields that appear often\nin queries and for all operations that\n sort by a field . To learn more about indexes, see Indexes . Queries on an indexed field can use the index to limit the number of\ndocuments that must be scanned to find matching documents. Sort operations on an indexed field can return documents pre-sorted\nby the index. Indexes have some negative performance impact on write operations.\nFor collections with high write-to-read ratio, indexes are expensive\nsince each insert must also update any indexes. For a detailed list\nof considerations for indexes, see\n Operational Considerations for Indexes . The Indexes tab lists the existing indexes for a collection. To access the Indexes tab for a collection, click on the\ncollection on the left hand pane and select the Indexes tab. For each index, Compass displays the following information: Name and Definition The name of the index and keys. Type Regular, text, geospatial or hashed index. Size How large the index is. Usage Number of times the index has been used in a lookup since the time\nthe index was created or the last server restart. Properties Any special properties (such as uniqueness, partial) of the\nindex. From the Indexes tab, click the Create\nIndex button. Specify an index key. To specify an existing document field as an index key, select\nthe field from the dropdown list. To specify a field that does not exist in any document as an\nindex key, enter the field name in the input box. To create a compound index ,\nclick the icon next to the index type dropdown. Use the dropdown to the right of each field name to specify the\nindex type. You can specify one of the following types: Ascending Descending 2dsphere Text To learn how to specify a wildcard index, see\n Create a Wildcard Index . 
Compass supports the following index options: Option Description More Information Create unique index Ensure that the indexed fields do not store duplicate values. Unique Indexes Index name Specify a name for the index. Specify an Index Name Create a TTL (Time to Live) index Delete documents automatically after a specified number of\nseconds since the indexed field value. TTL Indexes Partial filter expression Index only the documents which match the specified filter\nexpression. For example: The following partial filter expression only indexes\ndocuments where the timezone field exists: Partial Indexes Wildcard projection ( New in MongoDB 4.2 ) Support unknown or arbitrary fields which match the specified\nprojection in the index. To use a wildcard projection, set\nyour index field name to $** . This directs\n Compass to use all fields in the document (excluding\n _id ). For example: Consider the following wildcard projection document: If your index field name is $** , your index only\nincludes the values of the fields in that projection. Create a Wildcard Index Use custom collation Create a custom collation for the index by typing or pasting the\ncollation document in the text box. Collation Document Atlas Search indexes let you query data in Atlas Search . For more\ninformation, see Create and Manage an Atlas Search Index . Atlas Vector Search indexes enable you to index vector data and other data\ntypes, which facilitates semantic searches on the indexed fields. For more\ninformation, see Create an Atlas Vector Search Index . You can create wildcard indexes to\nsupport queries against unknown or arbitrary fields. To create a\nwildcard index in Compass , manually type the wildcard index\nfield ( .$** ) into the Select a field name \ninput. Consider a collection where documents contain a userMetadata \nobject. The fields within the userMetadata object may vary\nbetween documents. You can create a wildcard index on userMetadata to account for\nall potential fields within the object. Type the following into\nthe Select a field name input: Specify a type ( ascending or descending ) for your wildcard\nindex, then click Create Index . Compass shows the type of your new index as\n Wildcard . You can hide an index from the query planner to\nevaluate the potential impact of dropping an index without actually dropping the\nindex. From the Indexes tab, hover over the index you want to hide. Click the closed-eye icon on the right that appears when you hover over\nyour selected index. In the dialog box, confirm the index you want to hide. After you confirm\nyour selection, a Hidden badge appears under the\n Properties column. To unhide your index, repeat steps 1-3. After you unhide your index,\n Compass removes the Hidden badge from the\n Properties column. From the Indexes tab, to delete an index,\nclick on the trash can icon for that index. A confirmation\ndialog appears. In the dialog, enter the name of the index to delete. Creating, hiding, and dropping indexes is not permitted in MongoDB Compass Readonly Edition . The Indexes tab is not available if you are connected\nto a Data Lake . You can manage Atlas Search indexes in Compass if your deployment is\nlocal, has an Atlas cluster tier of M10 or larger, and runs MongoDB 7.0\nor higher. 
For clusters running an earlier version of MongoDB,\nyou can manage your Atlas Search indexes using the Atlas UI, the\n Atlas CLI , or the\n Atlas Administration API .", + "code": [ + { + "lang": "js", + "value": "{ \"timezone\": { \"$exists\": true } }" + }, + { + "lang": "javascript", + "value": "{\n \"product_attributes.elements\" : 1,\n \"product_attributes.resistance\" : 1\n}" + }, + { + "lang": "javascript", + "value": "userMetadata.$**" + } + ], + "preview": "Indexes are special data structures that improve query performance.\nIndexes store a portion of a collection's data in an easy-to-traverse\nform. The index stores the value of a specific field or set of fields,\nordered by the value of the field.", + "tags": null, + "facets": { + "target_product": [ + "compass" + ] + } + }, + { + "slug": "install/verify-signatures/disk-images", + "title": "Verify Packages with Disk Image Verification", + "headings": [ + "Before you Begin", + "Steps" + ], + "paragraphs": "This page describes how to verify .dmg packages on macOS. The MongoDB release team digitally signs MongoDB Compass packages to certify\nthat packages are a valid and unaltered MongoDB release. Before you\ninstall MongoDB Compass , you can use the digital signature to validate the\npackage. If you don't have MongoDB Compass installed, download the MongoDB Compass binary\nfrom the Download Center . To verify the MongoDB Compass package, run: If the package is signed by MongoDB, the output includes the following\ninformation:", + "code": [ + { + "lang": "sh", + "value": "codesign -dv --verbose=4 " + }, + { + "lang": "sh", + "value": "Authority=Developer ID Application: MongoDB, Inc. (4XWMY46275)\nAuthority=Developer ID Certification Authority\nAuthority=Apple Root CA" + } + ], + "preview": "This page describes how to verify .dmg packages on macOS.", + "tags": null, + "facets": { + "genre": [ + "tutorial" + ], + "target_product": [ + "compass" + ] + } + }, + { + "slug": "install/verify-signatures/gpg", + "title": "Verify Packages with GPG", + "headings": [ + "Before you Begin", + "Steps", + "Import the MongoDB Compass public key", + "Download the MongoDB Compass public signature", + "Verify the package" + ], + "paragraphs": "This page describes how to use GPG to verify packages. The MongoDB release team digitally signs MongoDB Compass packages to certify\nthat packages are a valid and unaltered MongoDB release. Before you\ninstall MongoDB Compass , you can use the digital signature to validate the\npackage. If you don't have MongoDB Compass installed, download the MongoDB Compass binary\nfrom the Download Center . If the key imports successfully, the command returns: If you have previously imported the key, the command returns: To download the MongoDB Compass public signature, go to the Compass\nReleases page\non GitHub and download the corresponding .sig file for your\nversion and variant. For example, if you downloaded the\n mongodb-compass-1.43.5-darwin-x64.zip archive,\ndownload the\n mongodb-compass-1.43.5-darwin-x64.zip.sig \nsignature. Make sure that you select the correct version in the GitHub\nreleases page when you download the signature. 
If the package is signed by MongoDB, the command returns: If the package is signed but the signing key is not added to your\nlocal trustdb , the command returns: If the package is not signed properly, the command returns an\nerror message:", + "code": [ + { + "lang": "sh", + "value": "curl https://pgp.mongodb.com/compass.asc | gpg --import" + }, + { + "lang": "sh", + "value": "gpg: key CEED0419D361CB16: public key \"MongoDB Compass Signing Key \" imported\ngpg: Total number processed: 1\ngpg: imported: 1" + }, + { + "lang": "sh", + "value": "gpg: key A8130EC3F9F5F923: \"MongoDB Compass Signing Key \" not changed\ngpg: Total number processed: 1\ngpg: unchanged: 1" + }, + { + "lang": "sh", + "value": "gpg --verify " + }, + { + "lang": "sh", + "value": "gpg: Signature made Mon Jan 22 10:22:53 2024 CET\ngpg: using RSA key AB1B92FFBE0D3740425DAD16A8130EC3F9F5F923\ngpg: Good signature from \"MongoDB Compass Signing Key \" [unknown]" + }, + { + "lang": "sh", + "value": "gpg: WARNING: This key is not certified with a trusted signature!\ngpg: There is no indication that the signature belongs to the owner." + }, + { + "lang": "sh", + "value": "gpg: Signature made Mon Jan 22 10:22:53 2024 CET\ngpg: using RSA key AB1B92FFBE0D3740425DAD16A8130EC3F9F5F923\ngpg: BAD signature from \"MongoDB Compass Signing Key \" [unknown]" + } + ], + "preview": "This page describes how to use GPG to verify packages.", + "tags": null, + "facets": { + "genre": [ + "tutorial" + ], + "target_product": [ + "compass" + ] + } + }, + { + "slug": "install/verify-signatures/rpm", + "title": "Verify RPM Packages (RHEL)", + "headings": [ + "Before you Begin", + "Steps", + "Import the MongoDB Compass public key in gpg and rpm", + "Verify the rpm file" + ], + "paragraphs": "This page describes how to verify .rpm packages on RHEL operating\nsystems. The MongoDB release team digitally signs MongoDB Compass packages to certify\nthat packages are a valid and unaltered MongoDB release. Before you\ninstall MongoDB Compass , you can use the digital signature to validate the\npackage. If you don't have MongoDB Compass installed, download the MongoDB Compass binary\nfrom the Download Center . 
If the key imports successfully, the command returns: If you have previously imported the key, the command returns: If the file is signed, the command returns:", + "code": [ + { + "lang": "sh", + "value": "curl https://pgp.mongodb.com/compass.asc | gpg --import\n\nrpm --import https://pgp.mongodb.com/compass.asc" + }, + { + "lang": "sh", + "value": "gpg: key CEED0419D361CB16: public key \"MongoDB Compass Signing Key \" imported\ngpg: Total number processed: 1\ngpg: imported: 1" + }, + { + "lang": "sh", + "value": "gpg: key A8130EC3F9F5F923: \"MongoDB Compass Signing Key \" not changed\ngpg: Total number processed: 1\ngpg: unchanged: 1" + }, + { + "lang": "sh", + "value": "rpm --checksig " + }, + { + "lang": "sh", + "value": " digests signatures OK" + } + ], + "preview": "This page describes how to verify .rpm packages on RHEL operating\nsystems.", + "tags": null, + "facets": { + "genre": [ + "tutorial" + ], + "target_product": [ + "compass" + ] + } + }, + { + "slug": "install/verify-signatures/windows", + "title": "Verify Windows Packages", + "headings": [ + "Before you Begin", + "Steps", + "Verify Packages with PowerShell", + "Verify Packages by Checking Properties", + "Open the properties for your MongoDB Compass package", + "Check the package's digital signatures" + ], + "paragraphs": "This page describes how to verify Windows .exe and .msi \npackages. The MongoDB release team digitally signs MongoDB Compass packages to certify\nthat packages are a valid and unaltered MongoDB release. Before you\ninstall MongoDB Compass , you can use the digital signature to validate the\npackage. If you don't have MongoDB Compass installed, download the MongoDB Compass binary\nfrom the Download Center . To verify the MongoDB Compass package on Windows, you can use one of these\nmethods: Verify Packages with PowerShell Verify Packages by Checking Properties To verify Windows packages with PowerShell, run: If the file is signed, the command returns: In the properties window, open the Digital Signatures \ntab. If the package is properly signed, the Digital Signatures show\nthese properties: Name of signer Digest algorithm Timestamp MONGODB, INC. sha256 ", + "code": [ + { + "lang": "sh", + "value": "powershell Get-AuthenticodeSignature -FilePath " + }, + { + "lang": "sh", + "value": "SignerCertificate Status Path\n----------------- ------ ----\nF2D7C28591847B... Valid " + } + ], + "preview": "This page describes how to verify Windows .exe and .msi\npackages.", + "tags": null, + "facets": { + "genre": [ + "tutorial" + ], + "target_product": [ + "compass" + ] + } + }, + { + "slug": "install/verify-signatures", + "title": "Verify Integrity of Compass Packages", + "headings": [], + "paragraphs": "To learn how to verify MongoDB Compass packages, see the corresponding page\nfor your verification method: The MongoDB release team digitally signs MongoDB Compass packages to certify\nthat packages are a valid and unaltered MongoDB release. Before you\ninstall MongoDB Compass , you can use the digital signature to validate the\npackage. 
Verify Packages with Disk Image Verification Verify Packages with GPG Verify RPM Packages (RHEL) Verify Windows Packages", + "code": [], + "preview": "To learn how to verify MongoDB Compass packages, see the corresponding page\nfor your verification method:", + "tags": null, + "facets": { + "genre": [ + "tutorial" + ], + "target_product": [ + "compass" + ] + } + }, + { + "slug": "install", + "title": "Download and Install Compass", + "headings": [ + "Software Requirements", + "Software Requirements", + "Software Requirements", + "Software Requirements", + "Download Compass", + "Download Compass", + "Download and Install Compass", + "Install Compass", + "Install Compass" + ], + "paragraphs": "You can connect to your MongoDB Atlas \ndeployment with MongoDB Compass . MongoDB Atlas is the fully managed service for\nMongoDB deployments in the cloud. To download and install MongoDB Compass , select your operating system: MongoDB Compass doesn't support virtual desktop environments. Compass requires: 64-bit version of Microsoft Windows 10 or later. MongoDB 4.4 or later. Microsoft .NET Framework version 4.5 or later . The Compass installer prompts you to install the\nminimum required version of the .NET framework if it is not\nalready installed on your system. Starting the installation as an administrator if you are running a silent\ninstallation using Microsoft PowerShell or installing on\nAzure Virtual Desktop Infrastructure (VDI). Compass requires: Compass supports x64 and ARM64 architectures. Compass supports x64 and ARM64 architectures. 64-bit version of macOS 10.12 or later. MongoDB 4.4 or later. M1 Silicon is a supported ARM64 architecture and has a separate binary in the download center. Select the appropriate tab based on your Linux distribution and\ndesired package from the tabs below: When you run MongoDB Compass on Linux machines using Nvidia graphics cards,\n MongoDB Compass may not render correctly. If an error returns, try including\nthe --disable-gpu flag when you run the application. To install the .deb package on Ubuntu and Debian,\nclick the .deb tab. To install the .rpm package on\n RHEL (Red Hat Enterprise Linux) , click the .rpm \ntab. Compass requires: 64-bit version of Ubuntu 20.04 or later. MongoDB 4.4 or later. Compass requires: 64-bit version of RHEL 8+ or later. MongoDB 4.4 or later. To download Compass , you can use your preferred web browser. Open the downloads page . Select the installer you prefer. The MongoDB Compass installer is\navailable as a .exe or .msi package or a .zip \narchive. Download the latest version of MongoDB Compass for\nWindows. To download Compass , you can use your preferred web browser. Open the downloads page . Download the latest version of MongoDB Compass for\nmacOS. The MongoDB Compass installer is a .dmg disk\nimage. To download Compass on Linux systems, use wget . Alternatively, you can download Compass from the\nMongoDB downloads page . Download MongoDB Compass . Install MongoDB Compass . If your Linux distribution does not support using apt for\ninstalling local .deb files, run the following lines\nto install MongoDB Compass : Start MongoDB Compass . Download MongoDB Compass . Install MongoDB Compass . Start MongoDB Compass . Double-click the installer file. Follow the prompts to install Compass . You can\nselect the destination of the Compass installation. Once installed, Compass launches and prompts you to\nconfigure privacy settings and specify update preferences. 
Once you have downloaded Compass , double-click on\nthe .dmg file to open the disk image within the macOS\nFinder. Drag the MongoDB Compass application to your\n Applications folder. Eject the disk image. From the Applications folder, double-click on\nthe Compass icon to start the application. When you open MongoDB Compass for the first time, you may receive\na notice stating\nthat it is an application downloaded from the internet, requiring you\nto confirm you want to open it. Click Open to continue\nand launch Compass . Depending on your system's security settings, you may have\nto modify your system settings to grant Compass \npermissions to run. You may be prompted to enter your\nsystem password before launching Compass . Update MongoDB Compass Capabilities of Compass Editions", + "code": [ + { + "lang": "shell", + "value": "wget https://downloads.mongodb.com/compass/mongodb-compass_1.43.5_amd64.deb" + }, + { + "lang": "shell", + "value": "sudo apt install ./mongodb-compass_1.43.5_amd64.deb" + }, + { + "lang": "shell", + "value": "sudo dpkg -i mongodb-compass_1.43.5_amd64.deb\nsudo apt-get install -f # This installs required compass dependencies" + }, + { + "lang": "sh", + "value": "mongodb-compass" + }, + { + "lang": "shell", + "value": "wget https://downloads.mongodb.com/compass/mongodb-compass-1.43.5.x86_64.rpm" + }, + { + "lang": "shell", + "value": "sudo yum install mongodb-compass-1.43.5.x86_64.rpm" + }, + { + "lang": "sh", + "value": "mongodb-compass" + } + ], + "preview": "Download and install MongoDB Compass for Windows, macOS, or Linux.", + "tags": null, + "facets": { + "genre": [ + "tutorial" + ], + "target_product": [ + "compass" + ] + } + }, + { + "slug": "instance", + "title": "Compass Home", + "headings": [ + "Connections Sidebar" + ], + "paragraphs": "The Compass Home screen provides details regarding the\nMongoDB instances to which Compass is connected. The Compass Home screen includes the following sections: Once connected to a MongoDB instance, you can reach the Compass\nHome screen by clicking a cluster name in the Connections\nSidebar . Starting in version 1.44.0, you can connect to multiple MongoDB\ninstances at once through Compass. When you activate multiple\nconnections, Compass displays each active connection in a separate\ntab. Connections Sidebar Your saved queries and aggregation pipelines . The Connections Sidebar on the left of the Compass window\ncontains a list of your favorite and saved connections. Active\nconnections have a green dot next to their name. For more information on interacting with your connections, see\n Connections Sidebar .", + "code": [], + "preview": "The Compass Home screen provides details regarding the\nMongoDB instances to which Compass is connected.", + "tags": null, + "facets": { + "target_product": [ + "compass" + ] + } + }, + { + "slug": "keyboard-shortcuts", + "title": "Keyboard Shortcuts", + "headings": [], + "paragraphs": "Keyboard shortcuts enable you to easily navigate Compass . 
Category Description Windows Mac Menu Online help F1 F1 Menu Quit application Ctrl + Q Cmd + Q Menu Show settings Ctrl + , Cmd + , Menu Hide window Ctrl + H Cmd + H Menu Hide other windows Ctrl + Shift + H Cmd + Shift + H Menu Open a new Compass window Ctrl + N Cmd + N Menu Close current window Ctrl + Shift + W Cmd + Shift + W Menu Share schema as JSON Alt + Ctrl + S Alt + Cmd + S Menu Reload screen Ctrl + Shift + R Cmd + Shift + R Menu Reload data Ctrl + R Cmd + R Menu Toggle sidebar Ctrl + Shift + D Cmd + Shift + D Menu Actual size Ctrl + 0 Cmd + 0 Menu Zoom in Ctrl + = Cmd + = Menu Zoom out Ctrl + - Cmd + - Menu Toggle DevTools Alt + Ctrl + I Alt + Cmd + I Menu Minimize Ctrl + M Cmd + M General Undo Ctrl + Z Cmd + Z General Redo Ctrl + Shift + Z Cmd + Shift + Z General Cut Ctrl + X Cmd + X General Copy Ctrl + C Cmd + C General Paste Ctrl + V Cmd + V General Select all Ctrl + A Cmd + A General Find Ctrl + F Cmd + F Workspace Navigate to next tab Ctrl + Shift + ] Cmd + Shift + ] Workspace Navigate to previous tab Ctrl + Shift + [ Cmd + Shift + [ Workspace Close current tab Ctrl + Shift + W Cmd + Shift + W Workspace Open new tab Ctrl + T Cmd + T Aggregation Focus Mode Add a new stage after current one Ctrl + Shift + B Cmd + Shift + B Aggregation Focus Mode Add a new stage before current one Ctrl + Shift + A Cmd + Shift + A Aggregation Focus Mode Navigate to next stage Ctrl + Shift + 9 Cmd + Shift + 9 Aggregation Focus Mode Navigate to previous stage Ctrl + Shift + 0 Cmd + Shift + 0 Pipeline text editor Comment out code Ctrl + / Cmd + / All text editors Prettify code Ctrl + Shift + B Ctrl + Shift + B Query bar Submit query Enter (from the query bar) Enter (from the query bar) Web Shell Toggle shell Ctrl + \u02cb Ctrl + \u02cb Web Shell Deletes the next character Ctrl + D Ctrl + D Web Shell Moves the cursor to the end of the line Ctrl + E Ctrl + E Web Shell Moves the cursor forward one character Ctrl + F Ctrl + F Web Shell Erases one character, similar to hitting backspace Ctrl + H Cmd + H Web Shell Clears the screen, similar to the clear command Ctrl + L Cmd + L Web Shell Swap the last two characters before the cursor Ctrl + T Ctrl + T Web Shell Cycle backwards through command history \u2191 \u2191 Web Shell Cycle forwards through command history \u2193 \u2193", + "code": [], + "preview": "Keyboard shortcuts enable you to easily navigate Compass.", + "tags": null, + "facets": { + "target_product": [ + "compass" + ] + } + }, + { + "slug": "learn-more", + "title": "Resources to Learn and Explore MongoDB", + "headings": [ + "Free MongoDB Courses Online", + "Free MongoDB Cluster for Learning and Testing", + "MongoDB Server Installation", + "Resources and Documentation", + "MongoDB Community Forums", + "MongoDB User Groups" + ], + "paragraphs": "The following sections outline methods to learn MongoDB basics and\nbest practices. To help you get started with MongoDB Compass and the MongoDB database,\nsee the MongoDB University course M001 . This course\nprovides access to a student cluster for learning about MongoDB. If you are looking to learn more about MongoDB technologies, there are\nother available courses to help you along the way. All courses are\navailable at no charge. In the course catalog, you will find courses\non: Development in Java , Node.js , Python , New features and tools available in the latest\nversion of MongoDB, MongoDB Atlas Administration , Aggregation , Monitoring and Insights ,\nand more. 
If you would like to use a hosted instance of MongoDB rather than set up\na local server, you can set up a free M0 cluster on Atlas , MongoDB's hosted database as a\nservice. The M0 tier cluster is ideal for learning and testing. Once the cluster is set up, you can use the Connect button\nin the Atlas interface to obtain the correct Compass connection\nsettings for your cluster. Fill in the Compass connection form with your\nsettings to connect to your cluster . To install and run MongoDB using your own network services, you can use\nthe following tutorials to get a MongoDB database server up and running: Install MongoDB Enterprise on Windows Install MongoDB Enterprise on Linux Install MongoDB Enterprise on macOS Resource Description MongoDB Architecture Guide Starting point to get an overview of the MongoDB database. MongoDB Blog Provides timely updates on new products, features and best\npractices for getting the most out of the MongoDB platform. Resource Center Contains presentations, white papers and webinars covering all\naspects of the MongoDB database and associated products. Database Server Documentation Contains information about installing, configuring and\nmaintaining the database, as well as in depth information on the\nquery language. The MongoDB Community Forums is a\ncentral place to connect with other MongoDB users in your community,\nask questions, and get answers. MongoDB User Groups (MUGs) are local communities of developers who work\ntogether to learn from one another about MongoDB best practices,\nnetwork, and have fun. To find a MongoDB User Group in your area, see\n MongoDB User Groups on MeetUp .", + "code": [], + "preview": "The following sections outline methods to learn MongoDB basics and\nbest practices.", + "tags": null, + "facets": { + "target_product": [ + "compass" + ] + } + }, + { + "slug": "manage-data/performance-insights", + "title": "Performance Insights", + "headings": [ + "Use Cases", + "Behavior", + "Learn More" + ], + "paragraphs": "When MongoDB Compass determines that your schema or queries can be improved,\nit displays a performance insight. Performance insights show ways to\nimprove your schema and data modeling practices. Use performance\ninsights to learn best schema design practices and improve application\nperformance. Performance insights are best followed early in your application\ndevelopment process. Starting your application with good data modeling\npractices helps prevent schema and performance issues as your\napplication grows. Although Compass provides performance insights at any stage of\ndevelopment, it can be difficult to make schema modifications in\nlarge-scale schemas that are used in production. Before you modify your schema based on performance insights, ensure that\nthe suggestion makes sense for your application. For example, if\n Compass suggests creating an index, make sure that index\nsupports queries that are run frequently. Performance insights are enabled automatically. Performance insights are generic, and do not use properties specific to\nyour schema such as database or collection names. Compass shows performance insights in the following scenarios: Scenario Performance insight You run a query or aggregation without an index. Add an index to support the operation. You run an aggregation pipeline that uses a $lookup \nstage. Embed related data to avoid the need for a $lookup operation. You run a $text or $regex query. If possible, use Atlas Search to improve\nperformance for text search queries. 
Your database contains too many collections. Reduce the number of collections. Your documents contain an array field with too many elements. Avoid unbounded arrays. The data size of individual documents is too large. Break up large documents into separate collections. Your collection contains too many indexes. Review your indexes and remove any that are unnecessary. To learn more about data modeling in MongoDB, see\n Data Modeling . To learn how to create effective indexes for your application, see\n Indexing Strategies .", + "code": [], + "preview": "When MongoDB Compass determines that your schema or queries can be improved,\nit displays a performance insight. Performance insights show ways to\nimprove your schema and data modeling practices. Use performance\ninsights to learn best schema design practices and improve application\nperformance.", + "tags": null, + "facets": { + "target_product": [ + "compass" + ] + } + }, + { + "slug": "manage-data", + "title": "Interact with Your Data", + "headings": [], + "paragraphs": "MongoDB Atlas \nis a cloud-hosted database-as-a-service that\nprovides free sample datasets for your\nclusters. You can use Compass to explore and interact with\nthese sample datasets. Learn how to create and manage databases in your deployment. Learn how to manage collections in your databases. Learn how to manage views in your databases. Views are read-only\nresults of an aggregation run against\na collection. Learn how to manage documents in your collections. Learn how to query your data to return data that matches a specified\ncriteria. Learn how to create indexes to improve query performance. Learn how to analyze your data schema and shape of the fields in a\nparticular collection. Learn how to view and analyze deployment performance. Learn how to ensure that all documents in a collection\nfollow a defined set of rules. Learn how Compass samples documents to provide\ninsights about a collection. Learn how to use In-Use Encryption to encrypt data in your collections.", + "code": [], + "preview": null, + "tags": null, + "facets": { + "target_product": [ + "compass" + ] + } + }, + { + "slug": "performance", + "title": "View Real-Time Performance", + "headings": [ + "Limitations", + "Server Stats", + "Stop Slow Operations", + "Click the query you want to stop in the Slowest Operations section.", + "In the Operation Details view, click Kill Op.", + "Pause the Display", + "Limitation" + ], + "paragraphs": "To access the real-time server performance view, click the\n next to the connection name in the\n Connections Sidebar and select the Performance \nmenu option. The Performance view is not available if you are connected\nto an Atlas Data Lake . MongoDB Compass cannot retrieve performance data for collections that use\n Queryable Encryption . MongoDB Compass shows limited performance data when connected to a\n mongos . The Performance tab displays various stats: Chart/Table Description Operations Displays the number of operations as reported by\n mongostat . For a description of the\nfields, see mongostat . Read & Write Displays the number of active reads, queued reads, active\nwrites, queued writes as reported by mongostat . For a description of the fields, see\n mongostat . Network Displays the number of connections as reported by\n mongostat . For a description of the\nfields, see mongostat . Memory Displays the memory stats as reported by mongostat . For a description of the fields, see\n mongostat . 
Hottest Collections Displays the collections with the most activities as reported\nby mongotop . The collections correspond to a given moment in the charts;\ni.e. as you move over the charts, the corresponding collections\nare highlighted/displayed in the table. Slowest Operations Displays the slowest operations as reported by\n db.currentOp() . The\noperations correspond to a given moment in the charts; i.e. as\nyou move over the charts, the corresponding slowest operations\nare displayed in the table. To view the details of a slow\noperation or stop the operation , click on\nthe operation. You can stop slow operations from the Performance Tab . On deployments that require authentication , to stop operations that you don't own, you\nmust have the killop privilege action. The Pause button above the graph display pauses the update\nof the displays only. Pausing the performance tab does not impact the\ncollection of the underlying data. To restart the display updates, click\n Play . Next to the Pause button is a clock which shows the\ncurrent time while the graphs are updating. If the display is\npaused, the clock shows the time at which the pause began. The Performance tab is not available if you are connected\nto a Data Lake .", + "code": [], + "preview": "To access the real-time server performance view, click the\n next to the connection name in the\nConnections Sidebar and select the Performance\nmenu option.", + "tags": null, + "facets": { + "target_product": [ + "compass" + ] + } + }, + { + "slug": "query/atlas-search", + "title": "Run Atlas Search Queries", + "headings": [ + "Steps", + "Find the index you want to use in your query", + "Run the query", + "Learn More" + ], + "paragraphs": "You can run Atlas Search $search and $searchMeta \nqueries from MongoDB Compass if you created the Atlas Search index for data on the Atlas cluster. You can\nalso run $vectorSearch queries from MongoDB Compass if you have an\nexisting Atlas Vector search index on your data in Atlas. From the Indexes tab, click Search Indexes . Hover over the index you want to use in your query. Click the Aggregate button. MongoDB Compass switches to the Aggregations tab where you\ncan run your query. Construct your query. By default, MongoDB Compass selects the $search stage for\nyour query. You can select the $searchMeta stage\nto use in your query. If you are using a vectorSearch type\nindex in your query, you can only use the\n $vectorSearch stage in your query. The Atlas Search $search and $searchMeta and\nthe Atlas Vector Search $vectorSearch pipeline stages must\nbe the first stage in your aggregation pipeline. Click Run to run your query. Atlas Search Pipeline Stages Atlas Vector Search Pipeline Stage", + "code": [], + "preview": "You can run Atlas Search $search and $searchMeta\nqueries from MongoDB Compass if you created the Atlas Search index for data on the Atlas cluster. You can\nalso run $vectorSearch queries from MongoDB Compass if you have an\nexisting Atlas Vector search index on your data in Atlas.", + "tags": null, + "facets": { + "target_product": [ + "compass" + ] + } + }, + { + "slug": "query/collation", + "title": "Set Language Specific Rules for String Comparison", + "headings": [ + "Set Collation", + "Clear the Query", + "To Learn More" + ], + "paragraphs": "Use the Collation query bar option\nto specify language-specific rules for string comparison, such as\nrules for lettercase and accent marks. In the Query Bar, click Options . 
Enter the locale field in the collation document to specify the\n ICU Locale code for the\ndesired language in the Collation field. As you type, the Find button is disabled and the\n Collation label turns red until a valid query is entered. To use the pinyin variant of the Chinese collation, use the\nfollowing collation document: Click Find to run the query and view the updated\nresults. To clear the query bar and the results of the query, click\n Reset . See the supported languages and locales section in the\n MongoDB Manual . See the possible fields in a collation document in the\n MongoDB Manual .", + "code": [ + { + "lang": "javascript", + "value": "{ \"locale\" : \"zh@collation=pinyin\" }" + } + ], + "preview": "Use the Collation query bar option\nto specify language-specific rules for string comparison, such as\nrules for lettercase and accent marks.", + "tags": null, + "facets": { + "target_product": [ + "compass" + ] + } + }, + { + "slug": "query/filter", + "title": "Query Your Data", + "headings": [ + "Compatibility", + "Set Query Filter", + "Examples", + "Match by a Single Condition", + "Match by Multiple Conditions ($and)", + "Match by Multiple Possible Conditions ($or)", + "Match by Exclusion ($not)", + "Match with Comparison Operators", + "Match by Date", + "Match by Array Conditions", + "Match by Substring", + "Match by Embedded Field", + "Supported Data Types in the Query Bar", + "Clear the Query", + "Query Collections with Invalid UTF8 Data", + "How Does the Compass Query Compare to MongoDB and SQL Queries?" + ], + "paragraphs": "You can type MongoDB filter documents into the query bar to display only\ndocuments which match the specified criteria. To learn more about\nquerying documents, see Query Documents in the MongoDB manual. You can query your data for deployments hosted in the following\nenvironments: MongoDB Atlas : The fully\nmanaged service for MongoDB deployments in the cloud MongoDB Enterprise : The\nsubscription-based, self-managed version of MongoDB MongoDB Community : The\nsource available, free-to-use, and self-managed version of MongoDB To learn more about querying your data for deployments hosted in MongoDB\nAtlas, see Find Specific Documents . In the Filter field, enter a filter document between the\ncurly braces. You can use all the MongoDB query operators except the $text and\n $expr operators. The following filter returns documents that have a title value\nof Jurassic Park : Click Find to run the query and view the updated\nresults. The examples on this page use a small example dataset. To import the\nsample data into your MongoDB deployment, perform the following steps: Copy the following documents to your clipboard: In Compass , use the left navigation panel to select the\ndatabase and the collection you want to import the data to. Click the Documents tab. Click Add Data and select Insert Document . Set the View to JSON ( {} ). Paste the JSON documents from your clipboard into the modal. Click Insert . If you do not have a MongoDB deployment or if you want to query a\nlarger sample data set, see Sample Data for Atlas Clusters for instructions on creating a free-tier cluster\nwith sample data. The following example queries filter the sample\ndocuments provided on this page. 
The following query filter finds all documents where the value of\n name is \"Andrea Le\": The query returns the following document: The following query filter finds all documents where scores array\ncontains the value 75 , and the name is Greg Powell : The query returns the following document: The following query filter uses the $or operator to find\ndocuments where version is 4 , or name is Andrea Le : The query returns the following documents: The following query filter uses the $not operator to find all\ndocuments where the value of the name field is not equal to\n\"Andrea Le\", or the name field does not exist: The query returns the following documents: For a complete list of logical query operators, see\n Logical Query Operators . The following query filter uses the $lte operator to find all\ndocuments where version is less than or equal to 4 : The query returns the following documents: For a complete list of comparison operators, see\n Comparison Query Operators . The following query filter uses the $gt operator and\n Date() method to find all documents where the dateCreated \nfield value is later than June 22nd, 2000: The query returns the following documents: The following query filter uses the $elemMatch operator\nto find all documents where at least one value in the scores \narray is greater than 80 and less than 90 : The query returns the following document because one of the values\nin the scores array is 85 : For more query examples, see\n Query Documents \nin the MongoDB manual. The following query filter uses the $regex operator\nto find all documents where the value of email includes the term\n\"andrea_le\": The query returns the following document: The following query filter finds the\ndocument with the school.name subfield of \"Northwestern\": The query returns the following document: For more query examples, see\n Query Documents \nin the MongoDB manual. The Compass Filter supports using the\n mongo shell mode representation of the MongoDB\nExtended JSON BSON data types . The following filter returns documents where\n start_date is greater than than the BSON Date \n 2017-05-01 : By specifying the Date type in both start_date and the\n $gt comparison operator, Compass performs the greater\nthan comparison chronologically, returning documents with\n start_date later than 2017-05-01 . Without the Date type specification, Compass compares the\n start_dates as strings\n lexicographically ,\ninstead of comparing the values chronologically. To clear the query bar and the results of the query, click\n Reset . If you attempt to query or export data with invalid UTF8 characters\nthe following error message displays: To query or export this data, disable UTF8 validation by setting\nthe enableUtf8Validation URI option to false . The following URI disables UTF8 validation: Editing data with enableUtf8Validation=false can result in\nloss of data. This approach is a temporary workaround to\nquery or export data only. You can also disable this option in the\n Advanced Connection Options by\nselecting enableUtf8Validation and entering\n false . $filter corresponds to the WHERE clause in a\n SQL (Structured Query Language) SELECT statement. You have 3,235 articles. 
You would like to see all articles\nthat Joe Bloggs wrote.", + "code": [ + { + "lang": "json", + "value": "{ \"title\": \"Jurassic Park\" }" + }, + { + "lang": "JSON", + "value": "[\n {\n \"name\": \"Andrea Le\",\n \"email\": \"andrea_le@fake-mail.com\",\n \"school\": {\n \"name\": \"Northwestern\"\n },\n \"version\": 5,\n \"scores\": [ 85, 95, 75 ],\n \"dateCreated\": { \"$date\": \"2003-03-26\" }\n },\n {\n \"email\": \"no_name@fake-mail.com\",\n \"version\": 4,\n \"scores\": [ 90, 90, 70 ],\n \"dateCreated\": { \"$date\": \"2001-04-15\" }\n },\n {\n \"name\": \"Greg Powell\",\n \"email\": \"greg_powell@fake-mail.com\",\n \"version\": 1,\n \"scores\": [ 65, 75, 80 ],\n \"dateCreated\": { \"$date\": \"1999-02-10\" }\n }\n]" + }, + { + "lang": "shell", + "value": "{ name: \"Andrea Le\" }" + }, + { + "lang": "JSON", + "value": "{\n \"_id\": { \"$oid\": \"5e349915cebae490877d561d\" },\n \"name\": \"Andrea Le\",\n \"email\": \"andrea_le@fake-mail.com\",\n \"school\": {\n \"name\": \"Northwestern\"\n },\n \"version\": 5,\n \"scores\": [ 85, 95, 75 ],\n \"dateCreated\": { \"$date\": \"2003-03-26\" }\n}" + }, + { + "lang": "shell", + "value": "{ $and: [ { scores: 75, name: \"Greg Powell\" } ] }" + }, + { + "lang": "JSON", + "value": "{\n \"_id\": { \"$oid\":\"5a9427648b0beebeb69579cf\" },\n \"name\": \"Greg Powell\",\n \"email\": \"greg_powell@fake-mail.com\",\n \"version\": 1,\n \"scores\": [ 65, 75, 80 ],\n \"dateCreated\": { \"$date\": \"1999-02-10\" }\n}" + }, + { + "lang": "shell", + "value": "{ $or: [ { version: 4 }, { name: \"Andrea Le\" } ] }" + }, + { + "lang": "JSON", + "value": "[\n {\n \"_id\": { \"$oid\": \"5e349915cebae490877d561d\" },\n \"name\": \"Andrea Le\",\n \"email\": \"andrea_le@fake-mail.com\",\n \"school\": {\n \"name\": \"Northwestern\"\n },\n \"version\": 5,\n \"scores\": [ 85, 95, 75 ],\n \"dateCreated\": { \"$date\": \"2003-03-26\" }\n },\n {\n \"_id\": { \"$oid\":\"5e349915cebae490877d561e\" },\n \"email\": \"no_name@fake-mail.com\",\n \"version\": 4,\n \"scores\": [ 90, 90, 70 ],\n \"dateCreated\": { \"$date\": \"2001-04-15\" }\n }\n]" + }, + { + "lang": "shell", + "value": "{ name: { $not: { $eq: \"Andrea Le\" } } }" + }, + { + "lang": "JSON", + "value": "[\n {\n \"_id\": { \"$oid\":\"5e349915cebae490877d561e\" },\n \"email\": \"no_name@fake-mail.com\",\n \"version\": 4,\n \"scores\": [ 90, 90, 70 ],\n \"dateCreated\": { \"$date\": \"2001-04-15\" }\n },\n {\n \"_id\": { \"$oid\":\"5a9427648b0beebeb69579cf\" },\n \"name\": \"Greg Powell\",\n \"email\": \"greg_powell@fake-mail.com\",\n \"version\": 1,\n \"scores\": [ 65, 75, 80 ],\n \"dateCreated\": { \"$date\": \"1999-02-10\" }\n }\n]" + }, + { + "lang": "shell", + "value": "{ version: { $lte: 4 } }" + }, + { + "lang": "JSON", + "value": "[\n {\n \"_id\": { \"$oid\":\"5e349915cebae490877d561e\" },\n \"email\": \"no_name@fake-mail.com\",\n \"version\": 4,\n \"scores\": [ 90, 90, 70 ],\n \"dateCreated\": { \"$date\": \"2001-04-15\" }\n },\n {\n \"_id\": { \"$oid\":\"5a9427648b0beebeb69579cf\" },\n \"name\": \"Greg Powell\",\n \"email\": \"greg_powell@fake-mail.com\",\n \"version\": 1,\n \"scores\": [ 65, 75, 80 ],\n \"dateCreated\": { \"$date\": \"1999-02-10\" }\n }\n]" + }, + { + "lang": "shell", + "value": "{ dateCreated: { $gt: new Date('2000-06-22') } }" + }, + { + "lang": "JSON", + "value": "[\n {\n \"_id\": { \"$oid\": \"5e349915cebae490877d561d\" },\n \"name\": \"Andrea Le\",\n \"email\": \"andrea_le@fake-mail.com\",\n \"school\": {\n \"name\": \"Northwestern\"\n },\n \"version\": 5,\n \"scores\": [ 85, 95, 75 
],\n \"dateCreated\": { \"$date\": \"2003-03-26\" }\n },\n {\n \"_id\": { \"$oid\": \"5e349915cebae490877d561e\" },\n \"email\": \"no_name@fake-mail.com\",\n \"version\": 4,\n \"scores\": [ 90, 90, 70 ],\n \"dateCreated\": { \"$date\": \"2001-04-15\" }\n }\n]" + }, + { + "lang": "shell", + "value": "{ scores: { $elemMatch: { $gt: 80, $lt: 90 } } }" + }, + { + "lang": "JSON", + "value": "{\n \"_id\": { \"$oid\": \"5e349915cebae490877d561d\" },\n \"name\": \"Andrea Le\",\n \"email\": \"andrea_le@fake-mail.com\",\n \"school\": {\n \"name\": \"Northwestern\"\n },\n \"version\": 5,\n \"scores\": [ 85, 95, 75 ],\n \"dateCreated\": { \"$date\": \"2003-03-26\" }\n}" + }, + { + "lang": "shell", + "value": "{ email: { $regex: \"andrea_le\" } }" + }, + { + "lang": "JSON", + "value": "{\n \"_id\": { \"$oid\": \"5e349915cebae490877d561d\" },\n \"name\": \"Andrea Le\",\n \"email\": \"andrea_le@fake-mail.com\",\n \"school\": {\n \"name\": \"Northwestern\"\n },\n \"version\": 5,\n \"scores\": [ 85, 95, 75 ],\n \"dateCreated\": { \"$date\": \"2003-03-26\" }\n}" + }, + { + "lang": "shell", + "value": "{ \"school.name\": \"Northwestern\" }" + }, + { + "lang": "JSON", + "value": "{\n \"_id\": { \"$oid\": \"5e349915cebae490877d561d\" },\n \"name\": \"Andrea Le\",\n \"email\": \"andrea_le@fake-mail.com\",\n \"school\": {\n \"name\": \"Northwestern\"\n },\n \"version\": 5,\n \"scores\": [ 85, 95, 75 ],\n \"dateCreated\": { \"$date\": \"2003-03-26\" }\n}" + }, + { + "lang": "javascript", + "value": "{ \"start_date\": {$gt: new Date('2017-05-01')} }" + }, + { + "lang": "none", + "value": "Invalid UTF-8 string in BSON document." + }, + { + "lang": "javascript", + "value": "mongodb://localhost:27017/?enableUtf8Validation=false" + }, + { + "lang": "javascript", + "value": "{ author : { $eq : \"Joe Bloggs\" } }" + }, + { + "lang": "javascript", + "value": "db.article.aggregate( { $match: { \"author\": \"Joe Bloggs\" } } )" + }, + { + "lang": "sql", + "value": "SELECT * FROM article\nWHERE author = \"Joe Bloggs\";" + } + ], + "preview": "Query documents in your Atlas, Enterprise, or Community database deployments.", + "tags": "sample dataset", + "facets": { + "genre": [ + "tutorial" + ], + "target_product": [ + "compass" + ] + } + }, + { + "slug": "query/limit", + "title": "Limit the Number of Returned Documents", + "headings": [ + "Set Documents to Return", + "Clear the Query", + "To Learn More", + "How Does the Compass Query Compare to MongoDB and SQL Queries?" + ], + "paragraphs": "If the query bar has the Limit option, you can specify the\nmaximum number of documents to return. To specify the limit: In the Query Bar, click Options . Enter an integer representing the number of documents to return into\nthe Limit field. Click Find to run the query and view the updated\nresults. To clear the query bar and the results of the query, click\n Reset . See the limit entry in the\n MongoDB Manual . $skip corresponds to the LIMIT ... clause\nin a SQL (Structured Query Language) SELECT statement. You have 3,235 articles. 
You would like to see a list of the\nfirst 10 articles.", + "code": [ + { + "lang": "sql", + "value": "SELECT * FROM article\nLIMIT 10;" + }, + { + "lang": "javascript", + "value": "db.article.aggregate(\n { $limit : 10 }\n);" + }, + { + "lang": "javascript", + "value": "$limit : 10" + } + ], + "preview": "If the query bar has the Limit option, you can specify the\nmaximum number of documents to return.", + "tags": null, + "facets": { + "target_product": [ + "compass" + ] + } + }, + { + "slug": "query/maxtimems", + "title": "Adjust Maximum Time for Query Operations", + "headings": [ + "Set MAX TIME MS", + "Learn More" + ], + "paragraphs": "The MAX TIME MS query bar option sets the cumulative time\nlimit in milliseconds to process query bar operations. If the time\nlimit is reached before the operation completes, Compass \ninterrupts the operation. The default MAX TIME MS value is 60000, or 60 seconds.\nConsider raising this value if you meet one of the following conditions: You can also\nconsider creating indexes to improve query\nperformance. You have a large collection . Your operations frequently time out. You query data archived from the Atlas cluster using Online Archive. Click Options . Adjust MAX TIME MS to the desired value in milliseconds. To learn more about MAX TIME MS , see\n cursor.maxTimeMS() \nin the MongoDB manual.", + "code": [], + "preview": "The MAX TIME MS query bar option sets the cumulative time\nlimit in milliseconds to process query bar operations. If the time\nlimit is reached before the operation completes, Compass\ninterrupts the operation.", + "tags": null, + "facets": { + "target_product": [ + "compass" + ] + } + }, + { + "slug": "query/project", + "title": "Set Which Fields Are Returned", + "headings": [ + "To Learn More", + "How Does the Compass Query Compare to MongoDB and SQL Queries?" + ], + "paragraphs": "If the query bar displays the Project option, you can\nspecify which fields to return in the resulting data. By default, all\nfields are returned. To set a projection: In the Query Bar, click Options . Enter the projection document into the Project field. As you type, the Find button is disabled and the\n Project label turns red until a valid query is entered. Specify the field name and set to 1 in the project document. Only the fields specified in the project document are returned.\nThe _id field is returned unless it is set to 0 in the\n Project document. Specify the field name and set to 0 in the project document. All fields except for the fields specified in the project\ndocument are returned. Click Find to run the query and view the updated\nresults. For query result sets larger than 1000 documents, Compass shows a\nsubset of the results. Otherwise, Compass shows the entire result\nset. For details on sampling, see Sampling . To learn how project works, see the project entry in the\n MongoDB Manual . $project corresponds to choosing specific fields to return\nin a SQL (Structured Query Language) SELECT statement. You have 3,235 articles. 
You would like to see only the headlines\nand authors of those articles.", + "code": [ + { + "lang": "javascript", + "value": "{ year: 1, name: 1 }" + }, + { + "lang": "javascript", + "value": "{ year: 0, name: 0 }" + }, + { + "lang": "sql", + "value": "SELECT headline, author FROM article;" + }, + { + "lang": "javascript", + "value": "db.article.aggregate(\n { $project : { headline : 1, author : 1 } }\n);" + }, + { + "lang": "javascript", + "value": "{ headline : 1, author : 1 }" + } + ], + "preview": "If the query bar displays the Project option, you can\nspecify which fields to return in the resulting data. By default, all\nfields are returned.", + "tags": null, + "facets": { + "target_product": [ + "compass" + ] + } + }, + { + "slug": "query/queries", + "title": "Managing Saved Queries and Aggregations", + "headings": [ + "Saving an Aggregation Pipeline", + "Click the save dropdown button", + "Enter a name for your pipeline", + "Save the pipeline", + "Saving a Favorite Query", + "Open query history", + "Select favorites", + "Name your query", + "Viewing Saved Queries", + "Using the My Queries Tab", + "Using the Favorites Tab", + "Open query history", + "Click the Favorites button in the past queries pane.", + "Viewing Query History" + ], + "paragraphs": "Starting in MongoDB Compass 1.31, you can load saved queries and aggregations from\nthe My Queries view once you are connected to your cluster.\nThis page explains how to add and view favorite queries and\naggregations. You can save a pipeline so that you can find it later. If you\nload a saved pipeline, you can change it without\nchanging the original saved copy. You can also create a view from your pipeline results. To save your pipeline: In the aggregation pipeline pane, click the Save drop-down\nmenu and select Save as . Click the Save button to save your pipeline. Your pipeline\nwill be saved under the folder icon at the top left of the\npipeline builder. You can favorite a query so that you can find it later. If you\nload a favorite query, you can change it without\nchanging the original saved copy. To add a query to your favorites: ..step:: Click Save . Click the History button on the query bar. Click the Star button. Enter a name for your query. Starting in MongoDB Compass 1.31, you can view your saved queries\nand aggregation pipelines on the My Queries tab\nonce connected to your cluster. When you click a saved or favorite query tile, Compass opens the\n Documents tab with the filter loaded. When you click a saved or favorite pipeline tile, Compass opens the\n Aggregations tab with the pipeline loaded. You can also view favorite queries from the Favorites tab\nfrom in the Documents view. To open the\n Favorites tab: Click the History button at the top of the\n Documents tab. Select your favorited operation. If your favorited operation is a query, only a Filter \nstatement displays in the Favorites list view\nalong with copy and delete \nicons. Clicking a favorited query in the\n Favorites list view populates your\n Query Bar with that query. If your favorited operation is a bulk update statement, a\n Filter and Update statement display in\nthe Favorites list view along with copy\n , delete , and\n open in modal icons. Clicking the icon opens the bulk\nupdate statement and the filter criteria in the\n Update Documents modal. For details on bulk\nupdate statements in Compass, see\n Modify Multiple Documents . 
For details on how to view query history, see\n viewing recent query history .", + "code": [], + "preview": "How to add and view favorite saved queries and aggregations.", + "tags": "sample dataset", + "facets": { + "genre": [ + "tutorial" + ], + "target_product": [ + "compass" + ] + } + }, + { + "slug": "query/recent", + "title": "View Recent Queries", + "headings": [ + "Query Autocomplete Menu" + ], + "paragraphs": "You can view up to 20 of the most recent queries for a collection in the\nrecent queries menu. To open the recent queries menu, click the \nicon in the query bar. Click a query in the list to populate the query bar with\nthat query. You can also hover over a query and perform the following actions: Click Action Save the query as a favorite . Specify\na name for the query and click Save . Copy the query to the clipboard. Remove the query from the Recent list. If the query bar is empty, you can click the filter bar to open the\nquery autocomplete menu and view up to 50 of your most recent queries. To scroll\nthrough your most recent queries, you can use the \u2191 and \u2193 arrow\nkeys. If a query is highlighted on the menu, it displays a pop-up panel that\nshows the filter, project, and sort stages of the query. To select a query from\nthe autocomplete menu, press Enter , press Tab , or click the query. If you start typing into the filter bar, the autocomplete menu filters results\nto only display recent queries that include the specified string. If you start\ntyping into the other option fields, such as Project or\n Sort , the autocomplete menu only displays recent queries that\ninclude those fields. To learn how to write queries to filter your data, see\n Query Your Data .", + "code": [], + "preview": "You can view up to 20 of the most recent queries for a collection in the\nrecent queries menu. To open the recent queries menu, click the \nicon in the query bar. Click a query in the list to populate the query bar with\nthat query.", + "tags": null, + "facets": { + "target_product": [ + "compass" + ] + } + }, + { + "slug": "query/skip", + "title": "Skip a Number of Documents", + "headings": [ + "Set Documents to Skip", + "Clear the Query", + "To Learn More", + "How Does the Compass Query Compare to MongoDB and SQL Queries?" + ], + "paragraphs": "If the query bar displays the Skip option, you can specify\nhow many documents to skip before returning the result set. To specify the number of documents to skip: In the Query Bar, click Options . Enter an integer representing the number of documents to skip into\nthe Skip field. Click Find to run the query and view the updated\nresults. For query result sets larger than 1000 documents, Compass shows a\nsubset of the results. Otherwise, Compass shows the entire result\nset. For details on sampling, see Sampling . To clear the query bar and the results of the query, click\n Reset . See the skip entry in the\n MongoDB Manual . $skip corresponds to the LIMIT ... OFFSET ... clause\nin a SQL (Structured Query Language) SELECT statement. You have 3,235 articles. 
You would like to see a list of articles\ngrouped in blocks of 50, starting with the 436th record.", + "code": [ + { + "lang": "sql", + "value": "SELECT * FROM article\nLIMIT 50 OFFSET 435;" + }, + { + "lang": "javascript", + "value": "db.article.aggregate(\n { $limit : 50 },\n { $skip : 435 }\n);" + }, + { + "lang": "javascript", + "value": "$skip : 435" + } + ], + "preview": "If the query bar displays the Skip option, you can specify\nhow many documents to skip before returning the result set.", + "tags": null, + "facets": { + "target_product": [ + "compass" + ] + } + }, + { + "slug": "query/sort", + "title": "Sort the Returned Documents", + "headings": [ + "Compatibility", + "Set the Sort Order", + "Clear the Query", + "To Learn More", + "How Does the Compass Query Compare to MongoDB and SQL Queries?" + ], + "paragraphs": "If the query bar displays the Sort option, you can specify\nthe sort order of the returned documents. You can sort the returned documents for deployments hosted in the following\nenvironments: MongoDB Atlas : The fully\nmanaged service for MongoDB deployments in the cloud MongoDB Enterprise : The\nsubscription-based, self-managed version of MongoDB MongoDB Community : The\nsource available, free-to-use, and self-managed version of MongoDB To learn more about sorting returned documents for deployments hosted in MongoDB\nAtlas, see Sort Query Results . To set the sort order: In the Query Bar, click Options . Enter the sort document into the Sort field. As you type, the Find button is disabled and the\n Sort label turns red until a valid query is entered. To specify ascending order for a field, set the field to 1 in\nthe sort document. To specify descending order for a field, set\nthe field to -1 in the sort document. The following sort document sorts results first by year \nin descending order, and within each year, sorts by name in\nascending order. Click Find to run the query and view the updated\nresults. To clear the query bar and the results of the query, click\n Reset . See the sort entry in the\n MongoDB Manual . $sort corresponds to the ORDER BY ... clause\nin a SQL (Structured Query Language) SELECT statement. You have 3,235 articles. You would like to see a list of articles\nsorted alphabetically by headline.", + "code": [ + { + "lang": "javascript", + "value": "{ year: -1, name: 1 }" + }, + { + "lang": "sql", + "value": "SELECT * FROM article\nORDER BY headline ASC;" + }, + { + "lang": "javascript", + "value": "db.article.aggregate(\n { $sort : { headline : 1 } }\n);" + }, + { + "lang": "javascript", + "value": "$sort : { headline : 1 }" + } + ], + "preview": "How to specify the sort order of the returned documents in your Atlas, Enterprise, or Community database deployment.", + "tags": null, + "facets": { + "genre": [ + "tutorial" + ], + "target_product": [ + "compass" + ] + } + }, + { + "slug": "query-plan", + "title": "View Query Performance", + "headings": [ + "About This Task", + "Steps", + "Click the Explain button", + "Select a query operation", + "(Optional) Select the Raw Output view.", + "Learn More" + ], + "paragraphs": "To help you better understand the performance of your query, you can view\nyour query's explain plan. On the Explain Plan modal, you can view the explain stages as a\n Visual Tree , where each query operation appears as a node on the\ntree. You can also view the explain details in raw JSON format by selecting\nthe Raw Output view. 
The explain plan includes a Query Performance Summary with\ninformation on the execution of your query such as: The Explain Plan doesn't show aggregation pipeline stages\nsuch as $merge and $out because\n Compass ignores all out stages from the aggregation before\nrunning the explain plan. Execution time The number of returned documents The number of examined documents The number of examined index keys The Explain Plan is not available if you are connected\nto Data Lake . In the query bar, click the Explain button to open the\nmodal. By default, the explain stages are shown as a Visual Tree .\nEach query operation appears as a node on the tree. For more detailed execution information about the query operation, click\nthe corresponding node. For example, the following explain plan provides detailed information on\na query that filters for { title : \"Jurassic Park\" } : To view your full explain plan as raw JSON, select the\n Raw Output view. Analyze Query Performance", + "code": [], + "preview": "To help you better understand the performance of your query, you can view\nyour query's explain plan.", + "tags": null, + "facets": { + "target_product": [ + "compass" + ] + } + }, + { + "slug": "query-with-natural-language/ai-and-data-usage-information", + "title": "AI and Data Usage Information", + "headings": [ + "Third Party Providers", + "How Your Data is Used", + "Disable Natural Language Querying" + ], + "paragraphs": "Querying with natural language in Compass is powered by Generative AI\n(Gen AI), and may give inaccurate responses. See our Generative AI FAQ \nfor more information about Gen AI in MongoDB products. Querying with natural language in Compass currently uses the Azure OpenAI Service \nhosted by Microsoft. This is subject to change in the future. When you query with natural language in Compass, the following\ninformation is sent to MongoDB's backend and/or the third party\nAI provider: The information that is sent will not be shared with any other third\nparties or stored by the AI provider. We do not send database\nconnection strings, credentials, or rows/documents from your databases. The full text of your natural language prompt. The schema of the collection you are querying,\nincluding database name, collection name, field names, and types. Enable sending sample field values. This is an optional setting to\nimprove the quality of recommendations. You can manage this through the\n Enable sending sample field values setting in\n Compass settings . This setting\nis off by default. Natural language querying in Compass is available if you have\nenabled the Generative AI setting and logged into Atlas. If you no longer want to use the feature, uncheck\n Enable AI Features in the\n Artificial Intelligence settings. To prevent usage of this feature entirely, you can disable it in the\n global configuration file .", + "code": [], + "preview": "Querying with natural language in Compass is powered by Generative AI\n(Gen AI), and may give inaccurate responses. 
See our Generative AI FAQ\nfor more information about Gen AI in MongoDB products.", + "tags": null, + "facets": { + "target_product": [ + "compass" + ] + } + }, + { + "slug": "query-with-natural-language/enable-natural-language-querying", + "title": "Enable Natural Language Querying", + "headings": [ + "About this Task", + "Before you Begin", + "Steps", + "Open settings", + "Enable AI Features and Log In to Atlas", + "(Optional) Read MongoDB's GenAI FAQs", + "Next Steps" + ], + "paragraphs": "You can enable Compass natural language querying by toggling the\n Use Generative AI setting. Natural language queries can\nbe a helpful starting point and assist you in learning to write MongoDB\nqueries. To enable natural language querying in Compass, follow the steps below.\nAlternatively, you can enable natural language querying by clicking the\n Log in to Atlas to enable button within the\n Use natural language to generate queries and pipelines modal.\nYou can display this modal on Compass versions 1.40.0 or later when\nyou click the Generate query or\n Generate aggregation button on the query bar. To enable the generation of queries and aggregations in Compass,\nyou must: Sign into Atlas. For details on how to get a free Atlas\naccount and deploy a free M0 cluster, see Get Started with Atlas . Install Compass version 1.40.0 or later. For instructions on\ndownloading and installing the latest version, see Download and Install Compass . You can also enable Natural Language Querying using the configuration\nfile. For details, see Configuration File Settings . Operating System Description Keyboard Shortcut Windows / Linux In the top menu bar, click Edit . Ctrl + , macOS In the top menu bar, click MongoDB Compass . \u2318 + , You only need to enable AI features if it is your first time\nusing natural language querying. Select Artificial Intelligence . Click the Enable AI Features check box. Click Log in with Atlas . A web browser displays the\n Atlas login page . Complete the login procedure. After you log in successfully, a\n You can create queries and aggregations with\ngenerative AI \nmessage displays. Click Save . For more information about generative AI usage with MongoDB\nCompass, see: AI and Data Usage Information FAQ For MongoDB Generative AI Features Prompt a Natural Language Query", + "code": [], + "preview": "You can enable Compass natural language querying by toggling the\nUse Generative AI setting. Natural language queries can\nbe a helpful starting point and assist you in learning to write MongoDB\nqueries.", + "tags": null, + "facets": { + "target_product": [ + "compass" + ] + } + }, + { + "slug": "query-with-natural-language/prompt-natural-language-aggregation", + "title": "Prompt a Natural Language Aggregation", + "headings": [ + "About this Task", + "Before you Begin", + "Steps", + "Navigate to the Natural Language Query Bar", + "Type a question about your collection", + "Run the aggregation", + "Example", + "Next Steps", + "Learn More" + ], + "paragraphs": "You can use MongoDB Compass to generate aggregation queries using natural\nlanguage. Compass uses AI to generate aggregations based on prompts\nyou provide. Querying with natural language can be a helpful starting\npoint and assist you in learning to write MongoDB queries. When you query your data using natural language in Compass, the text of\nyour prompts and details about your MongoDB schemas are sent to\nMicrosoft and OpenAI for processing. 
Your data is not stored on any\nthird party storage systems or used to train AI models. This software uses generative artificial intelligence. It is\nexperimental and may give inaccurate results. Your use of this software\nis subject to MongoDB's: Terms of Use Acceptable Use Policy Privacy Policy You can also provide natural language prompts on the\n documents tab . This page uses the sample_mflix.movies \ncollection from the Atlas sample dataset. It can be difficult to predict the complexity of the query or pipeline\nreturned by the Compass AI. You may need to adjust the\n MAX TIME MS query bar option to either allow longer query\nprocessing time or prevent long-running queries. For details, see\n Adjust Maximum Time for Query Operations . You must enable natural language querying in Compass . The Natural Language Query Bar displays. Select the Aggregations tab. Click the Generate aggregation button. Type a natural language prompt for your collection into the query\nbar. Aggregation pipeline prompts usually have an aggregation verb\nsuch as count, average, or sum with logical conditions. For\nexample: How many movies have more than 3 writers\nin the writers array? Press enter or click the Generate aggregation button. An aggregation pipeline populates in the Pipeline \nbar. You can scroll down to see the syntax of each stage. The results populate in the aggregations view. Before running the query, make sure to thoroughly review the\nsyntax of each stage. Ensure the returned pipeline has the\nfields and stages that match your desired use case. Some aggregation operators, like $merge and\n $out , can modify your collection's data. If your aggregation pipeline contains operators that can modify\nyour collection's data, you are prompted for confirmation before\nthe pipeline is executed. Press enter or click the Run button to execute\nthe pipeline. You can optionally provide feedback by clicking the \nor icon by the Natural Language Query Bar \nand provide details on your experience. Your feedback is not used to train any AI models. Below are examples of prompts to help you understand expected results\nwhen using natural language prompts for aggregation. Prompt Response Count the movies that have a release year of 1999 Which comedy genre movie has the most awards? How many movies have a imdb.rating > 4? Prompt a Natural Language Query Query with Natural Language", + "code": [ + { + "lang": "json", + "value": "[\n {\n $match: {\n year: 1999\n },\n },\n {\n $count: \"total_movies\",\n }\n]" + }, + { + "lang": "json", + "value": "[\n {\n $match: {\n genres: \"Comedy\"\n }\n },\n {\n $sort: {\n \"awards.wins\": -1,\n \"awards.nominations\": -1\n }\n },\n {\n $limit: 1\n },\n {\n $project: {\n _id: 0,\n title: 1,\n \"awards.wins\": 1,\n \"awards.nominations\": 1\n }\n }\n]" + }, + { + "lang": "json", + "value": "[\n {\n $match: {\n \"imdb.rating\": { $gt: 4 }\n }\n },\n {\n $group: {\n _id: null,\n count: { $sum: 1 }\n }\n }\n]" + } + ], + "preview": "You can use MongoDB Compass to generate aggregation queries using natural\nlanguage. Compass uses AI to generate aggregations based on prompts\nyou provide. 
Querying with natural language can be a helpful starting\npoint and assist you in learning to write MongoDB queries.", + "tags": null, + "facets": { + "target_product": [ + "compass" + ] + } + }, + { + "slug": "query-with-natural-language/prompt-natural-language-query", + "title": "Prompt a Natural Language Query", + "headings": [ + "About this Task", + "Before you Begin", + "Steps", + "Navigate to the Natural Language Query Bar", + "Type a question about your collection", + "Run the query", + "Example", + "Next Steps", + "Learn More" + ], + "paragraphs": "You can use MongoDB Compass to generate queries using natural\nlanguage. Compass uses AI to generate queries based on prompts\nyou provide. Querying with natural language can be a helpful starting\npoint and assist you in learning to write MongoDB queries. When you query your data using natural language in Compass, the text of\nyour prompts and details about your MongoDB schemas are sent to\nMicrosoft and OpenAI for processing. Your data is not stored on any\nthird party storage systems or used to train AI models. This software uses generative artificial intelligence. It is\nexperimental and may give inaccurate results. Your use of this software\nis subject to MongoDB's: Terms of Use Acceptable Use Policy Privacy Policy You can query with natural language to create both queries and\naggregations. If your prompt results in an aggregation, you are\nautomatically redirected to the Aggregations tab and\na pop-up displays indicating your prompt requires aggregation stages. You can also provide natural language prompts on the\n aggregations tab . This page uses the sample_mflix.movies \ncollection from the Atlas sample dataset. It can be difficult to predict the complexity of the query or pipeline\nreturned by the Compass AI. You may need to adjust the\n MAX TIME MS query bar option to either allow longer query\nprocessing time or prevent long-running queries. For details, see\n Adjust Maximum Time for Query Operations . You must enable natural language querying in Compass . The Natural Language Query Bar displays. Select the Documents tab. Click the Generate query button. Type a natural language prompt for your collection into the query\nbar. For example: Which movies were released in 2000? Press enter or click the Generate query button. A filter query populates in the Filter bar. The results populate in the documents view. Before running the query, make sure to thoroughly review the\nsyntax in the Filter bar. Ensure the returned\nquery has the fields and operators that match your desired use case. Press enter or click the Find button to execute\nthe query. You can optionally provide feedback by clicking the \nor icon by the Natural Language Query Bar \nand provide details on your experience. Your feedback is not used to train any AI models. Below are examples of prompts to help you understand expected results\nwhen using natural language prompts. Prompt Response Which movies have a \"PG\" rating? Which movies include \"David Mamet\" in the writers array field? Which movies have a runtime greater than 90? Prompt a Natural Language Aggregation Query with Natural Language", + "code": [ + { + "lang": "json", + "value": "{\"rated\": \"PG\"}" + }, + { + "lang": "json", + "value": "{\"writers\": \"David Mamet\"}" + }, + { + "lang": "json", + "value": "{\"runtime\": {$gt: 90}}" + } + ], + "preview": "You can use MongoDB Compass to generate queries using natural\nlanguage. Compass uses AI to generate queries based on prompts\nyou provide. 
Querying with natural language can be a helpful starting\npoint and assist you in learning to write MongoDB queries.", + "tags": null, + "facets": { + "target_product": [ + "compass" + ] + } + }, + { + "slug": "query-with-natural-language/query-with-natural-language", + "title": "Query with Natural Language", + "headings": [ + "Use Cases", + "Behavior", + "Get Started", + "Details" + ], + "paragraphs": "You can use MongoDB Compass to ask natural language questions about your\ndata. Compass uses AI to generate filter queries and aggregations based\non the prompts you provide. You may want to use natural language to query in Compass to: Ask plain text questions about your data. Create an initial query or aggregation pipeline that you can\nmodify to suit your requirements. Learn how to write complex queries with multiple aggregation stages. Compass natural language querying is available starting in version\n 1.40.x . Natural language querying utilizes Azure Open AI \nas its current provider. This provider may be subject to change in\nthe future. The MongoDB Compass natural language querying feature is on a rolling\nrelease schedule. As a result, some users may temporarily have functionality\nthat other users do not. Enable Natural Language Querying Prompt a Natural Language Query Prompt a Natural Language Aggregation When you query your data using natural language in Compass, the text of\nyour prompts and details about your MongoDB schemas are sent to\nMicrosoft and OpenAI for processing. Your data is not stored on any\nthird party storage systems or used to train AI models. This software uses generative artificial intelligence. It is\nexperimental and may give inaccurate results. Your use of this software\nis subject to MongoDB's: Terms of Use Acceptable Use Policy Privacy Policy", + "code": [], + "preview": "You can use MongoDB Compass to ask natural language questions about your\ndata. 
Compass uses AI to generate filter queries and aggregations based\non the prompts you provide.", + "tags": null, + "facets": { + "target_product": [ + "compass" + ] + } + }, + { + "slug": "release-notes", + "title": "Release Notes", + "headings": [ + "MongoDB Compass 1.44.4", + "MongoDB Compass 1.44.3", + "MongoDB Compass 1.44.0", + "MongoDB Compass 1.43.6", + "MongoDB Compass 1.43.5", + "MongoDB Compass 1.43.4", + "MongoDB Compass 1.43.3", + "MongoDB Compass 1.43.2", + "MongoDB Compass 1.43.1", + "MongoDB Compass 1.43.0", + "MongoDB Compass 1.42.5", + "MongoDB Compass 1.42.3", + "MongoDB Compass 1.42.2", + "MongoDB Compass 1.42.1", + "MongoDB Compass 1.42.0", + "MongoDB Compass 1.41.0", + "MongoDB Compass 1.40.4", + "MongoDB Compass 1.40.3", + "MongoDB Compass 1.40.2", + "MongoDB Compass 1.40.1", + "MongoDB Compass 1.40.0", + "MongoDB Compass 1.39.4", + "MongoDB Compass 1.39.3", + "MongoDB Compass 1.39.2", + "MongoDB Compass 1.39.1", + "MongoDB Compass 1.39.0", + "MongoDB Compass 1.38.2", + "MongoDB Compass 1.38.1", + "MongoDB Compass 1.38.0", + "MongoDB Compass 1.37.0", + "MongoDB Compass 1.36.4", + "MongoDB Compass 1.36.3", + "MongoDB Compass 1.36.2", + "MongoDB Compass 1.36.0", + "MongoDB Compass 1.35.0", + "MongoDB Compass 1.34.2", + "MongoDB Compass 1.34.1", + "MongoDB Compass 1.33.1", + "MongoDB Compass 1.33.0", + "MongoDB Compass 1.32.3", + "MongoDB Compass 1.32.2", + "MongoDB Compass 1.32.1", + "MongoDB Compass 1.32.0", + "MongoDB Compass 1.31.3", + "MongoDB Compass 1.31.2", + "MongoDB Compass 1.31.1", + "MongoDB Compass 1.31.0", + "MongoDB Compass 1.30.1", + "MongoDB Compass 1.29.6", + "MongoDB Compass 1.29.5", + "MongoDB Compass 1.29.4", + "MongoDB Compass 1.28.4", + "MongoDB Compass 1.28.1", + "MongoDB Compass 1.26.1", + "MongoDB Compass 1.26.0", + "MongoDB Compass 1.25.0", + "MongoDB Compass 1.24.1", + "MongoDB Compass 1.23", + "MongoDB Compass 1.22", + "MongoDB Compass 1.21", + "MongoDB Compass 1.20", + "MongoDB Compass 1.19", + "MongoDB Compass 1.18", + "MongoDB Compass 1.17", + "MongoDB Compass 1.16", + "MongoDB Compass 1.15", + "MongoDB Compass 1.14", + "MongoDB Compass 1.13", + "MongoDB Compass 1.12", + "MongoDB Compass 1.11", + "MongoDB Compass 1.10", + "MongoDB Compass 1.9", + "MongoDB Compass 1.8", + "MongoDB Compass 1.7", + "MongoDB Compass 1.6", + "MongoDB Compass 1.5", + "MongoDB Compass 1.4", + "MongoDB Compass 1.3", + "1.3.0-beta.3 - July 12, 2016", + "1.3.0-beta.2 - June 29, 2016", + "1.3.0-beta.0 - June 27, 2016", + "MongoDB Compass 1.2", + "1.2.0-beta.3 - June 23, 2016", + "1.2.0-beta.2 - June 1, 2016", + "1.2.0-beta.1 - May 10, 2016", + "MongoDB Compass 1.1", + "1.1.1 -- Jan 19, 2016", + "MongoDB Compass 1.0", + "1.0.1 -- Dec 18, 2015", + "Bug Fixes", + "Improvements" + ], + "paragraphs": "Released September 18, 2024 New Features: Bug Fixes: Full Changelog available on GitHub Support for multiple KMS (Key Management Service) options from the\nsame provider ( COMPASS-8082 ). Remove outdated \"toggle shell\" keyboard shortcut ( COMPASS-8259 ). Use executionStats verbosity for explain plans ( COMPASS-8263 ). Trim whitespaces when creating or editing a namespace ( COMPASS-8123 ). Released September 5, 2024 New Features: Bug Fixes: Full Changelog available on GitHub When selecting a query history item from autocomplete, automatically move the\ncursor to the end of the editor. Compass supports Queryable Encryption range queries on encrypted fields\n( COMPASS-7066 ). Update query history autocompletion to be more selective\n( COMPASS-8241 ). 
Fix nextPage availability logic ( COMPASS-8239 ). Check for Vector Search support when showing edit templates\n( COMPASS-8235 ). Handle special characters in SSH URL correctly ( COMPASS-8254 ). Remove certificates without issuer from system CA list\n( COMPASS-8252 ). Released September 3, 2024 New Features: Bug Fixes: Full Changelog available on GitHub Support for users working with data stored on different connections\nstored in the Compass connections window ( COMPASS-6410 ). Adds per-connection proxy settings ( COMPASS-8142 ). Enables proxy support feature flag ( COMPASS-8167 ). Stream import errors to the log file with proper back pressure\n( COMPASS-7820 ). In the bulk update preview, convert array indexes from strings to\nnumbers ( COMPASS-8218 ). Bump shell-bson-parser to 1.1.2 ( MONGOSH-1859 ). Released August 23, 2024 New Features: Bug Fixes: Full Changelog available on GitHub Release query history autocompletion ( COMPASS-8096 ). Adds ability to load more documents per page in Documents view\n( COMPASS-6903 ). Remove useSystemCA by making it default\n( COMPASS-8077 ). Add option to prefer ID token over access token ( COMPASS-8107 ). Work around long paths issue on windows when building native\ndependencies to make sure use system ca option works\n( COMPASS-8051 ). Fix indexes ux issues ( COMPASS-7084 and\n COMPASS-4744 ). New tab design ( COMPASS-8122 ). Restore Isolated and Readonly special behavior in packaged\napplication ( COMPASS-8129 ). Released July 31, 2024 New Features: Bug Fixes: Full Changelog available on GitHub A warning toast message now displays when Compass cannot access credential\nstorage ( COMPASS-7819 ). Fixed a bug when closing the Performance telemetry screen\ncaused a crash ( COMPASS-8056 ). Fixed a bug when sidebar search displayed only database names when both\ndatabase and other object names matched the search\ncriteria ( COMPASS-8026 ). Fixed a bug that prevented adding nested fields on object-type\nfields ( COMPASS-7929 ). Fixed a bug that caused EJSON data types to export incorrectly when\nexport to JSON ( COMPASS-8099 ). Released July 1, 2024 Bug Fixes: Full Changelog available on GitHub Prevents application from hanging when selecting ranges too quickly on the\n Schema tab ( COMPASS-8048 ). Updates Electron to version 29.4.2 , which includes various\nsecurity fixes. Released June 27, 2024 New Features: Bug Fixes: Full Changelog available on GitHub Prevents modified tabs from being closed by accident\n( COMPASS-5022 ). Aggregations use maxTimeMS default on preview documents\n( COMPASS-7798 ). Fixes a regression that prevented autoconnect from working properly\n( COMPASS-8044 ). Released June 25, 2024 New Features: Bug Fixes: Full Changelog available on GitHub Shows tool tip when a query or aggregation is generated without\ncontent ( COMPASS-7837 ). Adds a confirmation dialog when quitting Compass\n( COMPASS-6435 ). Expands options when applied from a query history that has options. Regular expression and number query history fix\n( COMPASS-7215 , COMPASS-7008 ). Keeps listeners for insert document validity on document view\n( COMPASS-3246 ). Hides inaccurate collection statistics for timeseries\n( COMPASS-6712 ). Removes 'Preview' label from OIDC ( COMPASS-7666 ). Uses system ca certificates in Atlas requests and OIDC\n( COMPASS-7950 ). Hides edit view button in read only mode ( COMPASS-7688 ). Updates line numbers to be unselectable ( COMPASS-7941 ). Sidebar tab fix. 
Released June 12, 2024 New Features: Bug Fixes: Full changelog available on GitHub Added a disabled state to the Generative AI query bar. This state\ndisplays while Generative AI is fetching results COMPASS-7902 . Updated the close window hotkey to be cmd + shift +\n w to avoid conflict with close tab cmd + w \n COMPASS-7301 . Added a setting for enabling sample documents. This setting improves\nGenerative AI queries COMPASS-7931 . Updated Generative AI input to be resizable text area\n COMPASS-7940 . Fixed an issue with base64 regular expressions COMPASS-7541 . Fixed a display issue with the Create collection button when\nusing Compass in readonly mode. Removed the Hackolade banner from the schema tab\n COMPASS-7974 . Fixed an issue when a connection gets saved as favorite from the old\nsidebar COMPASS-7980 . Fixed a display issue for long index names COMPASS-7016 . Various user interface message and verbiage improvements. Released May 02, 2024 New Features Bug Fixes Full changelog available on GitHub New Features Allows users to specify hints ( COMPASS-7829 ) Shows an error message when connecting to Stream Processing ( COMPASS-7809 ) Warns users when generated aggregation contains a write operation ( COMPASS-7298 ) Adds support to notify users for an update on Linux/MSI ( COMPASS-7686 ) Adds gradient while a generate request is in progress ( COMPASS-7836 ) Includes the error count in the import toast ( COMPASS-7826 ) Removes the GenAI \"Preview\" badge ( COMPASS-7890 ) Disables the query bar controls while GenAI is running ( COMPASS-7839 ) Ensures that the confirmation modal always asks for confirmation input ( COMPASS-7613 ) Bumps Electron to 29 and removes support for RHEL7 ( COMPASS-7868 ) Parses AI response correctly ( COMPASS-7780 ) Fixes CPU hikes because of bad useEffect dependency Queries generated by Natural Language Querying have improved\nquality and accuracy. Released April 08, 2024 New Features Bug Fixes Full changelog available on GitHub Updates Atlas login screen flow ( COMPASS-7755 ) Handles collection subtab from link ( COMPASS-7731 ) Z-indexed stacked components ( COMPASS-7732 ) Removes the ability to collapse the sidebar ( COMPASS-7812 ) Updates the \u201cUse Generative AI\u201d settings flow ( COMPASS-7756 ) Show the in-progress index in the list again ( COMPASS-7789 ) Do not throw when rendering invalid dates ( COMPASS-7749 ) Can't sign out if not signed in yet ( COMPASS-7787 ) Click current op for details scoping error ( COMPASS-7805 ) MongoDB Compass version 1.42.4 was not released. 
Released March 20, 2024 New Features Bug Fixes Full changelog available on GitHub Install updates without confirmation in the background by default\n( COMPASS-7616 ) Enable rename collection feature flag ( COMPASS-7699 ) Bump OIDC dependencies to latest versions Bump mongosh , driver, and bson to latest versions Calculate the maximum line length in a more stack efficient way\n( COMPASS-7647 ) Only access defaultSession when app is ready Don't allow the readonly filter to grow in width indefinitely\n( COMPASS-7728 ) Released March 01, 2024 New Features Bug Fixes Full changelog available on GitHub Install updates without confirmation in the background by default\n( COMPASS-7616 ) Double space not applied from schema or query history\n( COMPASS-6980 ) Reset atlas search index on reopen and type change Don't include the version number in process.title because it shows in\nthe menubar in macOS Sonoma ( COMPASS-7513 ) Show the folder through the main process ( COMPASS-7671 ) Released February 15, 2024 New Features Bug Fixes Full changelog available on GitHub Added vector search index creation to the create search indexes\nmodal ( COMPASS-7302 ). Vector search type indexes now display in the search index table\n( COMPASS-7509 ). Improved validation of command line arguments ( COMPASS-7260 ). Fixed a display issue that made update and delete labels hidden on\nnarrow windows. Released January 31, 2024 New Features Bug Fixes Full changelog available on GitHub Compass now supports Bulk Update and\n Bulk Delete \noperations ( COMPASS-7329 , COMPASS-7330 ). Fixed namespace stats that refresh after document updates. Fixed table card autosizing ( COMPASS-7548 ). Fixed an issue when opening a new collection tab if an existing\ncollection tab with the same name was already open\n( COMPASS-7556 ). Fixed an issue where switching tabs would reset vertical scrolling\nto the top position ( COMPASS-7370 ). Fixed an issue where invalid dates resulted in a blank export page\n( COMPASS-7515 ). Released December 18, 2023 New Features: Bug Fixes: Full changelog available on GitHub Auto-insert empty document for all fields in the query bar. Add filter to saved connections when there are more than ten saved\n( COMPASS-7439 ). Implement text search for aggregation stage wizard. Improve stage wizard discoverability and interaction\n( COMPASS-7350 ). Use dark colors for background on initial app loading with dark theme. Remove outdated guide cues ( COMPASS-7396 ) Introduced workspaces plugin and implemented single top-level tabs\n( COMPASS-7354 ). Include OIDC in $external auth mechanism list ( COMPASS-7512 ). Properly render syntax errors in embedded shell ( COMPASS-7497 ). Expanded documents retain state after switching tabs ( COMPASS-7318 ). Prevent shell container from overlaying sidebar content\n( COMPASS-7395 ). Prevent AI entry button from being submitted when the sort is submitted\n( COMPASS-7356 ). Released October 18, 2023 New Feature: Bug Fixes: Full changelog available on GitHub Integrated search index signals ( COMPASS-7176 ). Fixed tab behavior with selection ( COMPASS-7013 ). UI fixes ( COMPASS-7304 ). Made column widths smaller ( COMPASS-7341 ). Remediated vulnerability SNYK-JS-BABELTRAVERSE-5962462 ( COMPASS-7345 ). Released October 11, 2023 New Features: Bug Fixes: Full changelog available on GitHub Introduced the ability to create and manage Atlas Search indexes in\nCompass. After creating a $search index, you are redirected to the\nsearch indexes modal ( COMPASS-7247 ). 
Field names are now autocompleted when defining indexes.\n( COMPASS-7174 ). Syntax errors are now highlighted when defining indexes.\n( COMPASS-7246 ). The vectorEmbedding index template definition replaced knnVector .\n( COMPASS-7288 ). For more details, see Manage Indexes and\n Create and Manage an Atlas Search Index . Improved AI feedback experience. For more information on\nGenerative AI natural language queries in Compass, see Query with Natural Language .\n( COMPASS-7211 , COMPASS-7251 ). Removed useNewUrlParser and useUnifiedTopology from export\nto language options ( COMPASS-4897 ). Fixed GUI flicker when closing the search index modal ( COMPASS-7248 ). Downgraded Electron to version 25.8.4 ( COMPASS-7291 ). Corrected an error displaying collections in Atlas Data Federation\nthrough the side bar ( COMPASS-7307 ). Fixed an issue when the insert dialog did not catch invalid bson ( COMPASS-7316 ). Released September 28, 2023 Bug Fixes: Full changelog available on GitHub Hot fixed an issue where users were not able to run Compass after\nupgrading to 1.40.0 ( COMPASS-7270 , COMPASS-7269 ). Repairs broken preferences by setting default values. Released September 27, 2023 New Features: Bug Fixes: Full changelog available on GitHub When using a search index, a new tab prompts you and redirects you to\nthe aggregation tab with the $search operator and the index name populated\n( COMPASS-7168 ). Added a drop-down to choose a search index template ( COMPASS-7173 ). Ensure Atlas Login doesn't show in settings if you didn't get the\nGenerative AI feature rollout. Released September 26, 2023 New Features: Bug Fixes: Full changelog available on GitHub Upgrade embedded MongoDB shell to\nversion 2.0.0 ( COMPASS-7057 ). Upgrade Node driver to version 6.0.0 ( COMPASS-7057 ). Upgrade Electron to version 26 . Set up local Atlas detection ( COMPASS-7213 ). Display local Atlas development environments as such\n( COMPASS-7156 ). Introduce $vectorSearch aggregation stage to MongoDB 7.1 and 7.0.x\n( COMPASS-7064 ). Enable Atlas search index \nmanagement ( COMPASS-7238 ). Enable natural language query and pipeline generation (incrementally\nrolled out to users) ( COMPASS-6866 ). Account for changed key order in query ( COMPASS-7194 ). Remove out stages before running explain plan ( COMPASS-7012 ). Don't automatically select regex when detecting regex\n( COMPASS-7144 ). Released September 6, 2023 Bug Fixes: Full Changelog available on GitHub Allows [object Object] as a valid string value in TypeChecker\n( COMPASS-7132 ). Doesn't treat non-numbers in CSV headers as array indexes\n( COMPASS-7157 ). Limits when custom paste handling is applied and uses clipboard data when\nauto-fixing user input ( COMPASS-7149 ). Updates Electron to v24.8.2 to address security vulnerabilities. This updates\nspecifically addresses CVE-2023-4427 and CWE-119. Released August 28, 2023 Bug Fix: Released August 22, 2023 New Features: Bug Fixes: Full Changelog available on GitHub Starting in version 1.39.2, MongoDB Compass no longer supports migrating from legacy\nconnection files that pre-date version 1.31.0. Legacy connections refer to an\ninternal Compass connection-options format that is stored on disk and no\nlonger supported after version 1.39.0. If you have legacy connections saved in your favorites, export the\nconnections on version 1.39.0 to convert them\ninto the new format before updating to version 1.39.2 or later. Show insights for unbound arrays ( COMPASS-6836 ). 
Use modal to highlight legacy connections ( COMPASS-7072 ). Automatically add { } to Find queries ( COMPASS-6530 ). Show list of legacy connections. ( COMPASS-7081 ). Fix error that would occur when modifying a filter in the schema tab\n( COMPASS-6944 ). Use correct tab name for indexes & validation ( COMPASS-7022 ). Strip unknown preferences when loading ( COMPASS-7026 ). Split connection storage between processes ( COMPASS-7078 ). Clear drop collections input state if drop collections success\n( COMPASS-7035 ). Allow empty optional string flags ( COMPASS-7101 ). Released August 8, 2023 New Features: Full Changelog available on GitHub Updates Electron to v23.3.12 to address security vulnerabilities. The\nspecific CVEs addressed in this update are CVE-2023-3730, CVE-2023-3732,\nand CVE-2023-3728. Released July 14, 2023 New features: Bug Fixes: Full Changelog available on GitHub Enable proactive performance insights by default. Proactive performance\ninsights analyze your queries and suggest ways to improve performance.\n( COMPASS-7000 ) Fix issue where Compass would create an incorrect index.\n( COMPASS-6981 ) Ensure that Compass displays indexes in the correct case.\n( COMPASS-6510 ) Cap number of log files to 100. ( MONGOSH-1449 ) Map project to projection before emitting open-explain-plan event\n( COMPASS-6995 ) Fix issue with Windows hotkeys. ( COMPASS-6777 ) Handle missing execution stats in raw explain. Released June 30, 2023 Bug Fix: Full Changelog available on GitHub Fix issue with projecting document size. (Reverts\n COMPASS-6837 ) New Features: Full Changelog availble on GitHub Auto expand object and array field types on field add ( COMPASS-6939 ). Show unindexed query insight in explain plan modal ( COMPASS-6933 ). Show array length on array fields on documents ( COMPASS-6938 ). Add ctrl + tab and ctrl + shift + tab hotkeys for switching tabs. Enable new explain plan by default. Adds insights for usage of $text and $regex in aggregation builder and\ncollection header ( COMPASS-6834 ). Add cues ( COMPASS-6614 ). Signal for bloated documents during import. Released June 21, 2023 New Features: Bug Fixes: Full Changelog available on GitHub OpenID Connect (OIDC) authentication ( COMPASS-6803 ). Stage wizard, which helps build aggregation pipelines\n( COMPASS-6814 ). Add visual tree and update summary for aggregation explain plans\n( COMPASS-6821 and COMPASS-6888 ). Open a collection in a new tab shortcut. Add performance tab indicator to state that information about certain\ncollections is missing ( COMPASS-6593 ). Add Atlas error message when connection fails because of IP access\nissue ( COMPASS-6842 ). Show insight when query is unindexed ( COMPASS-6832 ). Show insight when number of collections is too high\n( COMPASS-6835 ). Add unindexed aggregation insight ( COMPASS-6833 ). Implement guide cue component in Compass to provide contextual user\ninterface assistance ( COMPASS-6334 ). Add support for hiding and unhiding indexes in the Index tab. For Windows installations, Compass now requires Windows version 10 or later\n( COMPASS-6897 ). Add file type filters when exporting data ( COMPASS-6890 ). Rename \"Less Options\" to \"Fewer Options\" ( COMPASS-6774 ). Support dark mode for TypeEditor drop down ( COMPASS-6893 ). Fix execution time for aggregation explain plan\n( COMPASS-6496 ). Fix fast XML parser issue ( COMPASS-6905 ). Use enableShell setting to control the runtime start and stop. Include has_sort in telemetry. 
Various jQuery fixes ( COMPASS-6885 , COMPASS-6884 ,\n COMPASS-6883 , and COMPASS-6882 ). Replace got with fetch in redirect ( COMPASS-6881 ). Fix redirects ( COMPASS-6880 and COMPASS-6879 ). Show 1 as the page number for collections when no entries are present. Select combo box option onBlur issue ( COMPASS-6511 ). Use mongodb-cloud-info v2 for IPv6 support in cloud metrics\n( COMPASS-6795 ). Fix editor dark mode background selection color\n( COMPASS-6910 ). Disable the autoupdater for MSI installs (Windows without Squirrel)\n( COMPASS-6857 ). Fix for bulkWrite when importing data ( COMPASS-6928 ). Released May 25, 2023 New Features: Bug Fixes: Full Changelog available on GitHub Dark mode has been updated with a modern theme. If the modern theme\nis enabled in Settings>Feature Preview , Compass defaults to\nthe modern dark mode theme. Various user experience improvements when using the Import and Export functionality in Compass ( COMPASS-5576 , COMPASS-6543 ). Combine array fields into one in the import CSV preview ( COMPASS-6766 ). Add settings to sidebar menus ( COMPASS-6796 ). Show password only when user focuses on input ( COMPASS-6161 ). Add autocomplete support for $percentile, $median and $$USER_ROLES ( COMPASS-6780 , COMPASS-6781 ). Toggle state of Edit connection string is based on the new global preference. This setting controls whether a password is visible when creating a new connection. Compass supports the new Queryable Encryption protocol. Starting\nin v1.37.0, Compass is not compatible with MongoDB server versions\n earlier than 7.0 when using Queryable Encryption ( COMPASS-6601 , COMPASS-6602 ). When using queryable encryption on pre-7.0 servers, you can decrypt\nencrypted data, but you cannot insert or query data. Add export aggregation code preview to export modal ( COMPASS-6725 ). Differentiate between new Date() and Date() ( COMPASS-6755 ). Fix guessFileType() when JSON fails and CSV lines are huge. Flush import progress throttle on import error. Feature flag default values ( COMPASS-6525 ). Allow updates on a sharded collection ( COMPASS-6058 ). Detect line breaks, pass it on to papaparse ( COMPASS-6819 ). Update reset on query bar to reset results and emit query-changed ( COMPASS-6805 ). 
Released April 27, 2023 New Features: Bug Fixes: Full Changelog available on GitHub Update add data icon to plus with circle from download ( COMPASS-6494 ) Show import progress in toast, make import background\n( COMPASS-6540 , COMPASS-6555 ) Import progress ( COMPASS-6721 ) Update new connection text to new window ( COMPASS-6723 ) Remove re-count when not available ( COMPASS-5179 , COMPASS-6649 ) Fill autocomplete on tab ( COMPASS-6695 ) Show error border when focused ( COMPASS-6724 ) Compass readonly allows to drop namespaces from the sidebar ( COMPASS-6687 ) Fixes the problem of refresh button on collection tab not refreshing\nthe collection stats ( COMPASS-6738 ) Update windows config file fetching location one folder up ( COMPASS-6527 ) If listCSVFields() or analyzeCSVFields() fails it will display the error\nin the modal ( COMPASS-6737 ) Released April 13, 2023 New Features: Bug Fixes: Add links to the documentation to the agg and stage\nautocompleter suggestions ( COMPASS-6688 ) Listen to query-history events in query-bar and open saved items\n( COMPASS-6680 , COMPASS-6681 , COMPASS-6685 ) Fix loading configuration file on windows, remove arg check ( COMPASS-6527 ) Remove count when exporting views and time series collections\n( COMPASS-5179 , COMPASS-6649 ) Released March 29, 2023 New Features: Bug Fixes: Full Changelog available on GitHub Removes focus mode feature flag, always show ( COMPASS-6474 ) Analyze CSV fields and auto-select the correct type ( COMPASS-6536 ) Add GitHub source code link to help menu and window menu\n( COMPASS-6585 ) Makes analyzeCSVFields() skippable ( COMPASS-6638 ) Apply readPref to initial ping command ( COMPASS-6595 ) Fix guessFileType() for large JSON docs ( COMPASS-6629 ) Fix memory leak in listCSVFields() ( COMPASS-6630 ) Add dark mode colours for the mixed warning Abort analyzeCSVFields() when closing the import modal\n( COMPASS-6633 ) Optimize CSV field type detection Released March 15, 2023 New Features: Bug Fixes: Full Changelog available on Github: Enable focus mode ( COMPASS-6474 ) Add stage button between stages ( COMPASS-6382 ) Use type from last array element when inserting new element to array\n( COMPASS-6432 ) Redirect to the new collection after creating it ( COMPASS-6019 ) Stage toolbar ( COMPASS-6381 ) LG darkmode support and UI cleanup in the explain tab ( COMPASS-6463 ) Adds a cancellable loader to explain Enable column store indexes for MongoDB 6.3 ( COMPASS-6487 ) flexi bucket options for Timeseries Upgrade mongosh to 1.7.0 Include preview rows in the listCSVFields() result ( COMPASS-6422 ) Enable focus mode ( COMPASS-6474 ) When dropping a collection or database, redirect to either the database or\ndatabases view ( COMPASS-6018 , COMPASS-6434 ) Dark theme improvements in the settings modal ( COMPASS-6552 ) Conditional confirmation modal ( COMPASS-6355 ) Adds the refresh CTA to sidebar Open file input before import modal ( COMPASS-6535 ) Enable LG darkmode as public preview ( COMPASS-6515 ,\n COMPASS-6556 ) Hook for keyboard shortcuts ( COMPASS-6551 ) Adds refresh CTA on database and collection list view ( COMPASS-6431 ) Place settings under the most idiomatic menu for the platform\n( COMPASS-6430 ) If a date is in the safe range, go with relaxed EJSON rather than canonical\n( COMPASS-5744 ) Redesign of add stage button ( COMPASS-6449 ) Optimises the opening of tab Don't show negative count on delete when no document count ( COMPASS-5996 ) Stop on errors when stopOnErrors is true ( COMPASS-6518 ) Undefined rather than false if 
getCloudInfo fails, support SRV URIs\n( COMPASS-6111 ) Cancel edit on non-existent field ( COMPASS-6505 ) Halt autoupdater on application exit to prevent logger crashing\n( COMPASS-6051 ) Do not reset stage value if it was already changed ( COMPASS-6584 ) Released January 11, 2023 New Features: Bug Fixes: Update export modal to LeafyGreen components ( COMPASS-6220 ) Replace types dropdown with LG select Use leafygreen combobox to select stages Replace export-to-language with leafygreen components ( COMPASS-6219 ) Add connection import/export UI Convert compass query history to new components ( COMPASS-6221 ) Use the same date hook in query history as in saved aggregations ( COMPASS-6221 ) Add forceConnectionOptions option ( COMPASS-6068 ) Implement readOnly option ( COMPASS-6064 ) Update import modal to LeafyGreen components ( COMPASS-6220 ) Add --username and --password for auto-connect ( COMPASS-6216 ) Expose protectConnectionStrings in settings UI ( COMPASS-6262 ) Kerberos password field setting ( COMPASS-5950 ) Add maxTimeMS as setting ( COMPASS-6063 ) Update compass validation components to leafygreen ( COMPASS-6237 ) Update explain plan components ( COMPASS-6236 ) Implement enableDevTools option ( COMPASS-6061 ), ( COMPASS-5615 ) Use rebranded components in the document table view Add tracking event when stage value changes ( COMPASS-6310 ) Update Compass aggregations modals ( COMPASS-6286 ) Add LG darkTheme support for table view Remove trackErrors setting ( COMPASS-5708 ) Move all autoupdates logic to compass main process, allow to dismiss updates ( COMPASS-6057 ) ( COMPASS-6303 ) Convert more insert dialog code to compass components & leafygreen ( COMPASS-6285 ) Register Compass as a protocol handler for mongodb:// ( COMPASS-6085 ) Add --show-example-config flag ( COMPASS-6084 ) Cancellable aggregate and schema analysis ( COMPASS-5668 ) Cancellable find and explain ( COMPASS-5668 ) Implement new input docs card design ( COMPASS-6234 ) Update scrollbar styles ( COMPASS-5597 ) Cancellable counts ( COMPASS-5668 ) Update aggregations stage components ( COMPASS-6234 ) Enable pipeline as text feature ( COMPASS-6299 ) Index tab UI improvements ( COMPASS-6323 ), ( COMPASS-6329 ) Add refresh document count in aggregation results ( COMPASS-6156 ) Confirm when deleting pipeline ( COMPASS-4137 ) Improve table view interactions Do not save auto-connection in recents ( COMPASS-6290 ) Check for root level when deciding if _id key is editable ( COMPASS-6160 ) Fix the saved pipelines popover's scrolling ( COMPASS-6277 ) Disable deprecation warnings in production ( COMPASS-6322 ) Ignore non-digits in number input ( COMPASS-6326 ) Speed up export ( COMPASS-6332 ) Increase compass schema value bubble contrast ( COMPASS-6230 ) Fix macOS protocol handler connection string passing Fix typo on Indexes screen Avoid race condition when installing listeners Hide delete for db/coll cards in readonly mode ( COMPASS-6292 ) Freeze settings modal height and adjust categories ( COMPASS-6325 ) Fix nested field autocomplete ( COMPASS-6335 ) Reset contains error check on document json view edit cancel ( COMPASS-6059 ) Pass the preference as a prop when nesting Field ( COMPASS-6363 ) Hide add stage in toolbar ( COMPASS-6373 ) Make $out options more clear in agg pipeline builder ( COMPASS-6304 ) Speed up document json view ( COMPASS-6365 ) Export to Language (Java) has incorrect class name ( COMPASS-6159 ) Enable next page button when count is unknown ( COMPASS-6340 ) Initialize before identify and use get-os-info 
from npm Output stage destination name ( COMPASS-6407 ) Set width of compass shell to avoid overflow ( COMPASS-6411 ) Released December 16, 2022 Bug Fixes: Fix (compass-editors): fix nested field autocomplete ( COMPASS-6335 ) Fix (schema): fix display of geo visualizations for nested fields\n( COMPASS-6363 ) Released November 21, 2022 New Features: Bug Fixes: Full changelog available on GitHub . Add command-line interface and global configuration ( COMPASS-6069 ,\n COMPASS-6070 , COMPASS-6071 , COMPASS-6073 ,\nand COMPASS-6074 ) Flip the new toolbars feature flag, always show new toolbars\n( COMPASS-5679 ) Add autocompleter for aggregation, use autocompleter in import pipeline\nmodal ( COMPASS-6175 ) Add protectConnectionStrings option ( COMPASS-6066 ) Adds the networkTraffic configuration option to block outgoing network\nconnections ( COMPASS-6065 ) Show icons in the sidebar menus ( COMPASS-6081 ) Rebranding components ( COMPASS-6100 , COMPASS-6101 ,\n COMPASS-6121 , COMPASS-6048 , and COMPASS-6187 ) Layout improvements ( COMPASS-6148 , COMPASS-6150 , and\n COMPASS-5582 ) Add theme as regular setting ( COMPASS-6067 and COMPASS-5284 ) Fix installation issues on Windows ( COMPASS-6315 ) Fix map rendering and add e2e tests ( COMPASS-6131 ) Reconnect CSFLE client after collMod ( COMPASS-5989 ) Improve selection area for insert document editor Add map for collection stats for tab namespace isolation\n( COMPASS-6146 ) Open info links in browser ( COMPASS-6193 ) Menu not fully showing for field actions, remove old backgrounds\n( COMPASS-6186 ) Released September 14, 2022 New Features: Bug Fixes: Full changelog available on GitHub . Update saved aggregations to open as popover ( COMPASS-5852 ) Add error message hint for crud timeout message ( COMPASS-4638 ) Show namespace on saved queries and pipelines popovers ( COMPASS-6028 ) Add sparse option for indexes ( COMPASS-1963 ) Only show columnstore index option for mongodb server >= 7 ( COMPASS-5970 ) Add progress badge to the indexes table ( COMPASS-5944 ) Fix table header for indexes ( COMPASS-6042 ) Adjust crypt shared library download script for M1 builds Hide collection submenu on disconnect ( COMPASS-6047 ) Align delete index modal text Fix updating arrays with dots in names ( COMPASS-6011 ) Hide document views when there are no documents Fix import deep JSON overwriting variables ( COMPASS-5971 ) Released August 31, 2022 New Features: Full changelog available on GitHub . arm64 build for darwin is now available ( COMPASS-5574 ) Released July 13, 2022 New Features: Bug Fixes: Full changelog available on GitHub . 
bson-transpilers: Export to PHP from Compass ( PHPLIB-719 ) compass-components: Enable ACE code formatter ( COMPASS-5923 ) compass-indexes: Update toolbar to leafygreen components ( COMPASS-5676 ) connect: Add Save & Connect button ( COMPASS-5776 ) explain-plan-helper: Add support for indexes in stages ( COMPASS-5878 ) Automatically refresh after CSFLE insert ( COMPASS-5806 ) Improve Binary handling ( COMPASS-5848 ) Align elements on the create collection modal ( COMPASS-5921 ) connection-form: Align advanced tab and input field widths import: Pre-create an empty object before creating its properties ( COMPASS-5076 ) ace-autocompleter: Provide a special snippet for $merge stage in ADL connect-form: Connect to the newly created favourite ( COMPASS-5776 ) documents: Fixes to recent queries queries: Ignore duplicate recent queries ( COMPASS-2237 ) aggregation-explain: Show indexes ( COMPASS-5879 ) explain-plan-helper: Use execution time of cursor stage ( COMPASS-5858 ) Released June 7, 2022 Fixes an issue where $merge and $out aggregation stages would\nnot appear in the pipeline builder when connected to a Data Lake . Full changelog available on GitHub . Released June 3, 2022 New Features: Bug Fixes: Full changelog available on GitHub . Enables In-Use Encryption ( COMPASS-5634 ) Show index keys in aggregation explain plan ( COMPASS-5857 ) Open aggregation pipeline in correct namespace ( COMPASS-5872 ) Hide $documents operator in collection aggregations\n( COMPASS-5843 ) Released May 31, 2022 New Features: Bug Fixes: Full changelog available on GitHub . Adds explain plan for aggregations ( COMPASS-5788 ) Allows import into Queryable Encryption collections\n( COMPASS-5810 ) In the pipeline builder, hide the stage error message when changing\nthe aggregation operator ( COMPASS-5684 ) Remove unique option on columnstore index creation\n( COMPASS-5830 ) Reconnect the SSH tunnel when it gets disconnected\n( COMPASS-5454 ) Released May 17, 2022 New Features: Bug fixes: Full changelog available on GitHub . Export aggregation pipelines to Go. Bump Node driver to version 4.6.0 and embedded shell to version 1.4.1\n( COMPASS-5619 ). Support columnstore indexes and clustered collections\n( COMPASS-5665 , COMPASS-5666 ). Add srvMaxHosts to advanced connection options. Disable aggregation toolbar options when pipeline is invalid. Style improvements. In connection form, allow empty hosts. In aggregation pipeline builder, update default document preview\namount from 10 to 20. Resize elements for improved visibility. Released April 14, 2022 Bug fixes: Full changelog available on GitHub . Aggregation screen no longer shows a \"Cannot have two html5 backends\"\nerror. ( COMPASS-5655 ) Connections that use certificates no longer fail with \"option\nusesystemca is not supported\" ( COMPASS-5729 ) You can edit null values in CRUD view ( COMPASS-5697 ) Invalid UUID values display correctly in CRUD view\n( COMPASS-5726 ) Editing Int64 values in JSON view no longer changes their type to\nInt32 ( COMPASS-5710 ) Released April 05, 2022 Bug Fixes: Full changelog available on GitHub . Fixed \"rendering AggregationsPlugin\" error. CRUD Fixes Fixed a bug that updated a document's data type to String when\nediting a field of data type Date in CRUD Document view. Released March 31, 2022 New Features: Bug Fixes: View this release on GitHub . All JIRA issues closed in 1.31.0 . New connection experience Add new form for Kerberos options. Support loading system CA store. Use new favorite connection modal in sidebar. 
Add support for MONGODB_AWS. Aggregation and Query Improvements New saved aggregation and queries view. Add link and descriptions for the $densify aggregation stage. Add ability to export queries and aggregations to Ruby. Update aggregation result preview card styles. Schema Tab Fixes Fix shift selecting multiple items in schema tab. Unambiguously display latitude and longitude on map. CRUD Fixes Allow empty JSON input. Increase input width for query bar max timeout ms area. Miscellaneous Fixes Add directConnection=true when converting from old model. Pick only specified columns when exporting data as JSON. Hide SSH tunnel password. Released January 13, 2022 New Features: Bug Fixes: All JIRA issues closed in 1.30.1 . Add link and descriptions for the $documents .\naggregation stage. Connect form: Add SSL/TLS option radio box group. Databases and Collections: Add async loading states for databases and\ncollections list. Export secrets methods and parse raw models. Styles: Add darkreader dark theme option. Connections: Add general tab contents to connect form. Support MongoDB 5.2 aggregations. Remove unused vars. SSH Tunnel: Remove unused import. BSON Transpilers: Account for bson Decimal128 validation changes. Make SSH tunnel use Socks5. You can now connect to replica sets and\nsharded clusters using an SSH tunnel. Compass Logging: Bump mongodb-log-writer to allow browser envs. Move theme menu from help to view in non mac/darwin. Connections: Hide socks tab on SSH form. Add SSH label. Remove compass-components from prod dependencies. toggle-shell: Use key instead of keyCode. data-service: Do not return name from adapted dbStats Do not ignore directConnection=false mocha-config-compass: Disable source map processing when running code\nin electron / web runtime in tests Released December 20, 2021 New Features: Bug Fixes: Full changelog available on GitHub . Adds loading states for collection in sidebar. Improves identification of Atlas cluster. Improve telemetry connection tracking. Dependency Upgrades: Bumps react-ace to\nversion 9.5.0. Bumps Node.js driver to version\n 4.2.1 . Bump mongosh version to 1.1.6 . Removes expired link from license. Fixes error handling in listCollections . Keeps tlsCertificateFile as URI parameter. Hides full-text search stages for time series and views. Does not overfetch connectionInfo and update the state\ntoo often. Released November 24, 2021 Bug Fixes: Full changelog available on GitHub . Fixes connection with TLS / SSL options. Fixes document searching for Serverless Atlas. collStats now always shows for collections on the\ncollection screen. Collection menu now appears when collection is selected. Released November 16, 2021 New Features: Supported Platforms: Full changelog available on GitHub . Adds client-side logging for MongoDB Compass \noperations. Improved MongoDB Compass startup time. Adds support for MongoDB 5.1 features. Improved reliability for connections. MongoDB Compass now uses: Electron version 13 Node version 14 MongoDB Compass for macOS can now run on M1 platforms that have Rosetta or\nRosetta 2 installed. For more information, see Software\nRequirements . Released August 30, 2021 New Features: Bug Fixes: Full changelog available on GitHub . Enables resizing the preview area for aggregation pipelines. Allows hiding the Query History and\n Export to Language buttons in the query view. Bumps mongosh version for the embedded shell to 1.0.4 . Properly supports all Kerberos options. 
Fixed an issue with geospatial queries being incorrectly merged. Released July 13, 2021 New Features: Bug Fixes: Full changelog available on GitHub . Adds support for load balancer connections. Adds a Granularity option when creating a time series\ncollection. Disallows editing schema validation for time series collections. Hides the Drop Collection button in readonly Compass. Geoqueries no longer populate query bar fields with null . Released April 9, 2021 New Features: Bug Fixes: Allows functions in the query bar and aggregations. When navigating to the Databases view,\n Compass now clears a previously selected collection\nfrom the left navigation. Updates the embedded MongoDB Shell to\nversion 0.9.0 . Compass now displays the expected value when you update\nfields in the table view. Creating a collection or database is now prohibited when form\nfields are empty. Schema tab graphs no longer fail to render when switching tabs. SSH tunnel no longer hangs on disconnect. Released March 3, 2021 New Features: Bug Fixes: Adds ability to create text indexes . Adds ability to cancel a connection attempt. Data is now refreshed when Find is clicked in the query\nbar. Improvements to schema analysis to prevent\ntimeouts with large datasets. Improves connection form input and validation. MongoDB Compass prevents inserting data via the JSON editor without\nspecifying a document. Previously, Compass would silently error when a\ndocument was not specified. Saving a favorite connection no longer freezes MongoDB\nCompass. Stylistic fixes. Released January 13, 2021 New Features: Bug Fixes: Updates the embedded MongoDB Shell to\nversion 0.6.1 . Improvements to connection validation. Compass no longer crashes during startup on certain versions of\nWindows. For more information see\n COMPASS-4510 . When connecting to a MongoDB deployment, Compass no longer\nautomatically inserts a value of 27017 for Port \nwhen Port is left blank. Compass no longer displays the incorrect port number when connecting\nto MongoDB via SRV record. Released December 9, 2020 New Features: Bug Fixes: Adds support for updates on sharded collections. Adds support for the print() method in the\n embedded MongoDB Shell . Updates the embedded MongoDB Shell to\nversion 0.5.2 . Provides better readonly and view handling. Adds support for multi-line string editing in the field-by-field\neditor. Provides a descriptive tooltip when selecting an aggregation stage\nin the Aggregation Pipeline Builder . Non-editable fields can now be deleted in the field-by-field editor. Fixes connection URI issues with SCRAM-SHA-256. Adds support for $out when connected to a\n Data Lake . Removes broken import and export sidebar actions. Improves x.509 authentication. Makes the x.509 username\noptional in connection validation and improves validation error\nmessages. Various other bug fixes and improvements. Released November 4, 2020 Notarizes MongoDB Compass for macOS Catalina. You should no longer need to\nmanually allow macOS to trust MongoDB Compass before running. Kerberos authentication improvements on RHEL7. Importing a text pipeline containing a $out stage no longer\ncrashes MongoDB Compass . Various other bug fixes and improvements. Released September 3, 2020 Added an embedded MongoDB Shell . You\ncan use MongoDB Shell to test queries and operations in an interactive\nJavaScript interface. Released April 28, 2020 Improved experience for\n importing and exporting data . Improved CSV parsing when importing data. 
Added support for importing a subset of fields from CSV. Provides guidance to upgrade from Community Edition. Community Edition\nis now deprecated. To learn more, see Migrate to Compass from Compass Community . Various bug fixes and improvements. Released December 5, 2019 On macOS systems, the first time that you update MongoDB Compass to version\n1.20 or later, you will need to allow access to your system storage\n for each saved connection in Recents and\n Favorites . To learn more, see Allow Keychain Access for Recent and Favorite Connections . Added the option to include driver syntax when\n exporting queries to a language . New and improved Connection experience\nwith support for all connection options. Improved user experience for saving and sharing\n Favorite Connections . Added JSON mode for managing documents. With JSON mode, you can\nnow insert multiple documents at once. Added support for querying UUIDs via the\n Documents query bar or in the\n Aggregation Pipeline Builder . Added support for the following aggregation pipeline operators: $set $unset $replaceWith Improved inline documentation for aggregation pipeline arguments. Removed $limit \nahead of the\n $count stage in\nthe aggregation pipeline builder to ensure accurate counts on large\ncollections. Prior versions of MongoDB Compass placed a $limit stage\nbefore $count stages in the\n Aggregation Pipeline Builder for large\ncollections, even when sample mode was disabled. Various bug fixes and improvements. Released August 11, 2019 Added support for: Views . You can create\nviews based on results from an\n aggregation pipeline . Wildcard Indexes . Killing long-running operations from the\n Performance Tab . Adjusting the maximum timeout for\nqueries executed in the Query Bar. New settings available in the\n aggregation pipeline builder . You can\nnow specify a sample size, number of documents to preview, and a\nmaximum timeout for your pipeline operations. Obscures fields encrypted with Field-Level Encryption. These fields\ncannot be modified by Compass users. Compass now warns users who are connected to non-genuine\nMongoDB servers. For more information, see\n this entry in the FAQ . Released May 17, 2019 Provided fixes to the Compass Windows installer. With the\nnew .msi installer you can: Select the destination of the Compass installation. Install Compass for all users. Script the Compass installation and perform a quiet\ninstall. Added support for Ubuntu 18.10 and other recent Linux distributions. New Schema Validation experience. Added support for\n JSON schema validation . Includes smart editor with autocomplete. For macOS systems, Compass now requires macOS 10.12 or greater. Released March 4, 2019 Performance improvements to the Documents \nand Aggregation tabs, specifically with\ndeeply nested documents. Fixed several connection issues. Fixed Kerberos connections where hostname is not the canonical\nname. Fixed SRV connections with special characters in the password. Compass no longer allows direct connections to\n secondary databases, which would result in hangs on the\nloading navigation screen. Fixed connections to M0 \n Atlas clusters with\nreadonly users. Fixed issue where usersInfo command was not available\nto the data service. authSource now correctly defaults to admin when connecting to\n Atlas . Compass now properly forces a disconnect when requested. 
Released November 12, 2018 Added collation support to the following features: Create a Collection Create a Database Create an Index Query Your Data Create an Aggregation Pipeline Added the ability to find text within a page using either\n Ctrl + F or Command + F , depending on your\nplatform. Reduced the required permissions to use MongoDB Compass . As of this\nversion of MongoDB Compass , users require the\n read permission to access a database in Compass . Updated dates to display in UTC (Universal Time Coordinated) \ntime. Added support for SCRAM-SHA-256 authentication mechanism. Various bug fixes and improvements. As of this version, you should not provide a Password when\nusing Kerberos as the authentication mechanism. Released August 23, 2018 Added support for importing plain text pipelines into the\n Aggregation Pipeline Builder . Added support for exporting aggregation pipelines and exporting queries in the syntax of the following languages: Java Node C# Python 3 Released June 26, 2018 Added Aggregation Pipeline Builder ,\nwhich provides the ability to execute\n aggregation pipelines to\ngain additional insight into your data. Added MongoDB Compass Isolated Edition for highly secure\nenvironments. This edition does not initiate any network requests\nexcept to the MongoDB server. Released May 3, 2018 Added ability to\n import and export data in JSON and\n CSV format. Released March 5, 2018 Added MongoDB Compass Readonly Edition which limits certain\n CRUD operations within your organization. The following actions are not permitted in Compass Readonly Edition: All other functionality remains the same as in standard MongoDB Compass . Create and drop databases Create and drop collections Create, delete, edit, and clone documents Create and drop indexes Create, delete, and edit document validation rules Added support for connecting to Compass using an\n SRV record (Service record) . In the connect dialog, if\nCompass detects an SRV record URI on the clipboard it\nauto-completes the dialog based on the SRV record. Made various performance and stability improvements to the documents tab. Released December 17, 2017 Added support for plugins\nthat extend the functionality of MongoDB Compass . Added support for disconnecting from the active\nMongoDB instance without restarting MongoDB Compass . Added Table View for documents as a\nmethod of viewing the contents of a collection in tabular format. Released Oct 25, 2017 Now available in two editions, Compass Community and Compass. Compass provides the following features not in the Community edition: Kerberos Authentication LDAP Authentication x509 Authentication Schema Analysis Real Time Server Stats Document Validation Released Oct, 2017 Added autocomplete functionality to the query bar. Compass automatically stores up to 20 most recent queries for each\ncollection. From the past queries view for a collection, you can\nview the recent queries as well as the\nqueries saved as favorites . For more\ninformation, see View Recent Queries . When a user connects to a MongoDB instance ,\nCompass now displays: For more information, see Compass Home . The connection name if the connection is a favorite connection or\n\"My Cluster\" if it is not. The type of deployment (standalone, replica set, sharded cluster).\nIf the deployment is a replica set and the replica set name is\nspecified in the connection window, the number of replica set\nmembers will also be displayed. 
Released Aug 2, 2017 Documents tab is the default Schema sampling only on demand Explain executed only on demand Improved Document Editing Deployment Awareness (and read preference) Added ability to specify replica set name and read preference in\nconnection screen. Added ability to parse MongoDB URI string in the connection screen. Allow typing index field names in addition to dropdown Use Client Meta Data to identify Compass application name in server logs New Loading animation Released Jun 7, 2017 Added ability to include options in the query bar . Added ability to add or delete database/collection from the left-hand navigation sidebar. Added ability to collapse the left-hand navigation sidebar. Released Mar 1, 2017 Added support for Linux: Ubuntu 14.04+ and RHEL 7+. Added ability to zoom in and zoom out of panels. Released Nov 29, 2016 Added ability to create and drop databases. Added ability to create and drop collections. Added ability to create indexes . Added support for document validation . Improved security when connecting to Atlas. During Connection setup,\n MongoDB Compass supports the use of System Certificate Authority for\nTLS/SSL connections to Atlas Deployment. Provides Real Time Performance stats. Released Nov 1, 2016 Add support for connecting to Atlas. Various bug fixes and improvements. Released Sep 15, 2016 Allow specifying the value of the _id field when inserting new\ndocuments. Set the default field and value sizes to 1 when adding a new key\nto a document. Typing \" : \" in the key input field tabs to the value input field when\nediting a document. Only allow addition of one element at a time if the field name in the\nnew element is blank when editing a document. CRUD documentation now available in the application help menu. Fix element autofocus on add. Bug: Small Bug Fixes identified at MongoDB World New: CRUD single document create, update, delete New: SSH tunnel support New: Tree explain plan view New: Geographic query builder and visualization Explicit opt-in for \"3rd party maps\" usage Improve display of binary data in the document viewer Query builder on numeric histograms should leave bounds open Intercom overlay button now visible Load preferences defensively, catching parsing errors and reset preferences Compass Treasure Hunt for MongoDB World Released Jun 27, 2016 Beta installs alongside the stable release as a distinct application,\nwith distinct preferences Index view Explain plan view Documents view moved to a separate tab Automatic updates Bug: Feature Tour does not show on first launch Bug: Compass fails to start with JavaScript error in main process:\n SyntaxError: Unexpected end of input Bug: No error displayed message when an authentication error occurs Bug: Compass does not handle $indexStats fetch failure on MongoDB 3.2 NEW: Added explain plan view Added feature tour points of new 1.2 features Bugfix: After increasing maxTimeMS timeout, query default falls back\nto 100 docs Released Jan 19, 2016 Released Dec 7, 2015 MongoDB Compass 1.0 is now available. It helps users\nto visually analyze and understand their MongoDB data. 
Query builder bug in unique minicharts when resetting Hang: Do something graceful after closing/opening laptop lid & using\nCompass again Error in Compass.app/Contents/Resources/app/node_modules/scout-server/lib/models/token.js:20 Pass readPreference.nearest in lib/routes/collection.js Enterprise/Community version not correctly detected for MongoDB 2.6, 3.0 Compass hangs when upper case host alias used in connection Reduce reservoir sampling limit to 10,000 documents Possible race condition when reading from IndexedDB Cannot access DBs with colon (\":\") in their name Cannot read property 'authInfo' of undefined in mongodb-instance-model/lib/fetch.js:297 Cannot access DBs with octothorp ( # ) in their name Failure to sample on first collection selected ObjectID visualization missing last tooltip Change intercom message/menu item to \"Provide Feedback\" Open external links in user's web browser, not Electron Place SSL \"Client Certificate\" field above \"Client Private Key\" Re-enable highlighting/selecting of some UI elements Replace Help entry stubs with actual text Use consistent titles across windows Simplify language in opt-in panel Reduce font size of header to accommodate full db.collection name Remove \"\u2013\" (minimize) on Intercom Run shrinkwrap to lock Compass 1.0 dependencies Confirm successful Evergreen builds from release-1 branch Compass fails to connect to hostname", + "code": [], + "preview": "Released September 18, 2024", + "tags": null, + "facets": { + "target_product": [ + "compass" + ] + } + }, + { + "slug": "sampling", + "title": "Sampling", + "headings": [ + "Sampling Method" + ], + "paragraphs": "Sampling in MongoDB Compass is the selection of a subset of documents from a\ncollection for analysis. Analyzing a sample set of data is a common\nstatistical analysis technique; the results of analyzing a sample set\ntend to be similar to the results of analyzing an entire data set. Compass uses sampling for efficiency. Generally, standard\nsample sets can be selected and analyzed in a few seconds. Analyzing\nlarge samples or entire collections demands significantly more time and\nprocessing power. MongoDB Compass samples 1,000 documents from a collection using the\n $sample \noperator via the\n aggregation pipeline . This\nprovides efficient, random sampling without replacement over an entire\ncollection, or over the subset of documents specified by a query.", + "code": [], + "preview": "Sampling in MongoDB Compass is the selection of a subset of documents from a\ncollection for analysis. Analyzing a sample set of data is a common\nstatistical analysis technique; the results of analyzing a sample set\ntend to be similar to the results of analyzing an entire data set.", + "tags": null, + "facets": { + "target_product": [ + "compass" + ] + } + }, + { + "slug": "schema/export", + "title": "Export Your Schema", + "headings": [ + "Schema Object Properties", + "Example Schema" + ], + "paragraphs": "You can export your schema after analyzing it. This is useful for\nsharing your schema and comparing schemas across collections. If you have not already done so, analyze your schema: Once your schema has been analyzed, export your schema: Your schema is copied to your clipboard as a JSON (Javascript Object Notation) object. Select your desired collection and click the Schema tab. Click Analyze Schema . When Compass analyzes your schema, it samples a random\nsubset of documents from your collection. To learn more about\nsampling, see Sampling . In the top menu bar, click Collection . 
From the dropdown, click Share Schema as JSON . Schema objects have count and fields properties: count is an integer that represents the number of documents\n sampled from the collection to generate the schema. fields is an array of metadata objects that correspond to each\nfield in the documents analyzed during sampling. Each element in the\n fields array contains the following fields: Property Data type Description name String Name of the corresponding field, e.g. _id . path String Path to the corresponding field within a document. count Integer Number of documents in which the corresponding field appears. types Array Array of metadata objects that represent each data type that\nappears in the corresponding field. types[n].name String Name of this data type. types[n].bsonType String BSON (Binary Javascript Object Notation) type of this data type. types[n].path String Path to the corresponding field within a document. types[n].count Integer Number of times this data type appears in the corresponding\nfield. types[n].values Array Array of the actual sampled values that\nappear in the corresponding field and match this data type. types[n].total_count Integer If the corresponding field is an array, the number of elements\nin that array. types[n].probability Number Probability that the value of the corresponding field is this\ndata type in a random document. types[n].unique Integer Number of unique values of this data type that appear in\nthe corresponding field. types[n].has_duplicates Boolean true if a single value of this data type appears multiple\ntimes in the corresponding field. Otherwise false . types[n].lengths Array If this data type is an array, an array of integers representing\nthe lengths of arrays found in the corresponding field. Not\npresent for other data types. types[n].average_length Number If this data type is an array, the average length of arrays in\nthe corresponding field across sampled documents. Not present\nfor other data types. total_count Integer Number of documents sampled from the collection. type String or Array String or array of strings representing possible types for\nthe corresponding field. has_duplicates Boolean true if a single value appears multiple times in\nthe corresponding field. Otherwise false . probability Number Probability that a random document contains the corresponding\nfield. The following example uses a collection of 3 documents, each\nwith a sport field and unique information about that sport: You can import the above example to MongoDB Compass to experiment with schema\noutputs. To import the example collection into MongoDB Compass : The example above outputs the following schema: Copy the JSON documents above. In MongoDB Compass , select a collection or create a new collection to\nimport the copied documents to. The Documents tab\ndisplays. Click Add Data . Select Insert Document from the dropdown. 
In the JSON view of the dialog, paste the copied documents and click\n Insert .", + "code": [ + { + "lang": "json", + "value": "[\n {\n \"_id\": { \"$oid\":\"5e8359ba7782b98ba98c16fd\" },\n \"sport\": \"Baseball\",\n \"equipment\": [ \"bat\", \"baseball\", \"glove\", \"helmet\" ]\n },\n {\n \"_id\": { \"$oid\":\"5e835a727782b98ba98c16fe\" },\n \"sport\": \"Football\",\n \"variants\": {\n \"us\":\"Soccer\",\n \"eu\":\"Football\"\n }\n },\n {\n \"_id\": { \"$oid\":\"5e835ade7782b98ba98c16ff\" },\n \"sport\": \"Cricket\",\n \"origin\": \"England\"\n }\n]" + }, + { + "lang": "json", + "value": "{\n \"fields\": [\n {\n \"name\": \"_id\",\n \"path\": \"_id\",\n \"count\": 3,\n \"types\": [\n {\n \"name\": \"ObjectID\",\n \"bsonType\": \"ObjectID\",\n \"path\": \"_id\",\n \"count\": 3,\n \"values\": [\n \"5e8359ba7782b98ba98c16fd\",\n \"5e835a727782b98ba98c16fe\",\n \"5e835ade7782b98ba98c16ff\"\n ],\n \"total_count\": 0,\n \"probability\": 1,\n \"unique\": 3,\n \"has_duplicates\": false\n }\n ],\n \"total_count\": 3,\n \"type\": \"ObjectID\",\n \"has_duplicates\": false,\n \"probability\": 1\n },\n {\n \"name\": \"equipment\",\n \"path\": \"equipment\",\n \"count\": 1,\n \"types\": [\n {\n \"name\": \"Undefined\",\n \"type\": \"Undefined\",\n \"path\": \"equipment\",\n \"count\": 2,\n \"total_count\": 0,\n \"probability\": 0.6666666666666666,\n \"unique\": 1,\n \"has_duplicates\": true\n },\n {\n \"name\": \"Array\",\n \"bsonType\": \"Array\",\n \"path\": \"equipment\",\n \"count\": 1,\n \"types\": [\n {\n \"name\": \"String\",\n \"bsonType\": \"String\",\n \"path\": \"equipment\",\n \"count\": 4,\n \"values\": [\n \"bat\",\n \"baseball\",\n \"glove\",\n \"helmet\"\n ],\n \"total_count\": 0,\n \"probability\": 1,\n \"unique\": 4,\n \"has_duplicates\": false\n }\n ],\n \"lengths\": [\n 4\n ],\n \"total_count\": 4,\n \"probability\": 0.3333333333333333,\n \"average_length\": 4\n }\n ],\n \"total_count\": 3,\n \"type\": [\n \"Undefined\",\n \"Array\"\n ],\n \"has_duplicates\": true,\n \"probability\": 0.3333333333333333\n },\n {\n \"name\": \"origin\",\n \"path\": \"origin\",\n \"count\": 1,\n \"types\": [\n {\n \"name\": \"Undefined\",\n \"type\": \"Undefined\",\n \"path\": \"origin\",\n \"count\": 2,\n \"total_count\": 0,\n \"probability\": 0.6666666666666666,\n \"unique\": 1,\n \"has_duplicates\": true\n },\n {\n \"name\": \"String\",\n \"bsonType\": \"String\",\n \"path\": \"origin\",\n \"count\": 1,\n \"values\": [\n \"England\"\n ],\n \"total_count\": 0,\n \"probability\": 0.3333333333333333,\n \"unique\": 1,\n \"has_duplicates\": false\n }\n ],\n \"total_count\": 3,\n \"type\": [\n \"Undefined\",\n \"String\"\n ],\n \"has_duplicates\": true,\n \"probability\": 0.3333333333333333\n },\n {\n \"name\": \"sport\",\n \"path\": \"sport\",\n \"count\": 3,\n \"types\": [\n {\n \"name\": \"String\",\n \"bsonType\": \"String\",\n \"path\": \"sport\",\n \"count\": 3,\n \"values\": [\n \"Baseball\",\n \"Football\",\n \"Cricket\"\n ],\n \"total_count\": 0,\n \"probability\": 1,\n \"unique\": 3,\n \"has_duplicates\": false\n }\n ],\n \"total_count\": 3,\n \"type\": \"String\",\n \"has_duplicates\": false,\n \"probability\": 1\n },\n {\n \"name\": \"variants\",\n \"path\": \"variants\",\n \"count\": 1,\n \"types\": [\n {\n \"name\": \"Undefined\",\n \"type\": \"Undefined\",\n \"path\": \"variants\",\n \"count\": 2,\n \"total_count\": 0,\n \"probability\": 0.6666666666666666,\n \"unique\": 1,\n \"has_duplicates\": true\n },\n {\n \"name\": \"Document\",\n \"bsonType\": \"Document\",\n \"path\": 
\"variants\",\n \"count\": 1,\n \"fields\": [\n {\n \"name\": \"eu\",\n \"path\": \"variants.eu\",\n \"count\": 1,\n \"types\": [\n {\n \"name\": \"String\",\n \"bsonType\": \"String\",\n \"path\": \"variants.eu\",\n \"count\": 1,\n \"values\": [\n \"Football\"\n ],\n \"total_count\": 0,\n \"probability\": 1,\n \"unique\": 1,\n \"has_duplicates\": false\n }\n ],\n \"total_count\": 1,\n \"type\": \"String\",\n \"has_duplicates\": false,\n \"probability\": 1\n },\n {\n \"name\": \"us\",\n \"path\": \"variants.us\",\n \"count\": 1,\n \"types\": [\n {\n \"name\": \"String\",\n \"bsonType\": \"String\",\n \"path\": \"variants.us\",\n \"count\": 1,\n \"values\": [\n \"Soccer\"\n ],\n \"total_count\": 0,\n \"probability\": 1,\n \"unique\": 1,\n \"has_duplicates\": false\n }\n ],\n \"total_count\": 1,\n \"type\": \"String\",\n \"has_duplicates\": false,\n \"probability\": 1\n }\n ],\n \"total_count\": 0,\n \"probability\": 0.3333333333333333\n }\n ],\n \"total_count\": 3,\n \"type\": [\n \"Undefined\",\n \"Document\"\n ],\n \"has_duplicates\": true,\n \"probability\": 0.3333333333333333\n }\n ],\n \"count\": 3\n}" + } + ], + "preview": "You can export your schema after analyzing it. This is useful for\nsharing your schema and comparing schemas across collections.", + "tags": null, + "facets": { + "target_product": [ + "compass" + ] + } + }, + { + "slug": "schema", + "title": "Analyze Your Data Schema", + "headings": [ + "Query Bar", + "Field Descriptions", + "Field with a Single Data Type", + "Field with Multiple Data Types", + "Missing Field", + "Strings", + "Numbers", + "Dates and ObjectIDs", + "Embedded Documents and Arrays", + "GeoJSON and [longitude,latitude] Arrays", + "View Charts of Mixed Types", + "Query Builder", + "Click the chart value.", + "Optional. Select multiple values.", + "Optional. Click on other field values to create a compound query.", + "Optional. Deselect a value.", + "Run the query", + "Analyze Location Data", + "Apply a Location Filter", + "Edit a Location Filter", + "Delete a Location Filter", + "Troubleshooting" + ], + "paragraphs": "The Schema tab provides an overview of the data type\nand shape of the fields in a particular collection. Databases\nand collections are visible in the left-side navigation. The overview is based on sampling \nthe documents in the collection. The schema overview may include\nadditional data about the contents of the fields, such as the\nminimum and maximum values of dates and integers, the frequency of\noccurrence of particular values, and the cardinality of the data. MongoDB has a flexible schema model , which means that some fields may\ncontain different types of data from one document to the next. For\nexample, a field named address may contain strings and integers in\nsome documents, objects in others, or some combination of all three. In the case of heterogenous fields, the Schema tab shows a\nbreakdown of the various data types contained within the field with the\npercentage of each data type represented. The Schema tab shows size information about the\n test.restaurants collection at the top, including the total\nnumber of documents in the collection, the average document size,\nand the total disk space occupied by the collection. The following fields are shown with details: The _id field is an\n ObjectId .\nEach ObjectId contains a timestamp, so Compass displays the range\nof creation times for the sampled documents. The address field contains four nested fields. 
You\ncan expand the field panel to see analyses of each of the nested\nfields. The borough field contains a string indicating the borough in\nwhich the restaurant is located. The cardinality is low enough\nthat Compass can provide a graded bar of the field contents, with\nthe most-frequently occurring string on the left. The categories field contains arrays of strings. The analysis\nshows the minimum, maximum, and average array lengths. Using the query bar in the Schema tab,\nyou can create a query filter to limit your result set. Click the\n Options button to specify query options, such as the\nparticular fields to display and the number of results to return. For query result sets larger than 1000 documents, Compass shows a\nsubset of the results. Otherwise, Compass shows the entire result\nset. For details on sampling, see Sampling . In the Schema tab, you can also use the Query Builder to\nenter a query into the query bar. For each field, Compass displays summary information about the data\ntype or types the field contains and the range of values. Depending\non the data type and the level of cardinality, Compass displays\nhistograms, graded bars, geographical maps, and sample data to provide\na sense of the shape and scope of the data contained in each field. Below is an example of the data type summary for a field called\n last_login which contains data of type date . For fields that contain multiple data types,\nCompass displays a percentage breakdown of\nthe various data types across documents. In the example below,\nthe chart shows the contents of a field called phone_no in which\n81% of documents are of type string , and the remaining 19% are of\ntype number . If a collection contains documents in which not all fields contain\na value, the missing values display as undefined . In the example\nbelow, the field age has no recorded value in 40% of the sampled\ndocuments. Strings can appear in three different ways. If there are\nentirely unique strings in a field, Compass shows a random\nselection of string values from the specified field. Click\nthe circular refresh icon to see\na new set of randomly selected values from the field. If there are only a few different string values,\nCompass shows the strings in a single graded bar which\nshows the percentage of the population of the string values. If there are multiple string values with some duplicates,\nCompass shows a histogram indicating the frequency of\neach string found within the field. Move the mouse over each bar to display a tooltip which shows the\nvalue of the string. Numbers are similar to strings in their representation.\nUnique numbers are shown in the following manner: Duplicate numbers are shown in a histogram that indicates their\nfrequency: Fields that represent dates (and fields that contain the ObjectID\ndata type, which includes a timestamp) are shown across multiple\nbar charts. The two charts on the top row represent the day of the\nweek and time of day of the timestamp value. The single chart on the bottom shows the first and last timestamp\nvalue, and the vertical lines represent the distribution of the\ntimestamp across the range of first to last. Fields that contain a sub-document or an array are displayed with\na small triangle next to them and a visual representation of the\ndata contained within the sub-document or array. Click on the triangle to expand the field and view the embedded\ndocuments: Fields that contain GeoJSON data or [longitude,latitude] arrays are\ndisplayed with interactive maps. 
For more information on interacting\nwith location data in Compass , see Analyze Location Data . Third party mapping services are not available in\n Compass Isolated Edition . If a field has mixed types, you can view different charts of each type\nby clicking on the type field. In the example below, the age \nfield shows the values that are strings: Clicking on the number type causes the chart to show its numeric\ndata: In the Schema tab, you can type the filter manually into\nthe query bar or generate the filter with the Compass query builder.\nThe query builder allows you to select data elements from one or more\nfields in your schema and construct a query matching the selected\nelements. The following procedure describes the steps involved in\nbuilding a complex query with the query bar. You can compose the initial query filter by using the clickable query\nbuilder and then manually edit the generated filter to your exact\nrequirements. In the Schema view, you can click on a chart value to\nbuild a query. For example, the following image shows the query\nfilter built by clicking the EWR value for the\n departureAirportFsCode field. To select multiple values for a field, click and drag the cursor\nover a selection of values, or press shift+click on the desired\nvalues. For example, the following image shows the compound query\nbuilt by selecting a value in the flightId field. To deselect a previously selected value, shift+click on\nthe selected value: To run the query, click Analyze . Click Reset \nto clear your query. In the Schema tab, you can use interactive maps to filter\nand analyze location data. If your field contains\n GeoJSON data or\n [longitude,latitude] arrays, the Schema tab displays\na map containing the points from the field. The data type\nfor location fields is coordinates . You can apply a filter to the map to only analyze a specific\nrange of points. To define a location filter: The query bar updates as you draw location filters to show\nthe exact coordinates used in the\n $geoWithin query\napplied to the schema analysis. If you specify multiple location filters, the query becomes\nan $or query\nwith multiple $geoWithin operators. Click the Circle button at the top-right\nof the map. Click and drag on the map to draw a circle containing the area of\nthe map you want to analyze. Repeat this process as desired to include additional areas of\nthe map in the schema analysis. To move or resize a location filter, click on the\nright side of the map. You will enter the filter editing mode, which\nlooks like this: After modifying your filters, click Save . Click and drag the square in the center of the circle. Click and drag the square at the edge of the circle. To delete a location filter from the map: Click on the right side of the map. Either click: A location filter to delete that filter. Clear All to delete all location filters. Click Save . If the analysis of your schema times out, it might be because the\ncollection you are analyzing is very large, causing MongoDB to stop the\noperation before the analysis is complete. Increase the value of\n MAX TIME MS to allow the operation time to complete. To increase the value of MAX TIME MS : Once you have increased the value of MAX TIME MS , retry your schema\nanalysis by clicking Analyze . In the query bar, expand Options . 
Increase the value of MAX TIME MS to accommodate your collection.\n MAX TIME MS defaults to 60000 milliseconds, or 60 seconds, but large\ncollections might take tens of seconds to analyze.", + "code": [], + "preview": "The Schema tab provides an overview of the data type\nand shape of the fields in a particular collection. Databases\nand collections are visible in the left-side navigation.", + "tags": null, + "facets": { + "target_product": [ + "compass" + ] + } + }, + { + "slug": "settings/command-line-options", + "title": "Command Line Options", + "headings": [ + "Definition", + "Compass Executable Location", + "Syntax", + "Options", + "Command Line Only", + "General Settings", + "Learn More" + ], + "paragraphs": "You can customize MongoDB Compass using command line options. You can start MongoDB Compass and set configuration options from the command line.\nWhen you use the command line to set configuration options, the options are\nset at startup. Settings configured using command line options cannot be\nmodified on the Compass interface. The name and location of the Compass executable varies by\noperating system. Operating System Executable Name Location Linux mongodb-compass The installer installs it in the /usr/bin directory. Windows MongoDBCompass.exe The installer installs it in a folder you pick during the installation\nprocess. MacOS MongoDB Compass The installer installs it under the Applications folder: Use the following syntax to start MongoDB Compass and set a configuration option: These options can only be set on the command line. These options\ncannot be set in the Settings panel or the Compass \nconfiguration file. Setting Definition Export favorite connections. Show MongoDB Compass options. Import favorite connections. Specify a passphrase to encrypt the exported favorite connections output file. Specify a passphrase to decrypt the imported favorite\nconnections file. Provides an example Compass configuration file. Suppresses warnings about disallowed connection string properties and allows\nautomatic connection. Compass inspects the connection string passed in the\ncommand line, and verifies whether the connection string contains parameters\nthat can result in a security risk. Only set this option if you pass a known trusted connection string to\n Compass . Show MongoDB Compass version. These options can be set on the command line, in a configuration file, or in the\n Settings panel. Setting an option on the command line overrides the\nvalue in the Settings panel. Setting Definition Enable automatic updates . Use\n --no-autoUpdates to disable automatic updates. Specifies a shell command to start the browser for OIDC authentication\nwhen you connect to the server or log into your Atlas Cloud account. Enable the feedback panel. Use --no-enableFeedbackPanel to disable\nautomatic updates. Enable Chrome DevTools in Compass . To learn more, see Toggle Chrome DevTools . Allow MongoDB Compass to make requests to a 3rd party mapping service. Use\n --no-enableMaps to disable mapping requests. Enable or disable the embedded MongoDB Shell on Compass . To learn more, see Disable the Embedded MongoDB Shell . Specifies a file containing a list of connections that are automatically\navailable once MongoDB Compass starts. To learn more, see Specify Read Preference and Tags . Assign connection option values for when you\nconnect to your MongoDB deployment through Compass . 
These connection\noptions cannot be overridden by connections from the Compass interface or\na command line connection string. Allows additional command line flags. Register Compass as a handler for mongodb:// and mongodb+srv://\nURLs. If Install Compass as URL Protocol Handler is enabled,\nyou can open Compass by navigating to a mongodb:// or\nmongodb+srv:// URL in your internet browser. Available on macOS and Windows. Specify an upper time limit for all Compass database operations. Configure MongoDB Compass to only allow outgoing network operations to connect to the database. To learn more, see Block Outgoing Network Connections . Specifies a password for authentication to Compass and the MongoDB\ndeployment provided in the connection string. Use in conjunction with --username . Allows you to remain logged in when using the MongoDB OIDC authentication\nmechanism for MongoDB server connections. Session tokens are encrypted and\nstored using the system keychain. To learn more, see Hide Credentials in Your Connection String . Sets all connection strings as read-only. Passwords in connection\nstrings display as ***** . If protectConnectionStrings is enabled, Compass disables the\n Edit connection string option and hides the\n Edit connection string toggle. Sets connection strings for new connections as read-only by\ndefault. Passwords in new connection strings display as\n ***** . If protectConnectionStringsForNewConnections is enabled,\n Compass disables the Edit connection string \noption but doesn't prevent users from manually enabling the\noption with the Edit connection string toggle. Prevent users from performing write operations to your MongoDB deployment\nthrough Compass . To learn more, see Restrict Write Operations to MongoDB . Enables performance insights for your schemas and queries. To learn more, see Performance Insights . Show or hide the Kerberos password field on the Compass \nconnection form. To learn more, see Display the Kerberos Password Field . Enables device authorization flow for OIDC authentication on MongoDB server.\nThis enables a less secure authentication flow that can be used as a fallback\nwhen browser-based authentication is unavailable. Specify the MongoDB Compass UI theme. The supported themes are DARK ,\n LIGHT , and OS_THEME . Enable sending usage statistics. Use --no-trackUsageStatistics \nto disable sending usage statistics. Specifies a username for authentication to Compass and the MongoDB\ndeployment provided in the connection string Optionally, use in conjuction with --password . To view all available Compass configuration options, run the following\ncommand in the folder containing your MongoDB Compass executable: Start Compass from the Command Line Configuration File Settings Interface Settings", + "code": [ + { + "lang": "shell", + "value": "/Applications/MongoDB\\ Compass.app/Contents/MacOS/MongoDB\\ Compass" + }, + { + "lang": "sh", + "value": " --