diff --git a/Cargo.lock b/Cargo.lock
index b770145fb2..314cc6c1d3 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -166,9 +166,9 @@ checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711"
 
 [[package]]
 name = "async-compression"
-version = "0.4.7"
+version = "0.4.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "86a9249d1447a85f95810c620abea82e001fe58a31713fcce614caf52499f905"
+checksum = "a116f46a969224200a0a97f29cfd4c50e7534e4b4826bd23ea2c3c533039c82c"
 dependencies = [
  "brotli",
  "flate2",
@@ -228,7 +228,7 @@ checksum = "30c5ef0ede93efbf733c1a727f3b6b5a1060bbedd5600183e66f6e4be4af0ec5"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.57",
+ "syn 2.0.55",
 ]
 
 [[package]]
@@ -250,7 +250,7 @@ checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.57",
+ "syn 2.0.55",
 ]
 
 [[package]]
@@ -261,7 +261,7 @@ checksum = "a507401cad91ec6a857ed5513a2073c82a9b9048762b885bb98655b306964681"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.57",
+ "syn 2.0.55",
 ]
 
 [[package]]
@@ -425,7 +425,7 @@ dependencies = [
  "heck 0.4.1",
  "proc-macro2",
  "quote",
- "syn 2.0.57",
+ "syn 2.0.55",
 ]
 
 [[package]]
@@ -624,7 +624,7 @@ dependencies = [
  "proc-macro-crate",
  "proc-macro2",
  "quote",
- "syn 2.0.57",
+ "syn 2.0.55",
  "syn_derive",
 ]
 
 [[package]]
@@ -864,7 +864,7 @@ dependencies = [
  "heck 0.5.0",
  "proc-macro2",
  "quote",
- "syn 2.0.57",
+ "syn 2.0.55",
 ]
 
 [[package]]
@@ -1350,7 +1350,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.57",
+ "syn 2.0.55",
 ]
 
 [[package]]
@@ -1568,7 +1568,7 @@ dependencies = [
  "proc-macro2",
  "quote",
  "strsim 0.10.0",
- "syn 2.0.57",
+ "syn 2.0.55",
 ]
 
 [[package]]
@@ -1579,7 +1579,7 @@ checksum = "a668eda54683121533a393014d8692171709ff57a7d61f187b6e782719f8933f"
 dependencies = [
  "darling_core",
  "quote",
- "syn 2.0.57",
+ "syn 2.0.55",
 ]
 
 [[package]]
@@ -1662,9 +1662,9 @@ dependencies = [
 
 [[package]]
 name = "der"
-version = "0.7.9"
+version = "0.7.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f55bf8e7b65898637379c1b74eb1551107c8294ed26d855ceb9fd1a09cfc9bc0"
+checksum = "fffa369a668c8af7dbf8b5e56c9f744fbd399949ed171606040001947de40b1c"
 dependencies = [
  "const-oid",
  "pem-rfc7468",
@@ -1710,7 +1710,7 @@ dependencies = [
  "darling",
  "proc-macro2",
  "quote",
- "syn 2.0.57",
+ "syn 2.0.55",
 ]
 
 [[package]]
@@ -1720,7 +1720,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "206868b8242f27cecce124c19fd88157fbd0dd334df2587f36417bafbc85097b"
 dependencies = [
  "derive_builder_core",
- "syn 2.0.57",
+ "syn 2.0.55",
 ]
 
 [[package]]
@@ -1895,7 +1895,7 @@ dependencies = [
  "enum-ordinalize",
  "proc-macro2",
  "quote",
- "syn 2.0.57",
+ "syn 2.0.55",
 ]
 
 [[package]]
@@ -1966,7 +1966,7 @@ checksum = "0d28318a75d4aead5c4db25382e8ef717932d0346600cacae6357eb5941bc5ff"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.57",
+ "syn 2.0.55",
 ]
 
 [[package]]
@@ -2214,7 +2214,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.57",
+ "syn 2.0.55",
 ]
 
 [[package]]
@@ -2804,7 +2804,7 @@ dependencies = [
  "proc-macro2",
  "quote",
  "serde",
- "syn 2.0.57",
+ "syn 2.0.55",
  "toml",
  "unicode-xid",
 ]
 
 [[package]]
@@ -2880,7 +2880,7 @@ checksum = "0122b7114117e64a63ac49f752a5ca4624d534c7b1c7de796ac196381cd2d947"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.57",
+ "syn 2.0.55",
 ]
 
 [[package]]
@@ -3050,12 +3050,13 @@ checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058"
 
 [[package]]
 name = "libredox"
-version = "0.1.3"
+version = "0.0.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d"
+checksum = "85c833ca1e66078851dba29046874e38f08b2c883700aa29a03ddd3b23814ee8"
 dependencies = [
  "bitflags 2.5.0",
  "libc",
+ "redox_syscall 0.4.1",
 ]
 
 [[package]]
@@ -3132,7 +3133,7 @@ checksum = "5cf92c10c7e361d6b99666ec1c6f9805b0bea2c3bd8c78dc6fe98ac5bd78db11"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.57",
+ "syn 2.0.55",
 ]
 
 [[package]]
@@ -3767,7 +3768,7 @@ dependencies = [
  "proc-macro-error",
  "proc-macro2",
  "quote",
- "syn 2.0.57",
+ "syn 2.0.55",
 ]
 
 [[package]]
@@ -3781,7 +3782,7 @@ dependencies = [
  "proc-macro2",
  "proc-macro2-diagnostics",
  "quote",
- "syn 2.0.57",
+ "syn 2.0.55",
 ]
 
 [[package]]
@@ -3978,14 +3979,14 @@ checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.57",
+ "syn 2.0.55",
 ]
 
 [[package]]
 name = "pin-project-lite"
-version = "0.2.14"
+version = "0.2.13"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02"
+checksum = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58"
 
 [[package]]
 name = "pin-utils"
@@ -4158,7 +4159,7 @@ dependencies = [
  "heck 0.4.1",
  "proc-macro2",
  "quote",
- "syn 2.0.57",
+ "syn 2.0.55",
 ]
 
 [[package]]
@@ -4285,7 +4286,7 @@ checksum = "af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.57",
+ "syn 2.0.55",
  "version_check",
  "yansi 1.0.1",
 ]
 
 [[package]]
@@ -4310,7 +4311,7 @@ dependencies = [
  "itertools 0.11.0",
  "proc-macro2",
  "quote",
- "syn 2.0.57",
+ "syn 2.0.55",
 ]
 
 [[package]]
@@ -4346,9 +4347,9 @@ dependencies = [
 
 [[package]]
 name = "quanta"
-version = "0.12.3"
+version = "0.12.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8e5167a477619228a0b284fac2674e3c388cba90631d7b7de620e6f1fcd08da5"
+checksum = "9ca0b7bac0b97248c40bb77288fc52029cf1459c0461ea1b05ee32ccf011de2c"
 dependencies = [
  "crossbeam-utils",
  "libc",
@@ -4555,9 +4556,9 @@ dependencies = [
 
 [[package]]
 name = "redox_users"
-version = "0.4.5"
+version = "0.4.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bd283d9651eeda4b2a83a43c1c91b266c40fd76ecd39a50a8c630ae69dc72891"
+checksum = "a18479200779601e498ada4e8c1e1f50e3ee19deb0259c25825a98b5603b2cb4"
 dependencies = [
  "getrandom 0.2.12",
  "libredox",
@@ -4576,34 +4577,36 @@ dependencies = [
 
 [[package]]
 name = "refinery-core"
-version = "0.8.13"
+version = "0.8.12"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3d0f5d1af6a2e8d5972ca187b2acf7ecb8d6a1a6ece52bceeae8f57880eaf62f"
+checksum = "08d6c80329c0455510a8d42fce286ecb4b6bcd8c57e1816d9f2d6bd7379c2cc8"
 dependencies = [
  "async-trait",
  "cfg-if",
  "log",
  "regex",
+ "serde",
  "siphasher 1.0.1",
  "thiserror",
  "time",
  "tokio",
  "tokio-postgres",
+ "toml",
  "url",
  "walkdir",
 ]
 
 [[package]]
 name = "refinery-macros"
-version = "0.8.13"
+version = "0.8.12"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7ba59636ac45d953f2225dc4ca3a55cfda1b015d0e6ff51ea16329918b436d51"
+checksum = "6ab6e31e166a49d55cb09b62639e5ab9ba2e73f2f124336b06f6c321dc602779"
 dependencies = [
  "proc-macro2",
  "quote",
  "refinery-core",
  "regex",
- "syn 2.0.57",
+ "syn 2.0.55",
 ]
 
 [[package]]
@@ -4658,7 +4661,7 @@ checksum = "ad9f2390298a947ee0aa6073d440e221c0726188cfbcdf9604addb6ee393eb4a"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.57",
+ "syn 2.0.55",
 ]
 
 [[package]]
@@ -5102,7 +5105,7 @@ dependencies = [
  "proc-macro-error",
  "proc-macro2",
  "quote",
- "syn 2.0.57",
+ "syn 2.0.55",
 ]
 
 [[package]]
@@ -5143,7 +5146,7 @@ dependencies = [
  "proc-macro2",
  "quote",
  "sea-bae",
- "syn 2.0.57",
+ "syn 2.0.55",
  "unicode-ident",
 ]
 
@@ -5202,9 +5205,9 @@ dependencies = [
 
 [[package]]
 name = "security-framework"
-version = "2.10.0"
+version = "2.9.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "770452e37cad93e0a50d5abc3990d2bc351c36d0328f86cefec2f2fb206eaef6"
+checksum = "05b64fb303737d99b81884b2c63433e9ae28abebe5eb5045dcdd175dc2ecf4de"
 dependencies = [
  "bitflags 1.3.2",
  "core-foundation",
@@ -5215,9 +5218,9 @@ dependencies = [
 
 [[package]]
 name = "security-framework-sys"
-version = "2.10.0"
+version = "2.9.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "41f3cc463c0ef97e11c3461a9d3787412d30e8e7eb907c79180c4a57bf7c04ef"
+checksum = "e932934257d3b408ed8f30db49d85ea163bfe74961f017f405b025af298f0c7a"
 dependencies = [
  "core-foundation-sys",
  "libc",
@@ -5271,7 +5274,7 @@ checksum = "7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.57",
+ "syn 2.0.55",
 ]
 
 [[package]]
@@ -5313,7 +5316,7 @@ checksum = "0b2e6b945e9d3df726b65d6ee24060aff8e3533d431f677a9695db04eff9dfdb"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.57",
+ "syn 2.0.55",
 ]
 
 [[package]]
@@ -5390,7 +5393,7 @@ dependencies = [
  "darling",
  "proc-macro2",
  "quote",
- "syn 2.0.57",
+ "syn 2.0.55",
 ]
 
 [[package]]
@@ -5402,7 +5405,7 @@ dependencies = [
  "darling",
  "proc-macro2",
  "quote",
- "syn 2.0.57",
+ "syn 2.0.55",
 ]
 
 [[package]]
@@ -5577,7 +5580,6 @@ version = "0.1.0"
 dependencies = [
  "blake3",
  "bytes 1.6.0",
- "paste",
  "postgres-types",
  "remain",
  "serde",
@@ -5706,7 +5708,7 @@ version = "0.1.0"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.57",
+ "syn 2.0.55",
 ]
 
 [[package]]
@@ -6185,7 +6187,7 @@ dependencies = [
  "proc-macro2",
  "quote",
  "rustversion",
- "syn 2.0.57",
+ "syn 2.0.55",
 ]
 
 [[package]]
@@ -6198,7 +6200,7 @@ dependencies = [
  "proc-macro2",
  "quote",
  "rustversion",
- "syn 2.0.57",
+ "syn 2.0.55",
 ]
 
 [[package]]
@@ -6233,9 +6235,9 @@ dependencies = [
 
 [[package]]
 name = "syn"
-version = "2.0.57"
+version = "2.0.55"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "11a6ae1e52eb25aab8f3fb9fca13be982a373b8f1157ca14b897a825ba4a2d35"
+checksum = "002a1b3dbf967edfafc32655d0f377ab0bb7b994aa1d32c8cc7e9b8bf3ebb8f0"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -6251,7 +6253,7 @@ dependencies = [
  "proc-macro-error",
  "proc-macro2",
  "quote",
- "syn 2.0.57",
+ "syn 2.0.55",
 ]
 
 [[package]]
@@ -6384,7 +6386,7 @@ checksum = "c8f546451eaa38373f549093fe9fd05e7d2bade739e2ddf834b9968621d60107"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.57",
+ "syn 2.0.55",
 ]
 
 [[package]]
@@ -6404,7 +6406,7 @@ checksum = "c61f3ba182994efc43764a46c018c347bc492c79f024e705f46567b418f6d4f7"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.57",
+ "syn 2.0.55",
 ]
 
 [[package]]
@@ -6519,7 +6521,7 @@ checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.57",
+ "syn 2.0.55",
 ]
 
 [[package]]
@@ -6810,7 +6812,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.57",
+ "syn 2.0.55",
 ]
 
 [[package]]
@@ -7253,7 +7255,7 @@ dependencies = [
  "once_cell",
  "proc-macro2",
  "quote",
- "syn 2.0.57",
+ "syn 2.0.55",
  "wasm-bindgen-shared",
 ]
@@ -7287,7 +7289,7 @@ checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.57",
+ "syn 2.0.55",
  "wasm-bindgen-backend",
  "wasm-bindgen-shared",
 ]
@@ -7648,7 +7650,7 @@ checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.57",
+ "syn 2.0.55",
 ]
 
 [[package]]
@@ -7668,5 +7670,5 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.57",
+ "syn 2.0.55",
 ]
diff --git a/Cargo.toml b/Cargo.toml
index f8136cd089..05d72edac1 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -94,7 +94,7 @@ directories = "5.0.1"
 docker-api = "0.14.0"
 dyn-clone = "1.0.17"
 flate2 = "1.0.28"
-futures = {version = "0.3.30", features = ["executor"]}
+futures = "0.3.30"
 futures-lite = "2.3.0"
 hex = "0.4.3"
 http = "0.2.12" # todo: upgrade this alongside hyper/axum/tokio-tungstenite/tower-http
diff --git a/lib/dal-test/BUCK b/lib/dal-test/BUCK
index 78069fbac3..3040ab423e 100644
--- a/lib/dal-test/BUCK
+++ b/lib/dal-test/BUCK
@@ -5,10 +5,10 @@ rust_library(
     deps = [
         "//lib/buck2-resources:buck2-resources",
         "//lib/council-server:council-server",
-        "//lib/dal:dal-integration-test",
+        "//lib/dal:dal",
         "//lib/module-index-client:module-index-client",
-        "//lib/pinga-server:pinga-server-integration-test",
-        "//lib/rebaser-server:rebaser-server-integration-test",
+        "//lib/pinga-server:pinga-server",
+        "//lib/rebaser-server:rebaser-server",
         "//lib/si-crypto:si-crypto",
         "//lib/si-data-nats:si-data-nats",
         "//lib/si-data-pg:si-data-pg",
diff --git a/lib/dal-test/src/lib.rs b/lib/dal-test/src/lib.rs
index a961aa428d..9149c8d6aa 100644
--- a/lib/dal-test/src/lib.rs
+++ b/lib/dal-test/src/lib.rs
@@ -138,8 +138,7 @@ impl Config {
         config.pg.port = env::var(ENV_VAR_PG_PORT)
             .unwrap_or_else(|_| DEFAULT_TEST_PG_PORT_STR.to_string())
             .parse()?;
-
-        config.pg.pool_max_size = 4;
+        config.pg.pool_max_size *= 32;
         config.pg.certificate_path = Some(config.postgres_key_path.clone().try_into()?);
 
         if let Ok(value) = env::var(ENV_VAR_PG_HOSTNAME) {
@@ -152,7 +151,7 @@ impl Config {
         config.layer_cache_pg_pool.port = env::var(ENV_VAR_PG_PORT)
             .unwrap_or_else(|_| DEFAULT_TEST_PG_PORT_STR.to_string())
             .parse()?;
-        config.layer_cache_pg_pool.pool_max_size = 4;
+        config.layer_cache_pg_pool.pool_max_size *= 32;
         config.layer_cache_pg_pool.certificate_path =
             Some(config.postgres_key_path.clone().try_into()?);
 
diff --git a/lib/dal/BUCK b/lib/dal/BUCK
index 18bf4d1bd5..46c937b43f 100644
--- a/lib/dal/BUCK
+++ b/lib/dal/BUCK
@@ -1,7 +1,6 @@
 load(
     "@prelude-si//:macros.bzl",
     "rust_library",
-    "rust_library_integration_test",
     "rust_test",
 )
 
@@ -76,83 +75,6 @@ rust_library(
     test_unit_deps = [
         "//third-party/rust:tempfile",
     ],
-)
-
-rust_library_integration_test(
-    name = "dal-integration-test",
-    crate = "dal",
-    deps = [
-        "//lib/si-cbor:si-cbor",
-        "//lib/council-server:council-server",
-        "//lib/nats-subscriber:nats-subscriber",
-        "//lib/object-tree:object-tree",
-        "//lib/si-crypto:si-crypto",
-        "//lib/si-data-nats:si-data-nats",
-        "//lib/si-data-pg:si-data-pg",
-        "//lib/si-events-rs:si-events",
-        "//lib/si-hash:si-hash",
-        "//lib/si-layer-cache:si-layer-cache",
-        "//lib/si-pkg:si-pkg",
-        "//lib/si-std:si-std",
-        "//lib/telemetry-rs:telemetry",
-        "//lib/telemetry-nats-rs:telemetry-nats",
-        "//lib/veritech-client:veritech-client",
-        "//third-party/rust:async-recursion",
-        "//third-party/rust:async-trait",
-        "//third-party/rust:base64",
-        "//third-party/rust:blake3",
-        "//third-party/rust:chrono",
-        "//third-party/rust:ciborium",
-        "//third-party/rust:convert_case",
-        "//third-party/rust:derive_more",
-        "//third-party/rust:diff",
-        "//third-party/rust:dyn-clone",
-        "//third-party/rust:futures",
-        "//third-party/rust:hex",
-        "//third-party/rust:iftree",
-        "//third-party/rust:itertools",
-        "//third-party/rust:jwt-simple",
-        "//third-party/rust:lazy_static",
-        "//third-party/rust:once_cell",
-        "//third-party/rust:paste",
-        "//third-party/rust:petgraph",
-        "//third-party/rust:postcard",
-        "//third-party/rust:postgres-types",
-        "//third-party/rust:pretty_assertions_sorted",
-        "//third-party/rust:rand",
-        "//third-party/rust:refinery",
-        "//third-party/rust:regex",
-        "//third-party/rust:remain",
-        "//third-party/rust:serde",
-        "//third-party/rust:serde-aux",
-        "//third-party/rust:serde_json",
-        "//third-party/rust:serde_with",
-        "//third-party/rust:sled",
-        "//third-party/rust:sodiumoxide",
-        "//third-party/rust:strum",
-        "//third-party/rust:thiserror",
-        "//third-party/rust:tokio",
-        "//third-party/rust:tokio-stream",
-        "//third-party/rust:ulid",
-        "//third-party/rust:url",
-    ],
-    rustc_flags = [
-        "--cfg=integration_test",
-    ],
-    srcs = glob([
-        "src/**/*.rs",
-        "src/builtins/func/**",
-        "src/builtins/schema/data/**/*.json",
-        "src/builtins/schema/definitions/**/*.json",
-        "src/migrations/**/*.sql",
-        "src/queries/**/*.sql",
-    ]),
-    env = {
-        "CARGO_MANIFEST_DIR": ".",
-    },
-    test_unit_deps = [
-        "//third-party/rust:tempfile",
-    ],
     extra_test_targets = [":test-integration"],
 )
 
@@ -161,8 +83,7 @@ rust_test(
     deps = [
         "//lib/dal-test:dal-test",
         "//lib/rebaser-core:rebaser-core",
-        "//lib/rebaser-server:rebaser-server-integration-test",
-        "//lib/si-events-rs:si-events",
+        "//lib/rebaser-server:rebaser-server",
         "//lib/si-pkg:si-pkg",
         "//lib/veritech-client:veritech-client",
         "//third-party/rust:base64",
@@ -177,7 +98,7 @@ rust_test(
         "//third-party/rust:tokio",
         "//third-party/rust:tokio-util",
         "//third-party/rust:ulid",
-        ":dal-integration-test",
+        ":dal",
     ],
     crate_root = "tests/integration.rs",
     srcs = glob([
diff --git a/lib/dal/Cargo.toml b/lib/dal/Cargo.toml
index 12fdbf00eb..7904330cf2 100644
--- a/lib/dal/Cargo.toml
+++ b/lib/dal/Cargo.toml
@@ -70,6 +70,5 @@ veritech-client = { path = "../../lib/veritech-client" }
 
 itertools = { workspace = true }
 pretty_assertions_sorted = { workspace = true }
-si-events = { path = "../../lib/si-events-rs" }
 tempfile = { workspace = true }
 tokio-util = { workspace = true }
diff --git a/lib/dal/src/action/prototype.rs b/lib/dal/src/action/prototype.rs
index 1050a379e9..04f641c2df 100644
--- a/lib/dal/src/action/prototype.rs
+++ b/lib/dal/src/action/prototype.rs
@@ -282,7 +282,7 @@ impl ActionPrototype {
         {
             let node_weight = workspace_snapshot.get_node_weight(node_index).await?;
             let id = node_weight.id();
-            if NodeWeightDiscriminants::Func == node_weight.as_ref().into() {
+            if NodeWeightDiscriminants::Func == node_weight.into() {
                 return Ok(id.into());
             }
         }
diff --git a/lib/dal/src/attribute/prototype.rs b/lib/dal/src/attribute/prototype.rs
index c860de9679..4d1e140acb 100644
--- a/lib/dal/src/attribute/prototype.rs
+++ b/lib/dal/src/attribute/prototype.rs
@@ -157,7 +157,7 @@ impl AttributePrototype {
         {
             let node_weight = workspace_snapshot.get_node_weight(node_index).await?;
             let node_weight_id = node_weight.id();
-            if NodeWeightDiscriminants::Func == node_weight.as_ref().into() {
+            if NodeWeightDiscriminants::Func == node_weight.into() {
                 return Ok(node_weight_id.into());
            }
        }
@@ -356,7 +356,6 @@ impl AttributePrototype {
         let (target_id, edge_weight_discrim) = match workspace_snapshot
             .get_node_weight(prototype_edge_source)
             .await?
-            .as_ref()
         {
             NodeWeight::Prop(prop_inner) => {
                 (prop_inner.id(), EdgeWeightKindDiscriminants::Prop)
             }
@@ -391,7 +390,6 @@
             if let NodeWeight::AttributeValue(av_node_weight) = workspace_snapshot
                 .get_node_weight(attribute_value_target)
                 .await?
-                .as_ref()
             {
                 attribute_value_ids.push(av_node_weight.id().into())
             }
@@ -428,10 +426,8 @@
 
         Ok(match maybe_value_idxs.first().copied() {
             Some(value_idx) => {
-                if let NodeWeight::AttributeValue(av_node_weight) = workspace_snapshot
-                    .get_node_weight(value_idx)
-                    .await?
-                    .as_ref()
+                if let NodeWeight::AttributeValue(av_node_weight) =
+                    workspace_snapshot.get_node_weight(value_idx).await?
                 {
                     Some(av_node_weight.id().into())
                 } else {
diff --git a/lib/dal/src/attribute/prototype/argument.rs b/lib/dal/src/attribute/prototype/argument.rs
index 3af0c0a645..368693bbfd 100644
--- a/lib/dal/src/attribute/prototype/argument.rs
+++ b/lib/dal/src/attribute/prototype/argument.rs
@@ -134,7 +134,7 @@ impl AttributePrototypeArgument {
             )
             .await?
         {
-            match workspace_snapshot.get_node_weight(node_idx).await?.as_ref() {
+            match workspace_snapshot.get_node_weight(node_idx).await? {
                 NodeWeight::Content(inner) => {
                     let inner_addr_discrim: ContentAddressDiscriminants =
                         inner.content_address().into();
@@ -318,7 +318,7 @@
             .into_iter()
             .next()
         {
-            match workspace_snapshot.get_node_weight(target).await?.as_ref() {
+            match workspace_snapshot.get_node_weight(target).await? {
                 NodeWeight::Prop(inner) => {
                     return Ok(Some(ValueSource::Prop(inner.id().into())));
                 }
@@ -513,7 +513,7 @@
         for idx in apa_node_idxs {
             let node_weight = workspace_snapshot.get_node_weight(idx).await?;
-            if let NodeWeight::AttributePrototypeArgument(apa_weight) = node_weight.as_ref() {
+            if let NodeWeight::AttributePrototypeArgument(apa_weight) = &node_weight {
                 if let Some(ArgumentTargets {
                     destination_component_id,
                     ..
diff --git a/lib/dal/src/attribute/value.rs b/lib/dal/src/attribute/value.rs
index f591d91296..adec6cee01 100644
--- a/lib/dal/src/attribute/value.rs
+++ b/lib/dal/src/attribute/value.rs
@@ -946,10 +946,8 @@ impl AttributeValue {
         let workspace_snapshot = ctx.workspace_snapshot()?;
         let prop_node_index = workspace_snapshot.get_node_index_by_id(prop_id).await?;
-        if let NodeWeight::Prop(prop_inner) = workspace_snapshot
-            .get_node_weight(prop_node_index)
-            .await?
-            .as_ref()
+        if let NodeWeight::Prop(prop_inner) =
+            workspace_snapshot.get_node_weight(prop_node_index).await?
         {
             prop_inner.kind()
         } else {
@@ -1277,7 +1275,6 @@
                 .workspace_snapshot()?
                 .get_node_weight(node_index)
                 .await?
-                .as_ref()
             {
                 prop_map.insert(
                     prop_inner.name().to_string(),
@@ -1375,7 +1372,6 @@
                 match workspace_snapshot
                     .get_node_weight(element_prop_index)
                     .await?
-                    .as_ref()
                 {
                     NodeWeight::Prop(prop_inner) => (prop_inner.id(), prop_inner.kind()),
                     _ => {
@@ -1620,7 +1616,6 @@
                 match workspace_snapshot
                     .get_node_weight(element_prop_index)
                     .await?
-                    .as_ref()
                 {
                     NodeWeight::Prop(prop_inner) => (prop_inner.id(), prop_inner.kind()),
                     _ => {
@@ -1822,7 +1817,7 @@
         view: Option,
     ) -> AttributeValueResult<()> {
         let workspace_snapshot = ctx.workspace_snapshot()?;
-        let (_, av_node_weight) = {
+        let (av_idx, av_node_weight) = {
             let av_idx = workspace_snapshot
                 .get_node_index_by_id(attribute_value_id)
                 .await?;
@@ -1867,6 +1862,7 @@
         workspace_snapshot
             .add_node(NodeWeight::AttributeValue(new_av_node_weight))
             .await?;
+        workspace_snapshot.replace_references(av_idx).await?;
 
         Ok(())
     }
@@ -1881,7 +1877,7 @@
         func_execution_pk: FuncExecutionPk,
     ) -> AttributeValueResult<()> {
         let workspace_snapshot = ctx.workspace_snapshot()?;
-        let (_av_idx, av_node_weight) = {
+        let (av_idx, av_node_weight) = {
             let av_idx = workspace_snapshot
                 .get_node_index_by_id(attribute_value_id)
                 .await?;
@@ -1942,6 +1938,7 @@
         workspace_snapshot
             .add_node(NodeWeight::AttributeValue(new_av_node_weight))
             .await?;
+        workspace_snapshot.replace_references(av_idx).await?;
 
         Ok(())
     }
@@ -1978,7 +1975,7 @@
             .await?
         {
             let target_node_weight = workspace_snapshot.get_node_weight(target).await?;
-            if let NodeWeight::Prop(prop_node_weight) = target_node_weight.as_ref() {
+            if let NodeWeight::Prop(prop_node_weight) = &target_node_weight {
                 maybe_prop_id = match maybe_prop_id {
                     Some(already_found_prop_id) => {
                         return Err(AttributeValueError::MultiplePropsFound(
@@ -2097,7 +2094,7 @@
             .pop()
         {
             let node_weight = workspace_snapshot.get_node_weight(ordering).await?;
-            if let NodeWeight::Ordering(ordering_weight) = node_weight.as_ref() {
+            if let NodeWeight::Ordering(ordering_weight) = node_weight {
                 Ok(ordering_weight
                     .order()
                     .clone()
diff --git a/lib/dal/src/component.rs b/lib/dal/src/component.rs
index 1cb9f33134..c0a46626d4 100644
--- a/lib/dal/src/component.rs
+++ b/lib/dal/src/component.rs
@@ -626,7 +626,6 @@ impl Component {
         if let NodeWeight::Content(content) = workspace_snapshot
             .get_node_weight(maybe_schema_variant_index)
             .await?
-            .as_ref()
         {
             let content_hash_discriminants: ContentAddressDiscriminants =
                 content.content_address().into();
@@ -833,7 +832,7 @@
             .await?
         {
             let target_node_weight = workspace_snapshot.get_node_weight(target).await?;
-            if let NodeWeight::AttributeValue(_) = target_node_weight.as_ref() {
+            if let NodeWeight::AttributeValue(_) = target_node_weight {
                 maybe_root_attribute_value_id = match maybe_root_attribute_value_id {
                     Some(already_found_root_attribute_value_id) => {
                         return Err(ComponentError::MultipleRootAttributeValuesFound(
@@ -1243,6 +1242,9 @@
             ctx.workspace_snapshot()?
                 .add_node(NodeWeight::Component(new_component_node_weight))
                 .await?;
+            ctx.workspace_snapshot()?
+                .replace_references(component_idx)
+                .await?;
         }
 
         let updated = ComponentContentV1::from(component.clone());
diff --git a/lib/dal/src/context.rs b/lib/dal/src/context.rs
index 576d2d0700..1772d0df82 100644
--- a/lib/dal/src/context.rs
+++ b/lib/dal/src/context.rs
@@ -19,7 +19,6 @@ use tokio::time::Instant;
 use veritech_client::{Client as VeritechClient, CycloneEncryptionKey};
 
 use crate::layer_db_types::ContentTypes;
-use crate::workspace_snapshot::node_weight::NodeWeight;
 use crate::workspace_snapshot::{
     conflict::Conflict, graph::WorkspaceSnapshotGraph, update::Update, vector_clock::VectorClockId,
 };
@@ -37,7 +36,7 @@ use crate::{
 };
 use crate::{EncryptedSecret, Workspace};
 
-pub type DalLayerDb = LayerDb;
+pub type DalLayerDb = LayerDb;
 
 /// A context type which contains handles to common core service dependencies.
 ///
diff --git a/lib/dal/src/func.rs b/lib/dal/src/func.rs
index 994c93912a..98232f759b 100644
--- a/lib/dal/src/func.rs
+++ b/lib/dal/src/func.rs
@@ -305,7 +305,7 @@ impl Func {
         let name = name.as_ref();
         for func_index in func_indices {
             let node_weight = workspace_snapshot.get_node_weight(func_index).await?;
-            if let NodeWeight::Func(inner_weight) = node_weight.as_ref() {
+            if let NodeWeight::Func(inner_weight) = node_weight {
                 if inner_weight.name() == name {
                     return Ok(Some(inner_weight.id().into()));
                 }
@@ -401,7 +401,7 @@
         // have changed, this ends up updating the node for the function twice. This could be
         // optimized to do it only once.
         if func.name.as_str() != node_weight.name() {
-            let _original_node_index = workspace_snapshot.get_node_index_by_id(func.id).await?;
+            let original_node_index = workspace_snapshot.get_node_index_by_id(func.id).await?;
 
             node_weight.set_name(func.name.as_str());
 
@@ -410,6 +410,10 @@
                 node_weight.new_with_incremented_vector_clock(ctx.change_set()?)?,
             ))
             .await?;
+
+            workspace_snapshot
+                .replace_references(original_node_index)
+                .await?;
         }
 
         let updated = FuncContentV1::from(func.clone());
@@ -469,7 +473,6 @@
             .ok_or(FuncError::IntrinsicFuncNotFound(name.to_owned()))
     }
 
-    #[instrument(level = "debug", skip_all)]
     pub async fn list(ctx: &DalContext) -> FuncResult> {
         let workspace_snapshot = ctx.workspace_snapshot()?;
diff --git a/lib/dal/src/func/argument.rs b/lib/dal/src/func/argument.rs
index b26af16192..b58fe04789 100644
--- a/lib/dal/src/func/argument.rs
+++ b/lib/dal/src/func/argument.rs
@@ -337,7 +337,7 @@ impl FuncArgument {
     {
         let ulid: Ulid = id.into();
 
-        let (_arg_node_idx, arg_nw) = {
+        let (arg_node_idx, arg_nw) = {
             let workspace_snapshot = ctx.workspace_snapshot()?;
 
             let arg_node_idx = workspace_snapshot.get_node_index_by_id(ulid).await?;
@@ -376,6 +376,7 @@
             workspace_snapshot
                 .add_node(NodeWeight::FuncArgument(new_func_arg.clone()))
                 .await?;
+            workspace_snapshot.replace_references(arg_node_idx).await?;
 
             func_arg_node_weight = new_func_arg;
         }
diff --git a/lib/dal/src/pkg/import.rs b/lib/dal/src/pkg/import.rs
index 5f9314ea28..d6cbb64ea4 100644
--- a/lib/dal/src/pkg/import.rs
+++ b/lib/dal/src/pkg/import.rs
@@ -64,8 +64,6 @@ pub struct ImportOptions {
 const SPECIAL_CASE_FUNCS: [&str; 2] = ["si:resourcePayloadToValue", "si:normalizeToArray"];
 
 #[allow(clippy::too_many_arguments)]
-#[allow(clippy::type_complexity)]
-#[instrument(level = "info", skip_all)]
 async fn import_change_set(
     ctx: &DalContext,
     change_set_id: Option,
@@ -1735,7 +1733,6 @@
     Ok(())
 }
 
-#[instrument(level = "info", skip_all)]
 async fn create_schema(ctx: &DalContext, schema_spec_data: &SiPkgSchemaData) -> PkgResult {
     let schema = Schema::new(ctx, schema_spec_data.name())
         .await?
@@ -1776,7 +1773,6 @@
 //     Ok(())
 // }
 
-#[instrument(level = "debug", skip_all)]
 async fn import_schema(
     ctx: &DalContext,
     change_set_id: Option,
@@ -2365,7 +2361,6 @@ impl Extend for CreatePropsSideEffects {
     }
 }
 
-#[instrument(level = "info", skip_all)]
 async fn create_props(
     ctx: &DalContext,
     change_set_id: Option,
@@ -2374,7 +2369,6 @@
     prop_root_prop_id: PropId,
     schema_variant_id: SchemaVariantId,
 ) -> PkgResult {
-    // info!("creating prop");
     let context = PropVisitContext {
         ctx,
         schema_variant_id,
@@ -2424,7 +2418,6 @@
 //     Ok(())
 // }
 
-#[instrument(level = "info", skip_all)]
 async fn import_schema_variant(
     ctx: &DalContext,
     change_set_id: Option,
@@ -2434,7 +2427,6 @@
     installed_pkg_id: Option,
     thing_map: &mut ThingMap,
 ) -> PkgResult> {
-    // info!("installing schema variant");
     let schema_variant = match change_set_id {
         None => {
             let hash = variant_spec.hash().to_string();
@@ -2578,8 +2570,6 @@
             )
             .await?;
 
-            // info!("creating domain");
-
             side_effects.extend(
                 create_props(
                     ctx,
@@ -2617,7 +2607,6 @@
                 &PropPath::new(["root", "secrets"]),
             )
             .await?;
-            // info!("creating secrets");
 
             side_effects.extend(
                 create_props(
@@ -2635,7 +2624,6 @@
             let root_prop_id =
                 Prop::find_prop_id_by_path(ctx, schema_variant.id(), &PropPath::new(["root"]))
                     .await?;
-            // info!("creating secret defs");
 
             let secret_definition_prop = Prop::new(
                 ctx,
@@ -2648,7 +2636,6 @@
             )
             .await?;
             let secret_definition_prop_id = secret_definition_prop.id();
-            // info!("creating secret defs props");
 
             side_effects.extend(
                 create_props(
@@ -2662,15 +2649,12 @@
                 .await?,
             );
         }
-        // info!("finalizing");
 
         SchemaVariant::finalize(ctx, schema_variant.id()).await?;
-        // info!("importing sockets");
 
         for socket in variant_spec.sockets()? {
             import_socket(ctx, change_set_id, socket, schema_variant.id(), thing_map).await?;
         }
-        // info!("importing action funcs");
 
         for action_func in &variant_spec.action_funcs()? {
             let prototype = import_action_func(
@@ -2691,7 +2675,6 @@
             }
         }
 
-        // info!("importing auth funcs");
         for auth_func in &variant_spec.auth_funcs()? {
             let prototype = import_auth_func(
                 ctx,
@@ -2711,8 +2694,6 @@
             }
         }
 
-        // info!("importing leaf funcs");
-
         for leaf_func in variant_spec.leaf_functions()? {
             import_leaf_function(
                 ctx,
@@ -2723,7 +2704,6 @@
             )
             .await?;
         }
-        // info!("setting default values");
 
         // Default values must be set before attribute functions are configured so they don't
         // override the prototypes set there
@@ -2748,7 +2728,6 @@
             set_default_value(ctx, name_default_value_info).await?;
         }
 
-        // info!("configuring si_prop_funcs");
         for si_prop_func in variant_spec.si_prop_funcs()? {
             let prop_id = Prop::find_prop_id_by_path(
                 ctx,
@@ -2774,7 +2753,6 @@
             )
             .await?;
         }
-        // info!("configuring root prop funcs");
 
         let mut has_resource_value_func = false;
         for root_prop_func in variant_spec.root_prop_funcs()? {
@@ -2809,7 +2787,6 @@
         if !has_resource_value_func {
             attach_resource_payload_to_value(ctx, schema_variant.id()).await?;
         }
-        // info!("configuring attr funcs");
 
         for attr_func in side_effects.attr_funcs {
             import_attr_func_for_prop(
@@ -2823,7 +2800,6 @@
             .await?;
         }
 
-        // info!("configuring map key funcs");
         for (key, map_key_func) in side_effects.map_key_funcs {
             import_attr_func_for_prop(
                 ctx,
@@ -2835,7 +2811,6 @@
             )
             .await?;
         }
-        // info!("done");
 
         Some(schema_variant)
     }
@@ -3245,7 +3220,6 @@
     parent_prop_info: Option,
     ctx: &PropVisitContext<'_>,
 ) -> PkgResult> {
-    // info!("creating prop");
     let prop = match ctx.change_set_id {
         None => {
             let data = spec.data().ok_or(PkgError::DataNotFound("prop".into()))?;
@@ -3283,8 +3257,6 @@
         }
     };
 
-    // info!("created prop");
-
     let prop_id = prop.id();
 
     // Both attribute functions and default values have to be set *after* the schema variant is
@@ -3364,8 +3336,6 @@
         });
     }
 
-    // info!("done with create prop side effect creation");
-
     Ok(Some(ParentPropInfo {
         prop_id: prop.id(),
         path: prop.path(ctx.ctx).await?,
diff --git a/lib/dal/src/prop.rs b/lib/dal/src/prop.rs
index 02cce1ddb1..85d1dd503d 100644
--- a/lib/dal/src/prop.rs
+++ b/lib/dal/src/prop.rs
@@ -350,11 +350,7 @@ impl Prop {
             .first()
         {
             Some(parent_node_idx) => Ok(
-                match workspace_snapshot
-                    .get_node_weight(*parent_node_idx)
-                    .await?
-                    .as_ref()
-                {
+                match workspace_snapshot.get_node_weight(*parent_node_idx).await? {
                     NodeWeight::Prop(prop_inner) => Some(prop_inner.id().into()),
                     NodeWeight::Content(content_inner) => {
                         let content_addr_discrim: ContentAddressDiscriminants =
@@ -440,7 +436,7 @@
             let node_idx = workspace_snapshot.get_node_index_by_id(prop_id).await?;
 
             if let NodeWeight::Prop(inner) =
-                workspace_snapshot.get_node_weight(node_idx).await?.as_ref()
+                workspace_snapshot.get_node_weight(node_idx).await?
             {
                 parts.push_front(inner.name().to_owned());
                 work_queue.push_back(inner.id().into());
@@ -492,7 +488,6 @@
     /// Create a new [`Prop`]. A corresponding [`AttributePrototype`] and [`AttributeValue`] will be
     /// created when the provided [`SchemaVariant`](crate::SchemaVariant) is
     /// [`finalized`](crate::SchemaVariant::finalize).
-    #[instrument(level = "debug", skip_all)]
     pub async fn new(
         ctx: &DalContext,
         name: impl Into,
@@ -621,7 +616,6 @@
             if let NodeWeight::Prop(prop_inner) = workspace_snapshot
                 .get_node_weight(maybe_elem_node_idx)
                 .await?
-                .as_ref()
             {
                 return Ok(prop_inner.id().into());
             }
@@ -644,10 +638,8 @@
             )
             .await?
         {
-            if let NodeWeight::Prop(prop_inner) = workspace_snapshot
-                .get_node_weight(prop_node_index)
-                .await?
-                .as_ref()
+            if let NodeWeight::Prop(prop_inner) =
+                workspace_snapshot.get_node_weight(prop_node_index).await?
             {
                 if prop_inner.name() == child_name.as_ref() {
                     return Ok(prop_node_index);
@@ -661,7 +653,6 @@
         ))
     }
 
-    #[instrument(level = "info", skip_all)]
     pub async fn find_prop_id_by_path_opt(
         ctx: &DalContext,
         schema_variant_id: SchemaVariantId,
diff --git a/lib/dal/src/property_editor/schema.rs b/lib/dal/src/property_editor/schema.rs
index 0eaf0663aa..272695d6c6 100644
--- a/lib/dal/src/property_editor/schema.rs
+++ b/lib/dal/src/property_editor/schema.rs
@@ -52,7 +52,6 @@ impl PropertyEditorSchema {
             if let NodeWeight::Prop(child_prop_weight) = workspace_snapshot
                 .get_node_weight(child_prop_node_index)
                 .await?
-                .as_ref()
             {
                 let child_prop_id: PropId = child_prop_weight.id().into();
diff --git a/lib/dal/src/schema.rs b/lib/dal/src/schema.rs
index 76c633e4e1..0d3b98717a 100644
--- a/lib/dal/src/schema.rs
+++ b/lib/dal/src/schema.rs
@@ -87,7 +87,6 @@ impl Schema {
         &self.name
     }
 
-    #[instrument(level = "debug", skip_all)]
     pub async fn new(ctx: &DalContext, name: impl Into) -> SchemaResult {
         let content = SchemaContentV1 {
             timestamp: Timestamp::now(),
diff --git a/lib/dal/src/schema/variant.rs b/lib/dal/src/schema/variant.rs
index 995ab85b55..a713bccf1d 100644
--- a/lib/dal/src/schema/variant.rs
+++ b/lib/dal/src/schema/variant.rs
@@ -239,7 +239,7 @@ impl SchemaVariant {
         while let Some((prop_id, maybe_parent_path)) = work_queue.pop_front() {
             let node_weight = workspace_snapshot.get_node_weight_by_id(prop_id).await?;
 
-            match node_weight.as_ref() {
+            match node_weight {
                 NodeWeight::Prop(prop_inner) => {
                     let name = prop_inner.name();
 
@@ -489,7 +489,7 @@
         for index in edge_targets {
             let node_weight = workspace_snapshot.get_node_weight(index).await?;
             // TODO(nick): ensure that only one prop can be under a schema variant.
-            if let NodeWeight::Prop(inner_weight) = node_weight.as_ref() {
+            if let NodeWeight::Prop(inner_weight) = node_weight {
                 if inner_weight.name() == "root" {
                     return Ok(inner_weight.clone());
                 }
@@ -553,7 +553,7 @@
                 .await?;
             for target in targets {
                 let node_weight = workspace_snapshot.get_node_weight(target).await?;
-                if let NodeWeight::Prop(child_prop) = node_weight.as_ref() {
+                if let NodeWeight::Prop(child_prop) = node_weight {
                     work_queue.push_back(child_prop.to_owned())
                 }
             }
@@ -562,6 +562,45 @@
         Ok(())
     }
 
+    pub async fn mark_props_as_able_to_be_used_as_prototype_args(
+        ctx: &DalContext,
+        schema_variant_id: SchemaVariantId,
+    ) -> SchemaVariantResult<()> {
+        let workspace_snapshot = ctx.workspace_snapshot()?;
+        let root_prop_node_weight = Self::get_root_prop_node_weight(ctx, schema_variant_id).await?;
+        let root_prop_idx = workspace_snapshot
+            .get_node_index_by_id(root_prop_node_weight.id())
+            .await?;
+
+        let mut work_queue = VecDeque::new();
+        work_queue.push_back(root_prop_idx);
+
+        while let Some(prop_idx) = work_queue.pop_front() {
+            workspace_snapshot
+                .mark_prop_as_able_to_be_used_as_prototype_arg(prop_idx)
+                .await?;
+
+            let node_weight = workspace_snapshot
+                .get_node_weight(prop_idx)
+                .await?
+                .to_owned();
+            if let NodeWeight::Prop(prop) = node_weight {
+                // Only descend if we are an object.
+                if prop.kind() == PropKind::Object {
+                    let targets = workspace_snapshot
+                        .outgoing_targets_for_edge_weight_kind(
+                            prop.id(),
+                            EdgeWeightKindDiscriminants::Use,
+                        )
+                        .await?;
+                    work_queue.extend(targets);
+                }
+            }
+        }
+
+        Ok(())
+    }
+
     pub async fn new_action_prototype(
         ctx: &DalContext,
         func_id: FuncId,
@@ -825,6 +864,11 @@
         )
         .await?;
 
+        info!(
+            "adding root child func arg: {:?}, {:?}",
+            input_prop_id, input.location
+        );
+
         let new_apa = AttributePrototypeArgument::new(
             ctx,
             existing_proto_id,
@@ -886,7 +930,7 @@
             let node_weight = workspace_snapshot
                 .get_node_weight(maybe_socket_node_index)
                 .await?;
-            if let NodeWeight::Content(content_node_weight) = node_weight.as_ref() {
+            if let NodeWeight::Content(content_node_weight) = node_weight {
                 match content_node_weight.content_address() {
                     ContentAddress::OutputSocket(output_socket_content_hash) => {
                         output_socket_hashes
@@ -971,10 +1015,8 @@
         let mut schema_id: Option = None;
         for (edge_weight, source_index, _) in maybe_schema_indices {
             if *edge_weight.kind() == EdgeWeightKind::new_use_default() {
-                if let NodeWeight::Content(content) = workspace_snapshot
-                    .get_node_weight(source_index)
-                    .await?
-                    .as_ref()
+                if let NodeWeight::Content(content) =
+                    workspace_snapshot.get_node_weight(source_index).await?
                 {
                     let content_hash_discriminants: ContentAddressDiscriminants =
                         content.content_address().into();
diff --git a/lib/dal/src/schema/variant/root_prop.rs b/lib/dal/src/schema/variant/root_prop.rs
index 958986e9a2..0851788598 100644
--- a/lib/dal/src/schema/variant/root_prop.rs
+++ b/lib/dal/src/schema/variant/root_prop.rs
@@ -2,7 +2,6 @@
 //! to the database.
 
 use strum::{AsRefStr, Display as EnumDisplay, EnumIter, EnumString};
-use telemetry::prelude::*;
 
 use crate::prop::{PropParent, PropPath};
 use crate::property_editor::schema::WidgetKind;
@@ -108,7 +107,6 @@ pub struct RootProp {
 
 impl RootProp {
     /// Create and set a [`RootProp`] for the [`SchemaVariant`].
-    #[instrument(level = "info", skip_all)]
     pub async fn new(
         ctx: &DalContext,
         schema_variant_id: SchemaVariantId,
@@ -125,6 +123,7 @@
             .await?;
         let root_prop_id = root_prop.id();
 
+        // info!("setting up si, domain and secrets");
         let si_prop_id = Self::setup_si(ctx, root_prop_id).await?;
 
         let domain_prop = Prop::new_without_ui_optionals(
@@ -143,11 +142,16 @@
         )
         .await?;
 
+        // info!("setting up resource");
         let resource_prop_id = Self::setup_resource(ctx, root_prop_id).await?;
+        // info!("setting up resource value");
         let resource_value_prop_id = Self::setup_resource_value(ctx, root_prop_id).await?;
+        // info!("setting up code");
        let code_prop_id = Self::setup_code(ctx, root_prop_id).await?;
+        // info!("setting up qualification");
         let qualification_prop_id = Self::setup_qualification(ctx, root_prop_id).await?;
 
+        // info!("setting up deleted at");
         let deleted_at_prop = Prop::new(
             ctx,
             "deleted_at",
diff --git a/lib/dal/src/socket/input.rs b/lib/dal/src/socket/input.rs
index 281b11d827..40edc55de1 100644
--- a/lib/dal/src/socket/input.rs
+++ b/lib/dal/src/socket/input.rs
@@ -174,7 +174,7 @@ impl InputSocket {
             let node_weight = workspace_snapshot
                 .get_node_weight(socket_node_index)
                 .await?;
-            if let NodeWeight::Content(content_inner) = node_weight.as_ref() {
+            if let NodeWeight::Content(content_inner) = &node_weight {
                 if ContentAddressDiscriminants::InputSocket
                     == content_inner.content_address().into()
                 {
@@ -351,10 +351,8 @@
             .await?;
 
         for av_source_idx in av_sources {
-            if let NodeWeight::AttributeValue(av_node_weight) = workspace_snapshot
-                .get_node_weight(av_source_idx)
-                .await?
-                .as_ref()
+            if let NodeWeight::AttributeValue(av_node_weight) =
+                workspace_snapshot.get_node_weight(av_source_idx).await?
             {
                 result.push(av_node_weight.id().into());
             }
diff --git a/lib/dal/src/socket/output.rs b/lib/dal/src/socket/output.rs
index 563a7b3d3d..de7a50ea18 100644
--- a/lib/dal/src/socket/output.rs
+++ b/lib/dal/src/socket/output.rs
@@ -228,10 +228,8 @@ impl OutputSocket {
             )
             .await?;
         for av_source_idx in av_sources {
-            if let NodeWeight::AttributeValue(av_node_weight) = workspace_snapshot
-                .get_node_weight(av_source_idx)
-                .await?
-                .as_ref()
+            if let NodeWeight::AttributeValue(av_node_weight) =
+                workspace_snapshot.get_node_weight(av_source_idx).await?
             {
                 result.push(av_node_weight.id().into());
             }
@@ -375,11 +373,8 @@
             )
             .await?
         {
-            if let NodeWeight::AttributePrototypeArgument(attribute_prototype_argument_weight) = ctx
-                .workspace_snapshot()?
-                .get_node_weight(tail_idx)
-                .await?
-                .as_ref()
+            if let NodeWeight::AttributePrototypeArgument(attribute_prototype_argument_weight) =
+                ctx.workspace_snapshot()?.get_node_weight(tail_idx).await?
             {
                 results.push(attribute_prototype_argument_weight.id().into());
             }
diff --git a/lib/dal/src/workspace_snapshot.rs b/lib/dal/src/workspace_snapshot.rs
index c724e426db..61ae1395aa 100644
--- a/lib/dal/src/workspace_snapshot.rs
+++ b/lib/dal/src/workspace_snapshot.rs
@@ -30,31 +30,17 @@ pub mod node_weight;
 pub mod update;
 pub mod vector_clock;
 
-use crate::workspace_snapshot::content_address::ContentAddressDiscriminants;
-use crate::workspace_snapshot::node_weight::CategoryNodeWeight;
-use chrono::Utc;
-use futures::executor;
+use si_layer_cache::persister::PersistStatus;
+use si_pkg::KeyOrIndex;
+use std::sync::Arc;
+use tokio::sync::{RwLock, RwLockReadGuard, RwLockWriteGuard};
+
 use petgraph::prelude::*;
-use petgraph::visit::DfsEvent;
 use si_data_pg::PgError;
 use si_events::ContentHash;
-use si_events::NodeWeightAddress;
 use si_events::WorkspaceSnapshotAddress;
-use si_layer_cache::db::node_weight::NodeWeightDb;
-use si_layer_cache::persister::PersistStatus;
-use si_pkg::KeyOrIndex;
-use std::collections::HashMap;
-use std::collections::HashSet;
-use std::collections::VecDeque;
-use std::sync::Arc;
-use strum::IntoEnumIterator;
 use telemetry::prelude::*;
 use thiserror::Error;
-use tokio::fs::File;
-use tokio::io::AsyncWriteExt;
-use tokio::sync::{RwLock, RwLockReadGuard, RwLockWriteGuard};
-use tokio::task::JoinError;
-
 use ulid::Ulid;
 
 use crate::change_set::{ChangeSet, ChangeSetError, ChangeSetId};
@@ -71,24 +57,17 @@ use crate::{
     DalContext, TransactionsError, WorkspaceSnapshotGraph,
 };
 
-use self::graph::GraphLocalNodeWeight;
 use self::node_weight::{NodeWeightDiscriminants, OrderingNodeWeight};
 
 #[remain::sorted]
 #[derive(Error, Debug)]
 pub enum WorkspaceSnapshotError {
-    #[error("Category node not found: {0:?}")]
-    CategoryNodeNotFound(NodeIndex),
     #[error("change set error: {0}")]
     ChangeSet(#[from] ChangeSetError),
     #[error("change set {0} has no workspace snapshot address")]
     ChangeSetMissingWorkspaceSnapshotAddress(ChangeSetId),
-    #[error("Action would create a graph cycle")]
-    CreateGraphCycle,
     #[error("edge weight error: {0}")]
     EdgeWeight(#[from] EdgeWeightError),
-    #[error("JoinError: {0:?}")]
-    Join(#[from] JoinError),
     #[error("layer db error: {0}")]
     LayerDb(#[from] si_layer_cache::LayerDbError),
     #[error("missing content from store for id: {0}")]
@@ -97,18 +76,12 @@ pub enum WorkspaceSnapshotError {
     Monotonic(#[from] ulid::MonotonicError),
     #[error("NodeWeight error: {0}")]
     NodeWeight(#[from] NodeWeightError),
-    #[error("NodeWeight not found at {0:?}")]
-    NodeWeightMissing(NodeWeightAddress),
-    #[error("Node with id {0} not found")]
-    NodeWithIdNotFound(Ulid),
     #[error("si_data_pg error: {0}")]
     Pg(#[from] PgError),
     #[error("postcard error: {0}")]
     Postcard(#[from] postcard::Error),
     #[error("serde json error: {0}")]
     SerdeJson(#[from] serde_json::Error),
-    #[error("NodeIndex has too many Ordering children: {0:?}")]
-    TooManyOrderingForNode(Ulid),
     #[error("transactions error: {0}")]
     Transactions(#[from] TransactionsError),
     #[error("could not acquire lock: {0}")]
@@ -155,9 +128,6 @@ pub struct WorkspaceSnapshot {
     /// implemenations of Deref and DerefMut, and their construction in
     /// working_copy()/working_copy_mut()
     working_copy: Arc>>,
-    node_weight_db: NodeWeightDb,
-    events_actor: si_events::Actor,
-    events_tenancy: si_events::Tenancy,
 }
 
 struct SnapshotReadGuard<'a> {
@@ -214,129 +184,62 @@ pub(crate) fn serde_value_to_string_type(value: &serde_json::Value) -> String {
 }
 
 impl WorkspaceSnapshot {
-    /// Generates a snapshot with only a root node, without persisting it. In
-    /// most cases what you want is [`WorkspaceSnapshot::initial`].
-    pub async fn empty(ctx: &DalContext, change_set: &ChangeSet) -> WorkspaceSnapshotResult {
-        let root_node = Arc::new(NodeWeight::new_content(
-            change_set,
-            change_set.generate_ulid()?,
-            content_address::ContentAddress::Root,
-        )?);
-        let (node_address, _) = ctx
-            .layer_db()
-            .node_weight()
-            .write(
-                root_node.clone(),
-                None,
-                ctx.events_tenancy(),
-                ctx.events_actor(),
-            )
-            .await?;
-
-        let graph: WorkspaceSnapshotGraph = WorkspaceSnapshotGraph::new(root_node, node_address)?;
-
-        Ok(Self {
-            address: Arc::new(RwLock::new(WorkspaceSnapshotAddress::nil())),
-            read_only_graph: Arc::new(graph),
-            working_copy: Arc::new(RwLock::new(None)),
-            node_weight_db: ctx.layer_db().node_weight().clone(),
-            events_actor: ctx.events_actor(),
-            events_tenancy: ctx.events_tenancy(),
-        })
-    }
-
-    /// Generates a snapshot with the initial category nodes attached to the
-    /// root node and writes it out.
     #[instrument(level = "debug", skip_all)]
     pub async fn initial(
         ctx: &DalContext,
         change_set: &ChangeSet,
     ) -> WorkspaceSnapshotResult {
-        let initial = Self::empty(ctx, change_set).await?;
-        let root_id = initial.root_id().await?;
+        let mut graph: WorkspaceSnapshotGraph = WorkspaceSnapshotGraph::new(change_set)?;
 
         // Create the category nodes under root.
-        for category_kind in CategoryNodeKind::iter() {
-            let category_node_id = initial.add_category_node(change_set, category_kind).await?;
-            initial
-                .add_edge(
-                    root_id,
-                    EdgeWeight::new(change_set, EdgeWeightKind::new_use())?,
-                    category_node_id,
-                )
-                .await?;
-        }
+        let component_node_index =
+            graph.add_category_node(change_set, CategoryNodeKind::Component)?;
+        let func_node_index = graph.add_category_node(change_set, CategoryNodeKind::Func)?;
+        let action_batch_node_index =
+            graph.add_category_node(change_set, CategoryNodeKind::ActionBatch)?;
+        let schema_node_index = graph.add_category_node(change_set, CategoryNodeKind::Schema)?;
+        let secret_node_index = graph.add_category_node(change_set, CategoryNodeKind::Secret)?;
+
+        // Connect them to root.
+        graph.add_edge(
+            graph.root(),
+            EdgeWeight::new(change_set, EdgeWeightKind::new_use())?,
+            action_batch_node_index,
+        )?;
+        graph.add_edge(
+            graph.root(),
+            EdgeWeight::new(change_set, EdgeWeightKind::new_use())?,
+            component_node_index,
+        )?;
+        graph.add_edge(
+            graph.root(),
+            EdgeWeight::new(change_set, EdgeWeightKind::new_use())?,
+            func_node_index,
+        )?;
+        graph.add_edge(
+            graph.root(),
+            EdgeWeight::new(change_set, EdgeWeightKind::new_use())?,
+            schema_node_index,
+        )?;
+        graph.add_edge(
+            graph.root(),
+            EdgeWeight::new(change_set, EdgeWeightKind::new_use())?,
+            secret_node_index,
+        )?;
+
+        // We do not care about any field other than "working_copy" because "write" will populate
+        // them using the assigned working copy.
+        let initial = Self {
+            address: Arc::new(RwLock::new(WorkspaceSnapshotAddress::nil())),
+            read_only_graph: Arc::new(graph),
+            working_copy: Arc::new(RwLock::new(None)),
+        };
 
         initial.write(ctx, change_set.vector_clock_id()).await?;
 
         Ok(initial)
     }
 
-    /// The workspace snapshot is made of up Arc wrapped concurrency types, so
-    /// `clone` does not duplicate the snapshot, but instead just increments the
-    /// ref count. This will produce an actual clone of the data.
-    pub async fn real_clone(&self) -> Self {
-        let working_copy_clone = self.working_copy().await.clone();
-        Self {
-            address: Arc::new(RwLock::new(self.id().await)),
-            read_only_graph: Arc::new(working_copy_clone),
-            working_copy: Arc::new(RwLock::new(None)),
-            node_weight_db: self.node_weight_db.clone(),
-            events_actor: self.events_actor.clone(),
-            events_tenancy: self.events_tenancy,
-        }
-    }
-
-    pub fn events_actor(&self) -> si_events::Actor {
-        self.events_actor.clone()
-    }
-
-    pub fn events_tenancy(&self) -> si_events::Tenancy {
-        self.events_tenancy
-    }
-
-    pub async fn node_count(&self) -> usize {
-        self.working_copy().await.node_count()
-    }
-
-    pub async fn is_acyclic_directed(&self) -> bool {
-        self.working_copy().await.is_acyclic_directed()
-    }
-
-    pub async fn mark_graph_seen(
-        &self,
-        vector_clock_id: VectorClockId,
-    ) -> WorkspaceSnapshotResult<()> {
-        let mut updates = vec![];
-        let seen_at = Utc::now();
-        for edge in self.working_copy_mut().await.graph_mut().edge_weights_mut() {
-            edge.mark_seen_at(vector_clock_id, seen_at);
-        }
-
-        for node_index in self.working_copy().await.graph().node_indices() {
-            let mut remote_node_weight = self.get_node_weight(node_index).await?.as_ref().clone();
-            remote_node_weight.mark_seen_at(vector_clock_id, seen_at);
-            let node_id = remote_node_weight.id();
-            let (new_address, _) = self
-                .node_weight_db
-                .write(
-                    Arc::new(remote_node_weight),
-                    None,
-                    self.events_tenancy(),
-                    self.events_actor(),
-                )
-                .await?;
-            updates.push((node_index, new_address, node_id));
-        }
-        for (index, address, node_id) in updates {
-            self.working_copy_mut()
-                .await
-                .update_node_weight_address(index, address, node_id)?;
-        }
-
-        Ok(())
-    }
-
     #[instrument(level = "debug", skip_all)]
     pub async fn write(
         &self,
@@ -345,16 +248,17 @@
     ) -> WorkspaceSnapshotResult {
         // Pull out the working copy and clean it up.
         let new_address = {
-            self.cleanup().await?;
+            let mut working_copy = self.working_copy_mut().await;
+            working_copy.cleanup();
 
             // Mark everything left as seen.
-            self.mark_graph_seen(vector_clock_id).await?;
+            working_copy.mark_graph_seen(vector_clock_id)?;
 
             let (new_address, status_reader) = ctx
                 .layer_db()
                 .workspace_snapshot()
                 .write(
-                    Arc::new(self.working_copy().await.clone()),
+                    Arc::new(working_copy.clone()),
                     None,
                     ctx.events_tenancy(),
                     ctx.events_actor(),
@@ -385,10 +289,7 @@
         Ok(self.working_copy().await.root())
     }
 
-    pub async fn root_id(&self) -> WorkspaceSnapshotResult {
-        Ok(self.get_node_weight(self.root().await?).await?.id())
-    }
-
+    #[instrument(level = "debug", skip_all)]
     async fn working_copy(&self) -> SnapshotReadGuard<'_> {
         SnapshotReadGuard {
             read_only_graph: self.read_only_graph.clone(),
@@ -396,6 +297,7 @@
         }
     }
 
+    #[instrument(level = "debug", skip_all)]
     async fn working_copy_mut(&self) -> SnapshotWriteGuard<'_> {
         if self.working_copy.read().await.is_none() {
             // Make a copy of the read only graph as our new working copy
@@ -408,182 +310,89 @@
     }
 
     pub async fn add_node(&self, node: NodeWeight) -> WorkspaceSnapshotResult {
-        let node_id = node.id();
-        let node_arc = Arc::new(node);
-        let (hash, _) = self
-            .node_weight_db
-            .write(
-                node_arc.clone(),
-                None,
-                self.events_tenancy(),
-                self.events_actor(),
-            )
-            .await?;
-
-        let maybe_existing_node_index = self.get_node_index_by_id_opt(node_id).await;
-        let new_node_index = self.working_copy_mut().await.add_node(node_arc, hash)?;
-        self.update_merkle_tree_hash(new_node_index).await?;
-
-        // If we are replacing an existing node, we need to replace all references to it
-        if let Some(existing_node_index) = maybe_existing_node_index {
-            self.replace_references(existing_node_index).await?;
-        }
-
-        self.get_node_index_by_id(node_id).await
+        let new_node_index = self.working_copy_mut().await.add_node(node)?;
+        Ok(new_node_index)
     }
 
+    #[instrument(level = "debug", skip_all)]
     pub async fn add_ordered_node(
         &self,
         change_set: &ChangeSet,
         node: NodeWeight,
     ) -> WorkspaceSnapshotResult {
-        let new_node_index = self.add_node(node).await?;
-        let ordering_node_index = self
-            .add_node(NodeWeight::Ordering(OrderingNodeWeight::new(change_set)?))
-            .await?;
-        let edge_index = self
-            .add_edge_unchecked(
-                new_node_index,
-                EdgeWeight::new(change_set, EdgeWeightKind::Ordering)?,
-                ordering_node_index,
-            )
-            .await?;
-        let (source, _) = self.edge_endpoints(edge_index).await?;
-        Ok(source)
-    }
-
-    pub async fn add_category_node(
-        &self,
-        change_set: &ChangeSet,
-        kind: CategoryNodeKind,
-    ) -> WorkspaceSnapshotResult {
-        let inner_weight = CategoryNodeWeight::new(change_set, kind)?;
-        let node_id = inner_weight.id();
-        self.add_node(NodeWeight::Category(inner_weight)).await?;
-        Ok(node_id)
+        let new_node_index = self
+            .working_copy_mut()
+            .await
+            .add_ordered_node(change_set, node)?;
+        Ok(new_node_index)
     }
 
+    #[instrument(level = "debug", skip_all)]
     pub async fn update_content(
         &self,
         change_set: &ChangeSet,
         id: Ulid,
         new_content_hash: ContentHash,
     ) -> WorkspaceSnapshotResult<()> {
-        let node_weight_index = self.get_node_index_by_id(id).await?;
-        let node_weight = self.get_node_weight(node_weight_index).await?;
-        // we have to copy it to modify it
-        let mut node_weight = node_weight.as_ref().clone();
-
-        node_weight.increment_vector_clock(change_set)?;
-        node_weight.new_content_hash(new_content_hash)?;
-
-        self.add_node(node_weight).await?;
-
-        let _current_index = self.get_node_index_by_id(id).await?;
-
-        Ok(())
-    }
-
-    pub async fn get_graph_local_node_weight(
-        &self,
-        id: impl Into,
-    ) -> WorkspaceSnapshotResult {
-        let node_idx = self.get_node_index_by_id(id).await?;
-        Ok(self.working_copy().await.get_node_weight(node_idx)?)
+        Ok(self
+            .working_copy_mut()
+            .await
+            .update_content(change_set, id, new_content_hash)?)
     }
 
+    #[instrument(level = "debug", skip_all)]
     pub async fn add_edge(
         &self,
         from_node_id: impl Into,
         edge_weight: EdgeWeight,
         to_node_id: impl Into,
-    ) -> WorkspaceSnapshotResult<()> {
+    ) -> WorkspaceSnapshotResult {
         let from_node_index = self
             .working_copy()
             .await
             .get_node_index_by_id(from_node_id)?;
         let to_node_index = self.working_copy().await.get_node_index_by_id(to_node_id)?;
-
-        // Temporarily add the edge to the existing tree to see if it would create a cycle.
-        // Configured to run only in tests because it has a major perf impact otherwise
-        #[cfg(integration_test)]
-        {
-            let temp_edge = self.working_copy_mut().await.graph_mut().update_edge(
-                from_node_index,
-                to_node_index,
-                edge_weight.clone(),
-            );
-
-            let would_create_a_cycle = !self.is_acyclic_directed().await;
-            self.working_copy_mut()
-                .await
-                .graph_mut()
-                .remove_edge(temp_edge);
-
-            if would_create_a_cycle {
-                return Err(WorkspaceSnapshotError::CreateGraphCycle)?;
-            }
-        }
-
-        let new_from_node_index = self.copy_node_by_index(from_node_index).await?;
-
-        self.working_copy_mut()
+        Ok(self
+            .working_copy_mut()
             .await
-            .add_edge(new_from_node_index, edge_weight, to_node_index)?;
-        self.update_merkle_tree_hash(new_from_node_index).await?;
-        self.replace_references(from_node_index).await?;
-
-        Ok(())
+            .add_edge(from_node_index, edge_weight, to_node_index)?)
     }
 
     // NOTE(nick): this should only be used by the rebaser and in specific scenarios where the
     // indices are definitely correct.
+    #[instrument(level = "debug", skip_all)]
     pub async fn add_edge_unchecked(
         &self,
         from_node_index: NodeIndex,
         edge_weight: EdgeWeight,
         to_node_index: NodeIndex,
     ) -> WorkspaceSnapshotResult {
-        let edge_index =
-            self.working_copy_mut()
-                .await
-                .add_edge(from_node_index, edge_weight, to_node_index)?;
-        self.update_merkle_tree_hash(from_node_index).await?;
-
-        Ok(edge_index)
+        Ok(self
+            .working_copy_mut()
+            .await
+            .add_edge(from_node_index, edge_weight, to_node_index)?)
     }
 
+    #[instrument(level = "debug", skip_all)]
     pub async fn add_ordered_edge(
         &self,
         change_set: &ChangeSet,
         from_node_id: impl Into,
         edge_weight: EdgeWeight,
         to_node_id: impl Into,
-    ) -> WorkspaceSnapshotResult<()> {
-        let from_node_id = from_node_id.into();
-        let to_node_id = to_node_id.into();
-
-        self.add_edge(from_node_id, edge_weight, to_node_id).await?;
-
-        // Find the ordering node of the "container" if there is one, and add the thing pointed to
-        // by the `to_node_id` to the ordering. Also point the ordering node at the thing with
-        // an `Ordinal` edge, so that Ordering nodes must be touched *after* the things they order
-        // in a depth first search
-        if let Some(mut container_ordering_node) =
-            self.ordering_node_for_container(from_node_id).await?
-        {
-            self.add_edge(
-                container_ordering_node.id(),
-                EdgeWeight::new(change_set, EdgeWeightKind::Ordinal)?,
-                to_node_id,
-            )
-            .await?;
-
-            container_ordering_node.push_to_order(change_set, to_node_id)?;
-            self.add_node(NodeWeight::Ordering(container_ordering_node))
-                .await?;
-        };
-
-        Ok(())
+    ) -> WorkspaceSnapshotResult {
+        let from_node_index = self
+            .working_copy()
+            .await
+            .get_node_index_by_id(from_node_id)?;
+        let to_node_index = self.working_copy().await.get_node_index_by_id(to_node_id)?;
+        let (edge_index, _) = self.working_copy_mut().await.add_ordered_edge(
+            change_set,
+            from_node_index,
+            edge_weight,
+            to_node_index,
+        )?;
+        Ok(edge_index)
    }
 
     #[instrument(level = "debug", skip_all)]
@@ -593,468 +402,11 @@
         onto_workspace_snapshot: &WorkspaceSnapshot,
         onto_vector_clock_id: VectorClockId,
     ) -> WorkspaceSnapshotResult<(Vec, Vec)> {
-        let onto_workspace_snapshot = onto_workspace_snapshot.clone();
-        let self_clone = self.clone();
-        let handle = tokio::task::spawn_blocking(move || {
-            let onto_graph = executor::block_on(onto_workspace_snapshot.working_copy());
-            let mut conflicts: Vec = Vec::new();
-            let mut updates: Vec = Vec::new();
-            if let Err(traversal_error) = petgraph::visit::depth_first_search(
-                onto_graph.graph(),
-                Some(onto_graph.root()),
-                |event| {
-                    self_clone.detect_conflicts_and_updates_process_dfs_event(
-                        to_rebase_vector_clock_id,
-                        &onto_workspace_snapshot,
-                        onto_vector_clock_id,
-                        event,
-                        &mut conflicts,
-                        &mut updates,
-                    )
-                },
-            ) {
-                return Err(WorkspaceSnapshotGraphError::GraphTraversal(traversal_error));
-            };
-            Ok((conflicts, updates))
-        });
-
-        let (conflicts, updates) = handle.await??;
-
-        Ok((conflicts, updates))
-    }
-
-    #[allow(clippy::too_many_arguments)]
-    #[instrument(level = "debug", skip_all)]
-    fn detect_conflicts_and_updates_process_dfs_event(
-        &self,
-        to_rebase_vector_clock_id: VectorClockId,
-        onto: &WorkspaceSnapshot,
-        onto_vector_clock_id: VectorClockId,
-        event: DfsEvent,
-        conflicts: &mut Vec,
-        updates: &mut Vec,
-    ) -> Result, petgraph::visit::DfsEvent> {
-        match event {
-            DfsEvent::Discover(onto_node_index, _) => {
-                let onto_node_weight = executor::block_on(onto.get_node_weight(onto_node_index))
-                    .map_err(|err| {
-                        error!(
-                            "Unable to get NodeWeight for onto NodeIndex {:?}: {}",
-                            onto_node_index, err,
-                        );
-                        event
-                    })?;
-
-                let onto_local_node_weight = executor::block_on(onto.working_copy())
-                    .get_node_weight(onto_node_index)
-                    .map_err(|err| {
-                        error!(
-                            "Unable to get graph local node weight for NodeIndex {:?} in onto, {}",
-                            onto_node_index, err
-                        );
-                        event
-                    })?;
-
-                let mut to_rebase_node_indexes = HashSet::new();
-                let onto_root_node_index = executor::block_on(onto.root()).map_err(|err| {
-                    error!("Unable to get root index for onto: {}", err);
-                    event
-                })?;
-
-                if onto_node_index == onto_root_node_index {
-                    // There can only be one (valid/current) `ContentAddress::Root` at any
-                    // given moment, and the `lineage_id` isn't really relevant as it's not
-                    // globally stable (even though it is locally stable). This matters as we
-                    // may be dealing with a `WorkspaceSnapshotGraph` that is coming to us
-                    // externally from a module that we're attempting to import. The external
-                    // `WorkspaceSnapshotGraph` will be `self`, and the "local" one will be
-                    // `onto`.
-                    to_rebase_node_indexes.insert(executor::block_on(self.root()).map_err(
-                        |err| {
-                            error!("Unable to get root index for self: {}", err);
-                            event
-                        },
-                    )?);
-                } else {
-                    // Only retain node indexes... or indices... if they are part of the current
-                    // graph. There may still be garbage from previous updates to the graph
-                    // laying around.
-                    let mut potential_to_rebase_node_indexes =
-                        executor::block_on(self.working_copy())
-                            .get_node_index_by_lineage(onto_node_weight.lineage_id());
-
-                    potential_to_rebase_node_indexes.retain(|node_index| {
-                        executor::block_on(self.working_copy()).has_path_to_root(*node_index)
-                    });
-                    to_rebase_node_indexes.extend(potential_to_rebase_node_indexes);
-
-                    // TODO(nick): detect category nodes with a different lineage. We will likely
-                    // need to check incoming edges in one graph and then look for outgoing edges in
-                    // the other graph.
-                    // // Since category nodes may be created from scratch from a different workspace,
-                    // // they may have different lineage ids. We still want to consider the same
-                    // // category kind as an equivalent node, even though it might have a different
-                    // // lineage id.
-                    // if let NodeWeight::Category(onto_category_node_weight) = onto_node_weight {
-                    //     onto_category_node_weight
-                    // }
-                    // let category_node_kind = onto_category_node_weight.kind();
-                    // let (_, to_rebase_category_node_index) =
-                    //     self.get_category_node(Some(onto_category_node_weight.id()), category_node_kind).map_err(|err| {
-                    //         error!(
-                    //             "Unable to get to rebase Category node for kind {:?} from onto {:?}: {}",
-                    //             onto_category_node_weight.kind(), onto, err,
-                    //         );
-                    //         event
-                    //     })?;
-                    //     to_rebase_node_indexes.insert(to_rebase_category_node_index);
-                    // }
-                }
-
-                // If everything with the same `lineage_id` is identical, then we can prune the
-                // graph traversal, and avoid unnecessary lookups/comparisons.
-                let mut any_content_with_lineage_has_changed = false;
-
-                for to_rebase_node_index in to_rebase_node_indexes {
-                    let to_rebase_local_node_weight= executor::block_on(self.working_copy()).get_node_weight(to_rebase_node_index).map_err(
-                        |err: WorkspaceSnapshotGraphError| {
-                            error!("Unable to get graph local node weight for NodeIndex {:?} on self, {}", to_rebase_node_index, err);
-                            event
-                        })?;
-
-                    let to_rebase_node_weight = executor::block_on(
-                        self.get_node_weight(to_rebase_node_index),
-                    )
-                    .map_err(|err| {
-                        error!(
-                            "Unable to get to_rebase NodeWeight for NodeIndex {:?}: {}",
-                            to_rebase_node_index, err,
-                        );
-                        event
-                    })?;
-
-                    if onto_local_node_weight.merkle_tree_hash()
-                        == to_rebase_local_node_weight.merkle_tree_hash()
-                    {
-                        // If the merkle tree hashes are the same, then the entire sub-graph is
-                        // identical, and we don't need to check any further.
-                        debug!(
-                            "onto {}, {:?} and to rebase {}, {:?} merkle tree hashes are the same",
-                            onto_local_node_weight.merkle_tree_hash(),
-                            onto_node_index,
-                            to_rebase_local_node_weight.merkle_tree_hash(),
-                            to_rebase_node_index
-                        );
-                        continue;
-                    }
-                    any_content_with_lineage_has_changed = true;
-
-                    // Check if there's a difference in the node itself (and whether it is a
-                    // conflict if there is a difference).
-                    if onto_node_weight.node_hash() != to_rebase_node_weight.node_hash() {
-                        if to_rebase_node_weight
-                            .vector_clock_write()
-                            .is_newer_than(onto_node_weight.vector_clock_write())
-                        {
-                            // The existing node (`to_rebase`) has changes, but has already seen
-                            // all of the changes in `onto`. There is no conflict, and there is
-                            // nothing to update.
-                        } else if onto_node_weight
-                            .vector_clock_write()
-                            .is_newer_than(to_rebase_node_weight.vector_clock_write())
-                        {
-                            // `onto` has changes, but has already seen all of the changes in
-                            // `to_rebase`.
There is no conflict, and we should update to use the - // `onto` node. - updates.push(Update::ReplaceSubgraph { - onto: onto_node_index, - to_rebase: to_rebase_node_index, - }); - } else { - // There are changes on both sides that have not - // been seen by the other side; this is a conflict. - // There may also be other conflicts in the outgoing - // relationships, the downstream nodes, or both. - - if let ( - NodeWeight::Ordering(onto_ordering), - NodeWeight::Ordering(to_rebase_ordering), - ) = (onto_node_weight.as_ref(), to_rebase_node_weight.as_ref()) - { - // TODO Checking if two ordering arrays are non conflicting - // (if the common elements between two ordering have the same relative positions) - // is logic that could be extracted into its own thing. The following code does that - - // Both `onto` and `to_rebase` have changes that the other has not incorporated. We - // need to find out what the changes are to see what needs to be updated, and what - // conflicts. - let onto_ordering_set: HashSet = - onto_ordering.order().iter().copied().collect(); - let to_rebase_ordering_set: HashSet = - to_rebase_ordering.order().iter().copied().collect(); - - // Make sure that both `onto` and `to_rebase` have the same relative ordering for the - // nodes they have in common. If they don't, then that means that the order changed on - // at least one of them. - let common_items: HashSet = onto_ordering_set - .intersection(&to_rebase_ordering_set) - .copied() - .collect(); - let common_onto_items = { - let mut items = onto_ordering.order().clone(); - items.retain(|i| common_items.contains(i)); - items - }; - let common_to_rebase_items = { - let mut items = to_rebase_ordering.order().clone(); - items.retain(|i| common_items.contains(i)); - items - }; - if common_onto_items != common_to_rebase_items { - conflicts.push(Conflict::ChildOrder { - to_rebase: to_rebase_node_index, - onto: onto_node_index, - }); - } - } else { - conflicts.push(Conflict::NodeContent { - to_rebase: to_rebase_node_index, - onto: onto_node_index, - }); - } - } - } - - let (container_conflicts, container_updates) = executor::block_on(self - .find_container_membership_conflicts_and_updates( - to_rebase_vector_clock_id, - to_rebase_node_index, - &[], - onto, - onto_vector_clock_id, - onto_node_index, - &[], - )) - .map_err(|err| { - error!("Unable to find container membership conflicts and updates for onto container NodeIndex {:?} and to_rebase container NodeIndex {:?}: {}", onto_node_index, to_rebase_node_index, err); - event - })?; - - updates.extend(container_updates); - conflicts.extend(container_conflicts); - } - - if any_content_with_lineage_has_changed { - // There was at least one thing with a merkle tree hash difference, so we need - // to examine further down the tree to see where the difference(s) are, and - // where there are conflicts, if there are any. - Ok(petgraph::visit::Control::Continue) - } else { - // Everything to be rebased is identical, so there's no need to examine the - // rest of the tree looking for differences & conflicts that won't be there. - Ok(petgraph::visit::Control::Prune) - } - } - DfsEvent::TreeEdge(_, _) - | DfsEvent::BackEdge(_, _) - | DfsEvent::CrossForwardEdge(_, _) - | DfsEvent::Finish(_, _) => { - // These events are all ignored, since we handle looking at edges as we encounter - // the node(s) the edges are coming from (Outgoing edges). 
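
// Stepping back to the vector-clock decision above: it is a three-way merge
// test over "write" clocks. A minimal, self-contained sketch of that test,
// where `VectorClock` and `Resolution` are simplified stand-ins for the dal
// types, assuming `is_newer_than` means "has seen at least everything the
// other clock records":
use std::collections::HashMap;

#[derive(Debug, Clone, Default)]
struct VectorClock(HashMap<u64, u64>); // change set id -> write counter

impl VectorClock {
    fn is_newer_than(&self, other: &VectorClock) -> bool {
        other
            .0
            .iter()
            .all(|(id, seq)| self.0.get(id).map_or(false, |own| own >= seq))
    }
}

#[derive(Debug, PartialEq)]
enum Resolution {
    KeepToRebase,    // to_rebase has seen every onto write: nothing to do
    ReplaceSubgraph, // onto has seen every to_rebase write: take onto's node
    Conflict,        // writes on both sides the other has not seen
}

fn resolve(to_rebase: &VectorClock, onto: &VectorClock) -> Resolution {
    if to_rebase.is_newer_than(onto) {
        Resolution::KeepToRebase
    } else if onto.is_newer_than(to_rebase) {
        Resolution::ReplaceSubgraph
    } else {
        Resolution::Conflict
    }
}

fn main() {
    let mut to_rebase = VectorClock::default();
    let mut onto = VectorClock::default();
    to_rebase.0.insert(1, 3); // change set 1 wrote three times
    onto.0.insert(1, 2); // onto saw only two of those writes...
    onto.0.insert(2, 1); // ...and made its own write under change set 2
    assert_eq!(resolve(&to_rebase, &onto), Resolution::Conflict);
}
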
-                Ok(petgraph::visit::Control::Continue)
-            }
-        }
-    }
-
-    #[allow(clippy::too_many_arguments)]
-    async fn find_container_membership_conflicts_and_updates(
-        &self,
-        to_rebase_vector_clock_id: VectorClockId,
-        to_rebase_container_index: NodeIndex,
-        to_rebase_container_order: &[Ulid],
-        onto: &WorkspaceSnapshot,
-        onto_vector_clock_id: VectorClockId,
-        onto_container_index: NodeIndex,
-        onto_container_order: &[Ulid],
-    ) -> WorkspaceSnapshotResult<(Vec<Conflict>, Vec<Update>)> {
-        #[derive(Debug, Clone, Hash, PartialEq, Eq)]
-        struct UniqueEdgeInfo {
-            pub kind: EdgeWeightKind,
-            pub target_lineage: Ulid,
-        }
-
-        #[derive(Debug, Clone)]
-        struct EdgeInfo {
-            pub source_node_index: NodeIndex,
-            pub target_node_index: NodeIndex,
-            pub edge_weight: EdgeWeight,
-        }
-
-        let mut updates = Vec::new();
-        let mut conflicts = Vec::new();
-
-        let mut to_rebase_edges = HashMap::<UniqueEdgeInfo, EdgeInfo>::new();
-        for (edge_weight, source_node_index, target_node_index) in self
-            .edges_directed_by_index(to_rebase_container_index, Outgoing)
-            .await?
-        {
-            let target_node_weight = self.get_node_weight(target_node_index).await?;
-
-            if to_rebase_container_order.contains(&target_node_weight.id()) {
-                continue;
-            }
-
-            to_rebase_edges.insert(
-                UniqueEdgeInfo {
-                    kind: edge_weight.kind().clone(),
-                    target_lineage: target_node_weight.lineage_id(),
-                },
-                EdgeInfo {
-                    source_node_index,
-                    target_node_index,
-                    edge_weight,
-                },
-            );
-        }
-
-        let mut onto_edges = HashMap::<UniqueEdgeInfo, EdgeInfo>::new();
-        for (edge_weight, source_node_index, target_node_index) in onto
-            .edges_directed_by_index(onto_container_index, Outgoing)
-            .await?
-        {
-            let target_node_weight = onto.get_node_weight(target_node_index).await?;
-
-            if onto_container_order.contains(&target_node_weight.id()) {
-                continue;
-            }
-
-            onto_edges.insert(
-                UniqueEdgeInfo {
-                    kind: edge_weight.kind().clone(),
-                    target_lineage: target_node_weight.lineage_id(),
-                },
-                EdgeInfo {
-                    source_node_index,
-                    target_node_index,
-                    edge_weight,
-                },
-            );
-        }
-
-        let only_to_rebase_edges = {
-            let mut unique_edges = to_rebase_edges.clone();
-            for key in onto_edges.keys() {
-                unique_edges.remove(key);
-            }
-            unique_edges
-        };
-        let only_onto_edges = {
-            let mut unique_edges = onto_edges.clone();
-            for key in to_rebase_edges.keys() {
-                unique_edges.remove(key);
-            }
-            unique_edges
-        };
-
-        debug!("only to rebase edges: {:?}", &only_to_rebase_edges);
-        debug!("only onto edges: {:?}", &only_onto_edges);
-
-        let root_seen_as_of_onto = self
-            .get_node_weight(self.root().await?)
-            .await?
-            .vector_clock_recently_seen()
-            .entry_for(onto_vector_clock_id);
-
-        let onto_last_saw_to_rebase = onto
-            .get_node_weight(onto.root().await?)
-            .await?
-            .vector_clock_recently_seen()
-            .entry_for(to_rebase_vector_clock_id);
-
-        for only_to_rebase_edge_info in only_to_rebase_edges.values() {
-            let to_rebase_item_weight = self
-                .get_node_weight(only_to_rebase_edge_info.target_node_index)
-                .await?;
-
-            // If `onto` has never seen this edge, then it's new, and there are no conflicts, and
-            // no updates.
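
// Before the per-edge checks below, a note on the set arithmetic above: the
// `only_to_rebase_edges` / `only_onto_edges` blocks are a plain map
// difference keyed on (edge kind, target lineage). A minimal sketch with
// simplified stand-in types (the real key uses `EdgeWeightKind` and `Ulid`):
use std::collections::HashMap;

#[derive(Debug, Clone, Hash, PartialEq, Eq)]
struct UniqueEdgeInfo {
    kind: String,        // stand-in for EdgeWeightKind
    target_lineage: u64, // stand-in for the target node's lineage Ulid
}

// Keep only the entries of `left` whose key has no counterpart in `right`,
// exactly as the blocks above do with `clone()` + `remove()`.
fn unique_to(
    left: &HashMap<UniqueEdgeInfo, &'static str>,
    right: &HashMap<UniqueEdgeInfo, &'static str>,
) -> HashMap<UniqueEdgeInfo, &'static str> {
    let mut unique = left.clone();
    for key in right.keys() {
        unique.remove(key);
    }
    unique
}

fn main() {
    let shared = UniqueEdgeInfo { kind: "Use".into(), target_lineage: 1 };
    let left_only = UniqueEdgeInfo { kind: "Contain".into(), target_lineage: 2 };
    let left = HashMap::from([(shared.clone(), "a"), (left_only, "b")]);
    let right = HashMap::from([(shared, "c")]);
    // Keying on lineage rather than node id means a re-created node with the
    // same lineage still matches, so only genuinely added or removed edges
    // survive the difference.
    assert_eq!(unique_to(&left, &right).len(), 1);
}
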
- if only_to_rebase_edge_info - .edge_weight - .vector_clock_first_seen() - .entry_for(to_rebase_vector_clock_id) - <= onto_last_saw_to_rebase - { - if to_rebase_item_weight - .vector_clock_write() - .entry_for(to_rebase_vector_clock_id) - >= onto_last_saw_to_rebase - { - // Item has been modified in `onto` (`onto` item write vector clock > "seen as - // of" for `onto` entry in `to_rebase` root): Conflict (ModifyRemovedItem) - conflicts.push(Conflict::ModifyRemovedItem( - only_to_rebase_edge_info.target_node_index, - )) - } else { - // Item not modified & removed by `onto`: No conflict; Update::RemoveEdge - updates.push(Update::RemoveEdge { - source: only_to_rebase_edge_info.source_node_index, - destination: only_to_rebase_edge_info.target_node_index, - edge_kind: only_to_rebase_edge_info.edge_weight.kind().into(), - }); - } - } else { - debug!( - "edge weight entry for to rebase vector clock id {:?} is older than onto last saw {:?}", - only_to_rebase_edge_info.edge_weight.vector_clock_first_seen().entry_for(to_rebase_vector_clock_id), - onto_last_saw_to_rebase, - ); - } - } - - // - Items unique to `onto`: - for only_onto_edge_info in only_onto_edges.values() { - let onto_item_weight = onto - .get_node_weight(only_onto_edge_info.target_node_index) - .await?; - - if let Some(onto_first_seen) = only_onto_edge_info - .edge_weight - .vector_clock_first_seen() - .entry_for(onto_vector_clock_id) - { - // From "onto_first_seen", we know "when was the first time onto saw this edge?". - match root_seen_as_of_onto { - Some(root_seen_as_of) if onto_first_seen <= root_seen_as_of => {} - _ => { - // Edge first seen by `onto` > "seen as of" on `to_rebase` graph for `onto`'s entry on - // root node: Item is new. - // Other case where item is new: the `to_rebase` has never seen anything from - // the `onto` change set. All the items are new. - updates.push(Update::NewEdge { - source: to_rebase_container_index, - destination: only_onto_edge_info.target_node_index, - edge_weight: only_onto_edge_info.edge_weight.clone(), - }); - } - } - } else if let Some(root_seen_as_of) = root_seen_as_of_onto { - if onto_item_weight - .vector_clock_write() - .has_entries_newer_than(root_seen_as_of) - { - // Item write vector clock has entries > "seen as of" on `to_rebase` graph for - // `onto`'s entry on root node: Conflict (RemoveModifiedItem) - conflicts.push(Conflict::RemoveModifiedItem { - container: to_rebase_container_index, - removed_item: only_onto_edge_info.target_node_index, - }); - } - } - // Item removed by `to_rebase`: No conflict & no update necessary. - } - - // - Sets same: No conflicts/updates - Ok((conflicts, updates)) + Ok(self.working_copy().await.detect_conflicts_and_updates( + to_rebase_vector_clock_id, + &*onto_workspace_snapshot.working_copy().await, + onto_vector_clock_id, + )?) } // NOTE(nick): this should only be used by the rebaser. @@ -1066,250 +418,53 @@ impl WorkspaceSnapshot { Ok(self.working_copy_mut().await.edge_endpoints(edge_index)?) } - /// Replace references should be called when a node has been changed and - /// copied into the graph. It will use the original_node_index to find the - /// most up to date version of the new node, and replace all edges that - /// point to that old node with edges pointing to the new node. 
Because the - /// graph is treated as an immutable, copy-on-write structure, this means - /// walking up the graph to the root and copying all nodes that have edges - /// that point to the original_node_index, and all nodes that have edges - /// that point to *those* parent nodes, etc, until we've processed the - /// entire parent tree of the original node. - #[instrument(level = "trace", skip_all)] - pub async fn replace_references( - &self, - original_node_index: NodeIndex, - ) -> WorkspaceSnapshotResult<()> { - // Climb from the original node, up to root, rewriting outgoing edges - // along the way. But we have to be sure to climb to root once for each - // sibling node that we encounter as we walk up to root. - let mut outer_queue = VecDeque::from([original_node_index]); - - while let Some(old_node_index) = outer_queue.pop_front() { - let mut work_queue = VecDeque::from([old_node_index]); - - while let Some(old_node_index) = work_queue.pop_front() { - for edge_ref in self - .working_copy() - .await - .graph() - .edges_directed(old_node_index, Direction::Incoming) - { - work_queue.push_back(edge_ref.source()); - outer_queue.push_back(edge_ref.source()) - } - - let latest_node_idx = self - .working_copy() - .await - .get_latest_node_idx(old_node_index)?; - let new_node_idx = if latest_node_idx != old_node_index { - latest_node_idx - } else { - self.copy_node_by_index(latest_node_idx).await? - }; - - // Find all outgoing edges weights and find the edge targets. - let mut edges_to_create = Vec::new(); - for edge_ref in self - .working_copy() - .await - .graph() - .edges_directed(old_node_index, Outgoing) - { - edges_to_create.push(( - edge_ref.weight().clone(), - edge_ref.target(), - edge_ref.id(), - )); - } - - // Make copies of these edges where the source is the new node index and the - // destination is one of the following... - // - If an entry exists in `old_to_new_node_indices` for the destination node index, - // use the value of the entry (the destination was affected by the replacement, - // and needs to use the new node index to reflect this). - // - There is no entry in `old_to_new_node_indices`; use the same destination node - // index as the old edge (the destination was *NOT* affected by the replacement, - // and does not have any new information to reflect). - for (edge_weight, destination_node_index, edge_idx) in edges_to_create { - // Need to directly add the edge, without going through `self.add_edge` to avoid - // infinite recursion, and because we're the place doing all the book keeping - // that we'd be interested in happening from `self.add_edge`. 
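
// As an aside on the climb itself: stripped of the copy-on-write and merkle
// bookkeeping, the traversal is a breadth-first walk over incoming edges
// from the changed node up to the root. A minimal sketch on a bare petgraph
// StableDiGraph (not the dal implementation):
use std::collections::VecDeque;

use petgraph::prelude::*;
use petgraph::visit::EdgeRef;

fn ancestors(graph: &StableDiGraph<&'static str, ()>, start: NodeIndex) -> Vec<NodeIndex> {
    let mut found = Vec::new();
    let mut queue = VecDeque::from([start]);
    while let Some(node) = queue.pop_front() {
        for edge in graph.edges_directed(node, Direction::Incoming) {
            // In `replace_references` each parent goes onto *both* queues,
            // so it is revisited once its children have been copied and its
            // old node indices invalidated.
            queue.push_back(edge.source());
            found.push(edge.source());
        }
    }
    found
}

fn main() {
    let mut graph = StableDiGraph::new();
    let root = graph.add_node("root");
    let container = graph.add_node("container");
    let item = graph.add_node("item");
    graph.add_edge(root, container, ());
    graph.add_edge(container, item, ());
    // Changing `item` dirties `container`, then `root`.
    assert_eq!(ancestors(&graph, item), vec![container, root]);
}
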
- let destination_node_index = self - .working_copy() - .await - .get_latest_node_idx(destination_node_index)?; - - self.working_copy_mut() - .await - .graph_mut() - .remove_edge(edge_idx); - - self.working_copy_mut().await.graph_mut().update_edge( - new_node_idx, - destination_node_index, - edge_weight, - ); - } - - self.update_merkle_tree_hash(new_node_idx).await?; - } - } - - self.working_copy_mut().await.update_root_index()?; - - Ok(()) - } - - async fn update_merkle_tree_hash( + #[instrument(level = "debug", skip_all)] + pub async fn import_subgraph( &self, - node_index_to_update: NodeIndex, + other: &mut Self, + root_index: NodeIndex, ) -> WorkspaceSnapshotResult<()> { - let remote_node_weight = self.get_node_weight(node_index_to_update).await?; - let node_id_to_update = remote_node_weight.id(); - - let mut hasher = si_events::MerkleTreeHash::hasher(); - hasher.update(remote_node_weight.node_hash().as_bytes()); - - // Need to make sure that ordered containers have their ordered children in the - // order specified by the ordering graph node. - let explicitly_ordered_children = self - .ordered_children_for_node(node_id_to_update) - .await? - .unwrap_or_default(); - - // Need to make sure the unordered neighbors are added to the hash in a stable order to - // ensure the merkle tree hash is identical for identical trees. - let mut unordered_neighbors = Vec::new(); - for neighbor_index in self - .working_copy() - .await - .graph() - .neighbors_directed(node_index_to_update, Outgoing) - { - let neighbor_id = self.get_node_weight(neighbor_index).await?.id(); - // Only add the neighbor if it's not one of the ones with an explicit ordering. - if !explicitly_ordered_children.contains(&neighbor_id) { - unordered_neighbors.push((neighbor_id, neighbor_index)); - } - } - - // We'll sort the neighbors by the ID in the NodeWeight, as that will result in more stable - // results than if we sorted by the NodeIndex itself. - unordered_neighbors.sort_by_cached_key(|(id, _index)| *id); - // It's not important whether the explicitly ordered children are first or last, as long as - // they are always in that position, and are always in the sequence specified by the - // container's Ordering node. - let mut ordered_neighbors = - Vec::with_capacity(explicitly_ordered_children.len() + unordered_neighbors.len()); - ordered_neighbors.extend(explicitly_ordered_children); - ordered_neighbors - .extend::>(unordered_neighbors.iter().map(|(id, _index)| *id).collect()); - - for neighbor_id in ordered_neighbors { - let graph_local_node_weight = self.get_graph_local_node_weight(neighbor_id).await?; - let neighbor_node_index = self.get_node_index_by_id(neighbor_id).await?; - hasher.update(graph_local_node_weight.merkle_tree_hash().as_bytes()); - - // The edge(s) between `node_index_to_update`, and `neighbor_node` potentially encode - // important information related to the "identity" of `node_index_to_update`. - for connecting_edgeref in self - .working_copy() - .await - .graph() - .edges_connecting(node_index_to_update, neighbor_node_index) - { - match connecting_edgeref.weight().kind() { - // This is the key for an entry in a map. 
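
// Zooming out from the match arms for a moment: the hashing above must be
// deterministic for structurally identical trees, which is why explicitly
// ordered children are hashed in their Ordering node's sequence and all
// remaining neighbors are sorted by stable node ID, never by NodeIndex,
// which differs between graph instances. A minimal sketch of that stable
// ordering, using blake3 as a stand-in hasher (an assumption; the real
// si_events::MerkleTreeHash hasher may differ):
fn merkle_hash(
    node_hash: &[u8],
    ordered_children: &[[u8; 32]],
    mut unordered_children: Vec<(u128, [u8; 32])>, // (stable node id, child merkle hash)
) -> [u8; 32] {
    let mut hasher = blake3::Hasher::new();
    hasher.update(node_hash);
    // Explicitly ordered children first, in their Ordering node's sequence...
    for child_hash in ordered_children {
        hasher.update(child_hash);
    }
    // ...then everything else in a deterministic order keyed by node ID.
    unordered_children.sort_by_key(|(id, _)| *id);
    for (_, child_hash) in unordered_children {
        hasher.update(&child_hash);
    }
    *hasher.finalize().as_bytes()
}
// (Returning to the match: the arms below fold edge-carried identity, such
// as `Contain` map keys and `Use { is_default }`, into the same digest.)
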
- EdgeWeightKind::Contain(Some(key)) => hasher.update(key.as_bytes()), - - EdgeWeightKind::Use { is_default } => { - hasher.update(is_default.to_string().as_bytes()) - } - - // This is the key representing an element in a container type corresponding - // to an AttributePrototype - EdgeWeightKind::Prototype(Some(key)) => hasher.update(key.as_bytes()), - - // Nothing to do, as these EdgeWeightKind do not encode extra information - // in the edge itself. - EdgeWeightKind::AuthenticationPrototype - | EdgeWeightKind::Action - | EdgeWeightKind::ActionPrototype - | EdgeWeightKind::Contain(None) - | EdgeWeightKind::FrameContains - | EdgeWeightKind::PrototypeArgument - | EdgeWeightKind::PrototypeArgumentValue - | EdgeWeightKind::Socket - | EdgeWeightKind::Ordering - | EdgeWeightKind::Ordinal - | EdgeWeightKind::Prop - | EdgeWeightKind::Prototype(None) - | EdgeWeightKind::Proxy - | EdgeWeightKind::Root - | EdgeWeightKind::SocketValue => {} - } - } - } - - self.working_copy_mut() - .await - .graph_mut() - .node_weight_mut(node_index_to_update) - .ok_or(WorkspaceSnapshotGraphError::NodeWeightNotFound)? - .set_merkle_tree_hash(hasher.finalize()); - - Ok(()) - } - - async fn copy_node_by_index( - &self, - node_index: NodeIndex, - ) -> WorkspaceSnapshotResult { - let remote_node_weight = self.get_node_weight(node_index).await?; - let local_weight = self.working_copy().await.get_node_weight(node_index)?; - let node_index = self + Ok(self .working_copy_mut() .await - .add_node(remote_node_weight, local_weight.address())?; - self.update_merkle_tree_hash(node_index).await?; - - Ok(node_index) + .import_subgraph(&*other.working_copy().await, root_index)?) } + /// Calls [`WorkspaceSnapshotGraph::replace_references()`] #[instrument(level = "debug", skip_all)] - pub async fn get_node_weight_by_address( + pub async fn replace_references( &self, - address: NodeWeightAddress, - ) -> WorkspaceSnapshotResult> { - self.node_weight_db - .read(&address) - .await? - .ok_or(WorkspaceSnapshotError::NodeWeightMissing(address)) + original_node_index: NodeIndex, + ) -> WorkspaceSnapshotResult<()> { + Ok(self + .working_copy_mut() + .await + .replace_references(original_node_index)?) } #[instrument(level = "debug", skip_all)] pub async fn get_node_weight_by_id( &self, id: impl Into, - ) -> WorkspaceSnapshotResult> { + ) -> WorkspaceSnapshotResult { let node_idx = self.get_node_index_by_id(id).await?; - self.get_node_weight(node_idx).await + Ok(self + .working_copy() + .await + .get_node_weight(node_idx)? + .to_owned()) } #[instrument(level = "debug", skip_all)] pub async fn get_node_weight( &self, node_index: NodeIndex, - ) -> WorkspaceSnapshotResult> { - let local_node_weight = self.working_copy().await.get_node_weight(node_index)?; - self.node_weight_db - .read(&local_node_weight.address()) - .await? - .ok_or(WorkspaceSnapshotError::NodeWeightMissing( - local_node_weight.address(), - )) + ) -> WorkspaceSnapshotResult { + Ok(self + .working_copy() + .await + .get_node_weight(node_index)? + .to_owned()) } #[instrument(level = "debug", skip_all)] @@ -1326,103 +481,18 @@ impl WorkspaceSnapshot { #[instrument(level = "debug", skip_all)] pub async fn cleanup(&self) -> WorkspaceSnapshotResult<()> { - let start = tokio::time::Instant::now(); - - // We want to remove all of the "garbage" we've accumulated while operating on the graph. - // Anything that is no longer reachable from the current `self.root_index` should be - // removed as it is no longer referenced by anything in the current version of the graph. 
- // Fortunately, we don't need to walk the graph to find out if something is reachable from - // the root, since `has_path_connecting` is slow (depth-first search). Any node that does - // *NOT* have any incoming edges (aside from the `self.root_index` node) is not reachable, - // by definition. Finding the list of nodes with no incoming edges is very fast. If we - // remove all nodes (that are not the `self.root_index` node) that do not have any - // incoming edges, and we keep doing this until the only one left is the `self.root_index` - // node, then all remaining nodes are reachable from `self.root_index`. - let mut old_root_ids: HashSet; - let root_index = self.root().await?; - loop { - old_root_ids = self - .working_copy() - .await - .graph() - .externals(Incoming) - .filter(|node_idx| *node_idx != root_index) - .collect(); - if old_root_ids.is_empty() { - break; - } - - for stale_node_index in &old_root_ids { - self.working_copy_mut() - .await - .graph_mut() - .remove_node(*stale_node_index); - } - } - info!("Removed stale NodeIndex: {:?}", start.elapsed()); - - let node_addresses: HashSet = self - .working_copy() - .await - .graph() - .node_weights() - .map(|weight| weight.address()) - .collect(); - - let mut remaining_node_ids = HashSet::new(); - for address in &node_addresses { - let node_weight = self.get_node_weight_by_address(*address).await?; - remaining_node_ids.insert(node_weight.id()); - } - - // After we retain the nodes, collect the remaining ids and indices. - info!( - "Got remaining node IDs: {} ({:?})", - remaining_node_ids.len(), - start.elapsed() - ); - let remaining_node_indices: HashSet = - self.working_copy().await.graph().node_indices().collect(); - info!( - "Got remaining NodeIndex: {} ({:?})", - remaining_node_indices.len(), - start.elapsed() - ); - - // Cleanup the node index by id map. - self.working_copy_mut() - .await - .retain_node_index_by_id(remaining_node_ids); - info!("Removed stale node_index_by_id: {:?}", start.elapsed()); - - // Cleanup the node indices by lineage id map. - self.working_copy_mut() - .await - .retain_node_indices_by_lineage_id(remaining_node_indices); - info!( - "Removed stale node_indices_by_lineage_id: {:?}", - start.elapsed() - ); - - self.working_copy_mut() - .await - .retain_id_by_node_addresses(node_addresses); - info!("Removed stale id_by_node_address: {:?}", start.elapsed()); - + self.working_copy_mut().await.cleanup(); Ok(()) } #[instrument(level = "debug", skip_all)] - pub async fn nodes(&self) -> WorkspaceSnapshotResult, NodeIndex)>> { - let mut result = vec![]; - - for node in self.working_copy().await.nodes() { - let node_weight = self.get_node_weight_by_address(node.address()).await?; - let node_index = self.get_node_index_by_id(node_weight.id()).await?; - result.push((node_weight.clone(), node_index)); - } - - Ok(result) + pub async fn nodes(&self) -> WorkspaceSnapshotResult> { + Ok(self + .working_copy() + .await + .nodes() + .map(|(weight, index)| (weight.to_owned(), index)) + .collect()) } #[instrument(level = "debug", skip_all)] @@ -1435,216 +505,12 @@ impl WorkspaceSnapshot { .collect()) } - pub async fn import_subgraph( - &self, - other: &WorkspaceSnapshot, - root_index: NodeIndex, - ) -> WorkspaceSnapshotResult<()> { - let mut dfs = - petgraph::visit::DfsPostOrder::new(other.working_copy().await.graph(), root_index); - while let Some(node_index_to_copy) = dfs.next(&other.working_copy().await.graph()) { - let node_weight_to_copy = other - .get_node_weight(node_index_to_copy) - .await? 
- .as_ref() - .clone(); - let node_weight_id = node_weight_to_copy.id(); - let node_weight_lineage_id = node_weight_to_copy.lineage_id(); - - // The following assumes there are no conflicts between "self" and "other". If there - // are conflicts between them, we shouldn't be running updates. - let node_index = if let Some(equivalent_node_index) = self - .find_equivalent_node(node_weight_id, node_weight_lineage_id) - .await? - { - let equivalent_node_weight = self.get_node_weight(equivalent_node_index).await?; - if equivalent_node_weight - .vector_clock_write() - .is_newer_than(node_weight_to_copy.vector_clock_write()) - { - equivalent_node_index - } else { - let new_node_index = self.add_node(node_weight_to_copy).await?; - self.working_copy() - .await - .get_latest_node_idx(new_node_index)? - } - } else { - self.add_node(node_weight_to_copy).await? - }; - - for (edge_weight, _, target_idx) in other - .edges_directed_by_index(node_index_to_copy, Outgoing) - .await? - { - let target_id = other.get_node_weight(target_idx).await?.id(); - let latest_target = self.get_node_index_by_id(target_id).await?; - self.working_copy_mut().await.graph_mut().update_edge( - node_index, - latest_target, - edge_weight, - ); - } - } - - Ok(()) + pub async fn dot(&self) { + self.working_copy().await.dot(); } - #[allow(dead_code)] pub async fn tiny_dot_to_file(&self, suffix: Option<&str>) { - let suffix = suffix.unwrap_or("dot"); - // NOTE(nick): copy the output and execute this on macOS. It will create a file in the - // process and open a new tab in your browser. - // ``` - // GRAPHFILE=; cat $GRAPHFILE.txt | dot -Tsvg -o processed-$GRAPHFILE.svg; open processed-$GRAPHFILE.svg - // ``` - - let self_clone = self.clone(); - - let dot = tokio::task::spawn_blocking(move || { - let graph_clone = executor::block_on(self_clone.working_copy()).graph().clone(); - - let get_node_attributes_fn = |_, (node_index, node_weight): (NodeIndex, &GraphLocalNodeWeight)| { - let remote_node_weight = executor::block_on(self_clone.get_node_weight(node_index)).expect("unable to get node weight"); - let (label, color) = match remote_node_weight.as_ref() { - NodeWeight::Content(weight) => { - let discrim = ContentAddressDiscriminants::from(weight.content_address()); - let color = match discrim { - // Some of these should never happen as they have their own top-level - // NodeWeight variant. 
- ContentAddressDiscriminants::Action => "green", - ContentAddressDiscriminants::ActionBatch => "green", - ContentAddressDiscriminants::ActionRunner => "green", - ContentAddressDiscriminants::ActionPrototype => "green", - ContentAddressDiscriminants::AttributePrototype => "green", - ContentAddressDiscriminants::Component => "black", - ContentAddressDiscriminants::OutputSocket => "red", - ContentAddressDiscriminants::Func => "black", - ContentAddressDiscriminants::FuncArg => "black", - ContentAddressDiscriminants::InputSocket => "red", - ContentAddressDiscriminants::JsonValue => "fuchsia", - ContentAddressDiscriminants::Prop => "orange", - ContentAddressDiscriminants::Root => "black", - ContentAddressDiscriminants::Schema => "black", - ContentAddressDiscriminants::SchemaVariant => "black", - ContentAddressDiscriminants::Secret => "black", - ContentAddressDiscriminants::StaticArgumentValue => "green", - ContentAddressDiscriminants::ValidationPrototype => "black", - }; - (discrim.to_string(), color) - } - NodeWeight::AttributePrototypeArgument(apa) => ( - format!( - "Attribute Prototype Argument{}", - apa.targets() - .map(|targets| format!( - "\nsource: {}\nto: {}", - targets.source_component_id, targets.destination_component_id - )) - .unwrap_or("".to_string()) - ), - "green", - ), - NodeWeight::AttributeValue(_) => ("Attribute Value".to_string(), "blue"), - NodeWeight::Category(category_node_weight) => match category_node_weight.kind() - { - CategoryNodeKind::Component => { - ("Components (Category)".to_string(), "black") - } - CategoryNodeKind::ActionBatch => { - ("Action Batches (Category)".to_string(), "black") - } - CategoryNodeKind::Func => ("Funcs (Category)".to_string(), "black"), - CategoryNodeKind::Schema => ("Schemas (Category)".to_string(), "black"), - CategoryNodeKind::Secret => ("Secrets (Category)".to_string(), "black"), - }, - NodeWeight::Component(component) => ( - "Component".to_string(), - if component.to_delete() { - "gray" - } else { - "black" - }, - ), - NodeWeight::Func(func_node_weight) => { - (format!("Func\n{}", func_node_weight.name()), "black") - } - NodeWeight::FuncArgument(func_arg_node_weight) => ( - format!("Func Arg\n{}", func_arg_node_weight.name()), - "black", - ), - NodeWeight::Ordering(_) => { - (NodeWeightDiscriminants::Ordering.to_string(), "gray") - } - NodeWeight::Prop(prop_node_weight) => { - (format!("Prop\n{}", prop_node_weight.name()), "orange") - } - }; - let color = color.to_string(); - let id = remote_node_weight.id(); - format!( - "label = \"\n\n{label}\n{node_index:?}\n{id}\n\n{:?}\n\n{:?}\"\nfontcolor = {color}\ncolor = {color}", - node_weight.address(), - node_weight.merkle_tree_hash(), - ) - }; - - let dot = petgraph::dot::Dot::with_attr_getters( - &graph_clone, - &[ - petgraph::dot::Config::NodeNoLabel, - petgraph::dot::Config::EdgeNoLabel, - ], - &|_, edgeref| { - let discrim: EdgeWeightKindDiscriminants = edgeref.weight().kind().into(); - let color = match discrim { - EdgeWeightKindDiscriminants::Action => "black", - EdgeWeightKindDiscriminants::ActionPrototype => "black", - EdgeWeightKindDiscriminants::AuthenticationPrototype => "black", - EdgeWeightKindDiscriminants::Contain => "blue", - EdgeWeightKindDiscriminants::FrameContains => "black", - EdgeWeightKindDiscriminants::Ordering => "gray", - EdgeWeightKindDiscriminants::Ordinal => "gray", - EdgeWeightKindDiscriminants::Prop => "orange", - EdgeWeightKindDiscriminants::Prototype => "green", - EdgeWeightKindDiscriminants::PrototypeArgument => "green", - 
EdgeWeightKindDiscriminants::PrototypeArgumentValue => "green", - EdgeWeightKindDiscriminants::Socket => "red", - EdgeWeightKindDiscriminants::SocketValue => "purple", - EdgeWeightKindDiscriminants::Proxy => "gray", - EdgeWeightKindDiscriminants::Root => "black", - EdgeWeightKindDiscriminants::Use => "black", - }; - - match edgeref.weight().kind() { - EdgeWeightKind::Contain(key) => { - let key = key - .as_deref() - .map(|key| format!(" ({key}")) - .unwrap_or("".into()); - format!( - "label = \"{discrim:?}{key}\"\nfontcolor = {color}\ncolor = {color}" - ) - } - _ => format!("label = \"{discrim:?}\"\nfontcolor = {color}\ncolor = {color}"), - } - }, - &get_node_attributes_fn); - - Box::new(format!("{dot:?}")) - }).await.expect("should generate dot"); - let filename_no_extension = format!("{}-{}", Ulid::new().to_string(), suffix); - let mut file = File::create(format!("/home/zacharyhamm/{filename_no_extension}.txt")) - .await - .expect("could not create file"); - file.write_all(dot.as_bytes()) - .await - .expect("could not write file"); - println!("dot output stored in file (filename without extension: {filename_no_extension})"); - } - - pub async fn get_node_index_by_id_opt(&self, id: impl Into) -> Option { - self.working_copy().await.get_node_index_by_id_opt(id) + self.working_copy().await.tiny_dot_to_file(suffix); } #[instrument(level = "debug", skip_all)] @@ -1663,7 +529,7 @@ impl WorkspaceSnapshot { Ok(self.working_copy().await.get_latest_node_idx(node_index)?) } - #[instrument(level = "info", skip_all)] + #[instrument(skip_all)] pub async fn find( ctx: &DalContext, workspace_snapshot_addr: WorkspaceSnapshotAddress, @@ -1685,9 +551,6 @@ impl WorkspaceSnapshot { address: Arc::new(RwLock::new(workspace_snapshot_addr)), read_only_graph: snapshot, working_copy: Arc::new(RwLock::new(None)), - node_weight_db: ctx.layer_db().node_weight().clone(), - events_tenancy: ctx.events_tenancy(), - events_actor: ctx.events_actor(), }) } @@ -1719,28 +582,8 @@ impl WorkspaceSnapshot { source: Option, kind: CategoryNodeKind, ) -> WorkspaceSnapshotResult { - let source_index = match source { - Some(provided_source) => self.get_node_index_by_id(provided_source).await?, - None => self.root().await?, - }; - - // TODO(nick): ensure that two target category nodes of the same kind don't exist for the - // same source node. - for (_, _, maybe_category_node_index) in - self.edges_directed_by_index(source_index, Outgoing).await? 
- { - let maybe_category_node_weight = - self.get_node_weight(maybe_category_node_index).await?; - - if let NodeWeight::Category(category_node_weight) = maybe_category_node_weight.as_ref() - { - if category_node_weight.kind() == kind { - return Ok(category_node_weight.id()); - } - } - } - - Err(WorkspaceSnapshotError::CategoryNodeNotFound(source_index)) + let (category_node_id, _) = self.working_copy().await.get_category_node(source, kind)?; + Ok(category_node_id) } #[instrument(level = "debug", skip_all)] @@ -1882,7 +725,7 @@ impl WorkspaceSnapshot { pub async fn all_outgoing_targets( &self, id: impl Into, - ) -> WorkspaceSnapshotResult>> { + ) -> WorkspaceSnapshotResult> { let mut result = vec![]; let target_idxs: Vec = self .edges_directed(id, Direction::Outgoing) @@ -1893,7 +736,7 @@ impl WorkspaceSnapshot { for target_idx in target_idxs { let node_weight = self.get_node_weight(target_idx).await?; - result.push(node_weight); + result.push(node_weight.to_owned()); } Ok(result) @@ -1903,7 +746,7 @@ impl WorkspaceSnapshot { pub async fn all_incoming_sources( &self, id: impl Into, - ) -> WorkspaceSnapshotResult>> { + ) -> WorkspaceSnapshotResult> { let mut result = vec![]; let source_idxs: Vec = self .edges_directed(id, Direction::Incoming) @@ -1914,7 +757,7 @@ impl WorkspaceSnapshot { for source_idx in source_idxs { let node_weight = self.get_node_weight(source_idx).await?; - result.push(node_weight); + result.push(node_weight.to_owned()); } Ok(result) @@ -1987,72 +830,12 @@ impl WorkspaceSnapshot { target_node_index: NodeIndex, edge_kind: EdgeWeightKindDiscriminants, ) -> WorkspaceSnapshotResult<()> { - let source_node_index = self - .working_copy() - .await - .get_latest_node_idx(source_node_index)?; - - self.copy_node_by_index(source_node_index).await?; - self.replace_references(source_node_index).await?; - let source_node_index = self - .working_copy() - .await - .get_latest_node_idx(source_node_index)?; - - let source_node_id = self.get_node_weight(source_node_index).await?.id(); - - let target_node_index = self - .working_copy() - .await - .get_latest_node_idx(target_node_index)?; - - let target_node_id = self.get_node_weight(target_node_index).await?.id(); - - self.working_copy_mut() - .await - .remove_edge(source_node_index, target_node_index, edge_kind); - - if let Some(mut ordering_node) = self.ordering_node_for_container(source_node_id).await? { - // We only want to update the ordering of the container if we removed an edge to - // one of the ordered relationships. - if ordering_node.remove_from_order(change_set, target_node_id)? { - let ordering_node_index = self.get_node_index_by_id(ordering_node.id()).await?; - self.working_copy_mut().await.remove_edge( - ordering_node_index, - target_node_index, - EdgeWeightKindDiscriminants::Ordinal, - ); - - self.add_node(NodeWeight::Ordering(ordering_node)).await?; - } - } - - let source_node_index = self - .working_copy() - .await - .get_latest_node_idx(source_node_index)?; - - let mut work_queue = VecDeque::from([source_node_index]); - - while let Some(node_index) = work_queue.pop_front() { - self.update_merkle_tree_hash( - // If we updated the ordering node, that means we've invalidated the container's - // NodeIndex (new_source_node_index), so we need to find the new NodeIndex to be able - // to update the container's merkle tree hash. 
- node_index, - ) - .await?; - - for edge_ref in self - .working_copy() - .await - .edges_directed(node_index, Incoming) - { - work_queue.push_back(edge_ref.source()); - } - } - - Ok(()) + Ok(self.working_copy_mut().await.remove_edge( + change_set, + source_node_index, + target_node_index, + edge_kind, + )?) } #[instrument(level = "debug", skip_all)] @@ -2084,182 +867,62 @@ impl WorkspaceSnapshot { onto: &WorkspaceSnapshot, updates: &[Update], ) -> WorkspaceSnapshotResult<()> { - for update in updates { - match update { - Update::NewEdge { - source, - destination, - edge_weight, - } => { - let updated_source = self.working_copy().await.get_latest_node_idx(*source)?; - let updated_source_id = self.get_node_weight(updated_source).await?.id(); - let destination = self - .find_in_self_or_create_using_onto(*destination, onto) - .await?; - let destination_id = self.get_node_weight(destination).await?.id(); - - self.add_edge(updated_source_id, edge_weight.clone(), destination_id) - .await?; - } - Update::RemoveEdge { - source, - destination, - edge_kind, - } => { - let updated_source = self.working_copy().await.get_latest_node_idx(*source)?; - let destination = self - .working_copy() - .await - .get_latest_node_idx(*destination)?; - self.remove_edge( - to_rebase_change_set, - updated_source, - destination, - *edge_kind, - ) - .await?; - } - Update::ReplaceSubgraph { - onto: onto_subgraph_root, - to_rebase: to_rebase_subgraph_root, - } => { - let updated_to_rebase = self - .working_copy() - .await - .get_latest_node_idx(*to_rebase_subgraph_root)?; - self.find_in_self_or_create_using_onto(*onto_subgraph_root, onto) - .await?; - self.replace_references(updated_to_rebase).await?; + Ok(self.working_copy_mut().await.perform_updates( + to_rebase_change_set, + &*onto.working_copy().await, + updates, + )?) + } + + /// Mark whether a prop can be used as an input to a function. Props below + /// Maps and Arrays are not valid inputs. Must only be used when + /// "finalizing" a schema variant! + #[instrument(level = "debug", skip_all)] + pub async fn mark_prop_as_able_to_be_used_as_prototype_arg( + &self, + node_index: NodeIndex, + ) -> WorkspaceSnapshotResult<()> { + self.working_copy_mut() + .await + .update_node_weight(node_index, |node_weight| match node_weight { + NodeWeight::Prop(prop_inner) => { + prop_inner.set_can_be_used_as_prototype_arg(true); + Ok(()) } - } - } + _ => Err(WorkspaceSnapshotGraphError::IncompatibleNodeTypes)?, + })?; + Ok(()) } - /// Find in self where self is the "to rebase" side or create using "onto". - async fn find_in_self_or_create_using_onto( + #[instrument(level = "debug", skip_all)] + pub async fn ordering_node_for_container( &self, - unchecked: NodeIndex, - onto: &WorkspaceSnapshot, - ) -> WorkspaceSnapshotResult { - let unchecked_local_node_weight = onto.working_copy().await.get_node_weight(unchecked)?; - let unchecked_node_weight = onto.get_node_weight(unchecked).await?; - - let found_or_created = { - let equivalent_node = if let Some(found) = self - .working_copy() - .await - .find_latest_idx_in_self_from_other_idx(&*onto.working_copy().await, unchecked)? - { - Some(found) - } else { - self.working_copy().await.find_equivalent_node( - unchecked_node_weight.id(), - unchecked_node_weight.lineage_id(), - )? 
- }; - - match equivalent_node { - Some(found_equivalent_node) => { - let found_equivalent_node_weight = self - .working_copy() - .await - .get_node_weight(found_equivalent_node)?; - if found_equivalent_node_weight.merkle_tree_hash() - != unchecked_local_node_weight.merkle_tree_hash() - { - self.import_subgraph(onto, unchecked).await?; - self.working_copy() - .await - .find_latest_idx_in_self_from_other_idx( - &*onto.working_copy().await, - unchecked, - )? - .ok_or(WorkspaceSnapshotGraphError::NodeWeightNotFound)? - } else { - found_equivalent_node - } - } - None => { - self.import_subgraph(onto, unchecked).await?; - self.working_copy() - .await - .find_latest_idx_in_self_from_other_idx( - &*onto.working_copy().await, - unchecked, - )? - .ok_or(WorkspaceSnapshotGraphError::NodeWeightNotFound)? - } - } - }; - Ok(found_or_created) + id: impl Into, + ) -> WorkspaceSnapshotResult> { + let idx = self.get_node_index_by_id(id).await?; + Ok(self.working_copy().await.ordering_node_for_container(idx)?) } - /// Returns an `Option>`. If there is an ordering node, then the - /// return will be a [`Some`], where the [`Vec`] is populated with the - /// [`Ulid`] of the nodes specified by the ordering node, in the order - /// defined by the ordering node. If there is not an ordering node, then the - /// return will be [`None`]. #[instrument(level = "debug", skip_all)] pub async fn ordered_children_for_node( &self, id: impl Into, ) -> WorkspaceSnapshotResult>> { + let idx = self.get_node_index_by_id(id.into()).await?; + let mut result = vec![]; Ok( - if let Some(ordering_weight) = self.ordering_node_for_container(id).await? { - for ordered_id in ordering_weight.order() { - // verify that ordered thing in order is actually in the graph - if self.get_node_index_by_id_opt(*ordered_id).await.is_none() { - return Err(WorkspaceSnapshotError::NodeWithIdNotFound(*ordered_id)); - } + if let Some(idxs) = self.working_copy().await.ordered_children_for_node(idx)? { + for idx in idxs { + let id = self.get_node_weight(idx).await?.id(); + result.push(id); } - Some(ordering_weight.order().clone()) + Some(result) } else { None }, ) } - - pub async fn ordering_node_for_container( - &self, - container_node_id: impl Into, - ) -> WorkspaceSnapshotResult> { - let container_node_id = container_node_id.into(); - let mut ordering_nodes: Vec = vec![]; - for (edge, _, target) in self.edges_directed(container_node_id, Outgoing).await? { - if edge.kind() == &EdgeWeightKind::Ordering { - if let NodeWeight::Ordering(inner) = self.get_node_weight(target).await?.as_ref() { - ordering_nodes.push(inner.clone()); - } - } - } - - if ordering_nodes.len() > 1 { - error!( - "Too many ordering nodes found for container NodeId {:?}", - container_node_id, - ); - return Err(WorkspaceSnapshotError::TooManyOrderingForNode( - container_node_id, - )); - } - Ok(ordering_nodes.first().cloned()) - } - - pub async fn update_order( - &self, - change_set: &ChangeSet, - container_id: Ulid, - new_order: Vec, - ) -> WorkspaceSnapshotResult<()> { - if let Some(mut ordering_node) = self.ordering_node_for_container(container_id).await? 
{ - ordering_node.set_order(change_set, new_order)?; - self.add_node(NodeWeight::Ordering(ordering_node)).await?; - } - - Ok(()) - } - #[instrument(level = "debug", skip_all)] pub async fn index_or_key_of_child_entry( &self, diff --git a/lib/dal/src/workspace_snapshot/graph.rs b/lib/dal/src/workspace_snapshot/graph.rs index 830c2cd184..99d3e7786f 100644 --- a/lib/dal/src/workspace_snapshot/graph.rs +++ b/lib/dal/src/workspace_snapshot/graph.rs @@ -1,28 +1,34 @@ +use std::collections::{HashMap, HashSet, VecDeque}; +use std::fs::File; +use std::io::Write; + +use chrono::Utc; +/// Ensure [`NodeIndex`] is usable by external crates. +pub use petgraph::graph::NodeIndex; use petgraph::stable_graph::Edges; -use petgraph::{algo, prelude::*}; +pub use petgraph::Direction; +use petgraph::{algo, prelude::*, visit::DfsEvent}; use serde::{Deserialize, Serialize}; -use si_events::{ - deserialize_merkle_tree_hash_as_bytes, deserialize_node_weight_address_as_bytes, - serialize_merkle_tree_hash_as_bytes, serialize_node_weight_address_as_bytes, MerkleTreeHash, - NodeWeightAddress, -}; -use std::collections::{HashMap, HashSet}; -use std::sync::Arc; - -use telemetry::prelude::*; +use si_events::ContentHash; use thiserror::Error; use ulid::Ulid; +use telemetry::prelude::*; + +use crate::change_set::{ChangeSet, ChangeSetError}; +use crate::workspace_snapshot::content_address::ContentAddressDiscriminants; +use crate::workspace_snapshot::node_weight::category_node_weight::CategoryNodeKind; +use crate::workspace_snapshot::node_weight::{CategoryNodeWeight, NodeWeightDiscriminants}; +use crate::workspace_snapshot::vector_clock::VectorClockId; use crate::workspace_snapshot::{ - edge_weight::{EdgeWeight, EdgeWeightError, EdgeWeightKindDiscriminants}, - node_weight::NodeWeightError, + conflict::Conflict, + content_address::ContentAddress, + edge_weight::{EdgeWeight, EdgeWeightError, EdgeWeightKind, EdgeWeightKindDiscriminants}, + node_weight::{NodeWeight, NodeWeightError, OrderingNodeWeight}, + update::Update, }; -/// Ensure [`NodeIndex`] is usable by external crates. 
-pub use petgraph::graph::NodeIndex; -pub use petgraph::Direction; - -use super::node_weight::NodeWeight; +mod tests; pub type LineageId = Ulid; @@ -34,8 +40,12 @@ pub enum WorkspaceSnapshotGraphError { CannotCompareOrderedAndUnorderedContainers(NodeIndex, NodeIndex), #[error("could not find category node used by node with index {0:?}")] CategoryNodeNotFound(NodeIndex), + #[error("ChangeSet error: {0}")] + ChangeSet(#[from] ChangeSetError), #[error("Unable to retrieve content for ContentHash")] ContentMissingForContentHash, + #[error("Action would create a graph cycle")] + CreateGraphCycle, #[error("could not find the newly imported subgraph when performing updates")] DestinationNotUpdatedWhenImportingSubgraph, #[error("Edge does not exist for EdgeIndex: {0:?}")] @@ -54,12 +64,12 @@ pub enum WorkspaceSnapshotGraphError { NodeWeight(#[from] NodeWeightError), #[error("node weight not found")] NodeWeightNotFound, - #[error("Node with address not found: {0}")] - NodeWithAddressNotFound(NodeWeightAddress), #[error("Node with ID {} not found", .0.to_string())] NodeWithIdNotFound(Ulid), #[error("No Prop found for NodeIndex {0:?}")] NoPropFound(NodeIndex), + #[error("NodeIndex has too many Ordering children: {0:?}")] + TooManyOrderingForNode(NodeIndex), #[error("NodeIndex has too many Prop children: {0:?}")] TooManyPropForNode(NodeIndex), #[error("Workspace Snapshot has conflicts and must be rebased")] @@ -70,53 +80,10 @@ pub enum WorkspaceSnapshotGraphError { pub type WorkspaceSnapshotGraphResult = Result; -#[derive(Serialize, Deserialize, Clone, Debug, Copy)] -pub struct GraphLocalNodeWeight { - // The default serializers for these types serialize them as a string of hex digits, which is - // twice the size of the pure byte representation. So we've implemented byte serialization so - // that postcard will make these as small as possible - #[serde( - serialize_with = "serialize_node_weight_address_as_bytes", - deserialize_with = "deserialize_node_weight_address_as_bytes" - )] - address: NodeWeightAddress, - #[serde( - serialize_with = "serialize_merkle_tree_hash_as_bytes", - deserialize_with = "deserialize_merkle_tree_hash_as_bytes" - )] - merkle_tree_hash: MerkleTreeHash, -} - -impl GraphLocalNodeWeight { - fn new(address: NodeWeightAddress) -> Self { - Self { - address, - merkle_tree_hash: MerkleTreeHash::nil(), - } - } - - pub fn address(&self) -> NodeWeightAddress { - self.address - } - - pub fn merkle_tree_hash(&self) -> MerkleTreeHash { - self.merkle_tree_hash - } - - pub fn set_merkle_tree_hash(&mut self, hash: MerkleTreeHash) { - self.merkle_tree_hash = hash; - } - - pub fn set_address(&mut self, hash: NodeWeightAddress) { - self.address = hash; - } -} - #[derive(Default, Deserialize, Serialize, Clone)] pub struct WorkspaceSnapshotGraph { - graph: StableDiGraph, + graph: StableDiGraph, node_index_by_id: HashMap, - id_by_node_address: HashMap, node_indices_by_lineage_id: HashMap>, root_index: NodeIndex, } @@ -132,76 +99,33 @@ impl std::fmt::Debug for WorkspaceSnapshotGraph { } impl WorkspaceSnapshotGraph { - pub fn new( - root_node_weight: Arc, - root_address: NodeWeightAddress, - ) -> WorkspaceSnapshotGraphResult { - let mut graph = StableDiGraph::with_capacity(1, 0); - let local_node_weight = GraphLocalNodeWeight::new(root_address); - let root_index = graph.add_node(local_node_weight); - let mut me = Self { + pub fn new(change_set: &ChangeSet) -> WorkspaceSnapshotGraphResult { + let mut graph: StableDiGraph = StableDiGraph::with_capacity(1, 0); + let root_node = NodeWeight::new_content( 
+ change_set, + change_set.generate_ulid()?, + ContentAddress::Root, + )?; + + let node_id = root_node.id(); + let lineage_id = root_node.lineage_id(); + let root_index = graph.add_node(root_node); + + let mut result = Self { root_index, graph, ..Default::default() }; - me.insert_into_maps(root_node_weight, root_index, root_address); - Ok(me) - } - - fn insert_into_maps( - &mut self, - node: Arc, - node_index: NodeIndex, - address: NodeWeightAddress, - ) { - // Update the accessor maps using the new index. - self.id_by_node_address.insert(address, node.id()); - self.node_index_by_id.insert(node.id(), node_index); - self.node_indices_by_lineage_id - .entry(node.lineage_id()) - .and_modify(|set| { - set.insert(node_index); - }) - .or_insert_with(|| HashSet::from([node_index])); - } - - pub(crate) fn graph(&self) -> &StableDiGraph { - &self.graph - } + result.add_node_finalize(node_id, lineage_id, root_index)?; - pub(crate) fn graph_mut(&mut self) -> &mut StableDiGraph { - &mut self.graph + Ok(result) } pub fn root(&self) -> NodeIndex { self.root_index } - pub fn retain_node_index_by_id(&mut self, remaining_node_ids: HashSet) { - self.node_index_by_id - .retain(|id, _| remaining_node_ids.contains(id)) - } - - pub fn retain_node_indices_by_lineage_id( - &mut self, - remaining_node_indices_by_lineage_id: HashSet, - ) { - self.node_indices_by_lineage_id.retain(|_, node_indices| { - node_indices - .retain(|node_index| remaining_node_indices_by_lineage_id.contains(node_index)); - !node_indices.is_empty() - }); - } - - pub fn retain_id_by_node_addresses( - &mut self, - remaining_node_addresses: HashSet, - ) { - self.id_by_node_address - .retain(|address, _| remaining_node_addresses.contains(address)) - } - pub fn get_latest_node_idx_opt( &self, node_idx: NodeIndex, @@ -213,13 +137,13 @@ impl WorkspaceSnapshotGraph { Ok(Some(self.get_latest_node_idx(node_idx)?)) } + #[inline(always)] pub fn get_latest_node_idx( &self, node_idx: NodeIndex, ) -> WorkspaceSnapshotGraphResult { - let node_address = self.get_node_weight(node_idx)?.address(); - let id = self.get_id_by_node_address(node_address)?; - self.get_node_index_by_id(id) + let node_id = self.get_node_weight(node_idx)?.id(); + self.get_node_index_by_id(node_id) } pub fn add_edge( @@ -228,10 +152,34 @@ impl WorkspaceSnapshotGraph { edge_weight: EdgeWeight, to_node_index: NodeIndex, ) -> WorkspaceSnapshotGraphResult { + // Temporarily add the edge to the existing tree to see if it would create a cycle. + // Configured to run only in tests because it has a major perf impact otherwise + #[cfg(test)] + { + let temp_edge = + self.graph + .update_edge(from_node_index, to_node_index, edge_weight.clone()); + + let would_create_a_cycle = !self.is_acyclic_directed(); + self.graph.remove_edge(temp_edge); + if would_create_a_cycle { + return Err(WorkspaceSnapshotGraphError::CreateGraphCycle); + } + } + + // Because outgoing edges are part of a node's identity, we create a new "from" node + // as we are effectively writing to that node (we'll need to update the merkle tree + // hash), and everything in the graph should be treated as copy-on-write. + let new_from_node_index = self.copy_node_by_index(from_node_index)?; + // Add the new edge to the new version of the "from" node. 
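
// The cycle probe above can be read in isolation: tentatively insert the
// edge, ask petgraph whether the graph is still a DAG, then remove it. A
// minimal sketch of that probe on a bare StableDiGraph:
use petgraph::algo;
use petgraph::prelude::*;

fn would_create_cycle(
    graph: &mut StableDiGraph<(), ()>,
    from: NodeIndex,
    to: NodeIndex,
) -> bool {
    // `update_edge` reuses an existing edge if one is already present, so
    // this probe assumes `from -> to` is new; probing an existing edge would
    // delete it on cleanup.
    let temp_edge = graph.update_edge(from, to, ());
    let creates_cycle = algo::is_cyclic_directed(&*graph);
    graph.remove_edge(temp_edge);
    creates_cycle
}

fn main() {
    let mut graph = StableDiGraph::new();
    let a = graph.add_node(());
    let b = graph.add_node(());
    let c = graph.add_node(());
    graph.add_edge(a, b, ());
    graph.add_edge(b, c, ());
    assert!(!would_create_cycle(&mut graph, a, c)); // forward edge: still a DAG
    assert!(would_create_cycle(&mut graph, c, a)); // closes a -> b -> c -> a
}
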
- let new_edge_index = self - .graph - .update_edge(from_node_index, to_node_index, edge_weight); + let new_edge_index = + self.graph + .update_edge(new_from_node_index, to_node_index, edge_weight); + self.update_merkle_tree_hash(new_from_node_index)?; + + // Update the rest of the graph to reflect the new node/edge. + self.replace_references(from_node_index)?; Ok(new_edge_index) } @@ -240,22 +188,73 @@ impl WorkspaceSnapshotGraph { self.node_index_by_id.remove(&id.into()); } - pub fn add_node( + fn add_node_finalize( &mut self, - node: Arc, - node_address: NodeWeightAddress, - ) -> WorkspaceSnapshotGraphResult { - let local_node_weight = GraphLocalNodeWeight::new(node_address); + node_id: Ulid, + lineage_id: Ulid, + node_idx: NodeIndex, + ) -> WorkspaceSnapshotGraphResult<()> { + // Update the accessor maps using the new index. + self.node_index_by_id.insert(node_id, node_idx); + self.node_indices_by_lineage_id + .entry(lineage_id) + .and_modify(|set| { + set.insert(node_idx); + }) + .or_insert_with(|| HashSet::from([node_idx])); + self.update_merkle_tree_hash(node_idx)?; + + Ok(()) + } - // Create the node and cache the index. - let new_node_index = self.graph.add_node(local_node_weight); + pub fn add_node(&mut self, node: NodeWeight) -> WorkspaceSnapshotGraphResult { + let node_id = node.id(); + let lineage_id = node.lineage_id(); + let new_node_index = self.graph.add_node(node); - // Update the accessor maps using the new index. - self.insert_into_maps(node, new_node_index, node_address); + self.add_node_finalize(node_id, lineage_id, new_node_index)?; Ok(new_node_index) } + pub fn add_category_node( + &mut self, + change_set: &ChangeSet, + kind: CategoryNodeKind, + ) -> WorkspaceSnapshotGraphResult { + let inner_weight = CategoryNodeWeight::new(change_set, kind)?; + let new_node_index = self.add_node(NodeWeight::Category(inner_weight))?; + Ok(new_node_index) + } + + pub fn get_category_node( + &self, + source: Option, + kind: CategoryNodeKind, + ) -> WorkspaceSnapshotGraphResult<(Ulid, NodeIndex)> { + let source_index = match source { + Some(provided_source) => self.get_node_index_by_id(provided_source)?, + None => self.root_index, + }; + + // TODO(nick): ensure that two target category nodes of the same kind don't exist for the + // same source node. + for edgeref in self.graph.edges_directed(source_index, Outgoing) { + let maybe_category_node_index = edgeref.target(); + let maybe_category_node_weight = self.get_node_weight(maybe_category_node_index)?; + + if let NodeWeight::Category(category_node_weight) = maybe_category_node_weight { + if category_node_weight.kind() == kind { + return Ok((category_node_weight.id(), maybe_category_node_index)); + } + } + } + + Err(WorkspaceSnapshotGraphError::CategoryNodeNotFound( + source_index, + )) + } + pub fn edges_directed( &self, node_index: NodeIndex, @@ -286,8 +285,13 @@ impl WorkspaceSnapshotGraph { .collect() } - pub fn nodes(&self) -> Vec { - self.graph.node_weights().map(ToOwned::to_owned).collect() + pub fn nodes(&self) -> impl Iterator { + self.graph.node_indices().filter_map(|node_idx| { + self.get_node_weight_opt(node_idx) + .ok() + .flatten() + .map(|weight| (weight, node_idx)) + }) } pub fn edges(&self) -> impl Iterator { @@ -303,6 +307,80 @@ impl WorkspaceSnapshotGraph { }) } + // TODO(nick): fix this clippy error. 
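
// One way to discharge the TODO above without the `#[allow]` on the next
// line would be to name the tuple. `OrdinalEdgeInfo` is a hypothetical name,
// not a type this change introduces:
use petgraph::graph::{EdgeIndex, NodeIndex};

pub struct OrdinalEdgeInfo {
    pub ordinal_edge_index: EdgeIndex,
    pub container_ordering_node_index: NodeIndex,
    pub ordered_node_index: NodeIndex,
}

// `add_ordered_edge` could then return
// `WorkspaceSnapshotGraphResult<(EdgeIndex, Option<OrdinalEdgeInfo>)>`,
// keeping the meaning of each index visible at the call site.
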
+ #[allow(clippy::type_complexity)] + pub fn add_ordered_edge( + &mut self, + change_set: &ChangeSet, + from_node_index: NodeIndex, + edge_weight: EdgeWeight, + to_node_index: NodeIndex, + ) -> WorkspaceSnapshotGraphResult<(EdgeIndex, Option<(EdgeIndex, NodeIndex, NodeIndex)>)> { + let _start = std::time::Instant::now(); + let new_edge_index = self.add_edge(from_node_index, edge_weight, to_node_index)?; + + let from_node_index = self.get_latest_node_idx(from_node_index)?; + let to_node_index = self.get_latest_node_idx(to_node_index)?; + + // Find the ordering node of the "container" if there is one, and add the thing pointed to + // by the `to_node_index` to the ordering. Also point the ordering node at the thing with + // an `Ordinal` edge, so that Ordering nodes must be touched *after* the things they order + // in a depth first search + let maybe_ordinal_edge_information = if let Some(container_ordering_node_index) = + self.ordering_node_index_for_container(from_node_index)? + { + let ordinal_edge_index = self.add_edge( + container_ordering_node_index, + EdgeWeight::new(change_set, EdgeWeightKind::Ordinal)?, + to_node_index, + )?; + + let container_ordering_node_index = + self.get_latest_node_idx(container_ordering_node_index)?; + + if let NodeWeight::Ordering(previous_container_ordering_node_weight) = + self.get_node_weight(container_ordering_node_index)? + { + let element_id = self + .node_index_to_id(to_node_index) + .ok_or(WorkspaceSnapshotGraphError::NodeWeightNotFound)?; + + let mut new_container_ordering_node_weight = + previous_container_ordering_node_weight.clone(); + new_container_ordering_node_weight.push_to_order(change_set, element_id)?; + self.add_node(NodeWeight::Ordering(new_container_ordering_node_weight))?; + self.replace_references(container_ordering_node_index)?; + } + + Some(( + ordinal_edge_index, + container_ordering_node_index, + to_node_index, + )) + } else { + None + }; + + Ok((new_edge_index, maybe_ordinal_edge_information)) + } + + pub fn add_ordered_node( + &mut self, + change_set: &ChangeSet, + node: NodeWeight, + ) -> WorkspaceSnapshotGraphResult { + let new_node_index = self.add_node(node)?; + let ordering_node_index = + self.add_node(NodeWeight::Ordering(OrderingNodeWeight::new(change_set)?))?; + let edge_index = self.add_edge( + new_node_index, + EdgeWeight::new(change_set, EdgeWeightKind::Ordering)?, + ordering_node_index, + )?; + let (source, _) = self.edge_endpoints(edge_index)?; + Ok(source) + } + // pub async fn attribute_value_view( // &self, // content_store: &mut impl Store, @@ -464,8 +542,68 @@ impl WorkspaceSnapshotGraph { // Ok(view) // } - // local graph: address, merkle tree hash - // remote graph: node_hash (hash of the content, but not the vector clocks) + pub fn cleanup(&mut self) { + let start = tokio::time::Instant::now(); + + // We want to remove all of the "garbage" we've accumulated while operating on the graph. + // Anything that is no longer reachable from the current `self.root_index` should be + // removed as it is no longer referenced by anything in the current version of the graph. + // Fortunately, we don't need to walk the graph to find out if something is reachable from + // the root, since `has_path_connecting` is slow (depth-first search). Any node that does + // *NOT* have any incoming edges (aside from the `self.root_index` node) is not reachable, + // by definition. Finding the list of nodes with no incoming edges is very fast. 
If we
+        // remove all nodes (that are not the `self.root_index` node) that do not have any
+        // incoming edges, and we keep doing this until the only one left is the `self.root_index`
+        // node, then all remaining nodes are reachable from `self.root_index`.
+        let mut old_root_ids: HashSet<NodeIndex>;
+        loop {
+            old_root_ids = self
+                .graph
+                .externals(Incoming)
+                .filter(|node_id| *node_id != self.root_index)
+                .collect();
+            if old_root_ids.is_empty() {
+                break;
+            }
+
+            for stale_node_index in &old_root_ids {
+                self.graph.remove_node(*stale_node_index);
+            }
+        }
+        info!("Removed stale NodeIndex: {:?}", start.elapsed());
+
+        // After removing the stale nodes, collect the remaining ids and indices.
+        let remaining_node_ids: HashSet<Ulid> = self.graph.node_weights().map(|n| n.id()).collect();
+        info!(
+            "Got remaining node IDs: {} ({:?})",
+            remaining_node_ids.len(),
+            start.elapsed()
+        );
+        let remaining_node_indices: HashSet<NodeIndex> = self.graph.node_indices().collect();
+        info!(
+            "Got remaining NodeIndex: {} ({:?})",
+            remaining_node_indices.len(),
+            start.elapsed()
+        );
+
+        // Clean up the node index by id map.
+        self.node_index_by_id
+            .retain(|id, _index| remaining_node_ids.contains(id));
+        info!("Removed stale node_index_by_id: {:?}", start.elapsed());
+
+        // Clean up the node indices by lineage id map.
+        self.node_indices_by_lineage_id
+            .iter_mut()
+            .for_each(|(_lineage_id, node_indices)| {
+                node_indices.retain(|node_index| remaining_node_indices.contains(node_index));
+            });
+        self.node_indices_by_lineage_id
+            .retain(|_lineage_id, node_indices| !node_indices.is_empty());
+        info!(
+            "Removed stale node_indices_by_lineage_id: {:?}",
+            start.elapsed()
+        );
+    }

     pub fn find_equivalent_node(
         &self,
@@ -487,6 +625,247 @@ impl WorkspaceSnapshotGraph {
         Ok(maybe_equivalent_node)
     }

+    fn copy_node_by_index(
+        &mut self,
+        node_index_to_copy: NodeIndex,
+    ) -> WorkspaceSnapshotGraphResult<NodeIndex> {
+        self.add_node(self.get_node_weight(node_index_to_copy)?.clone())
+    }
+
+    pub fn detect_conflicts_and_updates(
+        &self,
+        to_rebase_vector_clock_id: VectorClockId,
+        onto: &WorkspaceSnapshotGraph,
+        onto_vector_clock_id: VectorClockId,
+    ) -> WorkspaceSnapshotGraphResult<(Vec<Conflict>, Vec<Update>)> {
+        let mut conflicts: Vec<Conflict> = Vec::new();
+        let mut updates: Vec<Update> = Vec::new();
+        if let Err(traversal_error) =
+            petgraph::visit::depth_first_search(&onto.graph, Some(onto.root_index), |event| {
+                self.detect_conflicts_and_updates_process_dfs_event(
+                    to_rebase_vector_clock_id,
+                    onto,
+                    onto_vector_clock_id,
+                    event,
+                    &mut conflicts,
+                    &mut updates,
+                )
+            })
+        {
+            return Err(WorkspaceSnapshotGraphError::GraphTraversal(traversal_error));
+        };
+
+        Ok((conflicts, updates))
+    }
+
+    fn detect_conflicts_and_updates_process_dfs_event(
+        &self,
+        to_rebase_vector_clock_id: VectorClockId,
+        onto: &WorkspaceSnapshotGraph,
+        onto_vector_clock_id: VectorClockId,
+        event: DfsEvent<NodeIndex>,
+        conflicts: &mut Vec<Conflict>,
+        updates: &mut Vec<Update>,
+    ) -> Result<petgraph::visit::Control<()>, petgraph::visit::DfsEvent<NodeIndex>> {
+        match event {
+            DfsEvent::Discover(onto_node_index, _) => {
+                let onto_node_weight = onto.get_node_weight(onto_node_index).map_err(|err| {
+                    error!(
+                        "Unable to get NodeWeight for onto NodeIndex {:?}: {}",
+                        onto_node_index, err,
+                    );
+                    event
+                })?;
+
+                let mut to_rebase_node_indexes = HashSet::new();
+                if onto_node_index == onto.root_index {
+                    // There can only be one (valid/current) `ContentAddress::Root` at any
+                    // given moment, and the `lineage_id` isn't really relevant as it's not
+                    // globally stable (even though it is locally stable).
This matters as we + // may be dealing with a `WorkspaceSnapshotGraph` that is coming to us + // externally from a module that we're attempting to import. The external + // `WorkspaceSnapshotGraph` will be `self`, and the "local" one will be + // `onto`. + to_rebase_node_indexes.insert(self.root_index); + } else { + // Only retain node indexes... or indices... if they are part of the current + // graph. There may still be garbage from previous updates to the graph + // laying around. + let mut potential_to_rebase_node_indexes = + self.get_node_index_by_lineage(onto_node_weight.lineage_id()); + potential_to_rebase_node_indexes + .retain(|node_index| self.has_path_to_root(*node_index)); + to_rebase_node_indexes.extend(potential_to_rebase_node_indexes); + + // TODO(nick): detect category nodes with a different lineage. We will likely + // need to check incoming edges in one graph and then look for outgoing edges in + // the other graph. + // // Since category nodes may be created from scratch from a different workspace, + // // they may have different lineage ids. We still want to consider the same + // // category kind as an equivalent node, even though it might have a different + // // lineage id. + // if let NodeWeight::Category(onto_category_node_weight) = onto_node_weight { + // onto_category_node_weight + // } + // let category_node_kind = onto_category_node_weight.kind(); + // let (_, to_rebase_category_node_index) = + // self.get_category_node(Some(onto_category_node_weight.id()), category_node_kind).map_err(|err| { + // error!( + // "Unable to get to rebase Category node for kind {:?} from onto {:?}: {}", + // onto_category_node_weight.kind(), onto, err, + // ); + // event + // })?; + // to_rebase_node_indexes.insert(to_rebase_category_node_index); + // } + } + + // If everything with the same `lineage_id` is identical, then we can prune the + // graph traversal, and avoid unnecessary lookups/comparisons. + let mut any_content_with_lineage_has_changed = false; + + for to_rebase_node_index in to_rebase_node_indexes { + let to_rebase_node_weight = + self.get_node_weight(to_rebase_node_index).map_err(|err| { + error!( + "Unable to get to_rebase NodeWeight for NodeIndex {:?}: {}", + to_rebase_node_index, err, + ); + event + })?; + + if onto_node_weight.merkle_tree_hash() + == to_rebase_node_weight.merkle_tree_hash() + { + // If the merkle tree hashes are the same, then the entire sub-graph is + // identical, and we don't need to check any further. + debug!( + "onto {} and to rebase {} merkle tree hashes are the same", + onto_node_weight.id(), + to_rebase_node_weight.id() + ); + continue; + } + any_content_with_lineage_has_changed = true; + + // Check if there's a difference in the node itself (and whether it is a + // conflict if there is a difference). + if onto_node_weight.node_hash() != to_rebase_node_weight.node_hash() { + if to_rebase_node_weight + .vector_clock_write() + .is_newer_than(onto_node_weight.vector_clock_write()) + { + // The existing node (`to_rebase`) has changes, but has already seen + // all of the changes in `onto`. There is no conflict, and there is + // nothing to update. + } else if onto_node_weight + .vector_clock_write() + .is_newer_than(to_rebase_node_weight.vector_clock_write()) + { + // `onto` has changes, but has already seen all of the changes in + // `to_rebase`. There is no conflict, and we should update to use the + // `onto` node. 
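+                            // As a hypothetical illustration of what
+                            // `is_newer_than` is checking here: with write
+                            // clocks of {A: 3} on the `to_rebase` node and
+                            // {A: 3, B: 1} on the `onto` node, `onto` has seen
+                            // everything `to_rebase` has written (and more), so
+                            // replacing the subgraph with `onto`'s version
+                            // loses nothing.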
+ updates.push(Update::ReplaceSubgraph { + onto: onto_node_index, + to_rebase: to_rebase_node_index, + }); + } else { + // There are changes on both sides that have not + // been seen by the other side; this is a conflict. + // There may also be other conflicts in the outgoing + // relationships, the downstream nodes, or both. + + if let ( + NodeWeight::Ordering(onto_ordering), + NodeWeight::Ordering(to_rebase_ordering), + ) = (onto_node_weight, to_rebase_node_weight) + { + // TODO Checking if two ordering arrays are non conflicting + // (if the common elements between two ordering have the same relative positions) + // is logic that could be extracted into its own thing. The following code does that + + // Both `onto` and `to_rebase` have changes that the other has not incorporated. We + // need to find out what the changes are to see what needs to be updated, and what + // conflicts. + let onto_ordering_set: HashSet = + onto_ordering.order().iter().copied().collect(); + let to_rebase_ordering_set: HashSet = + to_rebase_ordering.order().iter().copied().collect(); + + // Make sure that both `onto` and `to_rebase` have the same relative ordering for the + // nodes they have in common. If they don't, then that means that the order changed on + // at least one of them. + let common_items: HashSet = onto_ordering_set + .intersection(&to_rebase_ordering_set) + .copied() + .collect(); + let common_onto_items = { + let mut items = onto_ordering.order().clone(); + items.retain(|i| common_items.contains(i)); + items + }; + let common_to_rebase_items = { + let mut items = to_rebase_ordering.order().clone(); + items.retain(|i| common_items.contains(i)); + items + }; + if common_onto_items != common_to_rebase_items { + conflicts.push(Conflict::ChildOrder { + to_rebase: to_rebase_node_index, + onto: onto_node_index, + }); + } + } else { + conflicts.push(Conflict::NodeContent { + to_rebase: to_rebase_node_index, + onto: onto_node_index, + }); + } + } + } + + let (container_conflicts, container_updates) = self + .find_container_membership_conflicts_and_updates( + to_rebase_vector_clock_id, + to_rebase_node_index, + &[], + onto, + onto_vector_clock_id, + onto_node_index, + &[], + ) + .map_err(|err| { + error!("Unable to find container membership conflicts and updates for onto container NodeIndex {:?} and to_rebase container NodeIndex {:?}: {}", onto_node_index, to_rebase_node_index, err); + event + })?; + + updates.extend(container_updates); + conflicts.extend(container_conflicts); + } + + if any_content_with_lineage_has_changed { + // There was at least one thing with a merkle tree hash difference, so we need + // to examine further down the tree to see where the difference(s) are, and + // where there are conflicts, if there are any. + Ok(petgraph::visit::Control::Continue) + } else { + // Everything to be rebased is identical, so there's no need to examine the + // rest of the tree looking for differences & conflicts that won't be there. + Ok(petgraph::visit::Control::Prune) + } + } + DfsEvent::TreeEdge(_, _) + | DfsEvent::BackEdge(_, _) + | DfsEvent::CrossForwardEdge(_, _) + | DfsEvent::Finish(_, _) => { + // These events are all ignored, since we handle looking at edges as we encounter + // the node(s) the edges are coming from (Outgoing edges). + Ok(petgraph::visit::Control::Continue) + } + } + } + #[allow(dead_code)] pub fn dot(&self) { // NOTE(nick): copy the output and execute this on macOS. 
It will create a file in the @@ -496,66 +875,390 @@ impl WorkspaceSnapshotGraph { // ``` let current_root_weight = self .get_node_weight(self.root_index) - .expect("could not get root node weight in dot debug output"); + .expect("this should be impossible and this code should only be used for debugging"); println!( "Root Node Weight: {current_root_weight:?}\n{:?}", petgraph::dot::Dot::with_config(&self.graph, &[petgraph::dot::Config::EdgeNoLabel]) ); } + #[allow(dead_code, clippy::disallowed_methods)] + pub fn tiny_dot_to_file(&self, suffix: Option<&str>) { + let suffix = suffix.unwrap_or("dot"); + // NOTE(nick): copy the output and execute this on macOS. It will create a file in the + // process and open a new tab in your browser. + // ``` + // GRAPHFILE=; cat $GRAPHFILE.txt | dot -Tsvg -o processed-$GRAPHFILE.svg; open processed-$GRAPHFILE.svg + // ``` + let dot = petgraph::dot::Dot::with_attr_getters( + &self.graph, + &[ + petgraph::dot::Config::NodeNoLabel, + petgraph::dot::Config::EdgeNoLabel, + ], + &|_, edgeref| { + let discrim: EdgeWeightKindDiscriminants = edgeref.weight().kind().into(); + let color = match discrim { + EdgeWeightKindDiscriminants::Action => "black", + EdgeWeightKindDiscriminants::ActionPrototype => "black", + EdgeWeightKindDiscriminants::AuthenticationPrototype => "black", + EdgeWeightKindDiscriminants::Contain => "blue", + EdgeWeightKindDiscriminants::FrameContains => "black", + EdgeWeightKindDiscriminants::Ordering => "gray", + EdgeWeightKindDiscriminants::Ordinal => "gray", + EdgeWeightKindDiscriminants::Prop => "orange", + EdgeWeightKindDiscriminants::Prototype => "green", + EdgeWeightKindDiscriminants::PrototypeArgument => "green", + EdgeWeightKindDiscriminants::PrototypeArgumentValue => "green", + EdgeWeightKindDiscriminants::Socket => "red", + EdgeWeightKindDiscriminants::SocketValue => "purple", + EdgeWeightKindDiscriminants::Proxy => "gray", + EdgeWeightKindDiscriminants::Root => "black", + EdgeWeightKindDiscriminants::Use => "black", + }; + + match edgeref.weight().kind() { + EdgeWeightKind::Contain(key) => { + let key = key + .as_deref() + .map(|key| format!(" ({key}")) + .unwrap_or("".into()); + format!( + "label = \"{discrim:?}{key}\"\nfontcolor = {color}\ncolor = {color}" + ) + } + _ => format!("label = \"{discrim:?}\"\nfontcolor = {color}\ncolor = {color}"), + } + }, + &|_, (node_index, node_weight)| { + let (label, color) = match node_weight { + NodeWeight::Content(weight) => { + let discrim = ContentAddressDiscriminants::from(weight.content_address()); + let color = match discrim { + // Some of these should never happen as they have their own top-level + // NodeWeight variant. 
+ ContentAddressDiscriminants::Action => "green", + ContentAddressDiscriminants::ActionBatch => "green", + ContentAddressDiscriminants::ActionRunner => "green", + ContentAddressDiscriminants::ActionPrototype => "green", + ContentAddressDiscriminants::AttributePrototype => "green", + ContentAddressDiscriminants::Component => "black", + ContentAddressDiscriminants::OutputSocket => "red", + ContentAddressDiscriminants::Func => "black", + ContentAddressDiscriminants::FuncArg => "black", + ContentAddressDiscriminants::InputSocket => "red", + ContentAddressDiscriminants::JsonValue => "fuchsia", + ContentAddressDiscriminants::Prop => "orange", + ContentAddressDiscriminants::Root => "black", + ContentAddressDiscriminants::Schema => "black", + ContentAddressDiscriminants::SchemaVariant => "black", + ContentAddressDiscriminants::Secret => "black", + ContentAddressDiscriminants::StaticArgumentValue => "green", + ContentAddressDiscriminants::ValidationPrototype => "black", + }; + (discrim.to_string(), color) + } + NodeWeight::AttributePrototypeArgument(apa) => ( + format!( + "Attribute Prototype Argument{}", + apa.targets() + .map(|targets| format!( + "\nsource: {}\nto: {}", + targets.source_component_id, targets.destination_component_id + )) + .unwrap_or("".to_string()) + ), + "green", + ), + NodeWeight::AttributeValue(_) => ("Attribute Value".to_string(), "blue"), + NodeWeight::Category(category_node_weight) => match category_node_weight.kind() + { + CategoryNodeKind::Component => { + ("Components (Category)".to_string(), "black") + } + CategoryNodeKind::ActionBatch => { + ("Action Batches (Category)".to_string(), "black") + } + CategoryNodeKind::Func => ("Funcs (Category)".to_string(), "black"), + CategoryNodeKind::Schema => ("Schemas (Category)".to_string(), "black"), + CategoryNodeKind::Secret => ("Secrets (Category)".to_string(), "black"), + }, + NodeWeight::Component(component) => ( + "Component".to_string(), + if component.to_delete() { + "gray" + } else { + "black" + }, + ), + NodeWeight::Func(func_node_weight) => { + (format!("Func\n{}", func_node_weight.name()), "black") + } + NodeWeight::FuncArgument(func_arg_node_weight) => ( + format!("Func Arg\n{}", func_arg_node_weight.name()), + "black", + ), + NodeWeight::Ordering(_) => { + (NodeWeightDiscriminants::Ordering.to_string(), "gray") + } + NodeWeight::Prop(prop_node_weight) => { + (format!("Prop\n{}", prop_node_weight.name()), "orange") + } + }; + let color = color.to_string(); + let id = node_weight.id(); + format!( + "label = \"\n\n{label}\n{node_index:?}\n{id}\n\n\"\nfontcolor = {color}\ncolor = {color}", + ) + }, + ); + let filename_no_extension = format!("{}-{}", Ulid::new().to_string(), suffix); + + let home_str = std::env::var("HOME").expect("could not find home directory via env"); + let home = std::path::Path::new(&home_str); + + let mut file = File::create(home.join(format!("{filename_no_extension}.txt"))) + .expect("could not create file"); + file.write_all(format!("{dot:?}").as_bytes()) + .expect("could not write file"); + println!("dot output stored in file (filename without extension: {filename_no_extension})"); + } + + #[allow(clippy::too_many_arguments)] + fn find_container_membership_conflicts_and_updates( + &self, + to_rebase_vector_clock_id: VectorClockId, + to_rebase_container_index: NodeIndex, + to_rebase_container_order: &[Ulid], + onto: &WorkspaceSnapshotGraph, + onto_vector_clock_id: VectorClockId, + onto_container_index: NodeIndex, + onto_container_order: &[Ulid], + ) -> WorkspaceSnapshotGraphResult<(Vec, Vec)> { 
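+        // Rough shape of the comparison below (container contents are
+        // hypothetical): if the `to_rebase` container has edges to {a, b, c}
+        // and the `onto` container has edges to {b, c, d}, keyed by edge kind
+        // plus target lineage, then {a} is checked for RemoveEdge vs.
+        // ModifyRemovedItem, {d} is checked for NewEdge vs. RemoveModifiedItem,
+        // and the common {b, c} produce no container-membership work here.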
+        #[derive(Debug, Clone, Hash, PartialEq, Eq)]
+        struct UniqueEdgeInfo {
+            pub kind: EdgeWeightKind,
+            pub target_lineage: Ulid,
+        }
+
+        #[derive(Debug, Copy, Clone)]
+        struct EdgeInfo {
+            pub source_node_index: NodeIndex,
+            pub target_node_index: NodeIndex,
+            pub edge_kind: EdgeWeightKindDiscriminants,
+            pub edge_index: EdgeIndex,
+        }
+
+        let mut updates = Vec::new();
+        let mut conflicts = Vec::new();
+
+        let mut to_rebase_edges = HashMap::<UniqueEdgeInfo, EdgeInfo>::new();
+        for edgeref in self
+            .graph
+            .edges_directed(to_rebase_container_index, Outgoing)
+        {
+            let target_node_weight = self.get_node_weight(edgeref.target())?;
+
+            if to_rebase_container_order.contains(&target_node_weight.id()) {
+                continue;
+            }
+
+            to_rebase_edges.insert(
+                UniqueEdgeInfo {
+                    kind: edgeref.weight().kind().clone(),
+                    target_lineage: target_node_weight.lineage_id(),
+                },
+                EdgeInfo {
+                    source_node_index: edgeref.source(),
+                    target_node_index: edgeref.target(),
+                    edge_kind: edgeref.weight().kind().into(),
+                    edge_index: edgeref.id(),
+                },
+            );
+        }
+
+        let mut onto_edges = HashMap::<UniqueEdgeInfo, EdgeInfo>::new();
+        for edgeref in onto.graph.edges_directed(onto_container_index, Outgoing) {
+            let target_node_weight = onto.get_node_weight(edgeref.target())?;
+
+            if onto_container_order.contains(&target_node_weight.id()) {
+                continue;
+            }
+
+            onto_edges.insert(
+                UniqueEdgeInfo {
+                    kind: edgeref.weight().kind().clone(),
+                    target_lineage: target_node_weight.lineage_id(),
+                },
+                EdgeInfo {
+                    source_node_index: edgeref.source(),
+                    target_node_index: edgeref.target(),
+                    edge_kind: edgeref.weight().kind().into(),
+                    edge_index: edgeref.id(),
+                },
+            );
+        }
+
+        let only_to_rebase_edges = {
+            let mut unique_edges = to_rebase_edges.clone();
+            for key in onto_edges.keys() {
+                unique_edges.remove(key);
+            }
+            unique_edges
+        };
+        let only_onto_edges = {
+            let mut unique_edges = onto_edges.clone();
+            for key in to_rebase_edges.keys() {
+                unique_edges.remove(key);
+            }
+            unique_edges
+        };
+
+        debug!("only to rebase edges: {:?}", &only_to_rebase_edges);
+        debug!("only onto edges: {:?}", &only_onto_edges);
+
+        let root_seen_as_of_onto = self
+            .get_node_weight(self.root_index)?
+            .vector_clock_recently_seen()
+            .entry_for(onto_vector_clock_id);
+
+        let onto_last_saw_to_rebase = onto
+            .get_node_weight(onto.root_index)?
+            .vector_clock_recently_seen()
+            .entry_for(to_rebase_vector_clock_id);
+
+        for only_to_rebase_edge_info in only_to_rebase_edges.values() {
+            let to_rebase_edge_weight = self
+                .get_edge_weight_opt(only_to_rebase_edge_info.edge_index)?
+                .ok_or(WorkspaceSnapshotGraphError::EdgeWeightNotFound)?;
+            let to_rebase_item_weight =
+                self.get_node_weight(only_to_rebase_edge_info.target_node_index)?;
+
+            // If `onto` has never seen this edge, then it's new, and there are no conflicts, and
+            // no updates.
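+            // Concretely (with hypothetical timestamps): if `onto` last saw
+            // `to_rebase` at t=10, an edge first seen at t=12 post-dates the
+            // split and is simply new on this side, while an edge first seen at
+            // t<=10 was known to `onto` and must have been deliberately removed
+            // there.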
+            if to_rebase_edge_weight
+                .vector_clock_first_seen()
+                .entry_for(to_rebase_vector_clock_id)
+                <= onto_last_saw_to_rebase
+            {
+                if to_rebase_item_weight
+                    .vector_clock_write()
+                    .entry_for(to_rebase_vector_clock_id)
+                    >= onto_last_saw_to_rebase
+                {
+                    // The item has been modified in `to_rebase` since `onto` last saw `to_rebase`
+                    // (the item's write vector clock entry is at least as new as `onto`'s "seen
+                    // as of" entry in the `to_rebase` root), but `onto` removed the edge:
+                    // Conflict (ModifyRemovedItem)
+                    conflicts.push(Conflict::ModifyRemovedItem(
+                        only_to_rebase_edge_info.target_node_index,
+                    ))
+                } else {
+                    // Item not modified & removed by `onto`: No conflict; Update::RemoveEdge
+                    updates.push(Update::RemoveEdge {
+                        source: only_to_rebase_edge_info.source_node_index,
+                        destination: only_to_rebase_edge_info.target_node_index,
+                        edge_kind: only_to_rebase_edge_info.edge_kind,
+                    });
+                }
+            } else {
+                debug!(
+                    "edge weight first seen for to_rebase vector clock id {:?} is newer than onto last saw {:?}",
+                    to_rebase_edge_weight
+                        .vector_clock_first_seen()
+                        .entry_for(to_rebase_vector_clock_id),
+                    onto_last_saw_to_rebase
+                );
+            }
+        }
+
+        // - Items unique to `onto`:
+        for only_onto_edge_info in only_onto_edges.values() {
+            let onto_edge_weight = onto
+                .get_edge_weight_opt(only_onto_edge_info.edge_index)?
+                .ok_or(WorkspaceSnapshotGraphError::EdgeWeightNotFound)?;
+            let onto_item_weight = onto.get_node_weight(only_onto_edge_info.target_node_index)?;
+
+            if let Some(onto_first_seen) = onto_edge_weight
+                .vector_clock_first_seen()
+                .entry_for(onto_vector_clock_id)
+            {
+                // From "onto_first_seen", we know "when was the first time onto saw this edge?".
+                match root_seen_as_of_onto {
+                    Some(root_seen_as_of) if onto_first_seen <= root_seen_as_of => {}
+                    _ => {
+                        // Edge first seen by `onto` > "seen as of" on the `to_rebase` graph for
+                        // `onto`'s entry on the root node: the item is new. The other case where
+                        // the item is new: `to_rebase` has never seen anything from the `onto`
+                        // change set, in which case all of the items are new.
+                        updates.push(Update::NewEdge {
+                            source: to_rebase_container_index,
+                            destination: only_onto_edge_info.target_node_index,
+                            edge_weight: onto_edge_weight.clone(),
+                        });
+                    }
+                }
+            } else if let Some(root_seen_as_of) = root_seen_as_of_onto {
+                if onto_item_weight
+                    .vector_clock_write()
+                    .has_entries_newer_than(root_seen_as_of)
+                {
+                    // Item write vector clock has entries > "seen as of" on the `to_rebase` graph
+                    // for `onto`'s entry on the root node: Conflict (RemoveModifiedItem)
+                    conflicts.push(Conflict::RemoveModifiedItem {
+                        container: to_rebase_container_index,
+                        removed_item: only_onto_edge_info.target_node_index,
+                    });
+                }
+            }
+            // Item removed by `to_rebase`: No conflict & no update necessary.
+ } + + // - Sets same: No conflicts/updates + Ok((conflicts, updates)) + } + #[inline(always)] pub(crate) fn get_node_index_by_id( &self, id: impl Into, ) -> WorkspaceSnapshotGraphResult { let id = id.into(); - debug!("{:?}", self.node_index_by_id); - self.get_node_index_by_id_opt(id) - .ok_or(WorkspaceSnapshotGraphError::NodeWithIdNotFound(id)) - } - pub(crate) fn get_node_index_by_id_opt(&self, id: impl Into) -> Option { - let id = id.into(); - self.node_index_by_id.get(&id).copied() - } - - pub(crate) fn get_id_by_node_address( - &self, - address: NodeWeightAddress, - ) -> WorkspaceSnapshotGraphResult { - match self.id_by_node_address.get(&address).copied().ok_or( - WorkspaceSnapshotGraphError::NodeWithAddressNotFound(address), - ) { - Ok(a) => Ok(a), - Err(e) => { - error!("{}", e); - Err(e) - } - } + self.node_index_by_id + .get(&id) + .copied() + .ok_or(WorkspaceSnapshotGraphError::NodeWithIdNotFound(id)) } - pub(crate) fn get_node_index_by_lineage(&self, lineage_id: Ulid) -> HashSet { + fn get_node_index_by_lineage(&self, lineage_id: Ulid) -> HashSet { self.node_indices_by_lineage_id .get(&lineage_id) .cloned() .unwrap_or_default() } + pub fn node_index_to_id(&self, node_idx: NodeIndex) -> Option { + self.graph + .node_weight(node_idx) + .map(|node_weight| node_weight.id()) + } + pub fn get_node_weight_opt( &self, node_index: NodeIndex, - ) -> WorkspaceSnapshotGraphResult> { - Ok(self.graph.node_weight(node_index).copied()) + ) -> WorkspaceSnapshotGraphResult> { + Ok(self.graph.node_weight(node_index)) } pub fn get_node_weight( &self, node_index: NodeIndex, - ) -> WorkspaceSnapshotGraphResult { - Ok(self - .get_node_weight_opt(node_index)? - .ok_or(WorkspaceSnapshotGraphError::NodeWeightNotFound)? - .to_owned()) + ) -> WorkspaceSnapshotGraphResult<&NodeWeight> { + self.get_node_weight_opt(node_index)? + .ok_or(WorkspaceSnapshotGraphError::NodeWeightNotFound) + } + + fn get_node_weight_mut( + &mut self, + node_index: NodeIndex, + ) -> WorkspaceSnapshotGraphResult<&mut NodeWeight> { + self.graph + .node_weight_mut(node_index) + .ok_or(WorkspaceSnapshotGraphError::NodeWeightNotFound) } pub fn get_edge_weight_opt( @@ -565,12 +1268,55 @@ impl WorkspaceSnapshotGraph { Ok(self.graph.edge_weight(edge_index)) } - pub(crate) fn has_path_to_root(&self, node: NodeIndex) -> bool { + fn has_path_to_root(&self, node: NodeIndex) -> bool { algo::has_path_connecting(&self.graph, self.root_index, node, None) } + pub fn import_subgraph( + &mut self, + other: &WorkspaceSnapshotGraph, + root_index: NodeIndex, + ) -> WorkspaceSnapshotGraphResult<()> { + let mut dfs = petgraph::visit::DfsPostOrder::new(&other.graph, root_index); + while let Some(node_index_to_copy) = dfs.next(&other.graph) { + let node_weight_to_copy = other.get_node_weight(node_index_to_copy)?.clone(); + let node_weight_id = node_weight_to_copy.id(); + let node_weight_lineage_id = node_weight_to_copy.lineage_id(); + + // The following assumes there are no conflicts between "self" and "other". If there + // are conflicts between them, we shouldn't be running updates. + let node_index = if let Some(equivalent_node_index) = + self.find_equivalent_node(node_weight_id, node_weight_lineage_id)? 
+            {
+                let equivalent_node_weight = self.get_node_weight(equivalent_node_index)?;
+                if equivalent_node_weight
+                    .vector_clock_write()
+                    .is_newer_than(node_weight_to_copy.vector_clock_write())
+                {
+                    equivalent_node_index
+                } else {
+                    let new_node_index = self.add_node(node_weight_to_copy)?;
+
+                    self.replace_references(equivalent_node_index)?;
+                    self.get_latest_node_idx(new_node_index)?
+                }
+            } else {
+                self.add_node(node_weight_to_copy)?
+            };
+
+            for edge in other.graph.edges_directed(node_index_to_copy, Outgoing) {
+                let target_id = other.get_node_weight(edge.target())?.id();
+                let latest_target = self.get_node_index_by_id(target_id)?;
+                self.graph
+                    .update_edge(node_index, latest_target, edge.weight().clone());
+            }
+        }
+
+        Ok(())
+    }
+
     #[allow(dead_code)]
-    pub fn is_acyclic_directed(&self) -> bool {
+    fn is_acyclic_directed(&self) -> bool {
         // Using this because "is_cyclic_directed" is recursive.
         algo::toposort(&self.graph, None).is_ok()
     }
@@ -581,19 +1327,174 @@ impl WorkspaceSnapshotGraph {
         && algo::has_path_connecting(&self.graph, node, end, None)
     }

+    pub fn mark_graph_seen(
+        &mut self,
+        vector_clock_id: VectorClockId,
+    ) -> WorkspaceSnapshotGraphResult<()> {
+        let seen_at = Utc::now();
+        for edge in self.graph.edge_weights_mut() {
+            edge.mark_seen_at(vector_clock_id, seen_at);
+        }
+        for node in self.graph.node_weights_mut() {
+            node.mark_seen_at(vector_clock_id, seen_at);
+        }
+
+        Ok(())
+    }
+
     pub fn node_count(&self) -> usize {
         self.graph.node_count()
     }

-    pub fn address_map_len(&self) -> usize {
-        self.id_by_node_address.len()
+    /// Returns an `Option<Vec<NodeIndex>>`. If there is an ordering node, then the return will be
+    /// a [`Some`], where the [`Vec`] is populated with the [`NodeIndex`] of the nodes specified by
+    /// the ordering node, in the order defined by the ordering node. If there is not an ordering
+    /// node, then the return will be [`None`].
+    pub fn ordered_children_for_node(
+        &self,
+        container_node_index: NodeIndex,
+    ) -> WorkspaceSnapshotGraphResult<Option<Vec<NodeIndex>>> {
+        let mut ordered_child_indexes = Vec::new();
+        if let Some(container_ordering_index) =
+            self.ordering_node_index_for_container(container_node_index)?
+        {
+            if let NodeWeight::Ordering(ordering_weight) =
+                self.get_node_weight(container_ordering_index)?
+            {
+                for ordered_id in ordering_weight.order() {
+                    ordered_child_indexes.push(
+                        *self
+                            .node_index_by_id
+                            .get(ordered_id)
+                            .ok_or(WorkspaceSnapshotGraphError::NodeWithIdNotFound(*ordered_id))?,
+                    );
+                }
+            }
+        } else {
+            return Ok(None);
+        }
+
+        Ok(Some(ordered_child_indexes))
+    }
+
+    pub fn ordering_node_for_container(
+        &self,
+        container_node_index: NodeIndex,
+    ) -> WorkspaceSnapshotGraphResult<Option<OrderingNodeWeight>> {
+        Ok(
+            match self.ordering_node_index_for_container(container_node_index)? {
+                Some(ordering_node_idx) => match self.get_node_weight_opt(ordering_node_idx)?
{ + Some(node_weight) => Some(node_weight.get_ordering_node_weight()?.clone()), + None => None, + }, + None => None, + }, + ) + } + + pub fn ordering_node_index_for_container( + &self, + container_node_index: NodeIndex, + ) -> WorkspaceSnapshotGraphResult> { + let onto_ordering_node_indexes = + ordering_node_indexes_for_node_index(self, container_node_index); + if onto_ordering_node_indexes.len() > 1 { + error!( + "Too many ordering nodes found for container NodeIndex {:?}", + container_node_index + ); + return Err(WorkspaceSnapshotGraphError::TooManyOrderingForNode( + container_node_index, + )); + } + Ok(onto_ordering_node_indexes.first().copied()) + } + + pub fn prop_node_index_for_node_index( + &self, + node_index: NodeIndex, + ) -> WorkspaceSnapshotGraphResult> { + let prop_node_indexes = prop_node_indexes_for_node_index(self, node_index); + if prop_node_indexes.len() > 1 { + error!("Too many prop nodes found for NodeIndex {:?}", node_index); + return Err(WorkspaceSnapshotGraphError::TooManyPropForNode(node_index)); + } + Ok(prop_node_indexes.first().copied()) } pub(crate) fn remove_node(&mut self, node_index: NodeIndex) { self.graph.remove_node(node_index); } + /// [`StableGraph`] guarantees the stability of [`NodeIndex`] across removals, however there + /// are **NO** guarantees around the stability of [`EdgeIndex`] across removals. If + /// [`Self::cleanup()`] has been called, then any [`EdgeIndex`] found before + /// [`Self::cleanup()`] has run should be considered invalid. pub(crate) fn remove_edge( + &mut self, + change_set: &ChangeSet, + source_node_index: NodeIndex, + target_node_index: NodeIndex, + edge_kind: EdgeWeightKindDiscriminants, + ) -> WorkspaceSnapshotGraphResult<()> { + let source_node_index = self.get_latest_node_idx(source_node_index)?; + let target_node_index = self.get_latest_node_idx(target_node_index)?; + + self.copy_node_by_index(source_node_index)?; + self.replace_references(source_node_index)?; + // replace references may copy the node again to a new index + let source_node_index = self.get_latest_node_idx(source_node_index)?; + + self.inner_remove_edge(source_node_index, target_node_index, edge_kind); + + if let Some(previous_container_ordering_node_index) = + self.ordering_node_index_for_container(source_node_index)? + { + let element_id = self + .node_index_to_id(target_node_index) + .ok_or(WorkspaceSnapshotGraphError::NodeWeightNotFound)?; + + if let NodeWeight::Ordering(previous_container_ordering_node_weight) = + self.get_node_weight(previous_container_ordering_node_index)? + { + let mut new_container_ordering_node_weight = + previous_container_ordering_node_weight.clone(); + + // We only want to update the ordering of the container if we removed an edge to + // one of the ordered relationships. + if new_container_ordering_node_weight.remove_from_order(change_set, element_id)? 
{ + self.inner_remove_edge( + previous_container_ordering_node_index, + target_node_index, + EdgeWeightKindDiscriminants::Ordinal, + ); + + self.add_node(NodeWeight::Ordering(new_container_ordering_node_weight))?; + self.replace_references(previous_container_ordering_node_index)?; + } + } + } + + let source_node_index = self.get_latest_node_idx(source_node_index)?; + let mut work_queue = VecDeque::from([source_node_index]); + + while let Some(node_index) = work_queue.pop_front() { + self.update_merkle_tree_hash( + // If we updated the ordering node, that means we've invalidated the container's + // NodeIndex (new_source_node_index), so we need to find the new NodeIndex to be able + // to update the container's merkle tree hash. + node_index, + )?; + + for edge_ref in self.graph.edges_directed(node_index, Incoming) { + work_queue.push_back(edge_ref.source()); + } + } + + Ok(()) + } + + fn inner_remove_edge( &mut self, source_node_index: NodeIndex, target_node_index: NodeIndex, @@ -624,67 +1525,380 @@ impl WorkspaceSnapshotGraph { Ok((source, destination)) } - pub(crate) fn update_root_index(&mut self) -> WorkspaceSnapshotGraphResult { - self.root_index = self.get_latest_node_idx(self.root_index)?; - Ok(self.root_index) - } - - // #[allow(dead_code)] - // pub fn update_merkle_tree_hash_to_root( - // &mut self, - // start_idx: NodeIndex, - // ) -> WorkspaceSnapshotGraphResult<()> { - // info!("update merkle tree hash to root"); - // let mut work_queue = VecDeque::from([start_idx]); - // let mut seen_list = HashSet::new(); - - // while let Some(node_idx) = work_queue.pop_front() { - // let mut parents = 0; - // for parent_idx in self.graph.neighbors_directed(node_idx, Incoming) { - // parents += 1; - // if !seen_list.contains(&parent_idx) { - // work_queue.push_back(parent_idx); - // seen_list.insert(parent_idx); - // } - // } + /// Replace references should be called when a node has been changed and copied into the graph. + /// It will use the original_node_index to find the most up to date version of the new node, + /// and replace all edges that point to that old node with edges pointing to the new node. + /// Because the graph is treated as an immutable, copy-on-write structure, this means walking + /// up the graph to the root and copying all nodes that have edges that point to the + /// original_node_index, and all nodes that have edges that point to *those* parent nodes, + /// etc, until we've processed the entire parent tree of the original node. + pub fn replace_references( + &mut self, + original_node_index: NodeIndex, + ) -> WorkspaceSnapshotGraphResult<()> { + // Climb from the original node, up to root, rewriting outgoing edges along the way. But we + // have to be sure to climb to root once for each sibling node that we encounter as we + // walk up to root. + let mut outer_queue = VecDeque::from([original_node_index]); + + while let Some(old_node_index) = outer_queue.pop_front() { + let mut work_queue = VecDeque::from([old_node_index]); + + while let Some(old_node_index) = work_queue.pop_front() { + for edge_ref in self.edges_directed(old_node_index, Direction::Incoming) { + work_queue.push_back(edge_ref.source()); + outer_queue.push_back(edge_ref.source()) + } - // if parents == 0 { - // info!("root index!"); - // } + let latest_node_idx = self.get_latest_node_idx(old_node_index)?; + let new_node_index = if latest_node_idx != old_node_index { + latest_node_idx + } else { + self.copy_node_by_index(old_node_index)? 
+                };
+
+                // Find all the outgoing edge weights and their targets.
+                let mut edges_to_create = Vec::new();
+                for edge_ref in self.graph.edges_directed(old_node_index, Outgoing) {
+                    edges_to_create.push((
+                        edge_ref.weight().clone(),
+                        edge_ref.target(),
+                        edge_ref.id(),
+                    ));
+                }
+
+                // Make copies of these edges where the source is the new node index and the
+                // destination is the latest version of the old destination. If the destination
+                // was itself affected by the replacement, `get_latest_node_idx` resolves to its
+                // new node index; otherwise it resolves to the same destination node index as the
+                // old edge (the destination was *NOT* affected by the replacement, and does not
+                // have any new information to reflect).
+                for (edge_weight, destination_node_index, edge_idx) in edges_to_create {
+                    // Need to directly add the edge, without going through `self.add_edge` to
+                    // avoid infinite recursion, and because we're the place doing all the
+                    // bookkeeping that we'd be interested in happening from `self.add_edge`.
+                    let destination_node_index =
+                        self.get_latest_node_idx(destination_node_index)?;
+
+                    self.graph.remove_edge(edge_idx);
+
+                    self.graph
+                        .update_edge(new_node_index, destination_node_index, edge_weight);
+                }
+
+                self.update_merkle_tree_hash(new_node_index)?;
+            }
+        }
+
+        // Use the new version of the old root node as our root node.
+        self.root_index = self.get_latest_node_idx(self.root_index)?;
+
+        Ok(())
+    }

-    pub(crate) fn update_node_weight_address(
+    pub fn update_content(
         &mut self,
-        node_index: NodeIndex,
-        address: NodeWeightAddress,
+        change_set: &ChangeSet,
         id: Ulid,
+        new_content_hash: ContentHash,
     ) -> WorkspaceSnapshotGraphResult<()> {
-        self.graph
-            .node_weight_mut(node_index)
-            .ok_or(WorkspaceSnapshotGraphError::NodeWeightNotFound)?
-            .set_address(address);
-        self.id_by_node_address.insert(address, id);
+        let original_node_index = self.get_node_index_by_id(id)?;
+        let new_node_index = self.copy_node_by_index(original_node_index)?;
+        let node_weight = self.get_node_weight_mut(new_node_index)?;
+        node_weight.increment_vector_clock(change_set)?;
+        node_weight.new_content_hash(new_content_hash)?;
+
+        self.replace_references(original_node_index)?;
+        Ok(())
+    }
+
+    pub fn update_order(
+        &mut self,
+        change_set: &ChangeSet,
+        container_id: Ulid,
+        new_order: Vec<Ulid>,
+    ) -> WorkspaceSnapshotGraphResult<()> {
+        let original_node_index = self
+            .ordering_node_index_for_container(self.get_node_index_by_id(container_id)?)?
+            .ok_or(WorkspaceSnapshotGraphError::NodeWeightNotFound)?;
+        let new_node_index = self.copy_node_by_index(original_node_index)?;
+        let node_weight = self.get_node_weight_mut(new_node_index)?;
+        node_weight.set_order(change_set, new_order)?;
+
+        self.replace_references(original_node_index)?;
+        Ok(())
+    }
+
+    fn update_merkle_tree_hash(
+        &mut self,
+        node_index_to_update: NodeIndex,
+    ) -> WorkspaceSnapshotGraphResult<()> {
+        let mut hasher = ContentHash::hasher();
+        hasher.update(
+            self.get_node_weight(node_index_to_update)?
+                .node_hash()
+                .to_string()
+                .as_bytes(),
+        );
+
+        // Need to make sure that ordered containers have their ordered children in the
+        // order specified by the ordering graph node.
+        let explicitly_ordered_children = self
+            .ordered_children_for_node(node_index_to_update)?
+ .unwrap_or_default(); + + // Need to make sure the unordered neighbors are added to the hash in a stable order to + // ensure the merkle tree hash is identical for identical trees. + let mut unordered_neighbors = Vec::new(); + for neighbor_node in self + .graph + .neighbors_directed(node_index_to_update, Outgoing) + { + // Only add the neighbor if it's not one of the ones with an explicit ordering. + if !explicitly_ordered_children.contains(&neighbor_node) { + let neighbor_id = self.get_node_weight(neighbor_node)?.id(); + unordered_neighbors.push((neighbor_id, neighbor_node)); + } + } + // We'll sort the neighbors by the ID in the NodeWeight, as that will result in more stable + // results than if we sorted by the NodeIndex itself. + unordered_neighbors.sort_by_cached_key(|(id, _index)| *id); + // It's not important whether the explicitly ordered children are first or last, as long as + // they are always in that position, and are always in the sequence specified by the + // container's Ordering node. + let mut ordered_neighbors = + Vec::with_capacity(explicitly_ordered_children.len() + unordered_neighbors.len()); + ordered_neighbors.extend(explicitly_ordered_children); + ordered_neighbors.extend::>( + unordered_neighbors + .iter() + .map(|(_id, index)| *index) + .collect(), + ); + + for neighbor_node in ordered_neighbors { + hasher.update( + self.get_node_weight(neighbor_node)? + .merkle_tree_hash() + .to_string() + .as_bytes(), + ); + + // The edge(s) between `node_index_to_update`, and `neighbor_node` potentially encode + // important information related to the "identity" of `node_index_to_update`. + for connecting_edgeref in self + .graph + .edges_connecting(node_index_to_update, neighbor_node) + { + match connecting_edgeref.weight().kind() { + // This is the key for an entry in a map. + EdgeWeightKind::Contain(Some(key)) => hasher.update(key.as_bytes()), + + EdgeWeightKind::Use { is_default } => { + hasher.update(is_default.to_string().as_bytes()) + } + + // This is the key representing an element in a container type corresponding + // to an AttributePrototype + EdgeWeightKind::Prototype(Some(key)) => hasher.update(key.as_bytes()), + + // Nothing to do, as these EdgeWeightKind do not encode extra information + // in the edge itself. + EdgeWeightKind::AuthenticationPrototype + | EdgeWeightKind::Action + | EdgeWeightKind::ActionPrototype + | EdgeWeightKind::Contain(None) + | EdgeWeightKind::FrameContains + | EdgeWeightKind::PrototypeArgument + | EdgeWeightKind::PrototypeArgumentValue + | EdgeWeightKind::Socket + | EdgeWeightKind::Ordering + | EdgeWeightKind::Ordinal + | EdgeWeightKind::Prop + | EdgeWeightKind::Prototype(None) + | EdgeWeightKind::Proxy + | EdgeWeightKind::Root + | EdgeWeightKind::SocketValue => {} + } + } + } + + let new_node_weight = self + .graph + .node_weight_mut(node_index_to_update) + .ok_or(WorkspaceSnapshotGraphError::NodeWeightNotFound)?; + new_node_weight.set_merkle_tree_hash(hasher.finalize()); + + Ok(()) + } + + /// Perform [`Updates`](Update) using [`self`](WorkspaceSnapshotGraph) as the "to rebase" graph + /// and a provided graph as the "onto" graph. 
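+    ///
+    /// A minimal sketch of the intended call sequence (names are assumed from the surrounding
+    /// code, with error handling elided):
+    ///
+    /// ```ignore
+    /// let (conflicts, updates) = to_rebase.detect_conflicts_and_updates(
+    ///     to_rebase_vector_clock_id,
+    ///     &onto,
+    ///     onto_vector_clock_id,
+    /// )?;
+    /// if conflicts.is_empty() {
+    ///     to_rebase.perform_updates(&to_rebase_change_set, &onto, &updates)?;
+    /// }
+    /// ```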
+ pub fn perform_updates( + &mut self, + to_rebase_change_set: &ChangeSet, + onto: &WorkspaceSnapshotGraph, + updates: &[Update], + ) -> WorkspaceSnapshotGraphResult<()> { + for update in updates { + match update { + Update::NewEdge { + source, + destination, + edge_weight, + } => { + let updated_source = self.get_latest_node_idx(*source)?; + let destination = self.find_in_self_or_create_using_onto(*destination, onto)?; + + self.add_edge(updated_source, edge_weight.clone(), destination)?; + } + Update::RemoveEdge { + source, + destination, + edge_kind, + } => { + let updated_source = self.get_latest_node_idx(*source)?; + let destination = self.get_latest_node_idx(*destination)?; + self.remove_edge( + to_rebase_change_set, + updated_source, + destination, + *edge_kind, + )?; + } + Update::ReplaceSubgraph { + onto: onto_subgraph_root, + to_rebase: to_rebase_subgraph_root, + } => { + let updated_to_rebase = self.get_latest_node_idx(*to_rebase_subgraph_root)?; + self.find_in_self_or_create_using_onto(*onto_subgraph_root, onto)?; + self.replace_references(updated_to_rebase)?; + } + } + } + Ok(()) + } + + /// Update node weight in place with a lambda. Use with caution. Generally + /// we treat node weights as immutable and replace them by creating a new + /// node with a new node weight and replacing references to point to the new + /// node. + pub(crate) fn update_node_weight( + &mut self, + node_idx: NodeIndex, + lambda: L, + ) -> WorkspaceSnapshotGraphResult<()> + where + L: FnOnce(&mut NodeWeight) -> WorkspaceSnapshotGraphResult<()>, + { + let node_weight = self + .graph + .node_weight_mut(node_idx) + .ok_or(WorkspaceSnapshotGraphError::NodeWeightNotFound)?; + + lambda(node_weight)?; + Ok(()) } /// Given the node index for a node in other, find if a node exists in self that has the same /// id as the node found in other. - #[instrument("info", skip_all)] - pub(crate) fn find_latest_idx_in_self_from_other_idx( - &self, + fn find_latest_idx_in_self_from_other_idx( + &mut self, other: &WorkspaceSnapshotGraph, other_idx: NodeIndex, ) -> WorkspaceSnapshotGraphResult> { - let node_address = other.get_node_weight(other_idx)?.address(); - let other_id = other.get_id_by_node_address(node_address)?; + let other_id = other.get_node_weight(other_idx)?.id(); Ok(self.get_node_index_by_id(other_id).ok()) } + + /// Find in self where self is the "to rebase" side or create using "onto". + fn find_in_self_or_create_using_onto( + &mut self, + unchecked: NodeIndex, + onto: &WorkspaceSnapshotGraph, + ) -> WorkspaceSnapshotGraphResult { + let unchecked_node_weight = onto.get_node_weight(unchecked)?; + + let found_or_created = { + let equivalent_node = if let Some(found) = + self.find_latest_idx_in_self_from_other_idx(onto, unchecked)? + { + Some(found) + } else { + self.find_equivalent_node( + unchecked_node_weight.id(), + unchecked_node_weight.lineage_id(), + )? + }; + + match equivalent_node { + Some(found_equivalent_node) => { + let found_equivalent_node_weight = + self.get_node_weight(found_equivalent_node)?; + if found_equivalent_node_weight.merkle_tree_hash() + != unchecked_node_weight.merkle_tree_hash() + { + self.import_subgraph(onto, unchecked)?; + self.find_latest_idx_in_self_from_other_idx(onto, unchecked)? + .ok_or(WorkspaceSnapshotGraphError::NodeWeightNotFound)? + } else { + found_equivalent_node + } + } + None => { + self.import_subgraph(onto, unchecked)?; + self.find_latest_idx_in_self_from_other_idx(onto, unchecked)? + .ok_or(WorkspaceSnapshotGraphError::NodeWeightNotFound)? 
+ } + } + }; + Ok(found_or_created) + } +} + +fn ordering_node_indexes_for_node_index( + snapshot: &WorkspaceSnapshotGraph, + node_index: NodeIndex, +) -> Vec { + snapshot + .graph + .edges_directed(node_index, Outgoing) + .filter_map(|edge_reference| { + if edge_reference.weight().kind() == &EdgeWeightKind::Ordering + && matches!( + snapshot.get_node_weight(edge_reference.target()), + Ok(NodeWeight::Ordering(_)) + ) + { + return Some(edge_reference.target()); + } + + None + }) + .collect() +} + +fn prop_node_indexes_for_node_index( + snapshot: &WorkspaceSnapshotGraph, + node_index: NodeIndex, +) -> Vec { + snapshot + .graph + .edges_directed(node_index, Outgoing) + .filter_map(|edge_reference| { + if edge_reference.weight().kind() == &EdgeWeightKind::Prop + && matches!( + snapshot.get_node_weight(edge_reference.target()), + Ok(NodeWeight::Prop(_)) + ) + { + return Some(edge_reference.target()); + } + None + }) + .collect() } diff --git a/lib/dal/src/workspace_snapshot/graph/tests.rs b/lib/dal/src/workspace_snapshot/graph/tests.rs new file mode 100644 index 0000000000..522350fe06 --- /dev/null +++ b/lib/dal/src/workspace_snapshot/graph/tests.rs @@ -0,0 +1,5923 @@ +mod rebase; + +#[allow(clippy::panic)] +#[cfg(test)] +mod test { + use petgraph::graph::NodeIndex; + use petgraph::visit::EdgeRef; + use petgraph::Outgoing; + use pretty_assertions_sorted::assert_eq; + use si_events::ContentHash; + use std::collections::HashMap; + use std::collections::HashSet; + + use crate::change_set::ChangeSet; + use crate::workspace_snapshot::conflict::Conflict; + use crate::workspace_snapshot::content_address::ContentAddress; + use crate::workspace_snapshot::edge_weight::{ + EdgeWeight, EdgeWeightKind, EdgeWeightKindDiscriminants, + }; + use crate::workspace_snapshot::node_weight::NodeWeight; + use crate::workspace_snapshot::update::Update; + use crate::WorkspaceSnapshotGraph; + use crate::{ComponentId, FuncId, PropId, PropKind, SchemaId, SchemaVariantId}; + + #[derive(Debug, PartialEq)] + struct ConflictsAndUpdates { + conflicts: Vec, + updates: Vec, + } + + #[test] + fn new() { + let change_set = ChangeSet::new_local().expect("Unable to create ChangeSet"); + let change_set = &change_set; + let graph = WorkspaceSnapshotGraph::new(change_set) + .expect("Unable to create WorkspaceSnapshotGraph"); + assert!(graph.is_acyclic_directed()); + } + + // Previously, WorkspaceSnapshotGraph::new would not populate its node_index_by_id, so this test + // would fail, in addition to any functionality that depended on getting the root node index + // on a fresh graph (like add_ordered_node) + #[test] + fn get_root_index_by_root_id_on_fresh_graph() { + let base_change_set = ChangeSet::new_local().expect("Unable to create ChangeSet"); + let active_change_set = &base_change_set; + let graph = WorkspaceSnapshotGraph::new(active_change_set) + .expect("Unable to create WorkspaceSnapshotGraph"); + + let root_id = graph + .get_node_weight(graph.root_index) + .expect("get root weight") + .id(); + + let root_node_idx = graph + .get_node_index_by_id(root_id) + .expect("get root node index from ULID"); + + assert_eq!(graph.root_index, root_node_idx); + } + + #[test] + fn multiply_parented_nodes() { + // All edges are outgoing from top to bottom except e to u + // + // root node---->t--->u--->v + // | ^ + // | | + // r ------ | + // / \ | | + // a b | | + // \ / \ | | + // c | | | + // / | | | | + // | d <- | | + // | | | | + // ->e<------ | + // | | + // ---------------- + // + // Edge from e to u mimics a function edge from a prop 
through a prototype to a function + // There are a few other edges to "u" that are not represented in the drawing above. + // + + let nodes = ["r", "t", "u", "v", "a", "b", "c", "d", "e"]; + let edges = [ + (None, "r"), + (None, "t"), + (Some("t"), "u"), + (Some("u"), "v"), + (Some("r"), "a"), + (Some("r"), "b"), + (Some("r"), "e"), + (Some("a"), "c"), + (Some("b"), "c"), + (Some("c"), "d"), + (Some("b"), "d"), + (Some("d"), "e"), + (Some("c"), "e"), + (Some("e"), "u"), + (Some("c"), "u"), + (Some("a"), "u"), + (Some("a"), "b"), + ]; + + let change_set = ChangeSet::new_local().expect("Unable to create ChangeSet"); + let change_set = &change_set; + let mut graph = WorkspaceSnapshotGraph::new(change_set) + .expect("Unable to create WorkspaceSnapshotGraph"); + + let mut node_id_map = HashMap::new(); + + for node in nodes { + // "props" here are just nodes that are easy to create and render the name on the dot + // output. there is no domain modeling in this test. + let node_id = change_set.generate_ulid().expect("Unable to generate Ulid"); + let prop_node_weight = NodeWeight::new_prop( + change_set, + node_id, + PropKind::Object, + node, + ContentHash::new(node.as_bytes()), + ) + .expect("create prop node weight"); + graph + .add_node(prop_node_weight) + .expect("Unable to add prop"); + + node_id_map.insert(node, node_id); + } + + for (source, target) in edges { + let source = match source { + None => graph.root_index, + Some(node) => graph + .get_node_index_by_id( + node_id_map + .get(node) + .copied() + .expect("source node should have an id"), + ) + .expect("get node index by id"), + }; + + let target = graph + .get_node_index_by_id( + node_id_map + .get(target) + .copied() + .expect("target node should have an id"), + ) + .expect("get node index by id"); + + graph + .add_edge( + source, + EdgeWeight::new(change_set, EdgeWeightKind::new_use()) + .expect("create edge weight"), + target, + ) + .expect("add edge"); + } + + graph.cleanup(); + + for (source, target) in edges { + let source_idx = match source { + None => graph.root_index, + Some(node) => graph + .get_node_index_by_id( + node_id_map + .get(node) + .copied() + .expect("source node should have an id"), + ) + .expect("get node index by id"), + }; + + let target_idx = graph + .get_node_index_by_id( + node_id_map + .get(target) + .copied() + .expect("target node should have an id"), + ) + .expect("get node index by id"); + + assert!( + graph + .edges_directed(source_idx, Outgoing) + .any(|edge_ref| edge_ref.target() == target_idx), + "An edge from {} to {} should exist", + source.unwrap_or("root"), + target + ); + } + + for (_, id) in node_id_map.iter() { + let idx_for_node = graph + .get_node_index_by_id(*id) + .expect("able to get idx by id"); + graph + .get_node_weight(idx_for_node) + .expect("node with weight in graph"); + } + } + + #[test] + fn add_nodes_and_edges() { + let change_set = ChangeSet::new_local().expect("Unable to create ChangeSet"); + let change_set = &change_set; + let mut graph = WorkspaceSnapshotGraph::new(change_set) + .expect("Unable to create WorkspaceSnapshotGraph"); + + let schema_id = change_set.generate_ulid().expect("Unable to generate Ulid"); + let schema_index = graph + .add_node( + NodeWeight::new_content( + change_set, + schema_id, + ContentAddress::Schema(ContentHash::new( + SchemaId::generate().to_string().as_bytes(), + )), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add schema"); + let schema_variant_id = change_set.generate_ulid().expect("Unable to generate Ulid"); + let 
schema_variant_index = graph + .add_node( + NodeWeight::new_content( + change_set, + schema_variant_id, + ContentAddress::SchemaVariant(ContentHash::new( + SchemaVariantId::generate().to_string().as_bytes(), + )), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add schema variant"); + let component_id = change_set.generate_ulid().expect("Unable to generate Ulid"); + let component_index = graph + .add_node( + NodeWeight::new_content( + change_set, + component_id, + ContentAddress::Component(ContentHash::new( + ComponentId::generate().to_string().as_bytes(), + )), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add component"); + + graph + .add_edge( + graph.root_index, + EdgeWeight::new(change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + component_index, + ) + .expect("Unable to add root -> component edge"); + graph + .add_edge( + graph.root_index, + EdgeWeight::new(change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + schema_index, + ) + .expect("Unable to add root -> schema edge"); + graph + .add_edge( + graph + .get_node_index_by_id(schema_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + schema_variant_index, + ) + .expect("Unable to add schema -> schema variant edge"); + graph + .add_edge( + graph + .get_node_index_by_id(component_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + graph + .get_node_index_by_id(schema_variant_id) + .expect("Unable to get NodeIndex"), + ) + .expect("Unable to add component -> schema variant edge"); + + let func_id = change_set.generate_ulid().expect("Unable to generate Ulid"); + let func_index = graph + .add_node( + NodeWeight::new_content( + change_set, + func_id, + ContentAddress::Func(ContentHash::new( + FuncId::generate().to_string().as_bytes(), + )), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add func"); + let prop_id = change_set.generate_ulid().expect("Unable to generate Ulid"); + let prop_index = graph + .add_node( + NodeWeight::new_content( + change_set, + prop_id, + ContentAddress::Prop(ContentHash::new( + PropId::generate().to_string().as_bytes(), + )), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add prop"); + + graph + .add_edge( + graph.root_index, + EdgeWeight::new(change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + func_index, + ) + .expect("Unable to add root -> func edge"); + graph + .add_edge( + graph + .get_node_index_by_id(schema_variant_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + prop_index, + ) + .expect("Unable to add schema variant -> prop edge"); + graph + .add_edge( + graph + .get_node_index_by_id(prop_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + graph + .get_node_index_by_id(func_id) + .expect("Unable to get NodeIndex"), + ) + .expect("Unable to add prop -> func edge"); + + assert!(graph.is_acyclic_directed()); + } + + #[test] + fn cyclic_failure() { + let change_set = ChangeSet::new_local().expect("Unable to create ChangeSet"); + let change_set = &change_set; + let mut graph = WorkspaceSnapshotGraph::new(change_set) + .expect("Unable to create 
WorkspaceSnapshotGraph"); + + let schema_id = change_set.generate_ulid().expect("Unable to generate Ulid"); + let initial_schema_node_index = graph + .add_node( + NodeWeight::new_content( + change_set, + schema_id, + ContentAddress::Schema(ContentHash::new( + SchemaId::generate().to_string().as_bytes(), + )), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add schema"); + let schema_variant_id = change_set.generate_ulid().expect("Unable to generate Ulid"); + let initial_schema_variant_node_index = graph + .add_node( + NodeWeight::new_content( + change_set, + schema_variant_id, + ContentAddress::SchemaVariant(ContentHash::new( + SchemaVariantId::generate().to_string().as_bytes(), + )), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add schema variant"); + let component_id = change_set.generate_ulid().expect("Unable to generate Ulid"); + let initial_component_node_index = graph + .add_node( + NodeWeight::new_content( + change_set, + component_id, + ContentAddress::Component(ContentHash::new( + ComponentId::generate().to_string().as_bytes(), + )), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add component"); + + graph + .add_edge( + graph.root_index, + EdgeWeight::new(change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + initial_component_node_index, + ) + .expect("Unable to add root -> component edge"); + graph + .add_edge( + graph.root_index, + EdgeWeight::new(change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + initial_schema_node_index, + ) + .expect("Unable to add root -> schema edge"); + graph + .add_edge( + graph + .get_node_index_by_id(schema_id) + .expect("Unable to find NodeIndex"), + EdgeWeight::new(change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + initial_schema_variant_node_index, + ) + .expect("Unable to add schema -> schema variant edge"); + graph + .add_edge( + graph + .get_node_index_by_id(component_id) + .expect("Unable to find NodeIndex"), + EdgeWeight::new(change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + graph + .get_node_index_by_id(schema_variant_id) + .expect("Unable to find NodeIndex"), + ) + .expect("Unable to add component -> schema variant edge"); + + let pre_cycle_root_index = graph.root_index; + + // This should cause a cycle. 
+ graph + .add_edge( + graph + .get_node_index_by_id(schema_variant_id) + .expect("Unable to find NodeIndex"), + EdgeWeight::new(change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + graph + .get_node_index_by_id(component_id) + .expect("Unable to find NodeIndex"), + ) + .expect_err("Created a cycle"); + + assert_eq!(pre_cycle_root_index, graph.root_index,); + } + + #[test] + fn update_content() { + let change_set = ChangeSet::new_local().expect("Unable to create ChangeSet"); + let change_set = &change_set; + let mut graph = WorkspaceSnapshotGraph::new(change_set) + .expect("Unable to create WorkspaceSnapshotGraph"); + + let schema_id = change_set.generate_ulid().expect("Unable to generate Ulid"); + let schema_index = graph + .add_node( + NodeWeight::new_content( + change_set, + schema_id, + ContentAddress::Schema(ContentHash::from("Constellation")), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add schema"); + let schema_variant_id = change_set.generate_ulid().expect("Unable to generate Ulid"); + let schema_variant_index = graph + .add_node( + NodeWeight::new_content( + change_set, + schema_variant_id, + ContentAddress::SchemaVariant(ContentHash::new( + "Freestar Collective".as_bytes(), + )), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add schema variant"); + let component_id = change_set.generate_ulid().expect("Unable to generate Ulid"); + let component_index = graph + .add_node( + NodeWeight::new_content( + change_set, + component_id, + ContentAddress::Component(ContentHash::from("Crimson Fleet")), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add component"); + + graph + .add_edge( + graph.root_index, + EdgeWeight::new(change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + component_index, + ) + .expect("Unable to add root -> component edge"); + graph + .add_edge( + graph.root_index, + EdgeWeight::new(change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + schema_index, + ) + .expect("Unable to add root -> schema edge"); + graph + .add_edge( + graph + .get_node_index_by_id(schema_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + schema_variant_index, + ) + .expect("Unable to add schema -> schema variant edge"); + graph + .add_edge( + graph + .get_node_index_by_id(component_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + graph + .get_node_index_by_id(schema_variant_id) + .expect("Unable to get NodeIndex"), + ) + .expect("Unable to add component -> schema variant edge"); + + // Ensure that the root node merkle tree hash looks as we expect before the update. 
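+ // The merkle tree hash of a node incorporates the hashes of everything beneath it, so updating the component's content further down must also change the root node's hash.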
+ let pre_update_root_node_merkle_tree_hash: ContentHash = + serde_json::from_value(serde_json::json![ + "e24d50279e223b763e56317f692713b1b1f172258ec2df66e2b41db49982b810" + ]) + .expect("could not deserialize"); + assert_eq!( + pre_update_root_node_merkle_tree_hash, // expected + graph + .get_node_weight(graph.root_index) + .expect("could not get node weight") + .merkle_tree_hash(), // actual + ); + + let updated_content_hash = ContentHash::from("new_content"); + graph + .update_content(change_set, component_id, updated_content_hash) + .expect("Unable to update Component content hash"); + + let post_update_root_node_merkle_tree_hash: ContentHash = + serde_json::from_value(serde_json::json![ + "94f16e91eca11765f507747aacf49e2e0bacbb63ec5a11ea0cbea7d372f1b2cd" + ]) + .expect("could not deserialize"); + assert_eq!( + post_update_root_node_merkle_tree_hash, // expected + graph + .get_node_weight(graph.root_index) + .expect("could not get node weight") + .merkle_tree_hash(), // actual + ); + assert_eq!( + updated_content_hash, // expected + graph + .get_node_weight( + graph + .get_node_index_by_id(component_id) + .expect("could not get node index by id") + ) + .expect("could not get node weight") + .content_hash(), // actual + ); + + graph.cleanup(); + + // Ensure that there are not more nodes than the ones that should be in use. + assert_eq!(4, graph.node_count()); + + // The hashes must not change upon cleanup. + assert_eq!( + post_update_root_node_merkle_tree_hash, // expected + graph + .get_node_weight(graph.root_index) + .expect("could not get node weight") + .merkle_tree_hash(), // actual + ); + assert_eq!( + updated_content_hash, // expected + graph + .get_node_weight( + graph + .get_node_index_by_id(component_id) + .expect("could not get node index by id") + ) + .expect("could not get node weight") + .content_hash(), // actual + ); + } + + #[test] + fn detect_conflicts_and_updates_simple_no_conflicts_no_updates_in_base() { + let initial_change_set = ChangeSet::new_local().expect("Unable to create ChangeSet"); + let initial_change_set = &initial_change_set; + let mut initial_graph = WorkspaceSnapshotGraph::new(initial_change_set) + .expect("Unable to create WorkspaceSnapshotGraph"); + + let schema_id = initial_change_set + .generate_ulid() + .expect("Unable to generate Ulid"); + let schema_index = initial_graph + .add_node( + NodeWeight::new_content( + initial_change_set, + schema_id, + ContentAddress::Schema(ContentHash::from("Schema A")), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add Schema A"); + let schema_variant_id = initial_change_set + .generate_ulid() + .expect("Unable to generate Ulid"); + let schema_variant_index = initial_graph + .add_node( + NodeWeight::new_content( + initial_change_set, + schema_variant_id, + ContentAddress::SchemaVariant(ContentHash::from("Schema Variant A")), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add Schema Variant A"); + + initial_graph + .add_edge( + initial_graph.root_index, + EdgeWeight::new(initial_change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + schema_index, + ) + .expect("Unable to add root -> schema edge"); + initial_graph + .add_edge( + initial_graph + .get_node_index_by_id(schema_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(initial_change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + schema_variant_index, + ) + .expect("Unable to add schema -> schema variant edge"); + + initial_graph.dot(); + + let 
new_change_set = ChangeSet::new_local().expect("Unable to create ChangeSet"); + let new_change_set = &new_change_set; + let mut new_graph = initial_graph.clone(); + + let component_id = new_change_set + .generate_ulid() + .expect("Unable to generate Ulid"); + let component_index = new_graph + .add_node( + NodeWeight::new_content( + new_change_set, + component_id, + ContentAddress::Schema(ContentHash::from("Component A")), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add Component A"); + new_graph + .add_edge( + new_graph.root_index, + EdgeWeight::new(new_change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + component_index, + ) + .expect("Unable to add root -> component edge"); + new_graph + .add_edge( + new_graph + .get_node_index_by_id(component_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(new_change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + new_graph + .get_node_index_by_id(schema_variant_id) + .expect("Unable to get NodeIndex"), + ) + .expect("Unable to add component -> schema variant edge"); + + new_graph.dot(); + + let (conflicts, updates) = new_graph + .detect_conflicts_and_updates( + new_change_set.vector_clock_id(), + &initial_graph, + initial_change_set.vector_clock_id(), + ) + .expect("Unable to detect conflicts and updates"); + + assert_eq!(Vec::<Conflict>::new(), conflicts); + assert_eq!(Vec::<Update>::new(), updates); + } + + #[test] + fn detect_conflicts_and_updates_simple_no_conflicts_with_purely_new_content_in_base() { + let initial_change_set = ChangeSet::new_local().expect("Unable to create ChangeSet"); + let base_change_set = &initial_change_set; + let mut base_graph = WorkspaceSnapshotGraph::new(base_change_set) + .expect("Unable to create WorkspaceSnapshotGraph"); + + let schema_id = base_change_set + .generate_ulid() + .expect("Unable to generate Ulid"); + let schema_index = base_graph + .add_node( + NodeWeight::new_content( + base_change_set, + schema_id, + ContentAddress::Schema(ContentHash::from("Schema A")), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add Schema A"); + let schema_variant_id = base_change_set + .generate_ulid() + .expect("Unable to generate Ulid"); + let schema_variant_index = base_graph + .add_node( + NodeWeight::new_content( + base_change_set, + schema_variant_id, + ContentAddress::SchemaVariant(ContentHash::from("Schema Variant A")), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add Schema Variant A"); + + base_graph + .add_edge( + base_graph.root_index, + EdgeWeight::new(base_change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + schema_index, + ) + .expect("Unable to add root -> schema edge"); + base_graph + .add_edge( + base_graph + .get_node_index_by_id(schema_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(base_change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + schema_variant_index, + ) + .expect("Unable to add schema -> schema variant edge"); + + println!("Initial base graph (Root {:?}):", base_graph.root_index); + base_graph.dot(); + + let new_change_set = ChangeSet::new_local().expect("Unable to create ChangeSet"); + let new_change_set = &new_change_set; + let new_graph = base_graph.clone(); + + let new_onto_component_id = base_change_set + .generate_ulid() + .expect("Unable to generate Ulid"); + let new_onto_component_index = base_graph + .add_node( + NodeWeight::new_content( + base_change_set, + new_onto_component_id, +
ContentAddress::Component(ContentHash::from("Component B")), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add Component B"); + let _new_onto_root_component_edge_index = base_graph + .add_edge( + base_graph.root_index, + EdgeWeight::new(base_change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + new_onto_component_index, + ) + .expect("Unable to add root -> component edge"); + base_graph + .add_edge( + base_graph + .get_node_index_by_id(new_onto_component_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(base_change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + base_graph + .get_node_index_by_id(schema_variant_id) + .expect("Unable to get NodeIndex"), + ) + .expect("Unable to add component -> schema variant edge"); + + println!("Updated base graph (Root: {:?}):", base_graph.root_index); + base_graph.dot(); + + let (conflicts, updates) = new_graph + .detect_conflicts_and_updates( + new_change_set.vector_clock_id(), + &base_graph, + base_change_set.vector_clock_id(), + ) + .expect("Unable to detect conflicts and updates"); + + assert_eq!(Vec::<Conflict>::new(), conflicts); + + let new_onto_component_index = base_graph + .get_node_index_by_id(new_onto_component_id) + .expect("Unable to get NodeIndex"); + match updates.as_slice() { + [Update::NewEdge { + source, + destination, + edge_weight, + }] => { + assert_eq!(new_graph.root_index, *source); + assert_eq!(new_onto_component_index, *destination); + assert_eq!(&EdgeWeightKind::new_use(), edge_weight.kind()); + } + other => panic!("Unexpected updates: {:?}", other), + } + } + + #[test] + fn detect_conflicts_and_updates_with_purely_new_content_in_new_graph() { + let initial_change_set = ChangeSet::new_local().expect("Unable to create ChangeSet"); + let base_change_set = &initial_change_set; + let mut base_graph = WorkspaceSnapshotGraph::new(base_change_set) + .expect("Unable to create WorkspaceSnapshotGraph"); + + let component_id = base_change_set + .generate_ulid() + .expect("Unable to generate Ulid"); + let component_index = base_graph + .add_node( + NodeWeight::new_content( + base_change_set, + component_id, + ContentAddress::Component(ContentHash::from("Component A")), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add Component A"); + base_graph + .add_edge( + base_graph.root_index, + EdgeWeight::new(base_change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + component_index, + ) + .expect("Unable to add root -> component edge"); + + base_graph.cleanup(); + println!("Initial base graph (Root {:?}):", base_graph.root_index); + base_graph.dot(); + + let new_change_set = ChangeSet::new_local().expect("Unable to create ChangeSet"); + let new_change_set = &new_change_set; + let mut new_graph = base_graph.clone(); + + let new_component_id = new_change_set + .generate_ulid() + .expect("Unable to generate Ulid"); + let new_component_index = new_graph + .add_node( + NodeWeight::new_content( + new_change_set, + new_component_id, + ContentAddress::Component(ContentHash::from("Component B")), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add Component B"); + new_graph + .add_edge( + new_graph.root_index, + EdgeWeight::new(new_change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + new_component_index, + ) + .expect("Unable to add root -> component edge"); + + new_graph.cleanup(); + println!("Updated new graph (Root: {:?}):", new_graph.root_index); + new_graph.dot(); + +
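// Detection is exercised in both directions below: from the new graph's point of view there is nothing to pick up from base, while from the base graph's point of view the new component should arrive as a single NewEdge update. +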
let (conflicts, updates) = new_graph + .detect_conflicts_and_updates( + new_change_set.vector_clock_id(), + &base_graph, + base_change_set.vector_clock_id(), + ) + .expect("Unable to detect conflicts and updates"); + + assert!(updates.is_empty()); + assert!(conflicts.is_empty()); + + let (conflicts, updates) = base_graph + .detect_conflicts_and_updates( + base_change_set.vector_clock_id(), + &new_graph, + new_change_set.vector_clock_id(), + ) + .expect("Unable to detect conflicts and updates"); + + assert!(conflicts.is_empty()); + + match updates.as_slice() { + [Update::NewEdge { + source, + destination, + edge_weight, + }] => { + assert_eq!(base_graph.root_index, *source); + assert_eq!(new_component_index, *destination); + assert_eq!(&EdgeWeightKind::new_use(), edge_weight.kind()); + } + other => panic!("Unexpected updates: {:?}", other), + } + } + + #[test] + fn detect_conflicts_and_updates_simple_no_conflicts_with_updates_on_both_sides() { + let initial_change_set = ChangeSet::new_local().expect("Unable to create ChangeSet"); + let base_change_set = &initial_change_set; + let mut base_graph = WorkspaceSnapshotGraph::new(base_change_set) + .expect("Unable to create WorkspaceSnapshotGraph"); + + let schema_id = base_change_set + .generate_ulid() + .expect("Unable to generate Ulid"); + let schema_index = base_graph + .add_node( + NodeWeight::new_content( + base_change_set, + schema_id, + ContentAddress::Schema(ContentHash::from("Schema A")), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add Schema A"); + let schema_variant_id = base_change_set + .generate_ulid() + .expect("Unable to generate Ulid"); + let schema_variant_index = base_graph + .add_node( + NodeWeight::new_content( + base_change_set, + schema_variant_id, + ContentAddress::SchemaVariant(ContentHash::from("Schema Variant A")), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add Schema Variant A"); + + base_graph + .add_edge( + base_graph.root_index, + EdgeWeight::new(base_change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + schema_index, + ) + .expect("Unable to add root -> schema edge"); + base_graph + .add_edge( + base_graph + .get_node_index_by_id(schema_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(base_change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + schema_variant_index, + ) + .expect("Unable to add schema -> schema variant edge"); + + println!("Initial base graph (Root {:?}):", base_graph.root_index); + base_graph.dot(); + + let new_change_set = ChangeSet::new_local().expect("Unable to create ChangeSet"); + let new_change_set = &new_change_set; + let mut new_graph = base_graph.clone(); + + let component_id = new_change_set + .generate_ulid() + .expect("Unable to generate Ulid"); + let component_index = new_graph + .add_node( + NodeWeight::new_content( + new_change_set, + component_id, + ContentAddress::Component(ContentHash::from("Component A")), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add Component A"); + new_graph + .add_edge( + new_graph.root_index, + EdgeWeight::new(new_change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + component_index, + ) + .expect("Unable to add root -> component edge"); + new_graph + .add_edge( + new_graph + .get_node_index_by_id(component_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(new_change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + new_graph + 
.get_node_index_by_id(schema_variant_id) + .expect("Unable to get NodeIndex"), + ) + .expect("Unable to add component -> schema variant edge"); + + println!("new graph (Root {:?}):", new_graph.root_index); + new_graph.dot(); + + let new_onto_component_id = base_change_set + .generate_ulid() + .expect("Unable to generate Ulid"); + let new_onto_component_index = base_graph + .add_node( + NodeWeight::new_content( + base_change_set, + new_onto_component_id, + ContentAddress::Component(ContentHash::from("Component B")), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add Component B"); + base_graph + .add_edge( + base_graph.root_index, + EdgeWeight::new(base_change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + new_onto_component_index, + ) + .expect("Unable to add root -> component edge"); + base_graph + .add_edge( + base_graph + .get_node_index_by_id(new_onto_component_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(base_change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + base_graph + .get_node_index_by_id(schema_variant_id) + .expect("Unable to get NodeIndex"), + ) + .expect("Unable to add component -> schema variant edge"); + + println!("Updated base graph (Root: {:?}):", base_graph.root_index); + base_graph.dot(); + + let (conflicts, updates) = new_graph + .detect_conflicts_and_updates( + new_change_set.vector_clock_id(), + &base_graph, + base_change_set.vector_clock_id(), + ) + .expect("Unable to detect conflicts and updates"); + + assert_eq!(Vec::<Conflict>::new(), conflicts); + + let new_onto_component_index = base_graph + .get_node_index_by_id(new_onto_component_id) + .expect("Unable to get NodeIndex"); + match updates.as_slice() { + [Update::NewEdge { + source, + destination, + edge_weight, + }] => { + assert_eq!(new_graph.root_index, *source); + assert_eq!(new_onto_component_index, *destination); + assert_eq!(&EdgeWeightKind::new_use(), edge_weight.kind()); + } + other => panic!("Unexpected updates: {:?}", other), + } + } + + #[test] + fn detect_conflicts_and_updates_simple_with_content_conflict() { + let initial_change_set = ChangeSet::new_local().expect("Unable to create ChangeSet"); + let base_change_set = &initial_change_set; + let mut base_graph = WorkspaceSnapshotGraph::new(base_change_set) + .expect("Unable to create WorkspaceSnapshotGraph"); + + let schema_id = base_change_set + .generate_ulid() + .expect("Unable to generate Ulid"); + let schema_index = base_graph + .add_node( + NodeWeight::new_content( + base_change_set, + schema_id, + ContentAddress::Schema(ContentHash::from("Schema A")), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add Schema A"); + let schema_variant_id = base_change_set + .generate_ulid() + .expect("Unable to generate Ulid"); + let schema_variant_index = base_graph + .add_node( + NodeWeight::new_content( + base_change_set, + schema_variant_id, + ContentAddress::SchemaVariant(ContentHash::from("Schema Variant A")), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add Schema Variant A"); + + base_graph + .add_edge( + base_graph.root_index, + EdgeWeight::new(base_change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + schema_index, + ) + .expect("Unable to add root -> schema edge"); + base_graph + .add_edge( + base_graph + .get_node_index_by_id(schema_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(base_change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), +
schema_variant_index, + ) + .expect("Unable to add schema -> schema variant edge"); + + let component_id = base_change_set + .generate_ulid() + .expect("Unable to generate Ulid"); + let component_index = base_graph + .add_node( + NodeWeight::new_content( + base_change_set, + component_id, + ContentAddress::Component(ContentHash::from("Component A")), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add Component A"); + base_graph + .add_edge( + base_graph.root_index, + EdgeWeight::new(base_change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + component_index, + ) + .expect("Unable to add root -> component edge"); + base_graph + .add_edge( + base_graph + .get_node_index_by_id(component_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(base_change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + base_graph + .get_node_index_by_id(schema_variant_id) + .expect("Unable to get NodeIndex"), + ) + .expect("Unable to add component -> schema variant edge"); + + base_graph.cleanup(); + println!("Initial base graph (Root {:?}):", base_graph.root_index); + base_graph.dot(); + + let new_change_set = ChangeSet::new_local().expect("Unable to create ChangeSet"); + let new_change_set = &new_change_set; + let mut new_graph = base_graph.clone(); + + new_graph + .update_content( + new_change_set, + component_id, + ContentHash::from("Updated Component A"), + ) + .expect("Unable to update Component A"); + + new_graph.cleanup(); + println!("new graph (Root {:?}):", new_graph.root_index); + new_graph.dot(); + + base_graph + .update_content( + base_change_set, + component_id, + ContentHash::from("Base Updated Component A"), + ) + .expect("Unable to update Component A"); + + base_graph.cleanup(); + println!("Updated base graph (Root: {:?}):", base_graph.root_index); + base_graph.dot(); + + let (conflicts, updates) = new_graph + .detect_conflicts_and_updates( + new_change_set.vector_clock_id(), + &base_graph, + base_change_set.vector_clock_id(), + ) + .expect("Unable to detect conflicts and updates"); + + assert_eq!( + vec![Conflict::NodeContent { + onto: base_graph + .get_node_index_by_id(component_id) + .expect("Unable to get component NodeIndex"), + to_rebase: new_graph + .get_node_index_by_id(component_id) + .expect("Unable to get component NodeIndex"), + }], + conflicts + ); + assert_eq!(Vec::<Update>::new(), updates); + } + + #[test] + fn detect_conflicts_and_updates_simple_with_modify_removed_item_conflict() { + let initial_change_set = ChangeSet::new_local().expect("Unable to create ChangeSet"); + let base_change_set = &initial_change_set; + let mut base_graph = WorkspaceSnapshotGraph::new(base_change_set) + .expect("Unable to create WorkspaceSnapshotGraph"); + + let schema_id = base_change_set + .generate_ulid() + .expect("Unable to generate Ulid"); + let schema_index = base_graph + .add_node( + NodeWeight::new_content( + base_change_set, + schema_id, + ContentAddress::Schema(ContentHash::from("Schema A")), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add Schema A"); + let schema_variant_id = base_change_set + .generate_ulid() + .expect("Unable to generate Ulid"); + let schema_variant_index = base_graph + .add_node( + NodeWeight::new_content( + base_change_set, + schema_variant_id, + ContentAddress::SchemaVariant(ContentHash::from("Schema Variant A")), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add Schema Variant A"); + + base_graph + .add_edge( + base_graph.root_index, +
EdgeWeight::new(base_change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + schema_index, + ) + .expect("Unable to add root -> schema edge"); + base_graph + .add_edge( + base_graph + .get_node_index_by_id(schema_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(base_change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + schema_variant_index, + ) + .expect("Unable to add schema -> schema variant edge"); + + let component_id = base_change_set + .generate_ulid() + .expect("Unable to generate Ulid"); + let component_index = base_graph + .add_node( + NodeWeight::new_content( + base_change_set, + component_id, + ContentAddress::Component(ContentHash::from("Component A")), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add Component A"); + base_graph + .add_edge( + base_graph.root_index, + EdgeWeight::new(base_change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + component_index, + ) + .expect("Unable to add root -> component edge"); + base_graph + .add_edge( + base_graph + .get_node_index_by_id(component_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(base_change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + base_graph + .get_node_index_by_id(schema_variant_id) + .expect("Unable to get NodeIndex"), + ) + .expect("Unable to add component -> schema variant edge"); + + base_graph.cleanup(); + println!("Initial base graph (Root {:?}):", base_graph.root_index); + base_graph.dot(); + + let new_change_set = ChangeSet::new_local().expect("Unable to create ChangeSet"); + let new_change_set = &new_change_set; + let mut new_graph = base_graph.clone(); + + base_graph + .remove_edge( + base_change_set, + base_graph.root_index, + base_graph + .get_node_index_by_id(component_id) + .expect("Unable to get NodeIndex"), + EdgeWeightKindDiscriminants::Use, + ) + .expect("Unable to remove Component A"); + + base_graph.cleanup(); + println!("Updated base graph (Root: {:?}):", base_graph.root_index); + base_graph.dot(); + + new_graph + .update_content( + new_change_set, + component_id, + ContentHash::from("Updated Component A"), + ) + .expect("Unable to update Component A"); + + new_graph.cleanup(); + println!("new graph (Root {:?}):", new_graph.root_index); + new_graph.dot(); + + let (conflicts, updates) = new_graph + .detect_conflicts_and_updates( + new_change_set.vector_clock_id(), + &base_graph, + base_change_set.vector_clock_id(), + ) + .expect("Unable to detect conflicts and updates"); + + assert_eq!( + vec![Conflict::ModifyRemovedItem( + new_graph + .get_node_index_by_id(component_id) + .expect("Unable to get NodeIndex") + )], + conflicts + ); + assert_eq!(Vec::<Update>::new(), updates); + } + + #[test] + fn detect_conflicts_and_updates_add_unordered_child_to_ordered_container() { + let base_change_set = ChangeSet::new_local().expect("Unable to create ChangeSet"); + let active_change_set = &base_change_set; + let mut base_graph = WorkspaceSnapshotGraph::new(active_change_set) + .expect("Unable to create WorkspaceSnapshotGraph"); + let active_graph = &mut base_graph; + + // Create base prop node + let base_prop_id = { + let prop_id = active_change_set + .generate_ulid() + .expect("Unable to generate Ulid"); + let prop_index = active_graph + .add_ordered_node( + active_change_set, + NodeWeight::new_content( + active_change_set, + prop_id, + ContentAddress::Prop(ContentHash::new(prop_id.to_string().as_bytes())), + ) + .expect("Unable to create NodeWeight"), + ) +
.expect("Unable to add prop"); + + active_graph + .add_edge( + active_graph.root_index, + EdgeWeight::new(active_change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + prop_index, + ) + .expect("Unable to add sv -> prop edge"); + + prop_id + }; + + active_graph.cleanup(); + active_graph.dot(); + + // Create two prop nodes children of base prop + let ordered_prop_1_index = { + let ordered_prop_id = active_change_set + .generate_ulid() + .expect("Unable to generate Ulid"); + let ordered_prop_index = active_graph + .add_node( + NodeWeight::new_content( + active_change_set, + ordered_prop_id, + ContentAddress::Prop(ContentHash::new( + ordered_prop_id.to_string().as_bytes(), + )), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add ordered prop"); + active_graph + .add_ordered_edge( + active_change_set, + active_graph + .get_node_index_by_id(base_prop_id) + .expect("Unable to get prop NodeIndex"), + EdgeWeight::new(active_change_set, EdgeWeightKind::new_use()) + .expect("Unable to create uses edge weight"), + ordered_prop_index, + ) + .expect("Unable to add prop -> ordered_prop_1 edge"); + + ordered_prop_index + }; + + active_graph.cleanup(); + active_graph.dot(); + + let attribute_prototype_id = { + let node_id = active_change_set + .generate_ulid() + .expect("Unable to generate Ulid"); + let node_index = active_graph + .add_node( + NodeWeight::new_content( + active_change_set, + node_id, + ContentAddress::AttributePrototype(ContentHash::new( + node_id.to_string().as_bytes(), + )), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add attribute prototype"); + + active_graph + .add_edge( + active_graph.root_index, + EdgeWeight::new(active_change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + node_index, + ) + .expect("Unable to add root -> prototype edge"); + + node_id + }; + + active_graph.cleanup(); + active_graph.dot(); + + // Get new graph + let new_change_set = ChangeSet::new_local().expect("Unable to create ChangeSet"); + let active_change_set = &new_change_set; + let mut new_graph = base_graph.clone(); + let active_graph = &mut new_graph; + + // Connect Prototype to Prop + active_graph + .add_edge( + active_graph + .get_node_index_by_id(base_prop_id) + .expect("Unable to get prop NodeIndex"), + EdgeWeight::new(active_change_set, EdgeWeightKind::Prototype(None)) + .expect("Unable to create EdgeWeight"), + active_graph + .get_node_index_by_id(attribute_prototype_id) + .expect("Unable to get prop NodeIndex"), + ) + .expect("Unable to add sv -> prop edge"); + active_graph.cleanup(); + active_graph.dot(); + + assert_eq!( + vec![ordered_prop_1_index,], + new_graph + .ordered_children_for_node( + new_graph + .get_node_index_by_id(base_prop_id) + .expect("Unable to get base prop NodeIndex") + ) + .expect("Unable to find ordered children for node") + .expect("Node is not an ordered node") + ); + + // Assert that the new edge to the prototype gets created + let (conflicts, updates) = base_graph + .detect_conflicts_and_updates( + active_change_set.vector_clock_id(), + &new_graph, + new_change_set.vector_clock_id(), + ) + .expect("Unable to detect conflicts and updates"); + + assert!(conflicts.is_empty()); + + match updates.as_slice() { + [Update::NewEdge { + source, + destination, + edge_weight, + }] => { + assert_eq!( + base_graph + .get_node_index_by_id(base_prop_id) + .expect("Unable to get prop NodeIndex"), + *source + ); + assert_eq!( + base_graph + .get_node_index_by_id(attribute_prototype_id) 
+ .expect("Unable to get prop NodeIndex"), + *destination + ); + assert_eq!(&EdgeWeightKind::Prototype(None), edge_weight.kind()); + } + other => panic!("Unexpected updates: {:?}", other), + } + } + + #[test] + fn detect_conflicts_and_updates_complex() { + let initial_change_set = ChangeSet::new_local().expect("Unable to create ChangeSet"); + let base_change_set = &initial_change_set; + let mut base_graph = WorkspaceSnapshotGraph::new(base_change_set) + .expect("Unable to create WorkspaceSnapshotGraph"); + + // Docker Image Schema + let docker_image_schema_id = base_change_set + .generate_ulid() + .expect("Unable to generate Ulid"); + let docker_image_schema_index = base_graph + .add_node( + NodeWeight::new_content( + base_change_set, + docker_image_schema_id, + ContentAddress::Schema(ContentHash::from("first")), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add Schema A"); + base_graph + .add_edge( + base_graph.root_index, + EdgeWeight::new(base_change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + docker_image_schema_index, + ) + .expect("Unable to add root -> schema edge"); + + // Docker Image Schema Variant + let docker_image_schema_variant_id = base_change_set + .generate_ulid() + .expect("Unable to generate Ulid"); + let docker_image_schema_variant_index = base_graph + .add_node( + NodeWeight::new_content( + base_change_set, + docker_image_schema_variant_id, + ContentAddress::SchemaVariant(ContentHash::from("first")), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add Schema Variant A"); + base_graph + .add_edge( + base_graph + .get_node_index_by_id(docker_image_schema_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(base_change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + docker_image_schema_variant_index, + ) + .expect("Unable to add schema -> schema variant edge"); + + // Nginx Docker Image Component + let nginx_docker_image_component_id = base_change_set + .generate_ulid() + .expect("Unable to generate Ulid"); + let nginx_docker_image_component_index = base_graph + .add_node( + NodeWeight::new_content( + base_change_set, + nginx_docker_image_component_id, + ContentAddress::Component(ContentHash::from("first")), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add Component A"); + base_graph + .add_edge( + base_graph.root_index, + EdgeWeight::new(base_change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + nginx_docker_image_component_index, + ) + .expect("Unable to add root -> component edge"); + base_graph + .add_edge( + base_graph + .get_node_index_by_id(nginx_docker_image_component_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(base_change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + base_graph + .get_node_index_by_id(docker_image_schema_variant_id) + .expect("Unable to get NodeIndex"), + ) + .expect("Unable to add component -> schema variant edge"); + + // Alpine Component + let alpine_component_id = base_change_set + .generate_ulid() + .expect("Unable to generate Ulid"); + let alpine_component_index = base_graph + .add_node( + NodeWeight::new_content( + base_change_set, + alpine_component_id, + ContentAddress::Component(ContentHash::from("first")), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add Component A"); + base_graph + .add_edge( + base_graph.root_index, + EdgeWeight::new(base_change_set, EdgeWeightKind::new_use()) + .expect("Unable 
to create EdgeWeight"), + alpine_component_index, + ) + .expect("Unable to add root -> component edge"); + base_graph + .add_edge( + base_graph + .get_node_index_by_id(alpine_component_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(base_change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + base_graph + .get_node_index_by_id(docker_image_schema_variant_id) + .expect("Unable to get NodeIndex"), + ) + .expect("Unable to add component -> schema variant edge"); + + // Butane Schema + let butane_schema_id = base_change_set + .generate_ulid() + .expect("Unable to generate Ulid"); + let butane_schema_index = base_graph + .add_node( + NodeWeight::new_content( + base_change_set, + butane_schema_id, + ContentAddress::Schema(ContentHash::from("first")), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add Schema A"); + base_graph + .add_edge( + base_graph.root_index, + EdgeWeight::new(base_change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + butane_schema_index, + ) + .expect("Unable to add root -> schema edge"); + + // Butane Schema Variant + let butane_schema_variant_id = base_change_set + .generate_ulid() + .expect("Unable to generate Ulid"); + let butane_schema_variant_index = base_graph + .add_node( + NodeWeight::new_content( + base_change_set, + butane_schema_variant_id, + ContentAddress::SchemaVariant(ContentHash::from("first")), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add Schema Variant A"); + base_graph + .add_edge( + base_graph + .get_node_index_by_id(butane_schema_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(base_change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + butane_schema_variant_index, + ) + .expect("Unable to add schema -> schema variant edge"); + + // Nginx Butane Component + let nginx_butane_component_id = base_change_set + .generate_ulid() + .expect("Unable to generate Ulid"); + let nginx_butane_node_index = base_graph + .add_node( + NodeWeight::new_content( + base_change_set, + nginx_butane_component_id, + ContentAddress::Component(ContentHash::from("first")), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add Schema Variant A"); + base_graph + .add_edge( + base_graph.root_index, + EdgeWeight::new(base_change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + nginx_butane_node_index, + ) + .expect("Unable to add root -> component edge"); + base_graph + .add_edge( + base_graph + .get_node_index_by_id(nginx_butane_component_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(base_change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + base_graph + .get_node_index_by_id(butane_schema_variant_id) + .expect("Unable to get NodeIndex"), + ) + .expect("Unable to add component -> schema variant edge"); + + base_graph.cleanup(); + println!("Initial base graph (Root {:?}):", base_graph.root_index); + base_graph.dot(); + + // Create a new change set to cause some problems! + let new_change_set = ChangeSet::new_local().expect("Unable to create ChangeSet"); + let new_change_set = &new_change_set; + let mut new_graph = base_graph.clone(); + + // Create a modify removed item conflict. 
+ base_graph + .remove_edge( + base_change_set, + base_graph.root_index, + base_graph + .get_node_index_by_id(nginx_butane_component_id) + .expect("Unable to get NodeIndex"), + EdgeWeightKindDiscriminants::Use, + ) + .expect("Unable to update the component"); + new_graph + .update_content( + new_change_set, + nginx_butane_component_id, + ContentHash::from("second"), + ) + .expect("Unable to update the component"); + + // Create a node content conflict. + base_graph + .update_content( + base_change_set, + docker_image_schema_variant_id, + ContentHash::from("oopsie"), + ) + .expect("Unable to update the component"); + new_graph + .update_content( + new_change_set, + docker_image_schema_variant_id, + ContentHash::from("poopsie"), + ) + .expect("Unable to update the component"); + + // Create a pure update. + base_graph + .update_content( + base_change_set, + docker_image_schema_id, + ContentHash::from("bg3"), + ) + .expect("Unable to update the schema"); + + let (conflicts, updates) = new_graph + .detect_conflicts_and_updates( + new_change_set.vector_clock_id(), + &base_graph, + base_change_set.vector_clock_id(), + ) + .expect("Unable to detect conflicts and updates"); + + println!("base graph current root: {:?}", base_graph.root_index); + base_graph.dot(); + println!("new graph current root: {:?}", new_graph.root_index); + new_graph.dot(); + + let expected_conflicts = vec![ + Conflict::ModifyRemovedItem( + new_graph + .get_node_index_by_id(nginx_butane_component_id) + .expect("Unable to get component NodeIndex"), + ), + Conflict::NodeContent { + onto: base_graph + .get_node_index_by_id(docker_image_schema_variant_id) + .expect("Unable to get component NodeIndex"), + to_rebase: new_graph + .get_node_index_by_id(docker_image_schema_variant_id) + .expect("Unable to get component NodeIndex"), + }, + ]; + let expected_updates = vec![Update::ReplaceSubgraph { + onto: base_graph + .get_node_index_by_id(docker_image_schema_id) + .expect("Unable to get NodeIndex"), + to_rebase: new_graph + .get_node_index_by_id(docker_image_schema_id) + .expect("Unable to get NodeIndex"), + }]; + + assert_eq!( + ConflictsAndUpdates { + conflicts: expected_conflicts, + updates: expected_updates, + }, + ConflictsAndUpdates { conflicts, updates }, + ); + } + + #[test] + fn add_ordered_node() { + let change_set = ChangeSet::new_local().expect("Unable to create ChangeSet"); + let change_set = &change_set; + let mut graph = WorkspaceSnapshotGraph::new(change_set) + .expect("Unable to create WorkspaceSnapshotGraph"); + + let schema_id = change_set.generate_ulid().expect("Unable to generate Ulid"); + let schema_index = graph + .add_node( + NodeWeight::new_content( + change_set, + schema_id, + ContentAddress::Schema(ContentHash::new( + SchemaId::generate().to_string().as_bytes(), + )), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add schema"); + let schema_variant_id = change_set.generate_ulid().expect("Unable to generate Ulid"); + let schema_variant_index = graph + .add_node( + NodeWeight::new_content( + change_set, + schema_variant_id, + ContentAddress::SchemaVariant(ContentHash::new( + SchemaVariantId::generate().to_string().as_bytes(), + )), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add schema variant"); + + graph + .add_edge( + graph.root_index, + EdgeWeight::new(change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + schema_index, + ) + .expect("Unable to add root -> schema edge"); + graph + .add_edge( + graph + 
.get_node_index_by_id(schema_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + schema_variant_index, + ) + .expect("Unable to add schema -> schema variant edge"); + + let func_id = change_set.generate_ulid().expect("Unable to generate Ulid"); + let func_index = graph + .add_node( + NodeWeight::new_content( + change_set, + func_id, + ContentAddress::Func(ContentHash::new( + FuncId::generate().to_string().as_bytes(), + )), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add func"); + graph + .add_edge( + graph.root_index, + EdgeWeight::new(change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + func_index, + ) + .expect("Unable to add root -> func edge"); + + let prop_id = change_set.generate_ulid().expect("Unable to generate Ulid"); + let prop_index = graph + .add_ordered_node( + change_set, + NodeWeight::new_content( + change_set, + prop_id, + ContentAddress::Prop(ContentHash::new( + PropId::generate().to_string().as_bytes(), + )), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add prop"); + graph + .add_edge( + graph + .get_node_index_by_id(schema_variant_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + prop_index, + ) + .expect("Unable to add schema variant -> prop edge"); + graph + .add_edge( + graph + .get_node_index_by_id(prop_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + graph + .get_node_index_by_id(func_id) + .expect("Unable to get NodeIndex"), + ) + .expect("Unable to add prop -> func edge"); + graph.cleanup(); + graph.dot(); + + let ordered_prop_1_id = change_set.generate_ulid().expect("Unable to generate Ulid"); + let ordered_prop_1_index = graph + .add_node( + NodeWeight::new_content( + change_set, + ordered_prop_1_id, + ContentAddress::Prop(ContentHash::new( + ordered_prop_1_id.to_string().as_bytes(), + )), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add ordered prop"); + graph + .add_ordered_edge( + change_set, + graph + .get_node_index_by_id(prop_id) + .expect("Unable to get NodeWeight for prop"), + EdgeWeight::new(change_set, EdgeWeightKind::new_use()) + .expect("Unable to create uses edge weight"), + ordered_prop_1_index, + ) + .expect("Unable to add prop -> ordered_prop_1 edge"); + + let ordered_prop_2_id = change_set.generate_ulid().expect("Unable to generate Ulid"); + let ordered_prop_2_index = graph + .add_node( + NodeWeight::new_content( + change_set, + ordered_prop_2_id, + ContentAddress::Prop(ContentHash::new( + ordered_prop_2_id.to_string().as_bytes(), + )), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add ordered prop"); + graph + .add_ordered_edge( + change_set, + graph + .get_node_index_by_id(prop_id) + .expect("Unable to get NodeWeight for prop"), + EdgeWeight::new(change_set, EdgeWeightKind::new_use()) + .expect("Unable to create uses edge weight"), + ordered_prop_2_index, + ) + .expect("Unable to add prop -> ordered_prop_2 edge"); + + let ordered_prop_3_id = change_set.generate_ulid().expect("Unable to generate Ulid"); + let ordered_prop_3_index = graph + .add_node( + NodeWeight::new_content( + change_set, + ordered_prop_3_id, + ContentAddress::Prop(ContentHash::new( + ordered_prop_3_id.to_string().as_bytes(), + )), + ) + .expect("Unable to create 
NodeWeight"), + ) + .expect("Unable to add ordered prop"); + graph + .add_ordered_edge( + change_set, + graph + .get_node_index_by_id(prop_id) + .expect("Unable to get NodeWeight for prop"), + EdgeWeight::new(change_set, EdgeWeightKind::new_use()) + .expect("Unable to create uses edge weight"), + ordered_prop_3_index, + ) + .expect("Unable to add prop -> ordered_prop_3 edge"); + graph.cleanup(); + graph.dot(); + + assert_eq!( + vec![ + ordered_prop_1_index, + ordered_prop_2_index, + ordered_prop_3_index, + ], + graph + .ordered_children_for_node( + graph + .get_node_index_by_id(prop_id) + .expect("Unable to get prop NodeIndex") + ) + .expect("Unable to find ordered children for node") + .expect("Node is not an ordered node") + ); + } + + #[test] + fn add_ordered_node_below_root() { + let base_change_set = ChangeSet::new_local().expect("Unable to create ChangeSet"); + let active_change_set = &base_change_set; + let mut graph = WorkspaceSnapshotGraph::new(active_change_set) + .expect("Unable to create WorkspaceSnapshotGraph"); + + let prop_id = active_change_set + .generate_ulid() + .expect("Unable to generate Ulid"); + let prop_index = graph + .add_ordered_node( + active_change_set, + NodeWeight::new_content( + active_change_set, + prop_id, + ContentAddress::Prop(ContentHash::new(prop_id.to_string().as_bytes())), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add prop"); + + graph + .add_edge( + graph.root_index, + EdgeWeight::new(active_change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + prop_index, + ) + .expect("Unable to add root -> prop edge"); + + graph.cleanup(); + assert_eq!( + Vec::::new(), + graph + .ordered_children_for_node( + graph + .get_node_index_by_id(prop_id) + .expect("Unable to get prop NodeIndex") + ) + .expect("Unable to find ordered children for node") + .expect("Node is not an ordered node") + ); + } + + #[test] + fn reorder_ordered_node() { + let change_set = ChangeSet::new_local().expect("Unable to create ChangeSet"); + let change_set = &change_set; + let mut graph = WorkspaceSnapshotGraph::new(change_set) + .expect("Unable to create WorkspaceSnapshotGraph"); + + let schema_id = change_set.generate_ulid().expect("Unable to generate Ulid"); + let schema_index = graph + .add_node( + NodeWeight::new_content( + change_set, + schema_id, + ContentAddress::Schema(ContentHash::new( + SchemaId::generate().to_string().as_bytes(), + )), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add schema"); + let schema_variant_id = change_set.generate_ulid().expect("Unable to generate Ulid"); + let schema_variant_index = graph + .add_node( + NodeWeight::new_content( + change_set, + schema_variant_id, + ContentAddress::SchemaVariant(ContentHash::new( + SchemaVariantId::generate().to_string().as_bytes(), + )), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add schema variant"); + + graph + .add_edge( + graph.root_index, + EdgeWeight::new(change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + schema_index, + ) + .expect("Unable to add root -> schema edge"); + graph + .add_edge( + graph + .get_node_index_by_id(schema_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + schema_variant_index, + ) + .expect("Unable to add schema -> schema variant edge"); + + let func_id = change_set.generate_ulid().expect("Unable to generate Ulid"); + let func_index = graph + .add_node( + 
NodeWeight::new_content( + change_set, + func_id, + ContentAddress::Func(ContentHash::new( + FuncId::generate().to_string().as_bytes(), + )), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add func"); + graph + .add_edge( + graph.root_index, + EdgeWeight::new(change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + func_index, + ) + .expect("Unable to add root -> func edge"); + + let prop_id = change_set.generate_ulid().expect("Unable to generate Ulid"); + let prop_index = graph + .add_ordered_node( + change_set, + NodeWeight::new_content( + change_set, + prop_id, + ContentAddress::Prop(ContentHash::new( + PropId::generate().to_string().as_bytes(), + )), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add prop"); + graph + .add_edge( + graph + .get_node_index_by_id(schema_variant_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + prop_index, + ) + .expect("Unable to add schema variant -> prop edge"); + graph + .add_edge( + graph + .get_node_index_by_id(prop_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + graph + .get_node_index_by_id(func_id) + .expect("Unable to get NodeIndex"), + ) + .expect("Unable to add prop -> func edge"); + graph.cleanup(); + graph.dot(); + + let ordered_prop_1_id = change_set.generate_ulid().expect("Unable to generate Ulid"); + let ordered_prop_1_index = graph + .add_node( + NodeWeight::new_content( + change_set, + ordered_prop_1_id, + ContentAddress::Prop(ContentHash::new( + ordered_prop_1_id.to_string().as_bytes(), + )), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add ordered prop"); + graph + .add_ordered_edge( + change_set, + graph + .get_node_index_by_id(prop_id) + .expect("Unable to get NodeWeight for prop"), + EdgeWeight::new(change_set, EdgeWeightKind::new_use()) + .expect("Unable to create uses edge weight"), + ordered_prop_1_index, + ) + .expect("Unable to add prop -> ordered_prop_1 edge"); + + let ordered_prop_2_id = change_set.generate_ulid().expect("Unable to generate Ulid"); + let ordered_prop_2_index = graph + .add_node( + NodeWeight::new_content( + change_set, + ordered_prop_2_id, + ContentAddress::Prop(ContentHash::new( + ordered_prop_2_id.to_string().as_bytes(), + )), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add ordered prop"); + graph + .add_ordered_edge( + change_set, + graph + .get_node_index_by_id(prop_id) + .expect("Unable to get NodeWeight for prop"), + EdgeWeight::new(change_set, EdgeWeightKind::new_use()) + .expect("Unable to create uses edge weight"), + ordered_prop_2_index, + ) + .expect("Unable to add prop -> ordered_prop_2 edge"); + + let ordered_prop_3_id = change_set.generate_ulid().expect("Unable to generate Ulid"); + let ordered_prop_3_index = graph + .add_node( + NodeWeight::new_content( + change_set, + ordered_prop_3_id, + ContentAddress::Prop(ContentHash::new( + ordered_prop_3_id.to_string().as_bytes(), + )), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add ordered prop"); + graph + .add_ordered_edge( + change_set, + graph + .get_node_index_by_id(prop_id) + .expect("Unable to get NodeWeight for prop"), + EdgeWeight::new(change_set, EdgeWeightKind::new_use()) + .expect("Unable to create uses edge weight"), + ordered_prop_3_index, + ) + .expect("Unable to add prop -> ordered_prop_3 edge"); + + 
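// A fourth ordered child goes in last; ordered_children_for_node should report all four in insertion order until update_order below rearranges them. +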
let ordered_prop_4_id = change_set.generate_ulid().expect("Unable to generate Ulid"); + let ordered_prop_4_index = graph + .add_node( + NodeWeight::new_content( + change_set, + ordered_prop_4_id, + ContentAddress::Prop(ContentHash::new( + ordered_prop_4_id.to_string().as_bytes(), + )), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add ordered prop"); + graph + .add_ordered_edge( + change_set, + graph + .get_node_index_by_id(prop_id) + .expect("Unable to get NodeWeight for prop"), + EdgeWeight::new(change_set, EdgeWeightKind::new_use()) + .expect("Unable to create uses edge weight"), + ordered_prop_4_index, + ) + .expect("Unable to add prop -> ordered_prop_4 edge"); + + graph.cleanup(); + graph.dot(); + + assert_eq!( + vec![ + ordered_prop_1_index, + ordered_prop_2_index, + ordered_prop_3_index, + ordered_prop_4_index, + ], + graph + .ordered_children_for_node( + graph + .get_node_index_by_id(prop_id) + .expect("Unable to get prop NodeIndex") + ) + .expect("Unable to find ordered children for node") + .expect("Node is not an ordered node") + ); + + let new_order = vec![ + ordered_prop_2_id, + ordered_prop_1_id, + ordered_prop_4_id, + ordered_prop_3_id, + ]; + + graph + .update_order(change_set, prop_id, new_order) + .expect("Unable to update order of prop's children"); + + assert_eq!( + vec![ + ordered_prop_2_index, + ordered_prop_1_index, + ordered_prop_4_index, + ordered_prop_3_index, + ], + graph + .ordered_children_for_node( + graph + .get_node_index_by_id(prop_id) + .expect("Unable to get prop NodeIndex") + ) + .expect("Unable to find ordered children for node") + .expect("Node is not an ordered node") + ); + } + + #[test] + fn remove_unordered_node_and_detect_edge_removal() { + let initial_change_set = ChangeSet::new_local().expect("Unable to create ChangeSet"); + let initial_change_set = &initial_change_set; + let mut graph = WorkspaceSnapshotGraph::new(initial_change_set) + .expect("Unable to create WorkspaceSnapshotGraph"); + + let schema_id = initial_change_set + .generate_ulid() + .expect("Unable to generate Ulid"); + let schema_index = graph + .add_node( + NodeWeight::new_content( + initial_change_set, + schema_id, + ContentAddress::Schema(ContentHash::new( + SchemaId::generate().to_string().as_bytes(), + )), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add schema"); + let schema_variant_id = initial_change_set + .generate_ulid() + .expect("Unable to generate Ulid"); + let schema_variant_index = graph + .add_node( + NodeWeight::new_content( + initial_change_set, + schema_variant_id, + ContentAddress::SchemaVariant(ContentHash::new( + SchemaVariantId::generate().to_string().as_bytes(), + )), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add schema variant"); + + graph + .add_edge( + graph.root_index, + EdgeWeight::new(initial_change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + schema_index, + ) + .expect("Unable to add root -> schema edge"); + graph + .add_edge( + graph + .get_node_index_by_id(schema_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(initial_change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + schema_variant_index, + ) + .expect("Unable to add schema -> schema variant edge"); + + let schema_variant_2_id = initial_change_set + .generate_ulid() + .expect("Unable to generate Ulid"); + let schema_variant_2_index = graph + .add_node( + NodeWeight::new_content( + initial_change_set, + schema_variant_2_id, + 
ContentAddress::SchemaVariant(ContentHash::new(
+                        SchemaVariantId::generate().to_string().as_bytes(),
+                    )),
+                )
+                .expect("Unable to create NodeWeight"),
+            )
+            .expect("Unable to add schema variant");
+
+        graph
+            .add_edge(
+                graph
+                    .get_node_index_by_id(schema_id)
+                    .expect("Unable to get NodeIndex"),
+                EdgeWeight::new(initial_change_set, EdgeWeightKind::new_use())
+                    .expect("Unable to create EdgeWeight"),
+                schema_variant_2_index,
+            )
+            .expect("Unable to add schema -> schema variant edge");
+
+        let expected_edges = HashSet::from([schema_variant_2_index, schema_variant_index]);
+
+        let existing_edges: HashSet<NodeIndex> = graph
+            .edges_directed(
+                graph
+                    .get_node_index_by_id(schema_id)
+                    .expect("Unable to get NodeIndex for schema"),
+                Outgoing,
+            )
+            .map(|edge_ref| edge_ref.target())
+            .collect();
+
+        assert_eq!(
+            expected_edges, existing_edges,
+            "confirm edges are there before deleting"
+        );
+
+        graph
+            .mark_graph_seen(initial_change_set.vector_clock_id())
+            .expect("Unable to mark initial graph as seen");
+
+        let mut graph_with_deleted_edge = graph.clone();
+        let new_change_set = ChangeSet::new_local().expect("Unable to create ChangeSet");
+        let new_change_set = &new_change_set;
+
+        graph_with_deleted_edge.dot();
+
+        graph_with_deleted_edge
+            .remove_edge(
+                new_change_set,
+                graph_with_deleted_edge
+                    .get_node_index_by_id(schema_id)
+                    .expect("Unable to get NodeIndex for schema"),
+                schema_variant_2_index,
+                EdgeWeightKindDiscriminants::Use,
+            )
+            .expect("Edge removal failed");
+
+        graph_with_deleted_edge.dot();
+
+        let existing_edges: Vec<NodeIndex> = graph_with_deleted_edge
+            .edges_directed(
+                graph_with_deleted_edge
+                    .get_node_index_by_id(schema_id)
+                    .expect("Unable to get NodeIndex for schema"),
+                Outgoing,
+            )
+            .map(|edge_ref| edge_ref.target())
+            .collect();
+
+        assert_eq!(
+            vec![schema_variant_index],
+            existing_edges,
+            "confirm edges after deletion"
+        );
+
+        graph_with_deleted_edge
+            .mark_graph_seen(new_change_set.vector_clock_id())
+            .expect("Unable to mark new graph as seen");
+
+        let (conflicts, updates) = graph
+            .detect_conflicts_and_updates(
+                initial_change_set.vector_clock_id(),
+                &graph_with_deleted_edge,
+                new_change_set.vector_clock_id(),
+            )
+            .expect("Failed to detect conflicts and updates");
+
+        assert!(conflicts.is_empty());
+        dbg!(&updates);
+        assert_eq!(1, updates.len());
+
+        assert!(matches!(
+            updates.first().expect("should be there"),
+            Update::RemoveEdge { .. }
+        ));
+    }
+
+    #[test]
+    fn remove_unordered_node() {
+        let change_set = ChangeSet::new_local().expect("Unable to create ChangeSet");
+        let change_set = &change_set;
+        let mut graph = WorkspaceSnapshotGraph::new(change_set)
+            .expect("Unable to create WorkspaceSnapshotGraph");
+
+        let schema_id = change_set.generate_ulid().expect("Unable to generate Ulid");
+        let schema_index = graph
+            .add_node(
+                NodeWeight::new_content(
+                    change_set,
+                    schema_id,
+                    ContentAddress::Schema(ContentHash::new(
+                        SchemaId::generate().to_string().as_bytes(),
+                    )),
+                )
+                .expect("Unable to create NodeWeight"),
+            )
+            .expect("Unable to add schema");
+        let schema_variant_id = change_set.generate_ulid().expect("Unable to generate Ulid");
+        let schema_variant_index = graph
+            .add_node(
+                NodeWeight::new_content(
+                    change_set,
+                    schema_variant_id,
+                    ContentAddress::SchemaVariant(ContentHash::new(
+                        SchemaVariantId::generate().to_string().as_bytes(),
+                    )),
+                )
+                .expect("Unable to create NodeWeight"),
+            )
+            .expect("Unable to add schema variant");
+
+        graph
+            .add_edge(
+                graph.root_index,
+                EdgeWeight::new(change_set, EdgeWeightKind::new_use())
+                    .expect("Unable to create EdgeWeight"),
+                schema_index,
+            )
+            .expect("Unable to add root -> schema edge");
+        graph
+            .add_edge(
+                graph
+                    .get_node_index_by_id(schema_id)
+                    .expect("Unable to get NodeIndex"),
+                EdgeWeight::new(change_set, EdgeWeightKind::new_use())
+                    .expect("Unable to create EdgeWeight"),
+                schema_variant_index,
+            )
+            .expect("Unable to add schema -> schema variant edge");
+
+        let schema_variant_2_id = change_set.generate_ulid().expect("Unable to generate Ulid");
+        let schema_variant_2_index = graph
+            .add_node(
+                NodeWeight::new_content(
+                    change_set,
+                    schema_variant_2_id,
+                    ContentAddress::SchemaVariant(ContentHash::new(
+                        SchemaVariantId::generate().to_string().as_bytes(),
+                    )),
+                )
+                .expect("Unable to create NodeWeight"),
+            )
+            .expect("Unable to add schema variant");
+
+        graph
+            .add_edge(
+                graph
+                    .get_node_index_by_id(schema_id)
+                    .expect("Unable to get NodeIndex"),
+                EdgeWeight::new(change_set, EdgeWeightKind::new_use())
+                    .expect("Unable to create EdgeWeight"),
+                schema_variant_2_index,
+            )
+            .expect("Unable to add schema -> schema variant edge");
+
+        let expected_edges = HashSet::from([schema_variant_2_index, schema_variant_index]);
+
+        let existing_edges: HashSet<NodeIndex> = graph
+            .edges_directed(
+                graph
+                    .get_node_index_by_id(schema_id)
+                    .expect("Unable to get NodeIndex for schema"),
+                Outgoing,
+            )
+            .map(|edge_ref| edge_ref.target())
+            .collect();
+
+        assert_eq!(
+            expected_edges, existing_edges,
+            "confirm edges are there before deleting"
+        );
+
+        graph
+            .remove_edge(
+                change_set,
+                graph
+                    .get_node_index_by_id(schema_id)
+                    .expect("Unable to get NodeIndex for schema"),
+                schema_variant_2_index,
+                EdgeWeightKindDiscriminants::Use,
+            )
+            .expect("Edge removal failed");
+
+        let existing_edges: Vec<NodeIndex> = graph
+            .edges_directed(
+                graph
+                    .get_node_index_by_id(schema_id)
+                    .expect("Unable to get NodeIndex for schema"),
+                Outgoing,
+            )
+            .map(|edge_ref| edge_ref.target())
+            .collect();
+
+        assert_eq!(
+            vec![schema_variant_index],
+            existing_edges,
+            "confirm edges after deletion"
+        );
+    }
+
+    #[test]
+    fn remove_ordered_node() {
+        let change_set = ChangeSet::new_local().expect("Unable to create ChangeSet");
+        let change_set = &change_set;
+        let mut graph = WorkspaceSnapshotGraph::new(change_set)
+            .expect("Unable to create WorkspaceSnapshotGraph");
+
+        let schema_id = change_set.generate_ulid().expect("Unable to generate Ulid");
+        let
schema_index = graph + .add_node( + NodeWeight::new_content( + change_set, + schema_id, + ContentAddress::Schema(ContentHash::new( + SchemaId::generate().to_string().as_bytes(), + )), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add schema"); + let schema_variant_id = change_set.generate_ulid().expect("Unable to generate Ulid"); + let schema_variant_index = graph + .add_node( + NodeWeight::new_content( + change_set, + schema_variant_id, + ContentAddress::SchemaVariant(ContentHash::new( + SchemaVariantId::generate().to_string().as_bytes(), + )), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add schema variant"); + + graph + .add_edge( + graph.root_index, + EdgeWeight::new(change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + schema_index, + ) + .expect("Unable to add root -> schema edge"); + graph + .add_edge( + graph + .get_node_index_by_id(schema_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + schema_variant_index, + ) + .expect("Unable to add schema -> schema variant edge"); + + let func_id = change_set.generate_ulid().expect("Unable to generate Ulid"); + let func_index = graph + .add_node( + NodeWeight::new_content( + change_set, + func_id, + ContentAddress::Func(ContentHash::new( + FuncId::generate().to_string().as_bytes(), + )), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add func"); + graph + .add_edge( + graph.root_index, + EdgeWeight::new(change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + func_index, + ) + .expect("Unable to add root -> func edge"); + + let root_prop_id = change_set.generate_ulid().expect("Unable to generate Ulid"); + let root_prop_index = graph + .add_ordered_node( + change_set, + NodeWeight::new_content( + change_set, + root_prop_id, + ContentAddress::Prop(ContentHash::new( + PropId::generate().to_string().as_bytes(), + )), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add prop"); + graph + .add_edge( + graph + .get_node_index_by_id(schema_variant_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + root_prop_index, + ) + .expect("Unable to add schema variant -> prop edge"); + graph + .add_edge( + graph + .get_node_index_by_id(root_prop_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + graph + .get_node_index_by_id(func_id) + .expect("Unable to get NodeIndex"), + ) + .expect("Unable to add prop -> func edge"); + graph.cleanup(); + graph.dot(); + + let ordered_prop_1_id = change_set.generate_ulid().expect("Unable to generate Ulid"); + let ordered_prop_1_index = graph + .add_node( + NodeWeight::new_content( + change_set, + ordered_prop_1_id, + ContentAddress::Prop(ContentHash::new( + ordered_prop_1_id.to_string().as_bytes(), + )), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add ordered prop"); + graph + .add_ordered_edge( + change_set, + graph + .get_node_index_by_id(root_prop_id) + .expect("Unable to get NodeWeight for prop"), + EdgeWeight::new(change_set, EdgeWeightKind::new_use()) + .expect("Unable to create uses edge weight"), + ordered_prop_1_index, + ) + .expect("Unable to add prop -> ordered_prop_1 edge"); + + let ordered_prop_2_id = change_set.generate_ulid().expect("Unable to generate Ulid"); + 
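Companion sketch for the removal case this test builds toward (same illustrative model as the sketch above, not the dal API): removing an ordered child has to update both the edge set and the ordering record, otherwise the ordering node would keep a dangling id. This is why the test later inspects `ordering_weight.order()` after `remove_edge`.

```rust
// Removing an ordered child: drop the edge target *and* keep the
// ordering record consistent with the remaining children.
use std::collections::HashMap;

type NodeId = u64;

#[derive(Default)]
struct OrderedContainer {
    children: HashMap<NodeId, &'static str>,
    order: Vec<NodeId>,
}

impl OrderedContainer {
    fn add_ordered_child(&mut self, id: NodeId, label: &'static str) {
        self.children.insert(id, label);
        self.order.push(id);
    }

    fn remove_child(&mut self, id: NodeId) {
        self.children.remove(&id);
        // Without this retain, `order` would reference a dangling id.
        self.order.retain(|child_id| *child_id != id);
    }
}

fn main() {
    let mut container = OrderedContainer::default();
    for (id, label) in [(1, "prop 1"), (2, "prop 2"), (3, "prop 3"), (4, "prop 4")] {
        container.add_ordered_child(id, label);
    }
    container.remove_child(2);
    assert_eq!(vec![1, 3, 4], container.order);
}
```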
let ordered_prop_2_index = graph + .add_node( + NodeWeight::new_content( + change_set, + ordered_prop_2_id, + ContentAddress::Prop(ContentHash::new( + ordered_prop_2_id.to_string().as_bytes(), + )), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add ordered prop"); + graph + .add_ordered_edge( + change_set, + graph + .get_node_index_by_id(root_prop_id) + .expect("Unable to get NodeWeight for prop"), + EdgeWeight::new(change_set, EdgeWeightKind::new_use()) + .expect("Unable to create uses edge weight"), + ordered_prop_2_index, + ) + .expect("Unable to add prop -> ordered_prop_2 edge"); + + let ordered_prop_3_id = change_set.generate_ulid().expect("Unable to generate Ulid"); + let ordered_prop_3_index = graph + .add_node( + NodeWeight::new_content( + change_set, + ordered_prop_3_id, + ContentAddress::Prop(ContentHash::new( + ordered_prop_3_id.to_string().as_bytes(), + )), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add ordered prop"); + graph + .add_ordered_edge( + change_set, + graph + .get_node_index_by_id(root_prop_id) + .expect("Unable to get NodeWeight for prop"), + EdgeWeight::new(change_set, EdgeWeightKind::new_use()) + .expect("Unable to create uses edge weight"), + ordered_prop_3_index, + ) + .expect("Unable to add prop -> ordered_prop_3 edge"); + + let ordered_prop_4_id = change_set.generate_ulid().expect("Unable to generate Ulid"); + let ordered_prop_4_index = graph + .add_node( + NodeWeight::new_content( + change_set, + ordered_prop_4_id, + ContentAddress::Prop(ContentHash::new( + ordered_prop_4_id.to_string().as_bytes(), + )), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add ordered prop"); + graph + .add_ordered_edge( + change_set, + graph + .get_node_index_by_id(root_prop_id) + .expect("Unable to get NodeWeight for prop"), + EdgeWeight::new(change_set, EdgeWeightKind::new_use()) + .expect("Unable to create uses edge weight"), + ordered_prop_4_index, + ) + .expect("Unable to add prop -> ordered_prop_4 edge"); + + graph.cleanup(); + graph.dot(); + + assert_eq!( + vec![ + ordered_prop_1_index, + ordered_prop_2_index, + ordered_prop_3_index, + ordered_prop_4_index, + ], + graph + .ordered_children_for_node( + graph + .get_node_index_by_id(root_prop_id) + .expect("Unable to get prop NodeIndex") + ) + .expect("Unable to find ordered children for node") + .expect("Node is not an ordered node") + ); + + graph + .remove_edge( + change_set, + graph + .get_node_index_by_id(root_prop_id) + .expect("Unable to get NodeIndex for prop"), + ordered_prop_2_index, + EdgeWeightKindDiscriminants::Use, + ) + .expect("Unable to remove prop -> ordered_prop_2 edge"); + + assert_eq!( + vec![ + ordered_prop_1_index, + ordered_prop_3_index, + ordered_prop_4_index, + ], + graph + .ordered_children_for_node( + graph + .get_node_index_by_id(root_prop_id) + .expect("Unable to get prop NodeIndex") + ) + .expect("Unable to find ordered children for node") + .expect("Node is not an ordered node") + ); + if let NodeWeight::Ordering(ordering_weight) = graph + .get_node_weight( + graph + .ordering_node_index_for_container( + graph + .get_node_index_by_id(root_prop_id) + .expect("Unable to find ordering node for prop"), + ) + .expect("Error getting ordering NodeIndex for prop") + .expect("Unable to find ordering NodeIndex"), + ) + .expect("Unable to get ordering NodeWeight for ordering node") + { + assert_eq!( + &vec![ordered_prop_1_id, ordered_prop_3_id, ordered_prop_4_id], + ordering_weight.order() + ); + } else { + panic!("Unable to 
destructure ordering node weight"); + } + } + + #[test] + fn detect_conflicts_and_updates_simple_ordering_no_conflicts_no_updates_in_base() { + let initial_change_set = ChangeSet::new_local().expect("Unable to create ChangeSet"); + let initial_change_set = &initial_change_set; + let mut initial_graph = WorkspaceSnapshotGraph::new(initial_change_set) + .expect("Unable to create WorkspaceSnapshotGraph"); + + let schema_id = initial_change_set + .generate_ulid() + .expect("Unable to generate Ulid"); + let schema_index = initial_graph + .add_node( + NodeWeight::new_content( + initial_change_set, + schema_id, + ContentAddress::Schema(ContentHash::from("Schema A")), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add Schema A"); + let schema_variant_id = initial_change_set + .generate_ulid() + .expect("Unable to generate Ulid"); + let schema_variant_index = initial_graph + .add_node( + NodeWeight::new_content( + initial_change_set, + schema_variant_id, + ContentAddress::SchemaVariant(ContentHash::from("Schema Variant A")), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add Schema Variant A"); + + initial_graph + .add_edge( + initial_graph.root_index, + EdgeWeight::new(initial_change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + schema_index, + ) + .expect("Unable to add root -> schema edge"); + initial_graph + .add_edge( + initial_graph + .get_node_index_by_id(schema_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(initial_change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + schema_variant_index, + ) + .expect("Unable to add schema -> schema variant edge"); + + let container_prop_id = initial_change_set + .generate_ulid() + .expect("Unable to generate Ulid"); + let container_prop_index = initial_graph + .add_ordered_node( + initial_change_set, + NodeWeight::new_content( + initial_change_set, + container_prop_id, + ContentAddress::Prop(ContentHash::new( + container_prop_id.to_string().as_bytes(), + )), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add container prop"); + initial_graph + .add_edge( + initial_graph + .get_node_index_by_id(schema_variant_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(initial_change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + container_prop_index, + ) + .expect("Unable to add schema variant -> container prop edge"); + + let ordered_prop_1_id = initial_change_set + .generate_ulid() + .expect("Unable to generate Ulid"); + let ordered_prop_1_index = initial_graph + .add_node( + NodeWeight::new_content( + initial_change_set, + ordered_prop_1_id, + ContentAddress::Prop(ContentHash::new( + ordered_prop_1_id.to_string().as_bytes(), + )), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add ordered prop 1"); + initial_graph + .add_ordered_edge( + initial_change_set, + initial_graph + .get_node_index_by_id(container_prop_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(initial_change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + ordered_prop_1_index, + ) + .expect("Unable to add container prop -> ordered prop 1 edge"); + + let ordered_prop_2_id = initial_change_set + .generate_ulid() + .expect("Unable to generate Ulid"); + let ordered_prop_2_index = initial_graph + .add_node( + NodeWeight::new_content( + initial_change_set, + ordered_prop_2_id, + ContentAddress::Prop(ContentHash::new( + ordered_prop_2_id.to_string().as_bytes(), + 
)),
+                )
+                .expect("Unable to create NodeWeight"),
+            )
+            .expect("Unable to add ordered prop 2");
+        initial_graph
+            .add_ordered_edge(
+                initial_change_set,
+                initial_graph
+                    .get_node_index_by_id(container_prop_id)
+                    .expect("Unable to get NodeIndex"),
+                EdgeWeight::new(initial_change_set, EdgeWeightKind::new_use())
+                    .expect("Unable to create EdgeWeight"),
+                ordered_prop_2_index,
+            )
+            .expect("Unable to add container prop -> ordered prop 2 edge");
+
+        let ordered_prop_3_id = initial_change_set
+            .generate_ulid()
+            .expect("Unable to generate Ulid");
+        let ordered_prop_3_index = initial_graph
+            .add_node(
+                NodeWeight::new_content(
+                    initial_change_set,
+                    ordered_prop_3_id,
+                    ContentAddress::Prop(ContentHash::new(
+                        ordered_prop_3_id.to_string().as_bytes(),
+                    )),
+                )
+                .expect("Unable to create NodeWeight"),
+            )
+            .expect("Unable to add ordered prop 3");
+        initial_graph
+            .add_ordered_edge(
+                initial_change_set,
+                initial_graph
+                    .get_node_index_by_id(container_prop_id)
+                    .expect("Unable to get NodeIndex"),
+                EdgeWeight::new(initial_change_set, EdgeWeightKind::new_use())
+                    .expect("Unable to create EdgeWeight"),
+                ordered_prop_3_index,
+            )
+            .expect("Unable to add container prop -> ordered prop 3 edge");
+
+        let ordered_prop_4_id = initial_change_set
+            .generate_ulid()
+            .expect("Unable to generate Ulid");
+        let ordered_prop_4_index = initial_graph
+            .add_node(
+                NodeWeight::new_content(
+                    initial_change_set,
+                    ordered_prop_4_id,
+                    ContentAddress::Prop(ContentHash::new(
+                        ordered_prop_4_id.to_string().as_bytes(),
+                    )),
+                )
+                .expect("Unable to create NodeWeight"),
+            )
+            .expect("Unable to add ordered prop 4");
+        initial_graph
+            .add_ordered_edge(
+                initial_change_set,
+                initial_graph
+                    .get_node_index_by_id(container_prop_id)
+                    .expect("Unable to get NodeIndex"),
+                EdgeWeight::new(initial_change_set, EdgeWeightKind::new_use())
+                    .expect("Unable to create EdgeWeight"),
+                ordered_prop_4_index,
+            )
+            .expect("Unable to add container prop -> ordered prop 4 edge");
+
+        initial_graph.cleanup();
+        initial_graph.dot();
+
+        let new_change_set = ChangeSet::new_local().expect("Unable to create ChangeSet");
+        let new_change_set = &new_change_set;
+        let mut new_graph = initial_graph.clone();
+
+        let ordered_prop_5_id = new_change_set
+            .generate_ulid()
+            .expect("Unable to generate Ulid");
+        let ordered_prop_5_index = new_graph
+            .add_node(
+                NodeWeight::new_content(
+                    new_change_set,
+                    ordered_prop_5_id,
+                    ContentAddress::Prop(ContentHash::new(
+                        ordered_prop_5_id.to_string().as_bytes(),
+                    )),
+                )
+                .expect("Unable to create NodeWeight"),
+            )
+            .expect("Unable to add ordered prop 5");
+        new_graph
+            .add_ordered_edge(
+                new_change_set,
+                new_graph
+                    .get_node_index_by_id(container_prop_id)
+                    .expect("Unable to get NodeIndex"),
+                EdgeWeight::new(new_change_set, EdgeWeightKind::new_use())
+                    .expect("Unable to create EdgeWeight"),
+                ordered_prop_5_index,
+            )
+            .expect("Unable to add container prop -> ordered prop 5 edge");
+
+        new_graph.cleanup();
+        new_graph.dot();
+
+        let (conflicts, updates) = new_graph
+            .detect_conflicts_and_updates(
+                new_change_set.vector_clock_id(),
+                &initial_graph,
+                initial_change_set.vector_clock_id(),
+            )
+            .expect("Unable to detect conflicts and updates");
+
+        assert_eq!(Vec::<Conflict>::new(), conflicts);
+        assert_eq!(Vec::<Update>::new(), updates);
+    }
+
+    #[test]
+    fn detect_conflicts_and_updates_simple_ordering_no_conflicts_with_updates_in_base() {
+        let initial_change_set = ChangeSet::new_local().expect("Unable to create ChangeSet");
+        let initial_change_set = &initial_change_set;
+        let
mut initial_graph = WorkspaceSnapshotGraph::new(initial_change_set) + .expect("Unable to create WorkspaceSnapshotGraph"); + + let schema_id = initial_change_set + .generate_ulid() + .expect("Unable to generate Ulid"); + let schema_index = initial_graph + .add_node( + NodeWeight::new_content( + initial_change_set, + schema_id, + ContentAddress::Schema(ContentHash::from("Schema A")), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add Schema A"); + let schema_variant_id = initial_change_set + .generate_ulid() + .expect("Unable to generate Ulid"); + let schema_variant_index = initial_graph + .add_node( + NodeWeight::new_content( + initial_change_set, + schema_variant_id, + ContentAddress::SchemaVariant(ContentHash::from("Schema Variant A")), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add Schema Variant A"); + + initial_graph + .add_edge( + initial_graph.root_index, + EdgeWeight::new(initial_change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + schema_index, + ) + .expect("Unable to add root -> schema edge"); + initial_graph + .add_edge( + initial_graph + .get_node_index_by_id(schema_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(initial_change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + schema_variant_index, + ) + .expect("Unable to add schema -> schema variant edge"); + + let container_prop_id = initial_change_set + .generate_ulid() + .expect("Unable to generate Ulid"); + let container_prop_index = initial_graph + .add_ordered_node( + initial_change_set, + NodeWeight::new_content( + initial_change_set, + container_prop_id, + ContentAddress::Prop(ContentHash::new( + container_prop_id.to_string().as_bytes(), + )), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add container prop"); + initial_graph + .add_edge( + initial_graph + .get_node_index_by_id(schema_variant_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(initial_change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + container_prop_index, + ) + .expect("Unable to add schema variant -> container prop edge"); + + let ordered_prop_1_id = initial_change_set + .generate_ulid() + .expect("Unable to generate Ulid"); + let ordered_prop_1_index = initial_graph + .add_node( + NodeWeight::new_content( + initial_change_set, + ordered_prop_1_id, + ContentAddress::Prop(ContentHash::new( + ordered_prop_1_id.to_string().as_bytes(), + )), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add ordered prop 1"); + initial_graph + .add_ordered_edge( + initial_change_set, + initial_graph + .get_node_index_by_id(container_prop_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(initial_change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + ordered_prop_1_index, + ) + .expect("Unable to add container prop -> ordered prop 1 edge"); + + let ordered_prop_2_id = initial_change_set + .generate_ulid() + .expect("Unable to generate Ulid"); + let ordered_prop_2_index = initial_graph + .add_node( + NodeWeight::new_content( + initial_change_set, + ordered_prop_2_id, + ContentAddress::Prop(ContentHash::new( + ordered_prop_2_id.to_string().as_bytes(), + )), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add ordered prop 2"); + initial_graph + .add_ordered_edge( + initial_change_set, + initial_graph + .get_node_index_by_id(container_prop_id) + .expect("Unable to get NodeIndex"), + 
EdgeWeight::new(initial_change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + ordered_prop_2_index, + ) + .expect("Unable to add container prop -> ordered prop 2 edge"); + + let ordered_prop_3_id = initial_change_set + .generate_ulid() + .expect("Unable to generate Ulid"); + let ordered_prop_3_index = initial_graph + .add_node( + NodeWeight::new_content( + initial_change_set, + ordered_prop_3_id, + ContentAddress::Prop(ContentHash::new( + ordered_prop_3_id.to_string().as_bytes(), + )), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add ordered prop 3"); + initial_graph + .add_ordered_edge( + initial_change_set, + initial_graph + .get_node_index_by_id(container_prop_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(initial_change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + ordered_prop_3_index, + ) + .expect("Unable to add container prop -> ordered prop 3 edge"); + + let ordered_prop_4_id = initial_change_set + .generate_ulid() + .expect("Unable to generate Ulid"); + let ordered_prop_4_index = initial_graph + .add_node( + NodeWeight::new_content( + initial_change_set, + ordered_prop_4_id, + ContentAddress::Prop(ContentHash::new( + ordered_prop_4_id.to_string().as_bytes(), + )), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add ordered prop 4"); + initial_graph + .add_ordered_edge( + initial_change_set, + initial_graph + .get_node_index_by_id(container_prop_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(initial_change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + ordered_prop_4_index, + ) + .expect("Unable to add container prop -> ordered prop 4 edge"); + + initial_graph.dot(); + + let new_change_set = ChangeSet::new_local().expect("Unable to create ChangeSet"); + let new_change_set = &new_change_set; + let new_graph = initial_graph.clone(); + + let ordered_prop_5_id = initial_change_set + .generate_ulid() + .expect("Unable to generate Ulid"); + let ordered_prop_5_index = initial_graph + .add_node( + NodeWeight::new_content( + initial_change_set, + ordered_prop_5_id, + ContentAddress::Prop(ContentHash::new( + ordered_prop_5_id.to_string().as_bytes(), + )), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add ordered prop 5"); + let new_edge_weight = EdgeWeight::new(initial_change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"); + let (_, maybe_ordinal_edge_information) = initial_graph + .add_ordered_edge( + initial_change_set, + initial_graph + .get_node_index_by_id(container_prop_id) + .expect("Unable to get NodeIndex"), + new_edge_weight.clone(), + ordered_prop_5_index, + ) + .expect("Unable to add container prop -> ordered prop 5 edge"); + let ( + ordinal_edge_index, + source_node_index_for_ordinal_edge, + destination_node_index_for_ordinal_edge, + ) = maybe_ordinal_edge_information.expect("ordinal edge information not found"); + let ordinal_edge_weight = initial_graph + .get_edge_weight_opt(ordinal_edge_index) + .expect("should not error when getting edge") + .expect("could not get edge weight for index") + .to_owned(); + let source_node_id_for_ordinal_edge = initial_graph + .get_node_weight(source_node_index_for_ordinal_edge) + .expect("could not get node weight") + .id(); + let destination_node_id_for_ordinal_edge = initial_graph + .get_node_weight(destination_node_index_for_ordinal_edge) + .expect("could not get node weight") + .id(); + + new_graph.dot(); + + let (conflicts, 
updates) = new_graph
+            .detect_conflicts_and_updates(
+                new_change_set.vector_clock_id(),
+                &initial_graph,
+                initial_change_set.vector_clock_id(),
+            )
+            .expect("Unable to detect conflicts and updates");
+
+        assert_eq!(Vec::<Conflict>::new(), conflicts);
+        assert_eq!(
+            vec![
+                Update::NewEdge {
+                    source: new_graph
+                        .get_node_index_by_id(container_prop_id)
+                        .expect("Unable to get NodeIndex"),
+                    destination: initial_graph
+                        .get_node_index_by_id(ordered_prop_5_id)
+                        .expect("Unable to get NodeIndex"),
+                    edge_weight: new_edge_weight,
+                },
+                Update::ReplaceSubgraph {
+                    onto: initial_graph
+                        .ordering_node_index_for_container(
+                            initial_graph
+                                .get_node_index_by_id(container_prop_id)
+                                .expect("Unable to get container NodeIndex")
+                        )
+                        .expect("Unable to get new ordering NodeIndex")
+                        .expect("Ordering NodeIndex not found"),
+                    to_rebase: new_graph
+                        .ordering_node_index_for_container(
+                            new_graph
+                                .get_node_index_by_id(container_prop_id)
+                                .expect("Unable to get container NodeIndex")
+                        )
+                        .expect("Unable to get old ordering NodeIndex")
+                        .expect("Ordering NodeIndex not found"),
+                },
+                Update::NewEdge {
+                    source: new_graph
+                        .get_node_index_by_id(source_node_id_for_ordinal_edge)
+                        .expect("could not get node index by id"),
+                    destination: initial_graph
+                        .get_node_index_by_id(destination_node_id_for_ordinal_edge)
+                        .expect("could not get node index by id"),
+                    edge_weight: ordinal_edge_weight,
+                }
+            ],
+            updates
+        );
+    }
+
+    #[test]
+    fn detect_conflicts_and_updates_simple_ordering_with_conflicting_ordering_updates() {
+        let initial_change_set = ChangeSet::new_local().expect("Unable to create ChangeSet");
+        let initial_change_set = &initial_change_set;
+        let mut initial_graph = WorkspaceSnapshotGraph::new(initial_change_set)
+            .expect("Unable to create WorkspaceSnapshotGraph");
+
+        let schema_id = initial_change_set
+            .generate_ulid()
+            .expect("Unable to generate Ulid");
+        let schema_index = initial_graph
+            .add_node(
+                NodeWeight::new_content(
+                    initial_change_set,
+                    schema_id,
+                    ContentAddress::Schema(ContentHash::from("Schema A")),
+                )
+                .expect("Unable to create NodeWeight"),
+            )
+            .expect("Unable to add Schema A");
+        let schema_variant_id = initial_change_set
+            .generate_ulid()
+            .expect("Unable to generate Ulid");
+        let schema_variant_index = initial_graph
+            .add_node(
+                NodeWeight::new_content(
+                    initial_change_set,
+                    schema_variant_id,
+                    ContentAddress::SchemaVariant(ContentHash::from("Schema Variant A")),
+                )
+                .expect("Unable to create NodeWeight"),
+            )
+            .expect("Unable to add Schema Variant A");
+
+        initial_graph
+            .add_edge(
+                initial_graph.root_index,
+                EdgeWeight::new(initial_change_set, EdgeWeightKind::new_use())
+                    .expect("Unable to create EdgeWeight"),
+                schema_index,
+            )
+            .expect("Unable to add root -> schema edge");
+        initial_graph
+            .add_edge(
+                initial_graph
+                    .get_node_index_by_id(schema_id)
+                    .expect("Unable to get NodeIndex"),
+                EdgeWeight::new(initial_change_set, EdgeWeightKind::new_use())
+                    .expect("Unable to create EdgeWeight"),
+                schema_variant_index,
+            )
+            .expect("Unable to add schema -> schema variant edge");
+
+        let container_prop_id = initial_change_set
+            .generate_ulid()
+            .expect("Unable to generate Ulid");
+        let container_prop_index = initial_graph
+            .add_ordered_node(
+                initial_change_set,
+                NodeWeight::new_content(
+                    initial_change_set,
+                    container_prop_id,
+                    ContentAddress::Prop(ContentHash::new(
+                        container_prop_id.to_string().as_bytes(),
+                    )),
+                )
+                .expect("Unable to create NodeWeight"),
+            )
+            .expect("Unable to add container prop");
+        initial_graph
+
.add_edge( + initial_graph + .get_node_index_by_id(schema_variant_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(initial_change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + container_prop_index, + ) + .expect("Unable to add schema variant -> container prop edge"); + + let ordered_prop_1_id = initial_change_set + .generate_ulid() + .expect("Unable to generate Ulid"); + let ordered_prop_1_index = initial_graph + .add_node( + NodeWeight::new_content( + initial_change_set, + ordered_prop_1_id, + ContentAddress::Prop(ContentHash::new( + ordered_prop_1_id.to_string().as_bytes(), + )), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add ordered prop 1"); + initial_graph + .add_ordered_edge( + initial_change_set, + initial_graph + .get_node_index_by_id(container_prop_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(initial_change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + ordered_prop_1_index, + ) + .expect("Unable to add container prop -> ordered prop 1 edge"); + + let ordered_prop_2_id = initial_change_set + .generate_ulid() + .expect("Unable to generate Ulid"); + let ordered_prop_2_index = initial_graph + .add_node( + NodeWeight::new_content( + initial_change_set, + ordered_prop_2_id, + ContentAddress::Prop(ContentHash::new( + ordered_prop_2_id.to_string().as_bytes(), + )), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add ordered prop 2"); + initial_graph + .add_ordered_edge( + initial_change_set, + initial_graph + .get_node_index_by_id(container_prop_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(initial_change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + ordered_prop_2_index, + ) + .expect("Unable to add container prop -> ordered prop 2 edge"); + + let ordered_prop_3_id = initial_change_set + .generate_ulid() + .expect("Unable to generate Ulid"); + let ordered_prop_3_index = initial_graph + .add_node( + NodeWeight::new_content( + initial_change_set, + ordered_prop_3_id, + ContentAddress::Prop(ContentHash::new( + ordered_prop_3_id.to_string().as_bytes(), + )), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add ordered prop 3"); + initial_graph + .add_ordered_edge( + initial_change_set, + initial_graph + .get_node_index_by_id(container_prop_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(initial_change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + ordered_prop_3_index, + ) + .expect("Unable to add container prop -> ordered prop 3 edge"); + + let ordered_prop_4_id = initial_change_set + .generate_ulid() + .expect("Unable to generate Ulid"); + let ordered_prop_4_index = initial_graph + .add_node( + NodeWeight::new_content( + initial_change_set, + ordered_prop_4_id, + ContentAddress::Prop(ContentHash::new( + ordered_prop_4_id.to_string().as_bytes(), + )), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add ordered prop 4"); + initial_graph + .add_ordered_edge( + initial_change_set, + initial_graph + .get_node_index_by_id(container_prop_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(initial_change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + ordered_prop_4_index, + ) + .expect("Unable to add container prop -> ordered prop 4 edge"); + + initial_graph.dot(); + + let new_change_set = ChangeSet::new_local().expect("Unable to create ChangeSet"); + let new_change_set = &new_change_set; + let mut 
new_graph = initial_graph.clone(); + + let new_order = vec![ + ordered_prop_2_id, + ordered_prop_1_id, + ordered_prop_4_id, + ordered_prop_3_id, + ]; + new_graph + .update_order(new_change_set, container_prop_id, new_order) + .expect("Unable to update order of container prop's children"); + + let ordered_prop_5_id = initial_change_set + .generate_ulid() + .expect("Unable to generate Ulid"); + let ordered_prop_5_index = initial_graph + .add_node( + NodeWeight::new_content( + initial_change_set, + ordered_prop_5_id, + ContentAddress::Prop(ContentHash::new( + ordered_prop_5_id.to_string().as_bytes(), + )), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add ordered prop 5"); + let new_edge_weight = EdgeWeight::new(initial_change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"); + let (_, maybe_ordinal_edge_information) = initial_graph + .add_ordered_edge( + initial_change_set, + initial_graph + .get_node_index_by_id(container_prop_id) + .expect("Unable to get NodeIndex"), + new_edge_weight.clone(), + ordered_prop_5_index, + ) + .expect("Unable to add container prop -> ordered prop 5 edge"); + let ( + ordinal_edge_index, + source_node_index_for_ordinal_edge, + destination_node_index_for_ordinal_edge, + ) = maybe_ordinal_edge_information.expect("ordinal edge information not found"); + let ordinal_edge_weight = initial_graph + .get_edge_weight_opt(ordinal_edge_index) + .expect("should not error when getting edge") + .expect("could not get edge weight for index") + .to_owned(); + let source_node_id_for_ordinal_edge = initial_graph + .get_node_weight(source_node_index_for_ordinal_edge) + .expect("could not get node weight") + .id(); + let destination_node_id_for_ordinal_edge = initial_graph + .get_node_weight(destination_node_index_for_ordinal_edge) + .expect("could not get node weight") + .id(); + + new_graph.dot(); + + let (conflicts, updates) = new_graph + .detect_conflicts_and_updates( + new_change_set.vector_clock_id(), + &initial_graph, + initial_change_set.vector_clock_id(), + ) + .expect("Unable to detect conflicts and updates"); + + assert_eq!( + vec![Conflict::ChildOrder { + onto: initial_graph + .ordering_node_index_for_container( + initial_graph + .get_node_index_by_id(container_prop_id) + .expect("Unable to get container NodeIndex") + ) + .expect("Unable to get ordering NodeIndex") + .expect("Ordering NodeIndex not found"), + to_rebase: new_graph + .ordering_node_index_for_container( + new_graph + .get_node_index_by_id(container_prop_id) + .expect("Unable to get container NodeIndex") + ) + .expect("Unable to get ordering NodeIndex") + .expect("Ordering NodeIndex not found"), + }], + conflicts + ); + assert_eq!( + vec![ + Update::NewEdge { + source: new_graph + .get_node_index_by_id(container_prop_id) + .expect("Unable to get new_graph container NodeIndex"), + destination: initial_graph + .get_node_index_by_id(ordered_prop_5_id) + .expect("Unable to get ordered prop 5 NodeIndex"), + edge_weight: new_edge_weight, + }, + Update::NewEdge { + source: new_graph + .get_node_index_by_id(source_node_id_for_ordinal_edge) + .expect("could not get node index by id"), + destination: initial_graph + .get_node_index_by_id(destination_node_id_for_ordinal_edge) + .expect("could not get node index by id"), + edge_weight: ordinal_edge_weight, + } + ], + updates + ); + } + + #[test] + fn detect_conflicts_and_updates_simple_ordering_with_no_conflicts_add_in_onto_remove_in_to_rebase( + ) { + let initial_change_set = ChangeSet::new_local().expect("Unable to 
create ChangeSet"); + let initial_change_set = &initial_change_set; + let mut initial_graph = WorkspaceSnapshotGraph::new(initial_change_set) + .expect("Unable to create WorkspaceSnapshotGraph"); + + let schema_id = initial_change_set + .generate_ulid() + .expect("Unable to generate Ulid"); + let schema_index = initial_graph + .add_node( + NodeWeight::new_content( + initial_change_set, + schema_id, + ContentAddress::Schema(ContentHash::from("Schema A")), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add Schema A"); + let schema_variant_id = initial_change_set + .generate_ulid() + .expect("Unable to generate Ulid"); + let schema_variant_index = initial_graph + .add_node( + NodeWeight::new_content( + initial_change_set, + schema_variant_id, + ContentAddress::SchemaVariant(ContentHash::from("Schema Variant A")), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add Schema Variant A"); + + initial_graph + .add_edge( + initial_graph.root_index, + EdgeWeight::new(initial_change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + schema_index, + ) + .expect("Unable to add root -> schema edge"); + initial_graph + .add_edge( + initial_graph + .get_node_index_by_id(schema_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(initial_change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + schema_variant_index, + ) + .expect("Unable to add schema -> schema variant edge"); + + let container_prop_id = initial_change_set + .generate_ulid() + .expect("Unable to generate Ulid"); + let container_prop_index = initial_graph + .add_ordered_node( + initial_change_set, + NodeWeight::new_content( + initial_change_set, + container_prop_id, + ContentAddress::Prop(ContentHash::new( + container_prop_id.to_string().as_bytes(), + )), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add container prop"); + initial_graph + .add_edge( + initial_graph + .get_node_index_by_id(schema_variant_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(initial_change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + container_prop_index, + ) + .expect("Unable to add schema variant -> container prop edge"); + + let ordered_prop_1_id = initial_change_set + .generate_ulid() + .expect("Unable to generate Ulid"); + let ordered_prop_1_index = initial_graph + .add_node( + NodeWeight::new_content( + initial_change_set, + ordered_prop_1_id, + ContentAddress::Prop(ContentHash::new( + ordered_prop_1_id.to_string().as_bytes(), + )), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add ordered prop 1"); + initial_graph + .add_ordered_edge( + initial_change_set, + initial_graph + .get_node_index_by_id(container_prop_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(initial_change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + ordered_prop_1_index, + ) + .expect("Unable to add container prop -> ordered prop 1 edge"); + + let ordered_prop_2_id = initial_change_set + .generate_ulid() + .expect("Unable to generate Ulid"); + let ordered_prop_2_index = initial_graph + .add_node( + NodeWeight::new_content( + initial_change_set, + ordered_prop_2_id, + ContentAddress::Prop(ContentHash::new( + ordered_prop_2_id.to_string().as_bytes(), + )), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add ordered prop 2"); + initial_graph + .add_ordered_edge( + initial_change_set, + initial_graph + 
.get_node_index_by_id(container_prop_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(initial_change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + ordered_prop_2_index, + ) + .expect("Unable to add container prop -> ordered prop 2 edge"); + + let ordered_prop_3_id = initial_change_set + .generate_ulid() + .expect("Unable to generate Ulid"); + let ordered_prop_3_index = initial_graph + .add_node( + NodeWeight::new_content( + initial_change_set, + ordered_prop_3_id, + ContentAddress::Prop(ContentHash::new( + ordered_prop_3_id.to_string().as_bytes(), + )), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add ordered prop 3"); + initial_graph + .add_ordered_edge( + initial_change_set, + initial_graph + .get_node_index_by_id(container_prop_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(initial_change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + ordered_prop_3_index, + ) + .expect("Unable to add container prop -> ordered prop 3 edge"); + + let ordered_prop_4_id = initial_change_set + .generate_ulid() + .expect("Unable to generate Ulid"); + let ordered_prop_4_index = initial_graph + .add_node( + NodeWeight::new_content( + initial_change_set, + ordered_prop_4_id, + ContentAddress::Prop(ContentHash::new( + ordered_prop_4_id.to_string().as_bytes(), + )), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add ordered prop 4"); + initial_graph + .add_ordered_edge( + initial_change_set, + initial_graph + .get_node_index_by_id(container_prop_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(initial_change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + ordered_prop_4_index, + ) + .expect("Unable to add container prop -> ordered prop 4 edge"); + + initial_graph.cleanup(); + initial_graph + .mark_graph_seen(initial_change_set.vector_clock_id()) + .expect("Unable to update recently seen information"); + // initial_graph.dot(); + + let new_change_set = ChangeSet::new_local().expect("Unable to create ChangeSet"); + let new_change_set = &new_change_set; + let mut new_graph = initial_graph.clone(); + + new_graph + .remove_edge( + new_change_set, + new_graph + .get_node_index_by_id(container_prop_id) + .expect("Unable to get container NodeIndex"), + ordered_prop_2_index, + EdgeWeightKindDiscriminants::Use, + ) + .expect("Unable to remove container prop -> prop 2 edge"); + + let ordered_prop_5_id = initial_change_set + .generate_ulid() + .expect("Unable to generate Ulid"); + let ordered_prop_5_index = initial_graph + .add_node( + NodeWeight::new_content( + initial_change_set, + ordered_prop_5_id, + ContentAddress::Prop(ContentHash::new( + ordered_prop_5_id.to_string().as_bytes(), + )), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add ordered prop 5"); + + let new_edge_weight = EdgeWeight::new(initial_change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"); + let (_, maybe_ordinal_edge_information) = initial_graph + .add_ordered_edge( + initial_change_set, + initial_graph + .get_node_index_by_id(container_prop_id) + .expect("Unable to get NodeIndex"), + new_edge_weight.clone(), + ordered_prop_5_index, + ) + .expect("Unable to add container prop -> ordered prop 5 edge"); + let ( + ordinal_edge_index, + source_node_index_for_ordinal_edge, + destination_node_index_for_ordinal_edge, + ) = maybe_ordinal_edge_information.expect("ordinal edge information not found"); + let ordinal_edge_weight = initial_graph 
+            .get_edge_weight_opt(ordinal_edge_index)
+            .expect("should not error when getting edge")
+            .expect("could not get edge weight for index")
+            .to_owned();
+        let source_node_id_for_ordinal_edge = initial_graph
+            .get_node_weight(source_node_index_for_ordinal_edge)
+            .expect("could not get node weight")
+            .id();
+        let destination_node_id_for_ordinal_edge = initial_graph
+            .get_node_weight(destination_node_index_for_ordinal_edge)
+            .expect("could not get node weight")
+            .id();
+
+        initial_graph.cleanup();
+        initial_graph.dot();
+
+        new_graph.cleanup();
+        new_graph.dot();
+
+        let (conflicts, updates) = new_graph
+            .detect_conflicts_and_updates(
+                new_change_set.vector_clock_id(),
+                &initial_graph,
+                initial_change_set.vector_clock_id(),
+            )
+            .expect("Unable to detect conflicts and updates");
+
+        assert_eq!(Vec::<Conflict>::new(), conflicts);
+        assert_eq!(
+            vec![
+                Update::NewEdge {
+                    source: new_graph
+                        .get_node_index_by_id(container_prop_id)
+                        .expect("Unable to get new_graph container NodeIndex"),
+                    destination: initial_graph
+                        .get_node_index_by_id(ordered_prop_5_id)
+                        .expect("Unable to get ordered prop 5 NodeIndex"),
+                    edge_weight: new_edge_weight,
+                },
+                Update::NewEdge {
+                    source: new_graph
+                        .get_node_index_by_id(source_node_id_for_ordinal_edge)
+                        .expect("could not get node index by id"),
+                    destination: initial_graph
+                        .get_node_index_by_id(destination_node_id_for_ordinal_edge)
+                        .expect("could not get node index by id"),
+                    edge_weight: ordinal_edge_weight,
+                }
+            ],
+            updates
+        );
+    }
+
+    #[tokio::test]
+    #[cfg(ignore)]
+    async fn attribute_value_build_view() {
+        let change_set = ChangeSet::new_local().expect("Unable to create ChangeSet");
+        let change_set = &change_set;
+        let mut graph = WorkspaceSnapshotGraph::new(change_set)
+            .expect("Unable to create WorkspaceSnapshotGraph");
+        let mut content_store = content_store::LocalStore::default();
+
+        let schema_id = change_set.generate_ulid().expect("Unable to generate Ulid");
+        let schema_content_hash = content_store
+            .add(&serde_json::json!("Schema A"))
+            .expect("Unable to add to content store");
+        let schema_node_index = graph
+            .add_node(
+                NodeWeight::new_content(
+                    change_set,
+                    schema_id,
+                    ContentAddress::Schema(schema_content_hash),
+                )
+                .expect("Unable to create NodeWeight"),
+            )
+            .expect("Unable to add schema");
+        graph
+            .add_edge(
+                graph.root_index,
+                EdgeWeight::new(change_set, EdgeWeightKind::new_use())
+                    .expect("Unable to create EdgeWeight"),
+                schema_node_index,
+            )
+            .expect("Unable to add root -> schema edge");
+
+        let schema_variant_id = change_set.generate_ulid().expect("Unable to generate Ulid");
+        let schema_variant_content_hash = content_store
+            .add(&serde_json::json!("Schema Variant A"))
+            .expect("Unable to add to content store");
+        let schema_variant_node_index = graph
+            .add_node(
+                NodeWeight::new_content(
+                    change_set,
+                    schema_variant_id,
+                    ContentAddress::SchemaVariant(schema_variant_content_hash),
+                )
+                .expect("Unable to create NodeWeight"),
+            )
+            .expect("Unable to add schema variant");
+        graph
+            .add_edge(
+                graph
+                    .get_node_index_by_id(schema_id)
+                    .expect("Unable to get NodeIndex"),
+                EdgeWeight::new(change_set, EdgeWeightKind::new_use())
+                    .expect("Unable to create EdgeWeight"),
+                schema_variant_node_index,
+            )
+            .expect("Unable to add schema -> schema variant edge");
+
+        let root_prop_id = change_set.generate_ulid().expect("Unable to generate Ulid");
+        let root_prop_content_hash = content_store
+            .add(&serde_json::json!("Root prop"))
+            .expect("Unable to add to content store");
+        let
root_prop_node_index = graph + .add_node( + NodeWeight::new_prop( + change_set, + root_prop_id, + PropKind::Object, + "root", + root_prop_content_hash, + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add root prop"); + graph + .add_edge( + graph + .get_node_index_by_id(schema_variant_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + root_prop_node_index, + ) + .expect("Unable to add schema variant -> root prop edge"); + + let si_prop_id = change_set.generate_ulid().expect("Unable to generate Ulid"); + let si_prop_content_hash = content_store + .add(&serde_json::json!("SI Prop Content")) + .expect("Unable to add to content store"); + let si_prop_node_index = graph + .add_node( + NodeWeight::new_prop( + change_set, + si_prop_id, + PropKind::Object, + "si", + si_prop_content_hash, + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add si prop"); + graph + .add_edge( + graph + .get_node_index_by_id(root_prop_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + si_prop_node_index, + ) + .expect("Unable to add root prop -> si prop edge"); + + let name_prop_id = change_set.generate_ulid().expect("Unable to generate Ulid"); + let name_prop_content_hash = content_store + .add(&serde_json::json!("Name Prop Content")) + .expect("Unable to add to content store"); + let name_prop_node_index = graph + .add_node( + NodeWeight::new_prop( + change_set, + name_prop_id, + PropKind::Object, + "name", + name_prop_content_hash, + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add name prop"); + graph + .add_edge( + graph + .get_node_index_by_id(si_prop_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + name_prop_node_index, + ) + .expect("Unable to add si prop -> name prop edge"); + + let component_id = change_set.generate_ulid().expect("Unable to generate Ulid"); + let component_content_hash = content_store + .add(&serde_json::json!("Component Content")) + .expect("Unable to add to content store"); + let component_node_index = graph + .add_node( + NodeWeight::new_content( + change_set, + component_id, + ContentAddress::Component(component_content_hash), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add component"); + graph + .add_edge( + graph.root_index, + EdgeWeight::new(change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + component_node_index, + ) + .expect("Unable to add root -> component edge"); + graph + .add_edge( + graph + .get_node_index_by_id(component_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + graph + .get_node_index_by_id(schema_variant_id) + .expect("Unable to get NodeIndex"), + ) + .expect("Unable to add component -> schema variant edge"); + + let root_av_id = change_set.generate_ulid().expect("Unable to generate Ulid"); + let root_av_content_hash = content_store + .add(&serde_json::json!({})) + .expect("Unable to add to content store"); + let root_av_node_index = graph + .add_node( + NodeWeight::new_content( + change_set, + root_av_id, + ContentAddress::AttributeValue(root_av_content_hash), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add root av"); + graph + .add_edge( + graph + 
.get_node_index_by_id(component_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + root_av_node_index, + ) + .expect("Unable to add component -> root av edge"); + graph + .add_edge( + graph + .get_node_index_by_id(root_av_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(change_set, EdgeWeightKind::Prop) + .expect("Unable to create EdgeWeight"), + graph + .get_node_index_by_id(root_prop_id) + .expect("Unable to get NodeIndex"), + ) + .expect("Unable to add root av -> root prop edge"); + + let si_av_id = change_set.generate_ulid().expect("Unable to generate Ulid"); + let si_av_content_hash = content_store + .add(&serde_json::json!({})) + .expect("Unable to add to content store"); + let si_av_node_index = graph + .add_node( + NodeWeight::new_content( + change_set, + si_av_id, + ContentAddress::AttributeValue(si_av_content_hash), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add si av"); + graph + .add_edge( + graph + .get_node_index_by_id(root_av_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(change_set, EdgeWeightKind::Contain(None)) + .expect("Unable to create EdgeWeight"), + si_av_node_index, + ) + .expect("Unable to add root av -> si av edge"); + graph + .add_edge( + graph + .get_node_index_by_id(si_av_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(change_set, EdgeWeightKind::Prop) + .expect("Unable to create EdgeWeight"), + graph + .get_node_index_by_id(si_prop_id) + .expect("Unable to get NodeIndex"), + ) + .expect("Unable to add si av -> si prop edge"); + + let name_av_id = change_set.generate_ulid().expect("Unable to generate Ulid"); + let name_av_content_hash = content_store + .add(&serde_json::json!("component name")) + .expect("Unable to add to content store"); + let name_av_node_index = graph + .add_node( + NodeWeight::new_content( + change_set, + name_av_id, + ContentAddress::AttributeValue(name_av_content_hash), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add name av"); + graph + .add_edge( + graph + .get_node_index_by_id(si_av_id) + .expect("Unable to get NodeWeight"), + EdgeWeight::new(change_set, EdgeWeightKind::Contain(None)) + .expect("Unable to create EdgeWeight"), + name_av_node_index, + ) + .expect("Unable to add si av -> name av edge"); + graph + .add_edge( + graph + .get_node_index_by_id(name_av_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(change_set, EdgeWeightKind::Prop) + .expect("Unable to create EdgeWeight"), + graph + .get_node_index_by_id(name_prop_id) + .expect("Unable to get NodeIndex"), + ) + .expect("Unable to create name av -> name prop edge"); + + graph.cleanup(); + graph.dot(); + + assert_eq!( + serde_json::json![{"si": {"name": "component name"}}], + graph + .attribute_value_view( + &mut content_store, + graph + .get_node_index_by_id(root_av_id) + .expect("Unable to get NodeIndex"), + ) + .await + .expect("Unable to generate attribute value view"), + ); + } + + #[tokio::test] + #[cfg(ignore)] + async fn attribute_value_build_view_unordered_object() { + let change_set = ChangeSet::new_local().expect("Unable to create ChangeSet"); + let change_set = &change_set; + let mut graph = WorkspaceSnapshotGraph::new(change_set) + .expect("Unable to create WorkspaceSnapshotGraph"); + let mut content_store = content_store::LocalStore::default(); + + let schema_id = change_set.generate_ulid().expect("Unable to generate Ulid"); + let schema_content_hash = content_store + 
.add(&serde_json::json!("Schema A")) + .expect("Unable to add to content store"); + let schema_node_index = graph + .add_node( + NodeWeight::new_content( + change_set, + schema_id, + ContentAddress::Schema(schema_content_hash), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add schema"); + graph + .add_edge( + graph.root_index, + EdgeWeight::new(change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + schema_node_index, + ) + .expect("Unable to add root -> schema edge"); + + let schema_variant_id = change_set.generate_ulid().expect("Unable to generate Ulid"); + let schema_variant_content_hash = content_store + .add(&serde_json::json!("Schema Variant A")) + .expect("Unable to add to content store"); + let schema_variant_node_index = graph + .add_node( + NodeWeight::new_content( + change_set, + schema_variant_id, + ContentAddress::SchemaVariant(schema_variant_content_hash), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add schema variant"); + graph + .add_edge( + graph + .get_node_index_by_id(schema_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + schema_variant_node_index, + ) + .expect("Unable to add schema -> schema variant edge"); + + let root_prop_id = change_set.generate_ulid().expect("Unable to generate Ulid"); + let root_prop_content_hash = content_store + .add(&serde_json::json!("Root prop")) + .expect("Unable to add to content store"); + let root_prop_node_index = graph + .add_node( + NodeWeight::new_prop( + change_set, + root_prop_id, + PropKind::Object, + "root", + root_prop_content_hash, + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add root prop"); + graph + .add_edge( + graph + .get_node_index_by_id(schema_variant_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + root_prop_node_index, + ) + .expect("Unable to add schema variant -> root prop edge"); + + let si_prop_id = change_set.generate_ulid().expect("Unable to generate Ulid"); + let si_prop_content_hash = content_store + .add(&serde_json::json!("SI Prop Content")) + .expect("Unable to add to content store"); + let si_prop_node_index = graph + .add_node( + NodeWeight::new_prop( + change_set, + si_prop_id, + PropKind::Object, + "si", + si_prop_content_hash, + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add si prop"); + graph + .add_edge( + graph + .get_node_index_by_id(root_prop_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + si_prop_node_index, + ) + .expect("Unable to add root prop -> si prop edge"); + + let name_prop_id = change_set.generate_ulid().expect("Unable to generate Ulid"); + let name_prop_content_hash = content_store + .add(&serde_json::json!("Name Prop Content")) + .expect("Unable to add to content store"); + let name_prop_node_index = graph + .add_node( + NodeWeight::new_prop( + change_set, + name_prop_id, + PropKind::Object, + "name", + name_prop_content_hash, + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add name prop"); + graph + .add_edge( + graph + .get_node_index_by_id(si_prop_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + name_prop_node_index, + ) + .expect("Unable to add si prop -> 
name prop edge"); + + let description_prop_id = change_set.generate_ulid().expect("Unable to generate Ulid"); + let description_prop_content_hash = content_store + .add(&serde_json::json!("Description Prop Content")) + .expect("Unable to add to content store"); + let description_prop_node_index = graph + .add_node( + NodeWeight::new_prop( + change_set, + description_prop_id, + PropKind::String, + "description", + description_prop_content_hash, + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add description prop"); + graph + .add_edge( + graph + .get_node_index_by_id(si_prop_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + description_prop_node_index, + ) + .expect("Unable to add si prop -> description prop edge"); + + let component_id = change_set.generate_ulid().expect("Unable to generate Ulid"); + let component_content_hash = content_store + .add(&serde_json::json!("Component Content")) + .expect("Unable to add to content store"); + let component_node_index = graph + .add_node( + NodeWeight::new_content( + change_set, + component_id, + ContentAddress::Component(component_content_hash), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add component"); + graph + .add_edge( + graph.root_index, + EdgeWeight::new(change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + component_node_index, + ) + .expect("Unable to add root -> component edge"); + graph + .add_edge( + graph + .get_node_index_by_id(component_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + graph + .get_node_index_by_id(schema_variant_id) + .expect("Unable to get NodeIndex"), + ) + .expect("Unable to add component -> schema variant edge"); + + let root_av_id = change_set.generate_ulid().expect("Unable to generate Ulid"); + let root_av_content_hash = content_store + .add(&serde_json::json!({})) + .expect("Unable to add to content store"); + let root_av_node_index = graph + .add_node( + NodeWeight::new_content( + change_set, + root_av_id, + ContentAddress::AttributeValue(root_av_content_hash), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add root av"); + graph + .add_edge( + graph + .get_node_index_by_id(component_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + root_av_node_index, + ) + .expect("Unable to add component -> root av edge"); + graph + .add_edge( + graph + .get_node_index_by_id(root_av_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(change_set, EdgeWeightKind::Prop) + .expect("Unable to create EdgeWeight"), + graph + .get_node_index_by_id(root_prop_id) + .expect("Unable to get NodeIndex"), + ) + .expect("Unable to add root av -> root prop edge"); + + let si_av_id = change_set.generate_ulid().expect("Unable to generate Ulid"); + let si_av_content_hash = content_store + .add(&serde_json::json!({})) + .expect("Unable to add to content store"); + let si_av_node_index = graph + .add_node( + NodeWeight::new_content( + change_set, + si_av_id, + ContentAddress::AttributeValue(si_av_content_hash), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add si av"); + graph + .add_edge( + graph + .get_node_index_by_id(root_av_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(change_set, EdgeWeightKind::Contain(None)) + 
.expect("Unable to create EdgeWeight"), + si_av_node_index, + ) + .expect("Unable to add root av -> si av edge"); + graph + .add_edge( + graph + .get_node_index_by_id(si_av_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(change_set, EdgeWeightKind::Prop) + .expect("Unable to create EdgeWeight"), + graph + .get_node_index_by_id(si_prop_id) + .expect("Unable to get NodeIndex"), + ) + .expect("Unable to add si av -> si prop edge"); + + let name_av_id = change_set.generate_ulid().expect("Unable to generate Ulid"); + let name_av_content_hash = content_store + .add(&serde_json::json!("component name")) + .expect("Unable to add to content store"); + let name_av_node_index = graph + .add_node( + NodeWeight::new_content( + change_set, + name_av_id, + ContentAddress::AttributeValue(name_av_content_hash), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add name av"); + graph + .add_edge( + graph + .get_node_index_by_id(si_av_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(change_set, EdgeWeightKind::Contain(None)) + .expect("Unable to create EdgeWeight"), + name_av_node_index, + ) + .expect("Unable to add si av -> name av edge"); + graph + .add_edge( + graph + .get_node_index_by_id(name_av_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(change_set, EdgeWeightKind::Prop) + .expect("Unable to create EdgeWeight"), + graph + .get_node_index_by_id(name_prop_id) + .expect("Unable to get NodeIndex"), + ) + .expect("Unable to create name av -> name prop edge"); + + let description_av_id = change_set.generate_ulid().expect("Unable to generate Ulid"); + let description_av_content_hash = content_store + .add(&serde_json::json!("Component description")) + .expect("Unable to add to content store"); + let description_av_node_index = graph + .add_node( + NodeWeight::new_content( + change_set, + description_av_id, + ContentAddress::AttributeValue(description_av_content_hash), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add description av"); + graph + .add_edge( + graph + .get_node_index_by_id(si_av_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(change_set, EdgeWeightKind::Contain(None)) + .expect("Unable to create EdgeWeight"), + description_av_node_index, + ) + .expect("Unable to add si av -> description av edge"); + graph + .add_edge( + graph + .get_node_index_by_id(description_av_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(change_set, EdgeWeightKind::Prop) + .expect("Unable to create EdgeWeight"), + graph + .get_node_index_by_id(description_prop_id) + .expect("Unable to get NodeIndex"), + ) + .expect("Unable to add description av -> description prop edge"); + + graph.cleanup(); + graph.dot(); + + assert_eq!( + serde_json::json![{ + "si": { + "description": "Component description", + "name": "component name", + } + }], + graph + .attribute_value_view( + &mut content_store, + graph + .get_node_index_by_id(root_av_id) + .expect("Unable to get NodeIndex"), + ) + .await + .expect("Unable to generate attribute value view"), + ); + } + + #[tokio::test] + #[cfg(ignore)] + async fn attribute_value_build_view_ordered_array() { + let change_set = ChangeSet::new_local().expect("Unable to create ChangeSet"); + let change_set = &change_set; + let mut graph = WorkspaceSnapshotGraph::new(change_set) + .expect("Unable to create WorkspaceSnapshotGraph"); + let mut content_store = content_store::LocalStore::default(); + + let schema_id = change_set.generate_ulid().expect("Unable to generate Ulid"); + let 
schema_content_hash = content_store + .add(&serde_json::json!("Schema A")) + .expect("Unable to add to content store"); + let schema_node_index = graph + .add_node( + NodeWeight::new_content( + change_set, + schema_id, + ContentAddress::Schema(schema_content_hash), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add schema"); + graph + .add_edge( + graph.root_index, + EdgeWeight::new(change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + schema_node_index, + ) + .expect("Unable to add root -> schema edge"); + + let schema_variant_id = change_set.generate_ulid().expect("Unable to generate Ulid"); + let schema_variant_content_hash = content_store + .add(&serde_json::json!("Schema Variant A")) + .expect("Unable to add to content store"); + let schema_variant_node_index = graph + .add_node( + NodeWeight::new_content( + change_set, + schema_variant_id, + ContentAddress::SchemaVariant(schema_variant_content_hash), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add schema variant"); + graph + .add_edge( + graph + .get_node_index_by_id(schema_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + schema_variant_node_index, + ) + .expect("Unable to add schema -> schema variant edge"); + + let root_prop_id = change_set.generate_ulid().expect("Unable to generate Ulid"); + let root_prop_content_hash = content_store + .add(&serde_json::json!("Root prop")) + .expect("Unable to add to content store"); + let root_prop_node_index = graph + .add_node( + NodeWeight::new_prop( + change_set, + root_prop_id, + PropKind::Object, + "root", + root_prop_content_hash, + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add root prop"); + graph + .add_edge( + graph + .get_node_index_by_id(schema_variant_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + root_prop_node_index, + ) + .expect("Unable to add schema variant -> root prop edge"); + + let domain_prop_id = change_set.generate_ulid().expect("Unable to generate Ulid"); + let domain_prop_content_hash = content_store + .add(&serde_json::json!("domain Prop Content")) + .expect("Unable to add to content store"); + let domain_prop_node_index = graph + .add_node( + NodeWeight::new_prop( + change_set, + domain_prop_id, + PropKind::Object, + "domain", + domain_prop_content_hash, + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add domain prop"); + graph + .add_edge( + graph + .get_node_index_by_id(root_prop_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + domain_prop_node_index, + ) + .expect("Unable to add root prop -> domain prop edge"); + + let ports_prop_id = change_set.generate_ulid().expect("Unable to generate Ulid"); + let ports_prop_content_hash = content_store + .add(&serde_json::json!("ports Prop Content")) + .expect("Unable to add to content store"); + let ports_prop_node_index = graph + .add_node( + NodeWeight::new_prop( + change_set, + ports_prop_id, + PropKind::Array, + "ports", + ports_prop_content_hash, + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add ports prop"); + graph + .add_edge( + graph + .get_node_index_by_id(domain_prop_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(change_set, EdgeWeightKind::new_use()) + .expect("Unable to 
create EdgeWeight"), + ports_prop_node_index, + ) + .expect("Unable to add domain prop -> ports prop edge"); + + let port_prop_id = change_set.generate_ulid().expect("Unable to generate Ulid"); + let port_prop_content_hash = content_store + .add(&serde_json::json!("port Prop Content")) + .expect("Unable to add to content store"); + let port_prop_node_index = graph + .add_node( + NodeWeight::new_prop( + change_set, + port_prop_id, + PropKind::String, + "port", + port_prop_content_hash, + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add port prop"); + graph + .add_edge( + graph + .get_node_index_by_id(ports_prop_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + port_prop_node_index, + ) + .expect("Unable to add ports prop -> port prop edge"); + + let component_id = change_set.generate_ulid().expect("Unable to generate Ulid"); + let component_content_hash = content_store + .add(&serde_json::json!("Component Content")) + .expect("Unable to add to content store"); + let component_node_index = graph + .add_node( + NodeWeight::new_content( + change_set, + component_id, + ContentAddress::Component(component_content_hash), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add component"); + graph + .add_edge( + graph.root_index, + EdgeWeight::new(change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + component_node_index, + ) + .expect("Unable to add root -> component edge"); + graph + .add_edge( + graph + .get_node_index_by_id(component_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + graph + .get_node_index_by_id(schema_variant_id) + .expect("Unable to get NodeIndex"), + ) + .expect("Unable to add component -> schema variant edge"); + + let root_av_id = change_set.generate_ulid().expect("Unable to generate Ulid"); + let root_av_content_hash = content_store + .add(&serde_json::json!({})) + .expect("Unable to add to content store"); + let root_av_node_index = graph + .add_node( + NodeWeight::new_content( + change_set, + root_av_id, + ContentAddress::AttributeValue(root_av_content_hash), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add root av"); + graph + .add_edge( + graph + .get_node_index_by_id(component_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + root_av_node_index, + ) + .expect("Unable to add component -> root av edge"); + graph + .add_edge( + graph + .get_node_index_by_id(root_av_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(change_set, EdgeWeightKind::Prop) + .expect("Unable to create EdgeWeight"), + graph + .get_node_index_by_id(root_prop_id) + .expect("Unable to get NodeIndex"), + ) + .expect("Unable to add root av -> root prop edge"); + + let domain_av_id = change_set.generate_ulid().expect("Unable to generate Ulid"); + let domain_av_content_hash = content_store + .add(&serde_json::json!({})) + .expect("Unable to add to content store"); + let domain_av_node_index = graph + .add_node( + NodeWeight::new_content( + change_set, + domain_av_id, + ContentAddress::AttributeValue(domain_av_content_hash), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add domain av"); + graph + .add_edge( + graph + .get_node_index_by_id(root_av_id) + .expect("Unable to get NodeIndex"), + 
EdgeWeight::new(change_set, EdgeWeightKind::Contain(None)) + .expect("Unable to create EdgeWeight"), + domain_av_node_index, + ) + .expect("Unable to add root av -> domain av edge"); + graph + .add_edge( + graph + .get_node_index_by_id(domain_av_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(change_set, EdgeWeightKind::Prop) + .expect("Unable to create EdgeWeight"), + graph + .get_node_index_by_id(domain_prop_id) + .expect("Unable to get NodeIndex"), + ) + .expect("Unable to add domain av -> domain prop edge"); + + let ports_av_id = change_set.generate_ulid().expect("Unable to generate Ulid"); + let ports_av_content_hash = content_store + .add(&serde_json::json!([])) + .expect("Unable to add to content store"); + let ports_av_node_index = graph + .add_ordered_node( + change_set, + NodeWeight::new_content( + change_set, + ports_av_id, + ContentAddress::AttributeValue(ports_av_content_hash), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add ports av"); + graph + .add_edge( + graph + .get_node_index_by_id(domain_av_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(change_set, EdgeWeightKind::Contain(None)) + .expect("Unable to create EdgeWeight"), + ports_av_node_index, + ) + .expect("Unable to add domain av -> ports av edge"); + graph + .add_edge( + graph + .get_node_index_by_id(ports_av_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(change_set, EdgeWeightKind::Prop) + .expect("Unable to create EdgeWeight"), + graph + .get_node_index_by_id(ports_prop_id) + .expect("Unable to get NodeIndex"), + ) + .expect("Unable to create ports av -> ports prop edge"); + + let port1_av_id = change_set.generate_ulid().expect("Unable to generate Ulid"); + let port1_av_content_hash = content_store + .add(&serde_json::json!("Port 1")) + .expect("Unable to add to content store"); + let port1_av_node_index = graph + .add_node( + NodeWeight::new_content( + change_set, + port1_av_id, + ContentAddress::AttributeValue(port1_av_content_hash), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add port 1 av"); + graph + .add_ordered_edge( + change_set, + graph + .get_node_index_by_id(ports_av_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(change_set, EdgeWeightKind::Contain(None)) + .expect("Unable to create EdgeWeight"), + port1_av_node_index, + ) + .expect("Unable to add ports av -> port 1 av edge"); + graph + .add_edge( + graph + .get_node_index_by_id(port1_av_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(change_set, EdgeWeightKind::Prop) + .expect("Unable to create EdgeWeight"), + graph + .get_node_index_by_id(port_prop_id) + .expect("Unable to get NodeIndex"), + ) + .expect("Unable to add port 1 av -> port prop edge"); + + let port2_av_id = change_set.generate_ulid().expect("Unable to generate Ulid"); + let port2_av_content_hash = content_store + .add(&serde_json::json!("Port 2")) + .expect("Unable to add to content store"); + let port2_av_node_index = graph + .add_node( + NodeWeight::new_content( + change_set, + port2_av_id, + ContentAddress::AttributeValue(port2_av_content_hash), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add port 2 av"); + graph + .add_ordered_edge( + change_set, + graph + .get_node_index_by_id(ports_av_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(change_set, EdgeWeightKind::Contain(None)) + .expect("Unable to create EdgeWeight"), + port2_av_node_index, + ) + .expect("Unable to add ports av -> port 2 av edge"); + graph + .add_edge( + graph + 
.get_node_index_by_id(port2_av_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(change_set, EdgeWeightKind::Prop) + .expect("Unable to create EdgeWeight"), + graph + .get_node_index_by_id(port_prop_id) + .expect("Unable to get NodeIndex"), + ) + .expect("Unable to add port 2 av -> port prop edge"); + + let port3_av_id = change_set.generate_ulid().expect("Unable to generate Ulid"); + let port3_av_content_hash = content_store + .add(&serde_json::json!("Port 3")) + .expect("Unable to add to content store"); + let port3_av_node_index = graph + .add_node( + NodeWeight::new_content( + change_set, + port3_av_id, + ContentAddress::AttributeValue(port3_av_content_hash), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add port 3 av"); + graph + .add_ordered_edge( + change_set, + graph + .get_node_index_by_id(ports_av_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(change_set, EdgeWeightKind::Contain(None)) + .expect("Unable to create EdgeWeight"), + port3_av_node_index, + ) + .expect("Unable to add ports av -> port 3 av edge"); + graph + .add_edge( + graph + .get_node_index_by_id(port3_av_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(change_set, EdgeWeightKind::Prop) + .expect("Unable to create EdgeWeight"), + graph + .get_node_index_by_id(port_prop_id) + .expect("Unable to get NodeIndex"), + ) + .expect("Unable to add port 3 av -> port prop edge"); + + let port4_av_id = change_set.generate_ulid().expect("Unable to generate Ulid"); + let port4_av_content_hash = content_store + .add(&serde_json::json!("Port 4")) + .expect("Unable to add to content store"); + let port4_av_node_index = graph + .add_node( + NodeWeight::new_content( + change_set, + port4_av_id, + ContentAddress::AttributeValue(port4_av_content_hash), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add port 4 av"); + graph + .add_ordered_edge( + change_set, + graph + .get_node_index_by_id(ports_av_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(change_set, EdgeWeightKind::Contain(None)) + .expect("Unable to create EdgeWeight"), + port4_av_node_index, + ) + .expect("Unable to add ports av -> port 4 av edge"); + graph + .add_edge( + graph + .get_node_index_by_id(port4_av_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(change_set, EdgeWeightKind::Prop) + .expect("Unable to create EdgeWeight"), + graph + .get_node_index_by_id(port_prop_id) + .expect("Unable to get NodeIndex"), + ) + .expect("Unable to add port 4 av -> port prop edge"); + + graph.cleanup(); + graph.dot(); + + assert_eq!( + serde_json::json![{ + "domain": { + "ports": [ + "Port 1", + "Port 2", + "Port 3", + "Port 4", + ], + } + }], + graph + .attribute_value_view( + &mut content_store, + graph + .get_node_index_by_id(root_av_id) + .expect("Unable to get NodeIndex"), + ) + .await + .expect("Unable to generate attribute value view"), + ); + + let new_order = vec![port3_av_id, port1_av_id, port4_av_id, port2_av_id]; + graph + .update_order(change_set, ports_av_id, new_order) + .expect("Unable to update order of ports attribute value's children"); + assert_eq!( + serde_json::json![{ + "domain": { + "ports": [ + "Port 3", + "Port 1", + "Port 4", + "Port 2", + ] + } + }], + graph + .attribute_value_view( + &mut content_store, + graph + .get_node_index_by_id(root_av_id) + .expect("Unable to get NodeIndex"), + ) + .await + .expect("Unable to generate attribute value view"), + ); + + let port5_av_id = change_set.generate_ulid().expect("Unable to generate Ulid"); + let 
port5_av_content_hash = content_store + .add(&serde_json::json!("Port 5")) + .expect("Unable to add to content store"); + let port5_av_node_index = graph + .add_node( + NodeWeight::new_content( + change_set, + port5_av_id, + ContentAddress::AttributeValue(port5_av_content_hash), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add port 5 av"); + graph + .add_ordered_edge( + change_set, + graph + .get_node_index_by_id(ports_av_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(change_set, EdgeWeightKind::Contain(None)) + .expect("Unable to create EdgeWeight"), + port5_av_node_index, + ) + .expect("Unable to add ports av -> port 5 av edge"); + graph + .add_edge( + graph + .get_node_index_by_id(port5_av_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(change_set, EdgeWeightKind::Prop) + .expect("Unable to create EdgeWeight"), + graph + .get_node_index_by_id(port_prop_id) + .expect("Unable to get NodeIndex"), + ) + .expect("Unable to add port 5 av -> port prop edge"); + + assert_eq!( + serde_json::json![{ + "domain": { + "ports": [ + "Port 3", + "Port 1", + "Port 4", + "Port 2", + "Port 5", + ] + } + }], + graph + .attribute_value_view( + &mut content_store, + graph + .get_node_index_by_id(root_av_id) + .expect("Unable to get NodeIndex"), + ) + .await + .expect("Unable to generate attribute value view"), + ); + } + + #[tokio::test] + #[cfg(ignore)] + async fn attribute_value_build_view_ordered_map() { + let change_set = ChangeSet::new_local().expect("Unable to create ChangeSet"); + let change_set = &change_set; + let mut graph = WorkspaceSnapshotGraph::new(change_set) + .expect("Unable to create WorkspaceSnapshotGraph"); + let mut content_store = content_store::LocalStore::default(); + + let schema_id = change_set.generate_ulid().expect("Unable to generate Ulid"); + let schema_content_hash = content_store + .add(&serde_json::json!("Schema A")) + .expect("Unable to add to content store"); + let schema_node_index = graph + .add_node( + NodeWeight::new_content( + change_set, + schema_id, + ContentAddress::Schema(schema_content_hash), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add schema"); + graph + .add_edge( + graph.root_index, + EdgeWeight::new(change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + schema_node_index, + ) + .expect("Unable to add root -> schema edge"); + + let schema_variant_id = change_set.generate_ulid().expect("Unable to generate Ulid"); + let schema_variant_content_hash = content_store + .add(&serde_json::json!("Schema Variant A")) + .expect("Unable to add to content store"); + let schema_variant_node_index = graph + .add_node( + NodeWeight::new_content( + change_set, + schema_variant_id, + ContentAddress::SchemaVariant(schema_variant_content_hash), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add schema variant"); + graph + .add_edge( + graph + .get_node_index_by_id(schema_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + schema_variant_node_index, + ) + .expect("Unable to add schema -> schema variant edge"); + + let root_prop_id = change_set.generate_ulid().expect("Unable to generate Ulid"); + let root_prop_content_hash = content_store + .add(&serde_json::json!("Root prop")) + .expect("Unable to add to content store"); + let root_prop_node_index = graph + .add_node( + NodeWeight::new_prop( + change_set, + root_prop_id, + PropKind::Object, + "root", + 
root_prop_content_hash, + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add root prop"); + graph + .add_edge( + graph + .get_node_index_by_id(schema_variant_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + root_prop_node_index, + ) + .expect("Unable to add schema variant -> root prop edge"); +
+ let domain_prop_id = change_set.generate_ulid().expect("Unable to generate Ulid"); + let domain_prop_content_hash = content_store + .add(&serde_json::json!("domain Prop Content")) + .expect("Unable to add to content store"); + let domain_prop_node_index = graph + .add_node( + NodeWeight::new_prop( + change_set, + domain_prop_id, + PropKind::Object, + "domain", + domain_prop_content_hash, + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add domain prop"); + graph + .add_edge( + graph + .get_node_index_by_id(root_prop_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + domain_prop_node_index, + ) + .expect("Unable to add root prop -> domain prop edge"); +
+ let environment_prop_id = change_set.generate_ulid().expect("Unable to generate Ulid"); + let environment_prop_content_hash = content_store + .add(&serde_json::json!("environment Prop Content")) + .expect("Unable to add to content store"); + let environment_prop_node_index = graph + .add_node( + NodeWeight::new_prop( + change_set, + environment_prop_id, + PropKind::Map, + "environment", + environment_prop_content_hash, + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add environment prop"); + graph + .add_edge( + graph + .get_node_index_by_id(domain_prop_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + environment_prop_node_index, + ) + .expect("Unable to add domain prop -> environment prop edge"); +
+ let env_var_prop_id = change_set.generate_ulid().expect("Unable to generate Ulid"); + let env_var_prop_content_hash = content_store + .add(&serde_json::json!("env_var Prop Content")) + .expect("Unable to add to content store"); + let env_var_prop_node_index = graph + .add_node( + NodeWeight::new_prop( + change_set, + env_var_prop_id, + PropKind::String, + "env_var", + env_var_prop_content_hash, + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add env var prop"); + graph + .add_edge( + graph + .get_node_index_by_id(environment_prop_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + env_var_prop_node_index, + ) + .expect("Unable to add environment prop -> env var prop edge"); +
+ let component_id = change_set.generate_ulid().expect("Unable to generate Ulid"); + let component_content_hash = content_store + .add(&serde_json::json!("Component Content")) + .expect("Unable to add to content store"); + let component_node_index = graph + .add_node( + NodeWeight::new_content( + change_set, + component_id, + ContentAddress::Component(component_content_hash), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add component"); + graph + .add_edge( + graph.root_index, + EdgeWeight::new(change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + component_node_index, + ) + .expect("Unable to add root -> component edge"); + graph + .add_edge( + graph + .get_node_index_by_id(component_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + graph + .get_node_index_by_id(schema_variant_id) + .expect("Unable to get NodeIndex"), + ) + .expect("Unable to add component -> schema variant edge"); +
+ let root_av_id = change_set.generate_ulid().expect("Unable to generate Ulid"); + let root_av_content_hash = content_store + .add(&serde_json::json!({})) + .expect("Unable to add to content store"); + let root_av_node_index = graph + .add_node( + NodeWeight::new_attribute_value(change_set, root_av_id, None, None, None) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add root av"); + graph + .add_edge( + graph + .get_node_index_by_id(component_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(change_set, EdgeWeightKind::new_use()) + .expect("Unable to create EdgeWeight"), + root_av_node_index, + ) + .expect("Unable to add component -> root av edge"); + graph + .add_edge( + graph + .get_node_index_by_id(root_av_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(change_set, EdgeWeightKind::Prop) + .expect("Unable to create EdgeWeight"), + graph + .get_node_index_by_id(root_prop_id) + .expect("Unable to get NodeIndex"), + ) + .expect("Unable to add root av -> root prop edge"); +
+ let domain_av_id = change_set.generate_ulid().expect("Unable to generate Ulid"); + let domain_av_content_hash = content_store + .add(&serde_json::json!({})) + .expect("Unable to add to content store"); + let domain_av_node_index = graph + .add_node( + NodeWeight::new_content( + change_set, + domain_av_id, + ContentAddress::AttributeValue(domain_av_content_hash), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add domain av"); + graph + .add_edge( + graph + .get_node_index_by_id(root_av_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(change_set, EdgeWeightKind::Contain(None)) + .expect("Unable to create EdgeWeight"), + domain_av_node_index, + ) + .expect("Unable to add root av -> domain av edge"); + graph + .add_edge( + graph + .get_node_index_by_id(domain_av_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(change_set, EdgeWeightKind::Prop) + .expect("Unable to create EdgeWeight"), + graph + .get_node_index_by_id(domain_prop_id) + .expect("Unable to get NodeIndex"), + ) + .expect("Unable to add domain av -> domain prop edge"); +
+ let environment_av_id = change_set.generate_ulid().expect("Unable to generate Ulid"); + let environment_av_content_hash = content_store + .add(&serde_json::json!({})) + .expect("Unable to add to content store"); + let environment_av_node_index = graph + .add_ordered_node( + change_set, + NodeWeight::new_content( + change_set, + environment_av_id, + ContentAddress::AttributeValue(environment_av_content_hash), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add environment av"); + graph + .add_edge( + graph + .get_node_index_by_id(domain_av_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(change_set, EdgeWeightKind::Contain(None)) + .expect("Unable to create EdgeWeight"), + environment_av_node_index, + ) + .expect("Unable to add domain av -> environment av edge"); + graph + .add_edge( + graph + .get_node_index_by_id(environment_av_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(change_set, EdgeWeightKind::Prop) + .expect("Unable to create EdgeWeight"), + graph + .get_node_index_by_id(environment_prop_id) + .expect("Unable to get NodeIndex"), + ) + .expect("Unable to create environment av -> environment prop edge"); +
+ let env_var1_av_id = change_set.generate_ulid().expect("Unable to generate Ulid"); + let env_var1_av_content_hash = content_store + .add(&serde_json::json!("1111")) + .expect("Unable to add to content store"); + let env_var1_av_node_index = graph + .add_node( + NodeWeight::new_content( + change_set, + env_var1_av_id, + ContentAddress::AttributeValue(env_var1_av_content_hash), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add env var 1 av"); + graph + .add_ordered_edge( + change_set, + graph + .get_node_index_by_id(environment_av_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new( + change_set, + EdgeWeightKind::Contain(Some("PORT_1".to_string())), + ) + .expect("Unable to create EdgeWeight"), + env_var1_av_node_index, + ) + .expect("Unable to add environment av -> env var 1 av edge"); + graph + .add_edge( + graph + .get_node_index_by_id(env_var1_av_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(change_set, EdgeWeightKind::Prop) + .expect("Unable to create EdgeWeight"), + graph + .get_node_index_by_id(env_var_prop_id) + .expect("Unable to get NodeIndex"), + ) + .expect("Unable to add env var 1 av -> env var prop edge"); +
+ let env_var2_av_id = change_set.generate_ulid().expect("Unable to generate Ulid"); + let env_var2_av_content_hash = content_store + .add(&serde_json::json!("2222")) + .expect("Unable to add to content store"); + let env_var2_av_node_index = graph + .add_node( + NodeWeight::new_content( + change_set, + env_var2_av_id, + ContentAddress::AttributeValue(env_var2_av_content_hash), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add env var 2 av"); + graph + .add_ordered_edge( + change_set, + graph + .get_node_index_by_id(environment_av_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new( + change_set, + EdgeWeightKind::Contain(Some("PORT_2".to_string())), + ) + .expect("Unable to create EdgeWeight"), + env_var2_av_node_index, + ) + .expect("Unable to add environment av -> env var 2 av edge"); + graph + .add_edge( + graph + .get_node_index_by_id(env_var2_av_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(change_set, EdgeWeightKind::Prop) + .expect("Unable to create EdgeWeight"), + graph + .get_node_index_by_id(env_var_prop_id) + .expect("Unable to get NodeIndex"), + ) + .expect("Unable to add env var 2 av -> env var prop edge"); +
+ let env_var3_av_id = change_set.generate_ulid().expect("Unable to generate Ulid"); + let env_var3_av_content_hash = content_store + .add(&serde_json::json!("3333")) + .expect("Unable to add to content store"); + let env_var3_av_node_index = graph + .add_node( + NodeWeight::new_content( + change_set, + env_var3_av_id, + ContentAddress::AttributeValue(env_var3_av_content_hash), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add env var 3 av"); + graph + .add_ordered_edge( + change_set, + graph + .get_node_index_by_id(environment_av_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new( + change_set, + EdgeWeightKind::Contain(Some("PORT_3".to_string())), + ) + .expect("Unable to create EdgeWeight"), + env_var3_av_node_index, + ) + .expect("Unable to add environment av -> env var 3 av edge"); + graph + .add_edge( + graph + .get_node_index_by_id(env_var3_av_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(change_set, EdgeWeightKind::Prop) + .expect("Unable to create EdgeWeight"), + graph + .get_node_index_by_id(env_var_prop_id) + .expect("Unable to get NodeIndex"), + ) + .expect("Unable to add env var 3 av -> env var prop edge"); +
+ let env_var4_av_id = change_set.generate_ulid().expect("Unable to generate Ulid"); + let env_var4_av_content_hash = content_store + .add(&serde_json::json!("4444")) + .expect("Unable to add to content store"); + let env_var4_av_node_index = graph + .add_node( + NodeWeight::new_content( + change_set, + env_var4_av_id, + ContentAddress::AttributeValue(env_var4_av_content_hash), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add env var 4 av"); + graph + .add_ordered_edge( + change_set, + graph + .get_node_index_by_id(environment_av_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new( + change_set, + EdgeWeightKind::Contain(Some("PORT_4".to_string())), + ) + .expect("Unable to create EdgeWeight"), + env_var4_av_node_index, + ) + .expect("Unable to add environment av -> env var 4 av edge"); + graph + .add_edge( + graph + .get_node_index_by_id(env_var4_av_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(change_set, EdgeWeightKind::Prop) + .expect("Unable to create EdgeWeight"), + graph + .get_node_index_by_id(env_var_prop_id) + .expect("Unable to get NodeIndex"), + ) + .expect("Unable to add env var 4 av -> env var prop edge"); +
+ graph.cleanup(); + graph.dot(); +
+ assert_eq!( + serde_json::json![{ + "domain": { + "environment": { + "PORT_1": "1111", + "PORT_2": "2222", + "PORT_3": "3333", + "PORT_4": "4444", + }, + } + }], + graph + .attribute_value_view( + &mut content_store, + graph + .get_node_index_by_id(root_av_id) + .expect("Unable to get NodeIndex"), + ) + .await + .expect("Unable to generate attribute value view"), + ); +
+ let new_order = vec![ + env_var3_av_id, + env_var1_av_id, + env_var4_av_id, + env_var2_av_id, + ]; + graph + .update_order(change_set, environment_av_id, new_order) + .expect("Unable to update order of environment attribute value's children"); + assert_eq!( + serde_json::json![{ + "domain": { + "environment": { + "PORT_3": "3333", + "PORT_1": "1111", + "PORT_4": "4444", + "PORT_2": "2222", + }, + } + }], + graph + .attribute_value_view( + &mut content_store, + graph + .get_node_index_by_id(root_av_id) + .expect("Unable to get NodeIndex"), + ) + .await + .expect("Unable to generate attribute value view"), + ); +
+ let env_var5_av_id = change_set.generate_ulid().expect("Unable to generate Ulid"); + let env_var5_av_content_hash = content_store + .add(&serde_json::json!("5555")) + .expect("Unable to add to content store"); + let env_var5_av_node_index = graph + .add_node( + NodeWeight::new_content( + change_set, + env_var5_av_id, + ContentAddress::AttributeValue(env_var5_av_content_hash), + ) + .expect("Unable to create NodeWeight"), + ) + .expect("Unable to add env var 5 av"); + graph + .add_ordered_edge( + change_set, + graph + .get_node_index_by_id(environment_av_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new( + change_set, + EdgeWeightKind::Contain(Some("PORT_5".to_string())), + ) + .expect("Unable to create EdgeWeight"), + env_var5_av_node_index, + ) + .expect("Unable to add environment av -> env var 5 av edge"); + graph + .add_edge( + graph + .get_node_index_by_id(env_var5_av_id) + .expect("Unable to get NodeIndex"), + EdgeWeight::new(change_set, EdgeWeightKind::Prop) + .expect("Unable to create EdgeWeight"), + graph + .get_node_index_by_id(env_var_prop_id) + .expect("Unable to get NodeIndex"), + ) + .expect("Unable to add env var 5 av -> env var prop edge"); +
+ assert_eq!( + serde_json::json![{ + "domain": { + "environment": { +
"PORT_3": "3333", + "PORT_1": "1111", + "PORT_4": "4444", + "PORT_2": "2222", + "PORT_5": "5555", + }, + } + }], + graph + .attribute_value_view( + &mut content_store, + graph + .get_node_index_by_id(root_av_id) + .expect("Unable to get NodeIndex"), + ) + .await + .expect("Unable to generate attribute value view"), + ); + } +} diff --git a/lib/dal/src/workspace_snapshot/graph/tests/rebase.rs b/lib/dal/src/workspace_snapshot/graph/tests/rebase.rs new file mode 100644 index 0000000000..243e31c4a2 --- /dev/null +++ b/lib/dal/src/workspace_snapshot/graph/tests/rebase.rs @@ -0,0 +1,180 @@ +#[allow(clippy::panic)] +#[cfg(test)] +mod test { + use pretty_assertions_sorted::assert_eq; + use si_events::ContentHash; + + use crate::change_set::ChangeSet; + use crate::func::FuncKind; + use crate::workspace_snapshot::content_address::ContentAddress; + use crate::workspace_snapshot::edge_weight::{EdgeWeight, EdgeWeightKind}; + use crate::workspace_snapshot::node_weight::category_node_weight::CategoryNodeKind; + use crate::workspace_snapshot::node_weight::NodeWeight; + use crate::workspace_snapshot::node_weight::{ContentNodeWeight, FuncNodeWeight}; + use crate::WorkspaceSnapshotGraph; + + #[test] + fn simulate_rebase() { + let to_rebase_change_set = ChangeSet::new_local().expect("Unable to create ChangeSet"); + let to_rebase_change_set = &to_rebase_change_set; + let mut to_rebase = WorkspaceSnapshotGraph::new(to_rebase_change_set) + .expect("Unable to create WorkspaceSnapshotGraph"); + + // Set up the to rebase graph. + let schema_category_node_index = to_rebase + .add_category_node(to_rebase_change_set, CategoryNodeKind::Schema) + .expect("could not add category node"); + to_rebase + .add_edge( + to_rebase.root_index, + EdgeWeight::new(to_rebase_change_set, EdgeWeightKind::new_use()) + .expect("could not create edge weight"), + schema_category_node_index, + ) + .expect("could not add edge"); + let func_category_node_index = to_rebase + .add_category_node(to_rebase_change_set, CategoryNodeKind::Func) + .expect("could not add category node"); + to_rebase + .add_edge( + to_rebase.root_index, + EdgeWeight::new(to_rebase_change_set, EdgeWeightKind::new_use()) + .expect("could not create edge weight"), + func_category_node_index, + ) + .expect("could not add edge"); + + // Create the onto graph from the to rebase graph. 
+ let onto_change_set = ChangeSet::new_local().expect("Unable to create ChangeSet"); + let onto_change_set = &onto_change_set; + let mut onto = to_rebase.clone(); +
+ // FuncCategory --Use--> Func + let func_id = onto_change_set + .generate_ulid() + .expect("could not generate ulid"); + let func_node_weight = FuncNodeWeight::new( + onto_change_set, + func_id, + ContentAddress::Func(ContentHash::from("foo")), + "foo".to_string(), + FuncKind::Intrinsic, + ) + .expect("could not create func node weight"); + let func_node_index = onto + .add_node(NodeWeight::Func(func_node_weight)) + .expect("could not add node"); + onto.add_edge( + func_category_node_index, + EdgeWeight::new(onto_change_set, EdgeWeightKind::new_use()) + .expect("could not create edge weight"), + func_node_index, + ) + .expect("could not add edge"); +
+ // SchemaCategory --Use--> Schema + let schema_node_weight = ContentNodeWeight::new( + onto_change_set, + onto_change_set + .generate_ulid() + .expect("could not generate ulid"), + ContentAddress::Schema(ContentHash::from("foo")), + ) + .expect("could not create schema node weight"); + let schema_node_index = onto + .add_node(NodeWeight::Content(schema_node_weight)) + .expect("could not add node"); + onto.add_edge( + schema_category_node_index, + EdgeWeight::new(onto_change_set, EdgeWeightKind::new_use()) + .expect("could not create edge weight"), + schema_node_index, + ) + .expect("could not add edge"); +
+ // Schema --Use--> SchemaVariant + let schema_variant_node_weight = ContentNodeWeight::new( + onto_change_set, + onto_change_set + .generate_ulid() + .expect("could not generate ulid"), + ContentAddress::SchemaVariant(ContentHash::from("foo")), + ) + .expect("could not create schema variant node weight"); + let schema_variant_node_index = onto + .add_node(NodeWeight::Content(schema_variant_node_weight)) + .expect("could not add node"); + onto.add_edge( + schema_node_index, + EdgeWeight::new(onto_change_set, EdgeWeightKind::new_use()) + .expect("could not create edge weight"), + schema_variant_node_index, + ) + .expect("could not add edge"); +
+ // SchemaVariant --Use--> Func + let func_node_index = onto + .get_node_index_by_id(func_id) + .expect("could not get node index by id"); + onto.add_edge( + schema_variant_node_index, + EdgeWeight::new(onto_change_set, EdgeWeightKind::new_use()) + .expect("could not create edge weight"), + func_node_index, + ) + .expect("could not add edge"); +
+ // Before cleanup, detect conflicts and updates. + let (before_cleanup_conflicts, before_cleanup_updates) = to_rebase + .detect_conflicts_and_updates( + to_rebase_change_set.vector_clock_id(), + &onto, + onto_change_set.vector_clock_id(), + ) + .expect("could not detect conflicts and updates"); +
+ // Cleanup and check node count. + onto.cleanup(); + to_rebase.cleanup(); + assert_eq!( + 6, // expected + onto.node_count() // actual + ); +
+ // Detect conflicts and updates. Ensure cleanup did not affect the results. + let (conflicts, updates) = to_rebase + .detect_conflicts_and_updates( + to_rebase_change_set.vector_clock_id(), + &onto, + onto_change_set.vector_clock_id(), + ) + .expect("could not detect conflicts and updates"); + assert!(conflicts.is_empty()); + assert_eq!( + 2, // expected + updates.len() // actual + ); + assert_eq!( + before_cleanup_conflicts, // expected + conflicts // actual + ); + assert_eq!( + before_cleanup_updates, // expected + updates // actual + ); +
+ // Ensure that we do not have duplicate updates.
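+ // Note that Vec::dedup only removes consecutive duplicates, so this check + // assumes detect_conflicts_and_updates emits updates in a stable order.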
+ let mut deduped_updates = updates.clone(); + deduped_updates.dedup(); + assert_eq!( + deduped_updates.len(), // expected + updates.len() // actual + ); +
+ // Perform the updates. In the future, we may want to see if the onto and resulting to + // rebase graphs are logically equivalent after updates are performed. + to_rebase + .perform_updates(to_rebase_change_set, &onto, &updates) + .expect("could not perform updates"); + } +} diff --git a/lib/dal/src/workspace_snapshot/node_weight.rs b/lib/dal/src/workspace_snapshot/node_weight.rs index dc2452165c..db4afe980e 100644 --- a/lib/dal/src/workspace_snapshot/node_weight.rs +++ b/lib/dal/src/workspace_snapshot/node_weight.rs @@ -211,6 +211,20 @@ impl NodeWeight { } } + pub fn merkle_tree_hash(&self) -> ContentHash { + match self { + NodeWeight::AttributePrototypeArgument(weight) => weight.merkle_tree_hash(), + NodeWeight::AttributeValue(weight) => weight.merkle_tree_hash(), + NodeWeight::Category(weight) => weight.merkle_tree_hash(), + NodeWeight::Component(weight) => weight.merkle_tree_hash(), + NodeWeight::Content(weight) => weight.merkle_tree_hash(), + NodeWeight::Func(weight) => weight.merkle_tree_hash(), + NodeWeight::FuncArgument(weight) => weight.merkle_tree_hash(), + NodeWeight::Ordering(weight) => weight.merkle_tree_hash(), + NodeWeight::Prop(weight) => weight.merkle_tree_hash(), + } + } + pub fn new_content_hash(&mut self, content_hash: ContentHash) -> NodeWeightResult<()> { match self { NodeWeight::Component(weight) => weight.new_content_hash(content_hash), @@ -281,6 +295,20 @@ impl NodeWeight { } } + pub fn set_merkle_tree_hash(&mut self, new_hash: ContentHash) { + match self { + NodeWeight::AttributePrototypeArgument(weight) => weight.set_merkle_tree_hash(new_hash), + NodeWeight::AttributeValue(weight) => weight.set_merkle_tree_hash(new_hash), + NodeWeight::Category(weight) => weight.set_merkle_tree_hash(new_hash), + NodeWeight::Component(weight) => weight.set_merkle_tree_hash(new_hash), + NodeWeight::Content(weight) => weight.set_merkle_tree_hash(new_hash), + NodeWeight::Func(weight) => weight.set_merkle_tree_hash(new_hash), + NodeWeight::FuncArgument(weight) => weight.set_merkle_tree_hash(new_hash), + NodeWeight::Ordering(weight) => weight.set_merkle_tree_hash(new_hash), + NodeWeight::Prop(weight) => weight.set_merkle_tree_hash(new_hash), + } + } + pub fn set_order(&mut self, change_set: &ChangeSet, order: Vec<Ulid>) -> NodeWeightResult<()> { match self { NodeWeight::Ordering(ordering_weight) => ordering_weight.set_order(change_set, order), diff --git a/lib/dal/src/workspace_snapshot/node_weight/attribute_prototype_argument_node_weight.rs b/lib/dal/src/workspace_snapshot/node_weight/attribute_prototype_argument_node_weight.rs index de5199c526..cd30e5b2d1 100644 --- a/lib/dal/src/workspace_snapshot/node_weight/attribute_prototype_argument_node_weight.rs +++ b/lib/dal/src/workspace_snapshot/node_weight/attribute_prototype_argument_node_weight.rs @@ -25,6 +25,7 @@ pub struct ArgumentTargets { pub struct AttributePrototypeArgumentNodeWeight { id: Ulid, lineage_id: LineageId, + merkle_tree_hash: ContentHash, vector_clock_first_seen: VectorClock, vector_clock_recently_seen: VectorClock, vector_clock_write: VectorClock, @@ -41,6 +42,7 @@ impl AttributePrototypeArgumentNodeWeight { Ok(Self { id, lineage_id: change_set.generate_ulid()?, + merkle_tree_hash: ContentHash::default(), targets, vector_clock_first_seen: VectorClock::new(change_set.vector_clock_id())?, vector_clock_recently_seen: VectorClock::new(change_set.vector_clock_id())?, @@ -113,6 +115,10 @@ impl AttributePrototypeArgumentNodeWeight { Ok(()) } + pub fn merkle_tree_hash(&self) -> ContentHash { + self.merkle_tree_hash + } + pub fn targets(&self) -> Option<ArgumentTargets> { self.targets } @@ -127,6 +133,10 @@ impl AttributePrototypeArgumentNodeWeight { Ok(new_node_weight) } + pub fn set_merkle_tree_hash(&mut self, new_hash: ContentHash) { + self.merkle_tree_hash = new_hash; + } + pub fn set_vector_clock_recently_seen_to( &mut self, change_set: &ChangeSet, @@ -156,6 +166,7 @@ impl std::fmt::Debug for AttributePrototypeArgumentNodeWeight { .field("lineage_id", &self.lineage_id.to_string()) .field("targets", &self.targets) .field("node_hash", &self.node_hash()) + .field("merkle_tree_hash", &self.merkle_tree_hash) .field("vector_clock_first_seen", &self.vector_clock_first_seen) .field( "vector_clock_recently_seen", diff --git a/lib/dal/src/workspace_snapshot/node_weight/category_node_weight.rs b/lib/dal/src/workspace_snapshot/node_weight/category_node_weight.rs index a75e7553af..ebd21d450d 100644 --- a/lib/dal/src/workspace_snapshot/node_weight/category_node_weight.rs +++ b/lib/dal/src/workspace_snapshot/node_weight/category_node_weight.rs @@ -1,14 +1,14 @@ use chrono::{DateTime, Utc}; use serde::{Deserialize, Serialize}; use si_events::ContentHash; -use strum::{Display, EnumIter}; +use strum::Display; use ulid::Ulid; use crate::change_set::ChangeSet; use crate::workspace_snapshot::vector_clock::VectorClockId; use crate::workspace_snapshot::{node_weight::NodeWeightResult, vector_clock::VectorClock}; -#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq, Display, EnumIter)] +#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq, Display)] pub enum CategoryNodeKind { ActionBatch, Component, diff --git a/lib/dal/tests/integration_test/graph/mod.rs b/lib/dal/tests/integration_test/graph/mod.rs deleted file mode 100644 index 3d130d79ac..0000000000 --- a/lib/dal/tests/integration_test/graph/mod.rs +++ /dev/null @@ -1,3880 +0,0 @@ -mod rebase; - -use dal::workspace_snapshot::content_address::ContentAddress; -use dal::workspace_snapshot::edge_weight::EdgeWeightKindDiscriminants; -use dal::ComponentId; -use dal::DalContext; -use dal::FuncId; -use dal::PropId; -use dal::SchemaId; -use dal::SchemaVariantId; -use dal::WorkspaceSnapshot; -use petgraph::graph::NodeIndex; -use petgraph::Outgoing; -use si_events::ContentHash; -use std::collections::HashMap; -use std::collections::HashSet; -use std::str::FromStr; -use ulid::Ulid; - -use dal::change_set::ChangeSet; -use dal::workspace_snapshot::conflict::Conflict; -use dal::workspace_snapshot::edge_weight::{EdgeWeight, EdgeWeightKind}; -use dal::workspace_snapshot::node_weight::NodeWeight; -use dal::workspace_snapshot::update::Update; -use dal::PropKind; -use dal_test::test; -use si_events::MerkleTreeHash; - -#[derive(Debug, PartialEq)] -struct ConflictsAndUpdates { - conflicts: Vec<Conflict>, - updates: Vec<Update>, -} - -// #[test] -// async fn new(ctx: DalContext) { -// let change_set = ChangeSet::new_local().expect("Unable to create ChangeSet"); -// let change_set = &change_set; -// let graph = -// WorkspaceSnapshotGraph::new(change_set).expect("Unable to create WorkspaceSnapshotGraph"); -// assert!(graph.is_acyclic_directed()); -// } - -// Previously, WorkspaceSnapshotGraph::new would not populate its node_index_by_id, so this test -// would fail, in addition to any functionality that depended on getting the root node index -// on a fresh graph (like add_ordered_node) -#[test] -async fn get_root_index_by_root_id_on_fresh_graph(ctx:
&DalContext) { - let base_change_set = ChangeSet::new_local().expect("Unable to create ChangeSet"); - let active_change_set = &base_change_set; - let graph = WorkspaceSnapshot::empty(ctx, active_change_set) - .await - .expect("Unable to create WorkspaceSnapshot"); - - let root_id = graph.root_id().await.expect("Unable to get rootId"); - - let root_node_idx = graph - .get_node_index_by_id(root_id) - .await - .expect("get root node index from ULID"); - - assert_eq!( - graph.root().await.expect("Unable to get root idx"), - root_node_idx - ); -} - -#[test] -async fn multiply_parented_nodes(ctx: DalContext) { - // All edges are outgoing from top to bottom except e to u - // - // root node---->t--->u--->v - // | ^ - // | | - // r ------ | - // / \ | | - // a b | | - // \ / \ | | - // c | | | - // / | | | | - // | d <- | | - // | | | | - // ->e<------ | - // | | - // ---------------- - // - // Edge from e to u mimics a function edge from a prop through a prototype to a function - // There are a few other edges to "u" that are not represented in the drawing above. - // - - let nodes = ["r", "t", "u", "v", "a", "b", "c", "d", "e"]; - let edges = [ - (None, "r"), - (None, "t"), - (Some("t"), "u"), - (Some("u"), "v"), - (Some("r"), "a"), - (Some("r"), "b"), - (Some("r"), "e"), - (Some("a"), "c"), - (Some("b"), "c"), - (Some("c"), "d"), - (Some("b"), "d"), - (Some("d"), "e"), - (Some("c"), "e"), - (Some("e"), "u"), - (Some("c"), "u"), - (Some("a"), "u"), - (Some("a"), "b"), - ]; - - let change_set = ChangeSet::new_local().expect("Unable to create ChangeSet"); - let change_set = &change_set; - let graph = WorkspaceSnapshot::empty(&ctx, change_set) - .await - .expect("should create snapshot"); - let root_id = graph.root_id().await.expect("should get root id"); - - let mut node_id_map = HashMap::new(); - - for node in nodes { - // "props" here are just nodes that are easy to create and render the name on the dot - // output. there is no domain modeling in this test. 
- let node_id = change_set.generate_ulid().expect("Unable to generate Ulid"); - let prop_node_weight = NodeWeight::new_prop( - change_set, - node_id, - PropKind::Object, - node, - ContentHash::new(node.as_bytes()), - ) - .expect("create prop node weight"); - graph - .add_node(prop_node_weight) - .await - .expect("Unable to add prop"); - - node_id_map.insert(node, node_id); - } - - for (source, target) in edges { - let source = match source { - None => root_id, - Some(node) => node_id_map.get(node).copied().expect("should be there"), - }; - - let target = node_id_map - .get(target) - .copied() - .expect("target node should have an id"); - - graph - .add_edge( - source, - EdgeWeight::new(change_set, EdgeWeightKind::new_use()).expect("create edge weight"), - target, - ) - .await - .expect("add edge"); - } - - graph.cleanup().await.expect("should cleanup"); - - for (source, target) in edges { - let source = match source { - None => root_id, - Some(node) => node_id_map.get(node).copied().expect("should be there"), - }; - - let target_idx = graph - .get_node_index_by_id( - node_id_map - .get(target) - .copied() - .expect("target node should have an id"), - ) - .await - .expect("get node index by id"); - - assert!( - graph - .edges_directed(source, Outgoing) - .await - .expect("should be able to get edges directed") - .iter() - .any(|(_, _, target)| target == &target_idx), - "An edge from {} to {} should exist", - source, - target - ); - } - - for (_, id) in node_id_map.iter() { - let idx_for_node = graph - .get_node_index_by_id(*id) - .await - .expect("able to get idx by id"); - graph - .get_node_weight(idx_for_node) - .await - .expect("node with weight in graph"); - } -} - -#[test] -async fn add_nodes_and_edges(ctx: DalContext) { - let change_set = ChangeSet::new_local().expect("Unable to create ChangeSet"); - let change_set = &change_set; - let graph = WorkspaceSnapshot::empty(&ctx, change_set) - .await - .expect("should create snapshot"); - let root_id = graph.root_id().await.expect("should get root id"); - - let schema_id = change_set.generate_ulid().expect("Unable to generate Ulid"); - graph - .add_node( - NodeWeight::new_content( - change_set, - schema_id, - ContentAddress::Schema(ContentHash::new( - SchemaId::generate().to_string().as_bytes(), - )), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add schema"); - let schema_variant_id = change_set.generate_ulid().expect("Unable to generate Ulid"); - graph - .add_node( - NodeWeight::new_content( - change_set, - schema_variant_id, - ContentAddress::SchemaVariant(ContentHash::new( - SchemaVariantId::generate().to_string().as_bytes(), - )), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add schema variant"); - let component_id = change_set.generate_ulid().expect("Unable to generate Ulid"); - graph - .add_node( - NodeWeight::new_content( - change_set, - component_id, - ContentAddress::Component(ContentHash::new( - ComponentId::generate().to_string().as_bytes(), - )), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add component"); - - graph - .add_edge( - root_id, - EdgeWeight::new(change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - component_id, - ) - .await - .expect("Unable to add root -> component edge"); - graph - .add_edge( - root_id, - EdgeWeight::new(change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - schema_id, - ) - .await - .expect("Unable to add root -> schema edge"); - graph 
- .add_edge( - schema_id, - EdgeWeight::new(change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - schema_variant_id, - ) - .await - .expect("Unable to add schema -> schema variant edge"); - graph - .add_edge( - component_id, - EdgeWeight::new(change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - schema_variant_id, - ) - .await - .expect("Unable to add component -> schema variant edge"); - - let func_id = change_set.generate_ulid().expect("Unable to generate Ulid"); - graph - .add_node( - NodeWeight::new_content( - change_set, - func_id, - ContentAddress::Func(ContentHash::new(FuncId::generate().to_string().as_bytes())), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add func"); - let prop_id = change_set.generate_ulid().expect("Unable to generate Ulid"); - graph - .add_node( - NodeWeight::new_content( - change_set, - prop_id, - ContentAddress::Prop(ContentHash::new(PropId::generate().to_string().as_bytes())), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add prop"); - - graph - .add_edge( - root_id, - EdgeWeight::new(change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - func_id, - ) - .await - .expect("Unable to add root -> func edge"); - graph - .add_edge( - schema_variant_id, - EdgeWeight::new(change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - prop_id, - ) - .await - .expect("Unable to add schema variant -> prop edge"); - graph - .add_edge( - prop_id, - EdgeWeight::new(change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - func_id, - ) - .await - .expect("Unable to add prop -> func edge"); - - assert!(graph.is_acyclic_directed().await); -} - -#[test] -async fn cyclic_failure(ctx: DalContext) { - let change_set = ChangeSet::new_local().expect("Unable to create ChangeSet"); - let change_set = &change_set; - let graph = WorkspaceSnapshot::empty(&ctx, change_set) - .await - .expect("should create snapshot"); - let root_id = graph.root_id().await.expect("should get root id"); - - let schema_id = change_set.generate_ulid().expect("Unable to generate Ulid"); - graph - .add_node( - NodeWeight::new_content( - change_set, - schema_id, - ContentAddress::Schema(ContentHash::new( - SchemaId::generate().to_string().as_bytes(), - )), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add node"); - - graph - .add_edge( - root_id, - EdgeWeight::new(change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - schema_id, - ) - .await - .expect("Unable to add root -> component edge"); - - let pre_cycle_root_index = root_id; - - println!("before cycle check"); - // This should cause a cycle. 
- graph - .add_edge( - schema_id, - EdgeWeight::new(change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - root_id, - ) - .await - .expect_err("Created a cycle"); - println!("after cycle check"); - - let current_root_id = graph.root_id().await.expect("should get root id"); - - assert_eq!(pre_cycle_root_index, current_root_id,); -} - -#[test] -async fn update_content(ctx: DalContext) { - let change_set = ChangeSet::new_local().expect("Unable to create ChangeSet"); - let change_set = &change_set; - let graph = WorkspaceSnapshot::empty(&ctx, change_set) - .await - .expect("should create snapshot"); - let root_id = graph.root_id().await.expect("should get root id"); - - let schema_id = change_set.generate_ulid().expect("Unable to generate Ulid"); - graph - .add_node( - NodeWeight::new_content( - change_set, - schema_id, - ContentAddress::Schema(ContentHash::from("Constellation")), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add schema"); - let schema_variant_id = change_set.generate_ulid().expect("Unable to generate Ulid"); - graph - .add_node( - NodeWeight::new_content( - change_set, - schema_variant_id, - ContentAddress::SchemaVariant(ContentHash::new("Freestar Collective".as_bytes())), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add schema variant"); - let component_id = change_set.generate_ulid().expect("Unable to generate Ulid"); - graph - .add_node( - NodeWeight::new_content( - change_set, - component_id, - ContentAddress::Component(ContentHash::from("Crimson Fleet")), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add component"); - - graph - .add_edge( - root_id, - EdgeWeight::new(change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - component_id, - ) - .await - .expect("Unable to add root -> component edge"); - graph - .add_edge( - root_id, - EdgeWeight::new(change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - schema_id, - ) - .await - .expect("Unable to add root -> schema edge"); - graph - .add_edge( - schema_id, - EdgeWeight::new(change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - schema_variant_id, - ) - .await - .expect("Unable to add schema -> schema variant edge"); - graph - .add_edge( - component_id, - EdgeWeight::new(change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - schema_variant_id, - ) - .await - .expect("Unable to add component -> schema variant edge"); - - // Ensure that the root node merkle tree hash looks as we expect before the update. 
- let pre_update_root_node_merkle_tree_hash = MerkleTreeHash::from_str( - "6b3b0374a25049046f34d6c7e98f890387a963249aaace3d66bb47ce70399033", - ) - .expect("could not make merkle tree hash from hex bytes"); - assert_eq!( - pre_update_root_node_merkle_tree_hash, // expected - graph - .get_graph_local_node_weight(graph.root_id().await.expect("get root")) - .await - .expect("could not get node weight") - .merkle_tree_hash(), // actual - ); - - let updated_content_hash = ContentHash::from("new_content"); - graph - .update_content(change_set, component_id, updated_content_hash) - .await - .expect("Unable to update Component content hash"); - - let post_update_root_node_merkle_tree_hash = MerkleTreeHash::from_str( - "46babffabf1567fd20594c7038cfea58991b394b8eb6cc1f81167d2314617e35", - ) - .expect("merkle hash from str"); - assert_eq!( - post_update_root_node_merkle_tree_hash, // expected - graph - .get_graph_local_node_weight(graph.root_id().await.expect("get root")) - .await - .expect("could not get node weight") - .merkle_tree_hash(), // actual - ); - assert_eq!( - updated_content_hash, // expected - graph - .get_node_weight( - graph - .get_node_index_by_id(component_id) - .await - .expect("could not get node index by id") - ) - .await - .expect("could not get node weight") - .content_hash(), // actual - ); - - graph.cleanup().await.expect("should cleanup"); - - // Ensure that there are not more nodes than the ones that should be in use. - assert_eq!(4, graph.node_count().await); - - // The hashes must not change upon cleanup. - assert_eq!( - post_update_root_node_merkle_tree_hash, // expected - graph - .get_graph_local_node_weight(graph.root_id().await.expect("get root")) - .await - .expect("could not get node weight") - .merkle_tree_hash() - ); - assert_eq!( - updated_content_hash, // expected - graph - .get_node_weight( - graph - .get_node_index_by_id(component_id) - .await - .expect("could not get node index by id") - ) - .await - .expect("could not get node weight") - .content_hash(), // actual - ); -} - -#[test] -async fn detect_conflicts_and_updates_simple_no_conflicts_no_updates_in_base(ctx: DalContext) { - let empty_change_set = ChangeSet::new_local().expect("Unable to create ChangeSet"); - let empty_change_set = &empty_change_set; - let empty_graph = WorkspaceSnapshot::initial(&ctx, empty_change_set) - .await - .expect("should create snapshot"); - let empty_root_id = empty_graph.root_id().await.expect("should get root id"); - - let schema_id = empty_change_set - .generate_ulid() - .expect("Unable to generate Ulid"); - empty_graph - .add_node( - NodeWeight::new_content( - empty_change_set, - schema_id, - ContentAddress::Schema(ContentHash::from("Schema A")), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add Schema A"); - let schema_variant_id = empty_change_set - .generate_ulid() - .expect("Unable to generate Ulid"); - empty_graph - .add_node( - NodeWeight::new_content( - empty_change_set, - schema_variant_id, - ContentAddress::SchemaVariant(ContentHash::from("Schema Variant A")), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add Schema Variant A"); - - empty_graph - .add_edge( - empty_root_id, - EdgeWeight::new(empty_change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - schema_id, - ) - .await - .expect("Unable to add root -> schema edge"); - empty_graph - .add_edge( - schema_id, - EdgeWeight::new(empty_change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - 
schema_variant_id, - ) - .await - .expect("Unable to add schema -> schema variant edge"); - - // empty_graph.dot(); - - let new_change_set = ChangeSet::new_local().expect("Unable to create ChangeSet"); - let new_change_set = &new_change_set; - let new_graph = empty_graph.real_clone().await; - let new_root_id = empty_graph.root_id().await.expect("should get root id"); - - let component_id = new_change_set - .generate_ulid() - .expect("Unable to generate Ulid"); - new_graph - .add_node( - NodeWeight::new_content( - new_change_set, - component_id, - ContentAddress::Schema(ContentHash::from("Component A")), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add Component A"); - new_graph - .add_edge( - new_root_id, - EdgeWeight::new(new_change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - component_id, - ) - .await - .expect("Unable to add root -> component edge"); - new_graph - .add_edge( - component_id, - EdgeWeight::new(new_change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - schema_variant_id, - ) - .await - .expect("Unable to add component -> schema variant edge"); - - // new_graph.dot(); - - let (conflicts, updates) = new_graph - .detect_conflicts_and_updates( - new_change_set.vector_clock_id(), - &empty_graph, - empty_change_set.vector_clock_id(), - ) - .await - .expect("Unable to detect conflicts and updates"); - - assert_eq!(Vec::<Conflict>::new(), conflicts); - assert_eq!(Vec::<Update>::new(), updates); -} - -#[test] -async fn detect_conflicts_and_updates_simple_no_conflicts_with_purely_new_content_in_base( - ctx: DalContext, -) { - let empty_change_set = ChangeSet::new_local().expect("Unable to create ChangeSet"); - let base_change_set = &empty_change_set; - let base_graph = WorkspaceSnapshot::empty(&ctx, base_change_set) - .await - .expect("should create snapshot"); - let base_root_id = base_graph.root_id().await.expect("should get root id"); - - let schema_id = base_change_set - .generate_ulid() - .expect("Unable to generate Ulid"); - base_graph - .add_node( - NodeWeight::new_content( - base_change_set, - schema_id, - ContentAddress::Schema(ContentHash::from("Schema A")), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add Schema A"); - let schema_variant_id = base_change_set - .generate_ulid() - .expect("Unable to generate Ulid"); - base_graph - .add_node( - NodeWeight::new_content( - base_change_set, - schema_variant_id, - ContentAddress::SchemaVariant(ContentHash::from("Schema Variant A")), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add Schema Variant A"); - - base_graph - .add_edge( - base_root_id, - EdgeWeight::new(base_change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - schema_id, - ) - .await - .expect("Unable to add root -> schema edge"); - base_graph - .add_edge( - schema_id, - EdgeWeight::new(base_change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - schema_variant_id, - ) - .await - .expect("Unable to add schema -> schema variant edge"); - - println!("Initial base graph (Root {:?}):", base_root_id); - // base_graph.dot(); - - let new_change_set = ChangeSet::new_local().expect("Unable to create ChangeSet"); - let new_change_set = &new_change_set; - let new_graph = base_graph.real_clone().await; - let new_root_id = new_graph.root_id().await.expect("should get root id"); - - let new_onto_component_id = base_change_set - .generate_ulid() - .expect("Unable to generate Ulid"); -
base_graph - .add_node( - NodeWeight::new_content( - base_change_set, - new_onto_component_id, - ContentAddress::Component(ContentHash::from("Component B")), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add Component B"); - base_graph - .add_edge( - base_root_id, - EdgeWeight::new(base_change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - new_onto_component_id, - ) - .await - .expect("Unable to add root -> component edge"); - base_graph - .add_edge( - new_onto_component_id, - EdgeWeight::new(base_change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - schema_variant_id, - ) - .await - .expect("Unable to add component -> schema variant edge"); - - println!("Updated base graph (Root: {:?}):", new_root_id); - // base_graph.dot(); - - let (conflicts, updates) = new_graph - .detect_conflicts_and_updates( - new_change_set.vector_clock_id(), - &base_graph, - base_change_set.vector_clock_id(), - ) - .await - .expect("Unable to detect conflicts and updates"); - - assert_eq!(Vec::<Conflict>::new(), conflicts); - - let new_onto_component_index = base_graph - .get_node_index_by_id(new_onto_component_id) - .await - .expect("Unable to get NodeIndex"); - match updates.as_slice() { - [Update::NewEdge { - source, - destination, - edge_weight, - }] => { - assert_eq!( - new_graph.root().await.expect("should get root index"), - *source - ); - assert_eq!(new_onto_component_index, *destination); - assert_eq!(&EdgeWeightKind::new_use(), edge_weight.kind()); - } - other => panic!("Unexpected updates: {:?}", other), - } -} - -#[test] -async fn detect_conflicts_and_updates_with_purely_new_content_in_new_graph(ctx: DalContext) { - let empty_change_set = ChangeSet::new_local().expect("Unable to create ChangeSet"); - let base_change_set = &empty_change_set; - let base_graph = WorkspaceSnapshot::empty(&ctx, base_change_set) - .await - .expect("should create snapshot"); - let base_root_id = base_graph.root_id().await.expect("should get root id"); - - let component_id = base_change_set - .generate_ulid() - .expect("Unable to generate Ulid"); - base_graph - .add_node( - NodeWeight::new_content( - base_change_set, - component_id, - ContentAddress::Component(ContentHash::from("Component A")), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add Schema A"); - base_graph - .add_edge( - base_root_id, - EdgeWeight::new(base_change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - component_id, - ) - .await - .expect("Unable to add root -> component edge"); - - base_graph.cleanup().await.expect("should cleanup"); - println!("Initial base graph (Root {:?}):", base_root_id); - // base_graph.dot(); - - let new_change_set = ChangeSet::new_local().expect("Unable to create ChangeSet"); - let new_change_set = &new_change_set; - let new_graph = base_graph.real_clone().await; - let new_root_id = new_graph.root_id().await.expect("should get root id"); - - let new_component_id = new_change_set - .generate_ulid() - .expect("Unable to generate Ulid"); - let new_component_index = new_graph - .add_node( - NodeWeight::new_content( - new_change_set, - new_component_id, - ContentAddress::Component(ContentHash::from("Component B")), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add Component B"); - new_graph - .add_edge( - new_root_id, - EdgeWeight::new(new_change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - new_component_id, - ) - .await -
.expect("Unable to add root -> component edge"); - - new_graph.cleanup().await.expect("should clean up"); - println!("Updated new graph (Root: {:?}):", new_root_id); - // new_graph.dot(); - - let (conflicts, updates) = new_graph - .detect_conflicts_and_updates( - new_change_set.vector_clock_id(), - &base_graph, - base_change_set.vector_clock_id(), - ) - .await - .expect("Unable to detect conflicts and updates"); - - assert!(updates.is_empty()); - assert!(conflicts.is_empty()); - - let (conflicts, updates) = base_graph - .detect_conflicts_and_updates( - base_change_set.vector_clock_id(), - &new_graph, - new_change_set.vector_clock_id(), - ) - .await - .expect("Unable to detect conflicts and updates"); - - assert!(conflicts.is_empty()); - - match updates.as_slice() { - [Update::NewEdge { - source, - destination, - edge_weight, - }] => { - assert_eq!( - base_graph.root().await.expect("should get root index"), - *source - ); - assert_eq!(new_component_index, *destination); - assert_eq!(&EdgeWeightKind::new_use(), edge_weight.kind()); - } - other => panic!("Unexpected updates: {:?}", other), - } -} - -#[test] -async fn detect_conflicts_and_updates_simple_no_conflicts_with_updates_on_both_sides( - ctx: DalContext, -) { - let empty_change_set = ChangeSet::new_local().expect("Unable to create ChangeSet"); - let base_change_set = &empty_change_set; - let base_graph = WorkspaceSnapshot::empty(&ctx, base_change_set) - .await - .expect("should create snapshot"); - let base_root_id = base_graph.root_id().await.expect("should get root id"); - - let schema_id = base_change_set - .generate_ulid() - .expect("Unable to generate Ulid"); - base_graph - .add_node( - NodeWeight::new_content( - base_change_set, - schema_id, - ContentAddress::Schema(ContentHash::from("Schema A")), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add Schema A"); - let schema_variant_id = base_change_set - .generate_ulid() - .expect("Unable to generate Ulid"); - base_graph - .add_node( - NodeWeight::new_content( - base_change_set, - schema_variant_id, - ContentAddress::SchemaVariant(ContentHash::from("Schema Variant A")), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add Schema Variant A"); - - base_graph - .add_edge( - base_root_id, - EdgeWeight::new(base_change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - schema_id, - ) - .await - .expect("Unable to add root -> schema edge"); - base_graph - .add_edge( - schema_id, - EdgeWeight::new(base_change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - schema_variant_id, - ) - .await - .expect("Unable to add schema -> schema variant edge"); - - println!("Initial base graph (Root {:?}):", base_root_id); - // base_graph.dot(); - - let new_change_set = ChangeSet::new_local().expect("Unable to create ChangeSet"); - let new_change_set = &new_change_set; - let new_graph = base_graph.real_clone().await; - let new_root_id = new_graph.root_id().await.expect("should get root id"); - - let component_id = new_change_set - .generate_ulid() - .expect("Unable to generate Ulid"); - new_graph - .add_node( - NodeWeight::new_content( - new_change_set, - component_id, - ContentAddress::Component(ContentHash::from("Component A")), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add Component A"); - new_graph - .add_edge( - new_root_id, - EdgeWeight::new(new_change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - component_id, - ) - 
.await - .expect("Unable to add root -> component edge"); - new_graph - .add_edge( - component_id, - EdgeWeight::new(new_change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - schema_variant_id, - ) - .await - .expect("Unable to add component -> schema variant edge"); - - println!("new graph (Root {:?}):", new_root_id); - // new_graph.dot(); - - let new_onto_component_id = base_change_set - .generate_ulid() - .expect("Unable to generate Ulid"); - base_graph - .add_node( - NodeWeight::new_content( - base_change_set, - new_onto_component_id, - ContentAddress::Component(ContentHash::from("Component B")), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add Component B"); - base_graph - .add_edge( - base_root_id, - EdgeWeight::new(base_change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - new_onto_component_id, - ) - .await - .expect("Unable to add root -> component edge"); - base_graph - .add_edge( - new_onto_component_id, - EdgeWeight::new(base_change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - schema_variant_id, - ) - .await - .expect("Unable to add component -> schema variant edge"); - - println!("Updated base graph (Root: {:?}):", base_root_id); - // base_graph.dot(); - - let (conflicts, updates) = new_graph - .detect_conflicts_and_updates( - new_change_set.vector_clock_id(), - &base_graph, - base_change_set.vector_clock_id(), - ) - .await - .expect("Unable to detect conflicts and updates"); - - assert_eq!(Vec::<Conflict>::new(), conflicts); - - let new_onto_component_index = base_graph - .get_node_index_by_id(new_onto_component_id) - .await - .expect("Unable to get NodeIndex"); - match updates.as_slice() { - [Update::NewEdge { - source, - destination, - edge_weight, - }] => { - assert_eq!( - new_graph.root().await.expect("should get root index"), - *source - ); - assert_eq!(new_onto_component_index, *destination); - assert_eq!(&EdgeWeightKind::new_use(), edge_weight.kind()); - } - other => panic!("Unexpected updates: {:?}", other), - } -} - -#[test] -async fn detect_conflicts_and_updates_simple_with_content_conflict(ctx: DalContext) { - let empty_change_set = ChangeSet::new_local().expect("Unable to create ChangeSet"); - let base_change_set = &empty_change_set; - let base_graph = WorkspaceSnapshot::empty(&ctx, base_change_set) - .await - .expect("should create snapshot"); - let base_root_id = base_graph.root_id().await.expect("should get root id"); - - let schema_id = base_change_set - .generate_ulid() - .expect("Unable to generate Ulid"); - base_graph - .add_node( - NodeWeight::new_content( - base_change_set, - schema_id, - ContentAddress::Schema(ContentHash::from("Schema A")), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add Schema A"); - let schema_variant_id = base_change_set - .generate_ulid() - .expect("Unable to generate Ulid"); - base_graph - .add_node( - NodeWeight::new_content( - base_change_set, - schema_variant_id, - ContentAddress::SchemaVariant(ContentHash::from("Schema Variant A")), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add Schema Variant A"); - - base_graph - .add_edge( - base_root_id, - EdgeWeight::new(base_change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - schema_id, - ) - .await - .expect("Unable to add root -> schema edge"); - base_graph - .add_edge( - schema_id, - EdgeWeight::new(base_change_set, EdgeWeightKind::new_use()) - .expect("Unable to create 
EdgeWeight"), - schema_variant_id, - ) - .await - .expect("Unable to add schema -> schema variant edge"); - - let component_id = base_change_set - .generate_ulid() - .expect("Unable to generate Ulid"); - base_graph - .add_node( - NodeWeight::new_content( - base_change_set, - component_id, - ContentAddress::Component(ContentHash::from("Component A")), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add Component A"); - base_graph - .add_edge( - base_root_id, - EdgeWeight::new(base_change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - component_id, - ) - .await - .expect("Unable to add root -> component edge"); - base_graph - .add_edge( - component_id, - EdgeWeight::new(base_change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - schema_variant_id, - ) - .await - .expect("Unable to add component -> schema variant edge"); - - base_graph.cleanup().await.expect("should clean up"); - println!("Initial base graph (Root {:?}):", base_root_id); - // base_graph.dot(); - - let new_change_set = ChangeSet::new_local().expect("Unable to create ChangeSet"); - let new_change_set = &new_change_set; - let new_graph = base_graph.real_clone().await; - let new_root_id = new_graph.root_id().await.expect("should get root id"); - - new_graph - .update_content( - new_change_set, - component_id, - ContentHash::from("Updated Component A"), - ) - .await - .expect("Unable to update Component A"); - - new_graph.cleanup().await.expect("should clean up"); - // new_graph.dot(); - - base_graph - .update_content( - base_change_set, - component_id, - ContentHash::from("Base Updated Component A"), - ) - .await - .expect("Unable to update Component A"); - // new_graph.dot(); - - base_graph.cleanup().await.expect("should clean up"); - let base_root_id = base_graph.root_id().await.expect("should get root id"); - println!("=========================="); - println!("Updated base graph (Root: {:?}):", base_root_id); - dbg!(base_graph - .get_node_weight(base_graph.root().await.expect("...")) - .await - .expect("get root")); - dbg!( - new_root_id, - base_graph - .get_graph_local_node_weight(new_root_id) - .await - .expect("get graph local node weight"), - base_graph - .get_node_index_by_id(new_root_id) - .await - .expect(".."), - ); - dbg!( - new_root_id, - new_graph - .get_graph_local_node_weight(new_root_id) - .await - .expect("get graph local node weight"), - new_graph - .get_node_index_by_id(new_root_id) - .await - .expect(".."), - ); - - // base_graph.tiny_dot_to_file(Some("base_graph")).await; - // new_graph.tiny_dot_to_file(Some("new_graph")).await; - // - let (conflicts, updates) = new_graph - .detect_conflicts_and_updates( - new_change_set.vector_clock_id(), - &base_graph, - base_change_set.vector_clock_id(), - ) - .await - .expect("Unable to detect conflicts and updates"); - - assert_eq!( - vec![Conflict::NodeContent { - onto: base_graph - .get_node_index_by_id(component_id) - .await - .expect("Unable to get component NodeIndex"), - to_rebase: new_graph - .get_node_index_by_id(component_id) - .await - .expect("Unable to get component NodeIndex"), - }], - conflicts - ); - assert_eq!(Vec::<Update>::new(), updates); -} - -#[test] -async fn detect_conflicts_and_updates_simple_with_modify_removed_item_conflict(ctx: &DalContext) { - let empty_change_set = ChangeSet::new_local().expect("Unable to create ChangeSet"); - let base_change_set = &empty_change_set; - let base_graph = WorkspaceSnapshot::empty(ctx, base_change_set) - .await - .expect("should 
create snapshot"); - let base_root_id = base_graph.root_id().await.expect("should get root id"); - - let schema_id = base_change_set - .generate_ulid() - .expect("Unable to generate Ulid"); - base_graph - .add_node( - NodeWeight::new_content( - base_change_set, - schema_id, - ContentAddress::Schema(ContentHash::from("Schema A")), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add Schema A"); - - let schema_variant_id = base_change_set - .generate_ulid() - .expect("Unable to generate Ulid"); - base_graph - .add_node( - NodeWeight::new_content( - base_change_set, - schema_variant_id, - ContentAddress::SchemaVariant(ContentHash::from("Schema Variant A")), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add Schema Variant A"); - - base_graph - .add_edge( - dbg!(base_root_id), - EdgeWeight::new(base_change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - dbg!(schema_id), - ) - .await - .expect("Unable to add root -> schema edge"); - base_graph - .add_edge( - schema_id, - EdgeWeight::new(base_change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - schema_variant_id, - ) - .await - .expect("Unable to add schema -> schema variant edge"); - - let component_id = base_change_set - .generate_ulid() - .expect("Unable to generate Ulid"); - let _component_index = base_graph - .add_node( - NodeWeight::new_content( - base_change_set, - component_id, - ContentAddress::Component(ContentHash::from("Component A")), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add Component A"); - base_graph - .add_edge( - base_root_id, - EdgeWeight::new(base_change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - component_id, - ) - .await - .expect("Unable to add root -> component edge"); - base_graph - .add_edge( - component_id, - EdgeWeight::new(base_change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - schema_variant_id, - ) - .await - .expect("Unable to add component -> schema variant edge"); - - base_graph.cleanup().await.expect("should clean up"); - - let new_change_set = ChangeSet::new_local().expect("Unable to create ChangeSet"); - let new_change_set = &new_change_set; - let base_root_id = base_graph.root_id().await.expect("get root id"); - let new_graph = base_graph.real_clone().await; - - base_graph - .remove_edge_for_ulids( - base_change_set, - base_root_id, - component_id, - EdgeWeightKindDiscriminants::Use, - ) - .await - .expect("Unable to remove Component A"); - - base_graph.cleanup().await.expect("should clean up"); - - new_graph - .update_content( - new_change_set, - component_id, - ContentHash::from("Updated Component A"), - ) - .await - .expect("Unable to update Component A"); - - new_graph.cleanup().await.expect("should clean up"); - - let (conflicts, updates) = new_graph - .detect_conflicts_and_updates( - new_change_set.vector_clock_id(), - &base_graph, - base_change_set.vector_clock_id(), - ) - .await - .expect("Unable to detect conflicts and updates"); - - assert_eq!( - vec![Conflict::ModifyRemovedItem( - new_graph - .get_node_index_by_id(component_id) - .await - .expect("Unable to get NodeIndex") - )], - conflicts - ); - assert_eq!(Vec::<Update>::new(), updates); -} - -#[test] -async fn detect_conflicts_and_updates_complex(ctx: &DalContext) { - let empty_change_set = ChangeSet::new_local().expect("Unable to create ChangeSet"); - let base_change_set = &empty_change_set; - let base_graph = 
WorkspaceSnapshot::empty(ctx, base_change_set) - .await - .expect("Unable to create WorkspaceSnapshotGraph"); - let base_root_id = base_graph.root_id().await.expect("unable to get root id"); - - // Docker Image Schema - let docker_image_schema_id = base_change_set - .generate_ulid() - .expect("Unable to generate Ulid"); - let _docker_image_schema_index = base_graph - .add_node( - NodeWeight::new_content( - base_change_set, - docker_image_schema_id, - ContentAddress::Schema(ContentHash::from("first")), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add Schema A"); - - base_graph - .add_edge( - base_root_id, - EdgeWeight::new(base_change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - docker_image_schema_id, - ) - .await - .expect("Unable to add root -> schema edge"); - - // Docker Image Schema Variant - let docker_image_schema_variant_id = base_change_set - .generate_ulid() - .expect("Unable to generate Ulid"); - base_graph - .add_node( - NodeWeight::new_content( - base_change_set, - docker_image_schema_variant_id, - ContentAddress::SchemaVariant(ContentHash::from("first")), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add Schema Variant A"); - base_graph - .add_edge( - docker_image_schema_id, - EdgeWeight::new(base_change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - docker_image_schema_variant_id, - ) - .await - .expect("Unable to add schema -> schema variant edge"); - - // Nginx Docker Image Component - let nginx_docker_image_component_id = base_change_set - .generate_ulid() - .expect("Unable to generate Ulid"); - let _nginx_docker_image_component_index = base_graph - .add_node( - NodeWeight::new_content( - base_change_set, - nginx_docker_image_component_id, - ContentAddress::Component(ContentHash::from("first")), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add Component A"); - base_graph - .add_edge( - base_root_id, - EdgeWeight::new(base_change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - nginx_docker_image_component_id, - ) - .await - .expect("Unable to add root -> component edge"); - base_graph - .add_edge( - nginx_docker_image_component_id, - EdgeWeight::new(base_change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - docker_image_schema_variant_id, - ) - .await - .expect("Unable to add component -> schema variant edge"); - - // Alpine Component - let alpine_component_id = base_change_set - .generate_ulid() - .expect("Unable to generate Ulid"); - base_graph - .add_node( - NodeWeight::new_content( - base_change_set, - alpine_component_id, - ContentAddress::Component(ContentHash::from("first")), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add Component A"); - base_graph - .add_edge( - base_root_id, - EdgeWeight::new(base_change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - alpine_component_id, - ) - .await - .expect("Unable to add root -> component edge"); - base_graph - .add_edge( - alpine_component_id, - EdgeWeight::new(base_change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - docker_image_schema_variant_id, - ) - .await - .expect("Unable to add component -> schema variant edge"); - - // Butane Schema - let butane_schema_id = base_change_set - .generate_ulid() - .expect("Unable to generate Ulid"); - let _butane_schema_index = base_graph - .add_node( - 
NodeWeight::new_content( - base_change_set, - butane_schema_id, - ContentAddress::Schema(ContentHash::from("first")), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add Schema A"); - base_graph - .add_edge( - base_root_id, - EdgeWeight::new(base_change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - butane_schema_id, - ) - .await - .expect("Unable to add root -> schema edge"); - - // Butane Schema Variant - let butane_schema_variant_id = base_change_set - .generate_ulid() - .expect("Unable to generate Ulid"); - base_graph - .add_node( - NodeWeight::new_content( - base_change_set, - butane_schema_variant_id, - ContentAddress::SchemaVariant(ContentHash::from("first")), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add Schema Variant A"); - base_graph - .add_edge( - butane_schema_id, - EdgeWeight::new(base_change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - butane_schema_variant_id, - ) - .await - .expect("Unable to add schema -> schema variant edge"); - - // Nginx Butane Component - let nginx_butane_component_id = base_change_set - .generate_ulid() - .expect("Unable to generate Ulid"); - let _nginx_butane_node_index = base_graph - .add_node( - NodeWeight::new_content( - base_change_set, - nginx_butane_component_id, - ContentAddress::Component(ContentHash::from("first")), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add Schema Variant A"); - base_graph - .add_edge( - base_root_id, - EdgeWeight::new(base_change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - nginx_butane_component_id, - ) - .await - .expect("Unable to add root -> component edge"); - base_graph - .add_edge( - nginx_butane_component_id, - EdgeWeight::new(base_change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - butane_schema_variant_id, - ) - .await - .expect("Unable to add component -> schema variant edge"); - - base_graph.cleanup().await.expect("should clean up"); - - // Create a new change set to cause some problems! - let new_change_set = ChangeSet::new_local().expect("Unable to create ChangeSet"); - let new_change_set = &new_change_set; - let new_graph = base_graph.real_clone().await; - - // Create a modify removed item conflict. - base_graph - .remove_edge_for_ulids( - base_change_set, - base_root_id, - nginx_butane_component_id, - EdgeWeightKindDiscriminants::Use, - ) - .await - .expect("Unable to update the component"); - new_graph - .update_content( - new_change_set, - nginx_butane_component_id, - ContentHash::from("second"), - ) - .await - .expect("Unable to update the component"); - - // Create a node content conflict. - base_graph - .update_content( - base_change_set, - docker_image_schema_variant_id, - ContentHash::from("oopsie"), - ) - .await - .expect("Unable to update the component"); - new_graph - .update_content( - new_change_set, - docker_image_schema_variant_id, - ContentHash::from("poopsie"), - ) - .await - .expect("Unable to update the component"); - - // Create a pure update. 
- base_graph - .update_content( - base_change_set, - docker_image_schema_id, - ContentHash::from("bg3"), - ) - .await - .expect("Unable to update the schema"); - - let (conflicts, updates) = new_graph - .detect_conflicts_and_updates( - new_change_set.vector_clock_id(), - &base_graph, - base_change_set.vector_clock_id(), - ) - .await - .expect("Unable to detect conflicts and updates"); - - let expected_conflicts = vec![ - Conflict::ModifyRemovedItem( - new_graph - .get_node_index_by_id(nginx_butane_component_id) - .await - .expect("Unable to get component NodeIndex"), - ), - Conflict::NodeContent { - onto: base_graph - .get_node_index_by_id(docker_image_schema_variant_id) - .await - .expect("Unable to get component NodeIndex"), - to_rebase: new_graph - .get_node_index_by_id(docker_image_schema_variant_id) - .await - .expect("Unable to get component NodeIndex"), - }, - ]; - let expected_updates = vec![Update::ReplaceSubgraph { - onto: base_graph - .get_node_index_by_id(docker_image_schema_id) - .await - .expect("Unable to get NodeIndex"), - to_rebase: new_graph - .get_node_index_by_id(docker_image_schema_id) - .await - .expect("Unable to get NodeIndex"), - }]; - - assert_eq!( - ConflictsAndUpdates { - conflicts: expected_conflicts, - updates: expected_updates, - }, - ConflictsAndUpdates { conflicts, updates }, - ); -} - -#[test] -async fn add_ordered_node(ctx: &DalContext) { - let change_set = ChangeSet::new_local().expect("Unable to create ChangeSet"); - let change_set = &change_set; - let graph = WorkspaceSnapshot::empty(ctx, change_set) - .await - .expect("Unable to create WorkspaceSnapshotGraph"); - - let root_id = graph - .root_id() - .await - .expect("couldn't get root id for graph"); - - let schema_id = change_set.generate_ulid().expect("Unable to generate Ulid"); - let _schema_index = graph - .add_node( - NodeWeight::new_content( - change_set, - schema_id, - ContentAddress::Schema(ContentHash::new( - SchemaId::generate().to_string().as_bytes(), - )), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add schema"); - let schema_variant_id = change_set.generate_ulid().expect("Unable to generate Ulid"); - let _schema_variant_index = graph - .add_node( - NodeWeight::new_content( - change_set, - schema_variant_id, - ContentAddress::SchemaVariant(ContentHash::new( - SchemaVariantId::generate().to_string().as_bytes(), - )), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add schema variant"); - - graph - .add_edge( - root_id, - EdgeWeight::new(change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - schema_id, - ) - .await - .expect("Unable to add root -> schema edge"); - graph - .add_edge( - schema_id, - EdgeWeight::new(change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - schema_variant_id, - ) - .await - .expect("Unable to add schema -> schema variant edge"); - - let func_id = change_set.generate_ulid().expect("Unable to generate Ulid"); - let _func_index = graph - .add_node( - NodeWeight::new_content( - change_set, - func_id, - ContentAddress::Func(ContentHash::new(FuncId::generate().to_string().as_bytes())), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add func"); - graph - .add_edge( - root_id, - EdgeWeight::new(change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - func_id, - ) - .await - .expect("Unable to add root -> func edge"); - - let prop_id = change_set.generate_ulid().expect("Unable to generate 
Ulid"); - let _prop_index = graph - .add_ordered_node( - change_set, - NodeWeight::new_content( - change_set, - prop_id, - ContentAddress::Prop(ContentHash::new(PropId::generate().to_string().as_bytes())), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add prop"); - graph - .add_edge( - schema_variant_id, - EdgeWeight::new(change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - prop_id, - ) - .await - .expect("Unable to add schema variant -> prop edge"); - graph - .add_edge( - prop_id, - EdgeWeight::new(change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - func_id, - ) - .await - .expect("Unable to add prop -> func edge"); - graph.cleanup().await.expect("should clean up"); - - let ordered_prop_1_id = change_set.generate_ulid().expect("Unable to generate Ulid"); - let _ordered_prop_1_index = graph - .add_node( - NodeWeight::new_content( - change_set, - ordered_prop_1_id, - ContentAddress::Prop(ContentHash::new(ordered_prop_1_id.to_string().as_bytes())), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add ordered prop"); - graph - .add_ordered_edge( - change_set, - prop_id, - EdgeWeight::new(change_set, EdgeWeightKind::new_use()) - .expect("Unable to create uses edge weight"), - ordered_prop_1_id, - ) - .await - .expect("Unable to add prop -> ordered_prop_1 edge"); - - let ordered_prop_2_id = change_set.generate_ulid().expect("Unable to generate Ulid"); - let _ordered_prop_2_index = graph - .add_node( - NodeWeight::new_content( - change_set, - ordered_prop_2_id, - ContentAddress::Prop(ContentHash::new(ordered_prop_2_id.to_string().as_bytes())), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add ordered prop"); - graph - .add_ordered_edge( - change_set, - prop_id, - EdgeWeight::new(change_set, EdgeWeightKind::new_use()) - .expect("Unable to create uses edge weight"), - ordered_prop_2_id, - ) - .await - .expect("Unable to add prop -> ordered_prop_2 edge"); - - let ordered_prop_3_id = change_set.generate_ulid().expect("Unable to generate Ulid"); - let _ordered_prop_3_index = graph - .add_node( - NodeWeight::new_content( - change_set, - ordered_prop_3_id, - ContentAddress::Prop(ContentHash::new(ordered_prop_3_id.to_string().as_bytes())), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add ordered prop"); - graph - .add_ordered_edge( - change_set, - prop_id, - EdgeWeight::new(change_set, EdgeWeightKind::new_use()) - .expect("Unable to create uses edge weight"), - ordered_prop_3_id, - ) - .await - .expect("Unable to add prop -> ordered_prop_3 edge"); - graph.cleanup().await.expect("should clean up"); - - assert_eq!( - vec![ordered_prop_1_id, ordered_prop_2_id, ordered_prop_3_id,], - graph - .ordered_children_for_node(prop_id) - .await - .expect("Unable to find ordered children for node") - .expect("Node is not an ordered node") - ); -} - -#[test] -async fn reorder_ordered_node(ctx: &DalContext) { - let change_set = ChangeSet::new_local().expect("Unable to create ChangeSet"); - let change_set = &change_set; - let graph = WorkspaceSnapshot::empty(ctx, change_set) - .await - .expect("Unable to create WorkspaceSnapshotGraph"); - - let root_id = graph.root_id().await.expect("get root id"); - let schema_id = change_set.generate_ulid().expect("Unable to generate Ulid"); - let _schema_index = graph - .add_node( - NodeWeight::new_content( - change_set, - schema_id, - ContentAddress::Schema(ContentHash::new( - 
SchemaId::generate().to_string().as_bytes(), - )), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add schema"); - let schema_variant_id = change_set.generate_ulid().expect("Unable to generate Ulid"); - let _schema_variant_index = graph - .add_node( - NodeWeight::new_content( - change_set, - schema_variant_id, - ContentAddress::SchemaVariant(ContentHash::new( - SchemaVariantId::generate().to_string().as_bytes(), - )), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add schema variant"); - - graph - .add_edge( - root_id, - EdgeWeight::new(change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - schema_id, - ) - .await - .expect("Unable to add root -> schema edge"); - graph - .add_edge( - schema_id, - EdgeWeight::new(change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - schema_variant_id, - ) - .await - .expect("Unable to add schema -> schema variant edge"); - - let func_id = change_set.generate_ulid().expect("Unable to generate Ulid"); - let _func_index = graph - .add_node( - NodeWeight::new_content( - change_set, - func_id, - ContentAddress::Func(ContentHash::new(FuncId::generate().to_string().as_bytes())), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add func"); - graph - .add_edge( - root_id, - EdgeWeight::new(change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - func_id, - ) - .await - .expect("Unable to add root -> func edge"); - - let prop_id = change_set.generate_ulid().expect("Unable to generate Ulid"); - let _prop_index = graph - .add_ordered_node( - change_set, - NodeWeight::new_content( - change_set, - prop_id, - ContentAddress::Prop(ContentHash::new(PropId::generate().to_string().as_bytes())), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add prop"); - graph - .add_edge( - schema_variant_id, - EdgeWeight::new(change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - prop_id, - ) - .await - .expect("Unable to add schema variant -> prop edge"); - graph - .add_edge( - prop_id, - EdgeWeight::new(change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - func_id, - ) - .await - .expect("Unable to add prop -> func edge"); - graph.cleanup().await.expect("should clean up"); - - let ordered_prop_1_id = change_set.generate_ulid().expect("Unable to generate Ulid"); - let _ordered_prop_1_index = graph - .add_node( - NodeWeight::new_content( - change_set, - ordered_prop_1_id, - ContentAddress::Prop(ContentHash::new(ordered_prop_1_id.to_string().as_bytes())), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add ordered prop"); - graph - .add_ordered_edge( - change_set, - prop_id, - EdgeWeight::new(change_set, EdgeWeightKind::new_use()) - .expect("Unable to create uses edge weight"), - ordered_prop_1_id, - ) - .await - .expect("Unable to add prop -> ordered_prop_1 edge"); - - let ordered_prop_2_id = change_set.generate_ulid().expect("Unable to generate Ulid"); - let _ordered_prop_2_index = graph - .add_node( - NodeWeight::new_content( - change_set, - ordered_prop_2_id, - ContentAddress::Prop(ContentHash::new(ordered_prop_2_id.to_string().as_bytes())), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add ordered prop"); - graph - .add_ordered_edge( - change_set, - prop_id, - EdgeWeight::new(change_set, EdgeWeightKind::new_use()) - .expect("Unable to create uses edge 
weight"), - ordered_prop_2_id, - ) - .await - .expect("Unable to add prop -> ordered_prop_2 edge"); - - let ordered_prop_3_id = change_set.generate_ulid().expect("Unable to generate Ulid"); - let _ordered_prop_3_index = graph - .add_node( - NodeWeight::new_content( - change_set, - ordered_prop_3_id, - ContentAddress::Prop(ContentHash::new(ordered_prop_3_id.to_string().as_bytes())), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add ordered prop"); - graph - .add_ordered_edge( - change_set, - prop_id, - EdgeWeight::new(change_set, EdgeWeightKind::new_use()) - .expect("Unable to create uses edge weight"), - ordered_prop_3_id, - ) - .await - .expect("Unable to add prop -> ordered_prop_3 edge"); - - let ordered_prop_4_id = change_set.generate_ulid().expect("Unable to generate Ulid"); - let _ordered_prop_4_index = graph - .add_node( - NodeWeight::new_content( - change_set, - ordered_prop_4_id, - ContentAddress::Prop(ContentHash::new(ordered_prop_4_id.to_string().as_bytes())), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add ordered prop"); - graph - .add_ordered_edge( - change_set, - prop_id, - EdgeWeight::new(change_set, EdgeWeightKind::new_use()) - .expect("Unable to create uses edge weight"), - ordered_prop_4_id, - ) - .await - .expect("Unable to add prop -> ordered_prop_4 edge"); - - graph.cleanup().await.expect("should clean up"); - - assert_eq!( - vec![ - ordered_prop_1_id, - ordered_prop_2_id, - ordered_prop_3_id, - ordered_prop_4_id, - ], - graph - .ordered_children_for_node(prop_id) - .await - .expect("Unable to find ordered children for node") - .expect("Node is not an ordered node") - ); - - let new_order = vec![ - ordered_prop_2_id, - ordered_prop_1_id, - ordered_prop_4_id, - ordered_prop_3_id, - ]; - - graph - .update_order(change_set, prop_id, new_order) - .await - .expect("Unable to update order of prop's children"); - - assert_eq!( - vec![ - ordered_prop_2_id, - ordered_prop_1_id, - ordered_prop_4_id, - ordered_prop_3_id, - ], - graph - .ordered_children_for_node(prop_id) - .await - .expect("Unable to find ordered children for node") - .expect("Node is not an ordered node") - ); -} -// -#[test] -async fn remove_unordered_node_and_detect_edge_removal(ctx: &DalContext) { - let empty_change_set = ChangeSet::new_local().expect("Unable to create ChangeSet"); - let empty_change_set = &empty_change_set; - let graph = WorkspaceSnapshot::empty(ctx, empty_change_set) - .await - .expect("Unable to create WorkspaceSnapshotGraph"); - - let root_id = graph.root_id().await.expect("unable to get root id"); - - let schema_id = empty_change_set - .generate_ulid() - .expect("Unable to generate Ulid"); - let _schema_index = graph - .add_node( - NodeWeight::new_content( - empty_change_set, - schema_id, - ContentAddress::Schema(ContentHash::new( - SchemaId::generate().to_string().as_bytes(), - )), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add schema"); - let schema_variant_id = empty_change_set - .generate_ulid() - .expect("Unable to generate Ulid"); - let schema_variant_index = graph - .add_node( - NodeWeight::new_content( - empty_change_set, - schema_variant_id, - ContentAddress::SchemaVariant(ContentHash::new( - SchemaVariantId::generate().to_string().as_bytes(), - )), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add schema variant"); - - graph - .add_edge( - root_id, - EdgeWeight::new(empty_change_set, EdgeWeightKind::new_use()) - .expect("Unable to create 
EdgeWeight"), - schema_id, - ) - .await - .expect("Unable to add root -> schema edge"); - graph - .add_edge( - schema_id, - EdgeWeight::new(empty_change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - schema_variant_id, - ) - .await - .expect("Unable to add schema -> schema variant edge"); - - let schema_variant_2_id = empty_change_set - .generate_ulid() - .expect("Unable to generate Ulid"); - let schema_variant_2_index = graph - .add_node( - NodeWeight::new_content( - empty_change_set, - schema_variant_2_id, - ContentAddress::SchemaVariant(ContentHash::new( - SchemaVariantId::generate().to_string().as_bytes(), - )), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add schema variant"); - - graph - .add_edge( - schema_id, - EdgeWeight::new(empty_change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - schema_variant_2_id, - ) - .await - .expect("Unable to add schema -> schema variant edge"); - - let expected_edges = HashSet::from([schema_variant_2_index, schema_variant_index]); - - let existing_edges: HashSet = graph - .edges_directed(schema_id, Outgoing) - .await - .expect("able to get edges directed") - .into_iter() - .map(|(_, _, target)| target) - .collect(); - - assert_eq!( - expected_edges, existing_edges, - "confirm edges are there before deleting" - ); - - graph - .mark_graph_seen(empty_change_set.vector_clock_id()) - .await - .expect("Unable to mark empty graph as seen"); - - let graph_with_deleted_edge = graph.real_clone().await; - let new_change_set = ChangeSet::new_local().expect("Unable to create ChangeSet"); - let new_change_set = &new_change_set; - - graph_with_deleted_edge - .remove_edge( - new_change_set, - graph_with_deleted_edge - .get_node_index_by_id(schema_id) - .await - .expect("Unable to get NodeIndex for schema"), - schema_variant_2_index, - EdgeWeightKindDiscriminants::Use, - ) - .await - .expect("Edge removal failed"); - - let existing_edges: Vec = graph_with_deleted_edge - .edges_directed(schema_id, Outgoing) - .await - .expect("able to get edges directed") - .into_iter() - .map(|(_, _, target)| target) - .collect(); - - assert_eq!( - vec![schema_variant_index], - existing_edges, - "confirm edges after deletion" - ); - - graph_with_deleted_edge - .mark_graph_seen(new_change_set.vector_clock_id()) - .await - .expect("Unable to mark new graph as seen"); - - let (conflicts, updates) = graph - .detect_conflicts_and_updates( - empty_change_set.vector_clock_id(), - &graph_with_deleted_edge, - new_change_set.vector_clock_id(), - ) - .await - .expect("Failed to detect conflicts and updates"); - - assert!(conflicts.is_empty()); - assert_eq!(1, updates.len()); - - assert!(matches!( - updates.first().expect("should be there"), - Update::RemoveEdge { .. 
} - )); -} - -#[test] -async fn remove_unordered_node(ctx: &DalContext) { - let change_set = ChangeSet::new_local().expect("Unable to create ChangeSet"); - let change_set = &change_set; - let graph = WorkspaceSnapshot::empty(ctx, change_set) - .await - .expect("Unable to create WorkspaceSnapshotGraph"); - let root_id = graph.root_id().await.expect("unable to get root id"); - - let schema_id = change_set.generate_ulid().expect("Unable to generate Ulid"); - let _schema_index = graph - .add_node( - NodeWeight::new_content( - change_set, - schema_id, - ContentAddress::Schema(ContentHash::new( - SchemaId::generate().to_string().as_bytes(), - )), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add schema"); - let schema_variant_id = change_set.generate_ulid().expect("Unable to generate Ulid"); - let schema_variant_index = graph - .add_node( - NodeWeight::new_content( - change_set, - schema_variant_id, - ContentAddress::SchemaVariant(ContentHash::new( - SchemaVariantId::generate().to_string().as_bytes(), - )), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add schema variant"); - - graph - .add_edge( - root_id, - EdgeWeight::new(change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - schema_id, - ) - .await - .expect("Unable to add root -> schema edge"); - graph - .add_edge( - schema_id, - EdgeWeight::new(change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - schema_variant_id, - ) - .await - .expect("Unable to add schema -> schema variant edge"); - - let schema_variant_2_id = change_set.generate_ulid().expect("Unable to generate Ulid"); - let schema_variant_2_index = graph - .add_node( - NodeWeight::new_content( - change_set, - schema_variant_2_id, - ContentAddress::SchemaVariant(ContentHash::new( - SchemaVariantId::generate().to_string().as_bytes(), - )), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add schema variant"); - - graph - .add_edge( - schema_id, - EdgeWeight::new(change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - schema_variant_2_id, - ) - .await - .expect("Unable to add schema -> schema variant edge"); - - let expected_edges = HashSet::from([schema_variant_2_index, schema_variant_index]); - - let existing_edges: HashSet = graph - .edges_directed(schema_id, Outgoing) - .await - .expect("unable to get edges directed") - .into_iter() - .map(|(_, _, target)| target) - .collect(); - - assert_eq!( - expected_edges, existing_edges, - "confirm edges are there before deleting" - ); - - graph - .remove_edge_for_ulids( - change_set, - schema_id, - schema_variant_2_id, - EdgeWeightKindDiscriminants::Use, - ) - .await - .expect("Edge removal failed"); - - let existing_edges: Vec = graph - .edges_directed(schema_id, Outgoing) - .await - .expect("unable to get edges directed") - .into_iter() - .map(|(_, _, target)| target) - .collect(); - - assert_eq!( - vec![schema_variant_index], - existing_edges, - "confirm edges after deletion" - ); -} - -#[test] -async fn remove_ordered_node(ctx: &DalContext) { - let change_set = ChangeSet::new_local().expect("Unable to create ChangeSet"); - let change_set = &change_set; - let graph = WorkspaceSnapshot::empty(ctx, change_set) - .await - .expect("should create snapshot"); - let root_id = graph.root_id().await.expect("should get root id"); - - let schema_id = change_set.generate_ulid().expect("Unable to generate Ulid"); - graph - .add_node( - NodeWeight::new_content( - 
change_set, - schema_id, - ContentAddress::Schema(ContentHash::new( - SchemaId::generate().to_string().as_bytes(), - )), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add schema"); - let schema_variant_id = change_set.generate_ulid().expect("Unable to generate Ulid"); - graph - .add_node( - NodeWeight::new_content( - change_set, - schema_variant_id, - ContentAddress::SchemaVariant(ContentHash::new( - SchemaVariantId::generate().to_string().as_bytes(), - )), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add schema variant"); - - graph - .add_edge( - root_id, - EdgeWeight::new(change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - schema_id, - ) - .await - .expect("Unable to add root -> schema edge"); - graph - .add_edge( - schema_id, - EdgeWeight::new(change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - schema_variant_id, - ) - .await - .expect("Unable to add schema -> schema variant edge"); - - let func_id = change_set.generate_ulid().expect("Unable to generate Ulid"); - graph - .add_node( - NodeWeight::new_content( - change_set, - func_id, - ContentAddress::Func(ContentHash::new(FuncId::generate().to_string().as_bytes())), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add func"); - graph - .add_edge( - root_id, - EdgeWeight::new(change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - func_id, - ) - .await - .expect("Unable to add root -> func edge"); - - let root_prop_id = change_set.generate_ulid().expect("Unable to generate Ulid"); - graph - .add_ordered_node( - change_set, - NodeWeight::new_content( - change_set, - root_prop_id, - ContentAddress::Prop(ContentHash::new(PropId::generate().to_string().as_bytes())), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add prop"); - graph - .add_edge( - schema_variant_id, - EdgeWeight::new(change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - root_prop_id, - ) - .await - .expect("Unable to add schema variant -> prop edge"); - graph - .add_edge( - root_prop_id, - EdgeWeight::new(change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - func_id, - ) - .await - .expect("Unable to add prop -> func edge"); - graph.cleanup().await.expect("should clean up"); - // graph.dot(); - - let ordered_prop_1_id = change_set.generate_ulid().expect("Unable to generate Ulid"); - graph - .add_node( - NodeWeight::new_content( - change_set, - ordered_prop_1_id, - ContentAddress::Prop(ContentHash::new(ordered_prop_1_id.to_string().as_bytes())), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add ordered prop"); - graph - .add_ordered_edge( - change_set, - root_prop_id, - EdgeWeight::new(change_set, EdgeWeightKind::new_use()) - .expect("Unable to create uses edge weight"), - ordered_prop_1_id, - ) - .await - .expect("Unable to add prop -> ordered_prop_1 edge"); - - let ordered_prop_2_id = change_set.generate_ulid().expect("Unable to generate Ulid"); - let ordered_prop_2_index = graph - .add_node( - NodeWeight::new_content( - change_set, - ordered_prop_2_id, - ContentAddress::Prop(ContentHash::new(ordered_prop_2_id.to_string().as_bytes())), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add ordered prop"); - graph - .add_ordered_edge( - change_set, - root_prop_id, - EdgeWeight::new(change_set, EdgeWeightKind::new_use()) - .expect("Unable 
to create uses edge weight"), - ordered_prop_2_id, - ) - .await - .expect("Unable to add prop -> ordered_prop_2 edge"); - - let ordered_prop_3_id = change_set.generate_ulid().expect("Unable to generate Ulid"); - graph - .add_node( - NodeWeight::new_content( - change_set, - ordered_prop_3_id, - ContentAddress::Prop(ContentHash::new(ordered_prop_3_id.to_string().as_bytes())), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add ordered prop"); - graph - .add_ordered_edge( - change_set, - root_prop_id, - EdgeWeight::new(change_set, EdgeWeightKind::new_use()) - .expect("Unable to create uses edge weight"), - ordered_prop_3_id, - ) - .await - .expect("Unable to add prop -> ordered_prop_3 edge"); - - let ordered_prop_4_id = change_set.generate_ulid().expect("Unable to generate Ulid"); - graph - .add_node( - NodeWeight::new_content( - change_set, - ordered_prop_4_id, - ContentAddress::Prop(ContentHash::new(ordered_prop_4_id.to_string().as_bytes())), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add ordered prop"); - graph - .add_ordered_edge( - change_set, - root_prop_id, - EdgeWeight::new(change_set, EdgeWeightKind::new_use()) - .expect("Unable to create uses edge weight"), - ordered_prop_4_id, - ) - .await - .expect("Unable to add prop -> ordered_prop_4 edge"); - - graph.cleanup().await.expect("should clean up"); - // graph.dot(); - - assert_eq!( - vec![ - ordered_prop_1_id, - ordered_prop_2_id, - ordered_prop_3_id, - ordered_prop_4_id, - ], - graph - .ordered_children_for_node(root_prop_id,) - .await - .expect("Unable to find ordered children for node") - .expect("Node is not an ordered node") - ); - - graph - .remove_edge( - change_set, - graph - .get_node_index_by_id(root_prop_id) - .await - .expect("Unable to get NodeIndex for prop"), - ordered_prop_2_index, - EdgeWeightKindDiscriminants::Use, - ) - .await - .expect("Unable to remove prop -> ordered_prop_2 edge"); - - assert_eq!( - vec![ordered_prop_1_id, ordered_prop_3_id, ordered_prop_4_id,], - graph - .ordered_children_for_node(root_prop_id,) - .await - .expect("Unable to find ordered children for node") - .expect("Node is not an ordered node") - ); - if let NodeWeight::Ordering(ordering_weight) = graph - .get_node_weight( - graph - .get_node_index_by_id( - graph - .ordering_node_for_container(root_prop_id) - .await - .expect("Error getting ordering NodeIndex for prop") - .expect("Unable to find ordering NodeIndex") - .id(), - ) - .await - .expect("why am I doing this"), - ) - .await - .expect("Unable to get node weight") - .as_ref() - { - assert_eq!( - &vec![ordered_prop_1_id, ordered_prop_3_id, ordered_prop_4_id], - ordering_weight.order() - ); - } else { - panic!("Unable to destructure ordering node weight"); - } -} - -#[test] -async fn detect_conflicts_and_updates_simple_ordering_no_conflicts_no_updates_in_base( - ctx: &DalContext, -) { - let empty_change_set = ChangeSet::new_local().expect("Unable to create ChangeSet"); - let empty_change_set = &empty_change_set; - let empty_graph = WorkspaceSnapshot::empty(ctx, empty_change_set) - .await - .expect("should create snapshot"); - let empty_root_id = empty_graph.root_id().await.expect("should get root id"); - - let schema_id = empty_change_set - .generate_ulid() - .expect("Unable to generate Ulid"); - empty_graph - .add_node( - NodeWeight::new_content( - empty_change_set, - schema_id, - ContentAddress::Schema(ContentHash::from("Schema A")), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add 
Schema A"); - let schema_variant_id = empty_change_set - .generate_ulid() - .expect("Unable to generate Ulid"); - empty_graph - .add_node( - NodeWeight::new_content( - empty_change_set, - schema_variant_id, - ContentAddress::SchemaVariant(ContentHash::from("Schema Variant A")), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add Schema Variant A"); - - empty_graph - .add_edge( - empty_root_id, - EdgeWeight::new(empty_change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - schema_id, - ) - .await - .expect("Unable to add root -> schema edge"); - empty_graph - .add_edge( - schema_id, - EdgeWeight::new(empty_change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - schema_variant_id, - ) - .await - .expect("Unable to add schema -> schema variant edge"); - - let container_prop_id = empty_change_set - .generate_ulid() - .expect("Unable to generate Ulid"); - empty_graph - .add_ordered_node( - empty_change_set, - NodeWeight::new_content( - empty_change_set, - container_prop_id, - ContentAddress::Prop(ContentHash::new(container_prop_id.to_string().as_bytes())), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add container prop"); - empty_graph - .add_edge( - schema_variant_id, - EdgeWeight::new(empty_change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - container_prop_id, - ) - .await - .expect("Unable to add schema variant -> container prop edge"); - - let ordered_prop_1_id = empty_change_set - .generate_ulid() - .expect("Unable to generate Ulid"); - empty_graph - .add_node( - NodeWeight::new_content( - empty_change_set, - ordered_prop_1_id, - ContentAddress::Prop(ContentHash::new(ordered_prop_1_id.to_string().as_bytes())), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add ordered prop 1"); - empty_graph - .add_ordered_edge( - empty_change_set, - container_prop_id, - EdgeWeight::new(empty_change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - ordered_prop_1_id, - ) - .await - .expect("Unable to add container prop -> ordered prop 1 edge"); - - let ordered_prop_2_id = empty_change_set - .generate_ulid() - .expect("Unable to generate Ulid"); - empty_graph - .add_node( - NodeWeight::new_content( - empty_change_set, - ordered_prop_2_id, - ContentAddress::Prop(ContentHash::new(ordered_prop_2_id.to_string().as_bytes())), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add ordered prop 2"); - empty_graph - .add_ordered_edge( - empty_change_set, - container_prop_id, - EdgeWeight::new(empty_change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - ordered_prop_2_id, - ) - .await - .expect("Unable to add container prop -> ordered prop 2 edge"); - - let ordered_prop_3_id = empty_change_set - .generate_ulid() - .expect("Unable to generate Ulid"); - empty_graph - .add_node( - NodeWeight::new_content( - empty_change_set, - ordered_prop_3_id, - ContentAddress::Prop(ContentHash::new(ordered_prop_3_id.to_string().as_bytes())), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add ordered prop 3"); - empty_graph - .add_ordered_edge( - empty_change_set, - container_prop_id, - EdgeWeight::new(empty_change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - ordered_prop_3_id, - ) - .await - .expect("Unable to add container prop -> ordered prop 3 edge"); - - let ordered_prop_4_id = empty_change_set - 
.generate_ulid() - .expect("Unable to generate Ulid"); - empty_graph - .add_node( - NodeWeight::new_content( - empty_change_set, - ordered_prop_4_id, - ContentAddress::Prop(ContentHash::new(ordered_prop_4_id.to_string().as_bytes())), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add ordered prop 4"); - empty_graph - .add_ordered_edge( - empty_change_set, - container_prop_id, - EdgeWeight::new(empty_change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - ordered_prop_4_id, - ) - .await - .expect("Unable to add container prop -> ordered prop 4 edge"); - - empty_graph.cleanup().await.expect("should clean up"); - // empty_graph.dot(); - - let new_change_set = ChangeSet::new_local().expect("Unable to create ChangeSet"); - let new_change_set = &new_change_set; - let new_graph = empty_graph.real_clone().await; - - let ordered_prop_5_id = new_change_set - .generate_ulid() - .expect("Unable to generate Ulid"); - new_graph - .add_node( - NodeWeight::new_content( - new_change_set, - ordered_prop_5_id, - ContentAddress::Prop(ContentHash::new(ordered_prop_5_id.to_string().as_bytes())), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add ordered prop 5"); - new_graph - .add_ordered_edge( - new_change_set, - container_prop_id, - EdgeWeight::new(new_change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - ordered_prop_5_id, - ) - .await - .expect("Unable to add container prop -> ordered prop 5 edge"); - - new_graph.cleanup().await.expect("should clean up"); - // new_graph.dot(); - - let (conflicts, updates) = new_graph - .detect_conflicts_and_updates( - new_change_set.vector_clock_id(), - &empty_graph, - empty_change_set.vector_clock_id(), - ) - .await - .expect("Unable to detect conflicts and updates"); - - assert_eq!(Vec::<Conflict>::new(), conflicts); - assert_eq!(Vec::<Update>::new(), updates); -} - -#[test] -async fn detect_conflicts_and_updates_simple_ordering_no_conflicts_with_updates_in_base( - ctx: &DalContext, -) { - let empty_change_set = ChangeSet::new_local().expect("Unable to create ChangeSet"); - let empty_change_set = &empty_change_set; - let empty_graph = WorkspaceSnapshot::empty(ctx, empty_change_set) - .await - .expect("should create snapshot"); - let empty_root_id = empty_graph.root_id().await.expect("should get root id"); - - let schema_id = empty_change_set - .generate_ulid() - .expect("Unable to generate Ulid"); - empty_graph - .add_node( - NodeWeight::new_content( - empty_change_set, - schema_id, - ContentAddress::Schema(ContentHash::from("Schema A")), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add Schema A"); - let schema_variant_id = empty_change_set - .generate_ulid() - .expect("Unable to generate Ulid"); - empty_graph - .add_node( - NodeWeight::new_content( - empty_change_set, - schema_variant_id, - ContentAddress::SchemaVariant(ContentHash::from("Schema Variant A")), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add Schema Variant A"); - - empty_graph - .add_edge( - empty_root_id, - EdgeWeight::new(empty_change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - schema_id, - ) - .await - .expect("Unable to add root -> schema edge"); - empty_graph - .add_edge( - schema_id, - EdgeWeight::new(empty_change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - schema_variant_id, - ) - .await - .expect("Unable to add schema -> schema variant edge"); - - let
container_prop_id = empty_change_set - .generate_ulid() - .expect("Unable to generate Ulid"); - empty_graph - .add_ordered_node( - empty_change_set, - NodeWeight::new_content( - empty_change_set, - container_prop_id, - ContentAddress::Prop(ContentHash::new(container_prop_id.to_string().as_bytes())), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add container prop"); - empty_graph - .add_edge( - schema_variant_id, - EdgeWeight::new(empty_change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - container_prop_id, - ) - .await - .expect("Unable to add schema variant -> container prop edge"); - - let ordered_prop_1_id = empty_change_set - .generate_ulid() - .expect("Unable to generate Ulid"); - empty_graph - .add_node( - NodeWeight::new_content( - empty_change_set, - ordered_prop_1_id, - ContentAddress::Prop(ContentHash::new(ordered_prop_1_id.to_string().as_bytes())), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add ordered prop 1"); - empty_graph - .add_ordered_edge( - empty_change_set, - container_prop_id, - EdgeWeight::new(empty_change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - ordered_prop_1_id, - ) - .await - .expect("Unable to add container prop -> ordered prop 1 edge"); - - let ordered_prop_2_id = empty_change_set - .generate_ulid() - .expect("Unable to generate Ulid"); - empty_graph - .add_node( - NodeWeight::new_content( - empty_change_set, - ordered_prop_2_id, - ContentAddress::Prop(ContentHash::new(ordered_prop_2_id.to_string().as_bytes())), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add ordered prop 2"); - empty_graph - .add_ordered_edge( - empty_change_set, - container_prop_id, - EdgeWeight::new(empty_change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - ordered_prop_2_id, - ) - .await - .expect("Unable to add container prop -> ordered prop 2 edge"); - - let ordered_prop_3_id = empty_change_set - .generate_ulid() - .expect("Unable to generate Ulid"); - empty_graph - .add_node( - NodeWeight::new_content( - empty_change_set, - ordered_prop_3_id, - ContentAddress::Prop(ContentHash::new(ordered_prop_3_id.to_string().as_bytes())), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add ordered prop 3"); - empty_graph - .add_ordered_edge( - empty_change_set, - container_prop_id, - EdgeWeight::new(empty_change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - ordered_prop_3_id, - ) - .await - .expect("Unable to add container prop -> ordered prop 3 edge"); - - let ordered_prop_4_id = empty_change_set - .generate_ulid() - .expect("Unable to generate Ulid"); - empty_graph - .add_node( - NodeWeight::new_content( - empty_change_set, - ordered_prop_4_id, - ContentAddress::Prop(ContentHash::new(ordered_prop_4_id.to_string().as_bytes())), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add ordered prop 4"); - empty_graph - .add_ordered_edge( - empty_change_set, - container_prop_id, - EdgeWeight::new(empty_change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - ordered_prop_4_id, - ) - .await - .expect("Unable to add container prop -> ordered prop 4 edge"); - - // empty_graph.dot(); - - let new_change_set = ChangeSet::new_local().expect("Unable to create ChangeSet"); - let new_change_set = &new_change_set; - let new_graph = empty_graph.real_clone().await; - - let ordered_prop_5_id = 
empty_change_set - .generate_ulid() - .expect("Unable to generate Ulid"); - empty_graph - .add_node( - NodeWeight::new_content( - empty_change_set, - ordered_prop_5_id, - ContentAddress::Prop(ContentHash::new(ordered_prop_5_id.to_string().as_bytes())), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add ordered prop 5"); - let new_edge_weight = EdgeWeight::new(empty_change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"); - empty_graph - .add_ordered_edge( - empty_change_set, - container_prop_id, - new_edge_weight.clone(), - ordered_prop_5_id, - ) - .await - .expect("Unable to add container prop -> ordered prop 5 edge"); - - // new_graph.dot(); - - let (conflicts, updates) = new_graph - .detect_conflicts_and_updates( - new_change_set.vector_clock_id(), - &empty_graph, - empty_change_set.vector_clock_id(), - ) - .await - .expect("Unable to detect conflicts and updates"); - - let empty_graph_ordering_node = empty_graph - .ordering_node_for_container(container_prop_id) - .await - .expect("Unable to get ordering node") - .expect("No Ordering Node found"); - - let new_graph_ordering_node = empty_graph - .ordering_node_for_container(container_prop_id) - .await - .expect("Unable to get ordering node") - .expect("No Ordering Node found"); - - let ordinal_edge_weight = empty_graph - .get_edges_between_nodes(new_graph_ordering_node.id(), ordered_prop_5_id) - .await - .expect("should not error when getting edge") - .first() - .expect("unable to get edge weight") - .to_owned(); - - assert_eq!(Vec::<Conflict>::new(), conflicts); - assert_eq!( - vec![ - Update::NewEdge { - source: new_graph - .get_node_index_by_id(container_prop_id) - .await - .expect("Unable to get NodeIndex"), - destination: empty_graph - .get_node_index_by_id(ordered_prop_5_id) - .await - .expect("Unable to get NodeIndex"), - edge_weight: new_edge_weight, - }, - Update::ReplaceSubgraph { - onto: empty_graph - .get_node_index_by_id(empty_graph_ordering_node.id()) - .await - .expect("Ordering NodeIndex not found"), - to_rebase: new_graph - .get_node_index_by_id(new_graph_ordering_node.id()) - .await - .expect("Ordering NodeIndex not found"), - }, - Update::NewEdge { - source: new_graph - .get_node_index_by_id(new_graph_ordering_node.id()) - .await - .expect("could not get node index by id"), - destination: empty_graph - .get_node_index_by_id(ordered_prop_5_id) - .await - .expect("could not get node index by id"), - edge_weight: ordinal_edge_weight, - } - ], - updates - ); -} - -#[test] -async fn detect_conflicts_and_updates_simple_ordering_with_conflicting_ordering_updates( - ctx: &DalContext, -) { - let empty_change_set = ChangeSet::new_local().expect("Unable to create ChangeSet"); - let empty_change_set = &empty_change_set; - let empty_graph = WorkspaceSnapshot::empty(ctx, empty_change_set) - .await - .expect("Unable to create WorkspaceSnapshotGraph"); - let empty_root_id = empty_graph.root_id().await.expect("Unable to get root id"); - - let schema_id = empty_change_set - .generate_ulid() - .expect("Unable to generate Ulid"); - empty_graph - .add_node( - NodeWeight::new_content( - empty_change_set, - schema_id, - ContentAddress::Schema(ContentHash::from("Schema A")), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add Schema A"); - let schema_variant_id = empty_change_set - .generate_ulid() - .expect("Unable to generate Ulid"); - empty_graph - .add_node( - NodeWeight::new_content( - empty_change_set, - schema_variant_id, -
ContentAddress::SchemaVariant(ContentHash::from("Schema Variant A")), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add Schema Variant A"); - - empty_graph - .add_edge( - empty_root_id, - EdgeWeight::new(empty_change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - schema_id, - ) - .await - .expect("Unable to add root -> schema edge"); - empty_graph - .add_edge( - schema_id, - EdgeWeight::new(empty_change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - schema_variant_id, - ) - .await - .expect("Unable to add schema -> schema variant edge"); - - let container_prop_id = empty_change_set - .generate_ulid() - .expect("Unable to generate Ulid"); - empty_graph - .add_ordered_node( - empty_change_set, - NodeWeight::new_content( - empty_change_set, - container_prop_id, - ContentAddress::Prop(ContentHash::new(container_prop_id.to_string().as_bytes())), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add container prop"); - empty_graph - .add_edge( - schema_variant_id, - EdgeWeight::new(empty_change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - container_prop_id, - ) - .await - .expect("Unable to add schema variant -> container prop edge"); - - let ordered_prop_1_id = empty_change_set - .generate_ulid() - .expect("Unable to generate Ulid"); - empty_graph - .add_node( - NodeWeight::new_content( - empty_change_set, - ordered_prop_1_id, - ContentAddress::Prop(ContentHash::new(ordered_prop_1_id.to_string().as_bytes())), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add ordered prop 1"); - empty_graph - .add_ordered_edge( - empty_change_set, - container_prop_id, - EdgeWeight::new(empty_change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - ordered_prop_1_id, - ) - .await - .expect("Unable to add container prop -> ordered prop 1 edge"); - - let ordered_prop_2_id = empty_change_set - .generate_ulid() - .expect("Unable to generate Ulid"); - empty_graph - .add_node( - NodeWeight::new_content( - empty_change_set, - ordered_prop_2_id, - ContentAddress::Prop(ContentHash::new(ordered_prop_2_id.to_string().as_bytes())), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add ordered prop 2"); - empty_graph - .add_ordered_edge( - empty_change_set, - container_prop_id, - EdgeWeight::new(empty_change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - ordered_prop_2_id, - ) - .await - .expect("Unable to add container prop -> ordered prop 2 edge"); - - let ordered_prop_3_id = empty_change_set - .generate_ulid() - .expect("Unable to generate Ulid"); - empty_graph - .add_node( - NodeWeight::new_content( - empty_change_set, - ordered_prop_3_id, - ContentAddress::Prop(ContentHash::new(ordered_prop_3_id.to_string().as_bytes())), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add ordered prop 3"); - empty_graph - .add_ordered_edge( - empty_change_set, - container_prop_id, - EdgeWeight::new(empty_change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - ordered_prop_3_id, - ) - .await - .expect("Unable to add container prop -> ordered prop 3 edge"); - - let ordered_prop_4_id = empty_change_set - .generate_ulid() - .expect("Unable to generate Ulid"); - empty_graph - .add_node( - NodeWeight::new_content( - empty_change_set, - ordered_prop_4_id, - 
ContentAddress::Prop(ContentHash::new(ordered_prop_4_id.to_string().as_bytes())), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add ordered prop 4"); - empty_graph - .add_ordered_edge( - empty_change_set, - container_prop_id, - EdgeWeight::new(empty_change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - ordered_prop_4_id, - ) - .await - .expect("Unable to add container prop -> ordered prop 4 edge"); - - // empty_graph.dot(); - - let new_change_set = ChangeSet::new_local().expect("Unable to create ChangeSet"); - let new_change_set = &new_change_set; - let new_graph = empty_graph.real_clone().await; - - let new_order = vec![ - ordered_prop_2_id, - ordered_prop_1_id, - ordered_prop_4_id, - ordered_prop_3_id, - ]; - new_graph - .update_order(new_change_set, container_prop_id, new_order) - .await - .expect("Unable to update order of container prop's children"); - - let ordered_prop_5_id = empty_change_set - .generate_ulid() - .expect("Unable to generate Ulid"); - empty_graph - .add_node( - NodeWeight::new_content( - empty_change_set, - ordered_prop_5_id, - ContentAddress::Prop(ContentHash::new(ordered_prop_5_id.to_string().as_bytes())), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add ordered prop 5"); - let new_edge_weight = EdgeWeight::new(empty_change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"); - empty_graph - .add_ordered_edge( - empty_change_set, - container_prop_id, - new_edge_weight.clone(), - ordered_prop_5_id, - ) - .await - .expect("Unable to add container prop -> ordered prop 5 edge"); - // new_graph.dot(); - - let (conflicts, updates) = new_graph - .detect_conflicts_and_updates( - new_change_set.vector_clock_id(), - &empty_graph, - empty_change_set.vector_clock_id(), - ) - .await - .expect("Unable to detect conflicts and updates"); - - let empty_graph_ordering_node = empty_graph - .ordering_node_for_container(container_prop_id) - .await - .expect("Unable to get ordering node") - .expect("No Ordering Node found"); - - let new_graph_ordering_node = empty_graph - .ordering_node_for_container(container_prop_id) - .await - .expect("Unable to get ordering node") - .expect("No Ordering Node found"); - - let ordinal_edge_weight = empty_graph - .get_edges_between_nodes(new_graph_ordering_node.id(), ordered_prop_5_id) - .await - .expect("should not error when getting edge") - .first() - .expect("unable to get edge weight") - .to_owned(); - - assert_eq!( - vec![Conflict::ChildOrder { - onto: empty_graph - .get_node_index_by_id(empty_graph_ordering_node.id()) - .await - .expect("Ordering NodeIndex not found"), - to_rebase: new_graph - .get_node_index_by_id(new_graph_ordering_node.id()) - .await - .expect("Ordering NodeIndex not found"), - }], - conflicts - ); - assert_eq!( - vec![ - Update::NewEdge { - source: new_graph - .get_node_index_by_id(container_prop_id) - .await - .expect("Unable to get new_graph container NodeIndex"), - destination: empty_graph - .get_node_index_by_id(ordered_prop_5_id) - .await - .expect("Unable to get ordered prop 5 NodeIndex"), - edge_weight: new_edge_weight, - }, - Update::NewEdge { - source: new_graph - .get_node_index_by_id(new_graph_ordering_node.id()) - .await - .expect("could not get node index by id"), - destination: empty_graph - .get_node_index_by_id(ordered_prop_5_id) - .await - .expect("could not get node index by id"), - edge_weight: ordinal_edge_weight, - } - ], - updates - ); -} - -#[test] -async fn 
detect_conflicts_and_updates_simple_ordering_with_no_conflicts_add_in_onto_remove_in_to_rebase( - ctx: &DalContext, -) { - let empty_change_set = ChangeSet::new_local().expect("Unable to create ChangeSet"); - let empty_change_set = &empty_change_set; - let empty_graph = WorkspaceSnapshot::empty(ctx, empty_change_set) - .await - .expect("should create snapshot"); - let empty_root_id = empty_graph.root_id().await.expect("should get root id"); - - let schema_id = empty_change_set - .generate_ulid() - .expect("Unable to generate Ulid"); - empty_graph - .add_node( - NodeWeight::new_content( - empty_change_set, - schema_id, - ContentAddress::Schema(ContentHash::from("Schema A")), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add Schema A"); - let schema_variant_id = empty_change_set - .generate_ulid() - .expect("Unable to generate Ulid"); - empty_graph - .add_node( - NodeWeight::new_content( - empty_change_set, - schema_variant_id, - ContentAddress::SchemaVariant(ContentHash::from("Schema Variant A")), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add Schema Variant A"); - - empty_graph - .add_edge( - empty_root_id, - EdgeWeight::new(empty_change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - schema_id, - ) - .await - .expect("Unable to add root -> schema edge"); - empty_graph - .add_edge( - schema_id, - EdgeWeight::new(empty_change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - schema_variant_id, - ) - .await - .expect("Unable to add schema -> schema variant edge"); - - let container_prop_id = empty_change_set - .generate_ulid() - .expect("Unable to generate Ulid"); - empty_graph - .add_ordered_node( - empty_change_set, - NodeWeight::new_content( - empty_change_set, - container_prop_id, - ContentAddress::Prop(ContentHash::new(container_prop_id.to_string().as_bytes())), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add container prop"); - empty_graph - .add_edge( - schema_variant_id, - EdgeWeight::new(empty_change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - container_prop_id, - ) - .await - .expect("Unable to add schema variant -> container prop edge"); - - let ordered_prop_1_id = empty_change_set - .generate_ulid() - .expect("Unable to generate Ulid"); - empty_graph - .add_node( - NodeWeight::new_content( - empty_change_set, - ordered_prop_1_id, - ContentAddress::Prop(ContentHash::new(ordered_prop_1_id.to_string().as_bytes())), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add ordered prop 1"); - empty_graph - .add_ordered_edge( - empty_change_set, - container_prop_id, - EdgeWeight::new(empty_change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - ordered_prop_1_id, - ) - .await - .expect("Unable to add container prop -> ordered prop 1 edge"); - - let ordered_prop_2_id = empty_change_set - .generate_ulid() - .expect("Unable to generate Ulid"); - empty_graph - .add_node( - NodeWeight::new_content( - empty_change_set, - ordered_prop_2_id, - ContentAddress::Prop(ContentHash::new(ordered_prop_2_id.to_string().as_bytes())), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add ordered prop 2"); - empty_graph - .add_ordered_edge( - empty_change_set, - container_prop_id, - EdgeWeight::new(empty_change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - ordered_prop_2_id, - ) - .await - 
.expect("Unable to add container prop -> ordered prop 2 edge"); - - let ordered_prop_3_id = empty_change_set - .generate_ulid() - .expect("Unable to generate Ulid"); - empty_graph - .add_node( - NodeWeight::new_content( - empty_change_set, - ordered_prop_3_id, - ContentAddress::Prop(ContentHash::new(ordered_prop_3_id.to_string().as_bytes())), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add ordered prop 3"); - empty_graph - .add_ordered_edge( - empty_change_set, - container_prop_id, - EdgeWeight::new(empty_change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - ordered_prop_3_id, - ) - .await - .expect("Unable to add container prop -> ordered prop 3 edge"); - - let ordered_prop_4_id = empty_change_set - .generate_ulid() - .expect("Unable to generate Ulid"); - empty_graph - .add_node( - NodeWeight::new_content( - empty_change_set, - ordered_prop_4_id, - ContentAddress::Prop(ContentHash::new(ordered_prop_4_id.to_string().as_bytes())), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add ordered prop 4"); - empty_graph - .add_ordered_edge( - empty_change_set, - container_prop_id, - EdgeWeight::new(empty_change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - ordered_prop_4_id, - ) - .await - .expect("Unable to add container prop -> ordered prop 4 edge"); - - empty_graph.cleanup().await.expect("should clean up"); - empty_graph - .mark_graph_seen(empty_change_set.vector_clock_id()) - .await - .expect("Unable to update recently seen information"); - // empty_graph.dot(); - - let new_change_set = ChangeSet::new_local().expect("Unable to create ChangeSet"); - let new_change_set = &new_change_set; - let new_graph = empty_graph.real_clone().await; - - new_graph - .remove_edge( - new_change_set, - empty_graph - .get_node_index_by_id(container_prop_id) - .await - .expect("Unable to get NodeIndex"), - empty_graph - .get_node_index_by_id(ordered_prop_2_id) - .await - .expect("Unable to get NodeIndex"), - EdgeWeightKindDiscriminants::Use, - ) - .await - .expect("Unable to remove container prop -> prop 2 edge"); - - let ordered_prop_5_id = empty_change_set - .generate_ulid() - .expect("Unable to generate Ulid"); - empty_graph - .add_node( - NodeWeight::new_content( - empty_change_set, - ordered_prop_5_id, - ContentAddress::Prop(ContentHash::new(ordered_prop_5_id.to_string().as_bytes())), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add ordered prop 5"); - - let new_edge_weight = EdgeWeight::new(empty_change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"); - empty_graph - .add_ordered_edge( - empty_change_set, - container_prop_id, - new_edge_weight.clone(), - ordered_prop_5_id, - ) - .await - .expect("Unable to add container prop -> ordered prop 5 edge"); - let ordering_node = empty_graph - .ordering_node_for_container(container_prop_id) - .await - .expect("Unable to get ordering node") - .expect("No Ordering Node found"); - let ordinal_edge_weight = empty_graph - .get_edges_between_nodes(ordering_node.id(), ordered_prop_5_id) - .await - .expect("should not error when getting edge") - .first() - .expect("unable to get edge weight") - .to_owned(); - - empty_graph.cleanup().await.expect("should clean up"); - // empty_graph.dot(); - - new_graph.cleanup().await.expect("should clean up"); - // new_graph.dot(); - - let (conflicts, updates) = new_graph - .detect_conflicts_and_updates( - new_change_set.vector_clock_id(), - &empty_graph, 
- empty_change_set.vector_clock_id(), - ) - .await - .expect("Unable to detect conflicts and updates"); - - assert_eq!(Vec::<Conflict>::new(), conflicts); - assert_eq!( - vec![ - Update::NewEdge { - source: new_graph - .get_node_index_by_id(container_prop_id) - .await - .expect("Unable to get node index"), - destination: empty_graph - .get_node_index_by_id(ordered_prop_5_id) - .await - .expect("Unable to get ordered prop 5 NodeIndex"), - edge_weight: new_edge_weight, - }, - Update::NewEdge { - source: new_graph - .get_node_index_by_id(ordering_node.id()) - .await - .expect("could not get node index by id"), - destination: empty_graph - .get_node_index_by_id(ordered_prop_5_id) - .await - .expect("could not get node index by id"), - edge_weight: ordinal_edge_weight, - } - ], - updates - ); -} - -#[test] -async fn add_ordered_node_below_root(ctx: &DalContext) { - let base_change_set = ChangeSet::new_local().expect("Unable to create ChangeSet"); - let active_change_set = &base_change_set; - let graph = WorkspaceSnapshot::empty(ctx, active_change_set) - .await - .expect("should create snapshot"); - let root_id = graph.root_id().await.expect("should get root id"); - - let prop_id = active_change_set - .generate_ulid() - .expect("Unable to generate Ulid"); - graph - .add_ordered_node( - active_change_set, - NodeWeight::new_content( - active_change_set, - prop_id, - ContentAddress::Prop(ContentHash::new(prop_id.to_string().as_bytes())), - ) - .expect("Unable to create NodeWeight"), - ) - .await - .expect("Unable to add prop"); - - graph - .add_edge( - root_id, - EdgeWeight::new(active_change_set, EdgeWeightKind::new_use()) - .expect("Unable to create EdgeWeight"), - prop_id, - ) - .await - .expect("Unable to add root -> prop edge"); - - graph.cleanup().await.expect("unable to clean graph"); - assert_eq!( - Vec::<Ulid>::new(), - graph - .ordered_children_for_node(prop_id,) - .await - .expect("Unable to find ordered children for node") - .expect("Node is not an ordered node") - ); -} diff --git a/lib/dal/tests/integration_test/graph/rebase.rs b/lib/dal/tests/integration_test/graph/rebase.rs deleted file mode 100644 index f9e3da6a28..0000000000 --- a/lib/dal/tests/integration_test/graph/rebase.rs +++ /dev/null @@ -1,189 +0,0 @@ -use dal::workspace_snapshot::update::Update; -use pretty_assertions_sorted::assert_eq; -use si_events::*; - -use dal::workspace_snapshot::edge_weight::{EdgeWeight, EdgeWeightKind}; -use dal::workspace_snapshot::node_weight::category_node_weight::CategoryNodeKind; -use dal::workspace_snapshot::node_weight::NodeWeight; -use dal::workspace_snapshot::node_weight::{ContentNodeWeight, FuncNodeWeight}; -use dal::DalContext; -use dal::{change_set::ChangeSet, workspace_snapshot::content_address::ContentAddress}; -use dal::{func::FuncKind, WorkspaceSnapshot}; -use dal_test::test; - -#[test] -async fn simulate_rebase(ctx: &DalContext) { - let to_rebase_change_set = ChangeSet::new_local().expect("Unable to create ChangeSet"); - let to_rebase_change_set = &to_rebase_change_set; - let to_rebase = WorkspaceSnapshot::initial(ctx, to_rebase_change_set) - .await - .expect("Unable to create WorkspaceSnapshotGraph"); - - // Set up the to rebase graph. - let func_category_node_id = to_rebase - .get_category_node(None, CategoryNodeKind::Func) - .await - .expect("should get func category node"); - - let schema_category_node_id = to_rebase - .get_category_node(None, CategoryNodeKind::Func) - .await - .expect("should get func category node"); - - // Create the onto graph from the to rebase graph.
- let onto_change_set = ChangeSet::new_local().expect("Unable to create ChangeSet"); - let onto_change_set = &onto_change_set; - let onto = to_rebase.real_clone().await; - - // FuncCategory --Use--> Func - let func_id = onto_change_set - .generate_ulid() - .expect("could not generate ulid"); - let func_node_weight = FuncNodeWeight::new( - onto_change_set, - func_id, - ContentAddress::Func(ContentHash::from("foo")), - "foo".to_string(), - FuncKind::Intrinsic, - ) - .expect("could not create func node weight"); - onto.add_node(NodeWeight::Func(func_node_weight)) - .await - .expect("could not add node"); - onto.add_edge( - func_category_node_id, - EdgeWeight::new(onto_change_set, EdgeWeightKind::new_use()) - .expect("could not create edge weight"), - func_id, - ) - .await - .expect("could not add edge"); - - // SchemaCategory --Use--> Schema - let schema_id = onto_change_set - .generate_ulid() - .expect("could not generate ulid"); - let schema_node_weight = ContentNodeWeight::new( - onto_change_set, - schema_id, - ContentAddress::Schema(ContentHash::from("foo")), - ) - .expect("could not create func node weight"); - onto.add_node(NodeWeight::Content(schema_node_weight)) - .await - .expect("could not add node"); - onto.add_edge( - schema_category_node_id, - EdgeWeight::new(onto_change_set, EdgeWeightKind::new_use()) - .expect("could not create edge weight"), - schema_id, - ) - .await - .expect("could not add edge"); - - // Schema --Use--> SchemaVariant - let schema_variant_node_id = onto_change_set - .generate_ulid() - .expect("could not generate ulid"); - let schema_variant_node_weight = ContentNodeWeight::new( - onto_change_set, - schema_variant_node_id, - ContentAddress::SchemaVariant(ContentHash::from("foo")), - ) - .expect("could not create func node weight"); - onto.add_node(NodeWeight::Content(schema_variant_node_weight)) - .await - .expect("could not add node"); - onto.add_edge( - schema_id, - EdgeWeight::new(onto_change_set, EdgeWeightKind::new_use()) - .expect("could not create edge weight"), - schema_variant_node_id, - ) - .await - .expect("could not add edge"); - - // SchemaVariant --Use--> Func - onto.get_node_index_by_id(func_id) - .await - .expect("could not get node index by id"); - onto.add_edge( - schema_variant_node_id, - EdgeWeight::new(onto_change_set, EdgeWeightKind::new_use()) - .expect("could not create edge weight"), - func_id, - ) - .await - .expect("could not add edge"); - - // Before cleanup, detect conflicts and updates. - let (before_cleanup_conflicts, mut before_cleanup_updates) = to_rebase - .detect_conflicts_and_updates( - to_rebase_change_set.vector_clock_id(), - &onto, - onto_change_set.vector_clock_id(), - ) - .await - .expect("could not detect conflicts and updates"); - - // Cleanup and check node count. - onto.cleanup().await.expect("should clean up"); - to_rebase.cleanup().await.expect("should clean up"); - assert_eq!( - 9, // expected - onto.node_count().await // actual - ); - - // Detect conflicts and updates. Ensure cleanup did not affect the results. 
- let (conflicts, mut updates) = to_rebase - .detect_conflicts_and_updates( - to_rebase_change_set.vector_clock_id(), - &onto, - onto_change_set.vector_clock_id(), - ) - .await - .expect("could not detect conflicts and updates"); - - assert!(conflicts.is_empty()); - assert_eq!( - 2, // expected - updates.len() // actual - ); - assert_eq!( - before_cleanup_conflicts, // expected - conflicts // actual - ); - - // detect_conflicts_and_updates is not deterministic about order, so we need to sort before - // comparing - let match_update = |k: &Update| match k { - Update::NewEdge { - source, - destination, - .. - } => (*source, *destination), - Update::RemoveEdge { .. } => todo!(), - Update::ReplaceSubgraph { .. } => todo!(), - }; - before_cleanup_updates.sort_by_key(match_update); - updates.sort_by_key(match_update); - assert_eq!( - before_cleanup_updates, // expected - updates // actual - ); - - // Ensure that we do not have duplicate updates. - let mut deduped_updates = updates.clone(); - deduped_updates.dedup(); - assert_eq!( - deduped_updates.len(), // expected - updates.len() // actual - ); - - // Perform the updates. In the future, we may want to see if the onto and resulting to - // rebase graphs are logically equivalent after updates are performed. - to_rebase - .perform_updates(to_rebase_change_set, &onto, &updates) - .await - .expect("could not perform updates"); -} diff --git a/lib/dal/tests/integration_test/mod.rs b/lib/dal/tests/integration_test/mod.rs index 9c98426778..d51cf57587 100644 --- a/lib/dal/tests/integration_test/mod.rs +++ b/lib/dal/tests/integration_test/mod.rs @@ -6,7 +6,6 @@ mod component; mod connection; mod dependent_values_update; mod frame; -mod graph; mod prop; mod property_editor; mod rebaser; diff --git a/lib/pinga-server/BUCK b/lib/pinga-server/BUCK index 32799f5019..934474e9ec 100644 --- a/lib/pinga-server/BUCK +++ b/lib/pinga-server/BUCK @@ -1,8 +1,4 @@ -load( - "@prelude-si//:macros.bzl", - "rust_library", - "rust_library_integration_test", -) +load("@prelude-si//:macros.bzl", "rust_library") rust_library( name = "pinga-server", @@ -34,35 +30,3 @@ rust_library( "src/**/*.rs", ]), ) - -rust_library_integration_test( - name = "pinga-server-integration-test", - crate = "pinga_server", - deps = [ - "//lib/buck2-resources:buck2-resources", - "//lib/dal:dal-integration-test", - "//lib/nats-subscriber:nats-subscriber", - "//lib/si-crypto:si-crypto", - "//lib/si-data-nats:si-data-nats", - "//lib/si-data-pg:si-data-pg", - "//lib/si-layer-cache:si-layer-cache", - "//lib/si-settings:si-settings", - "//lib/si-std:si-std", - "//lib/telemetry-rs:telemetry", - "//lib/veritech-client:veritech-client", - "//third-party/rust:derive_builder", - "//third-party/rust:futures", - "//third-party/rust:remain", - "//third-party/rust:serde", - "//third-party/rust:serde_json", - "//third-party/rust:stream-cancel", - "//third-party/rust:thiserror", - "//third-party/rust:tokio", - "//third-party/rust:tokio-stream", - "//third-party/rust:tokio-util", - "//third-party/rust:ulid", - ], - srcs = glob([ - "src/**/*.rs", - ]), -) diff --git a/lib/rebaser-server/BUCK b/lib/rebaser-server/BUCK index 10756b56d7..7dddb97985 100644 --- a/lib/rebaser-server/BUCK +++ b/lib/rebaser-server/BUCK @@ -1,8 +1,4 @@ -load( - "@prelude-si//:macros.bzl", - "rust_library", - "rust_library_integration_test", -) +load("@prelude-si//:macros.bzl", "rust_library") rust_library( name = "rebaser-server", @@ -35,37 +31,3 @@ rust_library( "src/**/*.rs", ]), ) - -rust_library_integration_test( - name = 
"rebaser-server-integration-test", - crate = "rebaser_server", - deps = [ - "//lib/buck2-resources:buck2-resources", - "//lib/dal:dal-integration-test", - "//lib/nats-subscriber:nats-subscriber", - "//lib/rebaser-core:rebaser-core", - "//lib/si-crypto:si-crypto", - "//lib/si-data-nats:si-data-nats", - "//lib/si-data-pg:si-data-pg", - "//lib/si-layer-cache:si-layer-cache", - "//lib/si-settings:si-settings", - "//lib/si-std:si-std", - "//lib/si-test-macros:si-test-macros", - "//lib/telemetry-rs:telemetry", - "//lib/veritech-client:veritech-client", - "//third-party/rust:derive_builder", - "//third-party/rust:futures", - "//third-party/rust:remain", - "//third-party/rust:serde", - "//third-party/rust:serde_json", - "//third-party/rust:stream-cancel", - "//third-party/rust:thiserror", - "//third-party/rust:tokio", - "//third-party/rust:tokio-util", - "//third-party/rust:ulid", - ], - srcs = glob([ - "src/**/*.rs", - ]), -) - diff --git a/lib/rebaser-server/src/server/rebase.rs b/lib/rebaser-server/src/server/rebase.rs index 63f3c3c402..384046ecd1 100644 --- a/lib/rebaser-server/src/server/rebase.rs +++ b/lib/rebaser-server/src/server/rebase.rs @@ -68,7 +68,6 @@ pub(crate) async fn perform_rebase( // Perform the conflicts and updates detection. let onto_vector_clock_id: VectorClockId = message.payload.onto_vector_clock_id.into(); - let detect_start = Instant::now(); let (conflicts, updates) = to_rebase_workspace_snapshot .detect_conflicts_and_updates( to_rebase_change_set.vector_clock_id(), @@ -80,14 +79,13 @@ pub(crate) async fn perform_rebase( "count: conflicts ({}) and updates ({}), {:?}", conflicts.len(), updates.len(), - detect_start.elapsed() + start.elapsed() ); // If there are conflicts, immediately assemble a reply message that conflicts were found. // Otherwise, we can perform updates and assemble a "success" reply message. let message: RebaseStatus = if conflicts.is_empty() { // TODO(nick): store the offset with the change set. 
- let update_start = Instant::now(); to_rebase_workspace_snapshot .perform_updates( &to_rebase_change_set, @@ -95,7 +93,7 @@ pub(crate) async fn perform_rebase( updates.as_slice(), ) .await?; - info!("updates complete: {:?}", update_start.elapsed()); + info!("updates complete: {:?}", start.elapsed()); if !updates.is_empty() { // Once all updates have been performed, we can write out, mark everything as recently seen diff --git a/lib/sdf-server/BUCK b/lib/sdf-server/BUCK index bd4911e6d1..9ef9020cbd 100644 --- a/lib/sdf-server/BUCK +++ b/lib/sdf-server/BUCK @@ -1,9 +1,4 @@ -load( - "@prelude-si//:macros.bzl", - "rust_library", - "rust_library_integration_test", - "rust_test", -) +load("@prelude-si//:macros.bzl", "rust_library") rust_library( name = "sdf-server", @@ -62,69 +57,6 @@ rust_library( "src/server/service/func/defaults/*.ts", "src/server/service/ts_types/*.ts", ]), -) - -rust_library_integration_test( - name = "sdf-integration-test", - crate = "sdf_server", - deps = [ - "//lib/buck2-resources:buck2-resources", - "//lib/dal:dal-integration-test", - "//lib/module-index-client:module-index-client", - "//lib/nats-multiplexer:nats-multiplexer", - "//lib/nats-multiplexer-client:nats-multiplexer-client", - "//lib/nats-subscriber:nats-subscriber", - "//lib/si-crypto:si-crypto", - "//lib/si-data-nats:si-data-nats", - "//lib/si-data-pg:si-data-pg", - "//lib/si-layer-cache:si-layer-cache", - "//lib/si-pkg:si-pkg", - "//lib/si-posthog-rs:si-posthog", - "//lib/si-settings:si-settings", - "//lib/si-std:si-std", - "//lib/telemetry-http-rs:telemetry-http", - "//lib/telemetry-rs:telemetry", - "//lib/veritech-client:veritech-client", - "//third-party/rust:async-trait", - "//third-party/rust:axum", - "//third-party/rust:base64", - "//third-party/rust:chrono", - "//third-party/rust:convert_case", - "//third-party/rust:derive_builder", - "//third-party/rust:futures", - "//third-party/rust:futures-lite", - "//third-party/rust:hyper", - "//third-party/rust:names", - "//third-party/rust:nix", - "//third-party/rust:once_cell", - "//third-party/rust:pathdiff", - "//third-party/rust:rand", - "//third-party/rust:remain", - "//third-party/rust:reqwest", - "//third-party/rust:serde", - "//third-party/rust:serde_json", - "//third-party/rust:serde_with", - "//third-party/rust:sodiumoxide", - "//third-party/rust:strum", - "//third-party/rust:thiserror", - "//third-party/rust:tokio", - "//third-party/rust:tokio-stream", - "//third-party/rust:tokio-tungstenite", - "//third-party/rust:tower", - "//third-party/rust:tower-http", - "//third-party/rust:ulid", - "//third-party/rust:url", - "//third-party/rust:y-sync", - "//third-party/rust:yrs", - ], - rustc_flags = [ - "--cfg=integration_test", - ], - srcs = glob([ - "src/**/*.rs", - "src/server/service/func/defaults/*.ts", - "src/server/service/ts_types/*.ts", - ]), extra_test_targets = [":test-integration"], ) @@ -133,7 +65,7 @@ rust_test( edition = "2021", deps = [ "//lib/dal-test:dal-test", - "//lib/dal:dal-integration-test", + "//lib/dal:dal", "//lib/nats-multiplexer-client:nats-multiplexer-client", "//lib/nats-multiplexer:nats-multiplexer", "//lib/si-data-nats:si-data-nats", @@ -155,7 +87,7 @@ rust_test( "//third-party/rust:tower", "//third-party/rust:y-sync", "//third-party/rust:yrs", - ":sdf-integration-test", + ":sdf-server", ], crate_root = "tests/api.rs", srcs = glob([ diff --git a/lib/sdf-server/src/server/service/graphviz.rs b/lib/sdf-server/src/server/service/graphviz.rs index e8dc7383f6..21b74423ec 100644 --- a/lib/sdf-server/src/server/service/graphviz.rs 
+++ b/lib/sdf-server/src/server/service/graphviz.rs @@ -120,7 +120,7 @@ pub async fn schema_variant( GraphVizNode { id: sv_node_weight.id(), content_kind: sv_node_weight.content_address_discriminants(), - node_kind: sv_node_weight.as_ref().into(), + node_kind: sv_node_weight.into(), name: Some(sv.name().to_owned()), } }; @@ -147,7 +147,7 @@ pub async fn schema_variant( }); }); } - let name = match target.as_ref() { + let name = match &target { NodeWeight::Category(inner) => Some(inner.kind().to_string()), NodeWeight::Func(inner) => { func_nodes.push(inner.id()); @@ -162,7 +162,7 @@ pub async fn schema_variant( nodes.push(GraphVizNode { id: target.id(), content_kind: target.content_address_discriminants(), - node_kind: target.as_ref().into(), + node_kind: target.into(), name, }) } @@ -195,7 +195,7 @@ pub async fn schema_variant( }); } - let name = match source.as_ref() { + let name = match &source { NodeWeight::Category(inner) => Some(inner.kind().to_string()), NodeWeight::Func(inner) => Some(inner.name().to_owned()), NodeWeight::Prop(inner) => Some(inner.name().to_owned()), @@ -207,7 +207,7 @@ pub async fn schema_variant( nodes.push(GraphVizNode { id: source.id(), content_kind: source.content_address_discriminants(), - node_kind: source.as_ref().into(), + node_kind: source.into(), name, }) } @@ -225,7 +225,7 @@ pub async fn schema_variant( .await? .to_owned(); - if let NodeWeight::Category(cat_inner) = user_node.as_ref() { + if let NodeWeight::Category(cat_inner) = &user_node { let name = Some(cat_inner.kind().to_string()); if !added_edges.contains(&(func_id, cat_inner.id())) { added_edges.insert((func_id, cat_inner.id())); @@ -246,7 +246,7 @@ pub async fn schema_variant( nodes.push(GraphVizNode { id: cat_inner.id(), content_kind: user_node.content_address_discriminants(), - node_kind: user_node.as_ref().into(), + node_kind: user_node.to_owned().into(), name, }) } @@ -325,7 +325,7 @@ pub async fn components( let node = GraphVizNode { id: node_weight.id(), content_kind: node_weight.content_address_discriminants(), - node_kind: node_weight.as_ref().into(), + node_kind: node_weight.into(), name: Some(component.name(&ctx).await?.to_owned()), }; nodes.push(node); @@ -351,7 +351,7 @@ pub async fn components( } // TODO encapsulate this in node weight logic - let name = match target.as_ref() { + let name = match &target { NodeWeight::Category(inner) => Some(inner.kind().to_string()), NodeWeight::Func(inner) => { func_nodes.push(inner.id()); @@ -375,7 +375,7 @@ pub async fn components( nodes.push(GraphVizNode { id: target.id(), content_kind: target.content_address_discriminants(), - node_kind: target.as_ref().into(), + node_kind: target.into(), name, }) } @@ -409,7 +409,7 @@ pub async fn nodes_edges( for (weight, idx) in workspace_snapshot.nodes().await?.into_iter() { node_idx_to_id.insert(idx, weight.id()); - let name = match weight.as_ref() { + let name = match &weight { NodeWeight::Category(inner) => Some(inner.kind().to_string()), NodeWeight::Func(inner) => Some(inner.name().to_owned()), NodeWeight::Prop(inner) => Some(inner.name().to_owned()), @@ -435,7 +435,7 @@ pub async fn nodes_edges( nodes.push(GraphVizNode { id: weight.id(), content_kind: weight.content_address_discriminants(), - node_kind: weight.as_ref().into(), + node_kind: weight.into(), name, }) } diff --git a/lib/si-data-pg/src/lib.rs b/lib/si-data-pg/src/lib.rs index eb76bf14ad..22a5b486cd 100644 --- a/lib/si-data-pg/src/lib.rs +++ b/lib/si-data-pg/src/lib.rs @@ -15,6 +15,7 @@ use tokio_postgres_rustls::MakeRustlsConnect; use 
base64::{engine::general_purpose, Engine}; use std::{ + cmp, fmt::{self, Debug}, net::ToSocketAddrs, path::Path, @@ -49,7 +50,7 @@ pub use tokio_postgres::error::SqlState; pub use tokio_postgres::types as postgres_types; const MIGRATION_LOCK_NUMBER: i64 = 42; -const MAX_POOL_SIZE_MINIMUM: usize = 2; +const MAX_POOL_SIZE_MINIMUM: usize = 32; const TEST_QUERY: &str = "SELECT 1"; @@ -121,7 +122,7 @@ pub struct PgPoolConfig { impl Default for PgPoolConfig { fn default() -> Self { - let pool_max_size = MAX_POOL_SIZE_MINIMUM; // cmp::max(MAX_POOL_SIZE_MINIMUM, num_cpus::get_physical() * 4); + let pool_max_size = cmp::max(MAX_POOL_SIZE_MINIMUM, num_cpus::get_physical() * 4); PgPoolConfig { user: String::from("si"), diff --git a/lib/si-events-rs/BUCK b/lib/si-events-rs/BUCK index 041d779f52..93bf49a173 100644 --- a/lib/si-events-rs/BUCK +++ b/lib/si-events-rs/BUCK @@ -11,7 +11,6 @@ rust_library( "//lib/si-hash:si-hash", "//third-party/rust:blake3", "//third-party/rust:bytes", - "//third-party/rust:paste", "//third-party/rust:postgres-types", "//third-party/rust:remain", "//third-party/rust:serde", diff --git a/lib/si-events-rs/Cargo.toml b/lib/si-events-rs/Cargo.toml index 2a2425d92f..8eb73094d6 100644 --- a/lib/si-events-rs/Cargo.toml +++ b/lib/si-events-rs/Cargo.toml @@ -14,6 +14,5 @@ serde = { workspace = true } ulid = { workspace = true } serde_json = { workspace = true } thiserror = { workspace = true } -paste = { workspace = true } postgres-types = { workspace = true } blake3 = { workspace = true } diff --git a/lib/si-events-rs/src/content_address.rs b/lib/si-events-rs/src/content_address.rs deleted file mode 100644 index 82ac44b079..0000000000 --- a/lib/si-events-rs/src/content_address.rs +++ /dev/null @@ -1,238 +0,0 @@ -#[macro_export] -macro_rules! 
content_address { - ( - $(#[$($attrss:tt)*])* - $name:ident - ) => { - $(#[$($attrss)*])* - #[derive(Clone, Copy, Eq, PartialEq, std::hash::Hash)] - pub struct $name(::blake3::Hash); - - impl $name { - pub fn new(input: &[u8]) -> Self { - Self(blake3::hash(input)) - } - - pub fn as_bytes(&self) -> &[u8] { - self.0.as_bytes() - } - - pub fn nil() -> Self { - Self(blake3::Hash::from_bytes([0; 32])) - } - } - - impl PartialOrd for $name { - fn partial_cmp(&self, other: &Self) -> Option<::std::cmp::Ordering> { - Some(self.cmp(other)) - } - } - - impl Ord for $name { - fn cmp(&self, other: &Self) -> ::std::cmp::Ordering { - let mut other_bytes = [0u8; 32]; - other_bytes.copy_from_slice(other.as_bytes()); - let mut self_bytes = [0u8; 32]; - self_bytes.copy_from_slice(self.as_bytes()); - - self_bytes.cmp(&other_bytes) - } - } - - impl AsRef<[u8]> for $name { - fn as_ref(&self) -> &[u8] { - self.0.as_bytes() - } - } - - impl From<&::serde_json::Value> for $name { - fn from(value: &::serde_json::Value) -> Self { - let input = value.to_string(); - Self::new(input.as_bytes()) - } - } - - impl From<&str> for $name { - fn from(input: &str) -> Self { - Self::new(input.as_bytes()) - } - } - - impl Default for $name { - fn default() -> Self { - Self::new("".as_bytes()) - } - } - - impl ::std::fmt::Debug for $name { - fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { - write!(f, std::concat!(stringify!($name), "({})"), self.0) - } - } - - impl ::std::fmt::Display for $name { - fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { - self.0.fmt(f) - } - } - - impl ::serde::Serialize for $name { - fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> - where - S: ::serde::Serializer, - { - serializer.serialize_str(&self.to_string()) - } - } - - paste::paste! { - struct [<$name Visitor>]; - - impl<'de> ::serde::de::Visitor<'de> for [<$name Visitor>] { - type Value = $name; - - fn expecting(&self, formatter: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { - formatter.write_str("a blake3 hash string") - } - - fn visit_str<E>(self, v: &str) -> Result<Self::Value, E> - where - E: ::serde::de::Error, - { - use ::std::str::FromStr; - - $name::from_str(v).map_err(|e| E::custom(e.to_string())) - } - } - - impl<'de> ::serde::Deserialize<'de> for $name { - fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> - where - D: ::serde::Deserializer<'de>, - { - deserializer.deserialize_str([<$name Visitor>]) - } - } - - #[derive(Debug, ::thiserror::Error)] - #[error("failed to parse hash hex string")] - pub struct [<$name ParseError>](#[from] ::blake3::HexError); - - impl ::std::str::FromStr for $name { - type Err = [<$name ParseError>]; - - fn from_str(s: &str) -> Result<Self, Self::Err> { - Ok(Self(blake3::Hash::from_str(s)?)) - } - } - - impl $name { - /// Provide a [`hasher`](ContentHasher) to create [`hashes`](ContentHash).
- pub fn hasher() -> [<$name Hasher>] { - [<$name Hasher>]::new() - } - } - - #[derive(Debug, Default)] - pub struct [<$name Hasher>](::blake3::Hasher); - - impl [<$name Hasher>] { - pub fn new() -> Self { - Self(::blake3::Hasher::new()) - } - - pub fn update(&mut self, input: &[u8]) { - self.0.update(input); - } - - pub fn finalize(&self) -> $name { - $name(self.0.finalize()) - } - } - - struct [<$name BytesVisitor>]; - - impl<'de> ::serde::de::Visitor<'de> for [<$name BytesVisitor>] { - type Value = $name; - - fn expecting(&self, formatter: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { - formatter.write_str("a blake3 hash byte slice") - } - - fn visit_bytes(self, v: &[u8]) -> Result - where - E: ::serde::de::Error, - { - if v.len() != 32 { - return Err(E::custom(std::concat!("deserializer received wrong sized byte slice when attempting to deserialize a ", stringify!($name)))); - } - - let mut hash_bytes = [0u8; 32]; - hash_bytes.copy_from_slice(v); - - Ok($name(::blake3::Hash::from_bytes(hash_bytes))) - } - } - - pub fn []<'de, D>(d: D) -> Result<$name, D::Error> - where D: ::serde::de::Deserializer<'de> - { - d.deserialize_bytes([<$name BytesVisitor>]) - } - - pub fn [](value: &$name, serializer: S) -> Result - where S: ::serde::ser::Serializer, - { - serializer.serialize_bytes(value.as_bytes()) - } - } - - impl ::postgres_types::ToSql for $name { - fn to_sql( - &self, - ty: &postgres_types::Type, - out: &mut ::bytes::BytesMut, - ) -> Result> - where - Self: Sized, - { - let self_string = self.to_string(); - - self_string.to_sql(ty, out) - } - - fn accepts(ty: &postgres_types::Type) -> bool - where - Self: Sized, - { - String::accepts(ty) - } - - fn to_sql_checked( - &self, - ty: &postgres_types::Type, - out: &mut ::bytes::BytesMut, - ) -> Result> { - let self_string = self.to_string(); - self_string.to_sql_checked(ty, out) - } - } - - impl<'a> postgres_types::FromSql<'a> for $name { - fn from_sql( - ty: &postgres_types::Type, - raw: &'a [u8], - ) -> Result> { - use ::std::str::FromStr; - - let hash_string: String = postgres_types::FromSql::from_sql(ty, raw)?; - Ok(Self(blake3::Hash::from_str(&hash_string)?)) - } - - fn accepts(ty: &postgres_types::Type) -> bool { - ty == &postgres_types::Type::TEXT - || ty.kind() == &postgres_types::Kind::Domain(postgres_types::Type::TEXT) - } - } - }; -} diff --git a/lib/si-events-rs/src/content_hash.rs b/lib/si-events-rs/src/content_hash.rs new file mode 100644 index 0000000000..96d318191c --- /dev/null +++ b/lib/si-events-rs/src/content_hash.rs @@ -0,0 +1,164 @@ +use bytes::BytesMut; +use std::{fmt, str::FromStr}; + +use postgres_types::ToSql; +use serde::{ + de::{self, Visitor}, + Deserialize, Serialize, +}; +use serde_json::Value; + +use thiserror::Error; + +/// The [`blake3::Hash`] of a given set of contents. +#[derive(Clone, Copy, Eq, Hash, PartialEq)] +pub struct ContentHash(blake3::Hash); + +impl ContentHash { + /// Create a new [`ContentHash`] from a byte array. + #[must_use] + pub fn new(input: &[u8]) -> Self { + Self(blake3::hash(input)) + } + + /// Provide a [`hasher`](ContentHasher) to create [`hashes`](ContentHash). 
+    pub fn hasher() -> ContentHasher {
+        ContentHasher::new()
+    }
+
+    pub fn as_bytes(&self) -> &[u8] {
+        self.0.as_bytes()
+    }
+}
+
+impl AsRef<[u8]> for ContentHash {
+    fn as_ref(&self) -> &[u8] {
+        self.0.as_bytes()
+    }
+}
+
+impl From<&Value> for ContentHash {
+    fn from(value: &Value) -> Self {
+        let input = value.to_string();
+        Self::new(input.as_bytes())
+    }
+}
+
+impl From<&str> for ContentHash {
+    fn from(input: &str) -> Self {
+        Self::new(input.as_bytes())
+    }
+}
+
+impl Default for ContentHash {
+    fn default() -> Self {
+        Self::new("".as_bytes())
+    }
+}
+
+impl fmt::Debug for ContentHash {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "ContentHash({})", self.0)
+    }
+}
+
+impl fmt::Display for ContentHash {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        self.0.fmt(f)
+    }
+}
+
+impl Serialize for ContentHash {
+    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+    where
+        S: serde::Serializer,
+    {
+        serializer.serialize_str(&self.to_string())
+    }
+}
+
+struct ContentHashVisitor;
+
+impl<'de> Visitor<'de> for ContentHashVisitor {
+    type Value = ContentHash;
+
+    fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+        formatter.write_str("a blake3 hash string")
+    }
+
+    fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
+    where
+        E: de::Error,
+    {
+        ContentHash::from_str(v).map_err(|e| E::custom(e.to_string()))
+    }
+}
+
+impl<'de> Deserialize<'de> for ContentHash {
+    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+    where
+        D: serde::Deserializer<'de>,
+    {
+        deserializer.deserialize_str(ContentHashVisitor)
+    }
+}
+
+#[derive(Debug, Error)]
+#[error("failed to parse hash hex string")]
+pub struct ContentHashParseError(#[from] blake3::HexError);
+
+impl FromStr for ContentHash {
+    type Err = ContentHashParseError;
+
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        Ok(Self(blake3::Hash::from_str(s)?))
+    }
+}
+
+#[derive(Debug, Default)]
+pub struct ContentHasher(blake3::Hasher);
+
+impl ContentHasher {
+    pub fn new() -> Self {
+        ContentHasher(blake3::Hasher::new())
+    }
+
+    pub fn update(&mut self, input: &[u8]) {
+        self.0.update(input);
+    }
+
+    pub fn finalize(&self) -> ContentHash {
+        ContentHash(self.0.finalize())
+    }
+}
+
+impl ToSql for ContentHash {
+    fn to_sql(
+        &self,
+        ty: &postgres_types::Type,
+        out: &mut BytesMut,
+    ) -> Result<postgres_types::IsNull, Box<dyn std::error::Error + Sync + Send>>
+    where
+        Self: Sized,
+    {
+        let self_string = self.to_string();
+
+        self_string.to_sql(ty, out)
+    }
+
+    fn accepts(ty: &postgres_types::Type) -> bool
+    where
+        Self: Sized,
+    {
+        String::accepts(ty)
+    }
+
+    fn to_sql_checked(
+        &self,
+        ty: &postgres_types::Type,
+        out: &mut BytesMut,
+    ) -> Result<postgres_types::IsNull, Box<dyn std::error::Error + Sync + Send>> {
+        let self_string = self.to_string();
+        self_string.to_sql_checked(ty, out)
+    }
+}
diff --git a/lib/si-events-rs/src/lib.rs b/lib/si-events-rs/src/lib.rs
index 077647cdfa..b9103ad22c 100644
--- a/lib/si-events-rs/src/lib.rs
+++ b/lib/si-events-rs/src/lib.rs
@@ -1,16 +1,15 @@
+pub mod content_hash;
+pub mod workspace_snapshot_address;
+
 mod actor;
 mod cas;
-pub mod content_address;
 mod encrypted_secret;
 mod tenancy;
 mod web_event;
 
 pub use crate::{
-    actor::Actor, actor::UserPk, cas::CasValue, encrypted_secret::EncryptedSecretKey,
-    tenancy::ChangeSetId, tenancy::Tenancy, tenancy::WorkspacePk, web_event::WebEvent,
+    actor::Actor, actor::UserPk, cas::CasValue, content_hash::ContentHash,
+    encrypted_secret::EncryptedSecretKey, tenancy::ChangeSetId, tenancy::Tenancy,
+    tenancy::WorkspacePk, web_event::WebEvent,
+    workspace_snapshot_address::WorkspaceSnapshotAddress,
 };
-
-content_address!(ContentHash);
-content_address!(WorkspaceSnapshotAddress);
-content_address!(NodeWeightAddress);
-content_address!(MerkleTreeHash);
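// A minimal usage sketch (not part of this diff) for the `ContentHash` type
// added above. It only exercises APIs visible in `content_hash.rs`: `new`,
// `From<&str>`, `Display` (hex via `blake3::Hash`), `FromStr`, and the
// streaming `ContentHasher`. Assumes a crate depending on `si-events` with
// the `ContentHash` re-export shown in `lib.rs`.
use si_events::ContentHash;

fn content_hash_sketch() {
    // Hashing the same bytes twice yields the same address.
    let a = ContentHash::new(b"hello");
    let b = ContentHash::from("hello");
    assert_eq!(a, b);

    // Display renders the blake3 hash as a hex string; FromStr parses it
    // back, which is also how the serde and ToSql impls round-trip values.
    let hex = a.to_string();
    let parsed: ContentHash = hex.parse().expect("valid blake3 hex");
    assert_eq!(a, parsed);

    // The streaming hasher produces the same result as one-shot hashing.
    let mut hasher = ContentHash::hasher();
    hasher.update(b"he");
    hasher.update(b"llo");
    assert_eq!(hasher.finalize(), a);
}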
diff --git a/lib/si-events-rs/src/workspace_snapshot_address.rs b/lib/si-events-rs/src/workspace_snapshot_address.rs
new file mode 100644
index 0000000000..c408220311
--- /dev/null
+++ b/lib/si-events-rs/src/workspace_snapshot_address.rs
@@ -0,0 +1,121 @@
+use bytes::BytesMut;
+use postgres_types::ToSql;
+use serde::{
+    de::{self, Visitor},
+    Deserialize, Serialize,
+};
+use std::{fmt, str::FromStr};
+use thiserror::Error;
+
+#[derive(Debug, Clone, Copy, Eq, PartialEq)]
+pub struct WorkspaceSnapshotAddress(blake3::Hash);
+
+impl WorkspaceSnapshotAddress {
+    #[must_use]
+    pub fn new(input: &[u8]) -> Self {
+        Self(blake3::hash(input))
+    }
+
+    pub fn nil() -> Self {
+        Self(blake3::Hash::from_bytes([0; 32]))
+    }
+}
+
+#[derive(Debug, Error)]
+#[error("failed to parse hash hex string")]
+pub struct WorkspaceSnapshotAddressParseError(#[from] blake3::HexError);
+
+impl FromStr for WorkspaceSnapshotAddress {
+    type Err = WorkspaceSnapshotAddressParseError;
+
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        Ok(Self(blake3::Hash::from_str(s)?))
+    }
+}
+
+impl std::fmt::Display for WorkspaceSnapshotAddress {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        self.0.fmt(f)
+    }
+}
+
+impl Serialize for WorkspaceSnapshotAddress {
+    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+    where
+        S: serde::Serializer,
+    {
+        serializer.serialize_str(&self.to_string())
+    }
+}
+
+struct WorkspaceSnapshotAddressVisitor;
+
+impl<'de> Visitor<'de> for WorkspaceSnapshotAddressVisitor {
+    type Value = WorkspaceSnapshotAddress;
+
+    fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+        formatter.write_str("a blake3 hash string")
+    }
+
+    fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
+    where
+        E: de::Error,
+    {
+        WorkspaceSnapshotAddress::from_str(v).map_err(|e| E::custom(e.to_string()))
+    }
+}
+
+impl<'de> Deserialize<'de> for WorkspaceSnapshotAddress {
+    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+    where
+        D: serde::Deserializer<'de>,
+    {
+        deserializer.deserialize_str(WorkspaceSnapshotAddressVisitor)
+    }
+}
+
+impl ToSql for WorkspaceSnapshotAddress {
+    fn to_sql(
+        &self,
+        ty: &postgres_types::Type,
+        out: &mut BytesMut,
+    ) -> Result<postgres_types::IsNull, Box<dyn std::error::Error + Sync + Send>>
+    where
+        Self: Sized,
+    {
+        let self_string = self.to_string();
+
+        self_string.to_sql(ty, out)
+    }
+
+    fn accepts(ty: &postgres_types::Type) -> bool
+    where
+        Self: Sized,
+    {
+        String::accepts(ty)
+    }
+
+    fn to_sql_checked(
+        &self,
+        ty: &postgres_types::Type,
+        out: &mut BytesMut,
+    ) -> Result<postgres_types::IsNull, Box<dyn std::error::Error + Sync + Send>> {
+        let self_string = self.to_string();
+        self_string.to_sql_checked(ty, out)
+    }
+}
+
+impl<'a> postgres_types::FromSql<'a> for WorkspaceSnapshotAddress {
+    fn from_sql(
+        ty: &postgres_types::Type,
+        raw: &'a [u8],
+    ) -> Result<Self, Box<dyn std::error::Error + Sync + Send>> {
+        let hash_string: String = postgres_types::FromSql::from_sql(ty, raw)?;
+        Ok(Self(blake3::Hash::from_str(&hash_string)?))
+    }
+
+    fn accepts(ty: &postgres_types::Type) -> bool {
+        ty == &postgres_types::Type::TEXT
+            || ty.kind() == &postgres_types::Kind::Domain(postgres_types::Type::TEXT)
+    }
+}
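// A minimal sketch (not part of this diff) of how the new
// `WorkspaceSnapshotAddress` is meant to be derived: hash the serialized
// bytes of a snapshot so equal content maps to the same address. The
// postcard step mirrors the write path visible in the (deleted)
// `node_weight.rs` further down; `address_for` and the tuple "snapshot"
// are hypothetical stand-ins, not real types from this diff.
use si_events::WorkspaceSnapshotAddress;

fn address_for<T: serde::Serialize>(snapshot: &T) -> WorkspaceSnapshotAddress {
    // Serialize with postcard, then content-address the resulting bytes.
    let bytes = postcard::to_stdvec(snapshot).expect("serialize snapshot");
    WorkspaceSnapshotAddress::new(&bytes)
}

fn workspace_snapshot_address_sketch() {
    let addr = address_for(&("hypothetical", "snapshot", 1u64));
    // `nil()` is the all-zeroes address, usable as a sentinel.
    assert_ne!(addr, WorkspaceSnapshotAddress::nil());
}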
diff --git a/lib/si-layer-cache/src/db.rs b/lib/si-layer-cache/src/db.rs
index bdbc3f68da..aaf87a117d 100644
--- a/lib/si-layer-cache/src/db.rs
+++ b/lib/si-layer-cache/src/db.rs
@@ -16,29 +16,23 @@ use crate::{
     persister::{PersisterClient, PersisterTask},
 };
 
-use self::{
-    cache_updates::CacheUpdatesTask, cas::CasDb, node_weight::NodeWeightDb,
-    workspace_snapshot::WorkspaceSnapshotDb,
-};
+use self::{cache_updates::CacheUpdatesTask, cas::CasDb, workspace_snapshot::WorkspaceSnapshotDb};
 
 mod cache_updates;
 pub mod cas;
 pub mod encrypted_secret;
-pub mod node_weight;
 pub mod workspace_snapshot;
 
 #[derive(Debug, Clone)]
-pub struct LayerDb<CasValue, EncryptedSecretValue, WorkspaceSnapshotValue, NodeWeightValue>
+pub struct LayerDb<CasValue, EncryptedSecretValue, WorkspaceSnapshotValue>
 where
     CasValue: Serialize + DeserializeOwned + Clone + Send + Sync + 'static,
     EncryptedSecretValue: Serialize + DeserializeOwned + Clone + Send + Sync + 'static,
     WorkspaceSnapshotValue: Serialize + DeserializeOwned + Clone + Send + Sync + 'static,
-    NodeWeightValue: Serialize + DeserializeOwned + Clone + Send + Sync + 'static,
 {
     cas: CasDb<CasValue>,
     encrypted_secret: EncryptedSecretDb<EncryptedSecretValue>,
     workspace_snapshot: WorkspaceSnapshotDb<WorkspaceSnapshotValue>,
-    node_weight: NodeWeightDb<NodeWeightValue>,
     sled: sled::Db,
     pg_pool: PgPool,
     nats_client: NatsClient,
@@ -47,13 +41,12 @@ where
     instance_id: Ulid,
 }
 
-impl<CasValue, EncryptedSecretValue, WorkspaceSnapshotValue, NodeWeightValue>
-    LayerDb<CasValue, EncryptedSecretValue, WorkspaceSnapshotValue, NodeWeightValue>
+impl<CasValue, EncryptedSecretValue, WorkspaceSnapshotValue>
+    LayerDb<CasValue, EncryptedSecretValue, WorkspaceSnapshotValue>
 where
     CasValue: Serialize + DeserializeOwned + Clone + Send + Sync + 'static,
     EncryptedSecretValue: Serialize + DeserializeOwned + Clone + Send + Sync + 'static,
     WorkspaceSnapshotValue: Serialize + DeserializeOwned + Clone + Send + Sync + 'static,
-    NodeWeightValue: Serialize + DeserializeOwned + Clone + Send + Sync + 'static,
 {
     pub async fn initialize(
         disk_path: impl AsRef<Path>,
@@ -84,16 +77,12 @@ where
     )
     .await?;
 
-        let node_weight_cache: LayerCache<Arc<NodeWeightValue>> =
-            LayerCache::new(node_weight::CACHE_NAME, sled.clone(), pg_pool.clone()).await?;
-
         let cache_updates_task = CacheUpdatesTask::create(
             instance_id,
             &nats_client,
             cas_cache.clone(),
             encrypted_secret_cache.clone(),
             snapshot_cache.clone(),
-            node_weight_cache.clone(),
             token.clone(),
         )
         .await?;
@@ -114,7 +103,6 @@ where
         let encrypted_secret =
             EncryptedSecretDb::new(encrypted_secret_cache, persister_client.clone());
         let workspace_snapshot = WorkspaceSnapshotDb::new(snapshot_cache, persister_client.clone());
-        let node_weight = NodeWeightDb::new(node_weight_cache, persister_client.clone());
 
         let activity = ActivityClient::new(instance_id, nats_client.clone(), token.clone());
         let graceful_shutdown = LayerDbGracefulShutdown { tracker, token };
@@ -124,7 +112,6 @@ where
             cas,
             encrypted_secret,
             workspace_snapshot,
-            node_weight,
             sled,
             pg_pool,
             persister_client,
@@ -163,9 +150,6 @@ where
         &self.workspace_snapshot
     }
 
-    pub fn node_weight(&self) -> &NodeWeightDb<NodeWeightValue> {
-        &self.node_weight
-    }
     pub fn instance_id(&self) -> Ulid {
         self.instance_id
     }
diff --git a/lib/si-layer-cache/src/db/cache_updates.rs b/lib/si-layer-cache/src/db/cache_updates.rs
index 3e3ee5cbe6..dd6b31266e 100644
--- a/lib/si-layer-cache/src/db/cache_updates.rs
+++ b/lib/si-layer-cache/src/db/cache_updates.rs
@@ -26,32 +26,28 @@ use crate::{
 enum CacheName {
     Cas,
     EncryptedSecret,
-    NodeWeights,
     WorkspaceSnapshots,
 }
 
-pub struct CacheUpdatesTask<CasValue, EncryptedSecretValue, WorkspaceSnapshotValue, NodeWeightValue>
+pub struct CacheUpdatesTask<CasValue, EncryptedSecretValue, WorkspaceSnapshotValue>
 where
     CasValue: Serialize + DeserializeOwned + Clone + Send + Sync + 'static,
     EncryptedSecretValue: Serialize + DeserializeOwned + Clone + Send + Sync + 'static,
     WorkspaceSnapshotValue: Serialize + DeserializeOwned + Clone + Send + Sync + 'static,
-    NodeWeightValue: Serialize + DeserializeOwned + Clone + Send + Sync + 'static,
 {
     instance_id: Ulid,
     messages: ChunkedMessagesStream,
     cas_cache: LayerCache<Arc<CasValue>>,
     encrypted_secret_cache: LayerCache<Arc<EncryptedSecretValue>>,
     snapshot_cache: LayerCache<Arc<WorkspaceSnapshotValue>>,
-    node_weight_cache: LayerCache<Arc<NodeWeightValue>>,
 }
 
-impl<CasValue, EncryptedSecretValue, WorkspaceSnapshotValue, NodeWeightValue>
-    CacheUpdatesTask<CasValue, EncryptedSecretValue, WorkspaceSnapshotValue, NodeWeightValue>
+impl<CasValue, EncryptedSecretValue, WorkspaceSnapshotValue>
+    CacheUpdatesTask<CasValue, EncryptedSecretValue, WorkspaceSnapshotValue>
 where
     CasValue: Serialize + DeserializeOwned + Clone + Send + Sync + 'static,
     EncryptedSecretValue: Serialize + DeserializeOwned + Clone + Send + Sync + 'static,
     WorkspaceSnapshotValue: Serialize + DeserializeOwned + Clone + Send + Sync + 'static,
-    NodeWeightValue: Serialize + DeserializeOwned + Clone + Send + Sync + 'static,
 {
     const NAME: &'static str = "LayerDB::CacheUpdatesTask";
 
@@ -61,7 +57,6 @@ where
         cas_cache: LayerCache<Arc<CasValue>>,
         encrypted_secret_cache: LayerCache<Arc<EncryptedSecretValue>>,
         snapshot_cache: LayerCache<Arc<WorkspaceSnapshotValue>>,
-        node_weight_cache: LayerCache<Arc<NodeWeightValue>>,
         shutdown_token: CancellationToken,
     ) -> LayerDbResult<Self> {
         let context = jetstream::new(nats_client.as_inner().clone());
@@ -81,7 +76,6 @@ where
             cas_cache,
             encrypted_secret_cache,
             snapshot_cache,
-            node_weight_cache,
         })
     }
 
@@ -97,7 +91,6 @@ where
             self.cas_cache.clone(),
             self.encrypted_secret_cache.clone(),
             self.snapshot_cache.clone(),
-            self.node_weight_cache.clone(),
         );
         // Turns out I think it's probably dangerous to do this spawned, since we want
         // to make sure we insert things into the cache in the order we receive them.
@@ -132,40 +125,35 @@ where
     }
 }
 
-struct CacheUpdateTask<Q, R, S, T>
+struct CacheUpdateTask<Q, R, S>
 where
     Q: Serialize + DeserializeOwned + Clone + Send + Sync + 'static,
     R: Serialize + DeserializeOwned + Clone + Send + Sync + 'static,
     S: Serialize + DeserializeOwned + Clone + Send + Sync + 'static,
-    T: Serialize + DeserializeOwned + Clone + Send + Sync + 'static,
 {
     instance_id: Ulid,
     cas_cache: LayerCache<Arc<Q>>,
    encrypted_secret_cache: LayerCache<Arc<R>>,
     snapshot_cache: LayerCache<Arc<S>>,
-    node_weight_cache: LayerCache<Arc<T>>,
 }
 
-impl<Q, R, S, T> CacheUpdateTask<Q, R, S, T>
+impl<Q, R, S> CacheUpdateTask<Q, R, S>
 where
     Q: Serialize + DeserializeOwned + Clone + Send + Sync + 'static,
     R: Serialize + DeserializeOwned + Clone + Send + Sync + 'static,
     S: Serialize + DeserializeOwned + Clone + Send + Sync + 'static,
-    T: Serialize + DeserializeOwned + Clone + Send + Sync + 'static,
 {
     fn new(
         instance_id: Ulid,
         cas_cache: LayerCache<Arc<Q>>,
         encrypted_secret_cache: LayerCache<Arc<R>>,
         snapshot_cache: LayerCache<Arc<S>>,
-        node_weight_cache: LayerCache<Arc<T>>,
-    ) -> CacheUpdateTask<Q, R, S, T> {
+    ) -> CacheUpdateTask<Q, R, S> {
         CacheUpdateTask {
             instance_id,
             cas_cache,
             encrypted_secret_cache,
             snapshot_cache,
-            node_weight_cache,
         }
     }
 
@@ -240,23 +228,6 @@ where
                         .await?;
                 }
             }
-            CacheName::NodeWeights => {
-                if !self.node_weight_cache.contains(key) {
-                    let event: LayeredEvent = postcard::from_bytes(&msg.payload)?;
-                    let memory_value = self
-                        .node_weight_cache
-                        .deserialize_memory_value(&event.payload.value)?;
-                    let serialized_value = Arc::try_unwrap(event.payload.value)
-                        .unwrap_or_else(|arc| (*arc).clone());
-                    self.node_weight_cache
-                        .insert_from_cache_updates(
-                            key.into(),
-                            memory_value,
-                            serialized_value,
-                        )
-                        .await?;
-                }
-            }
         }
     }
     // Message headers are incomplete
diff --git a/lib/si-layer-cache/src/db/node_weight.rs b/lib/si-layer-cache/src/db/node_weight.rs
deleted file mode 100644
index e31f6e93a7..0000000000
--- a/lib/si-layer-cache/src/db/node_weight.rs
+++ /dev/null
@@ -1,120 +0,0 @@
-use std::sync::Arc;
-use std::{collections::HashMap, fmt::Display};
-
-use serde::{de::DeserializeOwned, Serialize};
-use si_events::{Actor, NodeWeightAddress, Tenancy, WebEvent};
-
-use crate::{
-    error::LayerDbResult,
-    event::{LayeredEvent, LayeredEventKind},
-    layer_cache::LayerCache,
-    persister::{PersisterClient, PersisterStatusReader},
-    LayerDbError,
-};
-
-pub const DBNAME: &str = "node_weights";
-pub const CACHE_NAME: &str = "node_weights";
-pub const PARTITION_KEY: &str = "node_weights";
-
-#[derive(Debug, Clone)]
-pub struct NodeWeightDb<V>
-where
-    V: Serialize + DeserializeOwned + Clone + Send + Sync + 'static,
-{
-    pub cache: LayerCache<Arc<V>>,
-    persister_client: PersisterClient,
-}
-
-impl<V> NodeWeightDb<V>
-where
-    V: Serialize + DeserializeOwned + Clone + Send + Sync + 'static,
-{
-    pub fn new(cache: LayerCache<Arc<V>>, persister_client: PersisterClient) -> Self {
-        NodeWeightDb {
-            cache,
-            persister_client,
-        }
-    }
-
-    pub async fn write(
-        &self,
-        value: Arc<V>,
-        web_events: Option<Vec<WebEvent>>,
-        tenancy: Tenancy,
-        actor: Actor,
-    ) -> LayerDbResult<(NodeWeightAddress, PersisterStatusReader)> {
-        let postcard_value = postcard::to_stdvec(&value)?;
-        let key = NodeWeightAddress::new(&postcard_value);
-        let cache_key: Arc<str> = key.to_string().into();
-
-        self.cache.insert(cache_key.clone(), value.clone()).await;
-
-        let event = LayeredEvent::new(
-            LayeredEventKind::NodeWeightWrite,
-            Arc::new(DBNAME.to_string()),
-            cache_key,
-            Arc::new(postcard_value),
-            Arc::new("node_weights".to_string()),
-            web_events,
-            tenancy,
-            actor,
-        );
-        let reader = self.persister_client.write_event(event)?;
-
-        Ok((key, reader))
-    }
-
-    pub async fn read(&self, key: &NodeWeightAddress) -> LayerDbResult<Option<Arc<V>>> {
-        self.cache.get(key.to_string().into()).await
-    }
-
-    /// We often need to extract the value from the arc by cloning it (although
-    /// this should be avoided for large values). This will do that, and also
-    /// helpfully convert the value to the type we want to deal with
-    pub async fn try_read_as<T>(&self, key: &NodeWeightAddress) -> LayerDbResult<Option<T>>
-    where
-        V: TryInto<T>,
-        <V as TryInto<T>>::Error: Display,
-    {
-        Ok(match self.read(key).await? {
-            None => None,
-            Some(arc_v) => Some(
-                arc_v
-                    .as_ref()
-                    .clone()
-                    .try_into()
-                    .map_err(|err| LayerDbError::ContentConversion(err.to_string()))?,
-            ),
-        })
-    }
-
-    pub async fn read_many(
-        &self,
-        keys: &[NodeWeightAddress],
-    ) -> LayerDbResult<HashMap<NodeWeightAddress, Arc<V>>> {
-        self.cache.get_bulk(keys).await
-    }
-
-    pub async fn try_read_many_as<T>(
-        &self,
-        keys: &[NodeWeightAddress],
-    ) -> LayerDbResult<HashMap<NodeWeightAddress, T>>
-    where
-        V: TryInto<T>,
-        <V as TryInto<T>>::Error: Display,
-    {
-        let mut result = HashMap::new();
-        for (key, arc_v) in self.cache.get_bulk(keys).await? {
-            result.insert(
-                key,
-                arc_v
-                    .as_ref()
-                    .clone()
-                    .try_into()
-                    .map_err(|err| LayerDbError::ContentConversion(err.to_string()))?,
-            );
-        }
-
-        Ok(result)
-    }
-}
diff --git a/lib/si-layer-cache/src/error.rs b/lib/si-layer-cache/src/error.rs
index c6832ed1fa..9b7248fce8 100644
--- a/lib/si-layer-cache/src/error.rs
+++ b/lib/si-layer-cache/src/error.rs
@@ -2,7 +2,7 @@ use std::error;
 
 use si_data_nats::async_nats::jetstream;
 use si_data_pg::{PgError, PgPoolError};
-use si_events::ContentHashParseError;
+use si_events::content_hash::ContentHashParseError;
 use si_std::CanonicalFileError;
 use thiserror::Error;
 use tokio_stream::Elapsed;
diff --git a/lib/si-layer-cache/src/event.rs b/lib/si-layer-cache/src/event.rs
index b0f10be1f5..deebea420e 100644
--- a/lib/si-layer-cache/src/event.rs
+++ b/lib/si-layer-cache/src/event.rs
@@ -59,7 +59,6 @@ impl std::str::FromStr for LayeredEventId {
 pub enum LayeredEventKind {
     CasInsertion,
     EncryptedSecretInsertion,
-    NodeWeightWrite,
     Raw,
     SnapshotWrite,
 }
diff --git a/lib/si-layer-cache/src/layer_cache.rs b/lib/si-layer-cache/src/layer_cache.rs
index 26c0ed6e15..92d0239ff3 100644
--- a/lib/si-layer-cache/src/layer_cache.rs
+++ b/lib/si-layer-cache/src/layer_cache.rs
@@ -1,5 +1,4 @@
 use std::{collections::HashMap, fmt::Display, path::Path, sync::Arc};
-use telemetry::prelude::*;
 
 use serde::{de::DeserializeOwned, Serialize};
 use si_data_pg::{PgPool, PgPoolConfig};
@@ -50,7 +49,6 @@ where
             Some(memory_value) => Some(memory_value),
             None => match self.disk_cache.get(&key)? {
                 Some(value) => {
-                    info!("hitting sled");
                     let deserialized: V = postcard::from_bytes(&value)?;
 
                     self.memory_cache.insert(key, deserialized.clone()).await;
@@ -59,7 +57,6 @@ where
                 None => match self.pg.get(&key).await? {
                     Some(value) => {
                         let deserialized: V = postcard::from_bytes(&value)?;
-                        info!("hitting pg");
 
                         self.memory_cache
                             .insert(key.clone(), deserialized.clone())
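// A simplified sketch (not part of this diff) of the read-through order the
// layer_cache.rs hunks above preserve while dropping the `info!` logging:
// check the in-memory cache, then the on-disk cache, then Postgres, and
// backfill the memory tier on a hit from a slower tier. The struct and its
// HashMap tiers are hypothetical stand-ins for moka, sled, and pg.
use std::collections::HashMap;

struct TieredCacheSketch {
    memory: HashMap<String, Vec<u8>>,
    disk: HashMap<String, Vec<u8>>,
    pg: HashMap<String, Vec<u8>>,
}

impl TieredCacheSketch {
    // Read-through: memory, then disk, then pg; backfill memory on a hit.
    fn get(&mut self, key: &str) -> Option<Vec<u8>> {
        if let Some(v) = self.memory.get(key) {
            return Some(v.clone());
        }
        let found = self
            .disk
            .get(key)
            .or_else(|| self.pg.get(key))
            .cloned()?;
        self.memory.insert(key.to_string(), found.clone());
        Some(found)
    }
}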
diff --git a/lib/si-layer-cache/src/memory_cache.rs b/lib/si-layer-cache/src/memory_cache.rs
index 59bfd4a5ec..95dca870c1 100644
--- a/lib/si-layer-cache/src/memory_cache.rs
+++ b/lib/si-layer-cache/src/memory_cache.rs
@@ -1,12 +1,8 @@
-use std::{sync::Arc, time::Duration};
+use std::sync::Arc;
 
 use moka::future::Cache;
 use serde::{de::DeserializeOwned, Serialize};
 
-const DEFAULT_SIZE: u64 = 65_536;
-const DEFAULT_TTL: Duration = Duration::from_secs(60 * 60 * 24 * 2);
-const DEFAULT_TTI: Duration = Duration::from_secs(60 * 60 * 24);
-
 #[derive(Clone, Debug)]
 pub struct MemoryCache<V>
 where
@@ -30,11 +26,7 @@ where
 {
     pub fn new() -> Self {
         Self {
-            cache: Cache::builder()
-                .max_capacity(DEFAULT_SIZE)
-                .time_to_idle(DEFAULT_TTI)
-                .time_to_live(DEFAULT_TTL)
-                .build(),
+            cache: Cache::new(u64::MAX),
         }
     }
diff --git a/lib/si-layer-cache/src/migrations/U0004__node_weights.sql b/lib/si-layer-cache/src/migrations/U0004__node_weights.sql
deleted file mode 100644
index 04a76c0f1f..0000000000
--- a/lib/si-layer-cache/src/migrations/U0004__node_weights.sql
+++ /dev/null
@@ -1,10 +0,0 @@
-CREATE TABLE node_weights
-(
-    key text NOT NULL PRIMARY KEY,
-    sort_key text NOT NULL,
-    created_at timestamp with time zone NOT NULL DEFAULT CLOCK_TIMESTAMP(),
-    value bytea NOT NULL,
-    serialization_lib text NOT NULL DEFAULT 'postcard'
-);
-
-CREATE INDEX IF NOT EXISTS node_weights_sort_key ON node_weights (sort_key);
diff --git a/lib/si-layer-cache/tests/integration_test/activities.rs b/lib/si-layer-cache/tests/integration_test/activities.rs
index 400c8a0695..4888b4ee3b 100644
--- a/lib/si-layer-cache/tests/integration_test/activities.rs
+++ b/lib/si-layer-cache/tests/integration_test/activities.rs
@@ -11,7 +11,7 @@ use tokio_util::sync::CancellationToken;
 
 use crate::integration_test::{setup_nats_client, setup_pg_db};
 
-type TestLayerDb = LayerDb<Arc<String>, Arc<String>, String, String>;
+type TestLayerDb = LayerDb<Arc<String>, Arc<String>, String>;
 
 #[tokio::test]
 async fn activities() {
diff --git a/lib/si-layer-cache/tests/integration_test/activities/rebase.rs b/lib/si-layer-cache/tests/integration_test/activities/rebase.rs
index b9c22dba19..6225a6778c 100644
--- a/lib/si-layer-cache/tests/integration_test/activities/rebase.rs
+++ b/lib/si-layer-cache/tests/integration_test/activities/rebase.rs
@@ -8,7 +8,7 @@ use ulid::Ulid;
 
 use crate::integration_test::{setup_nats_client, setup_pg_db};
 
-type TestLayerDb = LayerDb<Arc<String>, Arc<String>, String, String>;
+type TestLayerDb = LayerDb<Arc<String>, Arc<String>, String>;
 
 #[tokio::test]
 async fn subscribe_rebaser_requests_work_queue() {
diff --git a/lib/si-layer-cache/tests/integration_test/db/cas.rs b/lib/si-layer-cache/tests/integration_test/db/cas.rs
index 99bb355f97..00f78f62bf 100644
--- a/lib/si-layer-cache/tests/integration_test/db/cas.rs
+++ b/lib/si-layer-cache/tests/integration_test/db/cas.rs
@@ -7,7 +7,7 @@ use tokio_util::sync::CancellationToken;
 
 use crate::integration_test::{setup_nats_client, setup_pg_db};
 
-type TestLayerDb = LayerDb;
+type TestLayerDb = LayerDb;
 
 #[tokio::test]
 async fn write_to_db() {
diff --git a/prelude-si/macros.bzl b/prelude-si/macros.bzl
index 4101f66296..2a449ad98a 100644
--- a/prelude-si/macros.bzl
+++ b/prelude-si/macros.bzl
@@ -81,11 +81,9 @@ load(
     _rust_binary = "rust_binary",
     _rust_library = "rust_library",
     _rust_test = "rust_test",
-    _rust_library_integration_test = "rust_library_integration_test",
)
rust_binary = _rust_binary
rust_library =
_rust_library -rust_library_integration_test = _rust_library_integration_test rust_test = _rust_test load( @@ -106,4 +104,4 @@ load( tilt_docker_compose_pull = _tilt_docker_compose_pull tilt_docker_compose_stop = _tilt_docker_compose_stop tilt_down = _tilt_down -tilt_up = _tilt_up +tilt_up = _tilt_up \ No newline at end of file diff --git a/prelude-si/macros/rust.bzl b/prelude-si/macros/rust.bzl index 1bd87eecca..19b83e1fa4 100644 --- a/prelude-si/macros/rust.bzl +++ b/prelude-si/macros/rust.bzl @@ -156,39 +156,6 @@ def rust_binary( visibility = visibility, ) - -def rust_library_integration_test( - name, - srcs, - deps, - crate_root = "src/lib.rs", - edition = "2021", - resources = [], - test_unit_deps = [], - test_unit_srcs = [], - test_unit_resources = {}, - extra_test_targets = [], - proc_macro = False, - visibility = ["PUBLIC"], - **kwargs): - - native.rust_library( - name = name, - edition = edition, - srcs = srcs, - deps = deps, - crate_root = crate_root, - resources = resources, - proc_macro = proc_macro, - visibility = visibility, - **kwargs - ) - - _alias( - name = "integration_test", - actual = ":{}".format(name), - ) - def rust_library( name, srcs, diff --git a/third-party/rust/BUCK b/third-party/rust/BUCK index f03a00731a..7137883439 100644 --- a/third-party/rust/BUCK +++ b/third-party/rust/BUCK @@ -481,18 +481,18 @@ cargo.rust_library( ) http_archive( - name = "async-compression-0.4.7.crate", - sha256 = "86a9249d1447a85f95810c620abea82e001fe58a31713fcce614caf52499f905", - strip_prefix = "async-compression-0.4.7", - urls = ["https://crates.io/api/v1/crates/async-compression/0.4.7/download"], + name = "async-compression-0.4.6.crate", + sha256 = "a116f46a969224200a0a97f29cfd4c50e7534e4b4826bd23ea2c3c533039c82c", + strip_prefix = "async-compression-0.4.6", + urls = ["https://crates.io/api/v1/crates/async-compression/0.4.6/download"], visibility = [], ) cargo.rust_library( - name = "async-compression-0.4.7", - srcs = [":async-compression-0.4.7.crate"], + name = "async-compression-0.4.6", + srcs = [":async-compression-0.4.6.crate"], crate = "async_compression", - crate_root = "async-compression-0.4.7.crate/src/lib.rs", + crate_root = "async-compression-0.4.6.crate/src/lib.rs", edition = "2018", features = [ "brotli", @@ -507,7 +507,7 @@ cargo.rust_library( ":flate2-1.0.28", ":futures-core-0.3.30", ":memchr-2.7.2", - ":pin-project-lite-0.2.14", + ":pin-project-lite-0.2.13", ":tokio-1.37.0", ], ) @@ -621,7 +621,7 @@ cargo.rust_library( deps = [ ":proc-macro2-1.0.79", ":quote-1.0.35", - ":syn-2.0.57", + ":syn-2.0.55", ], ) @@ -643,7 +643,7 @@ cargo.rust_library( deps = [ ":async-stream-impl-0.3.5", ":futures-core-0.3.30", - ":pin-project-lite-0.2.14", + ":pin-project-lite-0.2.13", ], ) @@ -666,7 +666,7 @@ cargo.rust_library( deps = [ ":proc-macro2-1.0.79", ":quote-1.0.35", - ":syn-2.0.57", + ":syn-2.0.55", ], ) @@ -695,7 +695,7 @@ cargo.rust_library( deps = [ ":proc-macro2-1.0.79", ":quote-1.0.35", - ":syn-2.0.57", + ":syn-2.0.55", ], ) @@ -720,7 +720,7 @@ cargo.rust_library( ":futures-sink-0.3.30", ":futures-util-0.3.30", ":memchr-2.7.2", - ":pin-project-lite-0.2.14", + ":pin-project-lite-0.2.13", ], ) @@ -917,7 +917,7 @@ cargo.rust_library( ":mime-0.3.17", ":multer-2.1.0", ":percent-encoding-2.3.1", - ":pin-project-lite-0.2.14", + ":pin-project-lite-0.2.13", ":serde-1.0.197", ":serde_json-1.0.115", ":serde_path_to_error-0.1.16", @@ -980,7 +980,7 @@ cargo.rust_library( ":heck-0.4.1", ":proc-macro2-1.0.79", ":quote-1.0.35", - ":syn-2.0.57", + ":syn-2.0.55", ], ) @@ -1562,7 +1562,7 @@ 
cargo.rust_library( ":hyper-1.2.0", ":hyper-util-0.1.3", ":log-0.4.21", - ":pin-project-lite-0.2.14", + ":pin-project-lite-0.2.13", ":serde-1.0.197", ":serde_derive-1.0.197", ":serde_json-1.0.115", @@ -2026,7 +2026,7 @@ cargo.rust_library( ":heck-0.5.0", ":proc-macro2-1.0.79", ":quote-1.0.35", - ":syn-2.0.57", + ":syn-2.0.55", ], ) @@ -3277,7 +3277,7 @@ cargo.rust_library( deps = [ ":proc-macro2-1.0.79", ":quote-1.0.35", - ":syn-2.0.57", + ":syn-2.0.55", ], ) @@ -3331,7 +3331,7 @@ cargo.rust_library( ":proc-macro2-1.0.79", ":quote-1.0.35", ":strsim-0.10.0", - ":syn-2.0.57", + ":syn-2.0.55", ], ) @@ -3354,7 +3354,7 @@ cargo.rust_library( deps = [ ":darling_core-0.20.8", ":quote-1.0.35", - ":syn-2.0.57", + ":syn-2.0.55", ], ) @@ -3494,18 +3494,18 @@ cargo.rust_library( ) http_archive( - name = "der-0.7.9.crate", - sha256 = "f55bf8e7b65898637379c1b74eb1551107c8294ed26d855ceb9fd1a09cfc9bc0", - strip_prefix = "der-0.7.9", - urls = ["https://crates.io/api/v1/crates/der/0.7.9/download"], + name = "der-0.7.8.crate", + sha256 = "fffa369a668c8af7dbf8b5e56c9f744fbd399949ed171606040001947de40b1c", + strip_prefix = "der-0.7.8", + urls = ["https://crates.io/api/v1/crates/der/0.7.8/download"], visibility = [], ) cargo.rust_library( - name = "der-0.7.9", - srcs = [":der-0.7.9.crate"], + name = "der-0.7.8", + srcs = [":der-0.7.8.crate"], crate = "der", - crate_root = "der-0.7.9.crate/src/lib.rs", + crate_root = "der-0.7.8.crate/src/lib.rs", edition = "2021", features = [ "alloc", @@ -3620,7 +3620,7 @@ cargo.rust_library( ":darling-0.20.8", ":proc-macro2-1.0.79", ":quote-1.0.35", - ":syn-2.0.57", + ":syn-2.0.55", ], ) @@ -3643,7 +3643,7 @@ cargo.rust_library( visibility = [], deps = [ ":derive_builder_core-0.20.0", - ":syn-2.0.57", + ":syn-2.0.55", ], ) @@ -3966,7 +3966,7 @@ cargo.rust_library( ], visibility = [], deps = [ - ":der-0.7.9", + ":der-0.7.8", ":digest-0.10.7", ":elliptic-curve-0.13.8", ":rfc6979-0.4.0", @@ -4110,7 +4110,7 @@ cargo.rust_library( ":enum-ordinalize-4.3.0", ":proc-macro2-1.0.79", ":quote-1.0.35", - ":syn-2.0.57", + ":syn-2.0.55", ], ) @@ -4281,7 +4281,7 @@ cargo.rust_library( deps = [ ":proc-macro2-1.0.79", ":quote-1.0.35", - ":syn-2.0.57", + ":syn-2.0.55", ], ) @@ -4907,7 +4907,7 @@ cargo.rust_library( ":futures-core-0.3.30", ":futures-io-0.3.30", ":parking-2.2.0", - ":pin-project-lite-0.2.14", + ":pin-project-lite-0.2.13", ], ) @@ -4930,7 +4930,7 @@ cargo.rust_library( deps = [ ":proc-macro2-1.0.79", ":quote-1.0.35", - ":syn-2.0.57", + ":syn-2.0.55", ], ) @@ -5038,7 +5038,7 @@ cargo.rust_library( ":futures-sink-0.3.30", ":futures-task-0.3.30", ":memchr-2.7.2", - ":pin-project-lite-0.2.14", + ":pin-project-lite-0.2.13", ":pin-utils-0.1.0", ":slab-0.4.9", ], @@ -5803,7 +5803,7 @@ cargo.rust_library( deps = [ ":bytes-1.6.0", ":http-0.2.12", - ":pin-project-lite-0.2.14", + ":pin-project-lite-0.2.13", ], ) @@ -5848,7 +5848,7 @@ cargo.rust_library( ":futures-core-0.3.30", ":http-1.1.0", ":http-body-1.0.0", - ":pin-project-lite-0.2.14", + ":pin-project-lite-0.2.13", ], ) @@ -5952,7 +5952,7 @@ cargo.rust_library( ":httparse-1.8.0", ":httpdate-1.0.3", ":itoa-1.0.11", - ":pin-project-lite-0.2.14", + ":pin-project-lite-0.2.13", ":socket2-0.5.6", ":tokio-1.37.0", ":tower-service-0.3.2", @@ -5989,7 +5989,7 @@ cargo.rust_library( ":http-body-1.0.0", ":httparse-1.8.0", ":itoa-1.0.11", - ":pin-project-lite-0.2.14", + ":pin-project-lite-0.2.13", ":smallvec-1.13.2", ":tokio-1.37.0", ":want-0.3.1", @@ -6015,7 +6015,7 @@ cargo.rust_library( ":hex-0.4.3", ":hyper-1.2.0", ":hyper-util-0.1.3", - 
":pin-project-lite-0.2.14", + ":pin-project-lite-0.2.13", ":tokio-1.37.0", ":tower-service-0.3.2", ":winapi-0.3.9", @@ -6094,7 +6094,7 @@ cargo.rust_library( visibility = [], deps = [ ":hyper-0.14.28", - ":pin-project-lite-0.2.14", + ":pin-project-lite-0.2.13", ":tokio-1.37.0", ":tokio-io-timeout-1.2.0", ], @@ -6129,7 +6129,7 @@ cargo.rust_library( ":http-1.1.0", ":http-body-1.0.0", ":hyper-1.2.0", - ":pin-project-lite-0.2.14", + ":pin-project-lite-0.2.13", ":socket2-0.5.6", ":tokio-1.37.0", ":tower-0.4.13", @@ -6192,7 +6192,7 @@ cargo.rust_library( ":http-body-util-0.1.1", ":hyper-1.2.0", ":hyper-util-0.1.3", - ":pin-project-lite-0.2.14", + ":pin-project-lite-0.2.13", ":tokio-1.37.0", ":tower-service-0.3.2", ], @@ -6300,7 +6300,7 @@ cargo.rust_library( ":proc-macro2-1.0.79", ":quote-1.0.35", ":serde-1.0.197", - ":syn-2.0.57", + ":syn-2.0.55", ":toml-0.8.12", ":unicode-xid-0.2.4", ], @@ -6495,7 +6495,7 @@ cargo.rust_library( deps = [ ":proc-macro2-1.0.79", ":quote-1.0.35", - ":syn-2.0.57", + ":syn-2.0.55", ], ) @@ -7578,7 +7578,7 @@ cargo.rust_library( deps = [ ":proc-macro2-1.0.79", ":quote-1.0.35", - ":syn-2.0.57", + ":syn-2.0.55", ], ) @@ -7880,7 +7880,7 @@ cargo.rust_library( ":futures-util-0.3.30", ":once_cell-1.19.0", ":parking_lot-0.12.1", - ":quanta-0.12.3", + ":quanta-0.12.2", ":smallvec-1.13.2", ":tagptr-0.2.0", ":thiserror-1.0.58", @@ -8680,7 +8680,7 @@ cargo.rust_library( ":futures-core-0.3.30", ":futures-sink-0.3.30", ":once_cell-1.19.0", - ":pin-project-lite-0.2.14", + ":pin-project-lite-0.2.13", ":thiserror-1.0.58", ":urlencoding-2.1.3", ], @@ -9037,7 +9037,7 @@ cargo.rust_library( ":proc-macro-error-1.0.4", ":proc-macro2-1.0.79", ":quote-1.0.35", - ":syn-2.0.57", + ":syn-2.0.55", ], ) @@ -9064,7 +9064,7 @@ cargo.rust_library( ":proc-macro2-1.0.79", ":proc-macro2-diagnostics-0.10.1", ":quote-1.0.35", - ":syn-2.0.57", + ":syn-2.0.55", ], ) @@ -9594,29 +9594,29 @@ cargo.rust_library( deps = [ ":proc-macro2-1.0.79", ":quote-1.0.35", - ":syn-2.0.57", + ":syn-2.0.55", ], ) alias( name = "pin-project-lite", - actual = ":pin-project-lite-0.2.14", + actual = ":pin-project-lite-0.2.13", visibility = ["PUBLIC"], ) http_archive( - name = "pin-project-lite-0.2.14.crate", - sha256 = "bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02", - strip_prefix = "pin-project-lite-0.2.14", - urls = ["https://crates.io/api/v1/crates/pin-project-lite/0.2.14/download"], + name = "pin-project-lite-0.2.13.crate", + sha256 = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58", + strip_prefix = "pin-project-lite-0.2.13", + urls = ["https://crates.io/api/v1/crates/pin-project-lite/0.2.13/download"], visibility = [], ) cargo.rust_library( - name = "pin-project-lite-0.2.14", - srcs = [":pin-project-lite-0.2.14.crate"], + name = "pin-project-lite-0.2.13", + srcs = [":pin-project-lite-0.2.13.crate"], crate = "pin_project_lite", - crate_root = "pin-project-lite-0.2.14.crate/src/lib.rs", + crate_root = "pin-project-lite-0.2.13.crate/src/lib.rs", edition = "2018", visibility = [], ) @@ -9661,7 +9661,7 @@ cargo.rust_library( ], visibility = [], deps = [ - ":der-0.7.9", + ":der-0.7.8", ":pkcs8-0.10.2", ":spki-0.7.3", ], @@ -9688,7 +9688,7 @@ cargo.rust_library( ], visibility = [], deps = [ - ":der-0.7.9", + ":der-0.7.8", ":spki-0.7.3", ], ) @@ -9987,7 +9987,7 @@ cargo.rust_library( ":heck-0.4.1", ":proc-macro2-1.0.79", ":quote-1.0.35", - ":syn-2.0.57", + ":syn-2.0.55", ], ) @@ -10326,7 +10326,7 @@ cargo.rust_library( deps = [ ":proc-macro2-1.0.79", ":quote-1.0.35", - ":syn-2.0.57", + 
":syn-2.0.55", ":yansi-1.0.1", ], ) @@ -10378,23 +10378,23 @@ cargo.rust_library( ":itertools-0.11.0", ":proc-macro2-1.0.79", ":quote-1.0.35", - ":syn-2.0.57", + ":syn-2.0.55", ], ) http_archive( - name = "quanta-0.12.3.crate", - sha256 = "8e5167a477619228a0b284fac2674e3c388cba90631d7b7de620e6f1fcd08da5", - strip_prefix = "quanta-0.12.3", - urls = ["https://crates.io/api/v1/crates/quanta/0.12.3/download"], + name = "quanta-0.12.2.crate", + sha256 = "9ca0b7bac0b97248c40bb77288fc52029cf1459c0461ea1b05ee32ccf011de2c", + strip_prefix = "quanta-0.12.2", + urls = ["https://crates.io/api/v1/crates/quanta/0.12.2/download"], visibility = [], ) cargo.rust_library( - name = "quanta-0.12.3", - srcs = [":quanta-0.12.3.crate"], + name = "quanta-0.12.2", + srcs = [":quanta-0.12.2.crate"], crate = "quanta", - crate_root = "quanta-0.12.3.crate/src/lib.rs", + crate_root = "quanta-0.12.2.crate/src/lib.rs", edition = "2021", features = [ "default", @@ -10758,56 +10758,53 @@ cargo.rust_library( alias( name = "refinery", - actual = ":refinery-0.8.13", + actual = ":refinery-0.8.12", visibility = ["PUBLIC"], ) http_archive( - name = "refinery-0.8.13.crate", - sha256 = "425d0fb45561a45e274d318bfbd1bdfbb7a88045860c7b51a2b812ded1a5efc7", - strip_prefix = "refinery-0.8.13", - urls = ["https://crates.io/api/v1/crates/refinery/0.8.13/download"], + name = "refinery-0.8.12.crate", + sha256 = "a2783724569d96af53464d0711dff635cab7a4934df5e22e9fbc9e181523b83e", + strip_prefix = "refinery-0.8.12", + urls = ["https://crates.io/api/v1/crates/refinery/0.8.12/download"], visibility = [], ) cargo.rust_library( - name = "refinery-0.8.13", - srcs = [":refinery-0.8.13.crate"], + name = "refinery-0.8.12", + srcs = [":refinery-0.8.12.crate"], crate = "refinery", - crate_root = "refinery-0.8.13.crate/src/lib.rs", + crate_root = "refinery-0.8.12.crate/src/lib.rs", edition = "2018", features = [ "default", "tokio-postgres", - "toml", ], visibility = [], deps = [ - ":refinery-core-0.8.13", - ":refinery-macros-0.8.13", + ":refinery-core-0.8.12", + ":refinery-macros-0.8.12", ], ) http_archive( - name = "refinery-core-0.8.13.crate", - sha256 = "3d0f5d1af6a2e8d5972ca187b2acf7ecb8d6a1a6ece52bceeae8f57880eaf62f", - strip_prefix = "refinery-core-0.8.13", - urls = ["https://crates.io/api/v1/crates/refinery-core/0.8.13/download"], + name = "refinery-core-0.8.12.crate", + sha256 = "08d6c80329c0455510a8d42fce286ecb4b6bcd8c57e1816d9f2d6bd7379c2cc8", + strip_prefix = "refinery-core-0.8.12", + urls = ["https://crates.io/api/v1/crates/refinery-core/0.8.12/download"], visibility = [], ) cargo.rust_library( - name = "refinery-core-0.8.13", - srcs = [":refinery-core-0.8.13.crate"], + name = "refinery-core-0.8.12", + srcs = [":refinery-core-0.8.12.crate"], crate = "refinery_core", - crate_root = "refinery-core-0.8.13.crate/src/lib.rs", + crate_root = "refinery-core-0.8.12.crate/src/lib.rs", edition = "2018", features = [ "default", - "serde", "tokio", "tokio-postgres", - "toml", ], visibility = [], deps = [ @@ -10828,27 +10825,27 @@ cargo.rust_library( ) http_archive( - name = "refinery-macros-0.8.13.crate", - sha256 = "7ba59636ac45d953f2225dc4ca3a55cfda1b015d0e6ff51ea16329918b436d51", - strip_prefix = "refinery-macros-0.8.13", - urls = ["https://crates.io/api/v1/crates/refinery-macros/0.8.13/download"], + name = "refinery-macros-0.8.12.crate", + sha256 = "6ab6e31e166a49d55cb09b62639e5ab9ba2e73f2f124336b06f6c321dc602779", + strip_prefix = "refinery-macros-0.8.12", + urls = ["https://crates.io/api/v1/crates/refinery-macros/0.8.12/download"], visibility = [], 
) cargo.rust_library( - name = "refinery-macros-0.8.13", - srcs = [":refinery-macros-0.8.13.crate"], + name = "refinery-macros-0.8.12", + srcs = [":refinery-macros-0.8.12.crate"], crate = "refinery_macros", - crate_root = "refinery-macros-0.8.13.crate/src/lib.rs", + crate_root = "refinery-macros-0.8.12.crate/src/lib.rs", edition = "2018", proc_macro = True, visibility = [], deps = [ ":proc-macro2-1.0.79", ":quote-1.0.35", - ":refinery-core-0.8.13", + ":refinery-core-0.8.12", ":regex-1.10.4", - ":syn-2.0.57", + ":syn-2.0.55", ], ) @@ -11053,7 +11050,7 @@ cargo.rust_library( deps = [ ":proc-macro2-1.0.79", ":quote-1.0.35", - ":syn-2.0.57", + ":syn-2.0.55", ], ) @@ -11099,7 +11096,7 @@ cargo.rust_library( ":mime-0.3.17", ":once_cell-1.19.0", ":percent-encoding-2.3.1", - ":pin-project-lite-0.2.14", + ":pin-project-lite-0.2.13", ":rustls-0.22.3", ":rustls-pemfile-1.0.4", ":rustls-pki-types-1.4.1", @@ -11120,7 +11117,7 @@ cargo.rust_library( ":mime-0.3.17", ":once_cell-1.19.0", ":percent-encoding-2.3.1", - ":pin-project-lite-0.2.14", + ":pin-project-lite-0.2.13", ":rustls-0.22.3", ":rustls-pemfile-1.0.4", ":rustls-pki-types-1.4.1", @@ -11141,7 +11138,7 @@ cargo.rust_library( ":mime-0.3.17", ":once_cell-1.19.0", ":percent-encoding-2.3.1", - ":pin-project-lite-0.2.14", + ":pin-project-lite-0.2.13", ":rustls-0.22.3", ":rustls-pemfile-1.0.4", ":rustls-pki-types-1.4.1", @@ -11162,7 +11159,7 @@ cargo.rust_library( ":mime-0.3.17", ":once_cell-1.19.0", ":percent-encoding-2.3.1", - ":pin-project-lite-0.2.14", + ":pin-project-lite-0.2.13", ":rustls-0.22.3", ":rustls-pemfile-1.0.4", ":rustls-pki-types-1.4.1", @@ -11183,7 +11180,7 @@ cargo.rust_library( ":mime-0.3.17", ":once_cell-1.19.0", ":percent-encoding-2.3.1", - ":pin-project-lite-0.2.14", + ":pin-project-lite-0.2.13", ":rustls-0.22.3", ":rustls-pemfile-1.0.4", ":rustls-pki-types-1.4.1", @@ -11205,7 +11202,7 @@ cargo.rust_library( ":mime-0.3.17", ":once_cell-1.19.0", ":percent-encoding-2.3.1", - ":pin-project-lite-0.2.14", + ":pin-project-lite-0.2.13", ":rustls-0.22.3", ":rustls-pemfile-1.0.4", ":rustls-pki-types-1.4.1", @@ -12395,10 +12392,10 @@ cargo.rust_library( deps = [":openssl-probe-0.1.5"], ), "macos-arm64": dict( - deps = [":security-framework-2.10.0"], + deps = [":security-framework-2.9.2"], ), "macos-x86_64": dict( - deps = [":security-framework-2.10.0"], + deps = [":security-framework-2.9.2"], ), "windows-gnu": dict( deps = [":schannel-0.1.23"], @@ -12436,10 +12433,10 @@ cargo.rust_library( deps = [":openssl-probe-0.1.5"], ), "macos-arm64": dict( - deps = [":security-framework-2.10.0"], + deps = [":security-framework-2.9.2"], ), "macos-x86_64": dict( - deps = [":security-framework-2.10.0"], + deps = [":security-framework-2.9.2"], ), "windows-gnu": dict( deps = [":schannel-0.1.23"], @@ -12735,7 +12732,7 @@ cargo.rust_library( ":proc-macro-error-1.0.4", ":proc-macro2-1.0.79", ":quote-1.0.35", - ":syn-2.0.57", + ":syn-2.0.55", ], ) @@ -12837,7 +12834,7 @@ cargo.rust_library( ":heck-0.4.1", ":proc-macro2-1.0.79", ":quote-1.0.35", - ":syn-2.0.57", + ":syn-2.0.55", ":unicode-ident-1.0.12", ], ) @@ -12965,7 +12962,7 @@ cargo.rust_library( visibility = [], deps = [ ":base16ct-0.2.0", - ":der-0.7.9", + ":der-0.7.8", ":generic-array-0.14.7", ":pkcs8-0.10.2", ":subtle-2.5.0", @@ -12974,22 +12971,20 @@ cargo.rust_library( ) http_archive( - name = "security-framework-2.10.0.crate", - sha256 = "770452e37cad93e0a50d5abc3990d2bc351c36d0328f86cefec2f2fb206eaef6", - strip_prefix = "security-framework-2.10.0", - urls = 
["https://crates.io/api/v1/crates/security-framework/2.10.0/download"], + name = "security-framework-2.9.2.crate", + sha256 = "05b64fb303737d99b81884b2c63433e9ae28abebe5eb5045dcdd175dc2ecf4de", + strip_prefix = "security-framework-2.9.2", + urls = ["https://crates.io/api/v1/crates/security-framework/2.9.2/download"], visibility = [], ) cargo.rust_library( - name = "security-framework-2.10.0", - srcs = [":security-framework-2.10.0.crate"], + name = "security-framework-2.9.2", + srcs = [":security-framework-2.9.2.crate"], crate = "security_framework", - crate_root = "security-framework-2.10.0.crate/src/lib.rs", + crate_root = "security-framework-2.9.2.crate/src/lib.rs", edition = "2021", features = [ - "OSX_10_10", - "OSX_10_11", "OSX_10_9", "default", ], @@ -12999,29 +12994,25 @@ cargo.rust_library( ":core-foundation-0.9.4", ":core-foundation-sys-0.8.6", ":libc-0.2.153", - ":security-framework-sys-2.10.0", + ":security-framework-sys-2.9.1", ], ) http_archive( - name = "security-framework-sys-2.10.0.crate", - sha256 = "41f3cc463c0ef97e11c3461a9d3787412d30e8e7eb907c79180c4a57bf7c04ef", - strip_prefix = "security-framework-sys-2.10.0", - urls = ["https://crates.io/api/v1/crates/security-framework-sys/2.10.0/download"], + name = "security-framework-sys-2.9.1.crate", + sha256 = "e932934257d3b408ed8f30db49d85ea163bfe74961f017f405b025af298f0c7a", + strip_prefix = "security-framework-sys-2.9.1", + urls = ["https://crates.io/api/v1/crates/security-framework-sys/2.9.1/download"], visibility = [], ) cargo.rust_library( - name = "security-framework-sys-2.10.0", - srcs = [":security-framework-sys-2.10.0.crate"], + name = "security-framework-sys-2.9.1", + srcs = [":security-framework-sys-2.9.1.crate"], crate = "security_framework_sys", - crate_root = "security-framework-sys-2.10.0.crate/src/lib.rs", + crate_root = "security-framework-sys-2.9.1.crate/src/lib.rs", edition = "2021", - features = [ - "OSX_10_10", - "OSX_10_11", - "OSX_10_9", - ], + features = ["OSX_10_9"], visibility = [], deps = [ ":core-foundation-sys-0.8.6", @@ -13223,7 +13214,7 @@ cargo.rust_library( deps = [ ":proc-macro2-1.0.79", ":quote-1.0.35", - ":syn-2.0.57", + ":syn-2.0.55", ], ) @@ -13323,7 +13314,7 @@ cargo.rust_library( deps = [ ":proc-macro2-1.0.79", ":quote-1.0.35", - ":syn-2.0.57", + ":syn-2.0.55", ], ) @@ -13494,7 +13485,7 @@ cargo.rust_library( ":darling-0.20.8", ":proc-macro2-1.0.79", ":quote-1.0.35", - ":syn-2.0.57", + ":syn-2.0.55", ], ) @@ -13518,7 +13509,7 @@ cargo.rust_library( ":darling-0.20.8", ":proc-macro2-1.0.79", ":quote-1.0.35", - ":syn-2.0.57", + ":syn-2.0.55", ], ) @@ -14117,7 +14108,7 @@ cargo.rust_library( visibility = [], deps = [ ":base64ct-1.6.0", - ":der-0.7.9", + ":der-0.7.8", ], ) @@ -14526,7 +14517,7 @@ cargo.rust_library( ":proc-macro2-1.0.79", ":quote-1.0.35", ":rustversion-1.0.14", - ":syn-2.0.57", + ":syn-2.0.55", ], ) @@ -14551,7 +14542,7 @@ cargo.rust_library( ":proc-macro2-1.0.79", ":quote-1.0.35", ":rustversion-1.0.14", - ":syn-2.0.57", + ":syn-2.0.55", ], ) @@ -14643,23 +14634,23 @@ cargo.rust_library( alias( name = "syn", - actual = ":syn-2.0.57", + actual = ":syn-2.0.55", visibility = ["PUBLIC"], ) http_archive( - name = "syn-2.0.57.crate", - sha256 = "11a6ae1e52eb25aab8f3fb9fca13be982a373b8f1157ca14b897a825ba4a2d35", - strip_prefix = "syn-2.0.57", - urls = ["https://crates.io/api/v1/crates/syn/2.0.57/download"], + name = "syn-2.0.55.crate", + sha256 = "002a1b3dbf967edfafc32655d0f377ab0bb7b994aa1d32c8cc7e9b8bf3ebb8f0", + strip_prefix = "syn-2.0.55", + urls = 
["https://crates.io/api/v1/crates/syn/2.0.55/download"], visibility = [], ) cargo.rust_library( - name = "syn-2.0.57", - srcs = [":syn-2.0.57.crate"], + name = "syn-2.0.55", + srcs = [":syn-2.0.55.crate"], crate = "syn", - crate_root = "syn-2.0.57.crate/src/lib.rs", + crate_root = "syn-2.0.55.crate/src/lib.rs", edition = "2021", features = [ "clone-impls", @@ -14671,6 +14662,7 @@ cargo.rust_library( "parsing", "printing", "proc-macro", + "quote", "visit", "visit-mut", ], @@ -14923,7 +14915,7 @@ cargo.rust_library( deps = [ ":proc-macro2-1.0.79", ":quote-1.0.35", - ":syn-2.0.57", + ":syn-2.0.55", ], ) @@ -14992,7 +14984,7 @@ cargo.rust_binary( ":paste-1.0.14", ":pathdiff-0.2.1", ":petgraph-0.6.4", - ":pin-project-lite-0.2.14", + ":pin-project-lite-0.2.13", ":podman-api-0.10.0", ":postcard-1.0.8", ":postgres-types-0.2.6", @@ -15000,7 +14992,7 @@ cargo.rust_binary( ":proc-macro2-1.0.79", ":quote-1.0.35", ":rand-0.8.5", - ":refinery-0.8.13", + ":refinery-0.8.12", ":regex-1.10.4", ":remain-0.2.13", ":reqwest-0.12.2", @@ -15020,7 +15012,7 @@ cargo.rust_binary( ":sodiumoxide-0.2.7", ":stream-cancel-0.8.2", ":strum-0.26.2", - ":syn-2.0.57", + ":syn-2.0.55", ":tar-0.4.40", ":tempfile-3.10.1", ":test-log-0.2.15", @@ -15094,7 +15086,7 @@ cargo.rust_library( deps = [ ":proc-macro2-1.0.79", ":quote-1.0.35", - ":syn-2.0.57", + ":syn-2.0.55", ], ) @@ -15380,7 +15372,7 @@ cargo.rust_library( ":mio-0.8.11", ":num_cpus-1.16.0", ":parking_lot-0.12.1", - ":pin-project-lite-0.2.14", + ":pin-project-lite-0.2.13", ":tokio-macros-2.2.0", ":tracing", ], @@ -15402,7 +15394,7 @@ cargo.rust_library( edition = "2018", visibility = [], deps = [ - ":pin-project-lite-0.2.14", + ":pin-project-lite-0.2.13", ":tokio-1.37.0", ], ) @@ -15426,7 +15418,7 @@ cargo.rust_library( deps = [ ":proc-macro2-1.0.79", ":quote-1.0.35", - ":syn-2.0.57", + ":syn-2.0.55", ], ) @@ -15488,7 +15480,7 @@ cargo.rust_library( ":parking_lot-0.12.1", ":percent-encoding-2.3.1", ":phf-0.11.2", - ":pin-project-lite-0.2.14", + ":pin-project-lite-0.2.13", ":postgres-protocol-0.6.6", ":postgres-types-0.2.6", ":rand-0.8.5", @@ -15645,7 +15637,7 @@ cargo.rust_library( visibility = [], deps = [ ":futures-core-0.3.30", - ":pin-project-lite-0.2.14", + ":pin-project-lite-0.2.13", ":tokio-1.37.0", ":tokio-util-0.7.10", ], @@ -15751,7 +15743,7 @@ cargo.rust_library( ":futures-core-0.3.30", ":futures-sink-0.3.30", ":futures-util-0.3.30", - ":pin-project-lite-0.2.14", + ":pin-project-lite-0.2.13", ":tokio-1.37.0", ":tracing-0.1.40", ], @@ -15984,7 +15976,7 @@ cargo.rust_library( ":hdrhistogram-7.5.4", ":indexmap-1.9.3", ":pin-project-1.1.5", - ":pin-project-lite-0.2.14", + ":pin-project-lite-0.2.13", ":rand-0.8.5", ":slab-0.4.9", ":tokio-1.37.0", @@ -16029,7 +16021,7 @@ cargo.rust_library( ], visibility = [], deps = [ - ":async-compression-0.4.7", + ":async-compression-0.4.6", ":bitflags-2.5.0", ":bytes-1.6.0", ":futures-core-0.3.30", @@ -16037,7 +16029,7 @@ cargo.rust_library( ":http-0.2.12", ":http-body-0.4.6", ":http-range-header-0.3.1", - ":pin-project-lite-0.2.14", + ":pin-project-lite-0.2.13", ":tokio-1.37.0", ":tokio-util-0.7.10", ":tower-layer-0.3.2", @@ -16110,7 +16102,7 @@ cargo.rust_library( visibility = [], deps = [ ":log-0.4.21", - ":pin-project-lite-0.2.14", + ":pin-project-lite-0.2.13", ":tracing-attributes-0.1.27", ":tracing-core-0.1.32", ], @@ -16135,7 +16127,7 @@ cargo.rust_library( deps = [ ":proc-macro2-1.0.79", ":quote-1.0.35", - ":syn-2.0.57", + ":syn-2.0.55", ], ) @@ -16396,7 +16388,7 @@ cargo.rust_library( visibility = [], deps = [ 
":futures-0.3.30", - ":pin-project-lite-0.2.14", + ":pin-project-lite-0.2.13", ":tokio-1.37.0", ], ) @@ -17542,7 +17534,7 @@ cargo.rust_library( ":bcder-0.7.4", ":bytes-1.6.0", ":chrono-0.4.37", - ":der-0.7.9", + ":der-0.7.8", ":hex-0.4.3", ":pem-3.0.3", ":ring-0.17.5", @@ -17756,6 +17748,6 @@ cargo.rust_library( deps = [ ":proc-macro2-1.0.79", ":quote-1.0.35", - ":syn-2.0.57", + ":syn-2.0.55", ], ) diff --git a/third-party/rust/Cargo.lock b/third-party/rust/Cargo.lock index c68fd3770a..9fb68b103a 100644 --- a/third-party/rust/Cargo.lock +++ b/third-party/rust/Cargo.lock @@ -166,9 +166,9 @@ checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" [[package]] name = "async-compression" -version = "0.4.7" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86a9249d1447a85f95810c620abea82e001fe58a31713fcce614caf52499f905" +checksum = "a116f46a969224200a0a97f29cfd4c50e7534e4b4826bd23ea2c3c533039c82c" dependencies = [ "brotli", "flate2", @@ -228,7 +228,7 @@ checksum = "30c5ef0ede93efbf733c1a727f3b6b5a1060bbedd5600183e66f6e4be4af0ec5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.57", + "syn 2.0.55", ] [[package]] @@ -250,7 +250,7 @@ checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2", "quote", - "syn 2.0.57", + "syn 2.0.55", ] [[package]] @@ -261,7 +261,7 @@ checksum = "a507401cad91ec6a857ed5513a2073c82a9b9048762b885bb98655b306964681" dependencies = [ "proc-macro2", "quote", - "syn 2.0.57", + "syn 2.0.55", ] [[package]] @@ -409,7 +409,7 @@ dependencies = [ "heck 0.4.1", "proc-macro2", "quote", - "syn 2.0.57", + "syn 2.0.55", ] [[package]] @@ -608,7 +608,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.57", + "syn 2.0.55", "syn_derive", ] @@ -827,7 +827,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.57", + "syn 2.0.55", ] [[package]] @@ -1262,7 +1262,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.57", + "syn 2.0.55", ] [[package]] @@ -1286,7 +1286,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.10.0", - "syn 2.0.57", + "syn 2.0.55", ] [[package]] @@ -1297,7 +1297,7 @@ checksum = "a668eda54683121533a393014d8692171709ff57a7d61f187b6e782719f8933f" dependencies = [ "darling_core", "quote", - "syn 2.0.57", + "syn 2.0.55", ] [[package]] @@ -1353,9 +1353,9 @@ dependencies = [ [[package]] name = "der" -version = "0.7.9" +version = "0.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f55bf8e7b65898637379c1b74eb1551107c8294ed26d855ceb9fd1a09cfc9bc0" +checksum = "fffa369a668c8af7dbf8b5e56c9f744fbd399949ed171606040001947de40b1c" dependencies = [ "const-oid", "pem-rfc7468", @@ -1401,7 +1401,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.57", + "syn 2.0.55", ] [[package]] @@ -1411,7 +1411,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "206868b8242f27cecce124c19fd88157fbd0dd334df2587f36417bafbc85097b" dependencies = [ "derive_builder_core", - "syn 2.0.57", + "syn 2.0.55", ] [[package]] @@ -1586,7 +1586,7 @@ dependencies = [ "enum-ordinalize", "proc-macro2", "quote", - "syn 2.0.57", + "syn 2.0.55", ] [[package]] @@ -1657,7 +1657,7 @@ checksum = "0d28318a75d4aead5c4db25382e8ef717932d0346600cacae6357eb5941bc5ff" dependencies = [ "proc-macro2", "quote", - "syn 2.0.57", + "syn 2.0.55", ] [[package]] @@ -1905,7 +1905,7 @@ checksum = 
"87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.57", + "syn 2.0.55", ] [[package]] @@ -2495,7 +2495,7 @@ dependencies = [ "proc-macro2", "quote", "serde", - "syn 2.0.57", + "syn 2.0.55", "toml", "unicode-xid", ] @@ -2571,7 +2571,7 @@ checksum = "0122b7114117e64a63ac49f752a5ca4624d534c7b1c7de796ac196381cd2d947" dependencies = [ "proc-macro2", "quote", - "syn 2.0.57", + "syn 2.0.55", ] [[package]] @@ -2741,12 +2741,13 @@ checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058" [[package]] name = "libredox" -version = "0.1.3" +version = "0.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" +checksum = "85c833ca1e66078851dba29046874e38f08b2c883700aa29a03ddd3b23814ee8" dependencies = [ "bitflags 2.5.0", "libc", + "redox_syscall 0.4.1", ] [[package]] @@ -2817,7 +2818,7 @@ checksum = "5cf92c10c7e361d6b99666ec1c6f9805b0bea2c3bd8c78dc6fe98ac5bd78db11" dependencies = [ "proc-macro2", "quote", - "syn 2.0.57", + "syn 2.0.55", ] [[package]] @@ -3299,7 +3300,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.57", + "syn 2.0.55", ] [[package]] @@ -3313,7 +3314,7 @@ dependencies = [ "proc-macro2", "proc-macro2-diagnostics", "quote", - "syn 2.0.57", + "syn 2.0.55", ] [[package]] @@ -3510,14 +3511,14 @@ checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ "proc-macro2", "quote", - "syn 2.0.57", + "syn 2.0.55", ] [[package]] name = "pin-project-lite" -version = "0.2.14" +version = "0.2.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02" +checksum = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58" [[package]] name = "pin-utils" @@ -3649,7 +3650,7 @@ dependencies = [ "heck 0.4.1", "proc-macro2", "quote", - "syn 2.0.57", + "syn 2.0.55", ] [[package]] @@ -3776,7 +3777,7 @@ checksum = "af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.57", + "syn 2.0.55", "version_check", "yansi 1.0.1", ] @@ -3801,7 +3802,7 @@ dependencies = [ "itertools 0.11.0", "proc-macro2", "quote", - "syn 2.0.57", + "syn 2.0.55", ] [[package]] @@ -3837,9 +3838,9 @@ dependencies = [ [[package]] name = "quanta" -version = "0.12.3" +version = "0.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e5167a477619228a0b284fac2674e3c388cba90631d7b7de620e6f1fcd08da5" +checksum = "9ca0b7bac0b97248c40bb77288fc52029cf1459c0461ea1b05ee32ccf011de2c" dependencies = [ "crossbeam-utils", "libc", @@ -3995,9 +3996,9 @@ dependencies = [ [[package]] name = "redox_users" -version = "0.4.5" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd283d9651eeda4b2a83a43c1c91b266c40fd76ecd39a50a8c630ae69dc72891" +checksum = "a18479200779601e498ada4e8c1e1f50e3ee19deb0259c25825a98b5603b2cb4" dependencies = [ "getrandom 0.2.12", "libredox", @@ -4006,9 +4007,9 @@ dependencies = [ [[package]] name = "refinery" -version = "0.8.13" +version = "0.8.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "425d0fb45561a45e274d318bfbd1bdfbb7a88045860c7b51a2b812ded1a5efc7" +checksum = "a2783724569d96af53464d0711dff635cab7a4934df5e22e9fbc9e181523b83e" dependencies = [ "refinery-core", "refinery-macros", @@ -4016,9 +4017,9 @@ dependencies = [ 
[[package]] name = "refinery-core" -version = "0.8.13" +version = "0.8.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d0f5d1af6a2e8d5972ca187b2acf7ecb8d6a1a6ece52bceeae8f57880eaf62f" +checksum = "08d6c80329c0455510a8d42fce286ecb4b6bcd8c57e1816d9f2d6bd7379c2cc8" dependencies = [ "async-trait", "cfg-if", @@ -4037,15 +4038,15 @@ dependencies = [ [[package]] name = "refinery-macros" -version = "0.8.13" +version = "0.8.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ba59636ac45d953f2225dc4ca3a55cfda1b015d0e6ff51ea16329918b436d51" +checksum = "6ab6e31e166a49d55cb09b62639e5ab9ba2e73f2f124336b06f6c321dc602779" dependencies = [ "proc-macro2", "quote", "refinery-core", "regex", - "syn 2.0.57", + "syn 2.0.55", ] [[package]] @@ -4100,7 +4101,7 @@ checksum = "ad9f2390298a947ee0aa6073d440e221c0726188cfbcdf9604addb6ee393eb4a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.57", + "syn 2.0.55", ] [[package]] @@ -4471,7 +4472,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.57", + "syn 2.0.55", ] [[package]] @@ -4512,7 +4513,7 @@ dependencies = [ "proc-macro2", "quote", "sea-bae", - "syn 2.0.57", + "syn 2.0.55", "unicode-ident", ] @@ -4571,9 +4572,9 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.10.0" +version = "2.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "770452e37cad93e0a50d5abc3990d2bc351c36d0328f86cefec2f2fb206eaef6" +checksum = "05b64fb303737d99b81884b2c63433e9ae28abebe5eb5045dcdd175dc2ecf4de" dependencies = [ "bitflags 1.3.2", "core-foundation", @@ -4584,9 +4585,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.10.0" +version = "2.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41f3cc463c0ef97e11c3461a9d3787412d30e8e7eb907c79180c4a57bf7c04ef" +checksum = "e932934257d3b408ed8f30db49d85ea163bfe74961f017f405b025af298f0c7a" dependencies = [ "core-foundation-sys", "libc", @@ -4640,7 +4641,7 @@ checksum = "7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.57", + "syn 2.0.55", ] [[package]] @@ -4682,7 +4683,7 @@ checksum = "0b2e6b945e9d3df726b65d6ee24060aff8e3533d431f677a9695db04eff9dfdb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.57", + "syn 2.0.55", ] [[package]] @@ -4759,7 +4760,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.57", + "syn 2.0.55", ] [[package]] @@ -4771,7 +4772,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.57", + "syn 2.0.55", ] [[package]] @@ -5294,7 +5295,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.57", + "syn 2.0.55", ] [[package]] @@ -5307,7 +5308,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.57", + "syn 2.0.55", ] [[package]] @@ -5342,9 +5343,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.57" +version = "2.0.55" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11a6ae1e52eb25aab8f3fb9fca13be982a373b8f1157ca14b897a825ba4a2d35" +checksum = "002a1b3dbf967edfafc32655d0f377ab0bb7b994aa1d32c8cc7e9b8bf3ebb8f0" dependencies = [ "proc-macro2", "quote", @@ -5360,7 +5361,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.57", + "syn 2.0.55", ] [[package]] @@ -5441,7 +5442,7 @@ checksum = "c8f546451eaa38373f549093fe9fd05e7d2bade739e2ddf834b9968621d60107" dependencies = [ "proc-macro2", "quote", - "syn 2.0.57", + "syn 2.0.55", 
] [[package]] @@ -5533,7 +5534,7 @@ dependencies = [ "sodiumoxide", "stream-cancel", "strum 0.26.2", - "syn 2.0.57", + "syn 2.0.55", "tar", "tempfile", "test-log", @@ -5580,7 +5581,7 @@ checksum = "c61f3ba182994efc43764a46c018c347bc492c79f024e705f46567b418f6d4f7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.57", + "syn 2.0.55", ] [[package]] @@ -5695,7 +5696,7 @@ checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.57", + "syn 2.0.55", ] [[package]] @@ -5986,7 +5987,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.57", + "syn 2.0.55", ] [[package]] @@ -6363,7 +6364,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.57", + "syn 2.0.55", "wasm-bindgen-shared", ] @@ -6397,7 +6398,7 @@ checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.57", + "syn 2.0.55", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -6749,7 +6750,7 @@ checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.57", + "syn 2.0.55", ] [[package]] @@ -6769,5 +6770,5 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.57", + "syn 2.0.55", ] diff --git a/third-party/rust/Cargo.toml b/third-party/rust/Cargo.toml index 4eb9e8b0a6..8fb7861abd 100644 --- a/third-party/rust/Cargo.toml +++ b/third-party/rust/Cargo.toml @@ -55,7 +55,7 @@ directories = "5.0.1" docker-api = "0.14.0" dyn-clone = "1.0.17" flate2 = "1.0.28" -futures = {version = "0.3.30", features = ["executor"]} +futures = "0.3.30" futures-lite = "2.3.0" hex = "0.4.3" http = "0.2.12" # todo: upgrade this alongside hyper/axum/tokio-tungstenite/tower-http
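// A small aside (not part of this diff) on the Cargo.toml change directly
// above: the `futures` crate enables its "executor" feature as part of its
// default feature set, so dropping the explicit `features = ["executor"]`
// should still leave `futures::executor::block_on` available, as long as
// default features stay on (i.e. no `default-features = false`).
fn block_on_still_works() {
    // Runs a future to completion on the current thread.
    let value = futures::executor::block_on(async { 40 + 2 });
    assert_eq!(value, 42);
}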