diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile index 05379902d..090b44914 100644 --- a/.devcontainer/Dockerfile +++ b/.devcontainer/Dockerfile @@ -1,9 +1,13 @@ FROM fedora:latest +ENV PATH "$PATH:/home/vscode/.cargo/bin" + RUN bash -c "$(curl -fsSL "https://raw.githubusercontent.com/microsoft/vscode-dev-containers/main/script-library/common-redhat.sh")" -- "true" "vscode" "1000" "1000" "true" RUN dnf install -y \ - sudo git cargo rust rust-src git-core openssl openssl-devel clippy rustfmt golang tpm2-tss-devel clevis clevis-luks cryptsetup cryptsetup-devel clang-devel \ + sudo git cargo rust rust-src git-core openssl openssl-devel clippy rustfmt golang tpm2-tss-devel clevis clevis-luks cryptsetup cryptsetup-devel clang-devel sqlite sqlite-devel libpq libpq-devel \ && dnf clean all USER vscode + +RUN cargo install --force diesel_cli --no-default-features --features sqlite \ No newline at end of file diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index a972b346c..713753531 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -27,6 +27,17 @@ "serayuzgur.crates", "rust-lang.rust-analyzer" ], + "remoteEnv": { + "PATH": "${containerEnv:PATH}:/home/vscode/.cargo/bin", + "SQLITE_MANUFACTURER_DATABASE_URL": "../ci-manufacturer-db.sqlite", + "SQLITE_OWNER_DATABASE_URL": "../ci-owner-db.sqlite", + "SQLITE_RENDEZVOUS_DATABASE_URL": "../ci-rendezvous-db.sqlite" + }, + "containerEnv": { + "SQLITE_MANUFACTURER_DATABASE_URL": "../ci-manufacturer-db.sqlite", + "SQLITE_OWNER_DATABASE_URL": "../ci-owner-db.sqlite", + "SQLITE_RENDEZVOUS_DATABASE_URL": "../ci-rendezvous-db.sqlite" + }, "hostRequirements": { "memory": "4gb" }, @@ -35,5 +46,6 @@ "cargo", "build" ], + "postCreateCommand": "cargo install --force diesel_cli --no-default-features --features sqlite && diesel migration run --migration-dir ./migrations/migrations_manufacturing_server_sqlite --database-url ./ci-manufacturer-db.sqlite && diesel 
migration run --migration-dir ./migrations/migrations_owner_onboarding_server_sqlite --database-url ./ci-owner-db.sqlite && diesel migration run --migration-dir ./migrations/migrations_rendezvous_server_sqlite --database-url ./ci-rendezvous-db.sqlite", "waitFor": "onCreateCommand" } diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 5e4313fc0..265104c38 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -87,7 +87,7 @@ jobs: steps: - name: Install deps run: | - dnf install -y make gcc openssl openssl-devel findutils golang git tpm2-tss-devel swtpm swtpm-tools git clevis clevis-luks cryptsetup cryptsetup-devel clang-devel cracklib-dicts + dnf install -y make gcc openssl openssl-devel findutils golang git tpm2-tss-devel swtpm swtpm-tools git clevis clevis-luks cryptsetup cryptsetup-devel clang-devel cracklib-dicts sqlite sqlite-devel libpq libpq-devel - uses: actions/checkout@v3 with: persist-credentials: false @@ -112,7 +112,19 @@ jobs: env: FDO_PRIVILEGED: true PER_DEVICE_SERVICEINFO: false - run: cargo test --workspace + SQLITE_MANUFACTURER_DATABASE_URL: ../ci-manufacturer-db.sqlite + SQLITE_OWNER_DATABASE_URL: ../ci-owner-db.sqlite + SQLITE_RENDEZVOUS_DATABASE_URL: ../ci-rendezvous-db.sqlite + run: | + # prep for database tests + cargo install --force diesel_cli --no-default-features --features sqlite + diesel migration run --migration-dir ./migrations/migrations_manufacturing_server_sqlite --database-url ./ci-manufacturer-db.sqlite + diesel migration run --migration-dir ./migrations/migrations_owner_onboarding_server_sqlite --database-url ./ci-owner-db.sqlite + diesel migration run --migration-dir ./migrations/migrations_rendezvous_server_sqlite --database-url ./ci-rendezvous-db.sqlite + # run tests + cargo test --workspace + # delete sqlite databases + rm -f ./ci-manufacturer-db.sqlite ./ci-owner-db.sqlite ./ci-rendezvous-db.sqlite - name: Check aio run: | mkdir aio-dir/ @@ -127,6 +139,15 @@ jobs: run: | git diff 
--exit-code + postgres_test: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + with: + fetch-depth: 0 + - name: Run test + run: test/fdo-postgres.sh + commitlint: runs-on: ubuntu-latest steps: @@ -169,4 +190,12 @@ jobs: - name: Test building in devcontainer run: docker run --rm -v `pwd`:/code:z --workdir /code --user root devcontainer-fdo-rs cargo build --workspace --verbose - name: Test testing in devcontainer - run: docker run --rm -v `pwd`:/code:z --workdir /code --user root devcontainer-fdo-rs cargo test --lib --bins --workspace --verbose + run: | + docker run -d -v `pwd`:/code:z --workdir /code --user root -e SQLITE_MANUFACTURER_DATABASE_URL='../ci-manufacturer-db.sqlite' -e SQLITE_OWNER_DATABASE_URL='../ci-owner-db.sqlite' -e SQLITE_RENDEZVOUS_DATABASE_URL='../ci-rendezvous-db.sqlite' --name tests devcontainer-fdo-rs sleep infinity + docker exec --user root tests cargo build --lib --bins --workspace --verbose + docker exec --user root tests diesel migration run --migration-dir ./migrations/migrations_manufacturing_server_sqlite --database-url ./ci-manufacturer-db.sqlite + docker exec --user root tests diesel migration run --migration-dir ./migrations/migrations_owner_onboarding_server_sqlite --database-url ./ci-owner-db.sqlite + docker exec --user root tests diesel migration run --migration-dir ./migrations/migrations_rendezvous_server_sqlite --database-url ./ci-rendezvous-db.sqlite + docker exec --user root tests cargo test + docker stop tests + docker rm tests diff --git a/Cargo.lock b/Cargo.lock index 314a2bc79..988f5dab2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -666,6 +666,43 @@ dependencies = [ "bindgen 0.68.1", ] +[[package]] +name = "diesel" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d98235fdc2f355d330a8244184ab6b4b33c28679c0b4158f63138e51d6cf7e88" +dependencies = [ + "bitflags 2.4.1", + "byteorder", + "diesel_derives", + "itoa", + "libsqlite3-sys", + "pq-sys", + "r2d2", + 
"time", +] + +[[package]] +name = "diesel_derives" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e054665eaf6d97d1e7125512bb2d35d07c73ac86cc6920174cb42d1ab697a554" +dependencies = [ + "diesel_table_macro_syntax", + "proc-macro2", + "quote", + "syn 2.0.48", +] + +[[package]] +name = "diesel_table_macro_syntax" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc5557efc453706fed5e4fa85006fe9817c224c3f480a34c7e5959fd700921c5" +dependencies = [ + "syn 2.0.48", +] + [[package]] name = "diff" version = "0.1.13" @@ -872,6 +909,17 @@ dependencies = [ "uuid", ] +[[package]] +name = "fdo-db" +version = "0.4.13" +dependencies = [ + "anyhow", + "diesel", + "fdo-data-formats", + "fdo-http-wrapper", + "openssl", +] + [[package]] name = "fdo-http-wrapper" version = "0.4.13" @@ -963,6 +1011,7 @@ dependencies = [ "anyhow", "clap 4.3.0", "fdo-data-formats", + "fdo-db", "fdo-http-wrapper", "fdo-util", "hex", @@ -1016,8 +1065,11 @@ dependencies = [ name = "fdo-store" version = "0.4.13" dependencies = [ + "anyhow", "async-trait", + "diesel", "fdo-data-formats", + "fdo-db", "log", "serde", "serde_cbor", @@ -1638,6 +1690,16 @@ version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f7012b1bbb0719e1097c47611d3898568c546d597c2e74d66f6087edd5233ff4" +[[package]] +name = "libsqlite3-sys" +version = "0.26.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "afc22eff61b133b115c6e8c74e818c628d6d5e7a502afea6f64dee076dd94326" +dependencies = [ + "pkg-config", + "vcpkg", +] + [[package]] name = "linked-hash-map" version = "0.5.6" @@ -2163,6 +2225,15 @@ version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" +[[package]] +name = "pq-sys" +version = "0.4.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum 
= "31c0052426df997c0cbd30789eb44ca097e3541717a7b8fa36b1c464ee7edebd" +dependencies = [ + "vcpkg", +] + [[package]] name = "pretty_assertions" version = "1.3.0" @@ -2213,6 +2284,17 @@ dependencies = [ "proc-macro2", ] +[[package]] +name = "r2d2" +version = "0.8.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51de85fb3fb6524929c8a2eb85e6b6d363de4e8c48f9e2c2eac4944abc181c93" +dependencies = [ + "log", + "parking_lot", + "scheduled-thread-pool", +] + [[package]] name = "rand" version = "0.8.5" @@ -2404,6 +2486,15 @@ dependencies = [ "windows-sys 0.42.0", ] +[[package]] +name = "scheduled-thread-pool" +version = "0.2.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3cbc66816425a074528352f5789333ecff06ca41b36b0b0efdfbb29edc391a19" +dependencies = [ + "parking_lot", +] + [[package]] name = "scoped-tls" version = "1.0.1" @@ -2856,8 +2947,10 @@ version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f3403384eaacbca9923fa06940178ac13e4edb725486d70e8e15881d0c836cc" dependencies = [ + "itoa", "serde", "time-core", + "time-macros", ] [[package]] @@ -2866,6 +2959,15 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7300fbefb4dadc1af235a9cef3737cea692a9d97e1b9cbcd4ebdae6f8868e6fb" +[[package]] +name = "time-macros" +version = "0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "372950940a5f07bf38dbe211d7283c9e6d7327df53794992d293e534c733d09b" +dependencies = [ + "time-core", +] + [[package]] name = "tinyvec" version = "1.6.0" diff --git a/Cargo.toml b/Cargo.toml index 6e42d3a88..a05cbe018 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -5,6 +5,7 @@ members = [ "http-wrapper", "store", "util", + "db", "client-linuxapp", "owner-onboarding-server", @@ -24,6 +25,7 @@ default-members = [ "http-wrapper", "store", "util", + "db", "client-linuxapp", "owner-onboarding-server", diff --git a/HOWTO.md 
b/HOWTO.md index c1dc23b10..890765f41 100644 --- a/HOWTO.md +++ b/HOWTO.md @@ -8,12 +8,15 @@ - How to get information about an OV - How to extend an OV with the Owner's Certificate - How to convert a PEM (plain-text) format OV to a COSE (binary) format OV + - How to export OVs from the Manufacturer Server (Database specific) + - How to import OVs into the Owner Onboarding Server (Database specific) - Configuration Files - `manufacturing-server.yml` - `rendezvous_info` field and `rendezvous-info.yml` - `owner-onboarding-server.yml` - `rendezvous-server.yml` - `serviceinfo-api-server.yml` +- Database management - How to run the servers: - Manufacturing Server - Owner Onboarding Server @@ -212,6 +215,61 @@ Use `fdo-owner-tool dump-ownership-voucher`: fdo-owner-tool dump-ownership-voucher your_ownership_voucher --outform cose > your_ownership_voucher.cose ``` +### How to export OVs from the Manufacturer Server (Database specific) + +Use `fdo-owner-tool export-manufacturer-vouchers`: + +``` +$ fdo-owner-tool export-manufacturer-vouchers --help +Exports a single or all the ownership vouchers present in the Manufacturer DB + +Usage: fdo-owner-tool export-manufacturer-vouchers [GUID] + +Arguments: + Type of the Manufacturer DB holding the OVs [possible values: sqlite, postgres] + DB connection URL, or path to the DB file + Path to dir where the OVs will be exported + [GUID] GUID of the voucher to be exported, if no GUID is given all the OVs will be exported +``` + +For example: + +```bash +fdo-owner-tool export-manufacturer-vouchers postgres \ +postgresql://test:test@localhost/test_manufacturer \ +/path/to/manufacturer-exports/ +``` + +### How to import OVs into the Owner Onboarding Server (Database specific) + +``` +$ fdo-owner-tool import-ownership-vouchers --help +Imports into the Owner DB a single ownership voucher or all the ownership vouchers present at a given path + +Usage: fdo-owner-tool import-ownership-vouchers + +Arguments: + Type of the Owner DB to import the 
OVs [possible values: sqlite, postgres] + DB connection URL or path to DB file + Path to the OV to be imported, or path to a directory where all the OVs to be imported are located + +Options: + -h, --help Print help +``` + +When importing OVs the tool will attempt to import each OV once, ignoring all +possible errors and then giving a summary of which OVs couldn't be imported. + +For example: + +``` +fdo-owner-tool import-ownership-vouchers postgres postgresql://test:test@localhost/test_owner /path/to/ovs/to/import/ +Unable to import all OVs. OV import operations yielded the following error/s: + +- Error Some(duplicate key value violates unique constraint "owner_vouchers_pkey") inserting OV d5bc48f8-b603-a1c0-e8b9-ae4d9bdf1570 from path "/path/to/ovs/to/import/d5bc48f8-b603-a1c0-e8b9-ae4d9bdf1570" +- Error Empty data serializing OV contents at path "/path/to/ovs/to/import/this-is-not-an-OV" +``` + ## Configuration Files This project uses @@ -274,7 +332,33 @@ Where: - `session_store_driver`: path to a directory that will hold session information. -- `ownership_voucher_store_driver`: path to a directory that will hold OVs. +- `ownership_voucher_store_driver`: this selects the ownership voucher storage + method. Select between `Directory`, `Sqlite` or `Postgres`. + - `Directory`: expects a `path` to the directory that will hold the OVs. + For example: + ``` + ownership_voucher_store_driver: + Directory: + path: /home/fedora/ownership_vouchers + ``` + - `Sqlite`: will use a Sqlite database to store the ownership vouchers. + When using this option you must set `Manufacturer` as the DB type as + shown below: + ``` + ownership_voucher_store_driver: + Sqlite: + Manufacturer + ``` + Please refer to the [Database management section](#database-management) on how to initialize databases. + - `Postgres`: will use a Postgres database to store the ownership vouchers. 
+ When using this option you must set `Manufacturer` as the DB type as + shown below: + ``` + ownership_voucher_store_driver: + Postgres: + Manufacturer + ``` + Please refer to the [Database management section](#database-management) on how to initialize databases. - `public_key_store_driver:` [OPTIONAL] path to a directory that will hold the Manufacturer's public keys. - `bind`: IP address and port that this server will take. @@ -376,8 +460,33 @@ service_info_api_authentication: None Where: -- `ownership_voucher_store_driver`: path to a directory that will hold the OVs - owned by this server. +- `ownership_voucher_store_driver`: this selects the ownership voucher storage + method. Select between `Directory`, `Sqlite` or `Postgres`. + - `Directory`: expects a `path` to the directory that will hold the OVs. + For example: + ``` + ownership_voucher_store_driver: + Directory: + path: /home/fedora/ownership_vouchers + ``` + - `Sqlite`: will use a Sqlite database to store the ownership vouchers. + When using this option you must set `Owner` as the DB type as + shown below: + ``` + ownership_voucher_store_driver: + Sqlite: + Owner + ``` + Please refer to the [Database management section](#database-management) on how to initialize databases. + - `Postgres`: will use a Postgres database to store the ownership vouchers. + When using this option you must set `Owner` as the DB type as + shown below: + ``` + ownership_voucher_store_driver: + Postgres: + Owner + ``` + Please refer to the [Database management section](#database-management) on how to initialize databases. - `session_store_driver`: path to a directory that will hold session information. - `trusted_device_keys_path`: path to the Device Certificate Authority @@ -421,8 +530,34 @@ bind: "0.0.0.0:8082" Where: -- `storage_driver`: path to a directory that will hold OVs registered with the - Rendezvous Server. +- `storage_driver`: this selects the server's storage method. 
Select between + `Directory`, `Sqlite` or `Postgres`. + - `Directory`: expects a `path` to the directory that will serve as the + server's storage. + For example: + ``` + storage_driver: + Directory: + path: /home/fedora/rendezvous_storage + ``` + - `Sqlite`: will use a Sqlite database as the server's storage. + When using this option you must set `Rendezvous` as the DB type as + shown below: + ``` + storage_driver: + Sqlite: + Rendezvous + ``` + Please refer to the [Database management section](#database-management) on how to initialize databases. + - `Postgres`: will use a Postgres database as the server's storage. + When using this option you must set `Rendezvous` as the DB type as + shown below: + ``` + storage_driver: + Postgres: + Rendezvous + ``` + Please refer to the [Database management section](#database-management) on how to initialize databases. - `session_store_driver`: path to a directory that will hold session information. - `trusted_manufacturer_keys_path`: path to the Manufacturer Certificate. @@ -515,6 +650,58 @@ Where: rebooted after onboarding has completed, boolean (default false). - `additional_service_info`: [OPTIONAL] +## Database management + +When using the `Sqlite` or `Postgres` storage driver configuration you are able +to use Sqlite or Postgres databases to serve as the storage driver of the +Manufacturing, Owner and/or Rendezvous servers. + +You are able to use different database systems for each server (e.g. Sqlite for +the Manufacturing server and Postgres for the rest), or even mix +database storage in some servers with filesystem storage in other servers +(e.g. filesystem storage for the Manufacturing server and Postgres for the +rest). 
+ +### Dependencies + +Install the following packages: + +```bash +dnf install -y sqlite sqlite-devel libpq libpq-devel +``` + +and the `diesel` tool for schema management: + +```bash +cargo install --force diesel_cli --no-default-features --features "postgres sqlite" +``` + +### Creating the databases + +When using databases you need to initialize the database based on the FDO +server and database type that you'll be using. + +All the databases are initialized running + +```bash +diesel migration run --migration-dir $MIGRATION_DIRECTORY \ +--database-url $DATABASE_URL +``` + +where `$MIGRATION_DIRECTORY` is one of the `migration_*` directories that +matches your server type and database type combo +(`migrations_manufacturing_server_postgres`, +`migrations_manufacturing_server_sqlite`, +`migrations_owner_onboarding_server_postgres`, +`migrations_owner_onboarding_server_sqlite`, +`migrations_rendezvous_server_postgres`, +`migrations_rendezvous_server_sqlite`); the `$DATABASE_URL` is the Postgres +connection URL or a path to the location where the Sqlite database will be +located based on if you'll be using Postgres or Sqlite, respectively. + +> **NOTE:** if you are using Fedora IoT along with the Sqlite DB, you must +> create the DB in a writable location, for instance `/var/lib/fdo`. + ## How to run the servers Please mind how the configuration file must be specifically named (e.g. `-` VS @@ -541,6 +728,11 @@ Please mind how the configuration file must be specifically named (e.g. `-` VS file in [examples/systemd](https://github.com/fedora-iot/fido-device-onboard-rs/blob/main/examples/systemd/fdo-manufacturing-server.service). + If you are using a Sqlite or Postgres database for storage, before running + the server you must set the `SQLITE_MANUFACTURER_DATABASE_URL` or + `POSTGRES_MANUFACTURER_DATABASE_URL` environment variable with the proper + connection URL when using Sqlite or Postgres, respectively. + ### Owner Onboarding Server 1. 
Generate the required keys/certificates for the Owner, see [How to generate @@ -570,6 +762,11 @@ Please mind how the configuration file must be specifically named (e.g. `-` VS 4. Execute `fdo-owner-onboarding-server` or run it as a service, see sample file in [examples/systemd](https://github.com/fedora-iot/fido-device-onboard-rs/blob/main/examples/systemd/fdo-owner-onboarding-server.service). + If you are using a Sqlite or Postgres database for storage, before running + the server you must set the `SQLITE_OWNER_DATABASE_URL` or + `POSTGRES_OWNER_DATABASE_URL` environment variable with the proper + connection URL when using Sqlite or Postgres, respectively. + ### Rendezvous Server 1. Configure `rendezvous-server.yml`, see [Configuration @@ -583,6 +780,11 @@ Please mind how the configuration file must be specifically named (e.g. `-` VS 2. Execute `fdo-rendezvous-server` or run it as a service, see sample file in [examples/systemd](https://github.com/fedora-iot/fido-device-onboard-rs/blob/main/examples/systemd/fdo-rendezvous-server.service). + If you are using a Sqlite or Postgres database for storage, before running + the server you must set the `SQLITE_RENDEZVOUS_DATABASE_URL` or + `POSTGRES_RENDEZVOUS_DATABASE_URL` environment variable with the proper + connection URL when using Sqlite or Postgres, respectively. + ### Service Info API Server 1. 
Configure `serviceinfo-api-server.yml`, see [Configuration diff --git a/contrib/containers/admin-cli b/contrib/containers/admin-cli index 17bf9939b..038be2f05 100644 --- a/contrib/containers/admin-cli +++ b/contrib/containers/admin-cli @@ -1,4 +1,5 @@ FROM quay.io/centos/centos:stream9 ARG BUILDID COPY --from=fdo-build:${BUILDID} /usr/src/target/release/fdo-admin-tool /usr/local/bin +RUN yum install -y sqlite sqlite-devel libpq libpq-devel ENTRYPOINT ["fdo-admin-tool"] diff --git a/contrib/containers/build b/contrib/containers/build index f725dc81e..5f3dc9945 100644 --- a/contrib/containers/build +++ b/contrib/containers/build @@ -1,6 +1,6 @@ FROM quay.io/centos/centos:stream9 RUN yum update -y -RUN yum install -y --enablerepo=crb cargo gcc golang openssl-devel tpm2-tss-devel cryptsetup-devel clang-devel +RUN yum install -y --enablerepo=crb cargo gcc golang openssl-devel tpm2-tss-devel cryptsetup-devel clang-devel sqlite sqlite-devel libpq libpq-devel WORKDIR /usr/src COPY . . RUN cargo build --release --features openssl-kdf/deny_custom diff --git a/contrib/containers/manufacturing-server b/contrib/containers/manufacturing-server index 7c6cd63d6..c20d43648 100644 --- a/contrib/containers/manufacturing-server +++ b/contrib/containers/manufacturing-server @@ -4,5 +4,6 @@ COPY --from=fdo-build:${BUILDID} /usr/src/target/release/fdo-manufacturing-serve RUN mkdir -p /etc/fdo/sessions RUN mkdir -p /etc/fdo/keys RUN mkdir -p /etc/fdo/manufacturing-server.conf.d +RUN yum install -y sqlite sqlite-devel libpq libpq-devel ENV LOG_LEVEL=trace ENTRYPOINT ["fdo-manufacturing-server"] diff --git a/contrib/containers/owner-onboarding-server b/contrib/containers/owner-onboarding-server index f3df20ee1..eaaf9a621 100644 --- a/contrib/containers/owner-onboarding-server +++ b/contrib/containers/owner-onboarding-server @@ -4,5 +4,6 @@ COPY --from=fdo-build:${BUILDID} /usr/src/target/release/fdo-owner-onboarding-se RUN mkdir -p /etc/fdo/sessions RUN mkdir -p /etc/fdo/keys RUN mkdir -p 
/etc/fdo/owner-onboarding-server.conf.d +RUN yum install -y sqlite sqlite-devel libpq libpq-devel ENV LOG_LEVEL=trace ENTRYPOINT ["fdo-owner-onboarding-server"] diff --git a/contrib/containers/rendezvous-server b/contrib/containers/rendezvous-server index 8319d12fc..b2e0bc33e 100644 --- a/contrib/containers/rendezvous-server +++ b/contrib/containers/rendezvous-server @@ -4,5 +4,6 @@ COPY --from=fdo-build:${BUILDID} /usr/src/target/release/fdo-rendezvous-server / RUN mkdir -p /etc/fdo/sessions RUN mkdir -p /etc/fdo/keys RUN mkdir -p /etc/fdo/rendezvous-server.conf.d +RUN yum install -y sqlite sqlite-devel libpq libpq-devel ENV LOG_LEVEL=trace ENTRYPOINT ["fdo-rendezvous-server"] diff --git a/contrib/containers/serviceinfo-api-server b/contrib/containers/serviceinfo-api-server index bb1e844c7..ecd90cf11 100644 --- a/contrib/containers/serviceinfo-api-server +++ b/contrib/containers/serviceinfo-api-server @@ -4,5 +4,6 @@ COPY --from=fdo-build:${BUILDID} /usr/src/target/release/fdo-serviceinfo-api-ser RUN mkdir -p /etc/fdo/sessions RUN mkdir -p /etc/fdo/device_specific_serviceinfo RUN mkdir -p /etc/fdo/serviceinfo-api-server.conf.d +RUN yum install -y sqlite sqlite-devel libpq libpq-devel ENV LOG_LEVEL=trace ENTRYPOINT ["fdo-serviceinfo-api-server"] diff --git a/data-formats/src/errors.rs b/data-formats/src/errors.rs index bb7077c02..5320ae2d9 100644 --- a/data-formats/src/errors.rs +++ b/data-formats/src/errors.rs @@ -71,4 +71,6 @@ pub enum Error { UnsupportedVersion(Option), #[error("TPM/TSS error: {0:?}")] TssError(#[from] tss_esapi::Error), + #[error("Empty data")] + EmptyData, } diff --git a/data-formats/src/lib.rs b/data-formats/src/lib.rs index c11495fcf..ea34168fd 100644 --- a/data-formats/src/lib.rs +++ b/data-formats/src/lib.rs @@ -22,6 +22,7 @@ pub mod cborparser; mod serializable; pub use serializable::DeserializableMany; pub use serializable::Serializable; +pub use serializable::StoredItem; pub fn interoperable_kdf_available() -> bool { #[cfg(feature = 
"use_noninteroperable_kdf")] diff --git a/data-formats/src/ownershipvoucher.rs b/data-formats/src/ownershipvoucher.rs index 409126076..e1dd9fb29 100644 --- a/data-formats/src/ownershipvoucher.rs +++ b/data-formats/src/ownershipvoucher.rs @@ -175,6 +175,9 @@ impl OwnershipVoucher { } pub fn from_pem(data: &[u8]) -> Result { + if data.is_empty() { + return Err(Error::EmptyData); + } let parsed = pem::parse(data)?; if parsed.tag() != VOUCHER_PEM_TAG { return Err(Error::InvalidPemTag(parsed.tag().to_string())); @@ -183,6 +186,9 @@ impl OwnershipVoucher { } pub fn many_from_pem(data: &[u8]) -> Result> { + if data.is_empty() { + return Err(Error::EmptyData); + } pem::parse_many(data)? .into_iter() .map(|parsed| { @@ -196,6 +202,9 @@ impl OwnershipVoucher { } pub fn from_pem_or_raw(data: &[u8]) -> Result { + if data.is_empty() { + return Err(Error::EmptyData); + } if data[0] == data[1] && data[0] == b'-' { Self::from_pem(data) } else { diff --git a/data-formats/src/serializable.rs b/data-formats/src/serializable.rs index 89ad9fbfd..5e9375163 100644 --- a/data-formats/src/serializable.rs +++ b/data-formats/src/serializable.rs @@ -1,3 +1,6 @@ +use crate::cborparser::{ParsedArray, ParsedArrayBuilder, ParsedArraySize2}; +use crate::publickey::PublicKey; +use crate::types::COSESign; use crate::Error; pub trait Serializable { @@ -85,3 +88,35 @@ where ciborium::ser::into_writer(self, &mut writer).map_err(Error::from) } } + +#[derive(Clone, Debug)] +pub struct StoredItem { + pub public_key: PublicKey, + pub to1d: COSESign, +} + +impl Serializable for StoredItem { + fn deserialize_from_reader(reader: R) -> Result + where + R: std::io::Read, + { + let contents: ParsedArray = ParsedArray::deserialize_from_reader(reader)?; + + let public_key = contents.get(0)?; + let to1d = contents.get(1)?; + + Ok(StoredItem { public_key, to1d }) + } + + fn serialize_to_writer(&self, writer: W) -> Result<(), Error> + where + W: std::io::Write, + { + let mut contents: ParsedArrayBuilder = 
ParsedArrayBuilder::new(); + contents.set(0, &self.public_key)?; + contents.set(1, &self.to1d)?; + let contents = contents.build(); + + contents.serialize_to_writer(writer) + } +} diff --git a/db/Cargo.toml b/db/Cargo.toml new file mode 100644 index 000000000..cfc3319ab --- /dev/null +++ b/db/Cargo.toml @@ -0,0 +1,21 @@ +[package] +name = "fdo-db" +version = "0.4.13" +edition = "2021" + + +[dependencies] +anyhow = "1.0" +diesel = { version = "2.1.0", features = ["sqlite", "postgres", "r2d2"] } + +fdo-data-formats = { path = "../data-formats", version = "0.4.13" } + +[dev-dependencies] +fdo-http-wrapper = { path = "../http-wrapper", version = "0.4.13", features = ["server"] } +openssl = "0.10.55" + +[features] +postgres = [] +sqlite = [] + +default = ["postgres", "sqlite"] \ No newline at end of file diff --git a/db/src/lib.rs b/db/src/lib.rs new file mode 100644 index 000000000..68414aaf3 --- /dev/null +++ b/db/src/lib.rs @@ -0,0 +1,123 @@ +pub mod models; +#[cfg(feature = "postgres")] +pub mod postgres; +pub mod schema; +#[cfg(feature = "sqlite")] +pub mod sqlite; + +use anyhow::Result; +use diesel::r2d2::ConnectionManager; +use diesel::r2d2::Pool; + +use fdo_data_formats::ownershipvoucher::OwnershipVoucher as OV; +use fdo_data_formats::StoredItem; +use models::ManufacturerOV; +use models::OwnerOV; +use models::RendezvousOV; + +pub trait DBStoreManufacturer +where + T: diesel::r2d2::R2D2Connection + 'static, +{ + /// Gets a connection pool + fn get_conn_pool() -> Pool>; + + /// Gets a connection to the db + fn get_connection() -> T; + + /// Inserts an OV + fn insert_ov(ov: &OV, ttl: Option, conn: &mut T) -> Result<()>; + + /// Gets an OV + fn get_ov(guid: &str, conn: &mut T) -> Result; + + /// Returns all the OVs in the DB + fn get_all_ovs(conn: &mut T) -> Result>; + + /// Deletes an OV + fn delete_ov(guid: &str, conn: &mut T) -> Result<()>; + + /// Deletes all OVs whose ttl is less or equal to the given ttl + fn delete_ov_ttl_le(ttl: i64, conn: &mut T) -> 
Result<()>; + + /// Updates the ttl of an existing OV. + /// Option is set as the ttl type so that we can set NULL in the + /// database if 'None' is passed as the ttl. + fn update_ov_ttl(guid: &str, ttl: Option, conn: &mut T) -> Result<()>; +} + +pub trait DBStoreOwner +where + T: diesel::r2d2::R2D2Connection + 'static, +{ + /// Gets a connection pool + fn get_conn_pool() -> Pool>; + + /// Gets a connection to the db + fn get_connection() -> T; + + /// Inserts an OV + fn insert_ov(ov: &OV, to2: Option, to0: Option, conn: &mut T) -> Result<()>; + + /// Gets an OV + fn get_ov(guid: &str, conn: &mut T) -> Result; + + /// Deletes an OV + fn delete_ov(guid: &str, conn: &mut T) -> Result<()>; + + /// Selects all the OVs with the given to2_performed status + fn select_ov_to2_performed(to2_performed: bool, conn: &mut T) -> Result>; + + /// Selects all the OVs whose to0 is less than the given maximum + fn select_ov_to0_less_than(to0_max: i64, conn: &mut T) -> Result>; + + /// Selects all the OVs with the given to2_performed status and those whose + /// to0 is less that then given maximum + fn select_ov_to2_performed_and_ov_to0_less_than( + to2_performed: bool, + to0_max: i64, + conn: &mut T, + ) -> Result>; + + /// Updates the to0_accept_owner_wait_seconds field of an existing OV. + /// Option is set as the ttl type so that we can set NULL in the + /// database if 'None' is passed as the value. + fn update_ov_to0_wait_seconds( + guid: &str, + wait_seconds: Option, + conn: &mut T, + ) -> Result<()>; + + /// Updates the to0 performed status of an existing OV. 
+ /// Option is set as the ttl type so that we can set NULL in the + /// database if 'None' is passed as the to0_performed + fn update_ov_to2(guid: &str, to0_performed: Option, conn: &mut T) -> Result<()>; +} + +pub trait DBStoreRendezvous +where + T: diesel::r2d2::R2D2Connection + 'static, +{ + /// Gets a connection pool + fn get_conn_pool() -> Pool>; + + /// Gets a connection to the db + fn get_connection() -> T; + + /// Inserts an OV + fn insert_ov(ov: &StoredItem, guid: &str, ttl: Option, conn: &mut T) -> Result<()>; + + /// Gets an OV + fn get_ov(guid: &str, conn: &mut T) -> Result; + + /// Deletes an OV + fn delete_ov(guid: &str, conn: &mut T) -> Result<()>; + + /// Deletes all OVs whose ttl is less or equal to the given ttl + fn delete_ov_ttl_le(ttl: i64, conn: &mut T) -> Result<()>; + + /// Updates the ttl of an existing OV. + /// Option is set as the ttl type so that we can set NULL in the + /// database if 'None' is passed as the ttl. + fn update_ov_ttl(guid: &str, ttl: Option, conn: &mut T) -> Result<()>; +} diff --git a/db/src/models.rs b/db/src/models.rs new file mode 100644 index 000000000..bdd5bcd7d --- /dev/null +++ b/db/src/models.rs @@ -0,0 +1,88 @@ +use diesel::prelude::*; +use std::fmt; + +#[derive(Queryable, Selectable, Identifiable)] +#[diesel(table_name = crate::schema::rendezvous_vouchers)] +#[diesel(treat_none_as_null = true)] +#[diesel(primary_key(guid))] +pub struct RendezvousOV { + pub guid: String, + pub contents: Vec, + pub ttl: Option, +} + +#[derive(Insertable)] +#[diesel(table_name = crate::schema::rendezvous_vouchers)] +pub struct NewRendezvousOV { + pub guid: String, + pub contents: Vec, + pub ttl: Option, +} + +#[derive(Queryable, Selectable, Identifiable, AsChangeset)] +#[diesel(table_name = crate::schema::owner_vouchers)] +#[diesel(treat_none_as_null = true)] +#[diesel(primary_key(guid))] +pub struct OwnerOV { + pub guid: String, + pub contents: Vec, + pub to2_performed: Option, + pub to0_accept_owner_wait_seconds: Option, +} + 
+#[derive(Insertable)] +#[diesel(table_name = crate::schema::owner_vouchers)] +pub struct NewOwnerOV { + pub guid: String, + pub contents: Vec, + pub to2_performed: Option, + pub to0_accept_owner_wait_seconds: Option, +} + +#[derive(Queryable, Selectable, Identifiable, AsChangeset)] +#[diesel(table_name = crate::schema::manufacturer_vouchers)] +#[diesel(treat_none_as_null = true)] +#[diesel(primary_key(guid))] +pub struct ManufacturerOV { + pub guid: String, + pub contents: Vec, + pub ttl: Option, +} + +#[derive(Insertable)] +#[diesel(table_name = crate::schema::manufacturer_vouchers)] +pub struct NewManufacturerOV { + pub guid: String, + pub contents: Vec, + pub ttl: Option, +} + +impl fmt::Display for RendezvousOV { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!( + f, + "GUID: {}, ttl: {:?}, contents: {:?}", + self.guid, self.ttl, self.contents + ) + } +} + +impl fmt::Display for OwnerOV { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!( + f, + "GUID: {}, to2_performed: {:?}, to0_accept_owner_wait_seconds {:?}, contents: {:?}", + self.guid, self.to2_performed, self.to0_accept_owner_wait_seconds, self.contents + ) + } +} + +impl fmt::Display for ManufacturerOV { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!( + f, + "GUID: {}, ttl: {:?}, contents: {:?}", + self.guid, self.ttl, self.contents + ) + } +} diff --git a/db/src/postgres.rs b/db/src/postgres.rs new file mode 100644 index 000000000..e8f171291 --- /dev/null +++ b/db/src/postgres.rs @@ -0,0 +1,272 @@ +use super::{DBStoreManufacturer, DBStoreOwner, DBStoreRendezvous}; +use crate::models::NewManufacturerOV; +use crate::schema::manufacturer_vouchers; +use crate::schema::owner_vouchers; +use crate::schema::rendezvous_vouchers; +use fdo_data_formats::StoredItem; + +use diesel::prelude::*; +use diesel::r2d2::ConnectionManager; +use diesel::r2d2::Pool; +use diesel::PgConnection; + +use std::env; + +use anyhow::Result; + +use 
super::models::{ManufacturerOV, NewOwnerOV, NewRendezvousOV, OwnerOV, RendezvousOV}; + +use fdo_data_formats::ownershipvoucher::OwnershipVoucher as OV; +use fdo_data_formats::Serializable; + +pub struct PostgresManufacturerDB {} + +impl DBStoreManufacturer for PostgresManufacturerDB { + fn get_connection() -> PgConnection { + let database_url = env::var("POSTGRES_MANUFACTURER_DATABASE_URL") + .expect("POSTGRES_MANUFACTURER_DATABASE_URL must be set"); + PgConnection::establish(&database_url).expect("Error connecting to database") + } + + fn get_conn_pool() -> Pool> { + let database_url = env::var("POSTGRES_MANUFACTURER_DATABASE_URL") + .expect("POSTGRES_MANUFACTURER_DATABASE_URL must be set"); + let manager = ConnectionManager::::new(database_url); + Pool::builder() + .test_on_check_out(true) + .build(manager) + .expect("Couldn't build db connection pool") + } + + fn insert_ov(ov: &OV, ttl: Option, conn: &mut PgConnection) -> Result<()> { + let new_ov_manufacturer = NewManufacturerOV { + guid: ov.header().guid().to_string(), + contents: ov.serialize_data()?, + ttl, + }; + diesel::insert_into(super::schema::manufacturer_vouchers::table) + .values(new_ov_manufacturer) + .execute(conn)?; + Ok(()) + } + + fn get_ov(guid: &str, conn: &mut PgConnection) -> Result { + let result = super::schema::manufacturer_vouchers::dsl::manufacturer_vouchers + .filter(super::schema::manufacturer_vouchers::guid.eq(guid)) + .first(conn)?; + Ok(result) + } + + fn get_all_ovs(conn: &mut PgConnection) -> Result> { + let result = super::schema::manufacturer_vouchers::dsl::manufacturer_vouchers + .select(ManufacturerOV::as_select()) + .load(conn)?; + Ok(result) + } + + fn delete_ov(guid: &str, conn: &mut PgConnection) -> Result<()> { + diesel::delete(manufacturer_vouchers::dsl::manufacturer_vouchers) + .filter(super::schema::manufacturer_vouchers::guid.eq(guid)) + .execute(conn)?; + Ok(()) + } + + fn delete_ov_ttl_le(ttl: i64, conn: &mut PgConnection) -> Result<()> { + 
diesel::delete(manufacturer_vouchers::dsl::manufacturer_vouchers) + .filter(super::schema::manufacturer_vouchers::ttl.le(ttl)) + .execute(conn)?; + Ok(()) + } + + fn update_ov_ttl(guid: &str, ttl: Option, conn: &mut PgConnection) -> Result<()> { + diesel::update(manufacturer_vouchers::dsl::manufacturer_vouchers) + .filter(super::schema::manufacturer_vouchers::guid.eq(guid)) + .set(super::schema::manufacturer_vouchers::ttl.eq(ttl)) + .execute(conn)?; + Ok(()) + } +} + +pub struct PostgresOwnerDB {} + +impl DBStoreOwner for PostgresOwnerDB { + fn get_connection() -> PgConnection { + let database_url = env::var("POSTGRES_OWNER_DATABASE_URL") + .expect("POSTGRES_OWNER_DATABASE_URL must be set"); + PgConnection::establish(&database_url).expect("Error connecting to database") + } + + fn get_conn_pool() -> Pool> { + let database_url = env::var("POSTGRES_OWNER_DATABASE_URL") + .expect("POSTGRES_OWNER_DATABASE_URL must be set"); + let manager = ConnectionManager::::new(database_url); + Pool::builder() + .test_on_check_out(true) + .build(manager) + .expect("Couldn't build db connection pool") + } + + fn insert_ov( + ov: &OV, + to2: Option, + to0: Option, + conn: &mut PgConnection, + ) -> Result<()> { + let new_ov_owner = NewOwnerOV { + guid: ov.header().guid().to_string(), + contents: ov.serialize_data()?, + to2_performed: to2, + to0_accept_owner_wait_seconds: to0, + }; + diesel::insert_into(super::schema::owner_vouchers::table) + .values(new_ov_owner) + .execute(conn)?; + Ok(()) + } + + fn get_ov(guid: &str, conn: &mut PgConnection) -> Result { + let result = super::schema::owner_vouchers::dsl::owner_vouchers + .filter(super::schema::owner_vouchers::guid.eq(guid)) + .first(conn)?; + Ok(result) + } + + fn delete_ov(guid: &str, conn: &mut PgConnection) -> Result<()> { + diesel::delete(owner_vouchers::dsl::owner_vouchers) + .filter(super::schema::owner_vouchers::guid.eq(guid)) + .execute(conn)?; + Ok(()) + } + + #[allow(non_snake_case)] + fn select_ov_to2_performed( + 
to2_performed: bool, + conn: &mut PgConnection, + ) -> Result> { + let result = super::schema::owner_vouchers::dsl::owner_vouchers + .filter(super::schema::owner_vouchers::to2_performed.eq(to2_performed)) + .select(OwnerOV::as_select()) + .load(conn)?; + Ok(result) + } + + #[allow(non_snake_case)] + fn select_ov_to0_less_than(to0_max: i64, conn: &mut PgConnection) -> Result> { + let result = super::schema::owner_vouchers::dsl::owner_vouchers + .filter(super::schema::owner_vouchers::to0_accept_owner_wait_seconds.lt(to0_max)) + .select(OwnerOV::as_select()) + .load(conn)?; + Ok(result) + } + + fn select_ov_to2_performed_and_ov_to0_less_than( + to2_performed: bool, + to0_max: i64, + conn: &mut PgConnection, + ) -> Result> { + let result = super::schema::owner_vouchers::dsl::owner_vouchers + .filter( + super::schema::owner_vouchers::to0_accept_owner_wait_seconds + .lt(to0_max) + .or(super::schema::owner_vouchers::to0_accept_owner_wait_seconds.is_null()), + ) + .filter( + super::schema::owner_vouchers::to2_performed + .eq(to2_performed) + .or(super::schema::owner_vouchers::to2_performed.is_null()), + ) + .select(OwnerOV::as_select()) + .load(conn)?; + Ok(result) + } + + fn update_ov_to0_wait_seconds( + guid: &str, + wait_seconds: Option, + conn: &mut PgConnection, + ) -> Result<()> { + diesel::update(owner_vouchers::dsl::owner_vouchers) + .filter(super::schema::owner_vouchers::guid.eq(guid)) + .set(super::schema::owner_vouchers::to0_accept_owner_wait_seconds.eq(wait_seconds)) + .execute(conn)?; + Ok(()) + } + + fn update_ov_to2( + guid: &str, + to2_performed: Option, + conn: &mut PgConnection, + ) -> Result<()> { + diesel::update(owner_vouchers::dsl::owner_vouchers) + .filter(super::schema::owner_vouchers::guid.eq(guid)) + .set(super::schema::owner_vouchers::to2_performed.eq(to2_performed)) + .execute(conn)?; + Ok(()) + } +} + +pub struct PostgresRendezvousDB {} + +impl DBStoreRendezvous for PostgresRendezvousDB { + fn get_connection() -> PgConnection { + let 
database_url = env::var("POSTGRES_RENDEZVOUS_DATABASE_URL") + .expect("POSTGRES_RENDEZVOUS_DATABASE_URL must be set"); + PgConnection::establish(&database_url).expect("Error connecting to database") + } + + fn get_conn_pool() -> Pool> { + let database_url = env::var("POSTGRES_RENDEZVOUS_DATABASE_URL") + .expect("POSTGRES_RENDEZVOUS_DATABASE_URL must be set"); + let manager = ConnectionManager::::new(database_url); + Pool::builder() + .test_on_check_out(true) + .build(manager) + .expect("Couldn't build db connection pool") + } + + fn insert_ov( + ov: &StoredItem, + guid: &str, + ttl: Option, + conn: &mut PgConnection, + ) -> Result<()> { + let new_ov_rendezvous = NewRendezvousOV { + guid: guid.to_string(), + contents: ov.serialize_data()?, + ttl, + }; + diesel::insert_into(super::schema::rendezvous_vouchers::table) + .values(&new_ov_rendezvous) + .execute(conn)?; + Ok(()) + } + + fn get_ov(guid: &str, conn: &mut PgConnection) -> Result { + let result = super::schema::rendezvous_vouchers::dsl::rendezvous_vouchers + .filter(super::schema::rendezvous_vouchers::guid.eq(guid)) + .first(conn)?; + Ok(result) + } + + fn delete_ov(guid: &str, conn: &mut PgConnection) -> Result<()> { + diesel::delete(rendezvous_vouchers::dsl::rendezvous_vouchers) + .filter(super::schema::rendezvous_vouchers::guid.eq(guid)) + .execute(conn)?; + Ok(()) + } + + fn delete_ov_ttl_le(ttl: i64, conn: &mut PgConnection) -> Result<()> { + diesel::delete(rendezvous_vouchers::dsl::rendezvous_vouchers) + .filter(super::schema::rendezvous_vouchers::ttl.le(ttl)) + .execute(conn)?; + Ok(()) + } + + fn update_ov_ttl(guid: &str, ttl: Option, conn: &mut PgConnection) -> Result<()> { + diesel::update(rendezvous_vouchers::dsl::rendezvous_vouchers) + .filter(super::schema::rendezvous_vouchers::guid.eq(guid)) + .set(super::schema::rendezvous_vouchers::ttl.eq(ttl)) + .execute(conn)?; + Ok(()) + } +} diff --git a/db/src/schema.rs b/db/src/schema.rs new file mode 100644 index 000000000..65e68897c --- /dev/null +++ 
b/db/src/schema.rs @@ -0,0 +1,24 @@ +diesel::table! { + manufacturer_vouchers (guid) { + guid -> Text, + contents -> Binary, + ttl -> Nullable, + } +} + +diesel::table! { + owner_vouchers (guid) { + guid -> Text, + contents -> Binary, + to2_performed -> Nullable, + to0_accept_owner_wait_seconds -> Nullable, + } +} + +diesel::table! { + rendezvous_vouchers (guid) { + guid -> Text, + contents -> Binary, + ttl -> Nullable, + } +} diff --git a/db/src/sqlite.rs b/db/src/sqlite.rs new file mode 100644 index 000000000..78997e0e3 --- /dev/null +++ b/db/src/sqlite.rs @@ -0,0 +1,530 @@ +use super::{DBStoreManufacturer, DBStoreOwner, DBStoreRendezvous}; + +use diesel::prelude::*; +use diesel::r2d2::ConnectionManager; +use diesel::r2d2::Pool; +use diesel::SqliteConnection; + +use crate::models::ManufacturerOV; +use crate::models::NewManufacturerOV; +use crate::schema::manufacturer_vouchers; +use crate::schema::owner_vouchers; +use crate::schema::rendezvous_vouchers; + +use std::env; + +use anyhow::Result; + +use super::models::{NewOwnerOV, NewRendezvousOV, OwnerOV, RendezvousOV}; + +use fdo_data_formats::ownershipvoucher::OwnershipVoucher as OV; +use fdo_data_formats::Serializable; +use fdo_data_formats::StoredItem; + +pub struct SqliteManufacturerDB {} + +impl DBStoreManufacturer for SqliteManufacturerDB { + fn get_connection() -> SqliteConnection { + let database_url = env::var("SQLITE_MANUFACTURER_DATABASE_URL") + .expect("SQLITE_MANUFACTURER_DATABASE_URL must be set"); + SqliteConnection::establish(&database_url).expect("Error connecting to database") + } + + fn get_conn_pool() -> Pool> { + let database_url = env::var("SQLITE_MANUFACTURER_DATABASE_URL") + .expect("SQLITE_MANUFACTURER_DATABASE_URL must be set"); + let manager = ConnectionManager::::new(database_url); + Pool::builder() + .test_on_check_out(true) + .build(manager) + .expect("Couldn't build db connection pool") + } + + fn insert_ov(ov: &OV, ttl: Option, conn: &mut SqliteConnection) -> Result<()> { + let 
new_ov_manufacturer = NewManufacturerOV { + guid: ov.header().guid().to_string(), + contents: ov.serialize_data()?, + ttl, + }; + diesel::insert_into(super::schema::manufacturer_vouchers::table) + .values(new_ov_manufacturer) + .execute(conn)?; + Ok(()) + } + + fn get_ov(guid: &str, conn: &mut SqliteConnection) -> Result { + let result = super::schema::manufacturer_vouchers::dsl::manufacturer_vouchers + .filter(super::schema::manufacturer_vouchers::guid.eq(guid)) + .first(conn)?; + Ok(result) + } + + fn get_all_ovs(conn: &mut SqliteConnection) -> Result> { + let result = super::schema::manufacturer_vouchers::dsl::manufacturer_vouchers + .select(ManufacturerOV::as_select()) + .load(conn)?; + Ok(result) + } + + fn delete_ov(guid: &str, conn: &mut SqliteConnection) -> Result<()> { + diesel::delete(manufacturer_vouchers::dsl::manufacturer_vouchers) + .filter(super::schema::manufacturer_vouchers::guid.eq(guid)) + .execute(conn)?; + Ok(()) + } + + fn delete_ov_ttl_le(ttl: i64, conn: &mut SqliteConnection) -> Result<()> { + diesel::delete(manufacturer_vouchers::dsl::manufacturer_vouchers) + .filter(super::schema::manufacturer_vouchers::ttl.le(ttl)) + .execute(conn)?; + Ok(()) + } + + fn update_ov_ttl(guid: &str, ttl: Option, conn: &mut SqliteConnection) -> Result<()> { + diesel::update(manufacturer_vouchers::dsl::manufacturer_vouchers) + .filter(super::schema::manufacturer_vouchers::guid.eq(guid)) + .set(super::schema::manufacturer_vouchers::ttl.eq(ttl)) + .execute(conn)?; + Ok(()) + } +} + +pub struct SqliteOwnerDB {} + +impl DBStoreOwner for SqliteOwnerDB { + fn get_connection() -> SqliteConnection { + let database_url = + env::var("SQLITE_OWNER_DATABASE_URL").expect("SQLITE_OWNER_DATABASE_URL must be set"); + SqliteConnection::establish(&database_url).expect("Error connecting to database") + } + + fn get_conn_pool() -> Pool> { + let database_url = + env::var("SQLITE_OWNER_DATABASE_URL").expect("SQLITE_OWNER_DATABASE_URL must be set"); + let manager = 
ConnectionManager::::new(database_url); + Pool::builder() + .test_on_check_out(true) + .build(manager) + .expect("Couldn't build db connection pool") + } + + fn insert_ov( + ov: &OV, + to2: Option, + to0: Option, + conn: &mut SqliteConnection, + ) -> Result<()> { + let new_ov_owner = NewOwnerOV { + guid: ov.header().guid().to_string(), + contents: ov.serialize_data()?, + to2_performed: to2, + to0_accept_owner_wait_seconds: to0, + }; + diesel::insert_into(super::schema::owner_vouchers::table) + .values(new_ov_owner) + .execute(conn)?; + Ok(()) + } + + fn get_ov(guid: &str, conn: &mut SqliteConnection) -> Result { + let result = super::schema::owner_vouchers::dsl::owner_vouchers + .filter(super::schema::owner_vouchers::guid.eq(guid)) + .first(conn)?; + Ok(result) + } + + fn delete_ov(guid: &str, conn: &mut SqliteConnection) -> Result<()> { + diesel::delete(owner_vouchers::dsl::owner_vouchers) + .filter(super::schema::owner_vouchers::guid.eq(guid)) + .execute(conn)?; + Ok(()) + } + + #[allow(non_snake_case)] + fn select_ov_to2_performed( + to2_performed: bool, + conn: &mut SqliteConnection, + ) -> Result> { + let result = super::schema::owner_vouchers::dsl::owner_vouchers + .filter(super::schema::owner_vouchers::to2_performed.eq(to2_performed)) + .select(OwnerOV::as_select()) + .load(conn)?; + Ok(result) + } + + #[allow(non_snake_case)] + fn select_ov_to0_less_than(to0_max: i64, conn: &mut SqliteConnection) -> Result> { + let result = super::schema::owner_vouchers::dsl::owner_vouchers + .filter(super::schema::owner_vouchers::to0_accept_owner_wait_seconds.lt(to0_max)) + .select(OwnerOV::as_select()) + .load(conn)?; + Ok(result) + } + + fn select_ov_to2_performed_and_ov_to0_less_than( + to2_performed: bool, + to0_max: i64, + conn: &mut SqliteConnection, + ) -> Result> { + let result = super::schema::owner_vouchers::dsl::owner_vouchers + .filter( + super::schema::owner_vouchers::to0_accept_owner_wait_seconds + .lt(to0_max) + 
.or(super::schema::owner_vouchers::to0_accept_owner_wait_seconds.is_null()), + ) + .filter( + super::schema::owner_vouchers::to2_performed + .eq(to2_performed) + .or(super::schema::owner_vouchers::to2_performed.is_null()), + ) + .select(OwnerOV::as_select()) + .load(conn)?; + Ok(result) + } + + fn update_ov_to0_wait_seconds( + guid: &str, + wait_seconds: Option, + conn: &mut SqliteConnection, + ) -> Result<()> { + diesel::update(owner_vouchers::dsl::owner_vouchers) + .filter(super::schema::owner_vouchers::guid.eq(guid)) + .set(super::schema::owner_vouchers::to0_accept_owner_wait_seconds.eq(wait_seconds)) + .execute(conn)?; + Ok(()) + } + + fn update_ov_to2( + guid: &str, + to2_performed: Option, + conn: &mut SqliteConnection, + ) -> Result<()> { + diesel::update(owner_vouchers::dsl::owner_vouchers) + .filter(super::schema::owner_vouchers::guid.eq(guid)) + .set(super::schema::owner_vouchers::to2_performed.eq(to2_performed)) + .execute(conn)?; + Ok(()) + } +} + +pub struct SqliteRendezvousDB {} + +impl DBStoreRendezvous for SqliteRendezvousDB { + fn get_connection() -> SqliteConnection { + let database_url = env::var("SQLITE_RENDEZVOUS_DATABASE_URL") + .expect("SQLITE_RENDEZVOUS_DATABASE_URL must be set"); + SqliteConnection::establish(&database_url).expect("Error connecting to database") + } + + fn get_conn_pool() -> Pool> { + let database_url = env::var("SQLITE_RENDEZVOUS_DATABASE_URL") + .expect("SQLITE_RENDEZVOUS_DATABASE_URL must be set"); + let manager = ConnectionManager::::new(database_url); + Pool::builder() + .test_on_check_out(true) + .build(manager) + .expect("Couldn't build db connection pool") + } + + fn insert_ov( + ov: &StoredItem, + guid: &str, + ttl: Option, + conn: &mut SqliteConnection, + ) -> Result<()> { + let new_ov_rendezvous = NewRendezvousOV { + guid: guid.to_string(), + contents: ov.serialize_data()?, + ttl, + }; + diesel::insert_into(super::schema::rendezvous_vouchers::table) + .values(&new_ov_rendezvous) + .execute(conn)?; + Ok(()) + } + 
+ fn get_ov(guid: &str, conn: &mut SqliteConnection) -> Result { + let result = super::schema::rendezvous_vouchers::dsl::rendezvous_vouchers + .filter(super::schema::rendezvous_vouchers::guid.eq(guid)) + .first(conn)?; + Ok(result) + } + + fn delete_ov(guid: &str, conn: &mut SqliteConnection) -> Result<()> { + diesel::delete(rendezvous_vouchers::dsl::rendezvous_vouchers) + .filter(super::schema::rendezvous_vouchers::guid.eq(guid)) + .execute(conn)?; + Ok(()) + } + + fn delete_ov_ttl_le(ttl: i64, conn: &mut SqliteConnection) -> Result<()> { + diesel::delete(rendezvous_vouchers::dsl::rendezvous_vouchers) + .filter(super::schema::rendezvous_vouchers::ttl.le(ttl)) + .execute(conn)?; + Ok(()) + } + + fn update_ov_ttl(guid: &str, ttl: Option, conn: &mut SqliteConnection) -> Result<()> { + diesel::update(rendezvous_vouchers::dsl::rendezvous_vouchers) + .filter(super::schema::rendezvous_vouchers::guid.eq(guid)) + .set(super::schema::rendezvous_vouchers::ttl.eq(ttl)) + .execute(conn)?; + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use super::{SqliteManufacturerDB, SqliteOwnerDB, SqliteRendezvousDB}; + use crate::{schema::*, DBStoreManufacturer, DBStoreOwner, DBStoreRendezvous}; + use anyhow::Result; + use diesel::connection::SimpleConnection; + use diesel::prelude::*; + use fdo_data_formats::ownershipvoucher::OwnershipVoucher as OV; + use fdo_data_formats::publickey::PublicKey; + use fdo_data_formats::types::{COSESign, Guid, Nonce, RendezvousInfo, TO2SetupDevicePayload}; + use fdo_data_formats::StoredItem; + use openssl::ec::{EcGroup, EcKey}; + use openssl::nid::Nid; + use openssl::pkey::PKey; + use std::collections::HashMap; + use std::env; + + #[test] + fn test_manufacturer_database() -> Result<()> { + println!("Current directory: {:?}", env::current_dir()); + + // read test ovs from the integration tests dir + let mut ov_map = HashMap::new(); + let pool = SqliteManufacturerDB::get_conn_pool(); + + // last_guid used later to delete an ov with that key + let mut last_guid 
= String::new(); + for path in std::fs::read_dir("../integration-tests/vouchers/v101").expect("Dir not found") + { + let ov_path = path.expect("error getting path").path(); + let content = std::fs::read(ov_path).expect("OV couldn't be read"); + let ov = OV::from_pem_or_raw(&content).expect("Error serializing OV"); + last_guid = ov.header().guid().to_string(); + ov_map.insert(ov.header().guid().to_string(), ov); + } + + // get a connection from the pool + let conn = &mut pool.get().unwrap(); + // sqlite does not enable this by default, not needed at this point, + // but I've left it here so that we don't forget + conn.batch_execute("PRAGMA foreign_keys = ON")?; + + for (_, ov) in ov_map.clone().into_iter() { + SqliteManufacturerDB::insert_ov(&ov, Some(5000_i64), conn)?; + } + + // we should have 3 ovs + let count: i64 = manufacturer_vouchers::dsl::manufacturer_vouchers + .count() + .get_result(conn) + .unwrap(); + assert_eq!(count, 3); + + // select ov by guid + let ov_db = SqliteManufacturerDB::get_ov(&last_guid, conn)?; + assert_eq!(ov_db.guid, last_guid); + + // update ttl of an OV + SqliteManufacturerDB::update_ov_ttl(&last_guid, Some(12345), conn)?; + let ov_db = SqliteManufacturerDB::get_ov(&last_guid, conn)?; + assert_eq!(ov_db.ttl, Some(12345)); + + // delete an ov by guid, we should have 2 at the end + SqliteManufacturerDB::delete_ov(&last_guid, conn)?; + let count: i64 = manufacturer_vouchers::dsl::manufacturer_vouchers + .count() + .get_result(conn) + .unwrap(); + assert_eq!(count, 2); + + // delete manufacturer ovs with ttl <= 4000, we shouldn't delete any of them + SqliteManufacturerDB::delete_ov_ttl_le(4000_i64, conn)?; + let count: i64 = manufacturer_vouchers::dsl::manufacturer_vouchers + .count() + .get_result(conn) + .unwrap(); + assert_eq!(count, 2); + + // delete manufacturer ovs with ttl <= 5000, we should delete the remaining 2 ovs + SqliteManufacturerDB::delete_ov_ttl_le(5000_i64, conn)?; + let count: i64 = 
manufacturer_vouchers::dsl::manufacturer_vouchers + .count() + .get_result(conn) + .unwrap(); + assert_eq!(count, 0); + Ok(()) + } + + #[test] + fn test_owner_database() -> Result<()> { + println!("Current directory: {:?}", env::current_dir()); + + // read test ovs from the integration tests dir + let mut ov_map = HashMap::new(); + let pool = SqliteOwnerDB::get_conn_pool(); + + // last_guid used later to delete an ov with that key + let mut last_guid = String::new(); + for path in std::fs::read_dir("../integration-tests/vouchers/v101").expect("Dir not found") + { + let ov_path = path.expect("error getting path").path(); + let content = std::fs::read(ov_path).expect("OV couldn't be read"); + let ov = OV::from_pem_or_raw(&content).expect("Error serializing OV"); + last_guid = ov.header().guid().to_string(); + ov_map.insert(ov.header().guid().to_string(), ov); + } + + // get a connection from the pool + let conn = &mut pool.get().unwrap(); + // sqlite does not enable this by default, not needed at this point, + // but I've left it here so that we don't forget + conn.batch_execute("PRAGMA foreign_keys = ON")?; + + let mut to2_done = true; + for (_, ov) in ov_map.clone().into_iter() { + if to2_done { + SqliteOwnerDB::insert_ov(&ov, Some(to2_done), Some(2000_i64), conn)?; + } else { + SqliteOwnerDB::insert_ov(&ov, Some(to2_done), Some(3000_i64), conn)?; + } + to2_done = !to2_done; + } + + // we should have 3 ovs + let count: i64 = owner_vouchers::dsl::owner_vouchers + .count() + .get_result(conn) + .unwrap(); + assert_eq!(count, 3); + + // select ov by guid + let ov_db = SqliteOwnerDB::get_ov(&last_guid, conn)?; + assert_eq!(ov_db.guid, last_guid); + + // select the owner ovs with to2 performed = true, we should have 2 + let result = SqliteOwnerDB::select_ov_to2_performed(true, conn)?; + assert_eq!(result.len(), 2); + + // select the owner ovs with to0 less than 2500, we should have 2 + let result = SqliteOwnerDB::select_ov_to0_less_than(2500_i64, conn)?; + 
assert_eq!(result.len(), 2); + + // update the wait_seconds field and to2 + SqliteOwnerDB::update_ov_to0_wait_seconds(&last_guid.to_string(), Some(1234), conn)?; + SqliteOwnerDB::update_ov_to2(&last_guid.to_string(), None, conn)?; + + let ov_db = SqliteOwnerDB::get_ov(&last_guid, conn)?; + assert_eq!(ov_db.to0_accept_owner_wait_seconds, Some(1234)); + assert_eq!(ov_db.to2_performed, None); + + // delete an ov from the owner, we should have 2 left + SqliteOwnerDB::delete_ov(&last_guid.to_string(), conn)?; + let count: i64 = owner_vouchers::dsl::owner_vouchers + .count() + .get_result(conn) + .unwrap(); + assert_eq!(count, 2); + + Ok(()) + } + + #[test] + fn test_rendezvous_database() -> Result<()> { + println!("Current directory: {:?}", env::current_dir()); + + // read test ovs from the integration tests dir + let mut ov_map = HashMap::new(); + let pool = SqliteRendezvousDB::get_conn_pool(); + + // last_guid used later to delete an ov with that key + let mut last_guid = String::new(); + // private key + let group = EcGroup::from_curve_name(Nid::X9_62_PRIME256V1)?; + let key = EcKey::generate(&group)?; + let private_key = PKey::from_ec_key(key.clone())?; + for path in std::fs::read_dir("../integration-tests/vouchers/v101").expect("Dir not found") + { + let ov_path = path.expect("error getting path").path(); + let content = std::fs::read(ov_path).expect("OV couldn't be read"); + let ov = OV::from_pem_or_raw(&content).expect("Error serializing OV"); + last_guid = ov.header().guid().to_string(); + let pubkey: PublicKey = ov + .device_certificate_chain() + .unwrap() + .insecure_verify_without_root_verification() + .unwrap() + .clone() + .try_into() + .unwrap(); + let new_payload = TO2SetupDevicePayload::new( + RendezvousInfo::new(Vec::new()).unwrap(), + Guid::new().unwrap(), + Nonce::new().unwrap(), + pubkey.clone(), + ); + let cose = COSESign::new(&new_payload, None, &private_key).unwrap(); + let tmp = StoredItem { + public_key: pubkey, + to1d: cose, + }; + 
ov_map.insert(ov.header().guid().to_string(), tmp); + } + + // get a connection from the pool + let conn = &mut pool.get().unwrap(); + // sqlite does not enable this by default, not needed at this point, + // but I've left it here so that we don't forget + conn.batch_execute("PRAGMA foreign_keys = ON")?; + + for (guid, ov) in ov_map.clone().into_iter() { + SqliteRendezvousDB::insert_ov(&ov, &guid, Some(5000_i64), conn)?; + } + + // we should have 3 ovs + let count: i64 = rendezvous_vouchers::dsl::rendezvous_vouchers + .count() + .get_result(conn) + .unwrap(); + assert_eq!(count, 3); + + // get an ov by guid + let ov_db = SqliteRendezvousDB::get_ov(&last_guid, conn)?; + assert_eq!(ov_db.guid, last_guid); + + // update ttl of an ov + SqliteRendezvousDB::update_ov_ttl(&last_guid, None, conn)?; + let ov_db = SqliteRendezvousDB::get_ov(&last_guid, conn)?; + assert_eq!(ov_db.ttl, None); + + // delete an ov by guid, we should have 2 at the end + SqliteRendezvousDB::delete_ov(&last_guid, conn)?; + let count: i64 = rendezvous_vouchers::dsl::rendezvous_vouchers + .count() + .get_result(conn) + .unwrap(); + assert_eq!(count, 2); + + // delete rendezvous ovs with ttl <= 4000, we shouldn't delete any of them + SqliteRendezvousDB::delete_ov_ttl_le(4000_i64, conn)?; + let count: i64 = rendezvous_vouchers::dsl::rendezvous_vouchers + .count() + .get_result(conn) + .unwrap(); + assert_eq!(count, 2); + + // delete rendezvous ovs with ttl <= 5000, we should delete the remaining 2 ovs + SqliteRendezvousDB::delete_ov_ttl_le(5000_i64, conn)?; + let count: i64 = rendezvous_vouchers::dsl::rendezvous_vouchers + .count() + .get_result(conn) + .unwrap(); + assert_eq!(count, 0); + Ok(()) + } +} diff --git a/fido-device-onboard.spec b/fido-device-onboard.spec index 390b89475..0d6d25c90 100644 --- a/fido-device-onboard.spec +++ b/fido-device-onboard.spec @@ -61,6 +61,13 @@ install -D -m 0755 -t %{buildroot}%{_bindir} target/release/fdo-owner-tool install -D -m 0755 -t %{buildroot}%{_bindir} 
target/release/fdo-admin-tool install -D -m 0644 -t %{buildroot}%{_unitdir} examples/systemd/* install -D -m 0644 -t %{buildroot}%{_docdir}/fdo examples/config/* +# db sql files +install -D -m 0644 -t %{buildroot}%{_docdir}/fdo/migrations/migrations_manufacturing_server_postgres migrations/migrations_manufacturing_server_postgres/2023-10-03-152801_create_db/* +install -D -m 0644 -t %{buildroot}%{_docdir}/fdo/migrations/migrations_manufacturing_server_sqlite migrations/migrations_manufacturing_server_sqlite/2023-10-03-152801_create_db/* +install -D -m 0644 -t %{buildroot}%{_docdir}/fdo/migrations/migrations_owner_onboarding_server_postgres migrations/migrations_owner_onboarding_server_postgres/2023-10-03-152801_create_db/* +install -D -m 0644 -t %{buildroot}%{_docdir}/fdo/migrations/migrations_owner_onboarding_server_sqlite migrations/migrations_owner_onboarding_server_sqlite/2023-10-03-152801_create_db/* +install -D -m 0644 -t %{buildroot}%{_docdir}/fdo/migrations/migrations_rendezvous_server_postgres migrations/migrations_rendezvous_server_postgres/2023-10-03-152801_create_db/* +install -D -m 0644 -t %{buildroot}%{_docdir}/fdo/migrations/migrations_rendezvous_server_sqlite migrations/migrations_rendezvous_server_sqlite/2023-10-03-152801_create_db/* # duplicates as needed by AIO command so link them ln -s %{_bindir}/fdo-owner-tool %{buildroot}%{_libexecdir}/fdo/fdo-owner-tool ln -s %{_bindir}/fdo-admin-tool %{buildroot}%{_libexecdir}/fdo/fdo-admin-tool @@ -123,6 +130,8 @@ Requires: openssl-libs >= 3.0.1-12 %{_docdir}/fdo/device_specific_serviceinfo.yml %{_docdir}/fdo/serviceinfo-api-server.yml %{_docdir}/fdo/owner-onboarding-server.yml +%{_docdir}/fdo/migrations/migrations_owner_onboarding_server_postgres/* +%{_docdir}/fdo/migrations/migrations_owner_onboarding_server_sqlite/* %{_unitdir}/fdo-serviceinfo-api-server.service %{_unitdir}/fdo-owner-onboarding-server.service @@ -156,6 +165,8 @@ License: %combined_license %dir %{_localstatedir}/lib/fdo %dir 
%{_docdir}/fdo %{_docdir}/fdo/rendezvous-*.yml +%{_docdir}/fdo/migrations/migrations_rendezvous_server_postgres/* +%{_docdir}/fdo/migrations/migrations_rendezvous_server_sqlite/* %{_unitdir}/fdo-rendezvous-server.service %post -n fdo-rendezvous-server @@ -188,6 +199,8 @@ Requires: openssl-libs >= 3.0.1-12 %dir %{_localstatedir}/lib/fdo %dir %{_docdir}/fdo %{_docdir}/fdo/manufacturing-server.yml +%{_docdir}/fdo/migrations/migrations_manufacturing_server_postgres/* +%{_docdir}/fdo/migrations/migrations_manufacturing_server_sqlite/* %{_unitdir}/fdo-manufacturing-server.service %post -n fdo-manufacturing-server diff --git a/migrations/migrations_manufacturing_server_postgres/2023-10-03-152801_create_db/down.sql b/migrations/migrations_manufacturing_server_postgres/2023-10-03-152801_create_db/down.sql new file mode 100644 index 000000000..79f509d4d --- /dev/null +++ b/migrations/migrations_manufacturing_server_postgres/2023-10-03-152801_create_db/down.sql @@ -0,0 +1,3 @@ +-- This file should undo anything in `up.sql` + +DROP TABLE manufacturer_vouchers; diff --git a/migrations/migrations_manufacturing_server_postgres/2023-10-03-152801_create_db/up.sql b/migrations/migrations_manufacturing_server_postgres/2023-10-03-152801_create_db/up.sql new file mode 100644 index 000000000..2233ba87a --- /dev/null +++ b/migrations/migrations_manufacturing_server_postgres/2023-10-03-152801_create_db/up.sql @@ -0,0 +1,7 @@ +-- Your SQL goes here + +CREATE TABLE manufacturer_vouchers ( + guid varchar(36) NOT NULL PRIMARY KEY, + contents bytea NOT NULL, + ttl bigint +); diff --git a/migrations/migrations_manufacturing_server_sqlite/2023-10-03-152801_create_db/down.sql b/migrations/migrations_manufacturing_server_sqlite/2023-10-03-152801_create_db/down.sql new file mode 100644 index 000000000..e30e9d7d9 --- /dev/null +++ b/migrations/migrations_manufacturing_server_sqlite/2023-10-03-152801_create_db/down.sql @@ -0,0 +1 @@ +DROP TABLE manufacturer_vouchers; diff --git 
a/migrations/migrations_manufacturing_server_sqlite/2023-10-03-152801_create_db/up.sql b/migrations/migrations_manufacturing_server_sqlite/2023-10-03-152801_create_db/up.sql new file mode 100644 index 000000000..a9abe4c0d --- /dev/null +++ b/migrations/migrations_manufacturing_server_sqlite/2023-10-03-152801_create_db/up.sql @@ -0,0 +1,5 @@ +CREATE TABLE manufacturer_vouchers ( + guid varchar(36) NOT NULL PRIMARY KEY, + contents blob NOT NULL, + ttl bigint +); diff --git a/migrations/migrations_owner_onboarding_server_postgres/2023-10-03-152801_create_db/down.sql b/migrations/migrations_owner_onboarding_server_postgres/2023-10-03-152801_create_db/down.sql new file mode 100644 index 000000000..d82e56978 --- /dev/null +++ b/migrations/migrations_owner_onboarding_server_postgres/2023-10-03-152801_create_db/down.sql @@ -0,0 +1,3 @@ +-- This file should undo anything in `up.sql` + +DROP TABLE owner_vouchers; diff --git a/migrations/migrations_owner_onboarding_server_postgres/2023-10-03-152801_create_db/up.sql b/migrations/migrations_owner_onboarding_server_postgres/2023-10-03-152801_create_db/up.sql new file mode 100644 index 000000000..98c25d85a --- /dev/null +++ b/migrations/migrations_owner_onboarding_server_postgres/2023-10-03-152801_create_db/up.sql @@ -0,0 +1,8 @@ +-- Your SQL goes here + +CREATE TABLE owner_vouchers ( + guid varchar(36) NOT NULL PRIMARY KEY, + contents bytea NOT NULL, + to2_performed boolean, + to0_accept_owner_wait_seconds bigint +); diff --git a/migrations/migrations_owner_onboarding_server_sqlite/2023-10-03-152801_create_db/down.sql b/migrations/migrations_owner_onboarding_server_sqlite/2023-10-03-152801_create_db/down.sql new file mode 100644 index 000000000..771908a38 --- /dev/null +++ b/migrations/migrations_owner_onboarding_server_sqlite/2023-10-03-152801_create_db/down.sql @@ -0,0 +1 @@ +DROP TABLE owner_vouchers; diff --git a/migrations/migrations_owner_onboarding_server_sqlite/2023-10-03-152801_create_db/up.sql 
b/migrations/migrations_owner_onboarding_server_sqlite/2023-10-03-152801_create_db/up.sql new file mode 100644 index 000000000..b3f27ef6f --- /dev/null +++ b/migrations/migrations_owner_onboarding_server_sqlite/2023-10-03-152801_create_db/up.sql @@ -0,0 +1,6 @@ +CREATE TABLE owner_vouchers ( + guid varchar(36) NOT NULL PRIMARY KEY, + contents blob NOT NULL, + to2_performed bool, + to0_accept_owner_wait_seconds bigint +); diff --git a/migrations/migrations_rendezvous_server_postgres/2023-10-03-152801_create_db/down.sql b/migrations/migrations_rendezvous_server_postgres/2023-10-03-152801_create_db/down.sql new file mode 100644 index 000000000..c75aa021f --- /dev/null +++ b/migrations/migrations_rendezvous_server_postgres/2023-10-03-152801_create_db/down.sql @@ -0,0 +1,3 @@ +-- This file should undo anything in `up.sql` + +DROP TABLE rendezvous_vouchers; diff --git a/migrations/migrations_rendezvous_server_postgres/2023-10-03-152801_create_db/up.sql b/migrations/migrations_rendezvous_server_postgres/2023-10-03-152801_create_db/up.sql new file mode 100644 index 000000000..046166b84 --- /dev/null +++ b/migrations/migrations_rendezvous_server_postgres/2023-10-03-152801_create_db/up.sql @@ -0,0 +1,7 @@ +-- Your SQL goes here + +CREATE TABLE rendezvous_vouchers ( + guid varchar(36) NOT NULL PRIMARY KEY, + contents bytea NOT NULL, + ttl bigint +); diff --git a/migrations/migrations_rendezvous_server_sqlite/2023-10-03-152801_create_db/down.sql b/migrations/migrations_rendezvous_server_sqlite/2023-10-03-152801_create_db/down.sql new file mode 100644 index 000000000..67864a8e9 --- /dev/null +++ b/migrations/migrations_rendezvous_server_sqlite/2023-10-03-152801_create_db/down.sql @@ -0,0 +1 @@ +DROP TABLE rendezvous_vouchers; diff --git a/migrations/migrations_rendezvous_server_sqlite/2023-10-03-152801_create_db/up.sql b/migrations/migrations_rendezvous_server_sqlite/2023-10-03-152801_create_db/up.sql new file mode 100644 index 000000000..c55b893f1 --- /dev/null +++ 
b/migrations/migrations_rendezvous_server_sqlite/2023-10-03-152801_create_db/up.sql @@ -0,0 +1,5 @@ +CREATE TABLE rendezvous_vouchers ( + guid varchar(36) NOT NULL PRIMARY KEY, + contents blob NOT NULL, + ttl bigint +); diff --git a/owner-onboarding-server/src/main.rs b/owner-onboarding-server/src/main.rs index 4ef0d826b..f1221aa78 100644 --- a/owner-onboarding-server/src/main.rs +++ b/owner-onboarding-server/src/main.rs @@ -27,7 +27,7 @@ use fdo_data_formats::{ publickey::PublicKey, types::{Guid, TO2AddressEntry}, }; -use fdo_store::Store; +use fdo_store::{Store, StoreError}; use fdo_util::servers::{ configuration::{owner_onboarding_server::OwnerOnboardingServerSettings, AbsolutePathBuf}, settings_for, OwnershipVoucherStoreMetadataKey, @@ -72,42 +72,69 @@ fn load_private_key(path: &AbsolutePathBuf) -> Result> { Ok(PKey::private_key_from_der(&contents)?) } -async fn report_to_rendezvous(udt: OwnerServiceUDT) -> Result<()> { - let mut ft = udt.ownership_voucher_store.query_data().await?; - ft.neq( - &fdo_store::MetadataKey::Local(OwnershipVoucherStoreMetadataKey::To2Performed), - &true, - ); - ft.lt( - &fdo_store::MetadataKey::Local(OwnershipVoucherStoreMetadataKey::To0AcceptOwnerWaitSeconds), - time::OffsetDateTime::now_utc().unix_timestamp(), - ); +async fn _handle_report_to_rendezvous(udt: &OwnerServiceUDT, ov: &OwnershipVoucher) -> Result<()> { + match report_ov_to_rendezvous(ov, &udt.owner_addresses, &udt.owner_key).await { + Ok(wait_seconds) => { + udt.ownership_voucher_store + .store_metadata( + ov.header().guid(), + &fdo_store::MetadataKey::Local( + OwnershipVoucherStoreMetadataKey::To0AcceptOwnerWaitSeconds, + ), + &time::Duration::new(wait_seconds.into(), 0), + ) + .await?; + Ok(()) + } + Err(e) => { + log::warn!( + "OV({}): failed to report to rendezvous: {}", + ov.header().guid().to_string(), + e + ); + Ok(()) + } + } +} - let ov_iter = ft.query().await?; - if let Some(ovs) = ov_iter { - for ov in ovs { - match report_ov_to_rendezvous(&ov, 
&udt.owner_addresses, &udt.owner_key).await { - Ok(wait_seconds) => { - udt.ownership_voucher_store - .store_metadata( - ov.header().guid(), - &fdo_store::MetadataKey::Local( - OwnershipVoucherStoreMetadataKey::To0AcceptOwnerWaitSeconds, - ), - &time::Duration::new(wait_seconds.into(), 0), - ) - .await?; +async fn report_to_rendezvous(udt: OwnerServiceUDT) -> Result<()> { + match udt.ownership_voucher_store.query_data().await { + Ok(mut ft) => { + ft.neq( + &fdo_store::MetadataKey::Local(OwnershipVoucherStoreMetadataKey::To2Performed), + &true, + ); + ft.lt( + &fdo_store::MetadataKey::Local( + OwnershipVoucherStoreMetadataKey::To0AcceptOwnerWaitSeconds, + ), + time::OffsetDateTime::now_utc().unix_timestamp(), + ); + let ov_iter = ft.query().await?; + if let Some(ovs) = ov_iter { + for ov in ovs { + _handle_report_to_rendezvous(&udt, &ov).await?; + } + } + } + Err(StoreError::MethodNotAvailable) => { + match udt.ownership_voucher_store.query_ovs_db().await { + Ok(ovs) => { + for ov in ovs { + _handle_report_to_rendezvous(&udt, &ov).await? 
+ } } + Err(StoreError::Unspecified(txt)) => { + log::warn!("DB error: {txt:?}") + } + Err(StoreError::MethodNotAvailable) => bail!("Unreachable"), Err(e) => { - log::warn!( - "OV({}): failed to report to rendezvous: {}", - ov.header().guid().to_string(), - e - ); + log::warn!("DB error: {e:?}") } - }; + } } - } + Err(e) => log::warn!("Error querying data: {e:?}"), + }; Ok(()) } diff --git a/owner-tool/Cargo.toml b/owner-tool/Cargo.toml index 56a0ddd7e..9654b062c 100644 --- a/owner-tool/Cargo.toml +++ b/owner-tool/Cargo.toml @@ -19,5 +19,6 @@ tss-esapi = { version = "7.4", features = ["generate-bindings"] } fdo-util = { path = "../util", version = "0.4.13" } fdo-data-formats = { path = "../data-formats", version = "0.4.13" } fdo-http-wrapper = { path = "../http-wrapper", version = "0.4.13", features = ["client"] } +fdo-db = { path = "../db", version = "0.4.13"} hex = "0.4" diff --git a/owner-tool/src/main.rs b/owner-tool/src/main.rs index f22d94b6f..7dcefadd1 100644 --- a/owner-tool/src/main.rs +++ b/owner-tool/src/main.rs @@ -1,7 +1,12 @@ -use std::{convert::TryFrom, fs, io::Write, path::Path, str::FromStr}; +use std::{convert::TryFrom, env, fs, io::Write, path::Path, str::FromStr}; use anyhow::{bail, Context, Error, Result}; use clap::{ArgAction, Args, Parser, Subcommand, ValueEnum}; +use fdo_db::models::ManufacturerOV; +use fdo_db::postgres::PostgresOwnerDB; +use fdo_db::sqlite::SqliteOwnerDB; +use fdo_db::DBStoreOwner; +use fdo_db::{postgres::PostgresManufacturerDB, sqlite::SqliteManufacturerDB, DBStoreManufacturer}; use openssl::{ asn1::{Asn1Integer, Asn1Time}, bn::BigNum, @@ -14,6 +19,7 @@ use openssl::{ x509::{X509Builder, X509NameBuilder, X509NameRef, X509}, }; use serde_yaml::Value; +use std::fs::File; use tss_esapi::{structures::Public as TssPublic, traits::UnMarshall}; use fdo_data_formats::{ @@ -42,6 +48,10 @@ enum Commands { DumpDeviceCredential(DumpDeviceCredentialArguments), /// Extends an ownership voucher for a new owner 
ExtendOwnershipVoucher(ExtendOwnershipVoucherArguments), + /// Exports a single or all the ownership vouchers present in the Manufacturer DB + ExportManufacturerVouchers(ExportManufacturerVouchersArguments), + /// Imports into the Owner DB a single ownership voucher or all the ownership vouchers present at a given path + ImportOwnershipVouchers(ImportOwnershipVouchersArguments), } #[derive(Args)] @@ -99,6 +109,34 @@ struct ExtendOwnershipVoucherArguments { new_owner_cert: String, } +#[derive(Args)] +struct ExportManufacturerVouchersArguments { + /// Type of the Manufacturer DB holding the OVs + db_type: DBType, + /// DB connection URL or path to the DB file + db_url: String, + /// Path to dir where the OVs will be exported + path: String, + /// GUID of the voucher to be exported, if no GUID is given all the OVs will be exported + guid: Option, +} + +#[derive(Copy, Clone, ValueEnum)] +enum DBType { + Sqlite, + Postgres, +} + +#[derive(Args)] +struct ImportOwnershipVouchersArguments { + /// Type of the Owner DB to import the OVs + db_type: DBType, + /// DB connection URL or path to DB file + db_url: String, + /// Path to the OV to be imported, or path to a directory where all the OVs to be imported are located + source_path: String, +} + #[tokio::main] async fn main() -> Result<()> { fdo_util::add_version!(); @@ -109,6 +147,8 @@ async fn main() -> Result<()> { Commands::DumpOwnershipVoucher(args) => dump_voucher(&args), Commands::DumpDeviceCredential(args) => dump_devcred(&args), Commands::ExtendOwnershipVoucher(args) => extend_voucher(&args), + Commands::ExportManufacturerVouchers(args) => export_manufacturer_vouchers(&args), + Commands::ImportOwnershipVouchers(args) => import_ownership_vouchers(&args), } } @@ -588,3 +628,169 @@ fn extend_voucher(args: &ExtendOwnershipVoucherArguments) -> Result<(), Error> { Ok(()) } + +fn _write_ov_to_disk(db_ov: &ManufacturerOV, path: &Path) -> Result<()> { + let new_path = path.join(&db_ov.guid); + let file = 
File::create(new_path)?; + let ov = OwnershipVoucher::from_pem_or_raw(&db_ov.contents).expect("Error serializing OV"); + OwnershipVoucher::serialize_to_writer(&ov, &file)?; + Ok(()) +} + +fn export_manufacturer_vouchers(args: &ExportManufacturerVouchersArguments) -> Result<()> { + let path = Path::new(&args.path); + if !path.is_dir() { + bail!("Please provide a path to a valid directory."); + } + match &args.guid { + Some(guid) => { + // export single + let db_ov = match args.db_type { + DBType::Sqlite => { + env::set_var("SQLITE_MANUFACTURER_DATABASE_URL", &args.db_url); + let pool = SqliteManufacturerDB::get_conn_pool(); + let conn = &mut pool.get()?; + SqliteManufacturerDB::get_ov(guid, conn)? + } + DBType::Postgres => { + env::set_var("POSTGRES_MANUFACTURER_DATABASE_URL", &args.db_url); + let pool = PostgresManufacturerDB::get_conn_pool(); + let conn = &mut pool.get()?; + PostgresManufacturerDB::get_ov(guid, conn)? + } + }; + _write_ov_to_disk(&db_ov, path)?; + println!("OV {guid} exported.") + } + None => { + // export all + let db_ovs = match args.db_type { + DBType::Sqlite => { + env::set_var("SQLITE_MANUFACTURER_DATABASE_URL", &args.db_url); + let pool = SqliteManufacturerDB::get_conn_pool(); + let conn = &mut pool.get()?; + SqliteManufacturerDB::get_all_ovs(conn)? + } + DBType::Postgres => { + env::set_var("POSTGRES_MANUFACTURER_DATABASE_URL", &args.db_url); + let pool = PostgresManufacturerDB::get_conn_pool(); + let conn = &mut pool.get()?; + PostgresManufacturerDB::get_all_ovs(conn)? 
+ } + }; + for db_ov in db_ovs { + _write_ov_to_disk(&db_ov, path)?; + } + println!("OV/s exported."); + } + } + Ok(()) +} + +fn import_ownership_vouchers(args: &ImportOwnershipVouchersArguments) -> Result<()> { + let source_path = Path::new(&args.source_path); + let mut error_buff: Vec = vec![]; + if source_path.is_dir() { + // Import all the OVs in a directory, we will read them one by one and + // insert them, if there is an error, we will copy it in a buffer and + // log it afterwards. + for path in fs::read_dir(source_path)? { + let ov_path = match &path { + Ok(path) => path.path(), + Err(e) => { + error_buff.push(format!("Error {e} with path {:?}", &path)); + continue; + } + }; + let content = match fs::read(&ov_path) { + Ok(value) => value, + Err(e) => { + error_buff.push(format!("Error {e} reading path {:?}", &ov_path)); + continue; + } + }; + let ov = match OwnershipVoucher::from_pem_or_raw(&content) { + Ok(value) => value, + Err(e) => { + error_buff.push(format!( + "Error {e} serializing OV contents at path {:?}", + &ov_path + )); + continue; + } + }; + let ret = match args.db_type { + DBType::Postgres => { + env::set_var("POSTGRES_OWNER_DATABASE_URL", &args.db_url); + let pool = PostgresOwnerDB::get_conn_pool(); + let conn = &mut match pool.get() { + Ok(val) => val, + Err(e) => { + error_buff.push(format!( + "Error {e} getting a connection from the DB pool with OV {} from path {:?}", + ov.header().guid().to_string(), + &ov_path + )); + continue; + } + }; + PostgresOwnerDB::insert_ov(&ov, None, None, conn) + } + DBType::Sqlite => { + env::set_var("SQLITE_OWNER_DATABASE_URL", &args.db_url); + let pool = SqliteOwnerDB::get_conn_pool(); + let conn = &mut match pool.get() { + Ok(val) => val, + Err(e) => { + error_buff.push(format!( + "Error {e} getting a connection from the DB pool with OV {} from path {:?}", + ov.header().guid().to_string(), + &ov_path + )); + continue; + } + }; + SqliteOwnerDB::insert_ov(&ov, None, None, conn) + } + }; + if ret.is_err() { + 
error_buff.push(format!( + "Error {:?} inserting OV {} from path {:?}", + ret.err(), + ov.header().guid().to_string(), + &ov_path + )); + } + } + if !error_buff.is_empty() { + println!( + "Unable to import all OVs. OV import operations yielded the following error/s:" + ); + for error in error_buff { + println!("- {error}"); + } + } else { + println!("OV import finished.") + } + } else { + // import a single OV + let content = fs::read(&args.source_path)?; + let ov = OwnershipVoucher::from_pem_or_raw(&content)?; + match args.db_type { + DBType::Postgres => { + env::set_var("POSTGRES_OWNER_DATABASE_URL", &args.db_url); + let pool = PostgresOwnerDB::get_conn_pool(); + let conn = &mut pool.get()?; + PostgresOwnerDB::insert_ov(&ov, None, None, conn)?; + } + DBType::Sqlite => { + env::set_var("SQLITE_OWNER_DATABASE_URL", &args.db_url); + let pool = SqliteOwnerDB::get_conn_pool(); + let conn = &mut pool.get()?; + SqliteOwnerDB::insert_ov(&ov, None, None, conn)?; + } + } + println!("OV import finished."); + } + Ok(()) +} diff --git a/rendezvous-server/src/handlers_to0.rs b/rendezvous-server/src/handlers_to0.rs index c6a1adb00..d2d32f9b2 100644 --- a/rendezvous-server/src/handlers_to0.rs +++ b/rendezvous-server/src/handlers_to0.rs @@ -5,13 +5,12 @@ use fdo_data_formats::{ constants::ErrorCode, messages::Message, types::{Nonce, TO1DataPayload}, + StoredItem, }; use fdo_http_wrapper::server::Error; use fdo_http_wrapper::server::RequestInformation; -use super::StoredItem; - pub(super) async fn hello( _user_data: super::RendezvousUDT, mut ses_with_store: RequestInformation, diff --git a/rendezvous-server/src/main.rs b/rendezvous-server/src/main.rs index 37f0f9edf..8721dd7c4 100644 --- a/rendezvous-server/src/main.rs +++ b/rendezvous-server/src/main.rs @@ -5,53 +5,13 @@ use openssl::x509::X509; use tokio::signal::unix::{signal, SignalKind}; use warp::Filter; -use fdo_data_formats::{ - cborparser::{ParsedArray, ParsedArrayBuilder}, - enhanced_types::X5Bag, - publickey::PublicKey, 
- types::{COSESign, Guid}, - ProtocolVersion, Serializable, -}; +use fdo_data_formats::{enhanced_types::X5Bag, types::Guid, ProtocolVersion, StoredItem}; use fdo_store::Store; use fdo_util::servers::{configuration::rendezvous_server::RendezvousServerSettings, settings_for}; mod handlers_to0; mod handlers_to1; -#[derive(Clone, Debug)] -struct StoredItem { - public_key: PublicKey, - to1d: COSESign, -} - -impl Serializable for StoredItem { - fn deserialize_from_reader(reader: R) -> Result - where - R: std::io::Read, - { - let contents: ParsedArray = - ParsedArray::deserialize_from_reader(reader)?; - - let public_key = contents.get(0)?; - let to1d = contents.get(1)?; - - Ok(StoredItem { public_key, to1d }) - } - - fn serialize_to_writer(&self, writer: W) -> Result<(), fdo_data_formats::Error> - where - W: std::io::Write, - { - let mut contents: ParsedArrayBuilder = - ParsedArrayBuilder::new(); - contents.set(0, &self.public_key)?; - contents.set(1, &self.to1d)?; - let contents = contents.build(); - - contents.serialize_to_writer(writer) - } -} - #[derive(Debug, Clone, Copy)] #[non_exhaustive] enum RendezvousStoreMetadataKey {} diff --git a/store/Cargo.toml b/store/Cargo.toml index 3a8026f96..1d0fce1de 100644 --- a/store/Cargo.toml +++ b/store/Cargo.toml @@ -7,6 +7,8 @@ edition = "2018" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] +anyhow = { version = "1", optional = true} + fdo-data-formats = { path = "../data-formats", version = "0.4.13" } thiserror = "1" @@ -20,5 +22,13 @@ time = "0.3" xattr = { version = "1.0", default-features = false, optional = true } # We *need* xattrs to store TTL serde_cbor = { version = "0.11", optional = true } +# database +fdo-db = { path = "../db", version = "0.4.13"} + +diesel = { version = "2.1.0", features = ["sqlite", "postgres", "r2d2"], optional = true } + [features] directory = ["xattr", "serde_cbor"] +db = ["diesel", "anyhow"] + +default = ["directory", "db"] \ No 
newline at end of file diff --git a/store/src/db.rs b/store/src/db.rs new file mode 100644 index 000000000..552588455 --- /dev/null +++ b/store/src/db.rs @@ -0,0 +1,962 @@ +use async_trait::async_trait; +use fdo_data_formats::ownershipvoucher::OwnershipVoucher; +use fdo_data_formats::StoredItem; +use fdo_db::*; +use std::marker::PhantomData; + +use crate::DBType; +use crate::ServerType; +use crate::Store; +use crate::StoreError; +use crate::{FilterType, MetadataLocalKey, MetadataValue, ValueIter}; +use fdo_data_formats::Serializable; + +pub(super) fn initialize( + db_type: DBType, + server_type: &ServerType, +) -> Result>, StoreError> +where + OT: crate::StoreOpenMode, + K: std::str::FromStr + std::string::ToString + Send + Sync + 'static, + V: Serializable + Send + Sync + Clone + 'static, + MKT: crate::MetadataLocalKey + 'static, +{ + match db_type { + DBType::Postgres => match server_type { + ServerType::Manufacturer => Ok(Box::new(PostgresManufacturerStore { + phantom_k: PhantomData, + phantom_v: PhantomData, + })), + ServerType::Owner => Ok(Box::new(PostgresOwnerStore { + phantom_k: PhantomData, + phantom_v: PhantomData, + })), + ServerType::Rendezvous => Ok(Box::new(PostgresRendezvousStore { + phantom_k: PhantomData, + phantom_v: PhantomData, + })), + }, + DBType::Sqlite => match server_type { + ServerType::Manufacturer => Ok(Box::new(SqliteManufacturerStore { + phantom_k: PhantomData, + phantom_v: PhantomData, + })), + ServerType::Owner => Ok(Box::new(SqliteOwnerStore { + phantom_k: PhantomData, + phantom_v: PhantomData, + })), + ServerType::Rendezvous => Ok(Box::new(SqliteRendezvousStore { + phantom_k: PhantomData, + phantom_v: PhantomData, + })), + }, + } +} + +struct SqliteManufacturerStore { + phantom_k: PhantomData, + phantom_v: PhantomData, +} + +impl SqliteManufacturerStore where K: std::string::ToString {} + +pub struct SqliteManufacturerStoreFilterType { + neqs: Vec, + lts: Vec, +} + +#[async_trait] +impl FilterType for 
SqliteManufacturerStoreFilterType +where + V: Serializable + Send + Sync + Clone + 'static, + MKT: MetadataLocalKey, +{ + fn neq(&mut self, _key: &crate::MetadataKey, _expected: &dyn MetadataValue) { + self.neqs = Vec::new(); + } + fn lt(&mut self, _key: &crate::MetadataKey, _max: i64) { + self.lts = Vec::new(); + } + async fn query(&self) -> Result, StoreError> { + let values = Vec::new(); + Ok(Some(ValueIter { + index: 0, + values, + errored: false, + })) + } +} + +#[async_trait] +impl Store for SqliteManufacturerStore +where + OT: crate::StoreOpenMode, + K: std::str::FromStr + std::string::ToString + Send + Sync + 'static, + V: Serializable + Send + Sync + Clone + 'static, + MKT: crate::MetadataLocalKey + 'static, +{ + async fn load_data(&self, key: &K) -> Result, StoreError> { + let pool = fdo_db::sqlite::SqliteManufacturerDB::get_conn_pool(); + let conn = &mut pool.get().expect("Couldn't establish a connection"); + let ov_db = fdo_db::sqlite::SqliteManufacturerDB::get_ov(&key.to_string(), conn) + .expect("Error selecting OV"); + Ok(Some(V::deserialize_data(&ov_db.contents).map_err(|e| { + StoreError::Unspecified(format!("Error deserializing value: {e:?}")) + })?)) + } + + async fn store_metadata( + &self, + key: &K, + _metadata_key: &crate::MetadataKey, + metadata_value: &dyn MetadataValue, + ) -> Result<(), StoreError> { + let pool = fdo_db::sqlite::SqliteManufacturerDB::get_conn_pool(); + let conn = &mut pool.get().expect("Couldn't establish a connection"); + let val = metadata_value + .to_text() + .parse::() + .expect("Unable to convert"); + fdo_db::sqlite::SqliteManufacturerDB::update_ov_ttl(&key.to_string(), Some(val), conn) + .map_err(|e| { + StoreError::Database(format!( + "Unable to update OV with guid {} with {val}: {e:?}", + key.to_string() + )) + }) + } + + async fn destroy_metadata( + &self, + key: &K, + _metadata_key: &crate::MetadataKey, + ) -> Result<(), StoreError> { + let pool = fdo_db::sqlite::SqliteManufacturerDB::get_conn_pool(); + let conn 
= &mut pool.get().expect("Couldn't establish a connection"); + fdo_db::sqlite::SqliteManufacturerDB::update_ov_ttl(&key.to_string(), None, conn).map_err( + |e| { + StoreError::Database(format!( + "Unable to set 'None' metadata on OV {}: {e:?}", + key.to_string() + )) + }, + ) + } + + async fn query_data(&self) -> crate::QueryResult { + // NOTE: this function is only used in the owner onboarding server + // when we need to filter the OVs that haven't done the To2 and still + // have ttl. It is not used in the manufacturing server. + Err(StoreError::MethodNotAvailable) + } + + async fn query_ovs_db(&self) -> Result, StoreError> { + Err(StoreError::MethodNotAvailable) + } + + async fn store_data(&self, _key: K, value: V) -> Result<(), StoreError> { + let pool = fdo_db::sqlite::SqliteManufacturerDB::get_conn_pool(); + let conn = &mut pool.get().expect("Couldn't establish a connection"); + let raw = V::serialize_data(&value).expect("Error serializing data"); + let ov = OwnershipVoucher::from_pem_or_raw(&raw).expect("Error converting OV"); + fdo_db::sqlite::SqliteManufacturerDB::insert_ov(&ov, None, conn).map_err(|e| { + StoreError::Database(format!( + "Error inserting OV with guid {}: {e:?}", + ov.header().guid().to_string() + )) + }) + } + + async fn destroy_data(&self, key: &K) -> Result<(), StoreError> { + let pool = fdo_db::sqlite::SqliteManufacturerDB::get_conn_pool(); + let conn = &mut pool.get().expect("Couldn't establish a connection"); + fdo_db::sqlite::SqliteManufacturerDB::delete_ov(&key.to_string(), conn).map_err(|e| { + StoreError::Database(format!( + "Error deleting OV with guid {}: {e:?}", + key.to_string() + )) + }) + } + + async fn perform_maintenance(&self) -> Result<(), StoreError> { + let pool = fdo_db::sqlite::SqliteManufacturerDB::get_conn_pool(); + let conn = &mut pool.get().expect("Couldn't establish a connection"); + let now = time::OffsetDateTime::now_utc().unix_timestamp(); + fdo_db::sqlite::SqliteManufacturerDB::delete_ov_ttl_le(now, 
conn).map_err(|e| { + StoreError::Database(format!("Error deleting OVs with ttl <= {now}: {e:?}")) + }) + } +} + +struct SqliteOwnerStore { + phantom_k: PhantomData, + phantom_v: PhantomData, +} + +impl SqliteOwnerStore where K: std::string::ToString {} + +pub struct SqliteOwnerStoreFilterType { + neqs: Vec, + lts: Vec, +} + +#[async_trait] +impl FilterType for SqliteOwnerStoreFilterType +where + V: Serializable + Send + Sync + Clone + 'static, + MKT: MetadataLocalKey, +{ + fn neq(&mut self, _key: &crate::MetadataKey, _expected: &dyn MetadataValue) { + self.neqs = Vec::new(); + } + fn lt(&mut self, _key: &crate::MetadataKey, _max: i64) { + self.lts = Vec::new(); + } + async fn query(&self) -> Result, StoreError> { + let values = Vec::new(); + Ok(Some(ValueIter { + index: 0, + values, + errored: false, + })) + } +} + +#[async_trait] +impl Store for SqliteOwnerStore +where + OT: crate::StoreOpenMode, + K: std::str::FromStr + std::string::ToString + Send + Sync + 'static, + V: Serializable + Send + Sync + Clone + 'static, + MKT: crate::MetadataLocalKey + 'static, +{ + async fn load_data(&self, key: &K) -> Result, StoreError> { + let pool = fdo_db::sqlite::SqliteOwnerDB::get_conn_pool(); + let conn = &mut pool.get().expect("Couldn't establish a connection"); + let ov_db = fdo_db::sqlite::SqliteOwnerDB::get_ov(&key.to_string(), conn) + .expect("Error selecting OV"); + Ok(Some(V::deserialize_data(&ov_db.contents).map_err(|e| { + StoreError::Unspecified(format!("Error deserializing value: {e:?}")) + })?)) + } + + async fn store_metadata( + &self, + key: &K, + metadata_key: &crate::MetadataKey, + metadata_value: &dyn MetadataValue, + ) -> Result<(), StoreError> { + let pool = fdo_db::sqlite::SqliteOwnerDB::get_conn_pool(); + let conn = &mut pool.get().expect("Couldn't establish a connection"); + match metadata_key.to_key() { + "fdo.to2_performed" => { + let val = metadata_value + .to_text() + .parse::() + .expect("Unable to convert string to bool"); + 
fdo_db::sqlite::SqliteOwnerDB::update_ov_to2(&key.to_string(), Some(val), conn) + .map_err(|e| { + StoreError::Database(format!( + "Unable to update OV (guid {}) to2 with value {val}: {e:?}", + &key.to_string() + )) + }) + } + "fdo.to0_accept_owner_wait_seconds" => { + let val = metadata_value + .to_text() + .parse::() + .expect("Unable to convert string to i64"); + fdo_db::sqlite::SqliteOwnerDB::update_ov_to0_wait_seconds( + &key.to_string(), + Some(val), + conn, + ) + .map_err(|e| { + StoreError::Database(format!( + "Unable to update OV (guid {}) to0 with value {val}: {e:?}", + &key.to_string() + )) + }) + } + _ => Err(StoreError::Unspecified(format!( + "Unable to handle metadata key {}", + metadata_key.to_key() + ))), + } + } + + async fn destroy_metadata( + &self, + key: &K, + _metadata_key: &crate::MetadataKey, + ) -> Result<(), StoreError> { + let pool = fdo_db::sqlite::SqliteOwnerDB::get_conn_pool(); + let conn = &mut pool.get().expect("Couldn't establish a connection"); + fdo_db::sqlite::SqliteOwnerDB::update_ov_to0_wait_seconds(&key.to_string(), None, conn) + .map_err(|e| { + StoreError::Database(format!( + "Unable to set 'None' to0 metadata on OV {}: {e:?}", + key.to_string() + )) + })?; + fdo_db::sqlite::SqliteOwnerDB::update_ov_to2(&key.to_string(), None, conn).map_err(|e| { + StoreError::Database(format!( + "Unable to set 'None' to2 metadata on OV {}: {e:?}", + key.to_string() + )) + }) + } + + async fn query_data(&self) -> crate::QueryResult { + Err(StoreError::MethodNotAvailable) + } + + async fn query_ovs_db(&self) -> Result, StoreError> { + let mut ret = vec![]; + let pool = fdo_db::sqlite::SqliteOwnerDB::get_conn_pool(); + let conn = &mut pool + .get() + .map_err(|e| StoreError::Database(format!("Error connecting to DB {e:?}")))?; + let db_ovs = fdo_db::sqlite::SqliteOwnerDB::select_ov_to2_performed_and_ov_to0_less_than( + false, + time::OffsetDateTime::now_utc().unix_timestamp(), + conn, + ) + .map_err(|e| { + StoreError::Database(format!( + 
"Error selecting OVs filtering by to2 and to0: {e:?}" + )) + })?; + for db_ov in db_ovs { + ret.push( + OwnershipVoucher::from_pem_or_raw(&db_ov.contents).map_err(|e| { + StoreError::Unspecified(format!("Error parsing OV contents from DB: {e:?}")) + })?, + ); + } + Ok(ret) + } + + async fn store_data(&self, _key: K, value: V) -> Result<(), StoreError> { + let pool = fdo_db::sqlite::SqliteOwnerDB::get_conn_pool(); + let conn = &mut pool.get().expect("Couldn't establish a connection"); + let raw = V::serialize_data(&value).expect("Error serializing data"); + let ov = OwnershipVoucher::from_pem_or_raw(&raw).expect("Error converting OV"); + fdo_db::sqlite::SqliteOwnerDB::insert_ov(&ov, None, None, conn).map_err(|e| { + StoreError::Database(format!( + "Error inserting OV with guid {}: {e:?}", + ov.header().guid().to_string() + )) + }) + } + + async fn destroy_data(&self, key: &K) -> Result<(), StoreError> { + let pool = fdo_db::sqlite::SqliteOwnerDB::get_conn_pool(); + let conn = &mut pool.get().expect("Couldn't establish a connection"); + fdo_db::sqlite::SqliteOwnerDB::delete_ov(&key.to_string(), conn).map_err(|e| { + StoreError::Database(format!( + "Error deleting OV with guid {}: {e:?}", + &key.to_string() + )) + }) + } + + async fn perform_maintenance(&self) -> Result<(), StoreError> { + // This is not used in the owner onboarding server since the OVs there + // do not have a ttl, but we still need to return Ok since the method + // will be called. 
+ Ok(()) + } +} + +struct SqliteRendezvousStore { + phantom_k: PhantomData, + phantom_v: PhantomData, +} + +impl SqliteRendezvousStore where K: std::string::ToString {} + +pub struct SqliteRendezvousStoreFilterType { + neqs: Vec, + lts: Vec, +} + +#[async_trait] +impl FilterType for SqliteRendezvousStoreFilterType +where + V: Serializable + Send + Sync + Clone + 'static, + MKT: MetadataLocalKey, +{ + fn neq(&mut self, _key: &crate::MetadataKey, _expected: &dyn MetadataValue) { + self.neqs = Vec::new(); + } + fn lt(&mut self, _key: &crate::MetadataKey, _max: i64) { + self.lts = Vec::new(); + } + async fn query(&self) -> Result, StoreError> { + let values = Vec::new(); + Ok(Some(ValueIter { + index: 0, + values, + errored: false, + })) + } +} + +#[async_trait] +impl Store for SqliteRendezvousStore +where + OT: crate::StoreOpenMode, + K: std::str::FromStr + std::string::ToString + Send + Sync + 'static, + V: Serializable + Send + Sync + Clone + 'static, + MKT: crate::MetadataLocalKey + 'static, +{ + async fn load_data(&self, key: &K) -> Result, StoreError> { + let pool = fdo_db::sqlite::SqliteRendezvousDB::get_conn_pool(); + let conn = &mut pool.get().expect("Couldn't establish a connection"); + let ov_db = fdo_db::sqlite::SqliteRendezvousDB::get_ov(&key.to_string(), conn) + .expect("Error selecting OV"); + Ok(Some(V::deserialize_data(&ov_db.contents).map_err(|e| { + StoreError::Unspecified(format!("Error deserializing value: {e:?}")) + })?)) + } + + async fn store_metadata( + &self, + key: &K, + _metadata_key: &crate::MetadataKey, + metadata_value: &dyn MetadataValue, + ) -> Result<(), StoreError> { + let pool = fdo_db::sqlite::SqliteRendezvousDB::get_conn_pool(); + let conn = &mut pool.get().expect("Couldn't establish a connection"); + let val = metadata_value + .to_text() + .parse::() + .expect("Unable to convert"); + fdo_db::sqlite::SqliteRendezvousDB::update_ov_ttl(&key.to_string(), Some(val), conn) + .map_err(|e| { + StoreError::Database(format!( + "Unable to 
update OV with guid {} with {val}: {e:?}", + key.to_string() + )) + }) + } + + async fn destroy_metadata( + &self, + key: &K, + _metadata_key: &crate::MetadataKey, + ) -> Result<(), StoreError> { + let pool = fdo_db::sqlite::SqliteRendezvousDB::get_conn_pool(); + let conn = &mut pool.get().expect("Couldn't establish a connection"); + fdo_db::sqlite::SqliteRendezvousDB::update_ov_ttl(&key.to_string(), None, conn).map_err( + |e| { + StoreError::Database(format!( + "Unable to set 'None' ttl on OV {}: {e:?}", + key.to_string() + )) + }, + ) + } + + async fn query_data(&self) -> crate::QueryResult { + // NOTE: this function is only used in the owner onboarding server + // when we need to filter the OVs that haven't done the To2 and still + // have ttl. It is not used in the rendezvous server. + Err(StoreError::MethodNotAvailable) + } + + async fn query_ovs_db(&self) -> Result, StoreError> { + Err(StoreError::MethodNotAvailable) + } + + async fn store_data(&self, key: K, value: V) -> Result<(), StoreError> { + let pool = fdo_db::sqlite::SqliteRendezvousDB::get_conn_pool(); + let conn = &mut pool.get().expect("Couldn't establish a connection"); + let raw = V::serialize_data(&value).expect("Error serializing data"); + let stored = StoredItem::deserialize_data(&raw).expect("Error converting StoredItem"); + fdo_db::sqlite::SqliteRendezvousDB::insert_ov(&stored, &key.to_string(), None, conn) + .map_err(|e| { + StoreError::Database(format!( + "Error inserting StoredItem with guid {}: {e:?}", + &key.to_string() + )) + }) + } + + async fn destroy_data(&self, key: &K) -> Result<(), StoreError> { + let pool = fdo_db::sqlite::SqliteRendezvousDB::get_conn_pool(); + let conn = &mut pool.get().expect("Couldn't establish a connection"); + fdo_db::sqlite::SqliteRendezvousDB::delete_ov(&key.to_string(), conn).map_err(|e| { + StoreError::Database(format!( + "Error deleting OV with guid {}: {e:?}", + key.to_string() + )) + }) + } + + async fn perform_maintenance(&self) -> Result<(), 
StoreError> { + let pool = fdo_db::sqlite::SqliteRendezvousDB::get_conn_pool(); + let conn = &mut pool.get().expect("Couldn't establish a connection"); + let now = time::OffsetDateTime::now_utc().unix_timestamp(); + fdo_db::sqlite::SqliteRendezvousDB::delete_ov_ttl_le(now, conn).map_err(|e| { + StoreError::Database(format!("Error deleting OVs with ttl <= {now}: {e:?}")) + }) + } +} + +struct PostgresManufacturerStore { + phantom_k: PhantomData, + phantom_v: PhantomData, +} + +impl PostgresManufacturerStore where K: std::string::ToString {} + +pub struct PostgresManufacturerStoreFilterType { + neqs: Vec, + lts: Vec, +} + +#[async_trait] +impl FilterType for PostgresManufacturerStoreFilterType +where + V: Serializable + Send + Sync + Clone + 'static, + MKT: MetadataLocalKey, +{ + fn neq(&mut self, _key: &crate::MetadataKey, _expected: &dyn MetadataValue) { + self.neqs = Vec::new(); + } + fn lt(&mut self, _key: &crate::MetadataKey, _max: i64) { + self.lts = Vec::new(); + } + async fn query(&self) -> Result, StoreError> { + let values = Vec::new(); + Ok(Some(ValueIter { + index: 0, + values, + errored: false, + })) + } +} + +#[async_trait] +impl Store for PostgresManufacturerStore +where + OT: crate::StoreOpenMode, + K: std::str::FromStr + std::string::ToString + Send + Sync + 'static, + V: Serializable + Send + Sync + Clone + 'static, + MKT: crate::MetadataLocalKey + 'static, +{ + async fn load_data(&self, key: &K) -> Result, StoreError> { + let pool = fdo_db::postgres::PostgresManufacturerDB::get_conn_pool(); + let conn = &mut pool.get().expect("Couldn't establish a connection"); + let ov_db = fdo_db::postgres::PostgresManufacturerDB::get_ov(&key.to_string(), conn) + .expect("Error selecting OV"); + Ok(Some(V::deserialize_data(&ov_db.contents).map_err(|e| { + StoreError::Unspecified(format!("Error deserializing value: {e:?}")) + })?)) + } + + async fn store_metadata( + &self, + key: &K, + _metadata_key: &crate::MetadataKey, + metadata_value: &dyn MetadataValue, + ) 
-> Result<(), StoreError> { + let pool = fdo_db::postgres::PostgresManufacturerDB::get_conn_pool(); + let conn = &mut pool.get().expect("Couldn't establish a connection"); + let val = metadata_value + .to_text() + .parse::() + .expect("Unable to convert"); + fdo_db::postgres::PostgresManufacturerDB::update_ov_ttl(&key.to_string(), Some(val), conn) + .map_err(|e| { + StoreError::Database(format!( + "Unable to update OV with guid {} with {val}: {e:?}", + key.to_string() + )) + }) + } + + async fn destroy_metadata( + &self, + key: &K, + _metadata_key: &crate::MetadataKey, + ) -> Result<(), StoreError> { + let pool = fdo_db::postgres::PostgresManufacturerDB::get_conn_pool(); + let conn = &mut pool.get().expect("Couldn't establish a connection"); + fdo_db::postgres::PostgresManufacturerDB::update_ov_ttl(&key.to_string(), None, conn) + .map_err(|e| { + StoreError::Database(format!( + "Unable to set 'None' metadata on OV {}: {e:?}", + key.to_string() + )) + }) + } + + async fn query_data(&self) -> crate::QueryResult { + // NOTE: this function is only used in the owner onboarding server + // when we need to filter the OVs that haven't done the To2 and still + // have ttl. It is not used in the manufacturing server. 
+ Err(StoreError::MethodNotAvailable) + } + + async fn query_ovs_db(&self) -> Result, StoreError> { + Err(StoreError::MethodNotAvailable) + } + + async fn store_data(&self, _key: K, value: V) -> Result<(), StoreError> { + let pool = fdo_db::postgres::PostgresManufacturerDB::get_conn_pool(); + let conn = &mut pool.get().expect("Couldn't establish a connection"); + let raw = V::serialize_data(&value).expect("Error serializing data"); + let ov = OwnershipVoucher::from_pem_or_raw(&raw).expect("Error converting OV"); + fdo_db::postgres::PostgresManufacturerDB::insert_ov(&ov, None, conn).map_err(|e| { + StoreError::Database(format!( + "Error inserting OV with guid {}: {e:?}", + ov.header().guid().to_string() + )) + }) + } + + async fn destroy_data(&self, key: &K) -> Result<(), StoreError> { + let pool = fdo_db::postgres::PostgresManufacturerDB::get_conn_pool(); + let conn = &mut pool.get().expect("Couldn't establish a connection"); + fdo_db::postgres::PostgresManufacturerDB::delete_ov(&key.to_string(), conn).map_err(|e| { + StoreError::Database(format!( + "Error deleting OV with guid {}: {e:?}", + key.to_string() + )) + }) + } + + async fn perform_maintenance(&self) -> Result<(), StoreError> { + let pool = fdo_db::postgres::PostgresManufacturerDB::get_conn_pool(); + let conn = &mut pool.get().expect("Couldn't establish a connection"); + let now = time::OffsetDateTime::now_utc().unix_timestamp(); + fdo_db::postgres::PostgresManufacturerDB::delete_ov_ttl_le(now, conn).map_err(|e| { + StoreError::Database(format!("Error deleting OVs with ttl <= {now}: {e:?}")) + }) + } +} + +struct PostgresOwnerStore { + phantom_k: PhantomData, + phantom_v: PhantomData, +} + +impl PostgresOwnerStore where K: std::string::ToString {} + +pub struct PostgresOwnerStoreFilterType { + neqs: Vec, + lts: Vec, +} + +#[async_trait] +impl FilterType for PostgresOwnerStoreFilterType +where + V: Serializable + Send + Sync + Clone + 'static, + MKT: MetadataLocalKey, +{ + fn neq(&mut self, _key: 
&crate::MetadataKey, _expected: &dyn MetadataValue) { + self.neqs = Vec::new(); + } + fn lt(&mut self, _key: &crate::MetadataKey, _max: i64) { + self.lts = Vec::new(); + } + async fn query(&self) -> Result, StoreError> { + let values = Vec::new(); + Ok(Some(ValueIter { + index: 0, + values, + errored: false, + })) + } +} + +#[async_trait] +impl Store for PostgresOwnerStore +where + OT: crate::StoreOpenMode, + K: std::str::FromStr + std::string::ToString + Send + Sync + 'static, + V: Serializable + Send + Sync + Clone + 'static, + MKT: crate::MetadataLocalKey + 'static, +{ + async fn load_data(&self, key: &K) -> Result, StoreError> { + let pool = fdo_db::postgres::PostgresOwnerDB::get_conn_pool(); + let conn = &mut pool.get().expect("Couldn't establish a connection"); + let ov_db = fdo_db::postgres::PostgresOwnerDB::get_ov(&key.to_string(), conn) + .expect("Error selecting OV"); + Ok(Some(V::deserialize_data(&ov_db.contents).map_err(|e| { + StoreError::Unspecified(format!("Error deserializing value: {e:?}")) + })?)) + } + + async fn store_metadata( + &self, + key: &K, + metadata_key: &crate::MetadataKey, + metadata_value: &dyn MetadataValue, + ) -> Result<(), StoreError> { + let pool = fdo_db::postgres::PostgresOwnerDB::get_conn_pool(); + let conn = &mut pool.get().expect("Couldn't establish a connection"); + match metadata_key.to_key() { + "fdo.to2_performed" => { + let val = metadata_value + .to_text() + .parse::() + .expect("Unable to convert string to bool"); + fdo_db::postgres::PostgresOwnerDB::update_ov_to2(&key.to_string(), Some(val), conn) + .map_err(|e| { + StoreError::Database(format!( + "Unable to update OV (guid {}) to2 with value {val}: {e:?}", + &key.to_string() + )) + }) + } + "fdo.to0_accept_owner_wait_seconds" => { + let val = metadata_value + .to_text() + .parse::() + .expect("Unable to convert string to i64"); + fdo_db::postgres::PostgresOwnerDB::update_ov_to0_wait_seconds( + &key.to_string(), + Some(val), + conn, + ) + .map_err(|e| { + 
StoreError::Database(format!( + "Unable to update OV (guid {}) to0 with value {val}: {e:?}", + &key.to_string() + )) + }) + } + _ => Err(StoreError::Unspecified(format!( + "Unable to handle metadata key {}", + metadata_key.to_key() + ))), + } + } + + async fn destroy_metadata( + &self, + key: &K, + _metadata_key: &crate::MetadataKey, + ) -> Result<(), StoreError> { + let pool = fdo_db::postgres::PostgresOwnerDB::get_conn_pool(); + let conn = &mut pool.get().expect("Couldn't establish a connection"); + fdo_db::postgres::PostgresOwnerDB::update_ov_to0_wait_seconds(&key.to_string(), None, conn) + .map_err(|e| { + StoreError::Database(format!( + "Unable to set 'None' to0 metadata on OV {}: {e:?}", + key.to_string() + )) + })?; + fdo_db::postgres::PostgresOwnerDB::update_ov_to2(&key.to_string(), None, conn).map_err( + |e| { + StoreError::Database(format!( + "Unable to set 'None' to2 metadata on OV {}: {e:?}", + key.to_string() + )) + }, + ) + } + + async fn query_data(&self) -> crate::QueryResult { + Err(StoreError::MethodNotAvailable) + } + + async fn query_ovs_db(&self) -> Result, StoreError> { + let mut ret = vec![]; + let pool = fdo_db::postgres::PostgresOwnerDB::get_conn_pool(); + let conn = &mut pool + .get() + .map_err(|e| StoreError::Database(format!("Error connecting to DB {e:?}")))?; + let db_ovs = + fdo_db::postgres::PostgresOwnerDB::select_ov_to2_performed_and_ov_to0_less_than( + false, + time::OffsetDateTime::now_utc().unix_timestamp(), + conn, + ) + .map_err(|e| { + StoreError::Database(format!( + "Error selecting OVs filtering by to2 and to0: {e:?}" + )) + })?; + for db_ov in db_ovs { + ret.push( + OwnershipVoucher::from_pem_or_raw(&db_ov.contents).map_err(|e| { + StoreError::Unspecified(format!("Error parsing OV contents from DB: {e:?}")) + })?, + ); + } + Ok(ret) + } + + async fn store_data(&self, _key: K, value: V) -> Result<(), StoreError> { + let pool = fdo_db::postgres::PostgresOwnerDB::get_conn_pool(); + let conn = &mut pool.get().expect("Couldn't 
establish a connection"); + let raw = V::serialize_data(&value).expect("Error serializing data"); + let ov = OwnershipVoucher::from_pem_or_raw(&raw).expect("Error converting OV"); + fdo_db::postgres::PostgresOwnerDB::insert_ov(&ov, None, None, conn).map_err(|e| { + StoreError::Database(format!( + "Error inserting OV with guid {}: {e:?}", + ov.header().guid().to_string() + )) + }) + } + + async fn destroy_data(&self, key: &K) -> Result<(), StoreError> { + let pool = fdo_db::postgres::PostgresOwnerDB::get_conn_pool(); + let conn = &mut pool.get().expect("Couldn't establish a connection"); + fdo_db::postgres::PostgresOwnerDB::delete_ov(&key.to_string(), conn).map_err(|e| { + StoreError::Database(format!( + "Error deleting OV with guid {}: {e:?}", + &key.to_string() + )) + }) + } + + async fn perform_maintenance(&self) -> Result<(), StoreError> { + // This is not used in the owner onboarding server since the OVs there + // do not have a ttl, but we still need to return Ok since the method + // will be called. 
+ Ok(()) + } +} + +struct PostgresRendezvousStore { + phantom_k: PhantomData, + phantom_v: PhantomData, +} + +impl PostgresRendezvousStore where K: std::string::ToString {} + +pub struct PostgresRendezvousStoreFilterType { + neqs: Vec, + lts: Vec, +} + +#[async_trait] +impl FilterType for PostgresRendezvousStoreFilterType +where + V: Serializable + Send + Sync + Clone + 'static, + MKT: MetadataLocalKey, +{ + fn neq(&mut self, _key: &crate::MetadataKey, _expected: &dyn MetadataValue) { + self.neqs = Vec::new(); + } + fn lt(&mut self, _key: &crate::MetadataKey, _max: i64) { + self.lts = Vec::new(); + } + async fn query(&self) -> Result, StoreError> { + let values = Vec::new(); + Ok(Some(ValueIter { + index: 0, + values, + errored: false, + })) + } +} + +#[async_trait] +impl Store for PostgresRendezvousStore +where + OT: crate::StoreOpenMode, + K: std::str::FromStr + std::string::ToString + Send + Sync + 'static, + V: Serializable + Send + Sync + Clone + 'static, + MKT: crate::MetadataLocalKey + 'static, +{ + async fn load_data(&self, key: &K) -> Result, StoreError> { + let pool = fdo_db::postgres::PostgresRendezvousDB::get_conn_pool(); + let conn = &mut pool.get().expect("Couldn't establish a connection"); + let ov_db = fdo_db::postgres::PostgresRendezvousDB::get_ov(&key.to_string(), conn) + .expect("Error selecting OV"); + Ok(Some(V::deserialize_data(&ov_db.contents).map_err(|e| { + StoreError::Unspecified(format!("Error deserializing value: {e:?}")) + })?)) + } + + async fn store_metadata( + &self, + key: &K, + _metadata_key: &crate::MetadataKey, + metadata_value: &dyn MetadataValue, + ) -> Result<(), StoreError> { + let pool = fdo_db::postgres::PostgresRendezvousDB::get_conn_pool(); + let conn = &mut pool.get().expect("Couldn't establish a connection"); + let val = metadata_value + .to_text() + .parse::() + .expect("Unable to convert"); + fdo_db::postgres::PostgresRendezvousDB::update_ov_ttl(&key.to_string(), Some(val), conn) + .map_err(|e| { + 
StoreError::Database(format!( + "Unable to update OV with guid {} with {val}: {e:?}", + key.to_string() + )) + }) + } + + async fn destroy_metadata( + &self, + key: &K, + _metadata_key: &crate::MetadataKey, + ) -> Result<(), StoreError> { + let pool = fdo_db::postgres::PostgresRendezvousDB::get_conn_pool(); + let conn = &mut pool.get().expect("Couldn't establish a connection"); + fdo_db::postgres::PostgresRendezvousDB::update_ov_ttl(&key.to_string(), None, conn).map_err( + |e| { + StoreError::Database(format!( + "Unable to set 'None' ttl on OV {}: {e:?}", + key.to_string() + )) + }, + ) + } + + async fn query_data(&self) -> crate::QueryResult { + // NOTE: this function is only used in the owner onboarding server + // when we need to filter the OVs that haven't done the To2 and still + // have ttl. It is not used in the rendezvous server. + Err(StoreError::MethodNotAvailable) + } + + async fn query_ovs_db(&self) -> Result, StoreError> { + Err(StoreError::MethodNotAvailable) + } + + async fn store_data(&self, key: K, value: V) -> Result<(), StoreError> { + let pool = fdo_db::postgres::PostgresRendezvousDB::get_conn_pool(); + let conn = &mut pool.get().expect("Couldn't establish a connection"); + let raw = V::serialize_data(&value).expect("Error serializing data"); + let stored = StoredItem::deserialize_data(&raw).expect("Error converting StoredItem"); + fdo_db::postgres::PostgresRendezvousDB::insert_ov(&stored, &key.to_string(), None, conn) + .map_err(|e| { + StoreError::Database(format!( + "Error inserting StoredItem with guid {}: {e:?}", + &key.to_string() + )) + }) + } + + async fn destroy_data(&self, key: &K) -> Result<(), StoreError> { + let pool = fdo_db::postgres::PostgresRendezvousDB::get_conn_pool(); + let conn = &mut pool.get().expect("Couldn't establish a connection"); + fdo_db::postgres::PostgresRendezvousDB::delete_ov(&key.to_string(), conn).map_err(|e| { + StoreError::Database(format!( + "Error deleting OV with guid {}: {e:?}", + key.to_string() + )) + 
}) + } + + async fn perform_maintenance(&self) -> Result<(), StoreError> { + let pool = fdo_db::postgres::PostgresRendezvousDB::get_conn_pool(); + let conn = &mut pool.get().expect("Couldn't establish a connection"); + let now = time::OffsetDateTime::now_utc().unix_timestamp(); + fdo_db::postgres::PostgresRendezvousDB::delete_ov_ttl_le(now, conn).map_err(|e| { + StoreError::Database(format!("Error deleting OVs with ttl <= {now}: {e:?}")) + }) + } +} diff --git a/store/src/directory.rs b/store/src/directory.rs index 5ac5c8628..a7b31239d 100644 --- a/store/src/directory.rs +++ b/store/src/directory.rs @@ -1,3 +1,4 @@ +use fdo_data_formats::ownershipvoucher::OwnershipVoucher; use std::collections::HashSet; use std::convert::TryInto; use std::fs::{self, File}; @@ -297,6 +298,10 @@ where })) } + async fn query_ovs_db(&self) -> Result, StoreError> { + Err(StoreError::MethodNotAvailable) + } + async fn store_data(&self, key: K, value: V) -> Result<(), StoreError> { let finalpath = self.get_path(&key); let mut path = finalpath.clone(); diff --git a/store/src/lib.rs b/store/src/lib.rs index 5bf5a9a27..ed272fbb0 100644 --- a/store/src/lib.rs +++ b/store/src/lib.rs @@ -4,7 +4,7 @@ use core::pin::Pin; use serde::{Deserialize, Serialize}; use thiserror::Error; -use fdo_data_formats::Serializable; +use fdo_data_formats::{ownershipvoucher::OwnershipVoucher, Serializable}; #[derive(Debug, Error)] pub enum StoreError { @@ -12,6 +12,10 @@ pub enum StoreError { Unspecified(String), #[error("Configuration error: {0}")] Configuration(String), + #[error("Method not available")] + MethodNotAvailable, + #[error("Internal database error: {0}")] + Database(String), } mod private { @@ -184,6 +188,15 @@ pub trait Store: Send + Sync { Self: 'async_trait, OT: Writable; + fn query_ovs_db<'life0, 'async_trait>( + &'life0 self, + ) -> Pin< + Box, StoreError>> + 'async_trait + Send>, + > + where + 'life0: 'async_trait, + Self: 'async_trait; + fn store_data<'life0, 'async_trait>( &'life0 self, key: 
K, @@ -216,12 +229,31 @@ pub trait Store: Send + Sync { #[cfg(feature = "directory")] mod directory; +#[derive(Debug, Serialize, Deserialize)] +pub enum DBType { + Sqlite, + Postgres, +} + +#[derive(Debug, Serialize, Deserialize)] +pub enum ServerType { + Manufacturer, + Owner, + Rendezvous, +} + #[derive(Debug, Serialize, Deserialize)] pub enum StoreConfig { #[cfg(feature = "directory")] Directory { path: std::path::PathBuf }, + #[cfg(feature = "db")] + Sqlite(ServerType), + #[cfg(feature = "db")] + Postgres(ServerType), } +mod db; + impl StoreConfig { pub fn initialize(&self) -> Result>, StoreError> where @@ -234,6 +266,10 @@ impl StoreConfig { match self { #[cfg(feature = "directory")] StoreConfig::Directory { path } => directory::initialize(path), + #[cfg(feature = "db")] + StoreConfig::Sqlite(server) => db::initialize(DBType::Sqlite, server), + #[cfg(feature = "db")] + StoreConfig::Postgres(server) => db::initialize(DBType::Postgres, server), } } } diff --git a/test/fdo-postgres.sh b/test/fdo-postgres.sh new file mode 100755 index 000000000..28049ff78 --- /dev/null +++ b/test/fdo-postgres.sh @@ -0,0 +1,196 @@ +#!/bin/bash +set -euox pipefail + +# Colorful output. +function greenprint { + echo -e "\033[1;32m${1}\033[0m" +} + +POSTGRES_IP=192.168.200.2 +FDO_MANUFACTURING_ADDRESS=192.168.200.50 +FDO_OWNER_ONBOARDING_ADDRESS=192.168.200.51 +FDO_RENDEZVOUS_ADDRESS=192.168.200.52 + +POSTGRES_USERNAME=postgres +POSTGRES_PASSWORD=foobar +POSTGRES_DB=postgres + +# Prepare stage repo network +greenprint "🔧 Prepare stage repo network" +sudo podman network inspect edge >/dev/null 2>&1 || sudo podman network create --driver=bridge --subnet=192.168.200.0/24 --gateway=192.168.200.254 edge + +# Build FDO and clients container image +greenprint "🔧 Build FDO and clients container image" +sudo buildah build -f contrib/containers/build -t fdo-build:latest . +sudo buildah build -f contrib/containers/manufacturing-server --build-arg BUILDID=latest -t manufacturing-server:latest . 
+sudo buildah build -f contrib/containers/rendezvous-server --build-arg BUILDID=latest -t rendezvous-server:latest . +sudo buildah build -f contrib/containers/owner-onboarding-server --build-arg BUILDID=latest -t owner-onboarding-server:latest . +sudo buildah build -f contrib/containers/aio --build-arg BUILDID=latest -t aio:latest . +sudo buildah build -f test/files/clients --build-arg BUILDID=latest -t clients:latest . +sudo buildah images + +########################################################## +## +## Prepare FDO containers +## +########################################################## +greenprint "🔧 Generate FDO key and configuration files" +sudo mkdir aio +sudo podman run --rm \ + -v "$PWD"/aio/:/aio:z \ + "localhost/aio:latest" \ + aio --directory aio generate-configs-and-keys --contact-hostname "$FDO_MANUFACTURING_ADDRESS" + +# Prepare FDO config files +greenprint "🔧 Prepare FDO key and configuration files for FDO containers" +sudo cp -r aio/keys test/fdo/ +sudo rm -rf aio + +# Set servers store driver to postgres +greenprint "🔧 Set servers store driver to postgres" +sudo pip3 install yq +# Configure manufacturing server db +yq -yi 'del(.ownership_voucher_store_driver.Directory)' test/fdo/manufacturing-server.yml +yq -yi '.ownership_voucher_store_driver += {"Postgres": "Manufacturer"}' test/fdo/manufacturing-server.yml +# Configure owner onboarding server db +yq -yi 'del(.ownership_voucher_store_driver.Directory)' test/fdo/owner-onboarding-server.yml +yq -yi '.ownership_voucher_store_driver += {"Postgres": "Owner"}' test/fdo/owner-onboarding-server.yml +# Configure rendezvous server db +yq -yi 'del(.storage_driver.Directory)' test/fdo/rendezvous-server.yml +yq -yi '.storage_driver += {"Postgres": "Rendezvous"}' test/fdo/rendezvous-server.yml + +# Prepare postgres db init sql script +greenprint "🔧 Prepare postgres db init sql script" +mkdir -p initdb +cp migrations/migrations_manufacturing_server_postgres/2023-10-03-152801_create_db/up.sql 
initdb/manufacturing.sql +cp migrations/migrations_owner_onboarding_server_postgres/2023-10-03-152801_create_db/up.sql initdb/owner-onboarding.sql +cp migrations/migrations_rendezvous_server_postgres/2023-10-03-152801_create_db/up.sql initdb/rendezvous.sql + +greenprint "🔧 Starting postgres" +sudo podman run -d \ + --ip "$POSTGRES_IP" \ + --name postgres \ + --network edge \ + -e POSTGRES_PASSWORD="$POSTGRES_PASSWORD" \ + -v "$PWD"/initdb/:/docker-entrypoint-initdb.d/:z \ + "quay.io/xiaofwan/postgres" + +greenprint "🔧 Starting fdo manufacture server" +sudo podman run -d \ + --ip "$FDO_MANUFACTURING_ADDRESS" \ + --name manufacture-server \ + --network edge \ + -v "$PWD"/test/fdo/:/etc/fdo/:z \ + -p 8080:8080 \ + -e POSTGRES_MANUFACTURER_DATABASE_URL="postgresql://${POSTGRES_USERNAME}:${POSTGRES_PASSWORD}@${POSTGRES_IP}/${POSTGRES_DB}" \ + "localhost/manufacturing-server:latest" + +greenprint "🔧 Starting fdo owner onboarding server" +sudo podman run -d \ + --ip "$FDO_OWNER_ONBOARDING_ADDRESS" \ + --name owner-onboarding-server \ + --network edge \ + -v "$PWD"/test/fdo/:/etc/fdo/:z \ + -p 8081:8081 \ + -e POSTGRES_OWNER_DATABASE_URL="postgresql://${POSTGRES_USERNAME}:${POSTGRES_PASSWORD}@${POSTGRES_IP}/${POSTGRES_DB}" \ + "localhost/owner-onboarding-server:latest" + +greenprint "🔧 Starting fdo rendezvous server" +sudo podman run -d \ + --ip "$FDO_RENDEZVOUS_ADDRESS" \ + --name rendezvous-server \ + --network edge \ + -v "$PWD"/test/fdo/:/etc/fdo/:z \ + -p 8082:8082 \ + -e POSTGRES_RENDEZVOUS_DATABASE_URL="postgresql://${POSTGRES_USERNAME}:${POSTGRES_PASSWORD}@${POSTGRES_IP}/${POSTGRES_DB}" \ + "localhost/rendezvous-server:latest" + +# Wait for fdo containers to be up and running +until [ "$(curl -X POST http://${FDO_MANUFACTURING_ADDRESS}:8080/ping)" == "pong" ]; do + sleep 1; +done; + +until [ "$(curl -X POST http://${FDO_OWNER_ONBOARDING_ADDRESS}:8081/ping)" == "pong" ]; do + sleep 1; +done; + +until [ "$(curl -X POST http://${FDO_RENDEZVOUS_ADDRESS}:8082/ping)" == 
"pong" ]; do + sleep 1; +done; + + +greenprint "🔧 Check container running status" +sudo podman ps -a + +greenprint "🔧 Collecting container logs" +sudo podman logs postgres manufacture-server owner-onboarding-server rendezvous-server + +greenprint "🔧 Check db tables" +sudo podman exec \ + postgres \ + psql \ + --username="${POSTGRES_USERNAME}" \ + -c "\dt" | grep "3 rows" + +greenprint "🔧 Generate OV" +sudo podman run \ + --rm \ + --network edge \ + --privileged \ + localhost/clients \ + fdo-manufacturing-client no-plain-di --insecure --manufacturing-server-url "http://${FDO_MANUFACTURING_ADDRESS}:8080" + +greenprint "🔧 Check manufacturing server db for new OV" +sudo podman exec \ + postgres \ + psql \ + --username="${POSTGRES_USERNAME}" \ + -c "SELECT * FROM manufacturer_vouchers ;" | grep "1 row" + +greenprint "🔧 Check container running status" +sudo podman ps -a + +greenprint "🔧 Export OV" +mkdir export-ov +sudo podman run \ + --rm \ + --network edge \ + --privileged \ + -v "$PWD"/export-ov:/export-ov:z \ + localhost/clients \ + fdo-owner-tool export-manufacturer-vouchers postgres "postgresql://${POSTGRES_USERNAME}:${POSTGRES_PASSWORD}@${POSTGRES_IP}/${POSTGRES_DB}" /export-ov/ | grep "exported" +EXPORTED_FILE=$(ls -1 export-ov) +greenprint "🔧 Import OV into owner db" +sudo podman run \ + --rm \ + --network edge \ + --privileged \ + -v "$PWD"/export-ov:/export-ov:z \ + localhost/clients \ + fdo-owner-tool import-ownership-vouchers postgres "postgresql://${POSTGRES_USERNAME}:${POSTGRES_PASSWORD}@${POSTGRES_IP}/${POSTGRES_DB}" "/export-ov/${EXPORTED_FILE}" | grep "OV import finished" + +greenprint "🔧 Check owner db for imported OV" +sudo podman exec \ + postgres \ + psql \ + --username="${POSTGRES_USERNAME}" \ + -c "SELECT * FROM owner_vouchers ;" | grep "1 row" + +greenprint "🔧 Sleep 60 seconds to sync with rendezvous db" +sleep 60 + +greenprint "🔧 Check rendezvous db for synced OV" +sudo podman exec \ + postgres \ + psql \ + --username="${POSTGRES_USERNAME}" \ + 
-c "SELECT * FROM rendezvous_vouchers ;" | grep "1 row" + +greenprint "🔧 Check container running status" +sudo podman ps -a + +greenprint "🔧 Collecting container logs" +sudo podman logs rendezvous-server + +rm -rf initdb export-ov +exit 0 diff --git a/test/files/clients b/test/files/clients new file mode 100644 index 000000000..47567046b --- /dev/null +++ b/test/files/clients @@ -0,0 +1,5 @@ +FROM quay.io/centos/centos:stream9 +ARG BUILDID +COPY --from=fdo-build:${BUILDID} /usr/src/target/release/fdo-manufacturing-client /usr/local/bin +COPY --from=fdo-build:${BUILDID} /usr/src/target/release/fdo-owner-tool /usr/local/bin +RUN yum install -y postgresql libpq libpq-devel diff --git a/util/src/servers/mod.rs b/util/src/servers/mod.rs index 836e47494..8805238b8 100644 --- a/util/src/servers/mod.rs +++ b/util/src/servers/mod.rs @@ -74,6 +74,12 @@ pub fn settings_per_device(guid: &str) -> Result { path.push(file_name); path.to_string_lossy().into_owned() } + StoreConfig::Sqlite(_) => { + bail!("Per-device settings with sqlite database not implemented"); + } + StoreConfig::Postgres(_) => { + bail!("Per-device settings with Postgres database not implemented"); + } }; let config = Config::builder() .add_source(config::File::from(Path::new(&path_per_device_store)))