diff --git a/.github/workflows/manual-deploy-obscuro-gateway-database.yml b/.github/workflows/manual-deploy-obscuro-gateway-database.yml index 552a4a01c9..64a8c993bf 100644 --- a/.github/workflows/manual-deploy-obscuro-gateway-database.yml +++ b/.github/workflows/manual-deploy-obscuro-gateway-database.yml @@ -90,7 +90,7 @@ jobs: && curl -fsSL https://get.docker.com -o get-docker.sh && sh ./get-docker.sh \ && git clone --depth 1 -b ${{ env.BRANCH_NAME }} https://github.com/ten-protocol/go-ten.git /home/obscuro/go-obscuro \ && docker network create --driver bridge node_network || true \ - && mkdir -p /home/obscuro/promtail \ + && mkdir -p /home/obscuro/metrics \ && echo " server: http_listen_port: 9080 @@ -100,7 +100,7 @@ jobs: filename: /tmp/positions.yaml clients: - - url: ${{ vars.METRICS_URI }} + - url: ${{ vars.METRICS_URI }}/loki/api/v1/push batchwait: 3s batchsize: 1048576 tls_config: @@ -124,16 +124,58 @@ jobs: target_label: \"job\" - replacement: ${{ github.event.inputs.testnet_type }}-OG-MariaDB-${{ GITHUB.RUN_NUMBER }} target_label: "node_name" - " > /home/obscuro/promtail/promtail-config.yaml \ + " > /home/obscuro/metrics/promtail-config.yaml \ + && echo " + global: + scrape_interval: 15s + evaluation_interval: 15s + remote_write: + - url: ${{ vars.METRICS_URI }}/prometheus/metrics/api/v1/write + tls_config: + insecure_skip_verify: true + basic_auth: + username: ${{ secrets.LOKI_USER }} + password: ${{ secrets.LOKI_PASSWORD }} + scrape_configs: + # Node metrics + - job_name: node-${{ github.event.inputs.testnet_type }}-OG-MariaDB-${{ GITHUB.RUN_NUMBER }} + scrape_interval: 5s # Frequent scrapes for node metrics + static_configs: + - targets: + - node_exporter:9100 # Node Exporter instance + relabel_configs: + - source_labels: [job] + target_label: 'node' + replacement: ${{ github.event.inputs.testnet_type }}-OG-MariaDB-${{ GITHUB.RUN_NUMBER }} + + # Container metrics + - job_name: container-${{ github.event.inputs.testnet_type }}-OG-MariaDB-${{ 
GITHUB.RUN_NUMBER }} + scrape_interval: 5s + static_configs: + - targets: + - cadvisor:8080 # cAdvisor instance for container metrics + relabel_configs: + - source_labels: [job] + target_label: 'node' + replacement: ${{ github.event.inputs.testnet_type }}-OG-MariaDB-${{ GITHUB.RUN_NUMBER }} + + " > /home/obscuro/metrics/prometheus.yaml \ && docker run -d --name promtail \ --network node_network \ -e HOSTNAME=${{ github.event.inputs.testnet_type }}-OG-MariaDB-${{ GITHUB.RUN_NUMBER }} \ -v /var/log:/var/log \ - -v /home/obscuro/promtail:/etc/promtail \ + -v /home/obscuro/metrics:/etc/promtail \ -v /var/lib/docker/containers:/var/lib/docker/containers:ro \ -v /var/run/docker.sock:/var/run/docker.sock \ grafana/promtail:latest \ -config.file=/etc/promtail/promtail-config.yaml -config.expand-env=true \ + && docker run -d --name prometheus \ + --network node_network \ + -p 9090:9090 \ + -v /home/obscuro/metrics/prometheus.yaml:/etc/prometheus/prometheus.yml \ + -v prometheus-data:/prometheus \ + prom/prometheus:latest \ + --config.file=/etc/prometheus/prometheus.yml \ && cd /home/obscuro/go-obscuro/ \ && docker run -d --name ${{ github.event.inputs.testnet_type }}-OG-MariaDB-${{ GITHUB.RUN_NUMBER }} \ -p 3306:3306 \ diff --git a/.github/workflows/manual-deploy-obscuro-gateway.yml b/.github/workflows/manual-deploy-obscuro-gateway.yml index b04124670a..aae976d4d3 100644 --- a/.github/workflows/manual-deploy-obscuro-gateway.yml +++ b/.github/workflows/manual-deploy-obscuro-gateway.yml @@ -90,6 +90,7 @@ jobs: "GATEWAY_RATE_LIMIT_USER_COMPUTE_TIME" "GATEWAY_RATE_LIMIT_WINDOW" "GATEWAY_MAX_CONCURRENT_REQUESTS_PER_USER" + "GATEWAY_KEY_EXCHANGE_URL" ) for VAR_NAME in "${VAR_NAMES[@]}"; do @@ -119,6 +120,7 @@ jobs: echo "GATEWAY_RATE_LIMIT_USER_COMPUTE_TIME: $GATEWAY_RATE_LIMIT_USER_COMPUTE_TIME" echo "GATEWAY_RATE_LIMIT_WINDOW: $GATEWAY_RATE_LIMIT_WINDOW" echo "GATEWAY_MAX_CONCURRENT_REQUESTS_PER_USER: $GATEWAY_MAX_CONCURRENT_REQUESTS_PER_USER" + echo "GATEWAY_KEY_EXCHANGE_URL: 
$GATEWAY_KEY_EXCHANGE_URL" - name: "Print GitHub variables" # This is a useful record of what the environment variables were at the time the job ran, for debugging and reference @@ -239,9 +241,9 @@ jobs: cd /home/obscuro/go-obscuro/ # Promtail Integration Start - mkdir -p /home/obscuro/promtail + mkdir -p /home/obscuro/metrics - cat < /home/obscuro/promtail/promtail-config.yaml + cat < /home/obscuro/metrics/promtail-config.yaml server: http_listen_port: 9080 @@ -251,7 +253,7 @@ jobs: filename: /tmp/positions.yaml clients: - - url: "${{ vars.METRICS_URI }}" + - url: "${{ vars.METRICS_URI }}/loki/api/v1/push" batchwait: 3s batchsize: 1048576 tls_config: @@ -281,20 +283,108 @@ jobs: --network node_network \ -e HOSTNAME="${{ env.VM_NAME }}" \ -v /var/log:/var/log \ - -v /home/obscuro/promtail:/etc/promtail \ + -v /home/obscuro/metrics:/etc/promtail \ -v /var/lib/docker/containers:/var/lib/docker/containers:ro \ -v /var/run/docker.sock:/var/run/docker.sock \ grafana/promtail:latest \ -config.file=/etc/promtail/promtail-config.yaml -config.expand-env=true + + cat < /home/obscuro/metrics/prometheus.yaml + global: + scrape_interval: 15s + evaluation_interval: 15s + remote_write: + - url: "${{ vars.METRICS_URI }}/prometheus/metrics/api/v1/write" + tls_config: + insecure_skip_verify: true + basic_auth: + username: "${{ secrets.LOKI_USER }}" + password: "${{ secrets.LOKI_PASSWORD }}" + scrape_configs: + # Node metrics + - job_name: node-${{ env.VM_NAME }} + scrape_interval: 5s # Frequent scrapes for node metrics + static_configs: + - targets: + - node_exporter:9100 # Node Exporter instance + relabel_configs: + - source_labels: [job] + target_label: 'node' + replacement: node-${{ env.VM_NAME }} + + # Container metrics + - job_name: container-${{ env.VM_NAME }} + scrape_interval: 5s + static_configs: + - targets: + - cadvisor:8080 # cAdvisor instance for container metrics + relabel_configs: + - source_labels: [job] + target_label: 'node' + replacement: container-${{ env.VM_NAME 
}} + EOF + + docker volume create prometheus-data + docker run -d --name prometheus \ + --network node_network \ + -p 9090:9090 \ + -v /home/obscuro/metrics/prometheus.yaml:/etc/prometheus/prometheus.yml \ + -v prometheus-data:/prometheus \ + prom/prometheus:latest \ + --config.file=/etc/prometheus/prometheus.yml + + docker run -d --name node_exporter \ + --network node_network \ + -p 9100:9100 \ + --pid="host" \ + -v /:/host:ro \ + quay.io/prometheus/node-exporter:latest \ + --path.rootfs=/host + + docker run -d --name cadvisor \ + --network node_network \ + -p 8080:8080 \ + --privileged \ + -v /:/rootfs:ro \ + -v /var/run:/var/run:ro \ + -v /sys:/sys:ro \ + -v /var/lib/docker/:/var/lib/docker:ro \ + -v /dev/disk/:/dev/disk:ro \ + gcr.io/cadvisor/cadvisor:latest # Promtail Integration End + # Create a named volume for persistence + docker volume create "${{ env.VM_NAME }}-data" + # Start Ten Gateway Container docker run -d -p 80:80 -p 81:81 --name "${{ env.VM_NAME }}" \ --device /dev/sgx_enclave --device /dev/sgx_provision \ + -v "${{ env.VM_NAME }}-data:/data" \ -e OBSCURO_GATEWAY_VERSION="${{ github.run_number }}-${{ github.sha }}" \ -e OE_SIMULATION=0 \ "${{ env.DOCKER_BUILD_TAG_GATEWAY }}" \ ego run /home/ten/go-ten/tools/walletextension/main/main \ -host=0.0.0.0 -port=80 -portWS=81 -nodeHost="${{ env.L2_RPC_URL_VALIDATOR }}" -verbose=true \ -logPath=sys_out -dbType=cosmosDB -dbConnectionURL="${{ secrets.COSMOS_DB_CONNECTION_STRING }}" \ - -rateLimitUserComputeTime="${{ env.GATEWAY_RATE_LIMIT_USER_COMPUTE_TIME }}" -rateLimitWindow="${{ env.GATEWAY_RATE_LIMIT_WINDOW }}" -maxConcurrentRequestsPerUser="${{ env.GATEWAY_MAX_CONCURRENT_REQUESTS_PER_USER }}" ' + -rateLimitUserComputeTime="${{ env.GATEWAY_RATE_LIMIT_USER_COMPUTE_TIME }}" \ + -rateLimitWindow="${{ env.GATEWAY_RATE_LIMIT_WINDOW }}" \ + -maxConcurrentRequestsPerUser="${{ env.GATEWAY_MAX_CONCURRENT_REQUESTS_PER_USER }}" \ + -keyExchangeURL="${{ env.GATEWAY_KEY_EXCHANGE_URL }}" \ + -insideEnclave=true \ + + 
+ # After starting the container, verify the volume mount + docker exec "${{ env.VM_NAME }}" sh -c " + echo \"Checking volume mount...\"; + df -h | grep /data; + echo \"Directory listing:\"; + ls -la /data; + echo \"Current working directory:\"; + pwd; + echo \"Directory permissions:\"; + ls -la /; + echo \"Process status:\"; + ps aux; + " + ' + \ No newline at end of file diff --git a/.github/workflows/manual-deploy-testnet-l1.yml b/.github/workflows/manual-deploy-testnet-l1.yml index d7ca9f1bc1..8aaeb0eaa9 100644 --- a/.github/workflows/manual-deploy-testnet-l1.yml +++ b/.github/workflows/manual-deploy-testnet-l1.yml @@ -113,7 +113,7 @@ jobs: && sudo snap refresh \ && curl -fsSL https://get.docker.com -o get-docker.sh && sh ./get-docker.sh \ && docker network create --driver bridge l1_network || true \ - && mkdir -p /home/obscuro/promtail \ + && mkdir -p /home/obscuro/metrics \ && echo " server: http_listen_port: 9080 @@ -123,7 +123,7 @@ jobs: filename: /tmp/positions.yaml clients: - - url: ${{ vars.METRICS_URI }} + - url: ${{ vars.METRICS_URI }}/loki/api/v1/push batchwait: 3s batchsize: 1048576 tls_config: @@ -147,16 +147,75 @@ jobs: target_label: \"job\" - replacement: ${{ github.event.inputs.testnet_type }}-eth2network-${{ GITHUB.RUN_NUMBER }} target_label: "node_name" - " > /home/obscuro/promtail/promtail-config.yaml \ + " > /home/obscuro/metrics/promtail-config.yaml \ + && echo " + global: + scrape_interval: 15s + evaluation_interval: 15s + remote_write: + - url: ${{ vars.METRICS_URI }}/prometheus/metrics/api/v1/write + tls_config: + insecure_skip_verify: true + basic_auth: + username: ${{ secrets.LOKI_USER }} + password: ${{ secrets.LOKI_PASSWORD }} + scrape_configs: + # Node metrics + - job_name: node-${{ github.event.inputs.testnet_type }}-eth2network-${{ GITHUB.RUN_NUMBER }} + scrape_interval: 5s # Frequent scrapes for node metrics + static_configs: + - targets: + - node_exporter:9100 # Node Exporter instance + relabel_configs: + - source_labels: [job] + 
target_label: 'node' + replacement: ${{ github.event.inputs.testnet_type }}-eth2network-${{ GITHUB.RUN_NUMBER }} + + # Container metrics + - job_name: container-${{ github.event.inputs.testnet_type }}-eth2network-${{ GITHUB.RUN_NUMBER }} + scrape_interval: 5s + static_configs: + - targets: + - cadvisor:8080 # cAdvisor instance for container metrics + relabel_configs: + - source_labels: [job] + target_label: 'node' + replacement: ${{ github.event.inputs.testnet_type }}-eth2network-${{ GITHUB.RUN_NUMBER }} + " > /home/obscuro/metrics/prometheus.yaml \ && docker run -d --name promtail \ --network l1_network \ -e HOSTNAME=${{ github.event.inputs.testnet_type }}-eth2network-${{ GITHUB.RUN_NUMBER }} \ -v /var/log:/var/log \ - -v /home/obscuro/promtail:/etc/promtail \ + -v /home/obscuro/metrics:/etc/promtail \ -v /var/lib/docker/containers:/var/lib/docker/containers:ro \ -v /var/run/docker.sock:/var/run/docker.sock \ grafana/promtail:latest \ -config.file=/etc/promtail/promtail-config.yaml -config.expand-env=true \ + && docker volume create prometheus-data \ + && docker run -d --name prometheus \ + --network l1_network \ + -p 9090:9090 \ + -v /home/obscuro/metrics/prometheus.yaml:/etc/prometheus/prometheus.yml \ + -v prometheus-data:/prometheus \ + prom/prometheus:latest \ + --config.file=/etc/prometheus/prometheus.yml \ + && docker run -d --name node_exporter \ + --network l1_network \ + -p 9100:9100 \ + --pid="host" \ + -v /:/host:ro \ + quay.io/prometheus/node-exporter:latest \ + --path.rootfs=/host \ + && docker run -d --name cadvisor \ + --network l1_network \ + -p 8080:8080 \ + --privileged \ + -v /:/rootfs:ro \ + -v /var/run:/var/run:ro \ + -v /sys:/sys:ro \ + -v /var/lib/docker/:/var/lib/docker:ro \ + -v /dev/disk/:/dev/disk:ro \ + gcr.io/cadvisor/cadvisor:latest \ && docker run -d \ -p 8025:8025 -p 8026:8026 -p 9000:9000 -p 9001:9001 -p 12600:12600 \ --entrypoint /home/obscuro/go-obscuro/integration/eth2network/main/main ${{ vars.DOCKER_BUILD_TAG_ETH2NETWORK }} \ 
diff --git a/.github/workflows/manual-deploy-testnet-l2.yml b/.github/workflows/manual-deploy-testnet-l2.yml index 967eb645ad..14082d8aa5 100644 --- a/.github/workflows/manual-deploy-testnet-l2.yml +++ b/.github/workflows/manual-deploy-testnet-l2.yml @@ -228,7 +228,7 @@ jobs: && chown obscurouser:obscurouser /home/obscurouser/edb-connect.sh \ && chmod u+x /home/obscurouser/edb-connect.sh \ && docker network create --driver bridge node_network || true \ - && mkdir -p /home/obscuro/promtail \ + && mkdir -p /home/obscuro/metrics \ && echo " server: http_listen_port: 9080 @@ -238,7 +238,7 @@ jobs: filename: /tmp/positions.yaml clients: - - url: ${{ vars.METRICS_URI }} + - url: ${{ vars.METRICS_URI }}/loki/api/v1/push batchwait: 3s batchsize: 1048576 tls_config: @@ -262,16 +262,76 @@ jobs: target_label: \"job\" - replacement: ${{ matrix.host_id }}-${{ github.event.inputs.testnet_type }}-${{ GITHUB.RUN_NUMBER }} target_label: "node_name" - " > /home/obscuro/promtail/promtail-config.yaml \ + " > /home/obscuro/metrics/promtail-config.yaml \ + && echo " + global: + scrape_interval: 15s + evaluation_interval: 15s + remote_write: + - url: ${{ vars.METRICS_URI }}/prometheus/metrics/api/v1/write + tls_config: + insecure_skip_verify: true + basic_auth: + username: ${{ secrets.LOKI_USER }} + password: ${{ secrets.LOKI_PASSWORD }} + scrape_configs: + # Node metrics + - job_name: node-${{ matrix.host_id }}-${{ github.event.inputs.testnet_type }}-${{ GITHUB.RUN_NUMBER }} + scrape_interval: 5s # Frequent scrapes for node metrics + static_configs: + - targets: + - node_exporter:9100 # Node Exporter instance + relabel_configs: + - source_labels: [job] + target_label: 'node' + replacement: ${{ matrix.host_id }}-${{ github.event.inputs.testnet_type }}-${{ GITHUB.RUN_NUMBER }} + + # Container metrics + - job_name: container-${{ matrix.host_id }}-${{ github.event.inputs.testnet_type }}-${{ GITHUB.RUN_NUMBER }} + scrape_interval: 5s + static_configs: + - targets: + - cadvisor:8080 # 
cAdvisor instance for container metrics + relabel_configs: + - source_labels: [job] + target_label: 'node' + replacement: ${{ matrix.host_id }}-${{ github.event.inputs.testnet_type }}-${{ GITHUB.RUN_NUMBER }} + + " > /home/obscuro/metrics/prometheus.yaml \ && docker run -d --name promtail \ --network node_network \ -e HOSTNAME=${{ matrix.host_id }}-${{ github.event.inputs.testnet_type }}-${{ GITHUB.RUN_NUMBER }} \ -v /var/log:/var/log \ - -v /home/obscuro/promtail:/etc/promtail \ + -v /home/obscuro/metrics:/etc/promtail \ -v /var/lib/docker/containers:/var/lib/docker/containers:ro \ -v /var/run/docker.sock:/var/run/docker.sock \ grafana/promtail:latest \ -config.file=/etc/promtail/promtail-config.yaml -config.expand-env=true \ + && docker volume create prometheus-data \ + && docker run -d --name prometheus \ + --network node_network \ + -p 9090:9090 \ + -v /home/obscuro/metrics/prometheus.yaml:/etc/prometheus/prometheus.yml \ + -v prometheus-data:/prometheus \ + prom/prometheus:latest \ + --config.file=/etc/prometheus/prometheus.yml \ + && docker run -d --name node_exporter \ + --network node_network \ + -p 9100:9100 \ + --pid="host" \ + -v /:/host:ro \ + quay.io/prometheus/node-exporter:latest \ + --path.rootfs=/host \ + && docker run -d --name cadvisor \ + --network node_network \ + -p 8080:8080 \ + --privileged \ + -v /:/rootfs:ro \ + -v /var/run:/var/run:ro \ + -v /sys:/sys:ro \ + -v /var/lib/docker/:/var/lib/docker:ro \ + -v /dev/disk/:/dev/disk:ro \ + gcr.io/cadvisor/cadvisor:latest \ && cd /home/obscuro/go-obscuro/ \ && sudo go run /home/obscuro/go-obscuro/go/node/cmd \ -is_genesis=${{ matrix.is_genesis }} \ @@ -407,4 +467,4 @@ jobs: - name: 'Send a repository dispatch to obscuro-test on deployment of testnet' if: ${{ (github.event.inputs.testnet_type == 'uat-testnet') }} run: | - curl -XPOST -H "Authorization: Bearer ${{ secrets.GH_TOKEN }}" -H "Accept:application/vnd.github" -H "Content-Type:application/json" 
https://api.github.com/repos/ten-protocol/ten-test/dispatches --data '{ "event_type": "uat_testnet_deployed", "client_payload": { "ref": "${{ github.ref_name }}" }' + curl -XPOST -H "Authorization: Bearer ${{ secrets.GH_TOKEN }}" -H "Accept:application/vnd.github" -H "Content-Type:application/json" https://api.github.com/repos/ten-protocol/ten-test/dispatches --data '{ "event_type": "uat_testnet_deployed", "client_payload": { "ref": "${{ github.ref_name }}" } }' \ No newline at end of file diff --git a/.github/workflows/manual-deploy-testnet-validator.yml b/.github/workflows/manual-deploy-testnet-validator.yml index 74bf07017a..97d1585206 100644 --- a/.github/workflows/manual-deploy-testnet-validator.yml +++ b/.github/workflows/manual-deploy-testnet-validator.yml @@ -148,7 +148,7 @@ jobs: --scripts 'mkdir -p /home/obscuro \ && git clone --depth 1 -b ${{ env.BRANCH_NAME }} https://github.com/ten-protocol/go-ten.git /home/obscuro/go-obscuro \ && docker network create --driver bridge node_network || true \ - && mkdir -p /home/obscuro/promtail \ + && mkdir -p /home/obscuro/metrics \ && echo " server: http_listen_port: 9080 @@ -158,7 +158,7 @@ jobs: filename: /tmp/positions.yaml clients: - - url: ${{ vars.METRICS_URI }} + - url: ${{ vars.METRICS_URI }}/loki/api/v1/push batchwait: 3s batchsize: 1048576 tls_config: @@ -182,16 +182,75 @@ jobs: target_label: \"job\" - replacement: ${{ vars.AZURE_RESOURCE_PREFIX }}-${{ github.event.inputs.node_id }}-${{ GITHUB.RUN_NUMBER }} target_label: "node_name" - " > /home/obscuro/promtail/promtail-config.yaml \ + " > /home/obscuro/metrics/promtail-config.yaml \ + && echo " + global: + scrape_interval: 15s + evaluation_interval: 15s + remote_write: + - url: ${{ vars.METRICS_URI }}/prometheus/metrics/api/v1/write + tls_config: + insecure_skip_verify: true + basic_auth: + username: ${{ secrets.LOKI_USER }} + password: ${{ secrets.LOKI_PASSWORD }} + scrape_configs: + # Node metrics + - job_name: node-${{ github.event.inputs.testnet_type 
}}-validator-${{ GITHUB.RUN_NUMBER }} + scrape_interval: 5s # Frequent scrapes for node metrics + static_configs: + - targets: + - node_exporter:9100 # Node Exporter instance + relabel_configs: + - source_labels: [job] + target_label: 'node' + replacement: node-${{ github.event.inputs.testnet_type }}-validator-${{ GITHUB.RUN_NUMBER }} + + # Container metrics + - job_name: container-${{ github.event.inputs.testnet_type }}-validator-${{ GITHUB.RUN_NUMBER }} + scrape_interval: 5s + static_configs: + - targets: + - cadvisor:8080 # cAdvisor instance for container metrics + relabel_configs: + - source_labels: [job] + target_label: 'node' + replacement: container-${{ github.event.inputs.testnet_type }}-validator-${{ GITHUB.RUN_NUMBER }} + " > /home/obscuro/metrics/prometheus.yaml \ && docker run -d --name promtail \ --network node_network \ -e HOSTNAME=${{ vars.AZURE_RESOURCE_PREFIX }}-${{ github.event.inputs.node_id }}-${{ GITHUB.RUN_NUMBER }} \ -v /var/log:/var/log \ - -v /home/obscuro/promtail:/etc/promtail \ + -v /home/obscuro/metrics:/etc/promtail \ -v /var/lib/docker/containers:/var/lib/docker/containers:ro \ -v /var/run/docker.sock:/var/run/docker.sock \ grafana/promtail:latest \ -config.file=/etc/promtail/promtail-config.yaml -config.expand-env=true \ + && docker volume create prometheus-data \ + && docker run -d --name prometheus \ + --network node_network \ + -p 9090:9090 \ + -v /home/obscuro/metrics/prometheus.yaml:/etc/prometheus/prometheus.yml \ + -v prometheus-data:/prometheus \ + prom/prometheus:latest \ + --config.file=/etc/prometheus/prometheus.yml \ + && docker run -d --name node_exporter \ + --network node_network \ + -p 9100:9100 \ + --pid="host" \ + -v /:/host:ro \ + quay.io/prometheus/node-exporter:latest \ + --path.rootfs=/host \ + && docker run -d --name cadvisor \ + --network node_network \ + -p 8080:8080 \ + --privileged \ + -v /:/rootfs:ro \ + -v /var/run:/var/run:ro \ + -v /sys:/sys:ro \ + -v /var/lib/docker/:/var/lib/docker:ro \ + -v 
/dev/disk/:/dev/disk:ro \ + gcr.io/cadvisor/cadvisor:latest \ && cd /home/obscuro/go-obscuro/ \ && sudo go run /home/obscuro/go-obscuro/go/node/cmd \ -is_genesis=false \ diff --git a/.github/workflows/runner-scripts/wait-node-healthy.sh b/.github/workflows/runner-scripts/wait-node-healthy.sh index 2af0673d6f..6f3e8e8656 100755 --- a/.github/workflows/runner-scripts/wait-node-healthy.sh +++ b/.github/workflows/runner-scripts/wait-node-healthy.sh @@ -60,7 +60,7 @@ while ! [[ $net_status = *\"OverallHealth\":true* ]] do net_status=$(curl --request POST "http://${host}:${port}" \ --header 'Content-Type: application/json' \ - --data-raw '{ "method":"obscuro_health", "params":null, "id":1, "jsonrpc":"2.0" }') || true + --data-raw '{ "method":"ten_health", "params":null, "id":1, "jsonrpc":"2.0" }') || true echo $net_status sleep 2 diff --git a/contracts/scripts/sequencer/001_grant_sequencers.ts b/contracts/scripts/sequencer/001_grant_sequencers.ts new file mode 100644 index 0000000000..2f56a101ab --- /dev/null +++ b/contracts/scripts/sequencer/001_grant_sequencers.ts @@ -0,0 +1,46 @@ +import { ethers } from "hardhat"; +import { ManagementContract } from "../../typechain-types"; + +const grantSequencerStatus = async function (mgmtContractAddr: string, enclaveIDsStr: string) { + const managementContract = await ethers.getContractAt( + "ManagementContract", + mgmtContractAddr + ) as ManagementContract; + + const enclaveAddresses = enclaveIDsStr.split(","); + + for (const enclaveAddr of enclaveAddresses) { + console.log(`Granting sequencer status to enclave: ${enclaveAddr}`); + const tx = await managementContract.GrantSequencerEnclave(enclaveAddr); + await tx.wait(); + console.log(`Successfully granted sequencer status to: ${enclaveAddr}`); + + // check they've been added + const isSequencer = await managementContract.IsSequencerEnclave(enclaveAddr); + if (!isSequencer) { + throw new Error(`Failed to verify sequencer status for enclave: ${enclaveAddr}. 
IsSequencerEnclave returned false`); + } + console.log(`Verified sequencer status for enclave: ${enclaveAddr}`); + } + + console.log("\nFinal verification of all sequencer permissions:"); + for (const enclaveAddr of enclaveAddresses) { + const isSequencer = await managementContract.IsSequencerEnclave(enclaveAddr); + console.log(`Enclave ${enclaveAddr}: IsSequencerEnclave = ${isSequencer}`); + } +}; + +const args = process.argv.slice(2); +if (args.length !== 2) { + throw new Error("Required arguments: "); +} + +const [mgmtContractAddr, enclaveIDs] = args as [string, string]; +grantSequencerStatus(mgmtContractAddr, enclaveIDs) + .then(() => process.exit(0)) + .catch((error) => { + console.error(error); + process.exit(1); + }); + +export default grantSequencerStatus; \ No newline at end of file diff --git a/go/common/host/services.go b/go/common/host/services.go index da619c80ee..3b68d7e31b 100644 --- a/go/common/host/services.go +++ b/go/common/host/services.go @@ -2,6 +2,7 @@ package host import ( "context" + "github.com/ten-protocol/go-ten/go/common/l1" "math/big" "github.com/ten-protocol/go-ten/go/responses" @@ -10,7 +11,6 @@ import ( gethcommon "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ten-protocol/go-ten/go/common" - "github.com/ten-protocol/go-ten/go/ethadapter" ) // service names - these are the keys used to register known services with the host @@ -80,7 +80,7 @@ type P2PBatchRequestHandler interface { type L1BlockRepository interface { // Subscribe will register a block handler to receive new blocks as they arrive, returns unsubscribe func Subscribe(handler L1BlockHandler) func() - + // FetchBlockByHeight returns a block at a given height FetchBlockByHeight(height *big.Int) (*types.Block, error) // FetchNextBlock returns the next canonical block after a given block hash // It returns the new block, a bool which is true if the block is the current L1 head and a bool if the block is on a different fork to 
prevBlock @@ -104,9 +104,9 @@ type L1Publisher interface { // RequestSecret will send a management contract transaction to request a secret from the enclave, returning the L1 head at time of sending RequestSecret(report *common.AttestationReport) (gethcommon.Hash, error) // ExtractRelevantTenTransactions will return all TEN relevant tx from an L1 block - ExtractRelevantTenTransactions(block *types.Block, receipts types.Receipts) ([]*common.TxAndReceiptAndBlobs, []*ethadapter.L1RollupTx, []*ethadapter.L1SetImportantContractsTx) + ExtractRelevantTenTransactions(block *types.Block, receipts types.Receipts) ([]*common.TxAndReceiptAndBlobs, []*l1.L1RollupTx, []*l1.L1SetImportantContractsTx) // FindSecretResponseTx will return the secret response tx from an L1 block - FindSecretResponseTx(block *types.Block) []*ethadapter.L1RespondSecretTx + FindSecretResponseTx(block *types.Block) []*l1.L1RespondSecretTx // PublishRollup will create and publish a rollup tx to the management contract - fire and forget we don't wait for receipt // todo (#1624) - With a single sequencer, it is problematic if rollup publication fails; handle this case better PublishRollup(producedRollup *common.ExtRollup) diff --git a/go/common/l1/l1_transaction.go b/go/common/l1/l1_transaction.go new file mode 100644 index 0000000000..73d993476b --- /dev/null +++ b/go/common/l1/l1_transaction.go @@ -0,0 +1,5 @@ +package l1 + +// L1Transaction is an abstraction that transforms an Ethereum transaction into a format that can be consumed more +// easily by TEN. 
+type L1Transaction interface{} diff --git a/go/common/types.go b/go/common/types.go index dea6606040..cfc126909b 100644 --- a/go/common/types.go +++ b/go/common/types.go @@ -2,6 +2,8 @@ package common import ( "fmt" + "github.com/ethereum/go-ethereum/crypto/kzg4844" + "github.com/ten-protocol/go-ten/go/common/l1" "math/big" "github.com/ethereum/go-ethereum/common" @@ -188,3 +190,31 @@ func (cf *ChainFork) String() string { func MaskedSender(address L2Address) L2Address { return common.BigToAddress(big.NewInt(0).Sub(address.Big(), big.NewInt(1))) } + +type L1TxType int + +const ( + RollupTx L1TxType = iota + SecretRequestTx + InitialiseSecretTx + CrossChainMessageTx + CrossChainValueTranserTx + SequencerAddedTx + SetImportantContractsTx +) + +// ProcessedL1Data is submitted to the enclave by the guardian +type ProcessedL1Data struct { + BlockHeader *types.Header + Events map[L1TxType][]*L1TxData +} + +// L1TxData represents an L1 transaction that's relevant to us +type L1TxData struct { + Type l1.L1Transaction + Transaction *types.Transaction + Receipt *types.Receipt + Blobs []*kzg4844.Blob // Only populated for blob transactions + CrossChainMessages *CrossChainMessages // Only populated for xchain messages + ValueTransfers *ValueTransferEvents // Only populated for xchain transfers +} diff --git a/go/enclave/components/batch_registry.go b/go/enclave/components/batch_registry.go index aabc9e7ea5..3df57a0765 100644 --- a/go/enclave/components/batch_registry.go +++ b/go/enclave/components/batch_registry.go @@ -8,7 +8,9 @@ import ( "sync" "time" + "github.com/ten-protocol/go-ten/go/common/gethencoding" "github.com/ten-protocol/go-ten/go/common/measure" + enclaveconfig "github.com/ten-protocol/go-ten/go/enclave/config" "github.com/ten-protocol/go-ten/go/common" @@ -34,9 +36,10 @@ type batchRegistry struct { callbackMutex sync.RWMutex healthTimeout time.Duration lastExecutedBatch *async.Timestamp + ethChainAdapter *EthChainAdapter } -func NewBatchRegistry(storage 
storage.Storage, logger gethlog.Logger) BatchRegistry { +func NewBatchRegistry(storage storage.Storage, config *enclaveconfig.EnclaveConfig, gethEncodingService gethencoding.EncodingService, logger gethlog.Logger) BatchRegistry { var headBatchSeq *big.Int headBatch, err := storage.FetchHeadBatchHeader(context.Background()) if err != nil { @@ -49,14 +52,20 @@ func NewBatchRegistry(storage storage.Storage, logger gethlog.Logger) BatchRegis } else { headBatchSeq = headBatch.SequencerOrderNo } - - return &batchRegistry{ + br := &batchRegistry{ storage: storage, headBatchSeq: headBatchSeq, logger: logger, healthTimeout: time.Minute, lastExecutedBatch: async.NewAsyncTimestamp(time.Now().Add(-time.Minute)), } + + br.ethChainAdapter = NewEthChainAdapter(big.NewInt(config.ObscuroChainID), br, storage, gethEncodingService, *config, logger) + return br +} + +func (br *batchRegistry) EthChain() *EthChainAdapter { + return br.ethChainAdapter } func (br *batchRegistry) HeadBatchSeq() *big.Int { @@ -86,7 +95,7 @@ func (br *batchRegistry) OnL1Reorg(_ *BlockIngestionType) { br.headBatchSeq = headBatch.SequencerOrderNo } -func (br *batchRegistry) OnBatchExecuted(batchHeader *common.BatchHeader, txExecResults []*core.TxExecResult) { +func (br *batchRegistry) OnBatchExecuted(batchHeader *common.BatchHeader, txExecResults []*core.TxExecResult) error { defer core.LogMethodDuration(br.logger, measure.NewStopwatch(), "OnBatchExecuted", log.BatchHashKey, batchHeader.Hash()) br.callbackMutex.RLock() defer br.callbackMutex.RUnlock() @@ -96,12 +105,17 @@ func (br *batchRegistry) OnBatchExecuted(batchHeader *common.BatchHeader, txExec // this function is called after a batch was successfully executed. This is a catastrophic failure br.logger.Crit("should not happen. cannot get transactions. 
", log.ErrKey, err) } + batch := &core.Batch{ + Header: batchHeader, + Transactions: txs, + } + err = br.ethChainAdapter.IngestNewBlock(batch) + if err != nil { + return fmt.Errorf("failed to feed batch into the virtual eth chain. cause %w", err) + } + br.headBatchSeq = batchHeader.SequencerOrderNo if br.batchesCallback != nil { - batch := &core.Batch{ - Header: batchHeader, - Transactions: txs, - } txReceipts := make([]*types.Receipt, len(txExecResults)) for i, txExecResult := range txExecResults { txReceipts[i] = txExecResult.Receipt @@ -110,6 +124,7 @@ func (br *batchRegistry) OnBatchExecuted(batchHeader *common.BatchHeader, txExec } br.lastExecutedBatch.Mark() + return nil } func (br *batchRegistry) HasGenesisBatch() (bool, error) { diff --git a/go/enclave/evm/ethchainadapter/eth_chainadapter.go b/go/enclave/components/eth_chainadapter.go similarity index 93% rename from go/enclave/evm/ethchainadapter/eth_chainadapter.go rename to go/enclave/components/eth_chainadapter.go index 88ff2e4646..38cbe88b44 100644 --- a/go/enclave/evm/ethchainadapter/eth_chainadapter.go +++ b/go/enclave/components/eth_chainadapter.go @@ -1,9 +1,11 @@ -package ethchainadapter +package components import ( "context" "math/big" + "github.com/ten-protocol/go-ten/go/enclave/evm/ethchainadapter" + gethcommon "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/txpool/legacypool" @@ -11,7 +13,6 @@ import ( "github.com/ethereum/go-ethereum/params" "github.com/ten-protocol/go-ten/go/common/gethencoding" "github.com/ten-protocol/go-ten/go/common/log" - "github.com/ten-protocol/go-ten/go/enclave/components" enclaveconfig "github.com/ten-protocol/go-ten/go/enclave/config" "github.com/ten-protocol/go-ten/go/enclave/core" "github.com/ten-protocol/go-ten/go/enclave/storage" @@ -24,7 +25,7 @@ import ( // EthChainAdapter is an obscuro wrapper around the ethereum core.Blockchain object type EthChainAdapter struct { newHeadChan chan 
gethcore.ChainHeadEvent - batchRegistry components.BatchRegistry + batchRegistry BatchRegistry gethEncoding gethencoding.EncodingService storage storage.Storage config enclaveconfig.EnclaveConfig @@ -33,7 +34,7 @@ type EthChainAdapter struct { } // NewEthChainAdapter returns a new instance -func NewEthChainAdapter(chainID *big.Int, batchRegistry components.BatchRegistry, storage storage.Storage, gethEncoding gethencoding.EncodingService, config enclaveconfig.EnclaveConfig, logger gethlog.Logger) *EthChainAdapter { +func NewEthChainAdapter(chainID *big.Int, batchRegistry BatchRegistry, storage storage.Storage, gethEncoding gethencoding.EncodingService, config enclaveconfig.EnclaveConfig, logger gethlog.Logger) *EthChainAdapter { return &EthChainAdapter{ newHeadChan: make(chan gethcore.ChainHeadEvent), batchRegistry: batchRegistry, @@ -47,7 +48,7 @@ func NewEthChainAdapter(chainID *big.Int, batchRegistry components.BatchRegistry // Config retrieves the chain's fork configuration. func (e *EthChainAdapter) Config() *params.ChainConfig { - return ChainParams(e.chainID) + return ethchainadapter.ChainParams(e.chainID) } // CurrentBlock returns the current head of the chain. 
diff --git a/go/enclave/components/interfaces.go b/go/enclave/components/interfaces.go index 24c488044d..cb7f8507e5 100644 --- a/go/enclave/components/interfaces.go +++ b/go/enclave/components/interfaces.go @@ -103,7 +103,7 @@ type BatchRegistry interface { SubscribeForExecutedBatches(func(*core.Batch, types.Receipts)) UnsubscribeFromBatches() - OnBatchExecuted(batch *common.BatchHeader, txExecResults []*core.TxExecResult) + OnBatchExecuted(batch *common.BatchHeader, txExecResults []*core.TxExecResult) error OnL1Reorg(*BlockIngestionType) // HasGenesisBatch - returns if genesis batch is available yet or not, or error in case @@ -112,6 +112,8 @@ type BatchRegistry interface { HeadBatchSeq() *big.Int + EthChain() *EthChainAdapter + HealthCheck() (bool, error) } diff --git a/go/enclave/components/rollup_compression.go b/go/enclave/components/rollup_compression.go index 270b6ff936..4bdff62194 100644 --- a/go/enclave/components/rollup_compression.go +++ b/go/enclave/components/rollup_compression.go @@ -474,7 +474,11 @@ func (rc *RollupCompression) executeAndSaveIncompleteBatches(ctx context.Context if err != nil { return err } - rc.batchRegistry.OnBatchExecuted(genBatch.Header, nil) + + err = rc.batchRegistry.OnBatchExecuted(genBatch.Header, nil) + if err != nil { + return err + } rc.logger.Info("Stored genesis", log.BatchHashKey, genBatch.Hash()) parentHash = genBatch.Hash() @@ -518,7 +522,10 @@ func (rc *RollupCompression) executeAndSaveIncompleteBatches(ctx context.Context if err != nil { return err } - rc.batchRegistry.OnBatchExecuted(computedBatch.Batch.Header, nil) + err = rc.batchRegistry.OnBatchExecuted(computedBatch.Batch.Header, nil) + if err != nil { + return err + } parentHash = computedBatch.Batch.Hash() } diff --git a/go/enclave/enclave.go b/go/enclave/enclave.go index bd528894ef..ec49f0a471 100644 --- a/go/enclave/enclave.go +++ b/go/enclave/enclave.go @@ -7,7 +7,6 @@ import ( "fmt" "math/big" - "github.com/ten-protocol/go-ten/go/common/compression" 
enclaveconfig "github.com/ten-protocol/go-ten/go/enclave/config" "github.com/ten-protocol/go-ten/go/enclave/evm/ethchainadapter" "github.com/ten-protocol/go-ten/go/enclave/gas" @@ -16,9 +15,6 @@ import ( "github.com/ten-protocol/go-ten/go/enclave/txpool" "github.com/ten-protocol/go-ten/go/enclave/components" - "github.com/ten-protocol/go-ten/go/enclave/nodetype" - - "github.com/ten-protocol/go-ten/go/enclave/l2chain" "github.com/ten-protocol/go-ten/go/responses" "github.com/ten-protocol/go-ten/go/enclave/genesis" @@ -26,18 +22,14 @@ import ( "github.com/ten-protocol/go-ten/go/common/errutil" "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto/ecies" "github.com/ten-protocol/go-ten/go/common" "github.com/ten-protocol/go-ten/go/common/gethencoding" "github.com/ten-protocol/go-ten/go/common/log" "github.com/ten-protocol/go-ten/go/common/stopcontrol" "github.com/ten-protocol/go-ten/go/common/tracers" "github.com/ten-protocol/go-ten/go/enclave/crosschain" - "github.com/ten-protocol/go-ten/go/enclave/crypto" - "github.com/ten-protocol/go-ten/go/enclave/debugger" "github.com/ten-protocol/go-ten/go/enclave/events" - "github.com/ten-protocol/go-ten/go/enclave/rpc" "github.com/ten-protocol/go-ten/go/ethadapter/mgmtcontractlib" _ "github.com/ten-protocol/go-ten/go/common/tracers/native" // make sure the tracers are loaded @@ -62,139 +54,66 @@ func NewEnclave(config *enclaveconfig.EnclaveConfig, genesis *genesis.Genesis, m jsonConfig, _ := json.MarshalIndent(config, "", " ") logger.Info("Creating enclave service with following config", log.CfgKey, string(jsonConfig)) - // todo (#1053) - add the delay: N hashes + chainConfig := ethchainadapter.ChainParams(big.NewInt(config.ObscuroChainID)) // Initialise the database cachingService := storage.NewCacheService(logger, config.UseInMemoryDB) - chainConfig := ethchainadapter.ChainParams(big.NewInt(config.ObscuroChainID)) storage := storage.NewStorageFromConfig(config, cachingService, chainConfig, 
logger) - // todo (#1474) - make sure the enclave cannot be started in production with WillAttest=false - var attestationProvider components.AttestationProvider - if config.WillAttest { - attestationProvider = &components.EgoAttestationProvider{} - } else { - logger.Info("WARNING - Attestation is not enabled, enclave will not create a verified attestation report.") - attestationProvider = &components.DummyAttestationProvider{} - } - - enclaveKeyService := components.NewEnclaveKeyService(storage, logger) // attempt to fetch the enclave key from the database // the enclave key is part of the attestation and identifies the current enclave + enclaveKeyService := components.NewEnclaveKeyService(storage, logger) err := enclaveKeyService.LoadOrCreateEnclaveKey() if err != nil { logger.Crit("Failed to load or create enclave key", "err", err) } gethEncodingService := gethencoding.NewGethEncodingService(storage, cachingService, logger) - dataEncryptionService := crypto.NewDataEncryptionService(logger) - dataCompressionService := compression.NewBrotliDataCompressionService() crossChainProcessors := crosschain.New(&config.MessageBusAddress, storage, big.NewInt(config.ObscuroChainID), logger) + + // initialise system contracts scb := system.NewSystemContractCallbacks(storage, &config.SystemContractOwner, logger) + err = scb.Load() + if err != nil && !errors.Is(err, errutil.ErrNotFound) { + logger.Crit("failed to load system contracts", log.ErrKey, err) + } gasOracle := gas.NewGasOracle() blockProcessor := components.NewBlockProcessor(storage, crossChainProcessors, gasOracle, logger) - registry := components.NewBatchRegistry(storage, logger) - batchExecutor := components.NewBatchExecutor(storage, registry, *config, gethEncodingService, crossChainProcessors, genesis, gasOracle, chainConfig, config.GasBatchExecutionLimit, scb, logger) - sigVerifier, err := components.NewSignatureValidator(storage) - rProducer := components.NewRollupProducer(enclaveKeyService.EnclaveID(), storage, 
registry, logger) + batchRegistry := components.NewBatchRegistry(storage, config, gethEncodingService, logger) + batchExecutor := components.NewBatchExecutor(storage, batchRegistry, *config, gethEncodingService, crossChainProcessors, genesis, gasOracle, chainConfig, config.GasBatchExecutionLimit, scb, logger) + + // ensure cached chain state data is up-to-date using the persisted batch data + err = restoreStateDBCache(context.Background(), storage, batchRegistry, batchExecutor, genesis, logger) if err != nil { - logger.Crit("Could not initialise the signature validator", log.ErrKey, err) + logger.Crit("failed to resync L2 chain state DB after restart", log.ErrKey, err) } - rollupCompression := components.NewRollupCompression(registry, batchExecutor, dataEncryptionService, dataCompressionService, storage, gethEncodingService, chainConfig, logger) - rConsumer := components.NewRollupConsumer(mgmtContractLib, registry, rollupCompression, storage, logger, sigVerifier) - sharedSecretProcessor := components.NewSharedSecretProcessor(mgmtContractLib, attestationProvider, enclaveKeyService.EnclaveID(), storage, logger) - blockchain := ethchainadapter.NewEthChainAdapter(big.NewInt(config.ObscuroChainID), registry, storage, gethEncodingService, *config, logger) - // todo - mempool for backup sequencer needs to store all txs - mempool, err := txpool.NewTxPool(blockchain, config.MinGasPrice, logger) + // start the mempool in validate only. 
Based on the config, it might become sequencer + mempool, err := txpool.NewTxPool(batchRegistry.EthChain(), config.MinGasPrice, true, logger) if err != nil { logger.Crit("unable to init eth tx pool", log.ErrKey, err) } - var service nodetype.NodeType - if config.NodeType == common.ActiveSequencer { - // Todo - this is temporary - until the host calls `AddSequencer` - err := storage.StoreNewEnclave(context.Background(), enclaveKeyService.EnclaveID(), enclaveKeyService.PublicKey()) - if err != nil { - logger.Crit("Failed to store enclave key", log.ErrKey, err) - return nil - } - err = storage.StoreNodeType(context.Background(), enclaveKeyService.EnclaveID(), common.ActiveSequencer) - if err != nil { - logger.Crit("Failed to store node type", log.ErrKey, err) - return nil - } - - // todo - next PR - update the service logic to be swappable - service = nodetype.NewSequencer( - blockProcessor, - batchExecutor, - registry, - rProducer, - rollupCompression, - gethEncodingService, - logger, - chainConfig, - enclaveKeyService, - mempool, - storage, - dataEncryptionService, - dataCompressionService, - nodetype.SequencerSettings{ - MaxBatchSize: config.MaxBatchSize, - MaxRollupSize: config.MaxRollupSize, - GasPaymentAddress: config.GasPaymentAddress, - BatchGasLimit: config.GasBatchExecutionLimit, - BaseFee: config.BaseFee, - }, - blockchain, - ) + subscriptionManager := events.NewSubscriptionManager(storage, batchRegistry, config.ObscuroChainID, logger) + + // todo (#1474) - make sure the enclave cannot be started in production with WillAttest=false + var attestationProvider components.AttestationProvider + if config.WillAttest { + attestationProvider = &components.EgoAttestationProvider{} } else { - service = nodetype.NewValidator( - blockProcessor, - batchExecutor, - registry, - chainConfig, - storage, - sigVerifier, - mempool, - logger, - ) + logger.Info("WARNING - Attestation is not enabled, enclave will not create a verified attestation report.") + attestationProvider = 
&components.DummyAttestationProvider{} } - chain := l2chain.NewChain( - storage, - *config, - gethEncodingService, - chainConfig, - genesis, - logger, - registry, - config.GasLocalExecutionCapFlag, - ) - obscuroKey := crypto.GetObscuroKey(logger) - rpcEncryptionManager := rpc.NewEncryptionManager(ecies.ImportECDSA(obscuroKey), storage, cachingService, registry, crossChainProcessors, service, config, gasOracle, storage, blockProcessor, chain, logger) - subscriptionManager := events.NewSubscriptionManager(storage, registry, config.ObscuroChainID, logger) - - // ensure cached chain state data is up-to-date using the persisted batch data - err = restoreStateDBCache(context.Background(), storage, registry, batchExecutor, genesis, logger) - if err != nil { - logger.Crit("failed to resync L2 chain state DB after restart", log.ErrKey, err) - } + // signal to stop the enclave + stopControl := stopcontrol.New() - err = scb.Load() - if err != nil && !errors.Is(err, errutil.ErrNotFound) { - logger.Crit("failed to load system contracts", log.ErrKey, err) - } + initService := NewEnclaveInitService(config, storage, logger, blockProcessor, enclaveKeyService, attestationProvider) + adminService := NewEnclaveAdminService(config, storage, logger, blockProcessor, batchRegistry, batchExecutor, gethEncodingService, stopControl, subscriptionManager, enclaveKeyService, mempool, chainConfig, mgmtContractLib, attestationProvider) + rpcService := NewEnclaveRPCService(config, storage, logger, blockProcessor, batchRegistry, gethEncodingService, cachingService, mempool, chainConfig, crossChainProcessors, scb, subscriptionManager, genesis, gasOracle) - // TODO ensure debug is allowed/disallowed - debug := debugger.New(chain, storage, chainConfig) - stopControl := stopcontrol.New() - initService := NewEnclaveInitService(config, storage, blockProcessor, logger, enclaveKeyService, attestationProvider) - adminService := NewEnclaveAdminService(config, logger, blockProcessor, service, 
sharedSecretProcessor, rConsumer, registry, dataEncryptionService, dataCompressionService, storage, gethEncodingService, stopControl, subscriptionManager, enclaveKeyService) - rpcService := NewEnclaveRPCService(rpcEncryptionManager, registry, subscriptionManager, config, debug, storage, crossChainProcessors, scb) logger.Info("Enclave service created successfully.", log.EnclaveIDKey, enclaveKeyService.EnclaveID()) return &enclaveImpl{ initService: initService, diff --git a/go/enclave/enclave_admin_service.go b/go/enclave/enclave_admin_service.go index 0c67286757..6fe99cd4eb 100644 --- a/go/enclave/enclave_admin_service.go +++ b/go/enclave/enclave_admin_service.go @@ -4,6 +4,9 @@ import ( "context" "errors" "fmt" + "github.com/ethereum/go-ethereum/params" + "github.com/ten-protocol/go-ten/go/enclave/txpool" + "github.com/ten-protocol/go-ten/go/ethadapter/mgmtcontractlib" "math/big" "sync" "time" @@ -35,6 +38,8 @@ type enclaveAdminService struct { mainMutex sync.Mutex // serialises all data ingestion or creation to avoid weird races logger gethlog.Logger l1BlockProcessor components.L1BlockProcessor + validatorService nodetype.Validator + sequencerService nodetype.ActiveSequencer service nodetype.NodeType sharedSecretProcessor *components.SharedSecretProcessor rollupConsumer components.RollupConsumer @@ -47,9 +52,10 @@ type enclaveAdminService struct { profiler *profiler.Profiler subscriptionManager *events.SubscriptionManager enclaveKeyService *components.EnclaveKeyService + mempool *txpool.TxPool } -func NewEnclaveAdminService(config *enclaveconfig.EnclaveConfig, logger gethlog.Logger, l1BlockProcessor components.L1BlockProcessor, service nodetype.NodeType, sharedSecretProcessor *components.SharedSecretProcessor, rollupConsumer components.RollupConsumer, registry components.BatchRegistry, dataEncryptionService crypto.DataEncryptionService, dataCompressionService compression.DataCompressionService, storage storage.Storage, gethEncodingService 
gethencoding.EncodingService, stopControl *stopcontrol.StopControl, subscriptionManager *events.SubscriptionManager, enclaveKeyService *components.EnclaveKeyService) common.EnclaveAdmin { +func NewEnclaveAdminService(config *enclaveconfig.EnclaveConfig, storage storage.Storage, logger gethlog.Logger, blockProcessor components.L1BlockProcessor, registry components.BatchRegistry, batchExecutor components.BatchExecutor, gethEncodingService gethencoding.EncodingService, stopControl *stopcontrol.StopControl, subscriptionManager *events.SubscriptionManager, enclaveKeyService *components.EnclaveKeyService, mempool *txpool.TxPool, chainConfig *params.ChainConfig, mgmtContractLib mgmtcontractlib.MgmtContractLib, attestationProvider components.AttestationProvider) common.EnclaveAdmin { var prof *profiler.Profiler // don't run a profiler on an attested enclave if !config.WillAttest && config.ProfilerEnabled { @@ -59,13 +65,38 @@ func NewEnclaveAdminService(config *enclaveconfig.EnclaveConfig, logger gethlog. 
logger.Crit("unable to start the profiler", log.ErrKey, err) } } + sharedSecretProcessor := components.NewSharedSecretProcessor(mgmtContractLib, attestationProvider, enclaveKeyService.EnclaveID(), storage, logger) + sigVerifier, err := components.NewSignatureValidator(storage) + if err != nil { + logger.Crit("Could not initialise the signature validator", log.ErrKey, err) + } + + dataEncryptionService := crypto.NewDataEncryptionService(logger) + dataCompressionService := compression.NewBrotliDataCompressionService() + + rollupCompression := components.NewRollupCompression(registry, batchExecutor, dataEncryptionService, dataCompressionService, storage, gethEncodingService, chainConfig, logger) + rollupProducer := components.NewRollupProducer(enclaveKeyService.EnclaveID(), storage, registry, logger) + rollupConsumer := components.NewRollupConsumer(mgmtContractLib, registry, rollupCompression, storage, logger, sigVerifier) + + seqSettings := nodetype.SequencerSettings{ + MaxBatchSize: config.MaxBatchSize, + MaxRollupSize: config.MaxRollupSize, + GasPaymentAddress: config.GasPaymentAddress, + BatchGasLimit: config.GasBatchExecutionLimit, + BaseFee: config.BaseFee, + } + + sequencerService := nodetype.NewSequencer(blockProcessor, batchExecutor, registry, rollupProducer, rollupCompression, gethEncodingService, logger, chainConfig, enclaveKeyService, mempool, storage, dataEncryptionService, dataCompressionService, seqSettings) + validatorService := nodetype.NewValidator(blockProcessor, batchExecutor, registry, chainConfig, storage, sigVerifier, mempool, logger) - return &enclaveAdminService{ + eas := &enclaveAdminService{ config: config, mainMutex: sync.Mutex{}, logger: logger, - l1BlockProcessor: l1BlockProcessor, - service: service, + l1BlockProcessor: blockProcessor, + service: validatorService, + sequencerService: sequencerService, + validatorService: validatorService, sharedSecretProcessor: sharedSecretProcessor, rollupConsumer: rollupConsumer, registry: registry, @@ 
-77,27 +108,47 @@ func NewEnclaveAdminService(config *enclaveconfig.EnclaveConfig, logger gethlog. profiler: prof, subscriptionManager: subscriptionManager, enclaveKeyService: enclaveKeyService, + mempool: mempool, + } + + // if the current enclave was already marked as an active/backup sequencer, it needs to set the right mempool mode + if eas.isBackupSequencer(context.Background()) || eas.isActiveSequencer(context.Background()) { + mempool.SetValidateMode(false) + } + if eas.isActiveSequencer(context.Background()) { + eas.service = sequencerService + } + + // Todo - this is temporary - until the host calls `AddSequencer` instead of relying on the config - which can be removed + if config.NodeType == common.ActiveSequencer { + err := storage.StoreNewEnclave(context.Background(), enclaveKeyService.EnclaveID(), enclaveKeyService.PublicKey()) + if err != nil { + logger.Crit("Failed to store enclave key", log.ErrKey, err) + return nil + } + err = eas.MakeActive() + if err != nil { + logger.Crit("Failed to create sequencer", log.ErrKey, err) + } } + + return eas } func (e *enclaveAdminService) AddSequencer(id common.EnclaveID, proof types.Receipt) common.SystemError { e.mainMutex.Lock() defer e.mainMutex.Unlock() - // by default all enclaves start their life as a validator + // todo - use the proof - // store in the database the enclave id err := e.storage.StoreNodeType(context.Background(), id, common.BackupSequencer) if err != nil { return responses.ToInternalError(err) } - // compare the id with the current enclaveId and if they match - do something so that the current enclave behaves as a "backup sequencer" - // the host will specifically mark the active enclave - //currentEnclaveId, err := e.initService.EnclaveID(context.Background()) - //if err != nil { - // return err - //} + if e.enclaveKeyService.EnclaveID() == id { + e.mempool.SetValidateMode(false) + } //if currentEnclaveId == id { // todo @@ -111,16 +162,19 @@ func (e *enclaveAdminService) MakeActive() 
common.SystemError { e.mainMutex.Lock() defer e.mainMutex.Unlock() - if !e.isBackupSequencer(context.Background()) { - return fmt.Errorf("only backup sequencer can become active") - } - // todo - // change the node type service - // do something with the mempool - // make some other checks? - // Once we've got the sequencer Enclave IDs permission list monitoring we should include that check here probably. - // We could even make it so that all sequencer enclaves start as backup and it can't be activated until the permissioning is done? + // todo - uncomment once AddSequencer is called by the host + //if !e.isBackupSequencer(context.Background()) { + // return fmt.Errorf("only backup sequencer can become active") + //} + + // todo - remove because this enclave should already be a backup sequencer + e.mempool.SetValidateMode(false) + err := e.storage.StoreNodeType(context.Background(), e.enclaveKeyService.EnclaveID(), common.ActiveSequencer) + if err != nil { + return err + } + e.service = e.sequencerService return nil } @@ -488,12 +542,12 @@ func (e *enclaveAdminService) isValidator(ctx context.Context) bool { //nolint:u func (e *enclaveAdminService) getNodeType(ctx context.Context) common.NodeType { id := e.enclaveKeyService.EnclaveID() - _, nodeType, err := e.storage.GetEnclavePubKey(ctx, id) + attestedEnclave, err := e.storage.GetEnclavePubKey(ctx, id) if err != nil { - e.logger.Crit("could not read enclave pub key", log.ErrKey, err) - return 0 + e.logger.Warn("could not read enclave pub key. 
Defaulting to validator type", log.ErrKey, err) + return common.Validator } - return nodeType + return attestedEnclave.Type } func exportCrossChainData(ctx context.Context, storage storage.Storage, fromSeqNo uint64, toSeqNo uint64) (*common.ExtCrossChainBundle, error) { diff --git a/go/enclave/enclave_init_service.go b/go/enclave/enclave_init_service.go index 9f540783d2..e4c5941f6d 100644 --- a/go/enclave/enclave_init_service.go +++ b/go/enclave/enclave_init_service.go @@ -33,7 +33,7 @@ type enclaveInitService struct { attestationProvider components.AttestationProvider // interface for producing attestation reports and verifying them } -func NewEnclaveInitService(config *enclaveconfig.EnclaveConfig, storage storage.Storage, l1BlockProcessor components.L1BlockProcessor, logger gethlog.Logger, enclaveKeyService *components.EnclaveKeyService, attestationProvider components.AttestationProvider) common.EnclaveInit { +func NewEnclaveInitService(config *enclaveconfig.EnclaveConfig, storage storage.Storage, logger gethlog.Logger, l1BlockProcessor components.L1BlockProcessor, enclaveKeyService *components.EnclaveKeyService, attestationProvider components.AttestationProvider) common.EnclaveInit { return &enclaveInitService{ config: config, storage: storage, diff --git a/go/enclave/enclave_rpc_service.go b/go/enclave/enclave_rpc_service.go index 52480be54d..77cb191eb7 100644 --- a/go/enclave/enclave_rpc_service.go +++ b/go/enclave/enclave_rpc_service.go @@ -7,6 +7,16 @@ import ( "fmt" "math/big" + "github.com/ethereum/go-ethereum/crypto/ecies" + gethlog "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/params" + "github.com/ten-protocol/go-ten/go/common/gethencoding" + "github.com/ten-protocol/go-ten/go/enclave/crypto" + "github.com/ten-protocol/go-ten/go/enclave/gas" + "github.com/ten-protocol/go-ten/go/enclave/genesis" + "github.com/ten-protocol/go-ten/go/enclave/l2chain" + "github.com/ten-protocol/go-ten/go/enclave/txpool" + gethcommon 
"github.com/ethereum/go-ethereum/common" "github.com/ten-protocol/go-ten/go/common" "github.com/ten-protocol/go-ten/go/common/syserr" @@ -32,18 +42,38 @@ type enclaveRPCService struct { storage storage.Storage crossChainProcessors *crosschain.Processors scb system.SystemContractCallbacks + logger gethlog.Logger } -func NewEnclaveRPCService(rpcEncryptionManager *rpc.EncryptionManager, registry components.BatchRegistry, subscriptionManager *events.SubscriptionManager, config *enclaveconfig.EnclaveConfig, debugger *debugger.Debugger, storage storage.Storage, crossChainProcessors *crosschain.Processors, scb system.SystemContractCallbacks) common.EnclaveClientRPC { +func NewEnclaveRPCService(config *enclaveconfig.EnclaveConfig, storage storage.Storage, logger gethlog.Logger, blockProcessor components.L1BlockProcessor, batchRegistry components.BatchRegistry, gethEncodingService gethencoding.EncodingService, cachingService *storage.CacheService, mempool *txpool.TxPool, chainConfig *params.ChainConfig, crossChainProcessors *crosschain.Processors, scb system.SystemContractCallbacks, subscriptionManager *events.SubscriptionManager, genesis *genesis.Genesis, gasOracle gas.Oracle) common.EnclaveClientRPC { + // TODO ensure debug is allowed/disallowed + chain := l2chain.NewChain( + storage, + *config, + gethEncodingService, + chainConfig, + genesis, + logger, + batchRegistry, + config.GasLocalExecutionCapFlag, + ) + debug := debugger.New(chain, storage, chainConfig) + + // todo - security + obscuroKey := crypto.GetObscuroKey(logger) + + rpcEncryptionManager := rpc.NewEncryptionManager(ecies.ImportECDSA(obscuroKey), storage, cachingService, batchRegistry, mempool, crossChainProcessors, config, gasOracle, storage, blockProcessor, chain, logger) + return &enclaveRPCService{ rpcEncryptionManager: rpcEncryptionManager, - registry: registry, + registry: batchRegistry, subscriptionManager: subscriptionManager, config: config, - debugger: debugger, + debugger: debug, storage: storage, 
crossChainProcessors: crossChainProcessors, scb: scb, + logger: logger, } } diff --git a/go/enclave/nodetype/interfaces.go b/go/enclave/nodetype/interfaces.go index ea370258a8..3eacd1bef5 100644 --- a/go/enclave/nodetype/interfaces.go +++ b/go/enclave/nodetype/interfaces.go @@ -13,11 +13,6 @@ import ( // NodeType - the interface for any service type running in Obscuro nodes. // Should only contain the shared functionality that every service type needs to have. type NodeType interface { - // SubmitTransaction - L2 obscuro transactions need to be passed here. Sequencers - // will put them in the mempool while validators might put them in a queue and monitor - // for censorship. - SubmitTransaction(*common.L2Tx) error - // OnL1Fork - logic to be performed when there is an L1 Fork OnL1Fork(ctx context.Context, fork *common.ChainFork) error @@ -38,15 +33,6 @@ type ActiveSequencer interface { NodeType } -type BackupSequencer interface { - // ExecuteStoredBatches - try to execute all stored by unexecuted batches - ExecuteStoredBatches(context.Context) error - - VerifySequencerSignature(*core.Batch) error - - NodeType -} - type Validator interface { // ExecuteStoredBatches - try to execute all stored by unexecuted batches ExecuteStoredBatches(context.Context) error diff --git a/go/enclave/nodetype/sequencer.go b/go/enclave/nodetype/sequencer.go index d5a3f06c6f..a6bd6182af 100644 --- a/go/enclave/nodetype/sequencer.go +++ b/go/enclave/nodetype/sequencer.go @@ -12,7 +12,6 @@ import ( "github.com/ten-protocol/go-ten/go/common/errutil" "github.com/ten-protocol/go-ten/go/common/gethencoding" "github.com/ten-protocol/go-ten/go/common/measure" - "github.com/ten-protocol/go-ten/go/enclave/evm/ethchainadapter" "github.com/ten-protocol/go-ten/go/enclave/storage" "github.com/ten-protocol/go-ten/go/enclave/txpool" @@ -56,7 +55,6 @@ type sequencer struct { dataEncryptionService crypto.DataEncryptionService dataCompressionService compression.DataCompressionService settings 
SequencerSettings - blockchain *ethchainadapter.EthChainAdapter } func NewSequencer( @@ -74,7 +72,6 @@ func NewSequencer( dataEncryptionService crypto.DataEncryptionService, dataCompressionService compression.DataCompressionService, settings SequencerSettings, - blockchain *ethchainadapter.EthChainAdapter, ) ActiveSequencer { return &sequencer{ blockProcessor: blockProcessor, @@ -91,7 +88,6 @@ func NewSequencer( dataEncryptionService: dataEncryptionService, dataCompressionService: dataCompressionService, settings: settings, - blockchain: blockchain, } } @@ -112,15 +108,6 @@ func (s *sequencer) CreateBatch(ctx context.Context, skipBatchIfEmpty bool) erro return s.createGenesisBatch(ctx, l1HeadBlock) } - if running := s.mempool.Running(); !running { - // the mempool can only be started after at least 1 block (the genesis) is in the blockchain object - // if the node restarted the mempool must be started again - err = s.mempool.Start() - if err != nil { - return err - } - } - return s.createNewHeadBatch(ctx, l1HeadBlock, skipBatchIfEmpty) } @@ -150,17 +137,11 @@ func (s *sequencer) createGenesisBatch(ctx context.Context, block *types.Header) } // this is the actual first block produced in chain - err = s.blockchain.IngestNewBlock(batch) + err = s.batchRegistry.EthChain().IngestNewBlock(batch) if err != nil { return fmt.Errorf("failed to feed batch into the virtual eth chain - %w", err) } - // the mempool can only be started after at least 1 block is in the blockchain object - err = s.mempool.Start() - if err != nil { - return err - } - // errors in unit test seem to suggest that batch 2 was received before batch 1 // this ensures that there is enough gap so that batch 1 is issued before batch 2 time.Sleep(time.Second) @@ -297,7 +278,7 @@ func (s *sequencer) produceBatch( "height", cb.Batch.Number(), "numTxs", len(cb.Batch.Transactions), log.BatchSeqNoKey, cb.Batch.SeqNo(), "parent", cb.Batch.Header.ParentHash) // add the batch to the chain so it can remove pending 
transactions from the pool - err = s.blockchain.IngestNewBlock(cb.Batch) + err = s.batchRegistry.EthChain().IngestNewBlock(cb.Batch) if err != nil { return nil, fmt.Errorf("failed to feed batch into the virtual eth chain - %w", err) } @@ -329,7 +310,10 @@ func (s *sequencer) StoreExecutedBatch(ctx context.Context, batch *core.Batch, t return fmt.Errorf("failed to store batch. Cause: %w", err) } - s.batchRegistry.OnBatchExecuted(batch.Header, txResults) + err = s.batchRegistry.OnBatchExecuted(batch.Header, txResults) + if err != nil { + return err + } return nil } @@ -443,10 +427,6 @@ func (s *sequencer) duplicateBatches(ctx context.Context, l1Head *types.Header, return nil } -func (s *sequencer) SubmitTransaction(transaction *common.L2Tx) error { - return s.mempool.Add(transaction) -} - func (s *sequencer) OnL1Fork(ctx context.Context, fork *common.ChainFork) error { if !fork.IsFork() { return nil diff --git a/go/enclave/nodetype/validator.go b/go/enclave/nodetype/validator.go index 335452a0ee..6edea60b21 100644 --- a/go/enclave/nodetype/validator.go +++ b/go/enclave/nodetype/validator.go @@ -45,8 +45,6 @@ func NewValidator( mempool *txpool.TxPool, logger gethlog.Logger, ) Validator { - startMempool(registry, mempool) - return &validator{ blockProcessor: consumer, batchExecutor: batchExecutor, @@ -59,18 +57,6 @@ func NewValidator( } } -func (val *validator) SubmitTransaction(tx *common.L2Tx) error { - headBatch := val.batchRegistry.HeadBatchSeq() - if headBatch == nil || headBatch.Uint64() <= common.L2GenesisSeqNo+1 { - return fmt.Errorf("not initialised") - } - err := val.mempool.Validate(tx) - if err != nil { - val.logger.Info("Error validating transaction.", log.ErrKey, err, log.TxKey, tx.Hash()) - } - return err -} - func (val *validator) OnL1Fork(ctx context.Context, fork *common.ChainFork) error { // nothing to do return nil @@ -94,8 +80,6 @@ func (val *validator) ExecuteStoredBatches(ctx context.Context) error { return err } - startMempool(val.batchRegistry, 
val.mempool) - for _, batchHeader := range batches { if batchHeader.IsGenesis() { if err = val.handleGenesis(ctx, batchHeader); err != nil { @@ -103,14 +87,14 @@ func (val *validator) ExecuteStoredBatches(ctx context.Context) error { } } - val.logger.Trace("Executing stored batchHeader", log.BatchSeqNoKey, batchHeader.SequencerOrderNo) + val.logger.Trace("Executing stored batch", log.BatchSeqNoKey, batchHeader.SequencerOrderNo) // check batchHeader execution prerequisites canExecute, err := val.executionPrerequisites(ctx, batchHeader) if err != nil { return fmt.Errorf("could not determine the execution prerequisites for batchHeader %s. Cause: %w", batchHeader.Hash(), err) } - val.logger.Trace("Can execute stored batchHeader", log.BatchSeqNoKey, batchHeader.SequencerOrderNo, "can", canExecute) + val.logger.Trace("Can execute stored batch", log.BatchSeqNoKey, batchHeader.SequencerOrderNo, "can", canExecute) if canExecute { txs, err := val.storage.FetchBatchTransactionsBySeq(ctx, batchHeader.SequencerOrderNo.Uint64()) @@ -125,17 +109,16 @@ func (val *validator) ExecuteStoredBatches(ctx context.Context) error { txResults, err := val.batchExecutor.ExecuteBatch(ctx, batch) if err != nil { - return fmt.Errorf("could not execute batchHeader %s. Cause: %w", batchHeader.Hash(), err) + return fmt.Errorf("could not execute batch %s. Cause: %w", batchHeader.Hash(), err) } err = val.storage.StoreExecutedBatch(ctx, batchHeader, txResults) if err != nil { - return fmt.Errorf("could not store executed batchHeader %s. Cause: %w", batchHeader.Hash(), err) + return fmt.Errorf("could not store executed batch %s. 
Cause: %w", batchHeader.Hash(), err) } - err = val.mempool.Chain.IngestNewBlock(batch) + err = val.batchRegistry.OnBatchExecuted(batchHeader, txResults) if err != nil { - return fmt.Errorf("failed to feed batchHeader into the virtual eth chain- %w", err) + return err } - val.batchRegistry.OnBatchExecuted(batchHeader, txResults) } } return nil @@ -174,7 +157,10 @@ func (val *validator) handleGenesis(ctx context.Context, batch *common.BatchHead if err != nil { return err } - val.batchRegistry.OnBatchExecuted(batch, nil) + err = val.batchRegistry.OnBatchExecuted(batch, nil) + if err != nil { + return err + } return nil } @@ -185,14 +171,3 @@ func (val *validator) OnL1Block(ctx context.Context, block *types.Header, result func (val *validator) Close() error { return val.mempool.Close() } - -func startMempool(registry components.BatchRegistry, mempool *txpool.TxPool) { - // the mempool can only be started when there are a couple of blocks already processed - headBatchSeq := registry.HeadBatchSeq() - if !mempool.Running() && headBatchSeq != nil && headBatchSeq.Uint64() > common.L2GenesisSeqNo+1 { - err := mempool.Start() - if err != nil { - panic(fmt.Errorf("could not start mempool: %w", err)) - } - } -} diff --git a/go/enclave/rpc/SubmitTx.go b/go/enclave/rpc/SubmitTx.go index fe3d2af7fe..6a1974d1c9 100644 --- a/go/enclave/rpc/SubmitTx.go +++ b/go/enclave/rpc/SubmitTx.go @@ -29,7 +29,7 @@ func SubmitTxExecute(builder *CallBuilder[common.L2Tx, gethcommon.Hash], rpc *En return nil } - if err := rpc.service.SubmitTransaction(builder.Param); err != nil { + if err := rpc.mempool.SubmitTx(builder.Param); err != nil { rpc.logger.Debug("Could not submit transaction", log.TxKey, builder.Param.Hash(), log.ErrKey, err) builder.Err = err return nil diff --git a/go/enclave/rpc/TenStorageRead.go b/go/enclave/rpc/TenStorageRead.go index ba7d850dc1..87373a8885 100644 --- a/go/enclave/rpc/TenStorageRead.go +++ b/go/enclave/rpc/TenStorageRead.go @@ -45,7 +45,7 @@ func 
TenStorageReadValidate(reqParams []any, builder *CallBuilder[storageReadWit } // block the call for un-transparent contracts and non-whitelisted slots - if !rpc.whitelist.AllowedStorageSlots[slot] && !contract.IsTransparent() { + if !rpc.storageSlotWhitelist.AllowedStorageSlots[slot] && !contract.IsTransparent() { builder.Err = fmt.Errorf("eth_getStorageAt is not supported for this contract") return nil } diff --git a/go/enclave/rpc/rpc_encryption_manager.go b/go/enclave/rpc/rpc_encryption_manager.go index 77121eac39..963e05974e 100644 --- a/go/enclave/rpc/rpc_encryption_manager.go +++ b/go/enclave/rpc/rpc_encryption_manager.go @@ -3,6 +3,8 @@ package rpc import ( "fmt" + "github.com/ten-protocol/go-ten/go/enclave/txpool" + "github.com/ten-protocol/go-ten/go/common/privacy" enclaveconfig "github.com/ten-protocol/go-ten/go/enclave/config" "github.com/ten-protocol/go-ten/go/enclave/gas" @@ -11,7 +13,6 @@ import ( "github.com/ten-protocol/go-ten/go/enclave/components" "github.com/ten-protocol/go-ten/go/enclave/crosschain" "github.com/ten-protocol/go-ten/go/enclave/l2chain" - "github.com/ten-protocol/go-ten/go/enclave/nodetype" "github.com/ten-protocol/go-ten/go/enclave/storage" "github.com/ethereum/go-ethereum/crypto/ecies" @@ -25,22 +26,21 @@ type EncryptionManager struct { cacheService *storage.CacheService registry components.BatchRegistry processors *crosschain.Processors - service nodetype.NodeType + mempool *txpool.TxPool gasOracle gas.Oracle blockResolver storage.BlockResolver l1BlockProcessor components.L1BlockProcessor config *enclaveconfig.EnclaveConfig logger gethlog.Logger - whitelist *privacy.Whitelist + storageSlotWhitelist *privacy.Whitelist } -func NewEncryptionManager(enclavePrivateKeyECIES *ecies.PrivateKey, storage storage.Storage, cacheService *storage.CacheService, registry components.BatchRegistry, processors *crosschain.Processors, service nodetype.NodeType, config *enclaveconfig.EnclaveConfig, oracle gas.Oracle, blockResolver 
storage.BlockResolver, l1BlockProcessor components.L1BlockProcessor, chain l2chain.ObscuroChain, logger gethlog.Logger) *EncryptionManager { +func NewEncryptionManager(enclavePrivateKeyECIES *ecies.PrivateKey, storage storage.Storage, cacheService *storage.CacheService, registry components.BatchRegistry, mempool *txpool.TxPool, processors *crosschain.Processors, config *enclaveconfig.EnclaveConfig, oracle gas.Oracle, blockResolver storage.BlockResolver, l1BlockProcessor components.L1BlockProcessor, chain l2chain.ObscuroChain, logger gethlog.Logger) *EncryptionManager { return &EncryptionManager{ storage: storage, cacheService: cacheService, registry: registry, processors: processors, - service: service, chain: chain, config: config, blockResolver: blockResolver, @@ -48,7 +48,8 @@ func NewEncryptionManager(enclavePrivateKeyECIES *ecies.PrivateKey, storage stor gasOracle: oracle, logger: logger, enclavePrivateKeyECIES: enclavePrivateKeyECIES, - whitelist: privacy.NewWhitelist(), + storageSlotWhitelist: privacy.NewWhitelist(), + mempool: mempool, } } diff --git a/go/enclave/storage/cache_service.go b/go/enclave/storage/cache_service.go index bad04668db..4a4ee7c39a 100644 --- a/go/enclave/storage/cache_service.go +++ b/go/enclave/storage/cache_service.go @@ -34,6 +34,7 @@ const ( receiptCost = 1024 * 50 contractCost = 60 eventTypeCost = 120 + enclaveCost = 100 ) type CacheService struct { @@ -72,6 +73,9 @@ type CacheService struct { // only sender can view configured receiptCache *cache.Cache[*CachedReceipt] + // store the enclaves from the network + attestedEnclavesCache *cache.Cache[*AttestedEnclave] + logger gethlog.Logger } @@ -87,6 +91,8 @@ func NewCacheService(logger gethlog.Logger, testMode bool) *CacheService { nrBatchesWithContent := 50 // ~100M nrReceipts := 10_000 // ~1G + nrEnclaves := 20 + if testMode { nrReceipts = 500 //~50M } @@ -105,7 +111,8 @@ func NewCacheService(logger gethlog.Logger, testMode bool) *CacheService { contractAddressCache: 
cache.New[*enclavedb.Contract](newCache(logger, nrContractAddresses, contractCost)), eventTypeCache: cache.New[*enclavedb.EventType](newCache(logger, nrEventTypes, eventTypeCost)), - receiptCache: cache.New[*CachedReceipt](newCache(logger, nrReceipts, receiptCost)), + receiptCache: cache.New[*CachedReceipt](newCache(logger, nrReceipts, receiptCost)), + attestedEnclavesCache: cache.New[*AttestedEnclave](newCache(logger, nrEnclaves, enclaveCost)), // cache the latest received batches to avoid a lookup when streaming it back to the host after processing lastBatchesCache: cache.New[*core.Batch](newCache(logger, nrBatchesWithContent, batchCost)), @@ -203,6 +210,10 @@ func (cs *CacheService) ReadConvertedHeader(ctx context.Context, batchHash commo return getCachedValue(ctx, cs.convertedGethHeaderCache, cs.logger, batchHash, blockHeaderCost, onCacheMiss, true) } +func (cs *CacheService) ReadEnclavePubKey(ctx context.Context, enclaveId common.EnclaveID, onCacheMiss func(any) (*AttestedEnclave, error)) (*AttestedEnclave, error) { + return getCachedValue(ctx, cs.attestedEnclavesCache, cs.logger, enclaveId, enclaveCost, onCacheMiss, true) +} + // getCachedValue - returns the cached value for the provided key. 
If the key is not found, then invoke the 'onCacheMiss' function // which returns the value, and cache it func getCachedValue[V any](ctx context.Context, cache *cache.Cache[*V], logger gethlog.Logger, key any, cost int64, onCacheMiss func(any) (*V, error), cacheIfMissing bool) (*V, error) { diff --git a/go/enclave/storage/interfaces.go b/go/enclave/storage/interfaces.go index 51d0fb3fd7..0affc1f4b0 100644 --- a/go/enclave/storage/interfaces.go +++ b/go/enclave/storage/interfaces.go @@ -103,7 +103,7 @@ type TransactionStorage interface { } type AttestationStorage interface { - GetEnclavePubKey(ctx context.Context, enclaveId common.EnclaveID) (*ecdsa.PublicKey, common.NodeType, error) + GetEnclavePubKey(ctx context.Context, enclaveId common.EnclaveID) (*AttestedEnclave, error) StoreNewEnclave(ctx context.Context, enclaveId common.EnclaveID, key *ecdsa.PublicKey) error StoreNodeType(ctx context.Context, enclaveId common.EnclaveID, nodeType common.NodeType) error } diff --git a/go/enclave/storage/storage.go b/go/enclave/storage/storage.go index abc8a568e8..f3b55c9e0f 100644 --- a/go/enclave/storage/storage.go +++ b/go/enclave/storage/storage.go @@ -45,6 +45,11 @@ const ( enclaveKeyCfg = "ENCLAVE_KEY" ) +type AttestedEnclave struct { + PubKey *ecdsa.PublicKey + Type common.NodeType +} + // todo - this file needs splitting up based on concerns type storageImpl struct { db enclavedb.EnclaveDB @@ -450,20 +455,21 @@ func (s *storageImpl) ExistsTransactionReceipt(ctx context.Context, txHash commo return enclavedb.ExistsReceipt(ctx, s.db.GetSQLDB(), txHash) } -// todo - cache -func (s *storageImpl) GetEnclavePubKey(ctx context.Context, enclaveId common.EnclaveID) (*ecdsa.PublicKey, common.NodeType, error) { +func (s *storageImpl) GetEnclavePubKey(ctx context.Context, enclaveId common.EnclaveID) (*AttestedEnclave, error) { defer s.logDuration("GetEnclavePubKey", measure.NewStopwatch()) - key, nodeType, err := enclavedb.FetchAttestation(ctx, s.db.GetSQLDB(), enclaveId) - if err 
!= nil { - return nil, 0, fmt.Errorf("could not retrieve attestation key for address %s. Cause: %w", enclaveId, err) - } + return s.cachingService.ReadEnclavePubKey(ctx, enclaveId, func(a any) (*AttestedEnclave, error) { + key, nodeType, err := enclavedb.FetchAttestation(ctx, s.db.GetSQLDB(), enclaveId) + if err != nil { + return nil, fmt.Errorf("could not retrieve attestation key for address %s. Cause: %w", enclaveId, err) + } - publicKey, err := gethcrypto.DecompressPubkey(key) - if err != nil { - return nil, 0, fmt.Errorf("could not parse key from db. Cause: %w", err) - } + publicKey, err := gethcrypto.DecompressPubkey(key) + if err != nil { + return nil, fmt.Errorf("could not parse key from db. Cause: %w", err) + } - return publicKey, nodeType, nil + return &AttestedEnclave{PubKey: publicKey, Type: nodeType}, nil + }) } func (s *storageImpl) StoreNodeType(ctx context.Context, enclaveId common.EnclaveID, nodeType common.NodeType) error { diff --git a/go/enclave/txpool/txpool.go b/go/enclave/txpool/txpool.go index 851da41926..21a6a0c2bf 100644 --- a/go/enclave/txpool/txpool.go +++ b/go/enclave/txpool/txpool.go @@ -1,62 +1,111 @@ package txpool +// unsafe package imported in order to link to a private function in go-ethereum. +// This allows us to validate transactions against the tx pool rules. import ( "fmt" "math/big" "strings" "sync" + "sync/atomic" + "time" _ "unsafe" - gethcommon "github.com/ethereum/go-ethereum/common" - "github.com/holiman/uint256" + "github.com/ten-protocol/go-ten/go/enclave/components" - // unsafe package imported in order to link to a private function in go-ethereum. - // This allows us to validate transactions against the tx pool rules. 
- - gethlog "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/core" "github.com/ten-protocol/go-ten/go/common/log" + gethcommon "github.com/ethereum/go-ethereum/common" + "github.com/holiman/uint256" + gethtxpool "github.com/ethereum/go-ethereum/core/txpool" "github.com/ethereum/go-ethereum/core/txpool/legacypool" "github.com/ethereum/go-ethereum/core/types" + gethlog "github.com/ethereum/go-ethereum/log" "github.com/ten-protocol/go-ten/go/common" - "github.com/ten-protocol/go-ten/go/enclave/evm/ethchainadapter" ) +// this is how long the node waits to receive the second batch +var startMempoolTimeout = 90 * time.Second + // TxPool is an obscuro wrapper around geths transaction pool type TxPool struct { txPoolConfig legacypool.Config legacyPool *legacypool.LegacyPool pool *gethtxpool.TxPool - Chain *ethchainadapter.EthChainAdapter + Chain *components.EthChainAdapter gasTip *big.Int - running bool + running atomic.Bool stateMutex sync.Mutex logger gethlog.Logger + validateOnly atomic.Bool } // NewTxPool returns a new instance of the tx pool -func NewTxPool(blockchain *ethchainadapter.EthChainAdapter, gasTip *big.Int, logger gethlog.Logger) (*TxPool, error) { - txPoolConfig := ethchainadapter.NewLegacyPoolConfig() +func NewTxPool(blockchain *components.EthChainAdapter, gasTip *big.Int, validateOnly bool, logger gethlog.Logger) (*TxPool, error) { + txPoolConfig := components.NewLegacyPoolConfig() legacyPool := legacypool.New(txPoolConfig, blockchain) - return &TxPool{ + txp := &TxPool{ Chain: blockchain, txPoolConfig: txPoolConfig, legacyPool: legacyPool, gasTip: gasTip, stateMutex: sync.Mutex{}, + validateOnly: atomic.Bool{}, logger: logger, - }, nil + } + txp.validateOnly.Store(validateOnly) + go txp.start() + return txp, nil +} + +func (t *TxPool) SetValidateMode(validateOnly bool) { + t.validateOnly.Store(validateOnly) } -// Start starts the pool // can only be started after t.blockchain has at least one block inside -func (t *TxPool) 
Start() error { - if t.running { - return fmt.Errorf("tx pool already started") +// note - blocking method that waits for the block.Call only as goroutine +func (t *TxPool) start() { + if t.running.Load() { + return } + cb := t.Chain.CurrentBlock() + if cb != nil && cb.Number.Uint64() > common.L2GenesisHeight+1 { + err := t._startInternalPool() + if err != nil { + t.logger.Crit("Failed to start tx pool", log.ErrKey, err) + } + return + } + + var ( + newHeadCh = make(chan core.ChainHeadEvent) + newHeadSub = t.Chain.SubscribeChainHeadEvent(newHeadCh) + ) + defer close(newHeadCh) + defer newHeadSub.Unsubscribe() + for { + select { + case event := <-newHeadCh: + newHead := event.Block.Header() + if newHead.Number.Uint64() > common.L2GenesisHeight+1 { + err := t._startInternalPool() + if err != nil { + t.logger.Crit("Failed to start tx pool", log.ErrKey, err) + } + return + } + case <-time.After(startMempoolTimeout): + t.logger.Crit("Timeout waiting to start mempool.") + return + } + } +} + +func (t *TxPool) _startInternalPool() error { t.logger.Info("Starting tx pool") memp, err := gethtxpool.New(t.gasTip.Uint64(), t.Chain, []gethtxpool.SubPool{t.legacyPool}) if err != nil { @@ -65,12 +114,54 @@ func (t *TxPool) Start() error { t.logger.Info("Tx pool started") t.pool = memp - t.running = true + t.running.Store(true) return nil } +func (t *TxPool) SubmitTx(transaction *common.L2Tx) error { + err := t.waitUntilPoolRunning() + if err != nil { + return err + } + + if t.validateOnly.Load() { + return t.validate(transaction) + } + return t.add(transaction) +} + +func (t *TxPool) waitUntilPoolRunning() error { + if t.running.Load() { + return nil + } + + timeout := time.After(startMempoolTimeout) + tick := time.NewTicker(500 * time.Millisecond) + defer tick.Stop() + for { + select { + case <-tick.C: + if t.running.Load() { + return nil + } + case <-timeout: + return fmt.Errorf("timed out waiting for tx pool to start") + } + } +} + // PendingTransactions returns all pending 
transactions grouped per address and ordered per nonce func (t *TxPool) PendingTransactions() map[gethcommon.Address][]*gethtxpool.LazyTransaction { + if !t.running.Load() { + t.logger.Error("tx pool not running") + return nil + } + + if t.validateOnly.Load() { + t.logger.Error("Pending transactions requested while in validate only mode") + return nil + } + // todo - for now using the base fee from the block currentBlock := t.Chain.CurrentBlock() if currentBlock == nil { @@ -83,11 +174,17 @@ func (t *TxPool) PendingTransactions() map[gethcommon.Address][]*gethtxpool.Lazy }) } +func (t *TxPool) Close() error { + defer func() { + if err := recover(); err != nil { + t.logger.Error("Could not close legacy pool", log.ErrKey, err) + } + }() + return t.pool.Close() +} + // Add adds a new transactions to the pool -func (t *TxPool) Add(transaction *common.L2Tx) error { - if !t.running { - return fmt.Errorf("tx pool not running") - } +func (t *TxPool) add(transaction *common.L2Tx) error { var strErrors []string for _, err := range t.pool.Add([]*types.Transaction{transaction}, false, false) { if err != nil { @@ -108,7 +205,7 @@ func validateTxBasics(_ *legacypool.LegacyPool, _ *types.Transaction, _ bool) er func validateTx(_ *legacypool.LegacyPool, _ *types.Transaction, _ bool) error // Validate - run the underlying tx pool validation logic -func (t *TxPool) Validate(tx *common.L2Tx) error { +func (t *TxPool) validate(tx *common.L2Tx) error { // validate against the consensus rules err := validateTxBasics(t.legacyPool, tx, false) if err != nil { @@ -120,16 +217,3 @@ func (t *TxPool) Validate(tx *common.L2Tx) error { // validate against the state. 
Things like nonce, balance, etc return validateTx(t.legacyPool, tx, false) } - -func (t *TxPool) Running() bool { - return t.running -} - -func (t *TxPool) Close() error { - defer func() { - if err := recover(); err != nil { - t.logger.Error("Could not close legacy pool", log.ErrKey, err) - } - }() - return t.pool.Close() -} diff --git a/go/obsclient/obsclient.go b/go/obsclient/obsclient.go index 475f01aee0..e6cdad308d 100644 --- a/go/obsclient/obsclient.go +++ b/go/obsclient/obsclient.go @@ -1,7 +1,7 @@ package obsclient import ( - "errors" + "fmt" "math/big" "strings" @@ -118,17 +118,25 @@ func (oc *ObsClient) GetTransaction(hash gethcommon.Hash) (*common.PublicTransac return tx, err } -// Health returns the health of the node. -func (oc *ObsClient) Health() (bool, error) { - var healthy *hostcommon.HealthCheck - err := oc.rpcClient.Call(&healthy, rpc.Health) - if err != nil { - return false, err +// Health returns the Health status of the node. +func (oc *ObsClient) Health() (hostcommon.HealthCheck, error) { + var healthy hostcommon.HealthCheck + + if err := oc.rpcClient.Call(&healthy, rpc.Health); err != nil { + return hostcommon.HealthCheck{ + OverallHealth: false, + Errors: []string{fmt.Sprintf("RPC call failed: %v", err)}, + }, err } + if !healthy.OverallHealth { - return false, errors.New(strings.Join(healthy.Errors, ", ")) + if len(healthy.Errors) == 0 { + healthy.Errors = []string{"Node reported unhealthy state without specific errors"} + } + return healthy, fmt.Errorf("node unhealthy: %s", strings.Join(healthy.Errors, ", ")) } - return healthy.OverallHealth, nil + + return healthy, nil } // GetTotalContractCount returns the total count of created contracts diff --git a/integration/eth2network/pos_eth2_network.go b/integration/eth2network/pos_eth2_network.go index 817b43ba09..3cdc382c33 100644 --- a/integration/eth2network/pos_eth2_network.go +++ b/integration/eth2network/pos_eth2_network.go @@ -177,6 +177,7 @@ func (n *PosImpl) Start() error { } func (n 
*PosImpl) Stop() error { + println("Stopping geth Network") kill(n.gethProcessID) kill(n.validatorProcessID) kill(n.beaconProcessID) @@ -399,6 +400,7 @@ func fundWallets(walletsToFund []string, buildDir string, chainID int) (string, } func kill(pid int) { + fmt.Printf("Killing %d\n", pid) process, err := os.FindProcess(pid) if err != nil { fmt.Printf("Error finding process with PID %d: %v\n", pid, err) diff --git a/integration/networktest/env/dev_network.go b/integration/networktest/env/dev_network.go index 6efa5e18e7..829546947e 100644 --- a/integration/networktest/env/dev_network.go +++ b/integration/networktest/env/dev_network.go @@ -54,7 +54,7 @@ func awaitHealthStatus(rpcAddress string, timeout time.Duration) error { if err != nil { return fmt.Errorf("failed to get host health (%s): %w", rpcAddress, err) } - if !healthy { + if !healthy.OverallHealth { return fmt.Errorf("host is not healthy (%s)", rpcAddress) } return nil diff --git a/integration/networktest/util.go b/integration/networktest/util.go index 4ec1f924d4..1c146bcd94 100644 --- a/integration/networktest/util.go +++ b/integration/networktest/util.go @@ -28,7 +28,7 @@ func NodeHealthCheck(rpcAddress string) error { if err != nil { return err } - if !health { + if !health.OverallHealth { return errors.New("node health check failed") } return nil diff --git a/integration/simulation/network/socket.go b/integration/simulation/network/socket.go index 4a6e32f619..20afad5572 100644 --- a/integration/simulation/network/socket.go +++ b/integration/simulation/network/socket.go @@ -198,7 +198,8 @@ func (n *networkOfSocketNodes) createConnections(simParams *params.SimParams) er startTime := time.Now() healthy := false for ; !healthy; time.Sleep(500 * time.Millisecond) { - healthy, _ = client.Health() + h, _ := client.Health() + healthy = h.OverallHealth if time.Now().After(startTime.Add(3 * time.Minute)) { return fmt.Errorf("nodes not healthy after 3 minutes") } diff --git 
a/integration/simulation/p2p/in_mem_ten_client.go b/integration/simulation/p2p/in_mem_ten_client.go index 4ccdbf5e4d..7fe042fe0d 100644 --- a/integration/simulation/p2p/in_mem_ten_client.go +++ b/integration/simulation/p2p/in_mem_ten_client.go @@ -253,8 +253,11 @@ func (c *inMemTenClient) RegisterViewingKey(_ gethcommon.Address, _ []byte) erro } func (c *inMemTenClient) health(result interface{}) error { - *result.(**hostcommon.HealthCheck) = &hostcommon.HealthCheck{OverallHealth: true} - return nil + if resPtr, ok := result.(*hostcommon.HealthCheck); ok { + *resPtr = hostcommon.HealthCheck{OverallHealth: true} + return nil + } + return fmt.Errorf("invalid type for result: expected *hostcommon.HealthCheck") } func (c *inMemTenClient) getTotalTransactions(result interface{}) error { diff --git a/integration/simulation/simulation.go b/integration/simulation/simulation.go index ecdb709440..bd1982aeaf 100644 --- a/integration/simulation/simulation.go +++ b/integration/simulation/simulation.go @@ -458,7 +458,7 @@ func (s *Simulation) checkHealthStatus() { for _, client := range s.RPCHandles.TenClients { err := retry.Do(func() error { healthy, err := client.Health() - if !healthy || err != nil { + if !healthy.OverallHealth || err != nil { return fmt.Errorf("client is not healthy: %w", err) } return nil diff --git a/testnet/launcher/docker.go b/testnet/launcher/docker.go index 071924522b..48a1993593 100644 --- a/testnet/launcher/docker.go +++ b/testnet/launcher/docker.go @@ -2,6 +2,7 @@ package launcher import ( "fmt" + "strings" "time" "github.com/ethereum/go-ethereum/common" @@ -16,6 +17,7 @@ import ( "github.com/ten-protocol/go-ten/testnet/launcher/gateway" l1cd "github.com/ten-protocol/go-ten/testnet/launcher/l1contractdeployer" + l1gs "github.com/ten-protocol/go-ten/testnet/launcher/l1grantsequencers" l2cd "github.com/ten-protocol/go-ten/testnet/launcher/l2contractdeployer" ) @@ -42,6 +44,13 @@ func (t *Testnet) Start() error { return fmt.Errorf("unable to deploy l1 
contracts - %w", err) } + println("MANAGEMENT CONTRACT: ", networkConfig.ManagementContractAddress) + println("MANAGEMENT CONTRACT: ", networkConfig.ManagementContractAddress) + println("MANAGEMENT CONTRACT: ", networkConfig.ManagementContractAddress) + println("MANAGEMENT CONTRACT: ", networkConfig.ManagementContractAddress) + println("MANAGEMENT CONTRACT: ", networkConfig.ManagementContractAddress) + println("MANAGEMENT CONTRACT: ", networkConfig.ManagementContractAddress) + edgelessDBImage := "ghcr.io/edgelesssys/edgelessdb-sgx-4gb:v0.3.2" // todo: revisit how we should configure the image, this condition is not ideal if !t.cfg.isSGXEnabled { @@ -140,6 +149,11 @@ func (t *Testnet) Start() error { } fmt.Println("L2 Contracts were successfully deployed...") + err = t.grantSequencerStatus(networkConfig.ManagementContractAddress) + if err != nil { + return fmt.Errorf("failed to grant sequencer status: %w", err) + } + faucetPort := 99 faucetInst, err := faucet.NewDockerFaucet( faucet.NewFaucetConfig( @@ -272,7 +286,7 @@ func waitForHealthyNode(port int) error { // todo: hook the cfg if err != nil { return err } - if health { + if health.OverallHealth { fmt.Println("Obscuro node is ready") return nil } @@ -286,3 +300,51 @@ func waitForHealthyNode(port int) error { // todo: hook the cfg fmt.Printf("Node became healthy after %f seconds\n", time.Since(timeStart).Seconds()) return nil } + +func (t *Testnet) grantSequencerStatus(mgmtContractAddr string) error { + // fetch enclaveIDs + hostURL := fmt.Sprintf("http://localhost:%d", 80) + client, err := rpc.NewNetworkClient(hostURL) + if err != nil { + return fmt.Errorf("failed to create network client: %w", err) + } + defer client.Stop() + + obsClient := obsclient.NewObsClient(client) + health, err := obsClient.Health() + if err != nil { + return fmt.Errorf("failed to get health status: %w", err) + } + + if len(health.Enclaves) == 0 { + return fmt.Errorf("could not retrieve enclave IDs from health endpoint") + } + + var 
enclaveIDs []string
+	for _, status := range health.Enclaves {
+		enclaveIDs = append(enclaveIDs, status.EnclaveID.String())
+	}
+	enclaveIDsStr := strings.Join(enclaveIDs, ",")
+
+	l1grantsequencers, err := l1gs.NewGrantSequencers(
+		l1gs.NewGrantSequencerConfig(
+			l1gs.WithL1HTTPURL("http://eth2network:8025"),
+			l1gs.WithPrivateKey("f52e5418e349dccdda29b6ac8b0abe6576bb7713886aa85abea6181ba731f9bb"),
+			l1gs.WithDockerImage(t.cfg.contractDeployerDockerImage),
+			l1gs.WithMgmtContractAddress(mgmtContractAddr),
+			l1gs.WithEnclaveIDs(enclaveIDsStr),
+		),
+	)
+	if err != nil {
+		return fmt.Errorf("unable to configure l1 grant sequencers - %w", err)
+	}
+
+	err = l1grantsequencers.Start()
+	if err != nil {
+		return fmt.Errorf("unable to start l1 grant sequencers - %w", err)
+	}
+
+	fmt.Println("Enclaves were successfully granted sequencer roles...")
+
+	return nil
+}
diff --git a/testnet/launcher/l1grantsequencers/cmd/cli.go b/testnet/launcher/l1grantsequencers/cmd/cli.go
new file mode 100644
index 0000000000..15e1418144
--- /dev/null
+++ b/testnet/launcher/l1grantsequencers/cmd/cli.go
@@ -0,0 +1,35 @@
+package main
+
+import (
+	"flag"
+)
+
+// L1GrantSequencersConfigCLI represents the configurations needed to grant enclaves sequencer roles over CLI
+type L1GrantSequencersConfigCLI struct {
+	l1HTTPURL           string
+	privateKey          string
+	mgmtContractAddress string
+	enclaveIDs          string
+	dockerImage         string
+}
+
+// ParseConfigCLI returns an L1GrantSequencersConfigCLI based on the CLI params and defaults. 
+func ParseConfigCLI() *L1GrantSequencersConfigCLI { + cfg := &L1GrantSequencersConfigCLI{} + flagUsageMap := getFlagUsageMap() + + l1HTTPURL := flag.String(l1HTTPURLFlag, "http://eth2network:8025", flagUsageMap[l1HTTPURLFlag]) + privateKey := flag.String(privateKeyFlag, "", flagUsageMap[privateKeyFlag]) + mgmtContractAddress := flag.String(mgmtContractAddressFlag, "", flagUsageMap[mgmtContractAddressFlag]) + enclaveIDs := flag.String(enclaveIDsFlag, "", flagUsageMap[enclaveIDsFlag]) + dockerImage := flag.String(dockerImageFlag, "testnetobscuronet.azurecr.io/obscuronet/hardhatdeployer:latest", flagUsageMap[dockerImageFlag]) + flag.Parse() + + cfg.l1HTTPURL = *l1HTTPURL + cfg.privateKey = *privateKey + cfg.mgmtContractAddress = *mgmtContractAddress + cfg.enclaveIDs = *enclaveIDs + cfg.dockerImage = *dockerImage + + return cfg +} diff --git a/testnet/launcher/l1grantsequencers/cmd/cli_flags.go b/testnet/launcher/l1grantsequencers/cmd/cli_flags.go new file mode 100644 index 0000000000..e85a6a9aed --- /dev/null +++ b/testnet/launcher/l1grantsequencers/cmd/cli_flags.go @@ -0,0 +1,24 @@ +package main + +// Flag names. +const ( + l1HTTPURLFlag = "l1_http_url" + privateKeyFlag = "private_key" + mgmtContractAddressFlag = "mgmt_contract_address" + enclaveIDsFlag = "enclave_ids" + dockerImageFlag = "docker_image" + contractsEnvFileFlag = "contracts_env_file" +) + +// Returns a map of the flag usages. +// While we could just use constants instead of a map, this approach allows us to test that all the expected flags are defined. 
+func getFlagUsageMap() map[string]string {
+	return map[string]string{
+		l1HTTPURLFlag:           "Layer 1 network http RPC addr",
+		privateKeyFlag:          "L1 and L2 private key used in the node",
+		mgmtContractAddressFlag: "L1 management contract address",
+		enclaveIDsFlag:          "List of enclave public keys",
+		dockerImageFlag:         "Docker image to run",
+		contractsEnvFileFlag:    "If set, it will write the contract addresses to the file",
+	}
+}
diff --git a/testnet/launcher/l1grantsequencers/cmd/main.go b/testnet/launcher/l1grantsequencers/cmd/main.go
new file mode 100644
index 0000000000..6273ecee02
--- /dev/null
+++ b/testnet/launcher/l1grantsequencers/cmd/main.go
@@ -0,0 +1,34 @@
+package main
+
+import (
+	"fmt"
+	"os"
+
+	l1gs "github.com/ten-protocol/go-ten/testnet/launcher/l1grantsequencers"
+)
+
+func main() {
+	cliConfig := ParseConfigCLI()
+
+	l1grantsequencers, err := l1gs.NewGrantSequencers(
+		l1gs.NewGrantSequencerConfig(
+			l1gs.WithL1HTTPURL("http://eth2network:8025"),
+			l1gs.WithPrivateKey("f52e5418e349dccdda29b6ac8b0abe6576bb7713886aa85abea6181ba731f9bb"),
+			l1gs.WithDockerImage(cliConfig.dockerImage),
+			l1gs.WithMgmtContractAddress(cliConfig.mgmtContractAddress),
+			l1gs.WithEnclaveIDs(cliConfig.enclaveIDs),
+		),
+	)
+	if err != nil {
+		fmt.Printf("unable to configure l1 grant sequencers - %v\n", err)
+		os.Exit(1)
+	}
+
+	err = l1grantsequencers.Start()
+	if err != nil {
+		fmt.Printf("unable to start l1 grant sequencers - %v\n", err)
+		os.Exit(1)
+	}
+
+	os.Exit(0)
+}
diff --git a/testnet/launcher/l1grantsequencers/config.go b/testnet/launcher/l1grantsequencers/config.go
new file mode 100644
index 0000000000..a230311cd0
--- /dev/null
+++ b/testnet/launcher/l1grantsequencers/config.go
@@ -0,0 +1,60 @@
+package l1grantsequencers
+
+// Option is a function that applies configs to a Config Object
+type Option = func(c *Config)
+
+// Config holds the properties that configure the package
+type Config struct {
+	l1HTTPURL           string
+	privateKey          string
+	mgmtContractAddress string
+	
enclaveIDs string + dockerImage string + // debugEnabled bool +} + +func NewGrantSequencerConfig(opts ...Option) *Config { + defaultConfig := &Config{} + + for _, opt := range opts { + opt(defaultConfig) + } + + return defaultConfig +} + +func WithL1HTTPURL(s string) Option { + return func(c *Config) { + c.l1HTTPURL = s + } +} + +func WithPrivateKey(s string) Option { + return func(c *Config) { + c.privateKey = s + } +} + +func WithMgmtContractAddress(s string) Option { + return func(c *Config) { + c.mgmtContractAddress = s + } +} + +func WithEnclaveIDs(s string) Option { + return func(c *Config) { + c.enclaveIDs = s + } +} + +func WithDockerImage(s string) Option { + return func(c *Config) { + c.dockerImage = s + } +} + +//func WithDebugEnabled(b bool) Option { +// return func(c *Config) { +// c.debugEnabled = b +// } +//} diff --git a/testnet/launcher/l1grantsequencers/docker.go b/testnet/launcher/l1grantsequencers/docker.go new file mode 100644 index 0000000000..41eb56cf51 --- /dev/null +++ b/testnet/launcher/l1grantsequencers/docker.go @@ -0,0 +1,57 @@ +package l1grantsequencers + +import ( + "fmt" + + "github.com/ten-protocol/go-ten/go/common/docker" +) + +type GrantSequencers struct { + cfg *Config + containerID string +} + +func NewGrantSequencers(cfg *Config) (*GrantSequencers, error) { + return &GrantSequencers{ + cfg: cfg, + }, nil +} + +func (s *GrantSequencers) Start() error { + cmds := []string{ + "npx", + "run", + "--network", + "layer1", + "scripts/sequencer/001_grant_sequencers.ts", + s.cfg.mgmtContractAddress, + s.cfg.enclaveIDs, + } + + envs := map[string]string{ + "NETWORK_JSON": fmt.Sprintf(`{ + "layer1": { + "url": "%s", + "live": false, + "saveDeployments": true, + "accounts": [ "%s" ] + } + }`, s.cfg.l1HTTPURL, s.cfg.privateKey), + } + + containerID, err := docker.StartNewContainer( + "grant-sequencers", + s.cfg.dockerImage, + cmds, + nil, + envs, + nil, + nil, + false, + ) + if err != nil { + return err + } + s.containerID = containerID + 
return nil +} diff --git a/tools/tenscan/backend/obscuroscan_backend.go b/tools/tenscan/backend/obscuroscan_backend.go index 53213fe4a4..2522550401 100644 --- a/tools/tenscan/backend/obscuroscan_backend.go +++ b/tools/tenscan/backend/obscuroscan_backend.go @@ -32,7 +32,11 @@ func (b *Backend) GetLatestBatch() (*common.BatchHeader, error) { } func (b *Backend) GetTenNodeHealthStatus() (bool, error) { - return b.obsClient.Health() + health, err := b.obsClient.Health() + if err != nil { + return false, err + } + return health.OverallHealth, nil } func (b *Backend) GetLatestRollup() (*common.RollupHeader, error) { diff --git a/tools/walletextension/common/config.go b/tools/walletextension/common/config.go index 799d92f36b..c01d471c72 100644 --- a/tools/walletextension/common/config.go +++ b/tools/walletextension/common/config.go @@ -19,5 +19,6 @@ type Config struct { RateLimitUserComputeTime time.Duration RateLimitWindow time.Duration RateLimitMaxConcurrentRequests int - Debug bool + InsideEnclave bool // Indicates if the program is running inside an enclave + KeyExchangeURL string } diff --git a/tools/walletextension/common/constants.go b/tools/walletextension/common/constants.go index e39f2938e7..c06ad7a9bd 100644 --- a/tools/walletextension/common/constants.go +++ b/tools/walletextension/common/constants.go @@ -25,6 +25,7 @@ const ( PathSessionKeys = "/session-key/" PathNetworkHealth = "/network-health/" PathNetworkConfig = "/network-config/" + PathKeyExchange = "/key-exchange/" WSProtocol = "ws://" HTTPProtocol = "http://" EncryptedTokenQueryParameter = "token" diff --git a/tools/walletextension/enclave.Dockerfile b/tools/walletextension/enclave.Dockerfile index 33673df659..dd2893ed0c 100644 --- a/tools/walletextension/enclave.Dockerfile +++ b/tools/walletextension/enclave.Dockerfile @@ -6,8 +6,9 @@ # Final container folder structure: # /home/ten/go-ten/tools/walletextension/main contains the executable for the enclave +# /data persistent volume mount point - +# 
Trigger new build stage for compiling the enclave FROM ghcr.io/edgelesssys/ego-dev:v1.5.3 AS build-base # Install ca-certificates package and update it @@ -15,8 +16,7 @@ RUN apt-get update && apt-get install -y \ ca-certificates \ && update-ca-certificates - -# setup container data structure +# Setup container data structure RUN mkdir -p /home/ten/go-ten # Ensures container layer caching when dependencies are not changed @@ -39,16 +39,27 @@ RUN --mount=type=cache,target=/root/.cache/go-build \ # Sign the enclave executable RUN ego sign enclave.json - -# Trigger a new build stage and use the smaller ego version: FROM ghcr.io/edgelesssys/ego-deploy:v1.5.3 +# Create data directory that will be used for persistence +RUN mkdir -p /data && chmod 777 /data + # Copy just the binary for the enclave into this build stage COPY --from=build-enclave \ /home/ten/go-ten/tools/walletextension/main /home/ten/go-ten/tools/walletextension/main +# Copy the entry.sh script and make it executable +COPY tools/walletextension/main/entry.sh /home/ten/go-ten/tools/walletextension/main/entry.sh +RUN chmod +x /home/ten/go-ten/tools/walletextension/main/entry.sh + WORKDIR /home/ten/go-ten/tools/walletextension/main +# Add volume mount point +VOLUME ["/data"] + # simulation mode is ACTIVE by default ENV OE_SIMULATION=1 -EXPOSE 3000 \ No newline at end of file +EXPOSE 3000 + +# Set the entrypoint to entry.sh +ENTRYPOINT ["/home/ten/go-ten/tools/walletextension/main/entry.sh"] diff --git a/tools/walletextension/encryption/encryption.go b/tools/walletextension/encryption/encryption.go index c4e8de7c22..c6d997c782 100644 --- a/tools/walletextension/encryption/encryption.go +++ b/tools/walletextension/encryption/encryption.go @@ -63,3 +63,8 @@ func (e *Encryptor) HashWithHMAC(data []byte) []byte { h.Write(data) return h.Sum(nil) } + +// GetKey returns the encryption key +func (e *Encryptor) GetKey() []byte { + return e.key +} diff --git a/tools/walletextension/httpapi/routes.go 
b/tools/walletextension/httpapi/routes.go index efbbf35a07..36b1e19b69 100644 --- a/tools/walletextension/httpapi/routes.go +++ b/tools/walletextension/httpapi/routes.go @@ -1,11 +1,15 @@ package httpapi import ( + "bytes" + "crypto/sha256" "encoding/hex" "encoding/json" "fmt" "net/http" + tencommon "github.com/ten-protocol/go-ten/go/common" + "github.com/ten-protocol/go-ten/tools/walletextension/keymanager" "github.com/ten-protocol/go-ten/tools/walletextension/services" "github.com/status-im/keycard-go/hexutils" @@ -62,6 +66,10 @@ func NewHTTPRoutes(walletExt *services.Services) []node.Route { Name: common.APIVersion1 + common.PathNetworkConfig, Func: httpHandler(walletExt, networkConfigRequestHandler), }, + { + Name: common.APIVersion1 + common.PathKeyExchange, + Func: httpHandler(walletExt, keyExchangeRequestHandler), + }, { Name: common.APIVersion1 + common.PathSessionKeys + "create", Func: httpHandler(walletExt, createSKRequestHandler), @@ -613,3 +621,84 @@ func boolToByte(res bool) byte { } return 0 } + +func keyExchangeRequestHandler(walletExt *services.Services, conn UserConn) { + // Read the request + body, err := conn.ReadRequest() + if err != nil { + handleError(conn, walletExt.Logger(), fmt.Errorf("error reading request: %w", err)) + return + } + + // Step 1: Deserialize the received message + var receivedMessageOG keymanager.KeyExchangeRequest + err = json.Unmarshal(body, &receivedMessageOG) + if err != nil { + walletExt.Logger().Error("OG: Failed to deserialize received message", log.ErrKey, err) + handleError(conn, walletExt.Logger(), fmt.Errorf("failed to deserialize message: %w", err)) + return + } + + // Step 2: Deserialize the public key + receivedPubKey, err := keymanager.DeserializePublicKey(receivedMessageOG.PublicKey) + if err != nil { + walletExt.Logger().Error("OG: Failed to deserialize public key", log.ErrKey, err) + handleError(conn, walletExt.Logger(), fmt.Errorf("failed to deserialize public key: %w", err)) + return + } + + // Step 3: 
Deserialize the attestation report + var receivedAttestation tencommon.AttestationReport + if err := json.Unmarshal(receivedMessageOG.Attestation, &receivedAttestation); err != nil { + handleError(conn, walletExt.Logger(), fmt.Errorf("error unmarshaling attestation report: %w", err)) + return + } + + // Step 4: Verify the attestation report + verifiedData, err := keymanager.VerifyReport(&receivedAttestation) + if err != nil { + walletExt.Logger().Error("OG: Failed to verify attestation report", log.ErrKey, err) + handleError(conn, walletExt.Logger(), fmt.Errorf("failed to verify attestation report: %w", err)) + return + } + + // Hash the received public key bytes + pubKeyHash := sha256.Sum256(receivedMessageOG.PublicKey) + + // Only compare the first 32 bytes since verifiedData is padded to 64 bytes + verifiedDataTruncated := verifiedData[:32] + if bytes.Equal(verifiedDataTruncated, pubKeyHash[:]) { + walletExt.Logger().Info("OG: Public keys match") + } else { + walletExt.Logger().Error("OG: Public keys do not match") + } + + // Step 5 Encrypt the encryption key using the received public key + encryptedKeyOG, err := keymanager.EncryptWithPublicKey(walletExt.Storage.GetEncryptionKey(), receivedPubKey) + if err != nil { + walletExt.Logger().Error("OG: Encryption failed", log.ErrKey, err) + handleError(conn, walletExt.Logger(), fmt.Errorf("encryption failed: %w", err)) + return + } + + // Step 6: Encode the encrypted encryption key to Base64 + encodedEncryptedKeyOG := keymanager.EncodeBase64(encryptedKeyOG) + + // Step 7: Create the response message containing the encrypted key + messageOG := keymanager.KeyExchangeResponse{ + EncryptedKey: encodedEncryptedKeyOG, + } + + // Step 8: Serialize the response message to JSON and send it back to the requester + messageBytesOG, err := json.Marshal(messageOG) + if err != nil { + walletExt.Logger().Error("OG: Failed to serialize response message", log.ErrKey, err) + handleError(conn, walletExt.Logger(), fmt.Errorf("failed to 
serialize response message: %w", err)) + return + } + walletExt.Logger().Info("Shared encrypted key with another gateway enclave") + err = conn.WriteResponse(messageBytesOG) + if err != nil { + walletExt.Logger().Error("error writing response", log.ErrKey, err) + } +} diff --git a/tools/walletextension/keymanager/keymanager.go b/tools/walletextension/keymanager/keymanager.go new file mode 100644 index 0000000000..4f94681f61 --- /dev/null +++ b/tools/walletextension/keymanager/keymanager.go @@ -0,0 +1,327 @@ +package keymanager + +import ( + "bytes" + "crypto/rand" + "crypto/rsa" + "crypto/sha256" + "crypto/x509" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "net/http" + "path/filepath" + + "github.com/edgelesssys/ego/enclave" + gethcommon "github.com/ethereum/go-ethereum/common" + gethlog "github.com/ethereum/go-ethereum/log" + tencommon "github.com/ten-protocol/go-ten/go/common" + "github.com/ten-protocol/go-ten/go/common/log" + "github.com/ten-protocol/go-ten/go/enclave/core/egoutils" + "github.com/ten-protocol/go-ten/tools/walletextension/common" +) + +const ( + dataDir = "/data" + RSAKeySize = 2048 +) + +var encryptionKeyFile = filepath.Join(dataDir, "encryption-key.json") + +// KeyExchangeRequest represents the structure of the data sent from KeyRequester to KeyProvider +type KeyExchangeRequest struct { + PublicKey []byte `json:"public_key"` + Attestation []byte `json:"attestation"` +} + +// KeyExchangeResponse represents the structure of the data sent from KeyProvider to KeyRequester +type KeyExchangeResponse struct { + EncryptedKey string `json:"encrypted_key"` // Base64 encoded encrypted encryption key +} + +// GetEncryptionKey returns encryption key for the database +// 1.) If we use sqlite database we don't need to do anything since sqlite does not need encryption and is usually running in dev environments / for testing +// 2.) We need to check if the key is already sealed and unseal it if so +// 3.) 
If there is a URL to exchange the key with another enclave we need to get the key from there and seal it on this enclave +// 4.) If the key is not sealed and we don't have the URL to exchange the key we need to generate a new one and seal it +func GetEncryptionKey(config common.Config, logger gethlog.Logger) ([]byte, error) { + // 1.) check if we are using sqlite database and no encryption key needed + if config.DBType == "sqlite" { + logger.Info("using sqlite database, no encryption key needed - exiting key exchange process") + return nil, nil + } + + var encryptionKey []byte + + // 2.) Check if we have a sealed encryption key and try to unseal it + encryptionKey, found, err := tryUnsealKey(encryptionKeyFile, config.InsideEnclave) + if err != nil { + logger.Info("unable to unseal encryption key", log.ErrKey, err) + } + // If we found a sealed key we can return it + if found { + logger.Info("found sealed encryption key") + return encryptionKey, nil + } + + // 3.) We have to exchange the key with another enclave if we have a key exchange url set + if config.KeyExchangeURL != "" { + encryptionKey, err = HandleKeyExchange(config, logger) + if err != nil { + logger.Crit("unable to exchange key", log.ErrKey, err) + } else { + logger.Info("successfully exchanged key with another enclave") + } + } + + // 4.) 
If we don't have a key we need to generate a new one + if len(encryptionKey) == 0 { + encryptionKey, err = common.GenerateRandomKey() + if err != nil { + logger.Crit("unable to generate random encryption key", log.ErrKey, err) + return nil, err + } else { + logger.Info("Successfully generated random encryption key") + } + } + + // Seal the key that we generated / got from the key exchange from another enclave + err = trySealKey(encryptionKey, encryptionKeyFile, config.InsideEnclave) + if err != nil { + logger.Crit("unable to seal encryption key", log.ErrKey, err) + return nil, err + } + logger.Info("sealed new encryption key") + + return encryptionKey, nil +} + +// tryUnsealKey attempts to unseal an encryption key from disk +// Returns (key, found, error) +func tryUnsealKey(keyPath string, isEnclave bool) ([]byte, bool, error) { + // Only attempt unsealing if we're in an SGX enclave + if !isEnclave { + return nil, false, nil + } + + // Read the key and unseal if possible + data, err := egoutils.ReadAndUnseal(keyPath) + if err != nil { + return nil, false, err + } + + return data, true, nil +} + +// trySealKey attempts to seal an encryption key to disk +// Only seals if running in an SGX enclave +func trySealKey(key []byte, keyPath string, isEnclave bool) error { + // Only attempt sealing if we're in an SGX enclave + if !isEnclave { + return nil + } + + // Seal and persist the key to /data/encryption.key + if err := egoutils.SealAndPersist(string(key), keyPath, true); err != nil { + return fmt.Errorf("failed to seal and persist key: %w", err) + } + return nil +} + +// HandleKeyExchange handles the key exchange process from KeyRequester side. 
+func HandleKeyExchange(config common.Config, logger gethlog.Logger) ([]byte, error) { + // Step 1: Generate RSA key pair + privkey, err := GenerateKeyPair(RSAKeySize) + if err != nil { + logger.Error("KeyRequester: Unable to generate RSA key pair", "error", err) + return nil, fmt.Errorf("unable to generate RSA key pair: %w", err) + } + pubkey := &privkey.PublicKey + logger.Info("KeyRequester: Generated RSA key pair for key exchange") + + // Step 2: Serialize and encode the public key (needed for sending it over the network) + serializedPubKey, err := SerializePublicKey(pubkey) + if err != nil { + logger.Error("KeyRequester: Failed to serialize public key", "error", err) + return nil, fmt.Errorf("failed to serialize public key: %w", err) + } + + // Step 4: Get the attestation report + // Hash the serialized public key + pubKeyHash := sha256.Sum256(serializedPubKey) + attestationReport, err := GetReport(pubKeyHash[:]) + if err != nil { + logger.Error("KeyRequester: Failed to get attestation report", "error", err) + return nil, fmt.Errorf("failed to get attestation report: %w", err) + } + + marshalledAttestation, err := json.Marshal(attestationReport) + if err != nil { + logger.Crit("unable to marshal attestation report", log.ErrKey, err) + return nil, err + } + + // Step 6: Create the message to send (PublicKey and Attestation) + messageRequester := KeyExchangeRequest{ + PublicKey: serializedPubKey, + Attestation: marshalledAttestation, + } + + // Step 7: Serialize the message to JSON for transmission + messageBytesRequester, err := json.Marshal(messageRequester) + if err != nil { + logger.Error("KeyRequester: Failed to serialize message", "error", err) + return nil, fmt.Errorf("failed to serialize message: %w", err) + } + + // Step 8: Send the message to KeyProvider via HTTP POST + resp, err := http.Post(config.KeyExchangeURL+"/v1"+common.PathKeyExchange, "application/json", bytes.NewBuffer(messageBytesRequester)) + if err != nil { + logger.Error("KeyRequester: 
Failed to send message to KeyProvider", "error", err) + return nil, fmt.Errorf("failed to send message to KeyProvider: %w", err) + } + defer resp.Body.Close() + + // Step 9: Read the response body + bodyBytes, err := io.ReadAll(resp.Body) + if err != nil { + logger.Error("KeyRequester: Failed to read response body from KeyProvider", "error", err) + return nil, fmt.Errorf("failed to read response body from KeyProvider: %w", err) + } + + // Check the HTTP response status + if resp.StatusCode != http.StatusOK { + logger.Error("KeyRequester: Received non-OK response from KeyProvider", "status", resp.Status, "body", string(bodyBytes)) + return nil, fmt.Errorf("received non-OK response from KeyProvider: %s", resp.Status) + } + + // Step 10: Deserialize the received message + var receivedMessageRequester KeyExchangeResponse + err = json.Unmarshal(bodyBytes, &receivedMessageRequester) + if err != nil { + logger.Error("KeyRequester: Failed to deserialize received message", "error", err) + return nil, fmt.Errorf("failed to deserialize received message: %w", err) + } + + // Step 11: Extract and decode the encrypted encryption key from Base64 + encryptedKeyBytesRequester, err := DecodeBase64(receivedMessageRequester.EncryptedKey) + if err != nil { + logger.Error("KeyRequester: Failed to decode encrypted encryption key", "error", err) + return nil, fmt.Errorf("failed to decode encrypted encryption key: %w", err) + } + + // Step 12: Decrypt the encryption key using KeyRequester's private key + decryptedKeyRequester, err := DecryptWithPrivateKey(encryptedKeyBytesRequester, privkey) + if err != nil { + logger.Error("KeyRequester: Decryption failed", "error", err) + return nil, fmt.Errorf("decryption failed: %w", err) + } + + return decryptedKeyRequester, nil +} + +// GetReport returns the attestation report for the given public key +func GetReport(pubKey []byte) (*tencommon.AttestationReport, error) { + report, err := enclave.GetRemoteReport(pubKey) + if err != nil { + return nil, 
err + } + return &tencommon.AttestationReport{ + Report: report, + PubKey: pubKey, + EnclaveID: gethcommon.Address{}, // this field is not needed for the key exchange + HostAddress: "", // this field is not needed for the key exchange + }, nil +} + +// VerifyReport verifies the attestation report and returns the embedded data +func VerifyReport(att *tencommon.AttestationReport) ([]byte, error) { + remoteReport, err := enclave.VerifyRemoteReport(att.Report) + if err != nil { + return []byte{}, err + } + return remoteReport.Data, nil +} + +// GenerateKeyPair generates an RSA key pair of a given bit size. +func GenerateKeyPair(bits int) (*rsa.PrivateKey, error) { + privkey, err := rsa.GenerateKey(rand.Reader, bits) + if err != nil { + return nil, err + } + return privkey, nil +} + +// SerializePublicKey serializes an RSA public key to DER-encoded bytes. +func SerializePublicKey(pubkey *rsa.PublicKey) ([]byte, error) { + pubkeyBytes, err := x509.MarshalPKIXPublicKey(pubkey) + if err != nil { + return nil, err + } + return pubkeyBytes, nil +} + +// DeserializePublicKey deserializes DER-encoded bytes to an RSA public key. +func DeserializePublicKey(data []byte) (*rsa.PublicKey, error) { + pubInterface, err := x509.ParsePKIXPublicKey(data) + if err != nil { + return nil, err + } + pubkey, ok := pubInterface.(*rsa.PublicKey) + if !ok { + return nil, fmt.Errorf("not RSA public key") + } + return pubkey, nil +} + +// EncryptWithPublicKey encrypts data using RSA-OAEP and a public key. +func EncryptWithPublicKey(msg []byte, pubkey *rsa.PublicKey) ([]byte, error) { + label := []byte("") // OAEP label is optional + hash := sha256.New() + ciphertext, err := rsa.EncryptOAEP(hash, rand.Reader, pubkey, msg, label) + if err != nil { + return nil, err + } + return ciphertext, nil +} + +// DecryptWithPrivateKey decrypts data using RSA-OAEP and a private key. 
+func DecryptWithPrivateKey(ciphertext []byte, privkey *rsa.PrivateKey) ([]byte, error) { + label := []byte("") // OAEP label is optional + hash := sha256.New() + plaintext, err := rsa.DecryptOAEP(hash, rand.Reader, privkey, ciphertext, label) + if err != nil { + return nil, err + } + return plaintext, nil +} + +// EncodeBase64 encodes data to a Base64 string. +func EncodeBase64(data []byte) string { + return base64.StdEncoding.EncodeToString(data) +} + +// DecodeBase64 decodes a Base64 string to data. +func DecodeBase64(s string) ([]byte, error) { + data, err := base64.StdEncoding.DecodeString(s) + if err != nil { + return nil, err + } + return data, nil +} + +// SerializeAttestationReport serializes an AttestationReport to JSON bytes. +func SerializeAttestationReport(report *tencommon.AttestationReport) ([]byte, error) { + return json.Marshal(report) +} + +// DeserializeAttestationReport deserializes JSON bytes to an AttestationReport. +func DeserializeAttestationReport(data []byte) (*tencommon.AttestationReport, error) { + var report tencommon.AttestationReport + err := json.Unmarshal(data, &report) + if err != nil { + return nil, err + } + return &report, nil +} diff --git a/tools/walletextension/main/cli.go b/tools/walletextension/main/cli.go index 1d604af99f..60aa3897df 100644 --- a/tools/walletextension/main/cli.go +++ b/tools/walletextension/main/cli.go @@ -73,9 +73,13 @@ const ( rateLimitMaxConcurrentRequestsDefault = 3 rateLimitMaxConcurrentRequestsUsage = "Number of concurrent requests allowed per user. Default: 3" - debugFlagName = "debug" - debugFlagDefault = false - debugFlagUsage = "Flag to enable debug mode" + insideEnclaveFlagName = "insideEnclave" + insideEnclaveFlagDefault = false + insideEnclaveFlagUsage = "Flag to indicate if the program is running inside an enclave. Default: false" + + keyExchangeURLFlagName = "keyExchangeURL" + keyExchangeURLFlagDefault = "" + keyExchangeURLFlagUsage = "URL to exchange the key with another enclave. 
Default: empty" ) func parseCLIArgs() wecommon.Config { @@ -95,7 +99,8 @@ func parseCLIArgs() wecommon.Config { rateLimitUserComputeTime := flag.Duration(rateLimitUserComputeTimeName, rateLimitUserComputeTimeDefault, rateLimitUserComputeTimeUsage) rateLimitWindow := flag.Duration(rateLimitWindowName, rateLimitWindowDefault, rateLimitWindowUsage) rateLimitMaxConcurrentRequests := flag.Int(rateLimitMaxConcurrentRequestsName, rateLimitMaxConcurrentRequestsDefault, rateLimitMaxConcurrentRequestsUsage) - debugFlag := flag.Bool(debugFlagName, debugFlagDefault, debugFlagUsage) + insideEnclaveFlag := flag.Bool(insideEnclaveFlagName, insideEnclaveFlagDefault, insideEnclaveFlagUsage) + keyExchangeURL := flag.String(keyExchangeURLFlagName, keyExchangeURLFlagDefault, keyExchangeURLFlagUsage) flag.Parse() return wecommon.Config{ @@ -114,6 +119,7 @@ func parseCLIArgs() wecommon.Config { RateLimitUserComputeTime: *rateLimitUserComputeTime, RateLimitWindow: *rateLimitWindow, RateLimitMaxConcurrentRequests: *rateLimitMaxConcurrentRequests, - Debug: *debugFlag, + InsideEnclave: *insideEnclaveFlag, + KeyExchangeURL: *keyExchangeURL, } } diff --git a/tools/walletextension/main/enclave.json b/tools/walletextension/main/enclave.json index c4bf5eb33b..9ef93820db 100644 --- a/tools/walletextension/main/enclave.json +++ b/tools/walletextension/main/enclave.json @@ -6,6 +6,14 @@ "executableHeap": true, "productID": 1, "securityVersion": 1, + "mounts": [ + { + "source": "/data", + "target": "/data", + "type": "hostfs", + "readOnly": false + } + ], "env": [ { "name": "TESTMODE", diff --git a/tools/walletextension/services/wallet_extension.go b/tools/walletextension/services/wallet_extension.go index 4fa34a3fb2..dc79c30664 100644 --- a/tools/walletextension/services/wallet_extension.go +++ b/tools/walletextension/services/wallet_extension.go @@ -236,7 +236,7 @@ func (w *Services) GetTenNodeHealthStatus() (bool, error) { audit(w, "Getting TEN node health status") res, err := 
WithPlainRPCConnection[bool](context.Background(), w.BackendRPC, func(client *gethrpc.Client) (*bool, error) { res, err := obsclient.NewObsClient(client).Health() - return &res, err + return &res.OverallHealth, err }) return *res, err } diff --git a/tools/walletextension/storage/database/cosmosdb/cosmosdb.go b/tools/walletextension/storage/database/cosmosdb/cosmosdb.go index 3260ab95cf..f87d9662ce 100644 --- a/tools/walletextension/storage/database/cosmosdb/cosmosdb.go +++ b/tools/walletextension/storage/database/cosmosdb/cosmosdb.go @@ -281,3 +281,8 @@ func (c *CosmosDB) dbKey(userID []byte) (string, azcosmos.PartitionKey) { partitionKey := azcosmos.NewPartitionKeyString(keyString) return keyString, partitionKey } + +// GetEncryptionKey returns the encryption key used by the CosmosDB instance +func (c *CosmosDB) GetEncryptionKey() []byte { + return c.encryptor.GetKey() +} diff --git a/tools/walletextension/storage/database/sqlite/sqlite.go b/tools/walletextension/storage/database/sqlite/sqlite.go index 07c22c069b..832fc1117c 100644 --- a/tools/walletextension/storage/database/sqlite/sqlite.go +++ b/tools/walletextension/storage/database/sqlite/sqlite.go @@ -246,6 +246,11 @@ func createOrLoad(dbPath string) (string, error) { return dbPath, nil } +// GetEncryptionKey returns nil for SQLite as it doesn't use encryption +func (s *SqliteDB) GetEncryptionKey() []byte { + return nil +} + func (s *SqliteDB) withTx(fn func(*sql.Tx) error) error { tx, err := s.db.Begin() if err != nil { diff --git a/tools/walletextension/storage/storage.go b/tools/walletextension/storage/storage.go index b15f8b8fc1..ca39c628dc 100644 --- a/tools/walletextension/storage/storage.go +++ b/tools/walletextension/storage/storage.go @@ -20,6 +20,7 @@ type UserStorage interface { ActivateSessionKey(userID []byte, active bool) error RemoveSessionKey(userID []byte) error GetUser(userID []byte) (*common.GWUser, error) + GetEncryptionKey() []byte } func New(dbType, dbConnectionURL, dbPath string, 
randomKey []byte, logger gethlog.Logger) (UserStorage, error) { diff --git a/tools/walletextension/storage/storage_with_cache.go b/tools/walletextension/storage/storage_with_cache.go index 9dae72ba85..8f5fca5a07 100644 --- a/tools/walletextension/storage/storage_with_cache.go +++ b/tools/walletextension/storage/storage_with_cache.go @@ -85,3 +85,8 @@ func (s *UserStorageWithCache) GetUser(userID []byte) (*wecommon.GWUser, error) return s.storage.GetUser(userID) }) } + +// GetEncryptionKey delegates to the underlying storage +func (s *UserStorageWithCache) GetEncryptionKey() []byte { + return s.storage.GetEncryptionKey() +} diff --git a/tools/walletextension/walletextension_container.go b/tools/walletextension/walletextension_container.go index f6a95f8c7a..e9c58efec7 100644 --- a/tools/walletextension/walletextension_container.go +++ b/tools/walletextension/walletextension_container.go @@ -19,6 +19,7 @@ import ( "github.com/ten-protocol/go-ten/go/common/stopcontrol" gethrpc "github.com/ten-protocol/go-ten/lib/gethfork/rpc" wecommon "github.com/ten-protocol/go-ten/tools/walletextension/common" + "github.com/ten-protocol/go-ten/tools/walletextension/keymanager" "github.com/ten-protocol/go-ten/tools/walletextension/storage" ) @@ -35,14 +36,10 @@ func NewContainerFromConfig(config wecommon.Config, logger gethlog.Logger) *Cont hostRPCBindAddrWS := wecommon.WSProtocol + config.NodeRPCWebsocketAddress hostRPCBindAddrHTTP := wecommon.HTTPProtocol + config.NodeRPCHTTPAddress - // Database encryption key handling - // TODO: Check if encryption key is already sealed and unseal it and generate new one if not (part of the next PR) - // TODO: We should have a mechanism to get the key from an enclave that already runs (part of the next PR) - // TODO: Move this to a separate file along with key exchange logic (part of the next PR) - - encryptionKey, err := wecommon.GenerateRandomKey() + // get the encryption key (method is determined by the config) + encryptionKey, err := 
keymanager.GetEncryptionKey(config, logger) if err != nil { - logger.Crit("unable to generate random encryption key", log.ErrKey, err) + logger.Crit("unable to get encryption key", log.ErrKey, err) os.Exit(1) }