diff --git a/.github/workflows/manual-deploy-obscuro-gateway.yml b/.github/workflows/manual-deploy-obscuro-gateway.yml index 9b9c7bb1cd..4fee72c3c5 100644 --- a/.github/workflows/manual-deploy-obscuro-gateway.yml +++ b/.github/workflows/manual-deploy-obscuro-gateway.yml @@ -150,7 +150,7 @@ jobs: - name: Build and Push Docker Image run: | - DOCKER_BUILDKIT=1 docker build --build-arg TESTNET_TYPE=${{ github.event.inputs.testnet_type }} -t ${{ env.DOCKER_BUILD_TAG_GATEWAY }} -f ./tools/walletextension/Dockerfile . + DOCKER_BUILDKIT=1 docker build --build-arg TESTNET_TYPE=${{ github.event.inputs.testnet_type }} -t ${{ env.DOCKER_BUILD_TAG_GATEWAY }} -f ./tools/walletextension/enclave.Dockerfile . docker push ${{ env.DOCKER_BUILD_TAG_GATEWAY }} # This will fail some deletions due to resource dependencies ( ie. you must first delete the vm before deleting the disk) @@ -191,7 +191,7 @@ jobs: --public-ip-address "${{ env.PUBLIC_IP }}" \ --tags deploygroup="${{ env.DEPLOY_GROUP }}" ${{ env.AZURE_DEPLOY_GROUP_GATEWAY }}=true \ --vnet-name "${{ env.VNET_NAME }}" --subnet "${{ env.SUBNET_NAME }}" \ - --size Standard_D4_v5 --image Canonical:0001-com-ubuntu-server-focal:20_04-lts-gen2:latest \ + --size Standard_DC2s_v3 --storage-sku StandardSSD_LRS --image ObscuroConfUbuntu \ --authentication-type password - name: "Open TEN node-${{ matrix.host_id }} ports on Azure" @@ -211,15 +211,38 @@ jobs: inlineScript: | az vm run-command invoke -g Testnet -n "${{ env.VM_NAME }}" \ --command-id RunShellScript \ - --scripts 'mkdir -p /home/obscuro \ - && sudo apt-get update \ - && sudo apt-get install -y gcc \ - && sudo snap refresh && sudo snap install --channel=1.18 go --classic \ - && curl -fsSL https://get.docker.com -o get-docker.sh && sh ./get-docker.sh \ - && git clone --depth 1 -b ${{ env.BRANCH_NAME }} https://github.com/ten-protocol/go-ten.git /home/obscuro/go-obscuro \ - && docker network create --driver bridge node_network || true \ - && mkdir -p /home/obscuro/promtail \ - && 
echo " + --scripts ' + set -e + + mkdir -p /home/obscuro + + # Wait for dpkg lock to be released + while sudo fuser /var/lib/dpkg/lock-frontend >/dev/null 2>&1; do + echo "Waiting for dpkg lock to be released..." + sleep 1 + done + + # Proceed with package installations + sudo apt-get update + sudo apt-get install -y gcc + sudo snap refresh && sudo snap install --channel=1.18 go --classic + + # Wait again before running get-docker.sh + while sudo fuser /var/lib/dpkg/lock-frontend >/dev/null 2>&1; do + echo "Waiting for dpkg lock to be released before installing Docker..." + sleep 1 + done + + curl -fsSL https://get.docker.com -o get-docker.sh && sh ./get-docker.sh + git clone --depth 1 -b "${{ env.BRANCH_NAME }}" https://github.com/ten-protocol/go-ten.git /home/obscuro/go-obscuro + docker network create --driver bridge node_network || true + cd /home/obscuro/go-obscuro/ + + # Promtail Integration Start + mkdir -p /home/obscuro/promtail + + cat <<EOF > /home/obscuro/promtail/promtail-config.yaml + server: http_listen_port: 9080 grpc_listen_port: 0 @@ -228,14 +251,14 @@ jobs: filename: /tmp/positions.yaml clients: - - url: ${{ vars.METRICS_URI }} + - url: "${{ vars.METRICS_URI }}" batchwait: 3s batchsize: 1048576 tls_config: insecure_skip_verify: true basic_auth: - username: ${{ secrets.LOKI_USER }} - password: ${{ secrets.LOKI_PASSWORD }} + username: "${{ secrets.LOKI_USER }}" + password: "${{ secrets.LOKI_PASSWORD }}" scrape_configs: - job_name: flog_scrape @@ -243,31 +266,35 @@ jobs: - host: unix:///var/run/docker.sock refresh_interval: 5s relabel_configs: - - source_labels: [\"__meta_docker_container_name\"] - regex: \"/(.*)\" - target_label: \"container\" - - source_labels: [\"__meta_docker_container_log_stream\"] - target_label: \"logstream\" - - source_labels: [\"__meta_docker_container_label_logging_jobname\"] - target_label: \"job\" - - replacement: ${{ env.VM_NAME }} + - source_labels: ["__meta_docker_container_name"] + regex: "/(.*)" + target_label: "container" 
+ - source_labels: ["__meta_docker_container_log_stream"] + target_label: "logstream" + - source_labels: ["__meta_docker_container_label_logging_jobname"] + target_label: "job" + - replacement: "${{ env.VM_NAME }}" target_label: "node_name" - " > /home/obscuro/promtail/promtail-config.yaml \ - && docker run -d --name promtail \ - --network node_network \ - -e HOSTNAME=${{ env.VM_NAME }} \ - -v /var/log:/var/log \ - -v /home/obscuro/promtail:/etc/promtail \ - -v /var/lib/docker/containers:/var/lib/docker/containers:ro \ - -v /var/run/docker.sock:/var/run/docker.sock \ - grafana/promtail:latest \ - -config.file=/etc/promtail/promtail-config.yaml -config.expand-env=true \ - && cd /home/obscuro/go-obscuro/ \ - && mkdir -p /home/obscuro/promtail \ - && docker run -d -p 80:80 -p 81:81 --name "${{ env.VM_NAME }}" \ - -e OBSCURO_GATEWAY_VERSION="${{ GITHUB.RUN_NUMBER }}-${{ GITHUB.SHA }}" \ - --log-opt max-file=3 --log-opt max-size=10m \ - ${{ env.DOCKER_BUILD_TAG_GATEWAY }} \ - -host=0.0.0.0 -port=80 -portWS=81 -nodeHost=${{ env.L2_RPC_URL_VALIDATOR }} -verbose=true \ - -logPath=sys_out -dbType=mariaDB -dbConnectionURL="obscurouser:${{ secrets.OBSCURO_GATEWAY_MARIADB_USER_PWD }}@tcp(obscurogateway-mariadb-${{ github.event.inputs.testnet_type }}.uksouth.cloudapp.azure.com:3306)/ogdb" \ - -rateLimitUserComputeTime=${{ env.GATEWAY_RATE_LIMIT_USER_COMPUTE_TIME }} -rateLimitWindow=${{ env.GATEWAY_RATE_LIMIT_WINDOW }} -maxConcurrentRequestsPerUser=${{ env.GATEWAY_MAX_CONCURRENT_REQUESTS_PER_USER }} ' + EOF + + docker run -d --name promtail \ + --network node_network \ + -e HOSTNAME="${{ env.VM_NAME }}" \ + -v /var/log:/var/log \ + -v /home/obscuro/promtail:/etc/promtail \ + -v /var/lib/docker/containers:/var/lib/docker/containers:ro \ + -v /var/run/docker.sock:/var/run/docker.sock \ + grafana/promtail:latest \ + -config.file=/etc/promtail/promtail-config.yaml -config.expand-env=true + # Promtail Integration End + + # Start Ten Gateway Container + docker run -d -p 80:80 -p 81:81 
--name "${{ env.VM_NAME }}" \ + --device /dev/sgx_enclave --device /dev/sgx_provision \ + -e OBSCURO_GATEWAY_VERSION="${{ github.run_number }}-${{ github.sha }}" \ + -e OE_SIMULATION=0 \ + "${{ env.DOCKER_BUILD_TAG_GATEWAY }}" \ + ego run /home/ten/go-ten/tools/walletextension/main/main \ + -host=0.0.0.0 -port=80 -portWS=81 -nodeHost="${{ env.L2_RPC_URL_VALIDATOR }}" -verbose=true \ + -logPath=sys_out -dbType=mariaDB -dbConnectionURL="obscurouser:${{ secrets.OBSCURO_GATEWAY_MARIADB_USER_PWD }}@tcp(obscurogateway-mariadb-${{ github.event.inputs.testnet_type }}.uksouth.cloudapp.azure.com:3306)/ogdb" \ + -rateLimitUserComputeTime="${{ env.GATEWAY_RATE_LIMIT_USER_COMPUTE_TIME }}" -rateLimitWindow="${{ env.GATEWAY_RATE_LIMIT_WINDOW }}" -maxConcurrentRequestsPerUser="${{ env.GATEWAY_MAX_CONCURRENT_REQUESTS_PER_USER }}" ' diff --git a/go/common/gethapi/transaction_args.go b/go/common/gethapi/transaction_args.go index 79bb729704..10e7dd5618 100644 --- a/go/common/gethapi/transaction_args.go +++ b/go/common/gethapi/transaction_args.go @@ -6,6 +6,7 @@ import ( "errors" "fmt" "math/big" + "strings" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" @@ -39,6 +40,63 @@ type TransactionArgs struct { ChainID *hexutil.Big `json:"chainId,omitempty"` } +// String returns a human-readable representation of the transaction arguments. 
+// This is necessary for printing the transaction arguments in SGX mode +func (args TransactionArgs) String() string { + var parts []string + if args.From != nil { + parts = append(parts, fmt.Sprintf("From:%s", args.From.Hex())) + } + if args.To != nil { + parts = append(parts, fmt.Sprintf("To:%s", args.To.Hex())) + } + if args.Gas != nil { + parts = append(parts, fmt.Sprintf("Gas:%d", *args.Gas)) + } + if args.GasPrice != nil { + parts = append(parts, fmt.Sprintf("GasPrice:%s", args.GasPrice.String())) + } + if args.MaxFeePerGas != nil { + parts = append(parts, fmt.Sprintf("MaxFeePerGas:%s", args.MaxFeePerGas.String())) + } + if args.MaxPriorityFeePerGas != nil { + parts = append(parts, fmt.Sprintf("MaxPriorityFeePerGas:%s", args.MaxPriorityFeePerGas.String())) + } + if args.Value != nil { + parts = append(parts, fmt.Sprintf("Value:%s", args.Value.String())) + } + if args.Nonce != nil { + parts = append(parts, fmt.Sprintf("Nonce:%d", *args.Nonce)) + } + if args.Data != nil { + parts = append(parts, fmt.Sprintf("Data:0x%x", *args.Data)) + } + if args.Input != nil { + parts = append(parts, fmt.Sprintf("Input:0x%x", *args.Input)) + } + if args.AccessList != nil { + parts = append(parts, fmt.Sprintf("AccessList:%s", accessListToString(*args.AccessList))) + } + if args.ChainID != nil { + parts = append(parts, fmt.Sprintf("ChainID:%s", args.ChainID.String())) + } + + return fmt.Sprintf("TransactionArgs{%s}", strings.Join(parts, " ")) +} + +// Helper function to convert AccessList to string +func accessListToString(list types.AccessList) string { + var accessListParts []string + for _, tuple := range list { + storageKeys := make([]string, len(tuple.StorageKeys)) + for i, key := range tuple.StorageKeys { + storageKeys[i] = key.Hex() + } + accessListParts = append(accessListParts, fmt.Sprintf("{%s: [%s]}", tuple.Address.Hex(), strings.Join(storageKeys, ", "))) + } + return fmt.Sprintf("[%s]", strings.Join(accessListParts, ", ")) +} + // from retrieves the transaction 
sender address. func (args *TransactionArgs) from() common.Address { if args.From == nil { diff --git a/lib/gethfork/rpc/types.go b/lib/gethfork/rpc/types.go index 34a1451dea..d777b00784 100644 --- a/lib/gethfork/rpc/types.go +++ b/lib/gethfork/rpc/types.go @@ -220,6 +220,9 @@ func (bnh *BlockNumberOrHash) Number() (BlockNumber, bool) { } func (bnh *BlockNumberOrHash) String() string { + if bnh == nil { + return "nil" + } if bnh.BlockNumber != nil { return strconv.Itoa(int(*bnh.BlockNumber)) } diff --git a/tools/walletextension/enclave.Dockerfile b/tools/walletextension/enclave.Dockerfile new file mode 100644 index 0000000000..e0a8b77504 --- /dev/null +++ b/tools/walletextension/enclave.Dockerfile @@ -0,0 +1,48 @@ +# Build Stages: +# build-base = downloads modules and prepares the directory for compilation. Based on the ego-dev image +# build-enclave = copies over the actual source code of the project and builds it using a compiler cache +# deploy = copies over only the enclave executable without the source +# in a lightweight base image specialized for deployment + +# Final container folder structure: +# /home/ten/go-ten/tools/walletextension/main contains the executable for the enclave + + +FROM ghcr.io/edgelesssys/ego-dev:v1.5.3 AS build-base + +# setup container data structure +RUN mkdir -p /home/ten/go-ten + +# Ensures container layer caching when dependencies are not changed +WORKDIR /home/ten/go-ten +COPY go.mod . +COPY go.sum . +RUN ego-go mod download + + +# Trigger new build stage for compiling the enclave +FROM build-base AS build-enclave +COPY . . + +WORKDIR /home/ten/go-ten/tools/walletextension/main + +# Build the enclave using the cross image build cache. 
+RUN --mount=type=cache,target=/root/.cache/go-build \ + ego-go build + +# Sign the enclave executable +RUN ego sign enclave.json + + +# Trigger a new build stage and use the smaller ego version: +FROM ghcr.io/edgelesssys/ego-deploy:v1.5.3 + +# Copy just the binary for the enclave into this build stage +COPY --from=build-enclave \ + /home/ten/go-ten/tools/walletextension/main /home/ten/go-ten/tools/walletextension/main + +WORKDIR /home/ten/go-ten/tools/walletextension/main + +# simulation mode is ACTIVE by default +ENV OE_SIMULATION=1 +EXPOSE 3000 \ No newline at end of file diff --git a/tools/walletextension/main/enclave.json b/tools/walletextension/main/enclave.json new file mode 100644 index 0000000000..24304b82c5 --- /dev/null +++ b/tools/walletextension/main/enclave.json @@ -0,0 +1,29 @@ +{ + "exe": "main", + "key": "testnet.pem", + "debug": true, + "heapSize": 4096, + "executableHeap": true, + "productID": 1, + "securityVersion": 1, + "env": [ + { + "name": "TESTMODE", + "value": "false" + } + ], + "files": [ + { + "source": "../storage/database/mariadb/001_init.sql", + "target": "/home/ten/go-ten/tools/walletextension/storage/database/mariadb/001_init.sql" + }, + { + "source": "../storage/database/mariadb/002_store_incoming_txs.sql", + "target": "/home/ten/go-ten/tools/walletextension/storage/database/mariadb/002_store_incoming_txs.sql" + }, + { + "source": "../storage/database/mariadb/003_add_signature_type.sql", + "target": "/home/ten/go-ten/tools/walletextension/storage/database/mariadb/003_add_signature_type.sql" + } + ] +} \ No newline at end of file diff --git a/tools/walletextension/main/entry.sh b/tools/walletextension/main/entry.sh new file mode 100755 index 0000000000..e4e0d747bc --- /dev/null +++ b/tools/walletextension/main/entry.sh @@ -0,0 +1,21 @@ +#!/bin/sh +set -e +# +# This script is the entry point for starting the enclave under a Docker container. +# It allows running SGX sdk using different parameters. 
+# + +# It's expected to be a link between the /dev/sgx_enclave Docker device and the container /dev/sgx/enclave +mkdir -p /dev/sgx +if [ ! -L /dev/sgx/enclave ]; then + ln -s /dev/sgx_enclave /dev/sgx/enclave +fi + +PCCS_URL=https://global.acccache.azure.net/sgx/certification/v4/ +echo "PCCS_URL: ${PCCS_URL}" + +apt-get install -qq libsgx-dcap-default-qpl + +printf 'PCCS_URL=%s\nUSE_SECURE_CERT=FALSE\n' "${PCCS_URL}" > /etc/sgx_default_qcnl.conf + +"$@" \ No newline at end of file diff --git a/tools/walletextension/rpcapi/utils.go b/tools/walletextension/rpcapi/utils.go index 88aac6e577..3ea03623ca 100644 --- a/tools/walletextension/rpcapi/utils.go +++ b/tools/walletextension/rpcapi/utils.go @@ -6,8 +6,12 @@ import ( "encoding/json" "errors" "fmt" + "reflect" + "strings" "time" + "github.com/status-im/keycard-go/hexutils" + "github.com/ten-protocol/go-ten/go/common/measure" "github.com/ten-protocol/go-ten/go/enclave/core" @@ -22,8 +26,6 @@ import ( "github.com/ten-protocol/go-ten/lib/gethfork/rpc" - "github.com/status-im/keycard-go/hexutils" - "github.com/ten-protocol/go-ten/tools/walletextension/cache" gethcommon "github.com/ethereum/go-ethereum/common" @@ -168,8 +170,7 @@ func ExecAuthRPC[R any](ctx context.Context, w *Services, cfg *ExecCfg, method s } return nil, rpcErr }) - - audit(w, "RPC call. 
uid=%s, method=%s args=%v result=%s error=%s time=%d", hexutils.BytesToHex(userID), method, args, SafeGenericToString(res), err, time.Since(requestStartTime).Milliseconds()) return res, err } @@ -342,3 +343,58 @@ func withPlainRPCConnection[R any](ctx context.Context, w *Services, execute fun defer returnConn(w.rpcHTTPConnPool, rpcClient, w.logger) return execute(rpcClient) } + +func SafeGenericToString[R any](r *R) string { + if r == nil { + return "nil" + } + + v := reflect.ValueOf(r).Elem() + t := v.Type() + + switch v.Kind() { + case reflect.Struct: + return structToString(v, t) + default: + return fmt.Sprintf("%v", v.Interface()) + } +} + +func structToString(v reflect.Value, t reflect.Type) string { + var parts []string + for i := 0; i < v.NumField(); i++ { + field := v.Field(i) + fieldType := t.Field(i) + fieldName := fieldType.Name + + if !fieldType.IsExported() { + parts = append(parts, fmt.Sprintf("%s: ", fieldName)) + continue + } + + fieldStr := fmt.Sprintf("%s: ", fieldName) + + switch field.Kind() { + case reflect.Ptr: + if field.IsNil() { + fieldStr += "nil" + } else { + fieldStr += fmt.Sprintf("%v", field.Elem().Interface()) + } + case reflect.Slice, reflect.Array: + if field.Len() > 10 { + fieldStr += fmt.Sprintf("%v (length: %d)", field.Slice(0, 10).Interface(), field.Len()) + } else { + fieldStr += fmt.Sprintf("%v", field.Interface()) + } + case reflect.Struct: + fieldStr += "{...}" // Avoid recursive calls for nested structs + default: + fieldStr += fmt.Sprintf("%v", field.Interface()) + } + + parts = append(parts, fieldStr) + } + + return fmt.Sprintf("%s{%s}", t.Name(), strings.Join(parts, ", ")) +}