diff --git a/go/enclave/components/rollup_consumer.go b/go/enclave/components/rollup_consumer.go
index 3d35cd4dda..6de403eda1 100644
--- a/go/enclave/components/rollup_consumer.go
+++ b/go/enclave/components/rollup_consumer.go
@@ -128,7 +128,6 @@ func (rc *rollupConsumerImpl) extractRollups(ctx context.Context, br *common.Blo
 			continue
 		}
 
-		println("fetching blobs with block: ", br.Block.Hash().Hex(), " and blobHashes: ", rollupHashes.BlobHashes[0].Hash.Hex())
 		blobs, err := rc.blobResolver.FetchBlobs(ctx, br.Block.Header(), rollupHashes.BlobHashes)
 		if err != nil {
 			rc.logger.Crit("could not fetch blobs consumer", log.ErrKey, err)
diff --git a/go/ethadapter/beacon_client.go b/go/ethadapter/beacon_client.go
index 61df1b4b30..4e7c65b88d 100644
--- a/go/ethadapter/beacon_client.go
+++ b/go/ethadapter/beacon_client.go
@@ -123,22 +123,23 @@ func (bc *BeaconHTTPClient) BeaconGenesis(ctx context.Context) (APIGenesisRespon
 	return genesisResp, nil
 }
 
-func (bc *BeaconHTTPClient) BeaconBlobSideCars(ctx context.Context, slot uint64, hashes []IndexedBlobHash) (APIGetBlobSidecarsResponse, error) {
+func (bc *BeaconHTTPClient) BeaconBlobSideCars(ctx context.Context, slot uint64, _ []IndexedBlobHash) (APIGetBlobSidecarsResponse, error) {
 	reqPath := path.Join(sidecarsMethodPrefix, strconv.FormatUint(slot, 10))
 	var reqQuery url.Values
 	//reqQuery = url.Values{}
 	//for i := range hashes {
 	//	reqQuery.Add("indices", strconv.FormatUint(hashes[i].Index, 10))
 	//}
 
-	var resp APIGetBlobSidecarsResponse
+	var allResp APIGetBlobSidecarsResponse
 
-	err := bc.request(ctx, &resp, reqPath, reqQuery)
+	err := bc.request(ctx, &allResp, reqPath, reqQuery)
 	if err != nil {
-		println("ERROR GETTING SIDECAR with hash: ", hashes[0].Hash.Hex(), " with err: ", err.Error())
+		println("ERROR FETCHING BLOBS: ", err.Error())
 		return APIGetBlobSidecarsResponse{}, err
 	}
-	return resp, nil
+
+	return allResp, nil
 }
 
 type ClientPool[T any] struct {
@@ -216,10 +217,8 @@ func (cl *L1BeaconClient) fetchSidecars(ctx context.Context, slot uint64, hashes
 	var errs []error
 	for i := 0; i < cl.pool.Len(); i++ {
 		f := cl.pool.Get()
-		println("FETCHING BeaconBlobSideCars with hashes: ", hashes[0].Hash.Hex())
 		resp, err := f.BeaconBlobSideCars(ctx, slot, hashes)
 		if err != nil {
-			println("ERROR FETCHING SIDECARS: ", err.Error())
 			cl.pool.MoveToNext()
 			errs = append(errs, err)
 		} else {
@@ -251,24 +250,24 @@ func (cl *L1BeaconClient) GetBlobSidecars(ctx context.Context, b *types.Header,
 		return nil, fmt.Errorf("failed to fetch blob sidecars for slot %v block %v: %w", slot, b, err)
 	}
 
-	apiscs := make([]*APIBlobSidecar, 0, len(hashes))
-	// filter and order by hashes
+	sidecars := make([]*APIBlobSidecar, 0, len(hashes))
+	// find the sidecars that match the provided versioned hashes
 	for _, h := range hashes {
-		for _, apisc := range resp.Data {
-			if h.Index == uint64(apisc.Index) {
-				apiscs = append(apiscs, apisc)
+		for _, sidecar := range resp.Data {
+			versionedHash := KZGToVersionedHash(kzg4844.Commitment(sidecar.KZGCommitment))
+			if h.Hash == versionedHash {
+				sidecars = append(sidecars, sidecar)
 				break
 			}
 		}
 	}
 
-	if len(hashes) != len(apiscs) {
-		fmt.Printf("expected %v sidecars but got %v", len(hashes), len(apiscs))
-		return nil, fmt.Errorf("expected %v sidecars but got %v", len(hashes), len(apiscs))
+	if len(hashes) != len(sidecars) {
+		return nil, fmt.Errorf("expected %v sidecars but got %v", len(hashes), len(sidecars))
 	}
 
 	bscs := make([]*BlobSidecar, 0, len(hashes))
-	for _, apisc := range apiscs {
+	for _, apisc := range sidecars {
 		bscs = append(bscs, apisc.BlobSidecar())
 	}
 
@@ -294,22 +293,7 @@ func blobsFromSidecars(blobSidecars []*BlobSidecar, hashes []IndexedBlobHash) ([
 
 	out := make([]*kzg4844.Blob, len(hashes))
 
-	println("Blob sidecase length: ", len(blobSidecars))
-	for i, ih := range hashes {
-		sidecar := blobSidecars[i]
-		if sidx := uint64(sidecar.Index); sidx != ih.Index {
-			return nil, fmt.Errorf("expected sidecars to be ordered by hashes, but got %d != %d", sidx, ih.Index)
-		}
-
-		// make sure the blob's kzg commitment hashes to the expected value
-		hash := KZGToVersionedHash(kzg4844.Commitment(sidecar.KZGCommitment))
-		if hash != ih.Hash {
-			println("UNSUCCESSFUL blob hash to commitment hash comparison expected: ", ih.Hash.Hex(), " but got: ", hash.Hex())
-			return nil, fmt.Errorf("expected hash %s for blob at index %d but got %s", ih.Hash, ih.Index, hash)
-		}
-
-		println("SUCCESSFUL blob hash to commitment hash comparison: ", hash.Hex())
-
+	for i, sidecar := range blobSidecars {
 		// confirm blob data is valid by verifying its proof against the commitment
 		if err := VerifyBlobProof(&sidecar.Blob, kzg4844.Commitment(sidecar.KZGCommitment), kzg4844.Proof(sidecar.KZGProof)); err != nil {
 			return nil, fmt.Errorf("blob at index %d failed verification: %w", i, err)
diff --git a/go/ethadapter/mgmtcontractlib/mgmt_contract_lib.go b/go/ethadapter/mgmtcontractlib/mgmt_contract_lib.go
index 805f48e2cc..6d423ff3d5 100644
--- a/go/ethadapter/mgmtcontractlib/mgmt_contract_lib.go
+++ b/go/ethadapter/mgmtcontractlib/mgmt_contract_lib.go
@@ -92,8 +92,6 @@ func (c *contractLibImpl) DecodeTx(tx *types.Transaction) ethadapter.L1Transacti
 	case AddRollupMethod:
 		if tx.Type() == types.BlobTxType {
 			blobHashes := ethadapter.ToIndexedBlobHashes(tx.BlobHashes()...)
-			println("DECODE TX blob hash: ", blobHashes[0].Hash.Hex())
-
 			return &ethadapter.L1RollupHashes{
 				BlobHashes: blobHashes,
 			}
@@ -192,14 +190,6 @@ func (c *contractLibImpl) CreateBlobRollup(t *ethadapter.L1RollupTx) (types.TxDa
 	if sidecar, blobHashes, err = makeSidecar(blobs); err != nil {
 		return nil, fmt.Errorf("failed to make sidecar: %w", err)
 	}
-
-	println("creating rollup blob tx: ", decodedRollup.Hash().Hex())
-	println("creating rollup blob seq no: ", decodedRollup.Header.LastBatchSeqNo)
-
-	for _, blobH := range blobHashes {
-		println("blob hash: ", blobH.Hex())
-	}
-
 	return &types.BlobTx{
 		To:   *c.addr,
 		Data: data,
@@ -526,39 +516,6 @@ func encodeBlobs(data []byte) []kzg4844.Blob {
 	return blobs
 }
 
-//// chunkRollup splits the rollup into blobs based on the max blob size and index's the blobs
-//func chunkRollup(blob ethadapter.Blob) ([]ethadapter.Blob, error) {
-//	maxBlobSize := 128 * 1024 // 128KB in bytes TODO move to config
-//	base64ChunkSize := int(math.Floor(float64(maxBlobSize) * 4 / 3))
-//	base64ChunkSize = base64ChunkSize - (base64ChunkSize % 4) - 4 //metadata size
-//	//indexByteSize := 4 // size in bytes for the chunk index metadata
-//	var blobs []ethadapter.Blob
-//
-//	for i := 0; i < len(blob); i += maxBlobSize {
-//		end := i + maxBlobSize
-//		if end > len(blob) {
-//			end = len(blob)
-//		}
-//
-//		chunkData := blob[i:end]
-//
-//		// ethereum expects fixed blob length so we need to pad it out
-//		actualLength := len(chunkData)
-//		if actualLength < 131072 {
-//			// Add padding
-//			padding := make([]byte, 131072-actualLength)
-//			chunkData = append(chunkData, padding...)
-//		}
-//
-//		if len(chunkData) != 131072 {
-//			return nil, fmt.Errorf("rollup blob must be 131072 in length")
-//		}
-//
-//		blobs = append(blobs, blob)
-//	}
-//	return blobs, nil
-//}
-
 // MakeSidecar builds & returns the BlobTxSidecar and corresponding blob hashes from the raw blob
 // data.
 func makeSidecar(blobs []kzg4844.Blob) (*types.BlobTxSidecar, []gethcommon.Hash, error) {
diff --git a/go/host/l1/publisher.go b/go/host/l1/publisher.go
index 1cbf9d4248..14d84dc661 100644
--- a/go/host/l1/publisher.go
+++ b/go/host/l1/publisher.go
@@ -251,7 +251,6 @@ func (p *Publisher) ExtractObscuroRelevantTransactions(block *types.Block) ([]*e
 			continue
 		}
 
-		println("PUBLISHER fetching blobs with block: ", block.Hash().Hex(), " and blobHashes: ", rollupHashes.BlobHashes[0].Hash.Hex())
 		blobs, err := p.blobResolver.FetchBlobs(p.sendingContext, block.Header(), rollupHashes.BlobHashes)
 		if err != nil {
 			p.logger.Crit("could not fetch blobs publisher", log.ErrKey, err)