From 8cc17b22ebc1905871636ebc8aa0b0761aefa8c6 Mon Sep 17 00:00:00 2001
From: Brandon Liu
Date: Mon, 11 Sep 2023 21:37:09 +0800
Subject: [PATCH] Support public HTTP buckets (#74)

* All operations support public HTTP endpoints [#68]
* fix normalization of local paths via filepath.Abs
* force the parameter awssdk=v2 for s3 buckets to use newer SDK
* handle maxzoom-only extracts [#68, #64]
* fix default maxzoom; support raw geojson geometries [#68, #64]
---
 main.go                |   2 +-
 pmtiles/bucket.go      | 122 +++++++++++++++++++++++++++++++++++++++++
 pmtiles/bucket_test.go |  35 ++++++++++++
 pmtiles/extract.go     | 106 ++++++++++++++++++-----------------
 pmtiles/region.go      |  35 ++++++++++++
 pmtiles/region_test.go |  48 ++++++++++++++++
 pmtiles/server.go      |  50 +++--------------
 pmtiles/show.go        |  44 +++++++--------
 8 files changed, 326 insertions(+), 116 deletions(-)
 create mode 100644 pmtiles/bucket.go
 create mode 100644 pmtiles/bucket_test.go
 create mode 100644 pmtiles/region.go
 create mode 100644 pmtiles/region_test.go

diff --git a/main.go b/main.go
index 9f0a143..19b30f6 100644
--- a/main.go
+++ b/main.go
@@ -49,7 +49,7 @@ var cli struct {
 		Output          string  `arg:"" help:"Output archive." type:"path"`
 		Bucket          string  `help:"Remote bucket of input archive."`
 		Region          string  `help:"local GeoJSON Polygon or MultiPolygon file for area of interest." type:"existingfile"`
-		Maxzoom         uint8   `help:"Maximum zoom level, inclusive."`
+		Maxzoom         int8    `default:-1 help:"Maximum zoom level, inclusive."`
 		DownloadThreads int     `default:4 help:"Number of download threads."`
 		DryRun          bool    `help:"Calculate tiles to extract, but don't download them."`
 		Overfetch       float32 `default:0.1 help:"What ratio of extra data to download to minimize # requests; 0.2 is 20%"`
diff --git a/pmtiles/bucket.go b/pmtiles/bucket.go
new file mode 100644
index 0000000..885a8fa
--- /dev/null
+++ b/pmtiles/bucket.go
@@ -0,0 +1,122 @@
+package pmtiles
+
+import (
+	"context"
+	"fmt"
+	"gocloud.dev/blob"
+	"io"
+	"net/http"
+	"net/url"
+	"os"
+	"path"
+	"path/filepath"
+	"strings"
+)
+
+type Bucket interface {
+	Close() error
+	NewRangeReader(ctx context.Context, key string, offset int64, length int64) (io.ReadCloser, error)
+}
+
+type HttpBucket struct {
+	baseURL string
+}
+
+func (b HttpBucket) NewRangeReader(ctx context.Context, key string, offset, length int64) (io.ReadCloser, error) {
+	reqURL := b.baseURL + "/" + key
+
+	req, err := http.NewRequest("GET", reqURL, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+length-1))
+
+	resp, err := http.DefaultClient.Do(req)
+	if err != nil {
+		return nil, err
+	}
+
+	if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent {
+		resp.Body.Close()
+		return nil, fmt.Errorf("HTTP error: %d", resp.StatusCode)
+	}
+
+	return resp.Body, nil
+}
+
+func (b HttpBucket) Close() error {
+	return nil
+}
+
+type BucketAdapter struct {
+	Bucket *blob.Bucket
+}
+
+func (ba BucketAdapter) NewRangeReader(ctx context.Context, key string, offset, length int64) (io.ReadCloser, error) {
+	reader, err := ba.Bucket.NewRangeReader(ctx, key, offset, length, nil)
+	if err != nil {
+		return nil, err
+	}
+	return reader, nil
+}
+
+func (ba BucketAdapter) Close() error {
+	return ba.Bucket.Close()
+}
+
+func NormalizeBucketKey(bucket string, prefix string, key string) (string, string, error) {
+	if bucket == "" {
+		if strings.HasPrefix(key, "http") {
+			u, err := url.Parse(key)
+			if err != nil {
+				return "", "", err
+			}
+			dir, file := path.Split(u.Path)
+			if strings.HasSuffix(dir, "/") {
+				dir = dir[:len(dir)-1]
+			}
+			return u.Scheme + "://" + u.Host + dir, file, nil
+		} else {
+			if prefix != "" {
+				abs, err := filepath.Abs(prefix)
+				if err != nil {
+					return "", "", err
+				}
+				return "file://" + abs, key, nil
+			}
+			abs, err := filepath.Abs(key)
+			if err != nil {
+				return "", "", err
+			}
+			return "file://" + filepath.Dir(abs), filepath.Base(abs), nil
+		}
+	}
+
+	if strings.HasPrefix(bucket, "s3") {
+		u, err := url.Parse(bucket)
+		if err != nil {
+			fmt.Println("Error parsing URL:", err)
+			return "", "", err
+		}
+		values := u.Query()
+		values.Set("awssdk", "v2")
+		u.RawQuery = values.Encode()
+		return u.String(), key, nil
+	}
+	return bucket, key, nil
+}
+
+func OpenBucket(ctx context.Context, bucketURL string, bucketPrefix string) (Bucket, error) {
+	if strings.HasPrefix(bucketURL, "http") {
+		bucket := HttpBucket{bucketURL}
+		return bucket, nil
+	} else {
+		bucket, err := blob.OpenBucket(ctx, bucketURL)
+		if bucketPrefix != "/" && bucketPrefix != "." {
+			bucket = blob.PrefixedBucket(bucket, path.Clean(bucketPrefix)+string(os.PathSeparator))
+		}
+		wrapped_bucket := BucketAdapter{bucket}
+		return wrapped_bucket, err
+	}
+}
diff --git a/pmtiles/bucket_test.go b/pmtiles/bucket_test.go
new file mode 100644
index 0000000..812f983
--- /dev/null
+++ b/pmtiles/bucket_test.go
@@ -0,0 +1,35 @@
+package pmtiles
+
+import (
+	"github.com/stretchr/testify/assert"
+	"testing"
+	"strings"
+	"fmt"
+)
+
+
+func TestNormalizeLocalFile(t *testing.T) {
+	bucket, key, _ := NormalizeBucketKey("", "", "../foo/bar.pmtiles")
+	assert.Equal(t, "bar.pmtiles", key)
+	assert.True(t, strings.HasSuffix(bucket, "/foo"))
+	assert.True(t, strings.HasPrefix(bucket, "file://"))
+}
+
+func TestNormalizeHttp(t *testing.T) {
+	bucket, key, _ := NormalizeBucketKey("", "", "http://example.com/foo/bar.pmtiles")
+	assert.Equal(t, "bar.pmtiles", key)
+	assert.Equal(t, "http://example.com/foo", bucket)
+}
+
+func TestNormalizeAwsSdkVersion(t *testing.T) {
+	bucket, key, _ := NormalizeBucketKey("s3://mybucket?awssdk=v1&endpoint=https://foo.bar", "", "abc")
+	assert.Equal(t, "abc", key)
+	assert.Equal(t, "s3://mybucket?awssdk=v2&endpoint=https%3A%2F%2Ffoo.bar", bucket)
+}
+func TestNormalizePathPrefixServer(t *testing.T) {
+	bucket, key, _ := NormalizeBucketKey("", "../foo", "")
+	assert.Equal(t, "", key)
+	fmt.Println(bucket)
+	assert.True(t, strings.HasSuffix(bucket, "/foo"))
+	assert.True(t, strings.HasPrefix(bucket, "file://"))
+}
\ No newline at end of file
diff --git a/pmtiles/extract.go b/pmtiles/extract.go
index 6e036ee..d0fbac0 100644
--- a/pmtiles/extract.go
+++ b/pmtiles/extract.go
@@ -2,15 +2,12 @@ package pmtiles
 
 import (
 	"bytes"
-	"context"
 	"container/list"
+	"context"
 	"fmt"
 	"github.com/RoaringBitmap/roaring/roaring64"
 	"github.com/dustin/go-humanize"
-	"github.com/paulmach/orb"
-	"github.com/paulmach/orb/geojson"
 	"github.com/schollz/progressbar/v3"
-	"gocloud.dev/blob"
 	"golang.org/x/sync/errgroup"
 	"io"
 	"io/ioutil"
@@ -18,7 +15,6 @@ import (
 	"math"
 	"os"
 	"sort"
-	"strings"
 	"sync"
 	"time"
 )
@@ -252,31 +248,34 @@ func MergeRanges(ranges []SrcDstRange, overfetch float32) (*list.List, uint64) {
 // 10. write the leaf directories (if any)
 // 11. Get all tiles, and write directly to the output.
-func Extract(logger *log.Logger, bucketURL string, file string, maxzoom uint8, region_file string, output string, download_threads int, overfetch float32, dry_run bool) error {
+func Extract(logger *log.Logger, bucketURL string, key string, maxzoom int8, region_file string, output string, download_threads int, overfetch float32, dry_run bool) error {
 	// 1. fetch the header
-	if bucketURL == "" {
-		if strings.HasPrefix(file, "/") {
-			bucketURL = "file:///"
-		} else {
-			bucketURL = "file://"
-		}
-	}
-
 	fmt.Println("WARNING: extract is an experimental feature and results may not be suitable for production use.")
 	start := time.Now()
-
 	ctx := context.Background()
-	bucket, err := blob.OpenBucket(ctx, bucketURL)
+
+	bucketURL, key, err := NormalizeBucketKey(bucketURL, "", key)
+
+	if err != nil {
+		return err
+	}
+
+	bucket, err := OpenBucket(ctx, bucketURL, "")
+
+	if err != nil {
+		return err
+	}
+
 	if err != nil {
 		return fmt.Errorf("Failed to open bucket for %s, %w", bucketURL, err)
 	}
 	defer bucket.Close()
 
-	r, err := bucket.NewRangeReader(ctx, file, 0, HEADERV3_LEN_BYTES, nil)
+	r, err := bucket.NewRangeReader(ctx, key, 0, HEADERV3_LEN_BYTES)
 	if err != nil {
-		return fmt.Errorf("Failed to create range reader for %s, %w", file, err)
+		return fmt.Errorf("Failed to create range reader for %s, %w", key, err)
 	}
 	b, err := io.ReadAll(r)
 	if err != nil {
@@ -293,35 +292,46 @@ func Extract(logger *log.Logger, bucketURL string, file string, maxzoom uint8, r
 	source_metadata_offset := header.MetadataOffset
 	source_tile_data_offset := header.TileDataOffset
 
-	if header.MaxZoom < maxzoom || maxzoom == 0 {
-		maxzoom = header.MaxZoom
-	}
-
-	// 2. construct a relevance bitmap
-	dat, _ := ioutil.ReadFile(region_file)
-	f, _ := geojson.UnmarshalFeature(dat)
+	fmt.Println(maxzoom)
 
-	var multipolygon orb.MultiPolygon
-	switch v := f.Geometry.(type) {
-	case orb.Polygon:
-		multipolygon = []orb.Polygon{v}
-	case orb.MultiPolygon:
-		multipolygon = v
+	if maxzoom == -1 || int8(header.MaxZoom) < maxzoom {
+		maxzoom = int8(header.MaxZoom)
 	}
 
-	bound := multipolygon.Bound()
+	var relevant_set *roaring64.Bitmap
+	if region_file != "" {
+
+		// 2. construct a relevance bitmap
+		dat, _ := ioutil.ReadFile(region_file)
+		multipolygon, err := UnmarshalRegion(dat)
 
-	boundary_set, interior_set := bitmapMultiPolygon(maxzoom, multipolygon)
+		if err != nil {
+			return err
+		}
 
-	relevant_set := boundary_set
-	relevant_set.Or(interior_set)
-	generalizeOr(relevant_set)
+		bound := multipolygon.Bound()
+
+		boundary_set, interior_set := bitmapMultiPolygon(uint8(maxzoom), multipolygon)
+		relevant_set = boundary_set
+		relevant_set.Or(interior_set)
+		generalizeOr(relevant_set)
+
+		header.MinLonE7 = int32(bound.Left() * 10000000)
+		header.MinLatE7 = int32(bound.Bottom() * 10000000)
+		header.MaxLonE7 = int32(bound.Right() * 10000000)
+		header.MaxLatE7 = int32(bound.Top() * 10000000)
+		header.CenterLonE7 = int32(bound.Center().X() * 10000000)
+		header.CenterLatE7 = int32(bound.Center().Y() * 10000000)
+	} else {
+		relevant_set = roaring64.New()
+		relevant_set.AddRange(0, ZxyToId(uint8(maxzoom)+1, 0, 0))
+	}
 
 	// 3. get relevant entries from root
 	dir_offset := header.RootOffset
 	dir_length := header.RootLength
 
-	root_reader, err := bucket.NewRangeReader(ctx, file, int64(dir_offset), int64(dir_length), nil)
+	root_reader, err := bucket.NewRangeReader(ctx, key, int64(dir_offset), int64(dir_length))
 	if err != nil {
 		return err
 	}
@@ -333,7 +343,7 @@ func Extract(logger *log.Logger, bucketURL string, file string, maxzoom uint8, r
 
 	root_dir := deserialize_entries(bytes.NewBuffer(root_bytes))
 
-	tile_entries, leaves := RelevantEntries(relevant_set, maxzoom, root_dir)
+	tile_entries, leaves := RelevantEntries(relevant_set, uint8(maxzoom), root_dir)
 
 	// 4. get all relevant leaf entries
 
@@ -352,7 +362,7 @@ func Extract(logger *log.Logger, bucketURL string, file string, maxzoom uint8, r
 		}
 		or := overfetch_leaves.Remove(overfetch_leaves.Front()).(OverfetchRange)
 
-		slab_r, err := bucket.NewRangeReader(ctx, file, int64(or.Rng.SrcOffset), int64(or.Rng.Length), nil)
+		slab_r, err := bucket.NewRangeReader(ctx, key, int64(or.Rng.SrcOffset), int64(or.Rng.Length))
 		if err != nil {
 			return err
 		}
@@ -365,7 +375,7 @@ func Extract(logger *log.Logger, bucketURL string, file string, maxzoom uint8, r
 			return err
 		}
 		leafdir := deserialize_entries(bytes.NewBuffer(leaf_bytes))
-		new_entries, new_leaves := RelevantEntries(relevant_set, maxzoom, leafdir)
+		new_entries, new_leaves := RelevantEntries(relevant_set, uint8(maxzoom), leafdir)
 
 		if len(new_leaves) > 0 {
 			panic("This doesn't support leaf level 2+.")
@@ -412,13 +422,7 @@ func Extract(logger *log.Logger, bucketURL string, file string, maxzoom uint8, r
 	header.TileEntriesCount = uint64(len(tile_entries))
 	header.TileContentsCount = tile_contents
 
-	header.MinLonE7 = int32(bound.Left() * 10000000)
-	header.MinLatE7 = int32(bound.Bottom() * 10000000)
-	header.MaxLonE7 = int32(bound.Right() * 10000000)
-	header.MaxLatE7 = int32(bound.Top() * 10000000)
-	header.CenterLonE7 = int32(bound.Center().X() * 10000000)
-	header.CenterLatE7 = int32(bound.Center().Y() * 10000000)
-	header.MaxZoom = maxzoom
+	header.MaxZoom = uint8(maxzoom)
 
 	header_bytes := serialize_header(header)
 
@@ -446,7 +450,7 @@ func Extract(logger *log.Logger, bucketURL string, file string, maxzoom uint8, r
 	}
 
 	// 9. get and write the metadata
-	metadata_reader, err := bucket.NewRangeReader(ctx, file, int64(source_metadata_offset), int64(header.MetadataLength), nil)
+	metadata_reader, err := bucket.NewRangeReader(ctx, key, int64(source_metadata_offset), int64(header.MetadataLength))
 	if err != nil {
 		return err
 	}
@@ -472,7 +476,7 @@ func Extract(logger *log.Logger, bucketURL string, file string, maxzoom uint8, r
 	var mu sync.Mutex
 
 	downloadPart := func(or OverfetchRange) error {
-		tile_r, err := bucket.NewRangeReader(ctx, file, int64(source_tile_data_offset+or.Rng.SrcOffset), int64(or.Rng.Length), nil)
+		tile_r, err := bucket.NewRangeReader(ctx, key, int64(source_tile_data_offset+or.Rng.SrcOffset), int64(or.Rng.Length))
 		if err != nil {
 			return err
 		}
@@ -533,9 +537,9 @@ func Extract(logger *log.Logger, bucketURL string, file string, maxzoom uint8, r
 	}
 
 	fmt.Printf("Completed in %v with %v download threads.\n", time.Since(start), download_threads)
-	total_requests := 2 // header + root
+	total_requests := 2                    // header + root
 	total_requests += num_overfetch_leaves // leaves
-	total_requests += 1 // metadata
+	total_requests += 1                    // metadata
 	total_requests += num_overfetch_ranges
 	fmt.Printf("Extract required %d total requests.\n", total_requests)
 	fmt.Printf("Extract transferred %s (overfetch %v) for an archive size of %s\n", humanize.Bytes(total_bytes), overfetch, humanize.Bytes(total_actual_bytes))
diff --git a/pmtiles/region.go b/pmtiles/region.go
new file mode 100644
index 0000000..1b3b608
--- /dev/null
+++ b/pmtiles/region.go
@@ -0,0 +1,35 @@
+package pmtiles
+
+import (
+	"fmt"
+	"github.com/paulmach/orb"
+	"github.com/paulmach/orb/geojson"
+)
+
+func UnmarshalRegion(data []byte) (orb.MultiPolygon, error) {
+	f, err := geojson.UnmarshalFeature(data)
+
+	if err == nil {
+		switch v := f.Geometry.(type) {
+		case orb.Polygon:
+			return []orb.Polygon{v}, nil
+		case orb.MultiPolygon:
+			return v, nil
+		}
+	}
+
+	g, err := geojson.UnmarshalGeometry(data)
+
+	if err != nil {
+		return nil, err
+	}
+
+	switch v := g.Geometry().(type) {
+	case orb.Polygon:
+		return []orb.Polygon{v}, nil
+	case orb.MultiPolygon:
+		return v, nil
+	}
+
+	return nil, fmt.Errorf("No geometry")
+}
diff --git a/pmtiles/region_test.go b/pmtiles/region_test.go
new file mode 100644
index 0000000..329d3df
--- /dev/null
+++ b/pmtiles/region_test.go
@@ -0,0 +1,48 @@
+package pmtiles
+
+import (
+	"github.com/stretchr/testify/assert"
+	"testing"
+)
+
+func TestRawPolygonRegion(t *testing.T) {
+	result, err := UnmarshalRegion([]byte(`{
+		"type": "Polygon",
+		"coordinates": [[[0, 0],[0,1],[1,1],[0,0]]]
+	}`))
+	assert.Nil(t, err)
+	assert.Equal(t, 1, len(result))
+}
+
+func TestRawMultiPolygonRegion(t *testing.T) {
+	result, err := UnmarshalRegion([]byte(`{
+		"type": "MultiPolygon",
+		"coordinates": [[[[0, 0],[0,1],[1,1],[0,0]]]]
+	}`))
+	assert.Nil(t, err)
+	assert.Equal(t, 1, len(result))
+}
+
+func TestRawPolygonFeatureRegion(t *testing.T) {
+	result, err := UnmarshalRegion([]byte(`{
+		"type": "Feature",
+		"geometry": {
+			"type": "Polygon",
+			"coordinates": [[[0, 0],[0,1],[1,1],[0,0]]]
+		}
+	}`))
+	assert.Nil(t, err)
+	assert.Equal(t, 1, len(result))
+}
+
+func TestRawMultiPolygonFeatureRegion(t *testing.T) {
+	result, err := UnmarshalRegion([]byte(`{
+		"type": "Feature",
+		"geometry": {
+			"type": "MultiPolygon",
+			"coordinates": [[[[0, 0],[0,1],[1,1],[0,0]]]]
+		}
+	}`))
+	assert.Nil(t, err)
+	assert.Equal(t, 1, len(result))
+}
diff --git a/pmtiles/server.go b/pmtiles/server.go
index cc0e8da..ea8b4bb 100644
--- a/pmtiles/server.go
+++ b/pmtiles/server.go
@@ -7,15 +7,10 @@ import (
 	"context"
 	"encoding/json"
 	"errors"
-	"fmt"
-	"gocloud.dev/blob"
 	"io"
 	"log"
-	"os"
-	"path"
 	"regexp"
 	"strconv"
-	"strings"
 )
 
 type CacheKey struct {
@@ -43,29 +38,9 @@ type Response struct {
 	ok     bool
 }
 
-// type HTTPFetcher struct {
-// 	bucket string
-// 	client *http.Client
-// }
-
-// func (fetcher HTTPFetcher) Do(key Key, readFunc func(io.Reader)) bool {
-// 	archive := fetcher.bucket + "/" + key.name + ".pmtiles"
-// 	fetch, _ := http.NewRequest("GET", archive, nil)
-// 	end := key.rng.Offset + uint64(key.rng.Length) - 1
-// 	range_header := fmt.Sprintf("bytes=%d-%d", key.rng.Offset, end)
-// 	fetch.Header.Add("Range", range_header)
-// 	fetch_resp, err := fetcher.client.Do(fetch)
-// 	if err != nil || fetch_resp.StatusCode >= 300 || fetch_resp.ContentLength != int64(key.rng.Length) {
-// 		return false
-// 	}
-// 	defer fetch_resp.Body.Close()
-// 	readFunc(fetch_resp.Body)
-// 	return true
-// }
-
 type Server struct {
 	reqs      chan Request
-	bucket    *blob.Bucket
+	bucket    Bucket
 	logger    *log.Logger
 	cacheSize int
 	cors      string
 }
 
 func NewServer(bucketURL string, prefix string, logger *log.Logger, cacheSize int, cors string, publicHostname string) (*Server, error) {
-	if bucketURL == "" {
-		if strings.HasPrefix(prefix, "/") {
-			bucketURL = "file:///"
-		} else {
-			bucketURL = "file://"
-		}
-	}
-
 	reqs := make(chan Request, 8)
 
 	ctx := context.Background()
 
-	bucket, err := blob.OpenBucket(ctx, bucketURL)
+	bucketURL, _, err := NormalizeBucketKey(bucketURL, prefix, "")
 
-	if prefix != "/" && prefix != "." {
-		bucket = blob.PrefixedBucket(bucket, path.Clean(prefix)+string(os.PathSeparator))
+	if err != nil {
+		return nil, err
 	}
 
+	bucket, err := OpenBucket(ctx, bucketURL, prefix)
 	if err != nil {
-		return nil, fmt.Errorf("Failed to open bucket for %s, %w", prefix, err)
+		return nil, err
 	}
 
 	l := &Server{
@@ -140,7 +108,7 @@ func (server *Server) Start() {
 				}
 
 				server.logger.Printf("fetching %s %d-%d", key.name, offset, length)
-				r, err := server.bucket.NewRangeReader(ctx, key.name+".pmtiles", offset, length, nil)
+				r, err := server.bucket.NewRangeReader(ctx, key.name+".pmtiles", offset, length)
 
 				// TODO: store away ETag
 				if err != nil {
@@ -225,7 +193,7 @@ func (server *Server) get_header_metadata(ctx context.Context, name string) (err
 		return nil, false, HeaderV3{}, nil
 	}
 
-	r, err := server.bucket.NewRangeReader(ctx, name+".pmtiles", int64(header.MetadataOffset), int64(header.MetadataLength), nil)
+	r, err := server.bucket.NewRangeReader(ctx, name+".pmtiles", int64(header.MetadataOffset), int64(header.MetadataLength))
 	if err != nil {
 		return nil, false, HeaderV3{}, nil
 	}
@@ -351,7 +319,7 @@ func (server *Server) get_tile(ctx context.Context, http_headers map[string]stri
 	entry, ok := find_tile(directory, tile_id)
 	if ok {
 		if entry.RunLength > 0 {
-			r, err := server.bucket.NewRangeReader(ctx, name+".pmtiles", int64(header.TileDataOffset+entry.Offset), int64(entry.Length), nil)
+			r, err := server.bucket.NewRangeReader(ctx, name+".pmtiles", int64(header.TileDataOffset+entry.Offset), int64(entry.Length))
 			if err != nil {
 				return 500, http_headers, []byte("Network error")
 			}
diff --git a/pmtiles/show.go b/pmtiles/show.go
index 67dc3e8..4a2fbc2 100644
--- a/pmtiles/show.go
+++ b/pmtiles/show.go
@@ -6,38 +6,36 @@ import (
 	"context"
 	"encoding/json"
 	"fmt"
-	"github.com/dustin/go-humanize"
-	"gocloud.dev/blob"
+	// "github.com/dustin/go-humanize"
 	"io"
 	"log"
 	"os"
-	"strings"
 )
 
-func Show(logger *log.Logger, bucketURL string, file string, show_tile bool, z int, x int, y int) error {
-	if bucketURL == "" {
-		if strings.HasPrefix(file, "/") {
-			bucketURL = "file:///"
-		} else {
-			bucketURL = "file://"
-		}
+func Show(logger *log.Logger, bucketURL string, key string, show_tile bool, z int, x int, y int) error {
+	ctx := context.Background()
+
+	bucketURL, key, err := NormalizeBucketKey(bucketURL, "", key)
+
+	if err != nil {
+		return err
 	}
 
-	ctx := context.Background()
-	bucket, err := blob.OpenBucket(ctx, bucketURL)
+	bucket, err := OpenBucket(ctx, bucketURL, "")
+
 	if err != nil {
 		return fmt.Errorf("Failed to open bucket for %s, %w", bucketURL, err)
 	}
 	defer bucket.Close()
 
-	r, err := bucket.NewRangeReader(ctx, file, 0, 16384, nil)
+	r, err := bucket.NewRangeReader(ctx, key, 0, 16384)
 	if err != nil {
-		return fmt.Errorf("Failed to create range reader for %s, %w", file, err)
+		return fmt.Errorf("Failed to create range reader for %s, %w", key, err)
 	}
 	b, err := io.ReadAll(r)
 	if err != nil {
-		return fmt.Errorf("Failed to read %s, %w", file, err)
+		return fmt.Errorf("Failed to read %s, %w", key, err)
 	}
 	r.Close()
@@ -49,7 +47,7 @@ func Show(logger *log.Logger, bucketURL string, file string, show_tile bool, z i
 			return fmt.Errorf("PMTiles version %d detected; please use 'pmtiles convert' to upgrade to version 3.", spec_version)
 		}
 
-		return fmt.Errorf("Failed to read %s, %w", file, err)
+		return fmt.Errorf("Failed to read %s, %w", key, err)
 	}
 
 	if !show_tile {
@@ -69,7 +67,7 @@ func Show(logger *log.Logger, bucketURL string, file string, show_tile bool, z i
 			tile_type = "Unknown"
 		}
 		fmt.Printf("pmtiles spec version: %d\n", header.SpecVersion)
-		fmt.Printf("total size: %s\n", humanize.Bytes(uint64(r.Size())))
+		// fmt.Printf("total size: %s\n", humanize.Bytes(uint64(r.Size())))
 		fmt.Printf("tile type: %s\n", tile_type)
 		fmt.Printf("bounds: %f,%f %f,%f\n", float64(header.MinLonE7)/10000000, float64(header.MinLatE7)/10000000, float64(header.MaxLonE7)/10000000, float64(header.MaxLatE7)/10000000)
 		fmt.Printf("min zoom: %d\n", header.MinZoom)
@@ -83,9 +81,9 @@ func Show(logger *log.Logger, bucketURL string, file string, show_tile bool, z i
 		fmt.Printf("internal compression: %d\n", header.InternalCompression)
 		fmt.Printf("tile compression: %d\n", header.TileCompression)
 
-		metadata_reader, err := bucket.NewRangeReader(ctx, file, int64(header.MetadataOffset), int64(header.MetadataLength), nil)
+		metadata_reader, err := bucket.NewRangeReader(ctx, key, int64(header.MetadataOffset), int64(header.MetadataLength))
 		if err != nil {
-			return fmt.Errorf("Failed to create range reader for %s, %w", file, err)
+			return fmt.Errorf("Failed to create range reader for %s, %w", key, err)
 		}
 
 		var metadata_bytes []byte
 			r, _ := gzip.NewReader(metadata_reader)
 			metadata_bytes, err = io.ReadAll(r)
 			if err != nil {
-				return fmt.Errorf("Failed to read %s, %w", file, err)
+				return fmt.Errorf("Failed to read %s, %w", key, err)
 			}
 		} else {
 			metadata_bytes, err = io.ReadAll(metadata_reader)
 			if err != nil {
-				return fmt.Errorf("Failed to read %s, %w", file, err)
+				return fmt.Errorf("Failed to read %s, %w", key, err)
 			}
 		}
 		metadata_reader.Close()
@@ -123,7 +121,7 @@ func Show(logger *log.Logger, bucketURL string, file string, show_tile bool, z i
 		dir_offset := header.RootOffset
 		dir_length := header.RootLength
 		for depth := 0; depth <= 3; depth++ {
-			r, err := bucket.NewRangeReader(ctx, file, int64(dir_offset), int64(dir_length), nil)
+			r, err := bucket.NewRangeReader(ctx, key, int64(dir_offset), int64(dir_length))
 			if err != nil {
 				return fmt.Errorf("Network error")
 			}
@@ -136,7 +134,7 @@ func Show(logger *log.Logger, bucketURL string, file string, show_tile bool, z i
 			entry, ok := find_tile(directory, tile_id)
 			if ok {
 				if entry.RunLength > 0 {
-					tile_r, err := bucket.NewRangeReader(ctx, file, int64(header.TileDataOffset+entry.Offset), int64(entry.Length), nil)
+					tile_r, err := bucket.NewRangeReader(ctx, key, int64(header.TileDataOffset+entry.Offset), int64(entry.Length))
 					if err != nil {
 						return fmt.Errorf("Network error")
 					}